diff --git a/Cargo.lock b/Cargo.lock index 200c9a038..c01404fef 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -137,6 +137,17 @@ version = "0.2.21" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "683d7910e743518b0e34f1186f92494becacb047c7b6bf616c96772180fef923" +[[package]] +name = "alloy-chains" +version = "0.2.30" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "90f374d3c6d729268bbe2d0e0ff992bb97898b2df756691a62ee1d5f0506bc39" +dependencies = [ + "alloy-primitives 1.5.2", + "num_enum 0.7.5", + "strum 0.27.2", +] + [[package]] name = "alloy-consensus" version = "1.3.0" @@ -385,6 +396,45 @@ dependencies = [ "tiny-keccak", ] +[[package]] +name = "alloy-provider" +version = "1.0.41" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "de2597751539b1cc8fe4204e5325f9a9ed83fcacfb212018dfcfa7877e76de21" +dependencies = [ + "alloy-chains", + "alloy-consensus", + "alloy-eips", + "alloy-json-rpc", + "alloy-network", + "alloy-network-primitives", + "alloy-primitives 1.5.2", + "alloy-rpc-client", + "alloy-rpc-types-eth", + "alloy-signer", + "alloy-sol-types 1.5.2", + "alloy-transport", + "alloy-transport-http", + "async-stream", + "async-trait", + "auto_impl", + "dashmap 6.1.0", + "either", + "futures", + "futures-utils-wasm", + "lru 0.13.0", + "parking_lot 0.12.4", + "pin-project", + "reqwest 0.12.20", + "serde", + "serde_json", + "thiserror 2.0.12", + "tokio", + "tracing", + "url", + "wasmtimer", +] + [[package]] name = "alloy-rlp" version = "0.3.12" @@ -407,6 +457,29 @@ dependencies = [ "syn 2.0.103", ] +[[package]] +name = "alloy-rpc-client" +version = "1.0.41" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "edf8eb8be597cfa8c312934d2566ec4516f066d69164f9212d7a148979fdcfd8" +dependencies = [ + "alloy-json-rpc", + "alloy-primitives 1.5.2", + "alloy-transport", + "alloy-transport-http", + "futures", + "pin-project", + "reqwest 0.12.20", + "serde", + "serde_json", 
+ "tokio", + "tokio-stream", + "tower 0.5.2", + "tracing", + "url", + "wasmtimer", +] + [[package]] name = "alloy-rpc-types-any" version = "1.0.41" @@ -629,6 +702,45 @@ dependencies = [ "serde", ] +[[package]] +name = "alloy-transport" +version = "1.0.41" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "025a940182bddaeb594c26fe3728525ae262d0806fe6a4befdf5d7bc13d54bce" +dependencies = [ + "alloy-json-rpc", + "alloy-primitives 1.5.2", + "auto_impl", + "base64 0.22.1", + "derive_more 2.0.1", + "futures", + "futures-utils-wasm", + "parking_lot 0.12.4", + "serde", + "serde_json", + "thiserror 2.0.12", + "tokio", + "tower 0.5.2", + "tracing", + "url", + "wasmtimer", +] + +[[package]] +name = "alloy-transport-http" +version = "1.0.41" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e3b5064d1e1e1aabc918b5954e7fb8154c39e77ec6903a581b973198b26628fa" +dependencies = [ + "alloy-json-rpc", + "alloy-transport", + "reqwest 0.12.20", + "serde_json", + "tower 0.5.2", + "tracing", + "url", +] + [[package]] name = "alloy-trie" version = "0.9.3" @@ -3322,8 +3434,8 @@ dependencies = [ "alloy-rlp", "alloy-rlp-derive", "anyhow", - "ark-ec 0.4.2", "bls_on_arkworks", + "crypto-utils", "geth-primitives", "ismp", "log", @@ -3332,7 +3444,6 @@ dependencies = [ "primitive-types 0.13.1", "ssz-rs", "sync-committee-primitives", - "sync-committee-verifier", ] [[package]] @@ -4648,10 +4759,15 @@ name = "crypto-utils" version = "0.2.0" dependencies = [ "anyhow", + "bls_on_arkworks", + "hex", "parity-scale-codec", "scale-info", + "serde", + "serde-hex-utils", "sp-core", "sp-io", + "ssz-rs", ] [[package]] @@ -5719,6 +5835,20 @@ dependencies = [ "parking_lot_core 0.9.11", ] +[[package]] +name = "dashmap" +version = "6.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5041cc499144891f3790297212f32a74fb938e5136a14943f338ef9e0ae276cf" +dependencies = [ + "cfg-if", + "crossbeam-utils", + "hashbrown 0.14.5", + 
"lock_api", + "once_cell", + "parking_lot_core 0.9.11", +] + [[package]] name = "dashu" version = "0.4.2" @@ -8302,6 +8432,7 @@ dependencies = [ "ismp-optimism", "ismp-parachain", "ismp-parachain-runtime-api", + "ismp-pharos", "ismp-polygon", "ismp-sync-committee", "ismp-tendermint", @@ -8566,7 +8697,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "68a7f542ee6b35af73b06abc0dad1c1bae89964e4e253bc4b587b91c9637867b" dependencies = [ "cfg-if", - "dashmap", + "dashmap 5.5.3", "futures", "futures-timer", "no-std-compat", @@ -10578,6 +10709,25 @@ dependencies = [ "polkadot-sdk", ] +[[package]] +name = "ismp-pharos" +version = "0.1.0" +dependencies = [ + "anyhow", + "geth-primitives", + "ismp", + "log", + "pallet-ismp", + "pallet-ismp-host-executive", + "parity-scale-codec", + "pharos-primitives", + "pharos-state-machine", + "pharos-verifier", + "polkadot-sdk", + "scale-info", + "sync-committee-primitives", +] + [[package]] name = "ismp-polygon" version = "0.1.0" @@ -12052,6 +12202,15 @@ dependencies = [ "hashbrown 0.15.4", ] +[[package]] +name = "lru" +version = "0.13.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "227748d55f2f0ab4735d87fd623798cb6b664512fe979705f829c9f81c934465" +dependencies = [ + "hashbrown 0.15.4", +] + [[package]] name = "lru-cache" version = "0.1.2" @@ -15380,19 +15539,23 @@ dependencies = [ "alloy-primitives 1.5.2", "alloy-sol-types 1.5.2", "anyhow", + "bls_on_arkworks", "ckb-merkle-mountain-range", + "crypto-utils", "dotenv", "env_logger 0.11.8", "ethereum-triedb", "ethers", "evm-state-machine", "futures", + "geth-primitives", "hash-db", "hex", "hyperbridge-client-machine", "ismp", "ismp-bsc", "ismp-grandpa", + "ismp-pharos", "ismp-solidity-abi", "ismp-sync-committee", "ismp-testsuite", @@ -15415,6 +15578,8 @@ dependencies = [ "pallet-token-governor", "pallet-xcm-gateway", "parity-scale-codec", + "pharos-primitives", + "pharos-prover", "polkadot-sdk", "primitive-types 0.13.1", 
"reqwest 0.11.27", @@ -17294,6 +17459,76 @@ dependencies = [ "rustc_version 0.4.1", ] +[[package]] +name = "pharos-primitives" +version = "0.1.0" +dependencies = [ + "alloy-primitives 1.5.2", + "anyhow", + "crypto-utils", + "geth-primitives", + "hex-literal 0.4.1", + "ismp", + "parity-scale-codec", + "primitive-types 0.13.1", + "serde", + "sp-io", +] + +[[package]] +name = "pharos-prover" +version = "0.1.0" +dependencies = [ + "alloy-eips", + "alloy-provider", + "ethabi", + "futures", + "geth-primitives", + "hex", + "pharos-primitives", + "pharos-verifier", + "primitive-types 0.13.1", + "reqwest 0.11.27", + "serde", + "serde_json", + "sp-core", + "thiserror 2.0.12", +] + +[[package]] +name = "pharos-state-machine" +version = "0.1.0" +dependencies = [ + "alloy-rlp", + "ethabi", + "evm-state-machine", + "geth-primitives", + "hex-literal 0.4.1", + "ismp", + "pallet-ismp-host-executive", + "parity-scale-codec", + "pharos-primitives", + "polkadot-sdk", + "primitive-types 0.13.1", +] + +[[package]] +name = "pharos-verifier" +version = "0.1.0" +dependencies = [ + "anyhow", + "bls_on_arkworks", + "crypto-utils", + "geth-primitives", + "hex", + "hex-literal 0.4.1", + "ismp", + "log", + "pharos-primitives", + "primitive-types 0.13.1", + "thiserror 2.0.12", +] + [[package]] name = "phf" version = "0.11.3" @@ -26367,6 +26602,15 @@ dependencies = [ "strum_macros 0.26.4", ] +[[package]] +name = "strum" +version = "0.27.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "af23d6f6c1a224baef9d3f61e287d2761385a5b88fdab4eb4c6f11aeb54c4bcf" +dependencies = [ + "strum_macros 0.27.2", +] + [[package]] name = "strum_macros" version = "0.24.3" @@ -26406,6 +26650,18 @@ dependencies = [ "syn 2.0.103", ] +[[package]] +name = "strum_macros" +version = "0.27.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7695ce3845ea4b33927c055a39dc438a45b059f7c1b3d91d38d10355fb8cbca7" +dependencies = [ + "heck 0.5.0", + "proc-macro2", + "quote", 
+ "syn 2.0.103", +] + [[package]] name = "subenum" version = "1.1.2" @@ -27148,6 +27404,7 @@ name = "sync-committee-primitives" version = "0.1.1" dependencies = [ "anyhow", + "crypto-utils", "hex", "hex-literal 0.4.1", "parity-scale-codec", @@ -27871,6 +28128,7 @@ dependencies = [ "hex-literal 0.4.1", "ismp", "ismp-grandpa", + "ismp-pharos", "ismp-solidity-abi", "log", "mmr-primitives", @@ -27879,6 +28137,7 @@ dependencies = [ "pallet-ismp-demo", "pallet-ismp-rpc", "parity-scale-codec", + "pharos-primitives", "polkadot-sdk", "primitive-types 0.12.2", "sp-core", @@ -27893,6 +28152,7 @@ dependencies = [ "tesseract-evm", "tesseract-grandpa", "tesseract-messaging", + "tesseract-pharos", "tesseract-polygon", "tesseract-primitives", "tesseract-substrate", @@ -27925,6 +28185,30 @@ dependencies = [ "transaction-fees", ] +[[package]] +name = "tesseract-pharos" +version = "0.1.0" +dependencies = [ + "anyhow", + "async-trait", + "ethers", + "geth-primitives", + "ismp", + "ismp-pharos", + "log", + "parity-scale-codec", + "pharos-primitives", + "pharos-prover", + "pharos-verifier", + "primitive-types 0.13.1", + "serde", + "serde_json", + "sp-core", + "tesseract-evm", + "tesseract-primitives", + "tokio", +] + [[package]] name = "tesseract-polygon" version = "0.1.0" @@ -30312,6 +30596,20 @@ dependencies = [ "winch-codegen", ] +[[package]] +name = "wasmtimer" +version = "0.4.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1c598d6b99ea013e35844697fc4670d08339d5cda15588f193c6beedd12f644b" +dependencies = [ + "futures", + "js-sys", + "parking_lot 0.12.4", + "pin-utils", + "slab", + "wasm-bindgen", +] + [[package]] name = "web-sys" version = "0.3.77" diff --git a/Cargo.toml b/Cargo.toml index ec01273b5..669435209 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -35,6 +35,7 @@ members = [ "modules/ismp/clients/arbitrum", "modules/ismp/clients/optimism", "modules/ismp/clients/bsc", + "modules/ismp/clients/pharos", "modules/ismp/clients/grandpa", 
"modules/ismp/testsuite", "modules/ismp/clients/ismp-arbitrum", @@ -59,6 +60,9 @@ members = [ "modules/consensus/tendermint/prover", "modules/consensus/tendermint/primitives", "modules/consensus/tendermint/ics23-primitives", + "modules/consensus/pharos/primitives", + "modules/consensus/pharos/verifier", + "modules/consensus/pharos/prover", "modules/trees/ethereum", "modules/pallets/mmr", "modules/pallets/mmr/primitives", @@ -68,6 +72,7 @@ members = [ "modules/ismp/state-machines/evm", "modules/ismp/state-machines/substrate", "modules/ismp/state-machines/hyperbridge", + "modules/ismp/state-machines/pharos", "modules/pallets/consensus-incentives", "modules/pallets/messaging-fees", @@ -117,6 +122,7 @@ members = [ "tesseract/consensus/relayer", "tesseract/consensus/polygon", "tesseract/consensus/tendermint", + "tesseract/consensus/pharos", # Airdrop @@ -264,9 +270,13 @@ tendermint-verifier = { path = "./modules/consensus/tendermint/verifier", defaul tendermint-primitives = { path = "./modules/consensus/tendermint/primitives", default-features = false } tendermint-prover = { path = "./modules/consensus/tendermint/prover", default-features = false } tendermint-ics23-primitives = { path = "./modules/consensus/tendermint/ics23-primitives", default-features = false } +pharos-primitives = { path = "./modules/consensus/pharos/primitives", default-features = false } +pharos-verifier = { path = "./modules/consensus/pharos/verifier", default-features = false } +pharos-prover = { path = "./modules/consensus/pharos/prover", default-features = false } # consensus clients ismp-bsc = { path = "./modules/ismp/clients/bsc", default-features = false } +ismp-pharos = { path = "./modules/ismp/clients/pharos", default-features = false } ismp-sync-committee = { path = "./modules/ismp/clients/sync-committee", default-features = false } arbitrum-verifier = { path = "./modules/ismp/clients/arbitrum", default-features = false } op-verifier = { path = "./modules/ismp/clients/optimism", 
default-features = false } @@ -277,6 +287,7 @@ ismp-tendermint = { path = "modules/ismp/clients/tendermint", default-features = # state machine clients evm-state-machine = { path = "./modules/ismp/state-machines/evm", default-features = false } +pharos-state-machine = { path = "./modules/ismp/state-machines/pharos", default-features = false } hyperbridge-client-machine = { path = "modules/ismp/state-machines/hyperbridge", default-features = false } # ismp modules @@ -333,6 +344,7 @@ tesseract-grandpa = { path = "tesseract/consensus/grandpa" } tesseract-consensus = { path = "tesseract/consensus/relayer" } tesseract-polygon = { path = "tesseract/consensus/polygon" } tesseract-tendermint = { path = "tesseract/consensus/tendermint" } +tesseract-pharos = { path = "tesseract/consensus/pharos" } [workspace.dependencies.codec] @@ -383,6 +395,15 @@ version = "1.1.2" default-features = false features = ["rlp"] +[workspace.dependencies.alloy-provider] +version = "~1.0" +default-features = false +features = ["reqwest", "reqwest-default-tls"] + +[workspace.dependencies.alloy-eips] +version = "~1.3" +default-features = false + [workspace.dependencies.scale-info] version = "2.1.1" default-features = false diff --git a/modules/consensus/bsc/verifier/Cargo.toml b/modules/consensus/bsc/verifier/Cargo.toml index b00886edc..43942c21c 100644 --- a/modules/consensus/bsc/verifier/Cargo.toml +++ b/modules/consensus/bsc/verifier/Cargo.toml @@ -17,10 +17,9 @@ primitive-types = { workspace = true } codec = { workspace = true } ismp = { workspace = true, default-features = false } geth-primitives = { workspace = true, default-features = false } -sync-committee-verifier = { workspace = true, default-features = false } +crypto-utils = { workspace = true, default-features = false } sync-committee-primitives = { workspace = true, default-features = false } bls = { workspace = true } -ark-ec = { workspace = true } ssz-rs = { git = "https://github.com/polytope-labs/ssz-rs", branch = "main", 
default-features = false } [dependencies.polkadot-sdk] @@ -38,9 +37,8 @@ std = [ "alloy-primitives/std", "alloy-rlp/std", "bls/std", - "sync-committee-verifier/std", + "crypto-utils/std", "sync-committee-primitives/std", "geth-primitives/std", - "ark-ec/std", "ssz-rs/std", ] diff --git a/modules/consensus/bsc/verifier/src/lib.rs b/modules/consensus/bsc/verifier/src/lib.rs index 0892e4a49..31a0650a1 100644 --- a/modules/consensus/bsc/verifier/src/lib.rs +++ b/modules/consensus/bsc/verifier/src/lib.rs @@ -20,14 +20,13 @@ use polkadot_sdk::*; use alloc::vec::Vec; use anyhow::anyhow; -use bls::{point_to_pubkey, types::G1ProjectivePoint}; +use crypto_utils::aggregate_public_keys; use geth_primitives::{CodecHeader, Header}; use ismp::messaging::Keccak256; use primitives::{parse_extra, BscClientUpdate, Config, VALIDATOR_BIT_SET_SIZE}; use sp_core::H256; use ssz_rs::{Bitvector, Deserialize}; use sync_committee_primitives::constants::BlsPublicKey; -use sync_committee_verifier::crypto::pubkey_to_projective; pub mod primitives; @@ -164,12 +163,3 @@ pub fn verify_bsc_header( next_validators: next_validator_addresses, }) } - -pub fn aggregate_public_keys(keys: &[BlsPublicKey]) -> Vec { - let aggregate = keys - .into_iter() - .filter_map(|key| pubkey_to_projective(key).ok()) - .fold(G1ProjectivePoint::default(), |acc, next| acc + next); - - point_to_pubkey(aggregate.into()) -} diff --git a/modules/consensus/pharos/primitives/Cargo.toml b/modules/consensus/pharos/primitives/Cargo.toml new file mode 100644 index 000000000..b4440eaf4 --- /dev/null +++ b/modules/consensus/pharos/primitives/Cargo.toml @@ -0,0 +1,33 @@ +[package] +name = "pharos-primitives" +version = "0.1.0" +edition = "2021" +description = "Primitive types for Pharos consensus verifier" +authors = ["Polytope Labs "] +publish = false + +[dependencies] +codec = { workspace = true, features = ["derive"] } +alloy-primitives = { workspace = true } +crypto-utils = { workspace = true, default-features = false } 
+primitive-types = { workspace = true, features = ["serde_no_std", "impl-codec"] } +hex-literal = { workspace = true } +serde = { workspace = true, optional = true, features = ["derive"] } +sp-io = { workspace = true, default-features = false } +anyhow = { workspace = true, default-features = false } +geth-primitives = { workspace = true, default-features = false } +ismp = { workspace = true, default-features = false } + +[features] +default = ["std"] +std = [ + "codec/std", + "alloy-primitives/std", + "crypto-utils/std", + "primitive-types/std", + "anyhow/std", + "sp-io/std", + "serde", + "geth-primitives/std", + "ismp/std", +] diff --git a/modules/consensus/pharos/primitives/src/constants.rs b/modules/consensus/pharos/primitives/src/constants.rs new file mode 100644 index 000000000..5a29bd4b6 --- /dev/null +++ b/modules/consensus/pharos/primitives/src/constants.rs @@ -0,0 +1,122 @@ +// Copyright (C) Polytope Labs Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//! Constants and configuration for Pharos consensus. + +use alloy_primitives::Address; + +/// Re-export BLS types from crypto-utils +pub use crypto_utils::{ + BlsPublicKey, BlsSignature, BLS_PUBLIC_KEY_BYTES_LEN, BLS_SIGNATURE_BYTES_LEN, +}; + +/// The staking contract address where validator set is stored. 
+/// Address: 0x4100000000000000000000000000000000000000 +pub const STAKING_CONTRACT_ADDRESS: Address = + Address::new([0x41, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]); + +/// Consensus ID for Pharos network +pub const PHAROS_CONSENSUS_ID: [u8; 4] = *b"PHAR"; + +/// Mainnet epoch length in seconds (4 hours) +pub const MAINNET_EPOCH_LENGTH_SECS: u64 = 4 * 60 * 60; // 14400 seconds + +/// Testnet (Atlantic) epoch length in seconds (~93.8 minutes) +pub const TESTNET_EPOCH_LENGTH_SECS: u64 = 5628; + +/// Pharos Mainnet chain ID +pub const PHAROS_MAINNET_CHAIN_ID: u32 = 688600; + +/// Pharos Atlantic Testnet chain ID +pub const PHAROS_ATLANTIC_CHAIN_ID: u32 = 688689; + +/// Default withdraw window in epochs from the Pharos staking contract. +pub const DEFAULT_WITHDRAW_WINDOW_EPOCHS: u64 = 84; + +/// Configuration trait for Pharos network parameters. +pub trait Config: Clone + Send + Sync { + /// The epoch length in seconds + const EPOCH_LENGTH_SECS: u64; + + /// The epoch length in blocks (derived from epoch length and block time) + const EPOCH_LENGTH_BLOCKS: u64; + + /// The chain ID for this network + const CHAIN_ID: u64; + + /// Network identifier + const ID: [u8; 4]; + + /// The unstaking period in seconds (withdraw_window_epochs × epoch_length_secs). + /// Defaults to `DEFAULT_WITHDRAW_WINDOW_EPOCHS × EPOCH_LENGTH_SECS`. + const UNBONDING_PERIOD: u64 = DEFAULT_WITHDRAW_WINDOW_EPOCHS * Self::EPOCH_LENGTH_SECS; + + /// Calculate the epoch number for a given block number + fn compute_epoch(block_number: u64) -> u64 { + block_number / Self::EPOCH_LENGTH_BLOCKS + } + + /// Check if a block is an epoch boundary block (last block of an epoch). + /// + /// The epoch boundary is defined as the last block of an epoch, i.e., + /// `(block_number + 1) % epoch_length == 0`. 
+ /// + /// At epoch boundaries, the validator set for the next epoch is finalized + fn is_epoch_boundary(block_number: u64) -> bool { + (block_number + 1) % Self::EPOCH_LENGTH_BLOCKS == 0 + } + + /// Get the first block number of the next epoch + fn next_epoch_start(current_block: u64) -> u64 { + let current_epoch = Self::compute_epoch(current_block); + (current_epoch + 1) * Self::EPOCH_LENGTH_BLOCKS + } +} + +/// Pharos Mainnet configuration +#[derive(Clone, Default, Debug)] +pub struct Mainnet; + +impl Config for Mainnet { + /// 4 hours epoch length + const EPOCH_LENGTH_SECS: u64 = MAINNET_EPOCH_LENGTH_SECS; + + /// With ~1 second finality (sub-second), assuming 1 block per second + /// 4 hours = 14400 blocks + const EPOCH_LENGTH_BLOCKS: u64 = 14400; + + /// Mainnet chain ID - TBD + /// Placeholder based on testnet pattern + const CHAIN_ID: u64 = 688600; + + const ID: [u8; 4] = PHAROS_CONSENSUS_ID; +} + +/// Pharos Testnet configuration +#[derive(Clone, Default, Debug)] +pub struct Testnet; + +impl Config for Testnet { + /// ~93.8 minutes epoch length + const EPOCH_LENGTH_SECS: u64 = TESTNET_EPOCH_LENGTH_SECS; + + /// Updated to match on-chain value from staking contract slot 5 (February 2026) + const EPOCH_LENGTH_BLOCKS: u64 = 5628; + + /// Pharos Testnet chain ID + const CHAIN_ID: u64 = 688689; + + const ID: [u8; 4] = PHAROS_CONSENSUS_ID; +} diff --git a/modules/consensus/pharos/primitives/src/lib.rs b/modules/consensus/pharos/primitives/src/lib.rs new file mode 100644 index 000000000..0972de5e1 --- /dev/null +++ b/modules/consensus/pharos/primitives/src/lib.rs @@ -0,0 +1,24 @@ +// Copyright (C) Polytope Labs Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +#![cfg_attr(not(feature = "std"), no_std)] + +extern crate alloc; + +pub mod constants; +pub mod spv; +pub mod types; + +pub use constants::*; +pub use types::*; diff --git a/modules/consensus/pharos/primitives/src/spv.rs b/modules/consensus/pharos/primitives/src/spv.rs new file mode 100644 index 000000000..cd96c3b90 --- /dev/null +++ b/modules/consensus/pharos/primitives/src/spv.rs @@ -0,0 +1,202 @@ +// Copyright (C) Polytope Labs Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//! Pharos hexary hash tree SPV (Simple Payment Verification) proof verification. +//! +//! Pharos uses a hexary hash tree with SHA-256 hashing instead of Ethereum's +//! Merkle-Patricia Trie with Keccak-256. This module implements bottom-up +//! proof verification matching the Pharos proof format. +//! +//! ## Proof Structure +//! +//! Each proof is an ordered list of nodes from root to leaf: +//! - **Leaf node** (last): 1 byte metadata + 32 bytes `sha256(key)` + 32 bytes `sha256(value)` +//! 
- **Internal node**: 3 bytes metadata + N × 32 byte child hashes (variable branching) +//! +//! ## Verification Algorithm (bottom-up) +//! +//! Verify leaf: `proof_node[1:33] == sha256(key)` and `proof_node[33:65] == sha256(value)` +//! Walk bottom-up: for each parent, find current hash at `[begin_offset..end_offset]` +//! Hash current node: `current_hash = sha256(proof_node)` +//! Root check: final hash == expected root + +use crate::types::PharosProofNode; + +/// Compute SHA-256 hash of the given data. +pub fn sha256(data: &[u8]) -> [u8; 32] { + sp_io::hashing::sha2_256(data) +} + +/// Walk bottom-up from the leaf through parent nodes, verifying each hash link, +/// and check that the final hash equals the expected root. +/// +/// `proof_nodes` are ordered root-to-leaf. The leaf hash is computed from the +/// last node, then verified against each parent up to the root. +fn verify_proof_walk(proof_nodes: &[PharosProofNode], root: &[u8; 32]) -> bool { + let leaf = &proof_nodes[proof_nodes.len() - 1]; + let mut current_hash = sha256(&leaf.proof_node); + + for i in (0..proof_nodes.len() - 1).rev() { + let parent = &proof_nodes[i]; + let begin = parent.next_begin_offset as usize; + let end = parent.next_end_offset as usize; + + if end > parent.proof_node.len() || begin >= end || (end - begin) != 32 { + return false; + } + + if parent.proof_node[begin..end] != current_hash { + return false; + } + + current_hash = sha256(&parent.proof_node); + } + + current_hash == *root +} + +/// Verify a Pharos hexary hash tree proof (bottom-up). +/// +/// `proof_nodes` are ordered root-to-leaf (index 0 = root, last = leaf). +/// `key` is the raw key bytes (address for accounts, address||slot_key for storage). +/// `value` is the raw value bytes (rawValue for accounts, 32-byte padded value for storage). +/// `root` is the expected root hash (stateRoot or storageHash). +/// +/// Returns `true` if the proof is valid. 
+pub fn verify_pharos_proof( + proof_nodes: &[PharosProofNode], + key: &[u8], + value: &[u8], + root: &[u8; 32], +) -> bool { + if proof_nodes.is_empty() { + return false; + } + + // Verify the leaf node (last in the array) + let leaf = &proof_nodes[proof_nodes.len() - 1]; + let leaf_data = &leaf.proof_node; + + // Leaf: 1 byte metadata + 32 bytes sha256(key) + 32 bytes sha256(value) = 65 bytes + if leaf_data.len() != 65 { + return false; + } + + let key_hash = sha256(key); + let value_hash = sha256(value); + + // Verify key hash at bytes [1..33] + if leaf_data[1..33] != key_hash { + return false; + } + + // Verify value hash at bytes [33..65] + if leaf_data[33..65] != value_hash { + return false; + } + + verify_proof_walk(proof_nodes, root) +} + +/// Verify an account proof against the state root. +/// +/// `address` is the 20-byte account address. +/// `raw_value` is the RLP-encoded account value (rawValue from eth_getProof). +/// `state_root` is the state root from the block header. +pub fn verify_account_proof( + proof_nodes: &[PharosProofNode], + address: &[u8; 20], + raw_value: &[u8], + state_root: &[u8; 32], +) -> bool { + // For account proofs, the key is just the address bytes + verify_pharos_proof(proof_nodes, address, raw_value, state_root) +} + +/// Verify a storage proof for a single key. +/// +/// `address` is the 20-byte contract address. +/// `slot_hash` is the 32-byte storage slot hash. +/// `storage_value` is the 32-byte padded storage value. +/// `storage_root` is the storage trie root from the account proof. 
+pub fn verify_storage_proof( + proof_nodes: &[PharosProofNode], + address: &[u8; 20], + slot_hash: &[u8; 32], + storage_value: &[u8; 32], + storage_root: &[u8; 32], +) -> bool { + // For storage proofs, the key is address || slot_hash (52 bytes) + let mut key = [0u8; 52]; + key[..20].copy_from_slice(address); + key[20..].copy_from_slice(slot_hash); + + verify_pharos_proof(proof_nodes, &key, storage_value, storage_root) +} + +/// Verify a Pharos proof for key membership without requiring the value (bottom-up). +/// +/// Like [`verify_pharos_proof`] but only verifies the key exists in the trie +/// without requiring the raw value. Returns `Some(value_hash)` extracted from +/// the leaf if the proof is valid, `None` otherwise. +pub fn verify_pharos_proof_membership( + proof_nodes: &[PharosProofNode], + key: &[u8], + root: &[u8; 32], +) -> Option<[u8; 32]> { + if proof_nodes.is_empty() { + return None; + } + + let leaf = &proof_nodes[proof_nodes.len() - 1]; + let leaf_data = &leaf.proof_node; + + // Leaf: 1 byte metadata + 32 bytes sha256(key) + 32 bytes sha256(value) = 65 bytes + if leaf_data.len() != 65 { + return None; + } + + let key_hash = sha256(key); + if leaf_data[1..33] != key_hash { + return None; + } + + // Extract value hash from the leaf + let mut value_hash = [0u8; 32]; + value_hash.copy_from_slice(&leaf_data[33..65]); + + if verify_proof_walk(proof_nodes, root) { + Some(value_hash) + } else { + None + } +} + +/// Verify a storage membership proof for a single key. +/// +/// Like [`verify_storage_proof`] but only verifies the key exists without +/// requiring the raw value. Returns `Some(value_hash)` if valid, `None` otherwise. 
+pub fn verify_storage_membership_proof( + proof_nodes: &[PharosProofNode], + address: &[u8; 20], + slot_hash: &[u8; 32], + storage_root: &[u8; 32], +) -> Option<[u8; 32]> { + let mut key = [0u8; 52]; + key[..20].copy_from_slice(address); + key[20..].copy_from_slice(slot_hash); + + verify_pharos_proof_membership(proof_nodes, &key, storage_root) +} diff --git a/modules/consensus/pharos/primitives/src/types.rs b/modules/consensus/pharos/primitives/src/types.rs new file mode 100644 index 000000000..a5672ba8c --- /dev/null +++ b/modules/consensus/pharos/primitives/src/types.rs @@ -0,0 +1,252 @@ +// Copyright (C) Polytope Labs Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//! Type definitions for Pharos consensus. + +use crate::constants::BlsPublicKey; +use alloc::{ + collections::{BTreeMap, BTreeSet}, + vec::Vec, +}; +use codec::{Decode, Encode}; +use core::cmp::Ordering; +use geth_primitives::CodecHeader; +use primitive_types::{H256, U256}; + +/// Unique identifier for a validator pool in the staking contract +pub type PoolId = H256; + +/// Information about a single validator. +/// +/// Each validator has a BLS public key for signing blocks and a stake amount +/// that determines their voting power in consensus. +/// +/// Validators are ordered by their BLS public key to enable use in BTreeSet, +/// which automatically prevents duplicates. 
+#[derive(Debug, Clone, PartialEq, Eq, Default, Encode, Decode)] +#[cfg_attr(feature = "std", derive(serde::Serialize, serde::Deserialize))] +pub struct ValidatorInfo { + /// The validator's BLS public key (48 bytes compressed) + pub bls_public_key: BlsPublicKey, + /// The validator's pool ID in the staking contract + pub pool_id: PoolId, + /// The stake amount + pub stake: U256, +} + +impl PartialOrd for ValidatorInfo { + fn partial_cmp(&self, other: &Self) -> Option { + Some(self.cmp(other)) + } +} + +impl Ord for ValidatorInfo { + fn cmp(&self, other: &Self) -> Ordering { + self.bls_public_key.cmp(&other.bls_public_key) + } +} + +/// The complete validator set for a given epoch. +/// +/// This represents the set of validators that are eligible to sign blocks +/// during a specific epoch. The validator set is updated at epoch boundaries(last block of an +/// epoch). +/// +/// Uses `BTreeSet` to automatically prevent duplicate validators (by BLS public key). +#[derive(Debug, Clone, PartialEq, Eq, Default, Encode, Decode)] +#[cfg_attr(feature = "std", derive(serde::Serialize, serde::Deserialize))] +pub struct ValidatorSet { + /// Set of all validators + pub validators: BTreeSet, + /// Total stake across all validators + pub total_stake: U256, + /// The epoch this validator set is valid for + pub epoch: u64, +} + +impl ValidatorSet { + /// Create a new empty validator set + pub fn new(epoch: u64) -> Self { + Self { validators: BTreeSet::new(), total_stake: U256::zero(), epoch } + } + + /// Add a validator to the set. + /// Returns true if the validator was added, false if it was a duplicate. 
+ pub fn add_validator(&mut self, validator: ValidatorInfo) -> bool { + let stake = validator.stake; + if self.validators.insert(validator) { + self.total_stake = self.total_stake.saturating_add(stake); + true + } else { + false + } + } + + /// Check if a validator is in the set by their BLS public key + pub fn contains(&self, bls_key: &BlsPublicKey) -> bool { + self.validators.iter().any(|v| &v.bls_public_key == bls_key) + } + + /// Get a validator by their BLS public key + pub fn get_validator(&self, bls_key: &BlsPublicKey) -> Option<&ValidatorInfo> { + self.validators.iter().find(|v| &v.bls_public_key == bls_key) + } + + /// Calculate the stake of participating validators + pub fn participating_stake(&self, participants: &[BlsPublicKey]) -> U256 { + participants + .iter() + .filter_map(|key| self.get_validator(key)) + .fold(U256::zero(), |acc, v| acc.saturating_add(v.stake)) + } + + /// Check if participating stake meets the 2/3 + 1 threshold + pub fn has_supermajority(&self, participants: &[BlsPublicKey]) -> bool { + let participating = self.participating_stake(participants); + let required = (self.total_stake * 2 / 3) + 1; + participating >= required + } + + /// Get the number of validators in the set + pub fn len(&self) -> usize { + self.validators.len() + } + + /// Check if the validator set is empty + pub fn is_empty(&self) -> bool { + self.validators.is_empty() + } +} + +/// Block proof containing the BLS signature data. +/// +/// This contains the aggregated BLS signature for a block and the list +/// of participating validators who signed it. 
+#[derive(Debug, Clone, PartialEq, Eq, Default, Encode, Decode)]
+#[cfg_attr(feature = "std", derive(serde::Serialize, serde::Deserialize))]
+pub struct BlockProof {
+    /// The aggregated BLS signature from participating validators (96 bytes)
+    pub aggregate_signature: Vec<u8>,
+    /// List of BLS public keys of validators who participated in signing
+    pub participant_keys: Vec<BlsPublicKey>,
+}
+
+impl BlockProof {
+    /// Get the number of participants who signed this block
+    pub fn participant_count(&self) -> usize {
+        self.participant_keys.len()
+    }
+}
+
+/// Single node in a Pharos hexary hash tree proof path.
+///
+/// Each proof node contains the raw node bytes and offsets indicating where
+/// the child hash appears within this node (used for bottom-up verification).
+#[derive(Debug, Clone, PartialEq, Eq, Default, Encode, Decode)]
+#[cfg_attr(feature = "std", derive(serde::Serialize, serde::Deserialize))]
+pub struct PharosProofNode {
+    /// Raw bytes of this proof node
+    pub proof_node: Vec<u8>,
+    /// Start offset within this node where the next (child) hash begins
+    pub next_begin_offset: u32,
+    /// End offset within this node where the next (child) hash ends
+    pub next_end_offset: u32,
+}
+
+/// State proof for validator set stored in the staking contract.
+///
+/// This proof is required when the validator set changes at epoch boundaries.
+/// The validator set is decoded directly from the proof.
+///
+/// Uses Pharos hexary hash tree proofs (SHA-256) instead of Ethereum MPT (Keccak-256).
+#[derive(Debug, Clone, PartialEq, Eq, Default, Encode, Decode)]
+#[cfg_attr(feature = "std", derive(serde::Serialize, serde::Deserialize))]
+pub struct ValidatorSetProof {
+    /// Per-key storage proof nodes (storage key -> proof path, verified against state_root)
+    pub storage_proof: BTreeMap<H256, Vec<PharosProofNode>>,
+    /// Raw storage values in order: [totalStake, activePoolIds length,
+    /// pool_id_0..pool_id_n, validator_0_bls_header, validator_0_bls_data_0..2,
+    /// validator_0_stake, ...]
+    pub storage_values: Vec<Vec<u8>>,
+}
+
+/// The trusted state maintained by the Pharos consensus client.
+///
+/// This state is updated as new blocks are verified and represents
+/// the current view of the chain from the light client's perspective.
+#[derive(Debug, Clone, Encode, Decode)]
+pub struct VerifierState {
+    /// The current (active) validator set
+    pub current_validator_set: ValidatorSet,
+    /// The latest finalized block number
+    pub finalized_block_number: u64,
+    /// The hash of the finalized header
+    pub finalized_hash: H256,
+    /// The current epoch number
+    pub current_epoch: u64,
+}
+
+impl VerifierState {
+    /// Create a new verifier state with initial trusted state
+    pub fn new(
+        initial_validator_set: ValidatorSet,
+        initial_block_number: u64,
+        initial_hash: H256,
+    ) -> Self {
+        let epoch = initial_validator_set.epoch;
+        Self {
+            current_validator_set: initial_validator_set,
+            finalized_block_number: initial_block_number,
+            finalized_hash: initial_hash,
+            current_epoch: epoch,
+        }
+    }
+}
+
+/// Data required to update the verifier state.
+///
+/// This is what the prover submits to advance the light client's state.
+#[derive(Debug, Clone, Encode, Decode)]
+pub struct VerifierStateUpdate {
+    /// The header being attested to
+    pub header: CodecHeader,
+    /// Block proof from debug_getBlockProof containing the BLS signature
+    pub block_proof: BlockProof,
+    /// Optional validator set update proof (required at epoch boundaries)
+    pub validator_set_proof: Option<ValidatorSetProof>,
+}
+
+impl VerifierStateUpdate {
+    /// Get the block number from the header
+    pub fn block_number(&self) -> u64 {
+        self.header.number.low_u64()
+    }
+
+    /// Check if this update includes a validator set rotation
+    pub fn has_validator_set_update(&self) -> bool {
+        self.validator_set_proof.is_some()
+    }
+}
+
+/// Result of successful verification
+#[derive(Debug, Clone, Encode, Decode)]
+pub struct VerificationResult {
+    /// The verified block hash
+    pub block_hash: H256,
+    /// The verified header
+    pub header: CodecHeader,
+    /// The new validator set if this was an epoch boundary block
+    pub new_validator_set: Option<ValidatorSet>,
+}
diff --git a/modules/consensus/pharos/prover/Cargo.toml b/modules/consensus/pharos/prover/Cargo.toml
new file mode 100644
index 000000000..614229848
--- /dev/null
+++ b/modules/consensus/pharos/prover/Cargo.toml
@@ -0,0 +1,23 @@
+[package]
+name = "pharos-prover"
+version = "0.1.0"
+edition = "2021"
+description = "Pharos consensus prover"
+authors = ["Polytope Labs <hello@polytope.technology>"]
+publish = false
+
+[dependencies]
+pharos-primitives = { path = "../primitives" }
+pharos-verifier = { path = "../verifier" }
+geth-primitives = { workspace = true }
+primitive-types = { workspace = true }
+thiserror = { workspace = true }
+reqwest = { workspace = true, features = ["json", "rustls-tls"] }
+serde = { workspace = true, features = ["derive"] }
+serde_json = { package = "serde_json", version = "1.0.99" }
+sp-core = { workspace = true }
+hex = { workspace = true }
+ethabi = { workspace = true }
+alloy-provider = { workspace = true }
+alloy-eips = { workspace = true }
+futures = { workspace = true }
diff --git
a/modules/consensus/pharos/prover/src/error.rs b/modules/consensus/pharos/prover/src/error.rs new file mode 100644 index 000000000..b8725bc40 --- /dev/null +++ b/modules/consensus/pharos/prover/src/error.rs @@ -0,0 +1,96 @@ +// Copyright (C) Polytope Labs Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +use thiserror::Error; + +/// Errors that can occur during proof generation. +#[derive(Error, Debug)] +pub enum ProverError { + /// HTTP request failed + #[error("HTTP request failed: {0}")] + HttpRequest(#[from] reqwest::Error), + + /// JSON deserialization failed + #[error("JSON deserialization failed")] + JsonDeserialization, + + /// JSON-RPC returned an error + #[error("RPC error (code {code}): {message}")] + RpcError { code: i64, message: String }, + + /// RPC response missing result field + #[error("RPC response missing result")] + MissingRpcResult, + + /// Block not found at the specified height + #[error("Block not found: {0}")] + BlockNotFound(u64), + + /// Block proof not available (debug_getBlockProof may be disabled) + #[error("Block proof not available for block {0}")] + BlockProofNotAvailable(u64), + + /// Hex decoding failed + #[error("Invalid hex encoding")] + HexDecode, + + /// Invalid number format + #[error("Invalid number format")] + InvalidNumber, + + /// Invalid address length + #[error("Invalid address length: expected 20, got {0}")] + InvalidAddressLength(usize), + + /// Invalid H256 length 
+ #[error("Invalid H256 length: expected 32, got {0}")] + InvalidH256Length(usize), + + /// Invalid logs bloom length + #[error("Invalid logs bloom length: expected 256, got {0}")] + InvalidLogsBloomLength(usize), + + /// Invalid BLS public key length + #[error("Invalid BLS public key length: expected 48, got {0}")] + InvalidBlsKeyLength(usize), + + /// Invalid BLS signature length + #[error("Invalid BLS signature length: expected 96, got {0}")] + InvalidBlsSignatureLength(usize), + + /// Validator set proof required but not available + #[error("Validator set proof required but not available")] + ValidatorSetProofRequired, + + /// Storage proof verification failed + #[error("Storage proof verification failed")] + StorageProofVerification, + + /// Missing storage value + #[error("Missing storage value at slot index {0}")] + MissingStorageValue(usize), + + /// Missing storage proof in eth_getProof response + #[error("Missing storage proof for {0}")] + MissingStorageProof(&'static str), + + /// Invalid RPC URL + #[error("Invalid RPC URL: {0}")] + InvalidUrl(String), + + /// Provider transport error + #[error("Provider error: {0}")] + ProviderError(String), +} diff --git a/modules/consensus/pharos/prover/src/lib.rs b/modules/consensus/pharos/prover/src/lib.rs new file mode 100644 index 000000000..98bdc2d2c --- /dev/null +++ b/modules/consensus/pharos/prover/src/lib.rs @@ -0,0 +1,382 @@ +// Copyright (C) Polytope Labs Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+//! Pharos consensus prover for light client.
+
+pub mod error;
+pub mod rpc;
+
+pub use error::ProverError;
+
+use pharos_primitives::{
+    BlockProof, BlsPublicKey, Config, PharosProofNode, ValidatorInfo, ValidatorSet,
+    ValidatorSetProof, VerifierStateUpdate, STAKING_CONTRACT_ADDRESS,
+};
+use pharos_verifier::state_proof::StakingContractLayout;
+use primitive_types::{H160, H256, U256};
+use rpc::{
+    hex_to_bytes, hex_to_u64, PharosRpcClient, RpcBlockProof, RpcProofNode, RpcValidatorInfo,
+};
+use std::{collections::BTreeMap, marker::PhantomData, sync::Arc};
+
+/// Pharos prover for constructing light client updates.
+#[derive(Clone)]
+pub struct PharosProver<C: Config> {
+    pub rpc: Arc<PharosRpcClient>,
+    storage_layout: StakingContractLayout,
+    _config: PhantomData<C>,
+}
+
+impl<C: Config> PharosProver<C> {
+    /// Create a new prover with the given RPC endpoint.
+    pub fn new(endpoint: impl Into<String>) -> Result<Self, ProverError> {
+        Ok(Self {
+            rpc: Arc::new(PharosRpcClient::new(endpoint)?),
+            storage_layout: StakingContractLayout::default(),
+            _config: PhantomData,
+        })
+    }
+
+    /// Create a new prover with a custom storage layout.
+    pub fn with_storage_layout(
+        endpoint: impl Into<String>,
+        layout: StakingContractLayout,
+    ) -> Result<Self, ProverError> {
+        Ok(Self {
+            rpc: Arc::new(PharosRpcClient::new(endpoint)?),
+            storage_layout: layout,
+            _config: PhantomData,
+        })
+    }
+
+    /// Fetch the latest block number from the node.
+    pub async fn get_latest_block(&self) -> Result<u64, ProverError> {
+        self.rpc.get_block_number().await
+    }
+
+    /// Fetch a block update for the given block number.
+    ///
+    /// This will:
+    /// 1. Fetch the block header
+    /// 2. Fetch the block proof
+    /// 3. If at epoch boundary, fetch validator set proof
+    pub async fn fetch_block_update(
+        &self,
+        block_number: u64,
+    ) -> Result<VerifierStateUpdate, ProverError> {
+        let header = self.rpc.get_block_by_number(block_number).await?;
+
+        let rpc_proof = self.rpc.get_block_proof(block_number).await?;
+        let block_proof = self.convert_rpc_block_proof(&rpc_proof)?;
+
+        let validator_set_proof = if C::is_epoch_boundary(block_number) {
+            Some(self.fetch_validator_set_proof(block_number).await?)
+        } else {
+            None
+        };
+
+        Ok(VerifierStateUpdate { header, block_proof, validator_set_proof })
+    }
+
+    /// Fetch only the block proof for a given block number.
+    pub async fn fetch_block_proof(&self, block_number: u64) -> Result<BlockProof, ProverError> {
+        let rpc_proof = self.rpc.get_block_proof(block_number).await?;
+        self.convert_rpc_block_proof(&rpc_proof)
+    }
+
+    /// Build a ValidatorSet from RPC validator info.
+    pub fn build_validator_set(
+        &self,
+        validators: &[RpcValidatorInfo],
+        epoch: u64,
+    ) -> Result<ValidatorSet, ProverError> {
+        let mut validator_set = ValidatorSet::new(epoch);
+
+        for v in validators {
+            let bls_key_bytes = hex_to_bytes(&v.bls_key)?;
+            let len = bls_key_bytes.len();
+            let bls_public_key: BlsPublicKey =
+                bls_key_bytes.try_into().map_err(|_| ProverError::InvalidBlsKeyLength(len))?;
+
+            let pool_id_bytes = hex_to_bytes(&v.validator_id)?;
+            let pool_id = if pool_id_bytes.len() == 32 {
+                H256::from_slice(&pool_id_bytes)
+            } else {
+                let mut padded = [0u8; 32];
+                let start = 32usize.saturating_sub(pool_id_bytes.len());
+                padded[start..].copy_from_slice(&pool_id_bytes);
+                H256::from(padded)
+            };
+
+            let stake = Self::parse_stake(&v.staking)?;
+
+            let info = ValidatorInfo { bls_public_key, pool_id, stake };
+            validator_set.add_validator(info);
+        }
+
+        Ok(validator_set)
+    }
+
+    /// Parse a hex stake value to U256.
+    fn parse_stake(hex: &str) -> Result<U256, ProverError> {
+        let hex = hex.trim_start_matches("0x");
+        U256::from_str_radix(hex, 16).map_err(|_| ProverError::InvalidNumber)
+    }
+
+    /// Fetch validator set proof for an epoch boundary block.
+    ///
+    /// This fetches the storage proof from the staking contract at the
+    /// given block, which contains the validator set for the next epoch.
+    ///
+    /// The storage layout follows the Pharos staking contract (V1):
+    /// - Slot 6: totalStake
+    /// - Slot 1: activePoolIds (bytes32[] array length)
+    /// - keccak256(1): array elements (pool IDs)
+    /// - For each pool ID: validator data via mapping at slot 0
+    pub async fn fetch_validator_set_proof(
+        &self,
+        block_number: u64,
+    ) -> Result<ValidatorSetProof, ProverError> {
+        let address = H160::from_slice(STAKING_CONTRACT_ADDRESS.as_slice());
+
+        // Fetch base slots (totalStake, activePoolIds length)
+        let base_keys = vec![
+            self.storage_layout.raw_slot_key(self.storage_layout.total_stake_slot),
+            self.storage_layout.raw_slot_key(self.storage_layout.active_pool_set_slot),
+        ];
+
+        let base_proof = self.rpc.get_proof(address, base_keys.clone(), block_number).await?;
+
+        // Get validator count from activePoolIds length (slot 1)
+        let validator_count = if base_proof.storage_proof.len() >= 2 {
+            hex_to_u64(&base_proof.storage_proof[1].value)?
+        } else {
+            return Err(ProverError::MissingStorageProof("activePoolIds length"));
+        };
+
+        // Fetch pool IDs from the activePoolIds array
+        let mut pool_id_keys = Vec::new();
+        for i in 0..validator_count {
+            pool_id_keys.push(self.array_element_key(self.storage_layout.active_pool_set_slot, i));
+        }
+
+        if pool_id_keys.is_empty() {
+            return Err(ProverError::MissingStorageProof("activePoolIds array is empty"));
+        }
+
+        let pool_id_proof = self.rpc.get_proof(address, pool_id_keys.clone(), block_number).await?;
+
+        // Extract pool IDs
+        let mut pool_ids = Vec::new();
+        for sp in &pool_id_proof.storage_proof {
+            let bytes = hex_to_bytes(&sp.value)?;
+            let mut padded = [0u8; 32];
+            if bytes.len() <= 32 {
+                padded[32 - bytes.len()..].copy_from_slice(&bytes);
+            }
+            pool_ids.push(H256::from(padded));
+        }
+
+        // Collect storage values and per-key proof paths.
+        // Each storage key maps to its own proof path for individual verification.
+        let mut storage_proof: BTreeMap<H256, Vec<PharosProofNode>> = BTreeMap::new();
+        let mut storage_values: Vec<Vec<u8>> = Vec::new();
+
+        for (i, sp) in base_proof.storage_proof.iter().enumerate() {
+            storage_proof.insert(base_keys[i], rpc_to_proof_nodes(&sp.proof)?);
+            storage_values.push(hex_to_bytes(&sp.value)?);
+        }
+        for (i, sp) in pool_id_proof.storage_proof.iter().enumerate() {
+            storage_proof.insert(pool_id_keys[i], rpc_to_proof_nodes(&sp.proof)?);
+            storage_values.push(hex_to_bytes(&sp.value)?);
+        }
+
+        // Fetch validator data concurrently with two-phase fetching per validator.
+        // Phase 1: fetch BLS string header + stake to determine BLS data slot count.
+        // Phase 2: fetch the dynamically-computed BLS data slots.
+        // All validators run concurrently, with sequential phases per validator.
+        let validator_futures: Vec<_> = pool_ids
+            .iter()
+            .map(|pool_id| {
+                let (bls_string_slot, stake_slot) =
+                    self.get_validator_header_and_stake_keys(pool_id);
+                let rpc = self.rpc.clone();
+                async move {
+                    // fetch BLS header + stake
+                    let phase1_keys = vec![bls_string_slot, stake_slot];
+                    let phase1_proof = rpc.get_proof(address, phase1_keys, block_number).await?;
+
+                    if phase1_proof.storage_proof.len() < 2 {
+                        return Err(ProverError::MissingStorageProof(
+                            "BLS header or stake in phase 1",
+                        ));
+                    }
+
+                    // Determine BLS data slot count from the string header value
+                    let header_hex = &phase1_proof.storage_proof[0].value;
+                    let data_slot_count = bls_data_slots_from_hex(header_hex)?;
+
+                    // fetch BLS data slots
+                    let bls_data_base = H256::from(keccak256(bls_string_slot.as_bytes()));
+                    let bls_data_base_pos = U256::from_big_endian(bls_data_base.as_bytes());
+                    let mut data_keys = Vec::new();
+                    for i in 0..data_slot_count {
+                        data_keys.push(H256((bls_data_base_pos + U256::from(i)).to_big_endian()));
+                    }
+
+                    let phase2_proof = if !data_keys.is_empty() {
+                        Some(rpc.get_proof(address, data_keys.clone(), block_number).await?)
+ } else { + None + }; + + // Assemble in order: [header, data_0..N, stake] + let mut all_keys = Vec::new(); + let mut all_proofs = Vec::new(); + let mut all_values = Vec::new(); + + // Header + all_keys.push(bls_string_slot); + all_proofs.push(rpc_to_proof_nodes(&phase1_proof.storage_proof[0].proof)?); + all_values.push(hex_to_bytes(&phase1_proof.storage_proof[0].value)?); + + // Data slots + if let Some(p2) = phase2_proof { + for (j, sp) in p2.storage_proof.iter().enumerate() { + all_keys.push(data_keys[j]); + all_proofs.push(rpc_to_proof_nodes(&sp.proof)?); + all_values.push(hex_to_bytes(&sp.value)?); + } + } + + // Stake + all_keys.push(stake_slot); + all_proofs.push(rpc_to_proof_nodes(&phase1_proof.storage_proof[1].proof)?); + all_values.push(hex_to_bytes(&phase1_proof.storage_proof[1].value)?); + + Ok::<_, ProverError>((all_keys, all_proofs, all_values)) + } + }) + .collect(); + + let validator_results = futures::future::join_all(validator_futures).await; + + for result in validator_results { + let (keys, proofs, values) = result?; + for (j, key) in keys.iter().enumerate() { + storage_proof.insert(*key, proofs[j].clone()); + storage_values.push(values[j].clone()); + } + } + + Ok(ValidatorSetProof { storage_proof, storage_values }) + } + + /// Calculate the storage key for a dynamic array element. + fn array_element_key(&self, base_slot: u64, index: u64) -> H256 { + let slot_bytes = U256::from(base_slot).to_big_endian(); + let base_key = keccak256(&slot_bytes); + let base_pos = U256::from_big_endian(&base_key); + let element_pos = base_pos + U256::from(index); + H256(element_pos.to_big_endian()) + } + + /// Get the BLS string header slot and stake slot for a validator. + /// + /// These are fetched first (phase 1) to determine the dynamic BLS data slot count + /// from the string header value before fetching the data slots (phase 2). 
+    fn get_validator_header_and_stake_keys(&self, pool_id: &H256) -> (H256, H256) {
+        const BLS_PUBLIC_KEY_OFFSET: u64 = 3;
+        const TOTAL_STAKE_OFFSET: u64 = 8;
+
+        // Calculate validator base slot: keccak256(pool_id || mapping_slot)
+        let mut data = [0u8; 64];
+        data[..32].copy_from_slice(pool_id.as_bytes());
+        data[32..64].copy_from_slice(
+            &U256::from(self.storage_layout.validators_mapping_slot).to_big_endian(),
+        );
+        let base_slot = H256::from(keccak256(&data));
+        let base_pos = U256::from_big_endian(base_slot.as_bytes());
+
+        let bls_string_slot = H256((base_pos + U256::from(BLS_PUBLIC_KEY_OFFSET)).to_big_endian());
+        let stake_slot = H256((base_pos + U256::from(TOTAL_STAKE_OFFSET)).to_big_endian());
+
+        (bls_string_slot, stake_slot)
+    }
+
+    /// Convert RPC block proof to BlockProof.
+    fn convert_rpc_block_proof(
+        &self,
+        rpc_proof: &RpcBlockProof,
+    ) -> Result<BlockProof, ProverError> {
+        let aggregate_signature = hex_to_bytes(&rpc_proof.bls_aggregated_signature)?;
+
+        let participant_keys: Result<Vec<BlsPublicKey>, _> = rpc_proof
+            .signed_bls_keys
+            .iter()
+            .map(|k| {
+                let bytes = hex_to_bytes(k)?;
+                let len = bytes.len();
+                bytes.try_into().map_err(|_| ProverError::InvalidBlsKeyLength(len))
+            })
+            .collect();
+
+        Ok(BlockProof { aggregate_signature, participant_keys: participant_keys? })
+    }
+}
+
+/// Convert RPC proof nodes to PharosProofNode format.
+pub fn rpc_to_proof_nodes(nodes: &[RpcProofNode]) -> Result<Vec<PharosProofNode>, ProverError> {
+    nodes
+        .iter()
+        .map(|n| {
+            Ok(PharosProofNode {
+                proof_node: hex_to_bytes(&n.proof_node)?,
+                next_begin_offset: n.next_begin_offset,
+                next_end_offset: n.next_end_offset,
+            })
+        })
+        .collect()
+}
+
+/// Determine the number of BLS data slots from a hex-encoded string header value.
+///
+/// For Solidity long strings, the header slot contains `length * 2 + 1`.
+/// The byte length is `(value - 1) / 2`, and the slot count is `ceil(length / 32)`.
+fn bls_data_slots_from_hex(hex_value: &str) -> Result<u64, ProverError> {
+    let bytes = hex_to_bytes(hex_value)?;
+    let mut padded = [0u8; 32];
+    if bytes.len() <= 32 {
+        padded[32 - bytes.len()..].copy_from_slice(&bytes);
+    }
+    let val = U256::from_big_endian(&padded);
+    let val_bytes = val.to_big_endian();
+    let lowest_byte = val_bytes[31];
+
+    if lowest_byte & 1 == 0 {
+        // Short string - data is in the header itself
+        Ok(0)
+    } else {
+        // Long string - header = length * 2 + 1
+        let length = (val - U256::from(1)) / U256::from(2);
+        let str_len = length.low_u64();
+        Ok((str_len + 31) / 32)
+    }
+}
+
+/// Keccak256 hash using sp_core.
+fn keccak256(data: &[u8]) -> [u8; 32] {
+    sp_core::keccak_256(data)
+}
diff --git a/modules/consensus/pharos/prover/src/rpc.rs b/modules/consensus/pharos/prover/src/rpc.rs
new file mode 100644
index 000000000..a426db2df
--- /dev/null
+++ b/modules/consensus/pharos/prover/src/rpc.rs
@@ -0,0 +1,293 @@
+// Copyright (C) Polytope Labs Ltd.
+// SPDX-License-Identifier: Apache-2.0
+
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+//! RPC client for Pharos node interactions.
+//!
+//! Standard Ethereum JSON-RPC calls (`eth_blockNumber`, `eth_getBlockByNumber`)
+//! are handled via the alloy provider. Pharos-specific endpoints that use
+//! non-standard response formats (`eth_getProof`, `debug_getBlockProof`,
+//! `debug_getValidatorInfo`) are called through a raw reqwest client.
+
+use crate::ProverError;
+use alloy_eips::BlockNumberOrTag;
+use alloy_provider::{Provider, RootProvider};
+use ethabi::ethereum_types::H64;
+use geth_primitives::CodecHeader;
+use primitive_types::{H160, H256, U256};
+use serde::{Deserialize, Serialize};
+use std::sync::atomic::{AtomicU64, Ordering};
+
+/// JSON-RPC request structure.
+#[derive(Debug, Serialize)]
+struct JsonRpcRequest<P: Serialize> {
+    pub jsonrpc: &'static str,
+    pub method: &'static str,
+    pub params: P,
+    pub id: u64,
+}
+
+impl<P: Serialize> JsonRpcRequest<P> {
+    fn new(method: &'static str, params: P, id: u64) -> Self {
+        Self { jsonrpc: "2.0", method, params, id }
+    }
+}
+
+/// JSON-RPC response structure.
+#[derive(Debug, Deserialize)]
+struct JsonRpcResponse<R> {
+    #[allow(dead_code)]
+    pub jsonrpc: String,
+    pub result: Option<R>,
+    pub error: Option<JsonRpcError>,
+    #[allow(dead_code)]
+    pub id: u64,
+}
+
+/// JSON-RPC error structure.
+#[derive(Debug, Deserialize)]
+struct JsonRpcError {
+    pub code: i64,
+    pub message: String,
+    #[allow(dead_code)]
+    pub data: Option<serde_json::Value>,
+}
+
+/// Block proof response from `debug_getBlockProof`.
+#[derive(Debug, Deserialize)]
+#[serde(rename_all = "camelCase")]
+pub struct RpcBlockProof {
+    /// Block number as hex string (e.g., "0x1234")
+    pub block_number: String,
+    /// Block proof hash - the message that was signed
+    pub block_proof_hash: String,
+    /// Aggregated BLS signature as hex string
+    pub bls_aggregated_signature: String,
+    /// List of BLS public keys that signed, as hex strings
+    pub signed_bls_keys: Vec<String>,
+}
+
+/// Single proof node in the Pharos hexary hash tree format.
+#[derive(Debug, Clone, Deserialize)]
+#[serde(rename_all = "camelCase")]
+pub struct RpcProofNode {
+    /// Raw node bytes as hex string
+    pub proof_node: String,
+    /// Start offset where the child hash begins
+    pub next_begin_offset: u32,
+    /// End offset where the child hash ends
+    pub next_end_offset: u32,
+}
+
+/// Account proof response from `eth_getProof`.
+///
+/// Uses a custom response format (Pharos hexary hash tree nodes instead of
+/// standard Ethereum MPT nodes), so this endpoint is called via raw JSON-RPC
+/// rather than the alloy provider.
+#[derive(Debug, Clone, Deserialize)]
+#[serde(rename_all = "camelCase")]
+pub struct RpcAccountProof {
+    pub account_proof: Vec<RpcProofNode>,
+    pub balance: String,
+    pub code_hash: String,
+    pub nonce: String,
+    pub storage_hash: String,
+    /// RLP-encoded account value (rawValue)
+    pub raw_value: String,
+    pub storage_proof: Vec<RpcStorageProof>,
+}
+
+/// Storage proof entry from `eth_getProof`.
+#[derive(Debug, Clone, Deserialize)]
+#[serde(rename_all = "camelCase")]
+pub struct RpcStorageProof {
+    pub key: String,
+    pub value: String,
+    pub proof: Vec<RpcProofNode>,
+}
+
+/// Validator info from `debug_getValidatorInfo`.
+#[derive(Debug, Clone, Deserialize)]
+#[serde(rename_all = "camelCase")]
+pub struct RpcValidatorInfo {
+    pub bls_key: String,
+    pub identity_key: String,
+    pub staking: String,
+    #[serde(rename = "validatorID")]
+    pub validator_id: String,
+}
+
+/// Response from `debug_getValidatorInfo`.
+#[derive(Debug, Clone, Deserialize)]
+#[serde(rename_all = "camelCase")]
+pub struct RpcValidatorInfoResponse {
+    pub block_number: String,
+    pub validator_set: Vec<RpcValidatorInfo>,
+}
+
+/// RPC client for Pharos node.
+///
+/// Uses an alloy provider for standard Ethereum JSON-RPC queries and a raw
+/// reqwest client for Pharos-specific debug endpoints and `eth_getProof`
+/// (which returns non-standard proof node formats).
+pub struct PharosRpcClient {
+    endpoint: String,
+    client: reqwest::Client,
+    provider: RootProvider,
+    request_id: AtomicU64,
+}
+
+impl PharosRpcClient {
+    /// Create a new RPC client for the given endpoint URL.
+    pub fn new(endpoint: impl Into<String>) -> Result<Self, ProverError> {
+        let endpoint = endpoint.into();
+        let provider = RootProvider::new_http(
+            endpoint.parse().map_err(|_| ProverError::InvalidUrl(endpoint.clone()))?,
+        );
+        Ok(Self {
+            endpoint,
+            client: reqwest::Client::new(),
+            provider,
+            request_id: AtomicU64::new(1),
+        })
+    }
+
+    fn next_id(&self) -> u64 {
+        self.request_id.fetch_add(1, Ordering::SeqCst)
+    }
+
+    /// Make a raw JSON-RPC call for non-standard endpoints.
+    async fn call<P: Serialize, R: for<'de> Deserialize<'de>>(
+        &self,
+        method: &'static str,
+        params: P,
+    ) -> Result<R, ProverError> {
+        let request = JsonRpcRequest::new(method, params, self.next_id());
+
+        let response = self.client.post(&self.endpoint).json(&request).send().await?;
+
+        let rpc_response: JsonRpcResponse<R> =
+            response.json().await.map_err(|_| ProverError::JsonDeserialization)?;
+
+        if let Some(error) = rpc_response.error {
+            return Err(ProverError::RpcError { code: error.code, message: error.message });
+        }
+
+        rpc_response.result.ok_or(ProverError::MissingRpcResult)
+    }
+
+    /// Fetch the latest block number.
+    pub async fn get_block_number(&self) -> Result<u64, ProverError> {
+        self.provider
+            .get_block_number()
+            .await
+            .map_err(|e| ProverError::ProviderError(e.to_string()))
+    }
+
+    /// Fetch a block header by number, converting the response to [`CodecHeader`].
+    pub async fn get_block_by_number(&self, block_number: u64) -> Result<CodecHeader, ProverError> {
+        let block = self
+            .provider
+            .get_block_by_number(BlockNumberOrTag::Number(block_number))
+            .await
+            .map_err(|e| ProverError::ProviderError(e.to_string()))?
+            .ok_or(ProverError::BlockNotFound(block_number))?;
+
+        let h = &block.header.inner;
+
+        Ok(CodecHeader {
+            parent_hash: H256::from(h.parent_hash.0),
+            uncle_hash: H256::from(h.ommers_hash.0),
+            coinbase: H160::from(h.beneficiary.0 .0),
+            state_root: H256::from(h.state_root.0),
+            transactions_root: H256::from(h.transactions_root.0),
+            receipts_root: H256::from(h.receipts_root.0),
+            logs_bloom: {
+                let mut bloom = [0u8; 256];
+                bloom.copy_from_slice(h.logs_bloom.as_ref());
+                bloom.into()
+            },
+            difficulty: U256::from_big_endian(&h.difficulty.to_be_bytes::<32>()),
+            number: U256::from(h.number),
+            gas_limit: h.gas_limit,
+            gas_used: h.gas_used,
+            timestamp: h.timestamp,
+            extra_data: h.extra_data.to_vec(),
+            mix_hash: H256::from(h.mix_hash.0),
+            nonce: H64::from(h.nonce.0),
+            base_fee_per_gas: h.base_fee_per_gas.map(U256::from),
+            withdrawals_hash: h.withdrawals_root.map(|v| H256::from(v.0)),
+            blob_gas_used: h.blob_gas_used,
+            excess_blob_gas_used: h.excess_blob_gas,
+            parent_beacon_root: h.parent_beacon_block_root.map(|v| H256::from(v.0)),
+            requests_hash: h.requests_hash.map(|v| H256::from(v.0)),
+        })
+    }
+
+    /// Fetch block proof using `debug_getBlockProof`.
+    pub async fn get_block_proof(&self, block_number: u64) -> Result<RpcBlockProof, ProverError> {
+        let block_hex = format!("0x{:x}", block_number);
+        self.call("debug_getBlockProof", vec![block_hex]).await
+    }
+
+    /// Fetch account and storage proofs using `eth_getProof`.
+    ///
+    /// This uses the raw JSON-RPC client because Pharos returns proof nodes
+    /// in its own hexary hash tree format rather than standard Ethereum MPT nodes.
+    pub async fn get_proof(
+        &self,
+        address: H160,
+        storage_keys: Vec<H256>,
+        block_number: u64,
+    ) -> Result<RpcAccountProof, ProverError> {
+        let address_hex = format!("0x{:x}", address);
+        let keys_hex: Vec<String> = storage_keys.iter().map(|k| format!("0x{:x}", k)).collect();
+        let block_hex = format!("0x{:x}", block_number);
+
+        self.call("eth_getProof", (address_hex, keys_hex, block_hex)).await
+    }
+
+    /// Fetch validator info using `debug_getValidatorInfo`.
+    pub async fn get_validator_info(
+        &self,
+        block_number: Option<u64>,
+    ) -> Result<RpcValidatorInfoResponse, ProverError> {
+        let block_param = match block_number {
+            Some(n) => format!("0x{:x}", n),
+            None => "latest".to_string(),
+        };
+        self.call("debug_getValidatorInfo", vec![block_param]).await
+    }
+}
+
+/// Parse a hex string to bytes.
+pub fn hex_to_bytes(hex: &str) -> Result<Vec<u8>, ProverError> {
+    let hex = hex.trim_start_matches("0x");
+    hex::decode(hex).map_err(|_| ProverError::HexDecode)
+}
+
+/// Parse a hex string to H256.
+pub fn hex_to_h256(hex: &str) -> Result<H256, ProverError> {
+    let bytes = hex_to_bytes(hex)?;
+    if bytes.len() != 32 {
+        return Err(ProverError::InvalidH256Length(bytes.len()));
+    }
+    Ok(H256::from_slice(&bytes))
+}
+
+/// Parse a hex string to u64.
+pub fn hex_to_u64(hex: &str) -> Result { + let hex = hex.trim_start_matches("0x"); + u64::from_str_radix(hex, 16).map_err(|_| ProverError::InvalidNumber) +} diff --git a/modules/consensus/pharos/verifier/Cargo.toml b/modules/consensus/pharos/verifier/Cargo.toml new file mode 100644 index 000000000..82042bbf3 --- /dev/null +++ b/modules/consensus/pharos/verifier/Cargo.toml @@ -0,0 +1,39 @@ +[package] +name = "pharos-verifier" +version = "0.1.0" +edition = "2021" +description = "Pharos consensus verifier for light client" +authors = ["Polytope Labs "] +publish = false + +[dependencies] +pharos-primitives = { workspace = true, default-features = false } +crypto-utils = { workspace = true, default-features = false } +geth-primitives = { workspace = true, default-features = false } +ismp = { workspace = true, default-features = false } +bls = { workspace = true } +log = { workspace = true, default-features = false } +anyhow = { workspace = true, default-features = false } +thiserror = { workspace = true } +primitive-types = { workspace = true } + +[features] +default = ["std"] +std = [ + "pharos-primitives/std", + "crypto-utils/std", + "geth-primitives/std", + "ismp/std", + "log/std", + "anyhow/std", + "bls/std", + "primitive-types/std", +] + +[dependencies.hex] +version = "0.4.3" +default-features = false +features = ["alloc"] + +[dev-dependencies] +hex-literal = { workspace = true, default-features = true } diff --git a/modules/consensus/pharos/verifier/src/error.rs b/modules/consensus/pharos/verifier/src/error.rs new file mode 100644 index 000000000..db74baf97 --- /dev/null +++ b/modules/consensus/pharos/verifier/src/error.rs @@ -0,0 +1,172 @@ +// Copyright (C) Polytope Labs Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+//! Error types for Pharos verifier.
+
+use pharos_primitives::BlsPublicKey;
+use primitive_types::U256;
+use thiserror::Error;
+
+/// Errors that can occur during Pharos block verification.
+#[derive(Debug, Error)]
+pub enum Error {
+    /// The update is for a block that has already been finalized
+    #[error("Stale update: current finalized block {current} >= update block {update}")]
+    StaleUpdate {
+        /// Current finalized block number
+        current: u64,
+        /// Update block number
+        update: u64,
+    },
+
+    /// The update block is not in the expected epoch
+    #[error("Epoch mismatch: update block is in epoch {update_epoch}, expected {expected_epoch}")]
+    EpochMismatch {
+        /// The epoch of the update block
+        update_epoch: u64,
+        /// The expected epoch (current verifier state epoch)
+        expected_epoch: u64,
+    },
+
+    /// A participating validator is not in the trusted validator set
+    // Only the first 8 bytes of the 48-byte key are rendered, to keep the
+    // message short while still identifying the offender.
+    #[error("Unknown validator with BLS key: {}", hex::encode(&key.as_ref()[..8]))]
+    UnknownValidator {
+        /// The unknown validator's BLS public key
+        key: BlsPublicKey,
+    },
+
+    /// Not enough stake participated in signing the block
+    #[error(
+        "Insufficient stake: {participating} participated, {required} required (total: {total})"
+    )]
+    InsufficientStake {
+        /// Stake that participated
+        participating: U256,
+        /// Required stake (2/3 + 1 of total)
+        required: U256,
+        /// Total network stake
+        total: U256,
+    },
+
+    /// No validators participated in signing
+    #[error("No validators participated in signing")]
+    NoParticipants,
+
+    /// BLS signature verification failed
+    #[error("BLS signature verification failed")]
+    InvalidSignature,
+
+    /// BLS cryptography error
+    #[error("BLS error: {0:?}")]
+    BlsError(bls::errors::BLSError),
+
+    /// Missing validator set proof for epoch boundary block
+    #[error("Missing validator set proof for epoch boundary block {block_number}")]
+    MissingValidatorSetProof {
+        /// Block number that requires a validator set proof
+        block_number: u64,
+    },
+
+    /// Unexpected validator set proof for non-epoch-boundary block
+    #[error("Unexpected validator set proof for non-epoch-boundary block {block_number}")]
+    UnexpectedValidatorSetProof {
+        /// Block number that should not have a validator set proof
+        block_number: u64,
+    },
+
+    /// Storage proof lookup failed
+    #[error("Storage proof lookup failed")]
+    StorageProofLookupFailed,
+
+    /// Storage value exceeds maximum size for U256
+    #[error("Storage value too large for U256")]
+    StorageValueTooLarge,
+
+    /// Mismatch between number of storage slots and values
+    #[error("Slots and values length mismatch: {slots} slots, {values} values")]
+    SlotValueLengthMismatch {
+        /// Number of derived storage slots
+        slots: usize,
+        /// Number of provided storage values
+        values: usize,
+    },
+
+    /// Not enough storage values provided for validator set
+    #[error("Insufficient storage values: expected at least {expected}, got {got}")]
+    InsufficientStorageValues {
+        /// Minimum number of values expected
+        expected: usize,
+        /// Number of values actually provided
+        got: usize,
+    },
+
+    /// Not enough pool IDs provided for validators
+    #[error("Insufficient pool IDs: expected {expected} for {validators} validators, got {got}")]
+    InsufficientPoolIds {
+        /// Number of values expected
+        expected: usize,
+        /// Number of validators in the set
+        validators: usize,
+        /// Number of values actually provided
+        got: usize,
+    },
+
+    /// BLS public key slot value is missing
+    #[error("Missing BLS public key slot value")]
+    MissingBlsKeySlot,
+
+    /// BLS key slot is empty
+    #[error("Empty BLS key slot")]
+    EmptyBlsKeySlot,
+
+    /// Invalid short string length in BLS key slot
+    #[error("Invalid short string length in BLS key")]
+    InvalidBlsStringLength,
+
+    /// BLS key string contains invalid UTF-8
+    #[error("Invalid UTF-8 in BLS key string")]
+    InvalidBlsKeyUtf8,
+
+    /// Long string BLS keys require additional data slots
+    #[error("Long string BLS key detected - string data slots required in proof")]
+    LongStringBlsKeyUnsupported,
+
+    /// BLS key hex string is invalid
+    #[error("Invalid hex encoding in BLS key string")]
+    InvalidBlsKeyHex,
+
+    /// BLS key has incorrect byte length
+    #[error("Invalid BLS key length: expected {expected}, got {got}")]
+    InvalidBlsKeyLength {
+        /// Expected key length in bytes
+        expected: usize,
+        /// Actual key length in bytes
+        got: usize,
+    },
+
+    /// Failed to convert BLS key bytes to the expected type
+    #[error("Failed to convert BLS key bytes")]
+    BlsKeyConversionFailed,
+
+    /// Validator set contains no validators
+    #[error("Validator set is empty")]
+    EmptyValidatorSet,
+
+    /// Computed total stake doesn't match claimed total
+    #[error("Total stake mismatch: computed {computed}, claimed {claimed}")]
+    ComputedStakeMismatch {
+        /// Total computed by summing the individual validator stakes
+        computed: U256,
+        /// Total claimed in contract storage
+        claimed: U256,
+    },
+
+    /// Duplicate validator detected in the set
+    #[error("Duplicate validator in set")]
+    DuplicateValidator,
+
+    /// Validator has zero stake
+    #[error("Validator has zero stake")]
+    ZeroStakeValidator,
+
+    /// Missing required storage value at a specific slot
+    #[error("Missing required storage value for {field}")]
+    MissingStorageValue {
+        /// Description of the missing field
+        field: &'static str,
+    },
+
+    /// Incomplete validator proof data
+    #[error("Incomplete validator proof: expected {expected} values, got {got}")]
+    IncompleteValidatorProof {
+        /// Expected number of values
+        expected: usize,
+        /// Actual number of values
+        got: usize,
+    },
+}
diff --git a/modules/consensus/pharos/verifier/src/lib.rs b/modules/consensus/pharos/verifier/src/lib.rs
new file mode 100644
index 000000000..41662d1f7
--- /dev/null
+++ b/modules/consensus/pharos/verifier/src/lib.rs
@@ -0,0 +1,165 @@
+// Copyright (C) Polytope Labs Ltd.
+// SPDX-License-Identifier: Apache-2.0
+
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//! Pharos consensus verifier. + +#![cfg_attr(not(feature = "std"), no_std)] + +extern crate alloc; + +pub mod error; +pub mod state_proof; + +use error::Error; +use geth_primitives::Header; +use ismp::messaging::Keccak256; +use pharos_primitives::{ + BlockProof, BlsPublicKey, Config, ValidatorSet, VerifierState, VerifierStateUpdate, +}; +use primitive_types::H256; + +/// Domain Separation Tag for Pharos BLS signatures. +pub const PHAROS_BLS_DST: &str = "BLS_SIG_BLS12381G2_XMD:SHA-256_SSWU_RO_POP_"; + +/// Verifies a Pharos block proof and update the verifier state. +pub fn verify_pharos_block( + trusted_state: VerifierState, + update: VerifierStateUpdate, +) -> Result { + let update_block_number = update.block_number(); + let current_block_number = trusted_state.finalized_block_number; + + if update_block_number <= current_block_number { + return Err(Error::StaleUpdate { + current: current_block_number, + update: update_block_number, + }); + } + + let update_epoch = C::compute_epoch(update_block_number); + if update_epoch != trusted_state.current_epoch { + return Err(Error::EpochMismatch { + update_epoch, + expected_epoch: trusted_state.current_epoch, + }); + } + + verify_validator_membership( + &trusted_state.current_validator_set, + &update.block_proof.participant_keys, + )?; + + verify_stake_threshold( + &trusted_state.current_validator_set, + &update.block_proof.participant_keys, + )?; + + let computed_hash = Header::from(&update.header).hash::(); + + verify_bls_signature(&update.block_proof.participant_keys, &update.block_proof, 
computed_hash)?; + + let new_state = if C::is_epoch_boundary(update_block_number) { + // Epoch boundary block must always have validator set proof + let validator_set_proof = update + .validator_set_proof + .ok_or(Error::MissingValidatorSetProof { block_number: update_block_number })?; + + let next_epoch = C::compute_epoch(update_block_number) + 1; + let new_validator_set = state_proof::verify_validator_set_proof::( + update.header.state_root, + &validator_set_proof, + next_epoch, + )?; + + VerifierState { + current_validator_set: new_validator_set, + finalized_block_number: update_block_number, + finalized_hash: computed_hash, + current_epoch: next_epoch, + } + } else { + if update.validator_set_proof.is_some() { + return Err(Error::UnexpectedValidatorSetProof { block_number: update_block_number }); + } + + VerifierState { + finalized_block_number: update_block_number, + finalized_hash: computed_hash, + ..trusted_state + } + }; + + Ok(new_state) +} + +/// Verify that all participating validators are members of the trusted validator set. +fn verify_validator_membership( + validator_set: &ValidatorSet, + participants: &[BlsPublicKey], +) -> Result<(), Error> { + if let Some(key) = participants.iter().find(|key| !validator_set.contains(key)) { + return Err(Error::UnknownValidator { key: key.clone() }); + } + Ok(()) +} + +/// Verify that participating validators have more than 2/3 of total stake. +fn verify_stake_threshold( + validator_set: &ValidatorSet, + participants: &[BlsPublicKey], +) -> Result<(), Error> { + let participating_stake = validator_set.participating_stake(participants); + let total_stake = validator_set.total_stake; + let required = (total_stake * 2 / 3) + 1; + + if participating_stake >= required { + Ok(()) + } else { + Err(Error::InsufficientStake { + participating: participating_stake, + required, + total: total_stake, + }) + } +} + +/// Verify the BLS aggregate signature. 
+fn verify_bls_signature(
+    participants: &[BlsPublicKey],
+    block_proof: &BlockProof,
+    block_proof_hash: H256,
+) -> Result<(), Error> {
+    // An empty participant set must be rejected outright rather than
+    // letting an aggregate of nothing "verify" vacuously.
+    if participants.is_empty() {
+        return Err(Error::NoParticipants);
+    }
+
+    let aggregate_pubkey = crypto_utils::aggregate_public_keys(participants);
+
+    // The message signed is the block_proof_hash
+    let message = block_proof_hash.as_bytes().to_vec();
+
+    let is_valid = bls::verify(
+        &aggregate_pubkey,
+        &message,
+        &block_proof.aggregate_signature,
+        &PHAROS_BLS_DST.as_bytes().to_vec(),
+    );
+
+    if !is_valid {
+        return Err(Error::InvalidSignature);
+    }
+
+    Ok(())
+}
diff --git a/modules/consensus/pharos/verifier/src/state_proof.rs b/modules/consensus/pharos/verifier/src/state_proof.rs
new file mode 100644
index 000000000..07f7c4cc5
--- /dev/null
+++ b/modules/consensus/pharos/verifier/src/state_proof.rs
@@ -0,0 +1,620 @@
+// Copyright (C) Polytope Labs Ltd.
+// SPDX-License-Identifier: Apache-2.0
+
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+//! State proof verification for Pharos validator sets.
+//!
+//! This module handles verification of storage proofs for the validator set
+//! stored in the staking contract at `0x4100000000000000000000000000000000000000`.
+//!
+//! Pharos uses a flat trie, so storage slot proofs verify directly against the
+//! state root — no separate account proof is needed.
+//!
+//! ## Verification Steps
+//!
+//! 1. Recompute expected storage keys from the storage values
+//! 2.
Verify each storage value against its per-key proof path and the state root +//! 3. Decode the verified storage values into a ValidatorSet + +use crate::error::Error; +use alloc::{collections::BTreeMap, vec::Vec}; +use ismp::messaging::Keccak256; +use pharos_primitives::{ + spv, PharosProofNode, ValidatorInfo, ValidatorSet, ValidatorSetProof, STAKING_CONTRACT_ADDRESS, +}; +use primitive_types::{H256, U256}; + +/// This function verifies that the provided validator set is correctly stored +/// in the staking contract at the given block. +/// +/// The `epoch` parameter is the epoch this validator set will be valid for. +/// +/// Verification steps: +/// 1. Recompute expected storage keys from the storage values +/// 2. Verify each storage value against its per-key proof path and the state root +/// 3. Decode the verified storage values into a ValidatorSet +pub fn verify_validator_set_proof( + state_root: H256, + proof: &ValidatorSetProof, + epoch: u64, +) -> Result { + let layout = StakingContractLayout::default(); + + // Recompute expected storage keys from the storage values + let keys = compute_all_storage_keys::(&proof.storage_values, &layout)?; + + // Verify each storage value against its per-key proof path. + // Pharos uses a flat trie — storage proofs verify directly against state_root. + verify_all_storage_proofs(&keys, &proof.storage_values, &proof.storage_proof, &state_root)?; + + // Decode the verified storage values into a ValidatorSet + let decoded_set = decode_validator_set_from_storage::(&proof.storage_values, epoch)?; + + validate_validator_set(&decoded_set)?; + + Ok(decoded_set) +} + +/// Decode validator set from storage values. +/// +/// This function interprets the raw storage values according to the +/// Pharos staking contract's actual storage layout. 
+/// +/// ## Pharos Storage Layout +/// +/// The contract at `0x4100000000000000000000000000000000000000` uses: +/// - Slot 0: validators mapping (mapping(bytes32 => Validator)) +/// - Slot 6: totalStake (uint256) +/// - Slot 1: activePoolIds (bytes32[] array) +/// +/// Values are ordered: [totalStake, poolCount, poolId_0..poolId_n, +/// validator_0_bls_header, validator_0_bls_data_0..N_0, validator_0_stake, ...] +/// +/// The number of BLS data slots per validator varies based on whether the key +/// was registered with or without a "0x" prefix (3 or 4 data slots respectively). +fn decode_validator_set_from_storage( + values: &[Vec], + epoch: u64, +) -> Result { + // We need 2 values at minimum: totalStake, activePoolIds length + if values.len() < 2 { + return Err(Error::InsufficientStorageValues { expected: 2, got: values.len() }); + } + + // Parse global state + // Index 0: totalStake + let _total_stake = decode_u256_from_storage(&values[0])?; + + // Index 1: activePoolIds array length (slot 1) + let validator_count = decode_u256_from_storage(&values[1])?; + + let count = validator_count.low_u64() as usize; + + // Pool IDs start at index 2 (after totalStake, array length) + let pool_set_start = 2; + let pool_ids_end = pool_set_start + count; + + if values.len() < pool_ids_end { + return Err(Error::InsufficientPoolIds { + expected: pool_ids_end, + validators: count, + got: values.len(), + }); + } + + let mut validator_set = ValidatorSet::new(epoch); + + let mut idx = pool_ids_end; + for i in 0..count { + // Pool ID from activePoolIds array + let pool_id = { + let v = &values[pool_set_start + i]; + let mut bytes = [0u8; 32]; + if v.len() <= 32 { + bytes[32 - v.len()..].copy_from_slice(v); + } + H256::from(bytes) + }; + + // BLS header at current index + if idx >= values.len() { + return Err(Error::InsufficientStorageValues { expected: idx + 1, got: values.len() }); + } + let data_slots = bls_data_slots_from_header(&values[idx])?; + + let bls_string_slot = 
&Some(values[idx].clone()); + idx += 1; + + // BLS data slots (dynamic count) + if idx + data_slots > values.len() { + return Err(Error::InsufficientStorageValues { + expected: idx + data_slots, + got: values.len(), + }); + } + let bls_data_slots: Vec>> = + values[idx..idx + data_slots].iter().map(|v| Some(v.clone())).collect(); + idx += data_slots; + + let bls_key = decode_bls_key_from_string_slot(bls_string_slot, Some(&bls_data_slots))?; + + // totalStake + if idx >= values.len() { + return Err(Error::InsufficientStorageValues { expected: idx + 1, got: values.len() }); + } + let stake = decode_u256_from_storage(&values[idx])?; + idx += 1; + + let validator = ValidatorInfo { bls_public_key: bls_key, pool_id, stake }; + + if !validator_set.add_validator(validator) { + return Err(Error::DuplicateValidator); + } + } + + log::debug!( + "Decoded validator set: {} validators, epoch {}, total stake {}", + validator_set.len(), + validator_set.epoch, + validator_set.total_stake + ); + + Ok(validator_set) +} + +/// Decode BLS public key from a Solidity string storage slot. +/// +/// In Solidity, strings are stored as: +/// - Short strings (< 32 bytes): data is stored directly in the slot, length in lowest byte +/// - Long strings (>= 32 bytes): slot contains (length * 2 + 1), data at keccak256(slot) +/// +/// The BLS public key is a 48-byte value, stored as a hex string. 
The number of +/// data slots varies based on whether the key includes a "0x" prefix: +/// - With prefix: 98 chars → ceil(98/32) = 4 data slots +/// - Without prefix: 96 chars → ceil(96/32) = 3 data slots +fn decode_bls_key_from_string_slot( + header_value: &Option>, + data_slots: Option<&[Option>]>, +) -> Result { + use alloc::string::String; + + let header = header_value.as_ref().ok_or(Error::MissingBlsKeySlot)?; + + if header.is_empty() { + return Err(Error::EmptyBlsKeySlot); + } + + let header_val = decode_u256_from_storage(header)?; + let header_bytes = header_val.to_big_endian(); + let lowest_byte = header_bytes[31]; + + let bls_hex: String = if lowest_byte & 1 == 0 { + // Short string: data is in the slot, length = lowest_byte / 2 + let len = (lowest_byte / 2) as usize; + if len > 31 { + return Err(Error::InvalidBlsStringLength); + } + // String data is stored in the high bytes of the slot + String::from_utf8(header_bytes[..len].to_vec()).map_err(|_| Error::InvalidBlsKeyUtf8)? + } else { + // Long string: header contains (length * 2 + 1) + let length = (header_val - 1) / 2; + let str_len = length.low_u64() as usize; + + // For BLS keys, we expect a 96 or 98 character hex string + // This requires 3 data slots (ceil(96/32) = 3) + let data_slots = data_slots.ok_or(Error::LongStringBlsKeyUnsupported)?; + + let slots_needed = (str_len + 31) / 32; + if data_slots.len() < slots_needed { + return Err(Error::InsufficientStorageValues { + expected: slots_needed, + got: data_slots.len(), + }); + } + + let mut string_data = Vec::with_capacity(str_len); + for (i, slot_value) in data_slots.iter().take(slots_needed).enumerate() { + let slot_data = slot_value.as_ref().ok_or(Error::MissingBlsKeySlot)?; + let decoded = decode_u256_from_storage(slot_data)?; + let bytes = decoded.to_big_endian(); + + let remaining = str_len - (i * 32); + let take = remaining.min(32); + string_data.extend_from_slice(&bytes[..take]); + } + + String::from_utf8(string_data).map_err(|_| 
Error::InvalidBlsKeyUtf8)? + }; + + let bls_hex = bls_hex.trim_start_matches("0x"); + let bls_bytes = hex::decode(bls_hex).map_err(|_| Error::InvalidBlsKeyHex)?; + + // The staking contract may store a prefix before the 48-byte BLS key. + // Extract the last 48 bytes which contain the actual G1 compressed key. + if bls_bytes.len() < 48 { + return Err(Error::InvalidBlsKeyLength { expected: 48, got: bls_bytes.len() }); + } + + let key_start = bls_bytes.len() - 48; + bls_bytes[key_start..].try_into().map_err(|_| Error::BlsKeyConversionFailed) +} + +/// Recompute the expected storage keys in the same order as `storage_values`. +/// +/// The order matches the prover's output: +/// [totalStake, activePoolIds length, pool_id_0..n, +/// validator_0_bls_header, validator_0_bls_data_0..N_0, validator_0_stake, ...] +/// +/// The number of BLS data slots per validator is dynamically determined from +/// each validator's BLS string header value in `storage_values`. +fn compute_all_storage_keys( + storage_values: &[Vec], + layout: &StakingContractLayout, +) -> Result, Error> { + if storage_values.len() < 2 { + return Err(Error::InsufficientStorageValues { expected: 2, got: storage_values.len() }); + } + + let mut keys = Vec::new(); + + // Index 0: totalStake + keys.push(layout.raw_slot_key(layout.total_stake_slot)); + + // Index 1: activePoolIds length + keys.push(layout.raw_slot_key(layout.active_pool_set_slot)); + + // Parse validator count from storage_values[1] + let count_val = decode_u256_from_storage(&storage_values[1])?; + let count = count_val.low_u64() as usize; + + // Pool ID array element keys + for i in 0..count { + keys.push(layout.array_element_key::(layout.active_pool_set_slot, i as u64)); + } + + // Extract pool IDs from storage values to compute validator keys + let pool_set_start = 2; + let pool_ids_end = pool_set_start + count; + + if storage_values.len() < pool_ids_end { + return Err(Error::InsufficientPoolIds { + expected: pool_ids_end, + validators: count, 
+ got: storage_values.len(), + }); + } + + // For each validator, dynamically determine the BLS data slot count + // from the header value in storage_values + let mut idx = pool_ids_end; + for i in 0..count { + let v = &storage_values[pool_set_start + i]; + let mut bytes = [0u8; 32]; + if v.len() <= 32 { + bytes[32 - v.len()..].copy_from_slice(v); + } + let pool_id = H256::from(bytes); + + // The BLS header value is at the current index + if idx >= storage_values.len() { + return Err(Error::InsufficientStorageValues { + expected: idx + 1, + got: storage_values.len(), + }); + } + let data_slots = bls_data_slots_from_header(&storage_values[idx])?; + + let validator_keys = layout.get_validator_keys::(&pool_id, data_slots); + keys.extend(validator_keys); + + // Advance index: 1 (header) + data_slots + 1 (stake) + idx += 1 + data_slots + 1; + } + + Ok(keys) +} + +/// Verify each storage value against its per-key proof path in the storage trie. +fn verify_all_storage_proofs( + keys: &[H256], + values: &[Vec], + storage_proof: &BTreeMap>, + storage_hash: &H256, +) -> Result<(), Error> { + if keys.len() != values.len() { + return Err(Error::SlotValueLengthMismatch { slots: keys.len(), values: values.len() }); + } + + let address: [u8; 20] = STAKING_CONTRACT_ADDRESS.0 .0; + + for (key, value) in keys.iter().zip(values.iter()) { + let proof_nodes = storage_proof.get(key).ok_or(Error::StorageProofLookupFailed)?; + + // Pad value to 32 bytes (left-padded, big-endian) as stored in the trie + let mut padded_value = [0u8; 32]; + if value.len() <= 32 { + padded_value[32 - value.len()..].copy_from_slice(value); + } else { + return Err(Error::StorageValueTooLarge); + } + + if !spv::verify_storage_proof(proof_nodes, &address, &key.0, &padded_value, &storage_hash.0) + { + return Err(Error::StorageProofLookupFailed); + } + } + + Ok(()) +} + +/// Validate the internal consistency of a validator set. 
+pub fn validate_validator_set(validator_set: &ValidatorSet) -> Result<(), Error> { + if validator_set.is_empty() { + return Err(Error::EmptyValidatorSet); + } + + for validator in &validator_set.validators { + if validator.stake.is_zero() { + return Err(Error::ZeroStakeValidator); + } + } + + Ok(()) +} + +/// Storage layout information for the Pharos staking contract. +/// +/// Based on the actual Pharos staking contract at `0x4100000000000000000000000000000000000000`. +/// +/// ## Contract Storage Layout (StakingStorageV1) +/// +/// ```solidity +/// mapping(bytes32 => Validator) public validators; // slot 0 +/// bytes32[] public activePoolIds; // slot 1 +/// bytes32[] public pendingAddPoolIds; // slot 2 +/// bytes32[] public pendingUpdatePoolIds; // slot 3 +/// bytes32[] public pendingExitPoolIds; // slot 4 +/// uint256 public currentEpoch; // slot 5 +/// uint256 public totalStake; // slot 6 +/// IChainConfig public cfg; // slot 7 +/// mapping(address => uint256) public pendingWithdrawStakes; // slot 8 +/// uint256 public totalSupply; // slot 9 +/// uint256 public currentInflationRate; // slot 10 +/// uint256 public lastInflationAdjustmentTime; // slot 11 +/// uint256 public lastInflationTotalSupplySnapshot; // slot 12 +/// address internal implAddress; // slot 13 +/// ``` +/// +/// ## Contract Storage Layout (StakingStorageV2) +/// +/// ```solidity +/// uint256 lastEpochStartTime; // slot 14 +/// mapping(bytes32 => mapping(address => Delegator)) delegators; // slot 15 +/// mapping(bytes32 => mapping(address => bool)) validatorWhitelists; // slot 16 +/// mapping(bytes32 => uint256) accumulatedRewardPerShares; // slot 17 +/// mapping(bytes32 => uint256) commissionRates; // slot 18 +/// mapping(bytes32 => bool) delegationEnabledMapping; // slot 19 +/// mapping(bytes32 => uint256) delegatorCounts; // slot 20 +/// PendingUndelegation[] pendingUndelegations; // slot 21 +/// EnumerableSet.Bytes32Set activePoolSets; // slot 22-23 +/// EnumerableSet.Bytes32Set 
pendingAddPoolSets; // slot 24-25 +/// EnumerableSet.Bytes32Set pendingUpdatePoolSets; // slot 26-27 +/// EnumerableSet.Bytes32Set pendingExitPoolSets; // slot 28-29 +/// ``` +/// +/// The contract currently uses the V1 layout. Active pool IDs are stored +/// in `activePoolIds` at slot 1 as a simple `bytes32[]` array. +/// +/// ## Validator Struct +/// +/// ```solidity +/// struct Validator { +/// string description; // offset 0 +/// string publicKey; // offset 1 +/// string publicKeyPop; // offset 2 +/// string blsPublicKey; // offset 3 +/// string blsPublicKeyPop; // offset 4 +/// string endpoint; // offset 5 +/// uint8 status; // offset 6 +/// bytes32 poolId; // offset 7 +/// uint256 totalStake; // offset 8 +/// address owner; // offset 9 +/// uint256 stakeSnapshot; // offset 10 +/// uint256 pendingWithdrawStake; // offset 11 +/// uint8 pendingWithdrawWindow; // offset 12 +/// } +/// ``` +#[derive(Debug, Clone)] +pub struct StakingContractLayout { + /// Storage slot for the validators mapping + pub validators_mapping_slot: u64, + /// Storage slot for activePoolIds (bytes32[] array) + pub active_pool_set_slot: u64, + /// Storage slot for totalStake + pub total_stake_slot: u64, +} + +/// Offsets within the Validator struct for each field. 
+#[derive(Debug, Clone, Copy)] +pub struct ValidatorStructOffsets { + /// Offset for description (string) + pub description: u64, + /// Offset for publicKey (string) + pub public_key: u64, + /// Offset for publicKeyPop (string) + pub public_key_pop: u64, + /// Offset for blsPublicKey (string) + pub bls_public_key: u64, + /// Offset for blsPublicKeyPop (string) + pub bls_public_key_pop: u64, + /// Offset for endpoint (string) + pub endpoint: u64, + /// Offset for status (uint8) + pub status: u64, + /// Offset for poolId (bytes32) + pub pool_id: u64, + /// Offset for totalStake (uint256) + pub total_stake: u64, + /// Offset for owner (address) + pub owner: u64, + /// Offset for stakeSnapshot (uint256) + pub stake_snapshot: u64, + /// Offset for pendingWithdrawStake (uint256) + pub pending_withdraw_stake: u64, + /// Offset for pendingWithdrawWindow (uint8) + pub pending_withdraw_window: u64, +} + +impl Default for ValidatorStructOffsets { + fn default() -> Self { + Self { + description: 0, + public_key: 1, + public_key_pop: 2, + bls_public_key: 3, + bls_public_key_pop: 4, + endpoint: 5, + status: 6, + pool_id: 7, + total_stake: 8, + owner: 9, + stake_snapshot: 10, + pending_withdraw_stake: 11, + pending_withdraw_window: 12, + } + } +} + +impl Default for StakingContractLayout { + fn default() -> Self { + Self { validators_mapping_slot: 0, active_pool_set_slot: 1, total_stake_slot: 6 } + } +} + +impl StakingContractLayout { + /// Calculate the raw storage key for a simple slot (no hashing). + pub fn raw_slot_key(&self, slot: u64) -> H256 { + H256::from_low_u64_be(slot) + } + + /// Calculate the storage key for a dynamic array element. 
+ pub fn array_element_key(&self, base_slot: u64, index: u64) -> H256 { + let slot_bytes = U256::from(base_slot).to_big_endian(); + let base_key = H::keccak256(&slot_bytes); + let base_pos = U256::from_big_endian(&base_key.0); + let element_pos = base_pos + U256::from(index); + H256(element_pos.to_big_endian()) + } + + /// Calculate the base storage slot for a validator in the mapping. + pub fn validator_base_slot(&self, pool_id: &H256) -> H256 { + let mut data = [0u8; 64]; + data[..32].copy_from_slice(pool_id.as_bytes()); + data[32..64].copy_from_slice(&U256::from(self.validators_mapping_slot).to_big_endian()); + H::keccak256(&data) + } + + /// Calculate the storage slot for a specific field within a Validator struct. + pub fn validator_field_slot(&self, pool_id: &H256, field_offset: u64) -> H256 { + let base = self.validator_base_slot::(pool_id); + let base_pos = U256::from_big_endian(base.as_bytes()); + let field_pos = base_pos + U256::from(field_offset); + H256(field_pos.to_big_endian()) + } + + /// Calculate the storage slot for string data. + pub fn string_data_slot(&self, string_slot: &H256) -> H256 { + H::keccak256(string_slot.as_bytes()) + } + + /// Get storage keys for a specific validator's data. + /// + /// Returns keys for: + /// - BLS public key string slot (offset 3) + /// - BLS public key data slots (dynamic count based on string length) + /// - totalStake (offset 8) + /// + /// The `bls_data_slot_count` parameter specifies how many data slots to include + /// for the BLS public key string. 
This is derived from the string header value: + /// - Keys with "0x" prefix (98 chars): ceil(98/32) = 4 slots + /// - Keys without prefix (96 chars): ceil(96/32) = 3 slots + pub fn get_validator_keys( + &self, + pool_id: &H256, + bls_data_slot_count: usize, + ) -> Vec { + let offsets = ValidatorStructOffsets::default(); + let mut keys = Vec::new(); + + // BLS public key string slot (stores length for long strings) + let bls_string_slot = self.validator_field_slot::(pool_id, offsets.bls_public_key); + keys.push(bls_string_slot); + + // BLS public key data slots (for long strings) + // Data is stored at keccak256(string_slot) for `bls_data_slot_count` slots + let bls_data_base = self.string_data_slot::(&bls_string_slot); + let bls_data_base_pos = U256::from_big_endian(bls_data_base.as_bytes()); + for i in 0..bls_data_slot_count { + let slot_pos = bls_data_base_pos + U256::from(i); + keys.push(H256(slot_pos.to_big_endian())); + } + + // totalStake field + keys.push(self.validator_field_slot::(pool_id, offsets.total_stake)); + + keys + } +} + +/// Determine the number of BLS data slots from the Solidity string header value. +/// +/// For long strings (>= 32 bytes), the header slot contains `length * 2 + 1`. +/// The actual byte length is `(header_value - 1) / 2`, and the number of 32-byte +/// data slots is `ceil(length / 32)`. +/// +/// For short strings (< 32 bytes), the data is stored directly in the header slot +/// and no additional data slots are needed (returns 0). 
+pub fn bls_data_slots_from_header(header_value: &[u8]) -> Result { + let header_val = decode_u256_from_storage(header_value)?; + let header_bytes = header_val.to_big_endian(); + let lowest_byte = header_bytes[31]; + + if lowest_byte & 1 == 0 { + // Short string - data is in the header itself + Ok(0) + } else { + // Long string - header = length * 2 + 1 + let length = (header_val - 1) / 2; + let str_len = length.low_u64() as usize; + Ok((str_len + 31) / 32) + } +} + +/// Decode a U256 value from RLP-encoded storage value. +pub fn decode_u256_from_storage(value: &[u8]) -> Result { + if value.is_empty() { + return Ok(U256::zero()); + } + + // Storage values are RLP encoded + // integers are stored as big-endian bytes + if value.len() <= 32 { + let mut padded = [0u8; 32]; + padded[32 - value.len()..].copy_from_slice(value); + Ok(U256::from_big_endian(&padded)) + } else { + Err(Error::StorageValueTooLarge) + } +} diff --git a/modules/consensus/sync-committee/primitives/Cargo.toml b/modules/consensus/sync-committee/primitives/Cargo.toml index 365d5065d..4f5104aab 100644 --- a/modules/consensus/sync-committee/primitives/Cargo.toml +++ b/modules/consensus/sync-committee/primitives/Cargo.toml @@ -16,6 +16,7 @@ serde = { workspace = true, optional = true, features = ["derive"] } hex = { workspace = true, default-features = false, features = ["alloc"] } anyhow = { workspace = true, default-features = false } serde-hex-utils = { workspace = true, default-features = false } +crypto-utils = { workspace = true, default-features = false } ssz-rs = { git = "https://github.com/polytope-labs/ssz-rs", branch = "main", default-features = false } @@ -31,5 +32,6 @@ std = [ "primitive-types/std", "serde", "serde-hex-utils/std", + "crypto-utils/std", ] nofulu = [] diff --git a/modules/consensus/sync-committee/primitives/src/ssz/mod.rs b/modules/consensus/sync-committee/primitives/src/ssz/mod.rs index ebeaa9b3b..6f071568e 100644 --- 
a/modules/consensus/sync-committee/primitives/src/ssz/mod.rs +++ b/modules/consensus/sync-committee/primitives/src/ssz/mod.rs @@ -1,5 +1,4 @@ mod byte_list; -mod byte_vector; use core::fmt; fn write_bytes_to_lower_hex>(f: &mut fmt::Formatter<'_>, data: T) -> fmt::Result { @@ -13,4 +12,4 @@ fn write_bytes_to_lower_hex>(f: &mut fmt::Formatter<'_>, data: T) } pub use byte_list::ByteList; -pub use byte_vector::ByteVector; +pub use crypto_utils::ssz::ByteVector; diff --git a/modules/ismp/clients/pharos/Cargo.toml b/modules/ismp/clients/pharos/Cargo.toml new file mode 100644 index 000000000..275063f2d --- /dev/null +++ b/modules/ismp/clients/pharos/Cargo.toml @@ -0,0 +1,45 @@ +[package] +name = "ismp-pharos" +version = "0.1.0" +edition = "2021" +description = "ISMP Consensus Client for the Pharos Network" +authors = ["Polytope Labs "] +publish = false + +[dependencies] +log = { workspace = true, default-features = false } +anyhow = { workspace = true, default-features = false } +codec = { workspace = true, default-features = false } +scale-info = { version = "2.1.1", default-features = false, features = ["derive"] } + +ismp = { workspace = true } +pharos-verifier = { workspace = true } +pharos-primitives = { workspace = true } +sync-committee-primitives = { workspace = true } +geth-primitives = { workspace = true } +pharos-state-machine = { workspace = true } +pallet-ismp-host-executive = { workspace = true } +pallet-ismp = { workspace = true } + +[dependencies.polkadot-sdk] +workspace = true +features = ["sp-core"] + +[features] +default = ["std"] +std = [ + "log/std", + "anyhow/std", + "polkadot-sdk/std", + "codec/std", + "scale-info/std", + "pharos-verifier/std", + "pharos-primitives/std", + "ismp/std", + "sync-committee-primitives/std", + "geth-primitives/std", + "pharos-state-machine/std", + "pallet-ismp-host-executive/std", + "pallet-ismp/std", +] +try-runtime = ["polkadot-sdk/try-runtime"] diff --git a/modules/ismp/clients/pharos/src/lib.rs 
b/modules/ismp/clients/pharos/src/lib.rs new file mode 100644 index 000000000..d75806c9a --- /dev/null +++ b/modules/ismp/clients/pharos/src/lib.rs @@ -0,0 +1,215 @@ +// Copyright (C) Polytope Labs Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//! ISMP Consensus Client for Pharos Network. + +#![cfg_attr(not(feature = "std"), no_std)] + +extern crate alloc; + +use alloc::{boxed::Box, collections::BTreeMap, string::ToString, vec, vec::Vec}; +use codec::{Decode, Encode}; +use core::marker::PhantomData; +use pharos_state_machine::PharosStateMachine; +use geth_primitives::Header; +use ismp::{ + consensus::{ + ConsensusClient, ConsensusClientId, ConsensusStateId, StateCommitment, StateMachineClient, + StateMachineId, + }, + error::Error, + host::{IsmpHost, StateMachine}, + messaging::StateCommitmentHeight, +}; +pub use pharos_primitives::{Mainnet, Testnet}; +use pharos_primitives::{PHAROS_ATLANTIC_CHAIN_ID, PHAROS_MAINNET_CHAIN_ID, ValidatorSet, VerifierState, VerifierStateUpdate}; +use pharos_verifier::verify_pharos_block; +use polkadot_sdk::*; +use sp_core::H256; + +/// Consensus state ID for Pharos +pub const PHAROS_CONSENSUS_CLIENT_ID: ConsensusStateId = *b"PHAR"; + +/// Consensus state for Pharos light client. 
+#[derive(codec::Encode, codec::Decode, Debug, Default, PartialEq, Eq, Clone)] +pub struct ConsensusState { + pub current_validators: ValidatorSet, + pub finalized_height: u64, + pub finalized_hash: H256, + pub current_epoch: u64, + pub chain_id: u32, +} + +impl From for VerifierState { + fn from(state: ConsensusState) -> Self { + VerifierState { + current_validator_set: state.current_validators, + finalized_block_number: state.finalized_height, + finalized_hash: state.finalized_hash, + current_epoch: state.current_epoch, + } + } +} + +/// The Pharos consensus client. +pub struct PharosClient< + H: IsmpHost, + T: pallet_ismp_host_executive::Config, + C: pharos_primitives::Config, +>(PhantomData<(H, T, C)>); + +impl Default + for PharosClient +{ + fn default() -> Self { + Self(PhantomData) + } +} + +impl Clone + for PharosClient +{ + fn clone(&self) -> Self { + Self(PhantomData) + } +} + +impl< + H: IsmpHost + Send + Sync + Default + 'static, + T: pallet_ismp_host_executive::Config, + C: pharos_primitives::Config, + > ConsensusClient for PharosClient +{ + fn verify_consensus( + &self, + _host: &dyn IsmpHost, + consensus_state_id: ConsensusStateId, + trusted_consensus_state: Vec, + proof: Vec, + ) -> Result<(Vec, ismp::consensus::VerifiedCommitments), Error> { + let update = VerifierStateUpdate::decode(&mut &proof[..]) + .map_err(|e| Error::AnyHow(anyhow::anyhow!("{:?}", e).into()))?; + + let consensus_state = ConsensusState::decode(&mut &trusted_consensus_state[..]) + .map_err(|_| Error::Custom("Cannot decode trusted consensus state".to_string()))?; + + if consensus_state.finalized_height >= update.block_number() { + return Err(Error::Custom("Expired update".to_string())); + } + + let trusted_state = VerifierState { + current_validator_set: consensus_state.current_validators.clone(), + finalized_block_number: consensus_state.finalized_height, + finalized_hash: consensus_state.finalized_hash, + current_epoch: consensus_state.current_epoch, + }; + + let new_state = 
verify_pharos_block::(trusted_state, update.clone()) + .map_err(|e| Error::AnyHow(anyhow::Error::from(e).into()))?; + + let state_commitment = StateCommitmentHeight { + commitment: StateCommitment { + timestamp: update.header.timestamp, + overlay_root: None, + state_root: update.header.state_root, + }, + height: new_state.finalized_block_number, + }; + + let new_consensus_state = ConsensusState { + current_validators: new_state.current_validator_set, + finalized_height: new_state.finalized_block_number, + finalized_hash: new_state.finalized_hash, + current_epoch: new_state.current_epoch, + chain_id: consensus_state.chain_id, + }; + + let mut state_machine_map: BTreeMap> = + BTreeMap::new(); + state_machine_map.insert( + StateMachineId { + state_id: StateMachine::Evm(new_consensus_state.chain_id), + consensus_state_id, + }, + vec![state_commitment], + ); + + Ok((new_consensus_state.encode(), state_machine_map)) + } + + fn verify_fraud_proof( + &self, + _host: &dyn IsmpHost, + trusted_consensus_state: Vec, + proof_1: Vec, + proof_2: Vec, + ) -> Result<(), Error> { + let update_1 = VerifierStateUpdate::decode(&mut &proof_1[..]) + .map_err(|_| Error::Custom("Cannot decode pharos update for proof 1".to_string()))?; + + let update_2 = VerifierStateUpdate::decode(&mut &proof_2[..]) + .map_err(|_| Error::Custom("Cannot decode pharos update for proof 2".to_string()))?; + + let header_1 = &update_1.header; + let header_2 = &update_2.header; + + if header_1.number != header_2.number { + return Err(Error::Custom("Invalid fraud proof: different block numbers".to_string())); + } + + let header_1_hash = Header::from(header_1).hash::(); + let header_2_hash = Header::from(header_2).hash::(); + + if header_1_hash == header_2_hash { + return Err(Error::Custom("Invalid fraud proof: identical headers".to_string())); + } + + let consensus_state = ConsensusState::decode(&mut &trusted_consensus_state[..]) + .map_err(|_| Error::Custom("Cannot decode trusted consensus state".to_string()))?; 
+ + let trusted_state = VerifierState { + current_validator_set: consensus_state.current_validators.clone(), + finalized_block_number: consensus_state.finalized_height, + finalized_hash: consensus_state.finalized_hash, + current_epoch: consensus_state.current_epoch, + }; + + verify_pharos_block::(trusted_state.clone(), update_1) + .map_err(|e| Error::AnyHow(anyhow::Error::from(e).into()))?; + + verify_pharos_block::(trusted_state, update_2) + .map_err(|e| Error::AnyHow(anyhow::Error::from(e).into()))?; + + Ok(()) + } + + fn consensus_client_id(&self) -> ConsensusClientId { + PHAROS_CONSENSUS_CLIENT_ID + } + + fn state_machine( + &self, + id: StateMachine, + ) -> Result, Error> { + match id { + StateMachine::Evm(chain_id) + if chain_id == PHAROS_MAINNET_CHAIN_ID || + chain_id == PHAROS_ATLANTIC_CHAIN_ID => + Ok(Box::new(>::default())), + state_machine => + Err(Error::Custom(alloc::format!("Unsupported state machine: {state_machine:?}"))), + } + } +} diff --git a/modules/ismp/core/src/error.rs b/modules/ismp/core/src/error.rs index 6bd64ab43..31f4247b8 100644 --- a/modules/ismp/core/src/error.rs +++ b/modules/ismp/core/src/error.rs @@ -20,7 +20,10 @@ use crate::{ consensus::{ConsensusClientId, ConsensusStateId, StateMachineHeight, StateMachineId}, events::Meta, }; -use alloc::{string::String, vec::Vec}; +use alloc::{ + string::{String, ToString}, + vec::Vec, +}; use codec::{Decode, Encode}; use core::time::Duration; use scale_info::TypeInfo; @@ -214,4 +217,51 @@ pub enum Error { }, /// Error decoding signature SignatureDecodingFailed, + /// Anyhow error: {0} + AnyHow(AnyhowError), +} + +/// SCALE-compatible wrapper around [`anyhow::Error`]. 
+#[derive(Debug)] +pub struct AnyhowError(pub anyhow::Error); + +impl core::fmt::Display for AnyhowError { + fn fmt(&self, f: &mut core::fmt::Formatter<'_>) -> core::fmt::Result { + core::fmt::Display::fmt(&self.0, f) + } +} + +impl PartialEq for AnyhowError { + fn eq(&self, other: &Self) -> bool { + self.0.to_string() == other.0.to_string() + } +} + +impl Eq for AnyhowError {} + +impl Encode for AnyhowError { + fn encode_to(&self, dest: &mut W) { + self.0.to_string().encode_to(dest) + } +} + +impl Decode for AnyhowError { + fn decode(input: &mut I) -> Result { + let s = String::decode(input)?; + Ok(Self(anyhow::Error::msg(s))) + } +} + +impl TypeInfo for AnyhowError { + type Identity = String; + + fn type_info() -> scale_info::Type { + String::type_info() + } +} + +impl From for AnyhowError { + fn from(e: anyhow::Error) -> Self { + Self(e) + } } diff --git a/modules/ismp/state-machines/pharos/Cargo.toml b/modules/ismp/state-machines/pharos/Cargo.toml new file mode 100644 index 000000000..4eb01e823 --- /dev/null +++ b/modules/ismp/state-machines/pharos/Cargo.toml @@ -0,0 +1,39 @@ +[package] +name = "pharos-state-machine" +version = "0.1.0" +edition = "2021" +description = "Pharos state machine verification using hexary hash tree (SHA-256) proofs" +authors = ["Polytope Labs "] +publish = false + +[dependencies] +ismp = { workspace = true } +pharos-primitives = { workspace = true } +pallet-ismp-host-executive = { workspace = true } +geth-primitives = { workspace = true } +evm-state-machine = { path = "../evm", default-features = false } + +codec = { workspace = true, default-features = false } +primitive-types = { workspace = true } +ethabi = { workspace = true } +hex-literal = { workspace = true } +alloy-rlp = { workspace = true } + +[dependencies.polkadot-sdk] +workspace = true +features = ["sp-core"] + +[features] +default = ["std"] +std = [ + "codec/std", + "ismp/std", + "pharos-primitives/std", + "pallet-ismp-host-executive/std", + "geth-primitives/std", + 
"evm-state-machine/std", + "primitive-types/std", + "ethabi/std", + "alloy-rlp/std", + "polkadot-sdk/std", +] diff --git a/modules/ismp/state-machines/pharos/src/lib.rs b/modules/ismp/state-machines/pharos/src/lib.rs new file mode 100644 index 000000000..f7b9dd8cf --- /dev/null +++ b/modules/ismp/state-machines/pharos/src/lib.rs @@ -0,0 +1,216 @@ +// Copyright (C) Polytope Labs Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//! Pharos state machine verification. +//! +//! Uses Pharos hexary hash tree proofs with SHA-256 hashing instead of +//! Ethereum's Merkle-Patricia Trie with Keccak-256. + +#![cfg_attr(not(feature = "std"), no_std)] + +extern crate alloc; + +use alloc::{collections::BTreeMap, format, string::ToString, vec::Vec}; +use codec::{Decode, Encode}; +use evm_state_machine::{req_res_commitment_key, req_res_receipt_keys}; +use ismp::{ + consensus::{StateCommitment, StateMachineClient}, + error::Error, + host::IsmpHost, + messaging::{Keccak256, Proof}, + router::RequestResponse, +}; +use pallet_ismp_host_executive::EvmHosts; +use pharos_primitives::{spv, PharosProofNode}; +use primitive_types::{H160, H256}; + +/// Pharos-specific state proof (replaces EvmStateProof). +/// +/// Contains Pharos hexary hash tree proof data with SHA-256 hashing. +/// Pharos uses a flat trie where storage proofs verify directly against +/// the state_root, so no separate account proof is needed. 
+#[derive(Encode, Decode, Clone)] +pub struct PharosStateProof { + /// Map of storage key (slot hash) to storage proof nodes + pub storage_proof: BTreeMap, Vec>, + /// Map of storage key (slot hash) to the 32-byte padded storage value + pub storage_values: BTreeMap, Vec>, +} + +/// Pharos state machine client for ISMP state proof verification. +pub struct PharosStateMachine( + core::marker::PhantomData<(H, T)>, +); + +impl Default for PharosStateMachine { + fn default() -> Self { + Self(core::marker::PhantomData) + } +} + +impl Clone for PharosStateMachine { + fn clone(&self) -> Self { + PharosStateMachine::::default() + } +} + +impl StateMachineClient + for PharosStateMachine +{ + fn verify_membership( + &self, + _host: &dyn IsmpHost, + item: RequestResponse, + root: StateCommitment, + proof: &Proof, + ) -> Result<(), Error> { + let contract_address = EvmHosts::::get(&proof.height.id.state_id) + .ok_or_else(|| Error::Custom("Ismp contract address not found".to_string()))?; + verify_membership::(item, root, proof, contract_address) + } + + fn receipts_state_trie_key(&self, items: RequestResponse) -> Vec> { + req_res_receipt_keys::(items) + } + + fn verify_state_proof( + &self, + _host: &dyn IsmpHost, + keys: Vec>, + root: StateCommitment, + proof: &Proof, + ) -> Result, Option>>, Error> { + let ismp_address = EvmHosts::::get(&proof.height.id.state_id) + .ok_or_else(|| Error::Custom("Ismp contract address not found".to_string()))?; + verify_state_proof::(keys, root, proof, ismp_address) + } +} + +/// Decode a PharosStateProof from the proof bytes. +fn decode_pharos_state_proof(proof: &Proof) -> Result { + PharosStateProof::decode(&mut &proof.proof[..]) + .map_err(|_| Error::Custom(format!("Cannot decode pharos state proof"))) +} + +/// Verify membership of ISMP commitments in the Pharos state. 
+pub fn verify_membership( + item: RequestResponse, + root: StateCommitment, + proof: &Proof, + contract_address: H160, +) -> Result<(), Error> { + let pharos_proof = decode_pharos_state_proof(proof)?; + + let state_root = H256::from_slice(&root.state_root[..]); + let address: [u8; 20] = contract_address.0; + + let commitment_keys = req_res_commitment_key::(item, |k| k.to_vec()); + + // Pharos uses a flat trie — storage proofs verify directly against state_root. + for slot_hash in commitment_keys { + let storage_proof_nodes = pharos_proof + .storage_proof + .get(&slot_hash) + .ok_or_else(|| Error::Custom("Missing storage proof for commitment key".to_string()))?; + + let slot_key: [u8; 32] = slot_hash + .try_into() + .map_err(|_| Error::Custom("Invalid slot hash length".to_string()))?; + + spv::verify_storage_membership_proof( + storage_proof_nodes, + &address, + &slot_key, + &state_root.0, + ) + .ok_or_else(|| Error::Custom("Storage membership proof verification failed".to_string()))?; + } + + Ok(()) +} + +/// Verify state proof and return key-value map. +pub fn verify_state_proof( + keys: Vec>, + root: StateCommitment, + proof: &Proof, + ismp_address: H160, +) -> Result, Option>>, Error> { + let pharos_proof = decode_pharos_state_proof(proof)?; + + let state_root = H256::from_slice(&root.state_root[..]); + + // Pharos uses a flat trie — storage proofs verify directly against state_root. 
+ let mut map = BTreeMap::new(); + + for key in keys { + let (contract_addr, slot_hash) = if key.len() == 52 { + // First 20 bytes = contract address, last 32 = slot hash + let addr = H160::from_slice(&key[..20]); + (addr, key[20..].to_vec()) + } else if key.len() == 32 { + // Direct slot hash for the ISMP host contract + (ismp_address, key.clone()) + } else if key.len() == 20 { + map.insert(key, None); + continue; + } else { + return Err(Error::Custom( + "Unsupported key type: expected length 20, 32, or 52".to_string(), + )); + }; + + let contract_address: [u8; 20] = contract_addr.0; + + let slot_key: [u8; 32] = slot_hash.clone().try_into().map_err(|_| { + Error::Custom("Invalid slot hash length: expected 32 bytes".to_string()) + })?; + + // Pharos does not support non-membership proofs, so all keys must have + // a proof and value. Missing entries indicate an incomplete proof. + let storage_proof_nodes = pharos_proof + .storage_proof + .get(slot_key.as_slice()) + .ok_or_else(|| Error::Custom("Missing storage proof for key".to_string()))?; + + let storage_value = pharos_proof + .storage_values + .get(&slot_hash) + .ok_or_else(|| Error::Custom("Missing storage value for key".to_string()))?; + + // Pad value to 32 bytes for proof verification + let mut padded_value = [0u8; 32]; + if storage_value.len() <= 32 { + padded_value[32 - storage_value.len()..].copy_from_slice(storage_value); + } else { + return Err(Error::Custom("Storage value exceeds 32 bytes".to_string())); + } + + // Verify the proof with the actual value + if !spv::verify_storage_proof( + storage_proof_nodes, + &contract_address, + &slot_key, + &padded_value, + &state_root.0, + ) { + return Err(Error::Custom("Storage proof verification failed".to_string())); + } + + map.insert(key, Some(storage_value.clone())); + } + + Ok(map) +} diff --git a/modules/pallets/testsuite/Cargo.toml b/modules/pallets/testsuite/Cargo.toml index d6572df76..073d96005 100644 --- a/modules/pallets/testsuite/Cargo.toml +++ 
b/modules/pallets/testsuite/Cargo.toml @@ -52,6 +52,12 @@ hyperbridge-client-machine = {workspace = true, default-features = true } evm-state-machine = { workspace = true, default-features = true } subxt-utils = { workspace = true, default-features = true } ismp-grandpa = { workspace = true, default-features = true } +ismp-pharos = { workspace = true, default-features = true } +pharos-primitives = { workspace = true, default-features = true } +pharos-prover = { workspace = true, default-features = true } +geth-primitives = { workspace = true, default-features = true } +bls = { workspace = true } +crypto-utils = { workspace = true, default-features = true } rs_merkle = { version = "1.5.0"} log = { workspace = true } primitive-types = { workspace = true } diff --git a/modules/pallets/testsuite/src/runtime.rs b/modules/pallets/testsuite/src/runtime.rs index 73ca10fb2..b8b0de961 100644 --- a/modules/pallets/testsuite/src/runtime.rs +++ b/modules/pallets/testsuite/src/runtime.rs @@ -258,6 +258,7 @@ impl pallet_ismp::Config for Test { Test, HyperbridgeClientMachine, >, + ismp_pharos::PharosClient, ); type OffchainDB = Mmr; type FeeHandler = ( diff --git a/modules/pallets/testsuite/src/tests/ismp_pharos.rs b/modules/pallets/testsuite/src/tests/ismp_pharos.rs new file mode 100644 index 000000000..2b7efea23 --- /dev/null +++ b/modules/pallets/testsuite/src/tests/ismp_pharos.rs @@ -0,0 +1,235 @@ +// Copyright (c) 2025 Polytope Labs. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+// See the License for the specific language governing permissions and +// limitations under the License. + +#![cfg(test)] + +use crate::runtime::{Ismp, Test}; +use codec::{Decode, Encode}; +use ismp::{ + consensus::{ConsensusClient, StateMachineId}, + host::StateMachine, +}; +use ismp_pharos::{ConsensusState, PharosClient, PHAROS_CONSENSUS_CLIENT_ID}; +use pharos_primitives::{Config, Testnet, PHAROS_ATLANTIC_CHAIN_ID}; +use pharos_prover::PharosProver; +use primitive_types::H256; + +#[tokio::test] +#[ignore] +async fn test_ismp_pharos_non_epoch_boundary_consensus_verification() { + let rpc_url = std::env::var("PHAROS_ATLANTIC_RPC") + .expect("PHAROS_ATLANTIC_RPC env variable must be set"); + let prover = PharosProver::::new(&rpc_url).expect("Failed to create prover"); + + let latest_block_num = prover.get_latest_block().await.expect("Failed to get block number"); + println!("Latest block: {}", latest_block_num); + + let mut target_block = latest_block_num.saturating_sub(5); + + // ensuring we're not at an epoch boundary so as to avoid needing staking contract verification + while Testnet::is_epoch_boundary(target_block) { + target_block = target_block.saturating_sub(1); + } + println!("Target block: {}", target_block); + + let validator_info = + prover.rpc.get_validator_info(None).await.expect("Failed to get validator info"); + println!("Validators: {}", validator_info.validator_set.len()); + + let current_epoch = Testnet::compute_epoch(target_block); + let validator_set = prover + .build_validator_set(&validator_info.validator_set, current_epoch) + .expect("Failed to build validator set"); + println!("Total stake: {}", validator_set.total_stake); + + let initial_block = target_block - 1; + let initial_consensus_state = ConsensusState { + current_validators: validator_set, + finalized_height: initial_block, + finalized_hash: H256::zero(), + current_epoch, + chain_id: PHAROS_ATLANTIC_CHAIN_ID, + }; + + let update = prover + .fetch_block_update(target_block) + 
.await + .expect("Failed to fetch block update"); + println!("Block update is for block: {}", update.block_number()); + println!("Participant keys Length: {}", update.block_proof.participant_count()); + + let pharos_client = PharosClient::::default(); + + let host = Ismp::default(); + let result = pharos_client.verify_consensus( + &host, + PHAROS_CONSENSUS_CLIENT_ID, + initial_consensus_state.encode(), + update.encode(), + ); + + match result { + Ok((new_state_bytes, commitments)) => { + let new_state = ConsensusState::decode(&mut &new_state_bytes[..]) + .expect("Failed to decode new state"); + + println!("\nVerification Successful"); + println!("Finalized height: {}", new_state.finalized_height); + println!("Epoch: {}", new_state.current_epoch); + + // the epoch should remain the same + assert_eq!( + new_state.current_epoch, initial_consensus_state.current_epoch, + "Epoch should not change for non-epoch-boundary blocks" + ); + + let state_id = StateMachineId { + state_id: StateMachine::Evm(PHAROS_ATLANTIC_CHAIN_ID), + consensus_state_id: PHAROS_CONSENSUS_CLIENT_ID, + }; + assert!(commitments.contains_key(&state_id), "Should have state commitment"); + + let heights = &commitments[&state_id]; + assert_eq!(heights.len(), 1, "Should have exactly one state commitment"); + assert_eq!( + heights[0].height, target_block, + "Commitment height should match the target block" + ); + + assert_eq!( + new_state.finalized_height, target_block, + "Finalized height should match the target block" + ); + }, + Err(e) => { + panic!("Verification failed: {:?}", e); + }, + } +} + +#[tokio::test] +#[ignore] +async fn test_ismp_pharos_epoch_boundary_consensus_verification() { + let rpc_url = std::env::var("PHAROS_ATLANTIC_RPC") + .expect("PHAROS_ATLANTIC_RPC env variable must be set"); + let prover = PharosProver::::new(&rpc_url).expect("Failed to create prover"); + + let latest_block_num = prover.get_latest_block().await.expect("Failed to get block number"); + println!("Latest block: {}", 
latest_block_num); + + // Find the most recent epoch boundary block. + let epoch_length = Testnet::EPOCH_LENGTH_BLOCKS; + let current_epoch = Testnet::compute_epoch(latest_block_num); + // last epoch boundary block is at (current_epoch * epoch_length) - 1 + let target_block = (current_epoch * epoch_length) - 1; + + assert!( + Testnet::is_epoch_boundary(target_block), + "Target block {} should be an epoch boundary", + target_block + ); + println!("Target epoch boundary block: {}", target_block); + println!("Current epoch: {}, target epoch: {}", current_epoch, Testnet::compute_epoch(target_block)); + + let validator_info = prover + .rpc + .get_validator_info(Some(target_block)) + .await + .expect("Failed to get validator info"); + println!("Validators: {}", validator_info.validator_set.len()); + + let target_epoch = Testnet::compute_epoch(target_block); + let validator_set = prover + .build_validator_set(&validator_info.validator_set, target_epoch) + .expect("Failed to build validator set"); + println!("Total stake: {}", validator_set.total_stake); + + // trusted consensus state at the block before the epoch boundary + let initial_block = target_block - 1; + let initial_consensus_state = ConsensusState { + current_validators: validator_set, + finalized_height: initial_block, + finalized_hash: H256::zero(), + current_epoch: target_epoch, + chain_id: PHAROS_ATLANTIC_CHAIN_ID, + }; + + // should include a validator_set_proof because it's an epoch boundary. 
+ let update = prover + .fetch_block_update(target_block) + .await + .expect("Failed to fetch block update for epoch boundary"); + println!("Block update is for block: {}", update.block_number()); + println!("Participant keys Length: {}", update.block_proof.participant_count()); + assert!( + update.validator_set_proof.is_some(), + "Epoch boundary block should include a validator set proof" + ); + + let pharos_client = PharosClient::::default(); + + let host = Ismp::default(); + let result = pharos_client.verify_consensus( + &host, + PHAROS_CONSENSUS_CLIENT_ID, + initial_consensus_state.encode(), + update.encode(), + ); + + match result { + Ok((new_state_bytes, commitments)) => { + let new_state = ConsensusState::decode(&mut &new_state_bytes[..]) + .expect("Failed to decode new state"); + + println!("\nEpoch Boundary Verification Successful"); + println!("Finalized height: {}", new_state.finalized_height); + println!("Previous epoch: {}", initial_consensus_state.current_epoch); + println!("New epoch: {}", new_state.current_epoch); + println!("New validator count: {}", new_state.current_validators.len()); + + assert_eq!( + new_state.current_epoch, + initial_consensus_state.current_epoch + 1, + "Epoch should increment by 1 at epoch boundary" + ); + + assert_eq!( + new_state.finalized_height, target_block, + "Finalized height should match the epoch boundary block" + ); + + assert!( + !new_state.current_validators.is_empty(), + "New validator set should not be empty" + ); + + let state_id = StateMachineId { + state_id: StateMachine::Evm(PHAROS_ATLANTIC_CHAIN_ID), + consensus_state_id: PHAROS_CONSENSUS_CLIENT_ID, + }; + assert!(commitments.contains_key(&state_id), "Should have state commitment"); + + let heights = &commitments[&state_id]; + assert_eq!(heights.len(), 1, "Should have exactly one state commitment"); + assert_eq!( + heights[0].height, target_block, + "Commitment height should match the epoch boundary block" + ); + }, + Err(e) => { + panic!("Epoch boundary 
verification failed: {:?}", e); + }, + } +} diff --git a/modules/pallets/testsuite/src/tests/mod.rs b/modules/pallets/testsuite/src/tests/mod.rs index aa4ac8f2a..a39238acb 100644 --- a/modules/pallets/testsuite/src/tests/mod.rs +++ b/modules/pallets/testsuite/src/tests/mod.rs @@ -10,9 +10,11 @@ mod pallet_xcm_gateway; mod xcm_integration_test; mod common; +mod ismp_pharos; mod pallet_bridge_airdrop; mod pallet_collator_manager; mod pallet_consensus_incentives; mod pallet_messaging_fees; mod pallet_token_gateway; +mod pharos_state_machine; mod substrate_evm_state_machine; diff --git a/modules/pallets/testsuite/src/tests/pharos_state_machine.rs b/modules/pallets/testsuite/src/tests/pharos_state_machine.rs new file mode 100644 index 000000000..f2e353266 --- /dev/null +++ b/modules/pallets/testsuite/src/tests/pharos_state_machine.rs @@ -0,0 +1,248 @@ +// Copyright (c) 2025 Polytope Labs. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +#![cfg(test)] + +use pharos_primitives::{spv, Config, Testnet, STAKING_CONTRACT_ADDRESS}; +use pharos_prover::{ + rpc::{hex_to_bytes, PharosRpcClient}, + rpc_to_proof_nodes, PharosProver, +}; +use primitive_types::{H160, H256, U256}; +use std::sync::Arc; + +#[tokio::test] +#[ignore] +async fn test_pharos_account_proof_verification() { + let rpc_url = std::env::var("PHAROS_ATLANTIC_RPC") + .expect("PHAROS_ATLANTIC_RPC env variable must be set"); + let rpc = PharosRpcClient::new(&rpc_url).expect("Failed to create RPC client"); + + let block_number = rpc.get_block_number().await.expect("Failed to get block number"); + let target_block = block_number.saturating_sub(5); + println!("Testing at block: {}", target_block); + + let header = rpc.get_block_by_number(target_block).await.expect("Failed to get block"); + let state_root = header.state_root; + println!("State root: {:?}", state_root); + + let address = H160::from_slice(STAKING_CONTRACT_ADDRESS.as_slice()); + let total_stake_slot = H256(U256::from(6u64).to_big_endian()); + let proof = rpc + .get_proof(address, vec![total_stake_slot], target_block) + .await + .expect("Failed to get proof"); + + println!("Account proof nodes: {}", proof.account_proof.len()); + println!("Storage hash: {}", proof.storage_hash); + println!("Raw value length: {}", proof.raw_value.len()); + + let account_proof_nodes = + rpc_to_proof_nodes(&proof.account_proof).expect("Failed to convert account proof nodes"); + let raw_value = hex_to_bytes(&proof.raw_value).expect("Failed to parse raw_value"); + + assert!(!account_proof_nodes.is_empty(), "Account proof should not be empty"); + assert!(!raw_value.is_empty(), "Raw account value should not be empty"); + + let address_bytes: [u8; 20] = address.0; + let is_valid = + spv::verify_account_proof(&account_proof_nodes, &address_bytes, &raw_value, &state_root.0); + + assert!(is_valid, "Account proof verification should pass for staking contract"); + println!("Account proof verification: PASSED"); +} 
+ +#[tokio::test] +#[ignore] +async fn test_pharos_storage_proof_verification() { + let rpc_url = std::env::var("PHAROS_ATLANTIC_RPC") + .expect("PHAROS_ATLANTIC_RPC env variable must be set"); + let rpc = PharosRpcClient::new(&rpc_url).expect("Failed to create RPC client"); + + let block_number = rpc.get_block_number().await.expect("Failed to get block number"); + let target_block = block_number.saturating_sub(5); + println!("Testing at block: {}", target_block); + + let header = rpc.get_block_by_number(target_block).await.expect("Failed to get block"); + let state_root = header.state_root; + println!("State root: {:?}", state_root); + + let address = H160::from_slice(STAKING_CONTRACT_ADDRESS.as_slice()); + let total_stake_slot = H256(U256::from(6u64).to_big_endian()); + let proof = rpc + .get_proof(address, vec![total_stake_slot], target_block) + .await + .expect("Failed to get proof"); + + println!("Account proof nodes: {}", proof.account_proof.len()); + println!("Storage proof entries: {}", proof.storage_proof.len()); + println!("Storage hash: {}", proof.storage_hash); + + let account_proof_nodes = + rpc_to_proof_nodes(&proof.account_proof).expect("Failed to convert account proof nodes"); + let raw_value = hex_to_bytes(&proof.raw_value).expect("Failed to parse raw_value"); + let address_bytes: [u8; 20] = address.0; + + let account_valid = + spv::verify_account_proof(&account_proof_nodes, &address_bytes, &raw_value, &state_root.0); + assert!(account_valid, "Account proof verification should pass"); + println!("Account proof verification: PASSED"); + + assert!(!proof.storage_proof.is_empty(), "Should have at least one storage proof"); + let storage_entry = &proof.storage_proof[0]; + let storage_proof_nodes = + rpc_to_proof_nodes(&storage_entry.proof).expect("Failed to convert storage proof nodes"); + + println!("Storage key: {}", storage_entry.key); + println!("Storage value: {}", storage_entry.value); + println!("Storage proof nodes: {}", 
storage_proof_nodes.len()); + + assert!(!storage_proof_nodes.is_empty(), "Storage proof should not be empty"); + + let value_bytes = hex_to_bytes(&storage_entry.value).expect("Failed to parse storage value"); + let mut padded_value = [0u8; 32]; + if value_bytes.len() <= 32 { + padded_value[32 - value_bytes.len()..].copy_from_slice(&value_bytes); + } + + let total_stake = U256::from_big_endian(&padded_value); + println!("Total stake: {}", total_stake); + assert!(total_stake > U256::zero(), "Total stake should be non-zero"); + + let key_bytes = hex_to_bytes(&storage_entry.key).expect("Failed to parse storage key"); + let mut storage_key = [0u8; 32]; + if key_bytes.len() <= 32 { + storage_key[32 - key_bytes.len()..].copy_from_slice(&key_bytes); + } + + // Pharos uses a flat trie — storage proofs verify directly against state_root. + let storage_valid = spv::verify_storage_proof( + &storage_proof_nodes, + &address_bytes, + &storage_key, + &padded_value, + &state_root.0, + ); + + assert!(storage_valid, "Storage proof verification should pass for totalStake"); + println!("Storage proof verification: PASSED"); +} + +#[tokio::test] +#[ignore] +async fn test_pharos_multiple_storage_proofs() { + let rpc_url = std::env::var("PHAROS_ATLANTIC_RPC") + .expect("PHAROS_ATLANTIC_RPC env variable must be set"); + let rpc = PharosRpcClient::new(&rpc_url).expect("Failed to create RPC client"); + + let block_number = rpc.get_block_number().await.expect("Failed to get block number"); + let target_block = block_number.saturating_sub(5); + println!("Testing at block: {}", target_block); + + let header = rpc.get_block_by_number(target_block).await.expect("Failed to get block"); + let state_root = header.state_root; + + let address = H160::from_slice(STAKING_CONTRACT_ADDRESS.as_slice()); + let total_stake_slot = H256(U256::from(6u64).to_big_endian()); + let epoch_length_slot = H256(U256::from(5u64).to_big_endian()); + + let proof_stake = rpc + .get_proof(address, vec![total_stake_slot], 
target_block) + .await + .expect("Failed to get proof for totalStake"); + + let proof_epoch = rpc + .get_proof(address, vec![epoch_length_slot], target_block) + .await + .expect("Failed to get proof for epochLength"); + + let account_proof_nodes = rpc_to_proof_nodes(&proof_stake.account_proof) + .expect("Failed to convert account proof nodes"); + let raw_value = hex_to_bytes(&proof_stake.raw_value).expect("Failed to parse raw_value"); + let address_bytes: [u8; 20] = address.0; + + let account_valid = + spv::verify_account_proof(&account_proof_nodes, &address_bytes, &raw_value, &state_root.0); + assert!(account_valid, "Account proof verification should pass"); + println!("Account proof verification: PASSED"); + + // Pharos uses a flat trie — storage proofs verify directly against state_root. + assert!(!proof_stake.storage_proof.is_empty(), "Should have storage proof for totalStake"); + let stake_entry = &proof_stake.storage_proof[0]; + let stake_proof_nodes = + rpc_to_proof_nodes(&stake_entry.proof).expect("Failed to convert storage proof nodes"); + + let stake_value_bytes = + hex_to_bytes(&stake_entry.value).expect("Failed to parse totalStake value"); + let mut stake_padded = [0u8; 32]; + if stake_value_bytes.len() <= 32 { + stake_padded[32 - stake_value_bytes.len()..].copy_from_slice(&stake_value_bytes); + } + + let stake_key_bytes = hex_to_bytes(&stake_entry.key).expect("Failed to parse storage key"); + let mut stake_key = [0u8; 32]; + if stake_key_bytes.len() <= 32 { + stake_key[32 - stake_key_bytes.len()..].copy_from_slice(&stake_key_bytes); + } + + let total_stake = U256::from_big_endian(&stake_padded); + println!("Storage proof [totalStake]: key={}, value={}", stake_entry.key, total_stake); + assert!(total_stake > U256::zero(), "Total stake should be non-zero"); + + let stake_valid = spv::verify_storage_proof( + &stake_proof_nodes, + &address_bytes, + &stake_key, + &stake_padded, + &state_root.0, + ); + assert!(stake_valid, "Storage proof for totalStake 
should pass"); + println!("Storage proof [totalStake] verification: PASSED"); + + assert!( + !proof_epoch.storage_proof.is_empty(), + "Should have storage proof for epochLength" + ); + let epoch_entry = &proof_epoch.storage_proof[0]; + let epoch_proof_nodes = + rpc_to_proof_nodes(&epoch_entry.proof).expect("Failed to convert storage proof nodes"); + + let epoch_value_bytes = + hex_to_bytes(&epoch_entry.value).expect("Failed to parse epochLength value"); + let mut epoch_padded = [0u8; 32]; + if epoch_value_bytes.len() <= 32 { + epoch_padded[32 - epoch_value_bytes.len()..].copy_from_slice(&epoch_value_bytes); + } + + let epoch_key_bytes = hex_to_bytes(&epoch_entry.key).expect("Failed to parse storage key"); + let mut epoch_key = [0u8; 32]; + if epoch_key_bytes.len() <= 32 { + epoch_key[32 - epoch_key_bytes.len()..].copy_from_slice(&epoch_key_bytes); + } + + let epoch_length = U256::from_big_endian(&epoch_padded); + println!("Storage proof [epochLength]: key={}, value={}", epoch_entry.key, epoch_length); + assert!(epoch_length > U256::zero(), "Epoch length should be non-zero"); + + let epoch_valid = spv::verify_storage_proof( + &epoch_proof_nodes, + &address_bytes, + &epoch_key, + &epoch_padded, + &state_root.0, + ); + assert!(epoch_valid, "Storage proof for epochLength should pass"); + println!("Storage proof [epochLength] verification: PASSED"); +} diff --git a/modules/utils/crypto/Cargo.toml b/modules/utils/crypto/Cargo.toml index 565af5382..db32aa43a 100644 --- a/modules/utils/crypto/Cargo.toml +++ b/modules/utils/crypto/Cargo.toml @@ -16,6 +16,11 @@ anyhow = { workspace = true } scale-info = { workspace = true } sp-core = { workspace = true, default-features = false } sp-io = { workspace = true, default-features = false } +hex = { version = "0.4", default-features = false, features = ["alloc"] } +serde = { workspace = true, optional = true } +ssz-rs = { git = "https://github.com/polytope-labs/ssz-rs", branch = "main", default-features = false } +serde-hex-utils = 
{ workspace = true, default-features = false } +bls = { workspace = true, default-features = false } [features] default = ["std"] @@ -25,4 +30,11 @@ std = [ "scale-info/std", "sp-core/std", "sp-io/std", + "hex/std", + "serde", + "serde/std", + "ssz-rs/default", + "ssz-rs/serde", + "serde-hex-utils/std", + "bls/std", ] diff --git a/modules/utils/crypto/src/bls.rs b/modules/utils/crypto/src/bls.rs new file mode 100644 index 000000000..aec3f5484 --- /dev/null +++ b/modules/utils/crypto/src/bls.rs @@ -0,0 +1,48 @@ +// Copyright (C) Polytope Labs Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//! BLS12-381 cryptographic type definitions and utilities. + +use crate::ssz::ByteVector; +use alloc::vec::Vec; +use bls::{errors::BLSError, types::G1ProjectivePoint}; + +/// Length of a BLS12-381 public key in bytes (compressed G1 point). +pub const BLS_PUBLIC_KEY_BYTES_LEN: usize = 48; + +/// Length of a BLS12-381 signature in bytes (compressed G2 point). +pub const BLS_SIGNATURE_BYTES_LEN: usize = 96; + +/// A BLS12-381 public key (48 bytes compressed). +pub type BlsPublicKey = ByteVector<BLS_PUBLIC_KEY_BYTES_LEN>; + +/// A BLS12-381 signature (96 bytes compressed). +pub type BlsSignature = ByteVector<BLS_SIGNATURE_BYTES_LEN>; + +/// Convert a compressed BLS public key to a projective point. 
+pub fn pubkey_to_projective(compressed_key: &BlsPublicKey) -> Result<G1ProjectivePoint, BLSError> { + let affine_point = bls::pubkey_to_point(&compressed_key.to_vec())?; + Ok(affine_point.into()) +} + +/// Aggregate multiple BLS public keys into a single public key. +pub fn aggregate_public_keys(keys: &[BlsPublicKey]) -> Vec<u8> { + let aggregate = keys + .iter() + .filter_map(|key| pubkey_to_projective(key).ok()) + .fold(G1ProjectivePoint::default(), |acc, next| acc + next); + + bls::point_to_pubkey(aggregate.into()) +} diff --git a/modules/utils/crypto/src/lib.rs b/modules/utils/crypto/src/lib.rs index c2d05f546..8d05a608c 100644 --- a/modules/utils/crypto/src/lib.rs +++ b/modules/utils/crypto/src/lib.rs @@ -16,4 +16,12 @@ #![cfg_attr(not(feature = "std"), no_std)] extern crate alloc; +pub mod bls; +pub mod ssz; pub mod verification; + +pub use bls::{ + aggregate_public_keys, pubkey_to_projective, BlsPublicKey, BlsSignature, + BLS_PUBLIC_KEY_BYTES_LEN, BLS_SIGNATURE_BYTES_LEN, +}; +pub use ssz::ByteVector; diff --git a/modules/consensus/sync-committee/primitives/src/ssz/byte_vector.rs b/modules/utils/crypto/src/ssz/byte_vector.rs similarity index 75% rename from modules/consensus/sync-committee/primitives/src/ssz/byte_vector.rs rename to modules/utils/crypto/src/ssz/byte_vector.rs index 56903d721..d6e45d498 100644 --- a/modules/consensus/sync-committee/primitives/src/ssz/byte_vector.rs +++ b/modules/utils/crypto/src/ssz/byte_vector.rs @@ -1,3 +1,18 @@ +// Copyright (C) Polytope Labs Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+// See the License for the specific language governing permissions and +// limitations under the License. + use super::write_bytes_to_lower_hex; use alloc::{vec, vec::Vec}; use core::{ diff --git a/modules/utils/crypto/src/ssz/mod.rs b/modules/utils/crypto/src/ssz/mod.rs new file mode 100644 index 000000000..20e02f7b6 --- /dev/null +++ b/modules/utils/crypto/src/ssz/mod.rs @@ -0,0 +1,29 @@ +// Copyright (C) Polytope Labs Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +mod byte_vector; +use core::fmt; + +fn write_bytes_to_lower_hex<T: AsRef<[u8]>>(f: &mut fmt::Formatter<'_>, data: T) -> fmt::Result { + if f.alternate() { + write!(f, "0x")?; + } + for i in data.as_ref() { + write!(f, "{i:02x}")?; + } + Ok(()) +} + +pub use byte_vector::ByteVector; diff --git a/parachain/runtimes/gargantua/Cargo.toml b/parachain/runtimes/gargantua/Cargo.toml index 94f7e3705..a174cb296 100644 --- a/parachain/runtimes/gargantua/Cargo.toml +++ b/parachain/runtimes/gargantua/Cargo.toml @@ -50,6 +50,7 @@ pallet-bridge-airdrop = { workspace = true } ismp-arbitrum = { workspace = true } ismp-optimism = { workspace = true } ismp-polygon = { workspace = true } +ismp-pharos = { workspace = true } ismp-tendermint = { workspace = true } pallet-messaging-fees = { workspace = true } evm-state-machine = { workspace = true } @@ -156,7 +157,8 @@ std = [ "ismp-tendermint/std", "pallet-messaging-fees/std", "evm-state-machine/std", - "substrate-state-machine/std" + "substrate-state-machine/std", + "ismp-pharos/std" ] runtime-benchmarks = [ "hex-literal", diff --git a/parachain/runtimes/gargantua/src/ismp.rs b/parachain/runtimes/gargantua/src/ismp.rs index aebc07f77..dbe85fe26 100644 --- a/parachain/runtimes/gargantua/src/ismp.rs +++ b/parachain/runtimes/gargantua/src/ismp.rs @@ -163,6 +163,7 @@ impl pallet_ismp::Config for Runtime { ismp_optimism::OptimismConsensusClient, ismp_polygon::PolygonClient, ismp_tendermint::TendermintClient, + ismp_pharos::PharosClient, ); type OffchainDB = Mmr; type FeeHandler = pallet_ismp::fee_handler::WeightFeeHandler< diff --git a/tesseract/consensus/integration-tests/Cargo.toml b/tesseract/consensus/integration-tests/Cargo.toml index 592d31a0c..85300dd11 100644 --- a/tesseract/consensus/integration-tests/Cargo.toml +++ b/tesseract/consensus/integration-tests/Cargo.toml @@ -50,6 +50,9 @@ substrate-state-machine = { workspace = true } arb-host = { workspace = true } op-host = { workspace = true } grandpa-verifier-primitives = { workspace = true } 
+tesseract-pharos = { workspace = true } +ismp-pharos = { workspace = true } +pharos-primitives = { workspace = true } [dependencies.polkadot-sdk] workspace = true diff --git a/tesseract/consensus/integration-tests/src/lib.rs b/tesseract/consensus/integration-tests/src/lib.rs index 80449684a..20442adff 100644 --- a/tesseract/consensus/integration-tests/src/lib.rs +++ b/tesseract/consensus/integration-tests/src/lib.rs @@ -8,6 +8,7 @@ mod ping; // mod substrate; //mod l2s; mod util; +//mod pharos; // use std::{ // sync::Arc, diff --git a/tesseract/consensus/integration-tests/src/pharos.rs b/tesseract/consensus/integration-tests/src/pharos.rs new file mode 100644 index 000000000..204a92073 --- /dev/null +++ b/tesseract/consensus/integration-tests/src/pharos.rs @@ -0,0 +1,93 @@ +// Copyright (C) Polytope Labs Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +use codec::Encode; +use ismp::{host::StateMachine, messaging::CreateConsensusState}; +use ismp_pharos::PHAROS_CONSENSUS_CLIENT_ID; +use std::sync::Arc; +use substrate_state_machine::HashAlgorithm; +use subxt_utils::Hyperbridge; +use tesseract_evm::EvmConfig; +use pharos_primitives::Config; +use tesseract_pharos::{PharosHost, PharosHostConfig, Testnet}; +use tesseract_primitives::IsmpHost; +use tesseract_substrate::{SubstrateClient, SubstrateConfig}; + +use crate::util::setup_logging; + +#[tokio::test] +async fn pharos_consensus_updates() -> anyhow::Result<()> { + setup_logging(); + dotenv::dotenv().ok(); + + let pharos_rpc_url = + std::env::var("PHAROS_RPC_URL").expect("PHAROS_RPC_URL must be set"); + + let evm_config = EvmConfig { + rpc_urls: vec![pharos_rpc_url.clone()], + state_machine: StateMachine::Evm(688689), + consensus_state_id: "PHAR".to_string(), + ismp_host: Default::default(), + signer: "2e0834786285daccd064ca17f1654f67b4aef298acbb82cef9ec422fb4975622".to_string(), + tracing_batch_size: None, + query_batch_size: None, + poll_interval: None, + gas_price_buffer: None, + client_type: Default::default(), + initial_height: None, + }; + + let host_config = PharosHostConfig { + consensus_update_frequency: Some(300), + }; + + let pharos_host = PharosHost::::new(&host_config, &evm_config).await?; + + let config_a = SubstrateConfig { + state_machine: StateMachine::Kusama(2000), + hashing: Some(HashAlgorithm::Keccak), + consensus_state_id: Some("PARA".to_string()), + rpc_ws: "ws://localhost:9990".to_string(), + max_rpc_payload_size: None, + signer: "0xe5be9a5092b81bca64be81d212e7f2f9eba183bb7a90954f7b76361f6edb5c0a".to_string(), + initial_height: None, + max_concurent_queries: None, + poll_interval: None, + fee_token_decimals: None, + }; + let chain_a = SubstrateClient::::new(config_a).await?; + + println!("getting initial consensus state"); + let initial_consensus_state = pharos_host.get_consensus_state().await?; + + println!("creating initial consensus 
state"); + + chain_a + .create_consensus_state(CreateConsensusState { + consensus_state: initial_consensus_state.encode(), + consensus_client_id: PHAROS_CONSENSUS_CLIENT_ID, + consensus_state_id: *b"PHAR", + unbonding_period: Testnet::UNBONDING_PERIOD, + challenge_periods: vec![(StateMachine::Evm(688689), 5 * 60)].into_iter().collect(), + state_machine_commitments: vec![], + }) + .await?; + + println!("created consensus state"); + + pharos_host.start_consensus(Arc::new(chain_a)).await?; + + Ok(()) +} diff --git a/tesseract/consensus/pharos/Cargo.toml b/tesseract/consensus/pharos/Cargo.toml new file mode 100644 index 000000000..62a80315b --- /dev/null +++ b/tesseract/consensus/pharos/Cargo.toml @@ -0,0 +1,28 @@ +[package] +name = "tesseract-pharos" +version = "0.1.0" +edition = "2021" +description = "Pharos consensus relayer for tesseract" +authors = ["Polytope Labs "] + +[dependencies] +pharos-prover = { path = "../../../modules/consensus/pharos/prover" } +pharos-primitives = { path = "../../../modules/consensus/pharos/primitives" } +pharos-verifier = { path = "../../../modules/consensus/pharos/verifier" } +ismp-pharos = { path = "../../../modules/ismp/clients/pharos" } +geth-primitives = { workspace = true } +ismp = { workspace = true } + +serde = { version = "1.0", features = ["derive"] } +serde_json = { package = "serde_json", version = "1.0" } +log = { workspace = true, default-features = true } +anyhow = { workspace = true, default-features = true } +codec = { package = "parity-scale-codec", version = "3.2.2", features = ["derive"] } +async-trait = "0.1.88" +tokio = { version = "1.27.0", features = ["rt-multi-thread", "macros"] } +primitive-types = { workspace = true } +sp-core = { workspace = true } + +tesseract-primitives = { workspace = true } +tesseract-evm = { workspace = true } +ethers = { workspace = true, features = ["rustls"] } diff --git a/tesseract/consensus/pharos/src/lib.rs b/tesseract/consensus/pharos/src/lib.rs new file mode 100644 index 
000000000..881cc1424 --- /dev/null +++ b/tesseract/consensus/pharos/src/lib.rs @@ -0,0 +1,268 @@ +// Copyright (C) Polytope Labs Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//! Tesseract consensus relayer for Pharos Network. + +use anyhow::Result; +use codec::Encode; +use ismp::{ + consensus::{ConsensusStateId, StateCommitment}, + host::StateMachine, + messaging::{ConsensusMessage, CreateConsensusState, Message, StateCommitmentHeight}, +}; +use ismp_pharos::{ConsensusState, PHAROS_CONSENSUS_CLIENT_ID}; +use pharos_primitives::Config; +use pharos_prover::PharosProver; +use serde::{Deserialize, Serialize}; +use std::{marker::PhantomData, sync::Arc, time::Duration}; +use tesseract_evm::{EvmClient, EvmConfig}; +use tesseract_primitives::{IsmpHost, IsmpProvider}; + +mod notification; + +pub use pharos_primitives::{Mainnet, Testnet}; + +/// Host configuration for Pharos relayer +#[derive(Debug, Clone, Serialize, Deserialize)] +pub struct PharosHostConfig { + /// Frequency (in seconds) to check for new updates + pub consensus_update_frequency: Option<u64>, +} + +/// Top-level config for Pharos relayer +#[derive(Debug, Clone, Serialize, Deserialize)] +pub struct PharosConfig { + /// Host configuration options + pub host: PharosHostConfig, + /// General EVM config + #[serde(flatten)] + pub evm_config: EvmConfig, +} + +impl PharosConfig { + /// Convert the config into a client. 
+ pub async fn into_client<C: Config>(self) -> anyhow::Result<Arc<PharosHost<C>>> { + Ok(Arc::new(PharosHost::<C>::new(&self.host, &self.evm_config).await?)) + } + + pub fn state_machine(&self) -> StateMachine { + self.evm_config.state_machine + } +} + +/// The relayer host for Pharos +#[derive(Clone)] +pub struct PharosHost<C: Config> { + /// Consensus state id on counterparty chain + pub consensus_state_id: ConsensusStateId, + /// State machine Identifier for this chain + pub state_machine: StateMachine, + /// Host config options + pub host: PharosHostConfig, + /// Ismp provider + pub provider: Arc<dyn IsmpProvider>, + /// Pharos prover for fetching proofs + pub prover: PharosProver, + /// Phantom data for config + _config: PhantomData<C>, +} + +impl<C: Config> PharosHost<C> { + /// Create a new PharosHost + pub async fn new(host: &PharosHostConfig, evm: &EvmConfig) -> Result<Self> { + let ismp_provider = EvmClient::new(evm.clone()).await?; + let rpc_url = evm + .rpc_urls + .first() + .ok_or_else(|| anyhow::anyhow!("No RPC URL configured in EVM config"))?; + let prover = PharosProver::new(rpc_url)?; + + Ok(Self { + consensus_state_id: { + let mut consensus_state_id: ConsensusStateId = Default::default(); + consensus_state_id.copy_from_slice(evm.consensus_state_id.as_bytes()); + consensus_state_id + }, + state_machine: evm.state_machine, + host: host.clone(), + provider: Arc::new(ismp_provider), + prover, + _config: PhantomData, + }) + } + + /// Fetch the current consensus state (for initial state creation) + pub async fn get_consensus_state(&self) -> Result<ConsensusState> { + let latest_block = self.prover.get_latest_block().await?; + let update = self.prover.fetch_block_update(latest_block).await?; + + let header = &update.header; + let header_hash = geth_primitives::Header::from(header).hash::<KeccakHasher>(); + + let current_epoch = C::compute_epoch(latest_block); + + // try to get validator set for an epoch boundary + // else, we query the previous epoch boundary + let validator_set = if let Some(ref proof) = update.validator_set_proof { + 
pharos_verifier::state_proof::verify_validator_set_proof::<C>( + header.state_root, + proof, + current_epoch + 1, + )? + } else { + // For initial state, fetch from an epoch boundary block + let epoch_start = current_epoch * C::EPOCH_LENGTH_BLOCKS; + let epoch_boundary = if epoch_start > 0 { epoch_start - 1 } else { 0 }; + let boundary_update = self.prover.fetch_block_update(epoch_boundary).await?; + + if let Some(ref proof) = boundary_update.validator_set_proof { + pharos_verifier::state_proof::verify_validator_set_proof::<C>( + boundary_update.header.state_root, + proof, + current_epoch, + )? + } else { + return Err(anyhow::anyhow!("Cannot get initial validator set")); + } + }; + + let chain_id = match self.state_machine { + StateMachine::Evm(chain_id) => chain_id, + _ => return Err(anyhow::anyhow!("Unsupported state machine")), + }; + + Ok(ConsensusState { + current_validators: validator_set, + finalized_height: latest_block, + finalized_hash: header_hash, + current_epoch, + chain_id, + }) + } +} + +/// Keccak256 hasher implementation +pub struct KeccakHasher; + +impl ismp::messaging::Keccak256 for KeccakHasher { + fn keccak256(bytes: &[u8]) -> primitive_types::H256 + where + Self: Sized, + { + sp_core::keccak_256(bytes).into() + } +} + +#[async_trait::async_trait] +impl<C: Config + Send + Sync> IsmpHost for PharosHost<C> { + async fn start_consensus( + &self, + counterparty: Arc<dyn IsmpProvider>, + ) -> Result<(), anyhow::Error> { + use crate::notification::consensus_notification; + + let interval = tokio::time::interval(Duration::from_secs( + self.host.consensus_update_frequency.unwrap_or(300), + )); + + let client = self.clone(); + let counterparty_clone = counterparty.clone(); + let mut interval = Box::pin(interval); + let provider = self.provider(); + + loop { + interval.as_mut().tick().await; + + match consensus_notification(&client, counterparty_clone.clone()).await { + Ok(Some(update)) => { + let consensus_message = ConsensusMessage { + consensus_proof: update.encode(), + consensus_state_id: 
client.consensus_state_id, + signer: counterparty.address(), + }; + + log::info!( + target: "tesseract", + "Transmitting consensus message from {} to {}", + provider.name(), + counterparty.name() + ); + + let res = counterparty + .submit( + vec![Message::Consensus(consensus_message)], + counterparty.state_machine_id().state_id, + ) + .await; + + if let Err(err) = res { + log::error!( + "Failed to submit transaction to {}: {err:?}", + counterparty.name() + ) + } + }, + Ok(None) => { + // No update to send, just continue + }, + Err(e) => { + log::error!( + target: "tesseract", + "Consensus task {}->{} encountered an error: {e:?}", + provider.name(), + counterparty.name() + ) + }, + } + } + } + + async fn query_initial_consensus_state( + &self, + ) -> Result<Option<CreateConsensusState>, anyhow::Error> { + let initial_consensus_state = self.get_consensus_state().await.map_err(|e| { + anyhow::anyhow!("PharosHost: fetch initial consensus state failed: {e}") + })?; + + let latest_block = self.prover.get_latest_block().await?; + let update = self.prover.fetch_block_update(latest_block).await?; + + Ok(Some(CreateConsensusState { + consensus_state: initial_consensus_state.encode(), + consensus_client_id: PHAROS_CONSENSUS_CLIENT_ID, + consensus_state_id: self.consensus_state_id, + unbonding_period: C::UNBONDING_PERIOD, + challenge_periods: vec![(self.state_machine, 5 * 60)].into_iter().collect(), + state_machine_commitments: vec![( + ismp::consensus::StateMachineId { + state_id: self.state_machine, + consensus_state_id: self.consensus_state_id, + }, + StateCommitmentHeight { + commitment: StateCommitment { + timestamp: update.header.timestamp, + overlay_root: None, + state_root: update.header.state_root, + }, + height: latest_block, + }, + )], + })) + } + + fn provider(&self) -> Arc<dyn IsmpProvider> { + self.provider.clone() + } +} diff --git a/tesseract/consensus/pharos/src/notification.rs b/tesseract/consensus/pharos/src/notification.rs new file mode 100644 index 000000000..79d51854a --- /dev/null +++ 
b/tesseract/consensus/pharos/src/notification.rs @@ -0,0 +1,90 @@ +// Copyright (C) Polytope Labs Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//! Consensus notification logic for Pharos relayer. + +use crate::PharosHost; +use codec::Decode; +use ismp_pharos::ConsensusState; +use pharos_primitives::{Config, VerifierStateUpdate}; +use std::sync::Arc; +use tesseract_primitives::IsmpProvider; + +pub async fn consensus_notification<C: Config>( + client: &PharosHost<C>, + counterparty: Arc<dyn IsmpProvider>, +) -> Result<Option<VerifierStateUpdate>, anyhow::Error> { + let counterparty_finalized = counterparty.query_finalized_height().await?; + let consensus_state_bytes = counterparty + .query_consensus_state(Some(counterparty_finalized), client.consensus_state_id) + .await?; + + let consensus_state = ConsensusState::decode(&mut &consensus_state_bytes[..]) + .map_err(|e| anyhow::anyhow!("Failed to decode consensus state: {:?}", e))?; + + let latest_block = client.prover.get_latest_block().await?; + + if latest_block <= consensus_state.finalized_height { + log::trace!( + target: "tesseract-pharos", + "No new blocks to sync. Latest: {}, Finalized: {}", + latest_block, + consensus_state.finalized_height + ); + return Ok(None); + } + + let current_epoch = consensus_state.current_epoch; + let latest_epoch = C::compute_epoch(latest_block); + + log::trace!( + target: "tesseract-pharos", + "New block available. 
Latest: {} (epoch {}), Finalized: {} (epoch {})", + latest_block, + latest_epoch, + consensus_state.finalized_height, + current_epoch + ); + + // Determine the target block for the update + // If we're still in the same epoch, just get the latest block + // If we've crossed epoch boundaries, we need to sync epoch by epoch + let target_block = if latest_epoch > current_epoch { + // We've crossed epoch boundaries — sync the first epoch boundary block. + // Since latest_block is in a later epoch, this boundary is always <= latest_block. + let next_epoch_boundary = (current_epoch + 1) * C::EPOCH_LENGTH_BLOCKS - 1; + log::trace!( + target: "tesseract-pharos", + "Syncing epoch boundary block {} for epoch transition {} -> {}", + next_epoch_boundary, + current_epoch, + current_epoch + 1 + ); + next_epoch_boundary + } else { + latest_block + }; + + let update = client.prover.fetch_block_update(target_block).await?; + + log::trace!( + target: "tesseract-pharos", + "Fetched update for block {}{}", + target_block, + if update.validator_set_proof.is_some() { " (with validator set proof)" } else { "" } + ); + + Ok(Some(update)) +}