Conversation
| return Err(Error::NoParticipants); | ||
| } | ||
|
|
||
| let aggregate_pubkey = bsc_verifier::aggregate_public_keys(participants); |
There was a problem hiding this comment.
I think we already have this method in the crypto utils; if not, let's move it there.
| /// Compute the hash of a block header using RLP encoding and Keccak256. | ||
| /// | ||
| /// This follows the standard Ethereum header hash computation. | ||
| pub fn compute_header_hash<H: Keccak256>(header: &CodecHeader) -> H256 { |
There was a problem hiding this comment.
We already have this in the geth primitives, there's a hash function implemented on the header
| } | ||
|
|
||
| /// Get values from storage proof. | ||
| fn get_values_from_proof<H: Keccak256 + Send + Sync>( |
There was a problem hiding this comment.
Import all these EVM utility methods from the evm state machine instead of duplicating
| #[cfg_attr(feature = "std", derive(serde::Serialize, serde::Deserialize))] | ||
| pub struct ValidatorSetProof { | ||
| /// The new validator set | ||
| pub validator_set: ValidatorSet, |
There was a problem hiding this comment.
| pub validator_set: ValidatorSet, |
The proof doesn't need to come with the validator set, we can trust the values we decode from the proof
| let storage_root = H256::from_slice(account.storage_root.as_slice()); | ||
|
|
||
| let layout = StakingContractLayout::default(); | ||
| let validator_count = proof.validator_set.validators.len() as u64; |
There was a problem hiding this comment.
We can just have a value activePoolIdTotalCount on the ValidatorSetProof
| } | ||
|
|
||
| /// Get the staking contract account from the account proof. | ||
| fn get_staking_contract_account<H: Keccak256 + Send + Sync>( |
There was a problem hiding this comment.
We have utility contracts for these in evm-state machine
|
|
||
| let decoded_set = decode_validator_set_from_storage::<H>(&all_slots, &all_values)?; | ||
|
|
||
| verify_validator_set_matches(&proof.validator_set, &decoded_set)?; |
There was a problem hiding this comment.
Not needed, the values decoded from the proof are correct
Use the existing APIs instead.
| /// | ||
| /// This is distinct from the block hash and is the actual message | ||
| /// that the BLS signature is computed over. | ||
| pub block_proof_hash: H256, |
There was a problem hiding this comment.
We should generate this hash onchain. This is the message signed by the validators, so the light client should compute it itself. It should not be submitted as part of the proof — instead, submit all the data required to generate this hash.
| /// The block number this proof is for (from JSON "blockNumber") | ||
| pub block_number: u64, | ||
| /// The block hash (derived from header) | ||
| pub block_hash: H256, |
There was a problem hiding this comment.
The block header should be submitted, not the block header hash; the light client will compute the header hash on-chain.
| .as_ref() | ||
| .map(|v| decode_u256_from_storage(v)) | ||
| .transpose()? | ||
| .unwrap_or_default(); |
There was a problem hiding this comment.
This should fail if the value is not available, rather than falling back to `unwrap_or_default()`.
| } else { | ||
| // Minimal proof - only pool IDs, no detailed validator data | ||
| // This is used when the validator set is provided separately | ||
| // and only pool IDs need to be verified |
There was a problem hiding this comment.
This should fail if the data is incomplete
| .iter() | ||
| .fold(U256::zero(), |acc, v| acc.saturating_add(v.stake)); | ||
|
|
||
| if computed_total != validator_set.total_stake { |
| assert!(!proof.storage_proof.is_empty(), "Should have at least one storage proof"); | ||
| let storage_entry = &proof.storage_proof[0]; | ||
| let storage_proof_nodes = rpc_to_proof_nodes(&storage_entry.proof); | ||
| let storage_hash = hex_to_h256(&proof.storage_hash).expect("Failed to parse storage_hash"); |
There was a problem hiding this comment.
Extract storage hash from verified account value
| println!("Account proof verification: PASSED"); | ||
|
|
||
| let storage_hash = | ||
| hex_to_h256(&proof_stake.storage_hash).expect("Failed to parse storage_hash"); |
| consensus_state: initial_consensus_state.encode(), | ||
| consensus_client_id: PHAROS_CONSENSUS_CLIENT_ID, | ||
| consensus_state_id: self.consensus_state_id, | ||
| unbonding_period: 60 * 60 * 27, |
There was a problem hiding this comment.
Is this the Pharos unstaking period?
| // Find the first epoch boundary block we need to sync | ||
| // Epoch boundary is at (epoch + 1) * epoch_length - 1 | ||
| let next_epoch_boundary = (current_epoch + 1) * C::EPOCH_LENGTH_BLOCKS - 1; | ||
| if next_epoch_boundary <= latest_block { |
There was a problem hiding this comment.
Don't think we need this if check, next_epoch_boundary cannot be greater than latest_block since we've ascertained that it's in a different epoch.
Extract the storage root from the verified account value; fetch validator proofs concurrently; set the unbonding period according to the Pharos staking contract; deduplicate shared code.
Remove the redundant epoch-boundary check.
| state_id: StateMachine::Evm(PHAROS_ATLANTIC_CHAIN_ID), | ||
| consensus_state_id: PHAROS_CONSENSUS_CLIENT_ID, | ||
| }; | ||
| assert!(commitments.contains_key(&state_id), "Should have state commitment"); |
There was a problem hiding this comment.
make assertions about height
|
will read up on the staking contract before giving my final approval, but really nice work so far |
Read the RPC URL from an environment variable; assert the commitment height.
| let mut value_hash = [0u8; 32]; | ||
| value_hash.copy_from_slice(&leaf_data[33..65]); | ||
|
|
||
| // Walk bottom-up, hashing each node and checking the parent contains it |
There was a problem hiding this comment.
We should use the verify_pharos_proof for this, this verification code is duplicated
There was a problem hiding this comment.
They are basically the same function
| ) -> Result<ValidatorSet, Error> { | ||
| // Verify the account proof against the state root using Pharos SPV | ||
| let address: [u8; 20] = STAKING_CONTRACT_ADDRESS.0 .0; | ||
| if !spv::verify_account_proof( |
There was a problem hiding this comment.
I don't think we need an account proof since slot hashes can be verified against the state_root directly
Remove the redundant hashing; use the RPC URL from the EVM config.
| ) { | ||
| map.insert(key, Some(value_hash.to_vec())); | ||
| } else { | ||
| map.insert(key, None); |
There was a problem hiding this comment.
I'm not convinced this is correct — missing keys should produce an error rather than being silently mapped to `None`.
| proof: Vec<u8>, | ||
| ) -> Result<(Vec<u8>, ismp::consensus::VerifiedCommitments), Error> { | ||
| let update = VerifierStateUpdate::decode(&mut &proof[..]) | ||
| .map_err(|e| Error::AnyHow(anyhow::anyhow!("{:?}", e).into()))?; |
There was a problem hiding this comment.
Doesn't `parity_scale_codec::Error` implement `core::error::Error`?
There was a problem hiding this comment.
Actually, `parity_scale_codec::Error` only implements `std::error::Error`, which is gated behind the `std` feature — it does not implement `core::error::Error`.
This PR introduces the integration to Pharos network for Hyperbridge.
closes #593