diff --git a/fil-proofs-param/Cargo.toml b/fil-proofs-param/Cargo.toml index 311a2aae2b..2fac3f153f 100644 --- a/fil-proofs-param/Cargo.toml +++ b/fil-proofs-param/Cargo.toml @@ -49,6 +49,7 @@ indicatif = "0.15.0" group = "0.11.0" dialoguer = "0.8.0" clap = "2.33.3" +blstrs = "0.4.0" [dependencies.reqwest] version = "0.10" diff --git a/fil-proofs-param/src/bin/paramcache.rs b/fil-proofs-param/src/bin/paramcache.rs index a9c18ad246..372d3f2406 100644 --- a/fil-proofs-param/src/bin/paramcache.rs +++ b/fil-proofs-param/src/bin/paramcache.rs @@ -2,7 +2,9 @@ use std::env; use std::process::exit; use std::str::FromStr; +use blstrs::Scalar as Fr; use dialoguer::{theme::ColorfulTheme, MultiSelect}; +use filecoin_hashers::{Domain, Hasher}; use filecoin_proofs::{ constants::{ DefaultPieceHasher, POREP_PARTITIONS, PUBLISHED_SECTOR_SIZES, WINDOW_POST_CHALLENGE_COUNT, @@ -22,14 +24,14 @@ use storage_proofs_core::{ }; use storage_proofs_porep::stacked::{StackedCircuit, StackedCompound, StackedDrg}; use storage_proofs_post::fallback::{FallbackPoSt, FallbackPoStCircuit, FallbackPoStCompound}; -use storage_proofs_update::constants::TreeRHasher; -use storage_proofs_update::{ - circuit::EmptySectorUpdateCircuit, compound::EmptySectorUpdateCompound, EmptySectorUpdate, - PublicParams, -}; +use storage_proofs_update::{constants::TreeRHasher, EmptySectorUpdateCompound}; use structopt::StructOpt; -fn cache_porep_params(porep_config: PoRepConfig) { +fn cache_porep_params(porep_config: PoRepConfig) +where + Tree: 'static + MerkleTreeTrait, + ::Domain: Domain, +{ info!("generating PoRep groth params"); let public_params = public_params( @@ -66,7 +68,11 @@ fn cache_porep_params(porep_config: PoRepConfig .expect("failed to get verifying key"); } -fn cache_winning_post_params(post_config: &PoStConfig) { +fn cache_winning_post_params(post_config: &PoStConfig) +where + Tree: 'static + MerkleTreeTrait, + ::Domain: Domain, +{ info!("generating Winning-PoSt groth params"); let public_params = 
winning_post_public_params::(post_config) @@ -92,7 +98,11 @@ fn cache_winning_post_params(post_config: &PoSt .expect("failed to get verifying key"); } -fn cache_window_post_params(post_config: &PoStConfig) { +fn cache_window_post_params(post_config: &PoStConfig) +where + Tree: 'static + MerkleTreeTrait, + ::Domain: Domain, +{ info!("generating Window-PoSt groth params"); let public_params = window_post_public_params::(post_config) @@ -118,32 +128,36 @@ fn cache_window_post_params(post_config: &PoStC .expect("failed to get verifying key"); } -fn cache_empty_sector_update_params>( - porep_config: PoRepConfig, -) { +fn cache_empty_sector_update_params(porep_config: PoRepConfig) +where + Tree: 'static + MerkleTreeTrait>, +{ info!("generating EmptySectorUpdate groth params"); - let public_params: storage_proofs_update::PublicParams = - PublicParams::from_sector_size(u64::from(porep_config.sector_size)); + let public_params = + storage_proofs_update::PublicParams::from_sector_size(u64::from(porep_config.sector_size)); - let circuit = as CompoundProof< - EmptySectorUpdate, - EmptySectorUpdateCircuit, - >>::blank_circuit(&public_params); + let circuit = EmptySectorUpdateCompound::< + Tree::Arity, + Tree::SubTreeArity, + Tree::TopTreeArity, + >::blank_circuit(&public_params); - let _ = as CompoundProof< - EmptySectorUpdate, - EmptySectorUpdateCircuit, - >>::groth_params::(Some(&mut OsRng), &public_params) + let _ = EmptySectorUpdateCompound::< + Tree::Arity, + Tree::SubTreeArity, + Tree::TopTreeArity, + >::groth_params(Some(&mut OsRng), &public_params) .expect("failed to get groth params"); - let _ = >::get_param_metadata(circuit, &public_params) + let _ = EmptySectorUpdateCompound::get_param_metadata(circuit, &public_params) .expect("failed to get metadata"); - let _ = as CompoundProof< - EmptySectorUpdate, - EmptySectorUpdateCircuit, - >>::verifying_key::(Some(&mut OsRng), &public_params) + let _ = EmptySectorUpdateCompound::< + Tree::Arity, + Tree::SubTreeArity, + 
Tree::TopTreeArity, + >::verifying_key(Some(&mut OsRng), &public_params) .expect("failed to get verifying key"); } diff --git a/fil-proofs-tooling/src/bin/benchy/prodbench.rs b/fil-proofs-tooling/src/bin/benchy/prodbench.rs index 8451180939..47c04e6908 100644 --- a/fil-proofs-tooling/src/bin/benchy/prodbench.rs +++ b/fil-proofs-tooling/src/bin/benchy/prodbench.rs @@ -286,10 +286,11 @@ fn measure_porep_circuit(i: &ProdbenchInputs) -> usize { api_version: i.api_version(), }; - let pp = StackedDrg::::setup(&sp).expect("failed to setup DRG"); + let pp = + StackedDrg::>::setup(&sp).expect("failed to setup DRG"); let mut cs = BenchCS::::new(); - as CompoundProof, _>>::blank_circuit( + as CompoundProof>, _>>::blank_circuit( &pp, ) .synthesize(&mut cs) @@ -332,18 +333,21 @@ fn cache_porep_params(porep_config: PoRepConfig) { { let circuit = as CompoundProof< - StackedDrg, + StackedDrg>, _, >>::blank_circuit(&public_params); - StackedCompound::::get_param_metadata(circuit, &public_params) - .expect("cannot get param metadata"); + StackedCompound::>::get_param_metadata( + circuit, + &public_params, + ) + .expect("cannot get param metadata"); } { let circuit = as CompoundProof< - StackedDrg, + StackedDrg>, _, >>::blank_circuit(&public_params); - StackedCompound::::get_groth_params( + StackedCompound::>::get_groth_params( Some(&mut XorShiftRng::from_seed(SEED)), circuit, &public_params, @@ -352,11 +356,11 @@ fn cache_porep_params(porep_config: PoRepConfig) { } { let circuit = as CompoundProof< - StackedDrg, + StackedDrg>, _, >>::blank_circuit(&public_params); - StackedCompound::::get_verifying_key( + StackedCompound::>::get_verifying_key( Some(&mut XorShiftRng::from_seed(SEED)), circuit, &public_params, diff --git a/fil-proofs-tooling/src/bin/benchy/window_post.rs b/fil-proofs-tooling/src/bin/benchy/window_post.rs index 72e72772c4..81c67e9464 100644 --- a/fil-proofs-tooling/src/bin/benchy/window_post.rs +++ b/fil-proofs-tooling/src/bin/benchy/window_post.rs @@ -6,9 +6,11 @@ use 
std::time::{SystemTime, UNIX_EPOCH}; use anyhow::{ensure, Context}; use bincode::{deserialize, serialize}; +use blstrs::Scalar as Fr; use fil_proofs_tooling::measure::FuncMeasurement; use fil_proofs_tooling::shared::{PROVER_ID, RANDOMNESS, TICKET_BYTES}; use fil_proofs_tooling::{measure, Metadata}; +use filecoin_hashers::{Domain, Hasher}; use filecoin_proofs::constants::{ POREP_PARTITIONS, WINDOW_POST_CHALLENGE_COUNT, WINDOW_POST_SECTOR_COUNT, }; @@ -96,7 +98,7 @@ fn get_porep_config(sector_size: u64, api_version: ApiVersion) -> PoRepConfig { } } -fn run_pre_commit_phases( +fn run_pre_commit_phases( sector_size: u64, api_version: ApiVersion, cache_dir: PathBuf, @@ -104,7 +106,11 @@ fn run_pre_commit_phases( skip_precommit_phase2: bool, test_resume: bool, skip_staging: bool, -) -> anyhow::Result<((u64, u64), (u64, u64), (u64, u64))> { +) -> anyhow::Result<((u64, u64), (u64, u64), (u64, u64))> +where + Tree: 'static + MerkleTreeTrait, + ::Domain: Domain, +{ let (seal_pre_commit_phase1_measurement_cpu_time, seal_pre_commit_phase1_measurement_wall_time): (u64, u64) = if skip_precommit_phase1 { // generate no-op measurements (0, 0) @@ -335,7 +341,7 @@ fn run_pre_commit_phases( } #[allow(clippy::too_many_arguments)] -pub fn run_window_post_bench( +pub fn run_window_post_bench( sector_size: u64, api_version: ApiVersion, cache_dir: PathBuf, @@ -345,7 +351,11 @@ pub fn run_window_post_bench( skip_commit_phase1: bool, skip_commit_phase2: bool, test_resume: bool, -) -> anyhow::Result<()> { +) -> anyhow::Result<()> +where + Tree: 'static + MerkleTreeTrait, + ::Domain: Domain, +{ let ( (seal_pre_commit_phase1_cpu_time_ms, seal_pre_commit_phase1_wall_time_ms), ( diff --git a/fil-proofs-tooling/src/bin/benchy/winning_post.rs b/fil-proofs-tooling/src/bin/benchy/winning_post.rs index c9eab15327..8e2883dd91 100644 --- a/fil-proofs-tooling/src/bin/benchy/winning_post.rs +++ b/fil-proofs-tooling/src/bin/benchy/winning_post.rs @@ -1,8 +1,10 @@ use std::io::stdout; use anyhow::anyhow; 
+use blstrs::Scalar as Fr; use fil_proofs_tooling::shared::{create_replica, PROVER_ID, RANDOMNESS}; use fil_proofs_tooling::{measure, Metadata}; +use filecoin_hashers::{Domain, Hasher}; use filecoin_proofs::constants::{WINNING_POST_CHALLENGE_COUNT, WINNING_POST_SECTOR_COUNT}; use filecoin_proofs::types::PoStConfig; use filecoin_proofs::{ @@ -46,10 +48,14 @@ impl Report { } } -pub fn run_fallback_post_bench( +pub fn run_fallback_post_bench( sector_size: u64, api_version: ApiVersion, -) -> anyhow::Result<()> { +) -> anyhow::Result<()> +where + Tree: 'static + MerkleTreeTrait, + ::Domain: Domain, +{ if WINNING_POST_SECTOR_COUNT != 1 { return Err(anyhow!( "This benchmark only works with WINNING_POST_SECTOR_COUNT == 1" diff --git a/fil-proofs-tooling/src/bin/circuitinfo/main.rs b/fil-proofs-tooling/src/bin/circuitinfo/main.rs index f8e4791b19..8881c0924b 100644 --- a/fil-proofs-tooling/src/bin/circuitinfo/main.rs +++ b/fil-proofs-tooling/src/bin/circuitinfo/main.rs @@ -3,6 +3,7 @@ use std::str::FromStr; use bellperson::{util_cs::bench_cs::BenchCS, Circuit}; use blstrs::Scalar as Fr; use dialoguer::{theme::ColorfulTheme, MultiSelect}; +use filecoin_hashers::{Domain, Hasher}; use filecoin_proofs::{ parameters::{public_params, window_post_public_params, winning_post_public_params}, with_shape, DefaultPieceHasher, PaddedBytesAmount, PoRepConfig, PoRepProofPartitions, @@ -36,7 +37,11 @@ fn circuit_info>(circuit: C) -> CircuitInfo { } } -fn get_porep_info(porep_config: PoRepConfig) -> CircuitInfo { +fn get_porep_info(porep_config: PoRepConfig) -> CircuitInfo +where + Tree: 'static + MerkleTreeTrait, + ::Domain: Domain, +{ info!("PoRep info"); let public_params = public_params( @@ -55,7 +60,11 @@ fn get_porep_info(porep_config: PoRepConfig) -> circuit_info(circuit) } -fn get_winning_post_info(post_config: &PoStConfig) -> CircuitInfo { +fn get_winning_post_info(post_config: &PoStConfig) -> CircuitInfo +where + Tree: 'static + MerkleTreeTrait, + ::Domain: Domain, +{ 
info!("Winning PoSt info"); let post_public_params = winning_post_public_params::(post_config) @@ -69,7 +78,11 @@ fn get_winning_post_info(post_config: &PoStConf circuit_info(circuit) } -fn get_window_post_info(post_config: &PoStConfig) -> CircuitInfo { +fn get_window_post_info(post_config: &PoStConfig) -> CircuitInfo +where + Tree: 'static + MerkleTreeTrait, + ::Domain: Domain, +{ info!("Window PoSt info"); let post_public_params = window_post_public_params::(post_config) diff --git a/fil-proofs-tooling/src/bin/gen_graph_cache/main.rs b/fil-proofs-tooling/src/bin/gen_graph_cache/main.rs index 9e7034199d..ae88707199 100644 --- a/fil-proofs-tooling/src/bin/gen_graph_cache/main.rs +++ b/fil-proofs-tooling/src/bin/gen_graph_cache/main.rs @@ -4,8 +4,9 @@ use std::io::BufWriter; use std::path::Path; use anyhow::Result; +use blstrs::Scalar as Fr; use clap::{value_t, App, Arg}; -use filecoin_hashers::sha256::Sha256Hasher; +use filecoin_hashers::{sha256::Sha256Hasher, Domain, Hasher}; use filecoin_proofs::{ with_shape, DRG_DEGREE, EXP_DEGREE, SECTOR_SIZE_2_KIB, SECTOR_SIZE_32_GIB, SECTOR_SIZE_512_MIB, SECTOR_SIZE_64_GIB, SECTOR_SIZE_8_MIB, @@ -24,12 +25,16 @@ pub struct ParentCacheSummary { pub digest: String, } -fn gen_graph_cache( +fn gen_graph_cache( sector_size: usize, porep_id: [u8; 32], api_version: ApiVersion, parent_cache_summary_map: &mut ParentCacheSummaryMap, -) -> Result<()> { +) -> Result<()> +where + Tree: 'static + MerkleTreeTrait, + ::Domain: Domain, +{ let nodes = (sector_size / 32) as usize; // Note that layers and challenge_count don't affect the graph, so @@ -47,7 +52,7 @@ fn gen_graph_cache( api_version, }; - let pp = StackedDrg::::setup(&sp).expect("failed to setup DRG"); + let pp = StackedDrg::>::setup(&sp).expect("failed to setup DRG"); let parent_cache = pp.graph.parent_cache()?; let data = ParentCacheSummary { diff --git a/fil-proofs-tooling/src/shared.rs b/fil-proofs-tooling/src/shared.rs index f5c9e7fed7..85011eb3ba 100644 --- 
a/fil-proofs-tooling/src/shared.rs +++ b/fil-proofs-tooling/src/shared.rs @@ -1,6 +1,8 @@ use std::cmp::min; use std::io::{BufWriter, Seek, SeekFrom, Write}; +use blstrs::Scalar as Fr; +use filecoin_hashers::{Domain, Hasher}; use filecoin_proofs::{ add_piece, seal_pre_commit_phase1, seal_pre_commit_phase2, validate_cache_for_precommit_phase2, MerkleTreeTrait, PaddedBytesAmount, PieceInfo, PoRepConfig, PoRepProofPartitions, @@ -21,7 +23,11 @@ pub const PROVER_ID: [u8; 32] = [9; 32]; pub const RANDOMNESS: [u8; 32] = [44; 32]; pub const TICKET_BYTES: [u8; 32] = [1; 32]; -pub struct PreCommitReplicaOutput { +pub struct PreCommitReplicaOutput +where + Tree: 'static + MerkleTreeTrait, + ::Domain: Domain, +{ pub piece_info: Vec, pub private_replica_info: PrivateReplicaInfo, pub public_replica_info: PublicReplicaInfo, @@ -65,11 +71,15 @@ pub fn create_piece(piece_bytes: UnpaddedBytesAmount) -> NamedTempFile { } /// Create a replica for a single sector -pub fn create_replica( +pub fn create_replica( sector_size: u64, porep_id: [u8; 32], api_version: ApiVersion, -) -> (SectorId, PreCommitReplicaOutput) { +) -> (SectorId, PreCommitReplicaOutput) +where + Tree: 'static + MerkleTreeTrait, + ::Domain: Domain, +{ let (_porep_config, result) = create_replicas::(SectorSize(sector_size), 1, false, porep_id, api_version); // Extract the sector ID and replica output out of the result @@ -81,7 +91,7 @@ pub fn create_replica( } #[allow(clippy::type_complexity)] -pub fn create_replicas( +pub fn create_replicas( sector_size: SectorSize, qty_sectors: usize, only_add: bool, @@ -93,7 +103,11 @@ pub fn create_replicas( Vec<(SectorId, PreCommitReplicaOutput)>, FuncMeasurement>, )>, -) { +) +where + Tree: 'static + MerkleTreeTrait, + ::Domain: Domain, +{ info!("creating replicas: {:?} - {}", sector_size, qty_sectors); let sector_size_unpadded_bytes_ammount = UnpaddedBytesAmount::from(PaddedBytesAmount::from(sector_size)); diff --git a/filecoin-hashers/Cargo.toml b/filecoin-hashers/Cargo.toml 
index 679fa46b7f..4da367ad93 100644 --- a/filecoin-hashers/Cargo.toml +++ b/filecoin-hashers/Cargo.toml @@ -19,9 +19,11 @@ serde = "1.0.117" rand = "0.8.0" neptune = { version = "5.1.0", optional = true, features = ["arity2", "arity4", "arity8", "arity11", "arity16", "arity24", "arity36"] } +pasta_curves = "0.3.0" lazy_static = { version = "1.4.0", optional = true } blake2s_simd = { version = "0.5.11", optional = true } sha2 = { version = "0.9.2", optional = true } +typemap = { version = "0.3.3", optional = true } hex = "0.4.2" [features] @@ -32,7 +34,7 @@ opencl = ["bellperson/opencl", "neptune/opencl"] # available hashers blake2s = ["blake2s_simd"] -poseidon = ["neptune", "lazy_static"] +poseidon = ["neptune", "lazy_static", "typemap"] sha256 = ["sha2"] [dev-dependencies] diff --git a/filecoin-hashers/src/blake2s.rs b/filecoin-hashers/src/blake2s.rs index 89555dd24c..34c85c9b5a 100644 --- a/filecoin-hashers/src/blake2s.rs +++ b/filecoin-hashers/src/blake2s.rs @@ -1,174 +1,288 @@ +use std::cmp::Ordering; use std::fmt::{self, Debug, Formatter}; -use std::hash::Hasher as StdHasher; -use std::panic::panic_any; +use std::marker::PhantomData; -use anyhow::ensure; use bellperson::{ gadgets::{ blake2s::blake2s as blake2s_circuit, boolean::Boolean, multipack, num::AllocatedNum, }, ConstraintSystem, SynthesisError, }; -use blake2s_simd::{Hash as Blake2sHash, Params as Blake2s, State}; +use blake2s_simd::{Hash as Blake2sHash, Params as Blake2sBuilder, State}; use blstrs::Scalar as Fr; -use ff::{Field, PrimeField}; +use ff::PrimeField; use merkletree::{ hash::{Algorithm, Hashable}, merkle::Element, }; -use rand::RngCore; -use serde::{Deserialize, Serialize}; +use pasta_curves::{Fp, Fq}; +use serde::{Deserialize, Deserializer, Serialize, Serializer}; -use crate::types::{Domain, HashFunction, Hasher}; +use crate::{Domain, HashFunction, Hasher}; -#[derive(Default, Copy, Clone, PartialEq, Eq, Debug)] -pub struct Blake2sHasher {} - -impl Hasher for Blake2sHasher { - type Domain = 
Blake2sDomain; - type Function = Blake2sFunction; +#[derive(Copy, Clone, Default)] +pub struct Blake2sDomain { + pub state: [u8; 32], + _f: PhantomData, +} - fn name() -> String { - "Blake2sHasher".into() +impl Debug for Blake2sDomain { + fn fmt(&self, f: &mut Formatter<'_>) -> fmt::Result { + write!(f, "Blake2sDomain({})", hex::encode(&self.state)) } } -#[derive(Clone)] -pub struct Blake2sFunction(State); - -impl Default for Blake2sFunction { - fn default() -> Self { - Blake2sFunction(Blake2s::new().hash_length(32).to_state()) +// Can't blanket `impl From for Blake2sDomain where F: PrimeField` because it can conflict +// with `impl From<[u8; 32]> for Blake2sDomain`, i.e. `[u8; 32]` is an external type which may +// already implement the external trait `PrimeField`, which causes a "conflicting implementation" +// compiler error. +impl From for Blake2sDomain { + fn from(f: Fr) -> Self { + Blake2sDomain { + state: f.to_repr(), + _f: PhantomData, + } } } - -impl PartialEq for Blake2sFunction { - fn eq(&self, other: &Self) -> bool { - format!("{:?}", self) == format!("{:?}", other) +impl From for Blake2sDomain { + fn from(f: Fp) -> Self { + Blake2sDomain { + state: f.to_repr(), + _f: PhantomData, + } + } +} +impl From for Blake2sDomain { + fn from(f: Fq) -> Self { + Blake2sDomain { + state: f.to_repr(), + _f: PhantomData, + } } } -impl Eq for Blake2sFunction {} +#[allow(clippy::from_over_into)] +impl Into for Blake2sDomain { + fn into(self) -> Fr { + Fr::from_repr_vartime(self.state).expect("from_repr failure") + } +} +#[allow(clippy::from_over_into)] +impl Into for Blake2sDomain { + fn into(self) -> Fp { + Fp::from_repr_vartime(self.state).expect("from_repr failure") + } +} +#[allow(clippy::from_over_into)] +impl Into for Blake2sDomain { + fn into(self) -> Fq { + Fq::from_repr_vartime(self.state).expect("from_repr failure") + } +} -impl Debug for Blake2sFunction { - fn fmt(&self, f: &mut Formatter<'_>) -> fmt::Result { - write!(f, "Blake2sFunction({:?})", self.0) +// 
Currently, these panics serve as a stopgap to prevent accidental conversions of a Pasta field +// domains to/from a BLS12-381 scalar field domain. +impl From for Blake2sDomain { + fn from(_f: Fr) -> Self { + panic!("cannot convert BLS12-381 scalar into Blake2sDomain") + } +} +#[allow(clippy::from_over_into)] +impl Into for Blake2sDomain { + fn into(self) -> Fr { + panic!("cannot convert Blake2sDomain into BLS12-381 scalar"); + } +} +impl From for Blake2sDomain { + fn from(_f: Fr) -> Self { + panic!("cannot convert BLS12-381 scalar into Blake2sDomain") + } +} +#[allow(clippy::from_over_into)] +impl Into for Blake2sDomain { + fn into(self) -> Fr { + panic!("cannot convert Blake2sDomain into BLS12-381 scalar"); } } -impl StdHasher for Blake2sFunction { - #[inline] - fn write(&mut self, msg: &[u8]) { - self.0.update(msg); +impl From<[u8; 32]> for Blake2sDomain { + fn from(bytes: [u8; 32]) -> Self { + Blake2sDomain { + state: bytes, + _f: PhantomData, + } } +} - #[inline] - fn finish(&self) -> u64 { - unreachable!("unused by Function -- should never be called") +impl From for Blake2sDomain { + fn from(digest: Blake2sHash) -> Self { + let mut domain = Blake2sDomain { + state: *digest.as_array(), + _f: PhantomData, + }; + domain.trim_to_fr32(); + domain } } -#[derive( - Copy, Clone, PartialEq, Eq, Debug, PartialOrd, Ord, Default, Serialize, Deserialize, Hash, -)] -pub struct Blake2sDomain(pub [u8; 32]); +impl AsRef<[u8]> for Blake2sDomain { + fn as_ref(&self) -> &[u8] { + &self.state + } +} -impl AsRef for Blake2sDomain { +impl AsRef for Blake2sDomain { fn as_ref(&self) -> &Self { self } } -impl Blake2sDomain { - pub fn trim_to_fr32(&mut self) { - // strip last two bits, to ensure result is in Fr. - self.0[31] &= 0b0011_1111; +// Implement comparison traits by hand because we have not bound `F` to have those traits. 
+impl PartialEq for Blake2sDomain { + fn eq(&self, other: &Self) -> bool { + self.state == other.state } } -impl AsRef<[u8]> for Blake2sDomain { - fn as_ref(&self) -> &[u8] { - &self.0[..] - } -} +impl Eq for Blake2sDomain {} -impl Hashable for Blake2sDomain { - fn hash(&self, state: &mut Blake2sFunction) { - state.write(self.as_ref()) +impl PartialOrd for Blake2sDomain { + fn partial_cmp(&self, other: &Self) -> Option { + self.state.partial_cmp(&other.state) } } - -impl From for Blake2sDomain { - fn from(val: Fr) -> Self { - Blake2sDomain(val.to_repr()) +impl Ord for Blake2sDomain { + fn cmp(&self, other: &Self) -> Ordering { + self.state.cmp(&other.state) } } -impl Element for Blake2sDomain { +impl Element for Blake2sDomain { fn byte_len() -> usize { 32 } fn from_slice(bytes: &[u8]) -> Self { - match Blake2sDomain::try_from_bytes(bytes) { - Ok(res) => res, - Err(err) => panic_any(err), - } + assert_eq!(bytes.len(), Self::byte_len(), "invalid number of bytes"); + let mut state = [0u8; 32]; + state.copy_from_slice(bytes); + state.into() } fn copy_to_slice(&self, bytes: &mut [u8]) { - bytes.copy_from_slice(&self.0); + bytes.copy_from_slice(&self.state); } } -impl From for Fr { - fn from(val: Blake2sDomain) -> Self { - Fr::from_repr_vartime(val.0).expect("from_repr failure") +impl std::hash::Hash for Blake2sDomain { + fn hash(&self, hasher: &mut H) { + std::hash::Hash::hash(&self.state, hasher); } } -impl Domain for Blake2sDomain { - fn into_bytes(&self) -> Vec { - self.0.to_vec() +// Implement `serde` traits by hand because we have not bound `F` to have those traits. 
+impl Serialize for Blake2sDomain { + fn serialize(&self, s: S) -> Result { + self.state.serialize(s) + } +} +impl<'de, F> Deserialize<'de> for Blake2sDomain { + fn deserialize>(d: D) -> Result { + <[u8; 32]>::deserialize(d).map(Into::into) + } +} + +// Implementing `Domain` for specific fields (rather than blanket implementing for all `F`) restricts +// users to using the fields which are compatible with `rust-fil-proofs`. +impl Domain for Blake2sDomain { + type Field = Fr; +} +impl Domain for Blake2sDomain { + type Field = Fp; +} +impl Domain for Blake2sDomain { + type Field = Fq; +} + +impl Blake2sDomain { + // Strip the last (most-significant) two bits to ensure that we state within the ~256-bit field + // `F`; note the fields `Fr`, `Fp`, and `Fq` are each 255-bit fields which fully utilize 254 + // bits, i.e. `254 < log2(field_modulus) < 255`. + pub fn trim_to_fr32(&mut self) { + self.state[31] &= 0b0011_1111; } +} - fn try_from_bytes(raw: &[u8]) -> anyhow::Result { - ensure!( - raw.len() == 32 && u32::from(raw[31]) <= Fr::NUM_BITS, - "invalid amount of bytes" - ); +#[derive(Clone, Debug)] +pub struct Blake2sFunction { + hasher: State, + _f: PhantomData, +} - let mut res = Blake2sDomain::default(); - res.0.copy_from_slice(&raw[0..32]); - Ok(res) +impl Default for Blake2sFunction { + fn default() -> Self { + Blake2sFunction { + hasher: Blake2sBuilder::new().hash_length(32).to_state(), + _f: PhantomData, + } } +} - fn write_bytes(&self, dest: &mut [u8]) -> anyhow::Result<()> { - ensure!(dest.len() >= 32, "too many bytes"); - dest[0..32].copy_from_slice(&self.0[..]); - Ok(()) +impl std::hash::Hasher for Blake2sFunction { + fn write(&mut self, msg: &[u8]) { + self.hasher.update(msg); } - fn random(rng: &mut R) -> Self { - // generating an Fr and converting it, to ensure we stay in the field - Fr::random(rng).into() + fn finish(&self) -> u64 { + unreachable!("unused by Function -- should never be called") } } -#[allow(clippy::from_over_into)] -impl Into for 
Blake2sHash { - fn into(self) -> Blake2sDomain { - let mut res = Blake2sDomain::default(); - res.0[..].copy_from_slice(self.as_ref()); - res.trim_to_fr32(); +impl Hashable> for Blake2sDomain { + fn hash(&self, hasher: &mut Blake2sFunction) { + as std::hash::Hasher>::write(hasher, self.as_ref()); + } +} - res +// Must add the trait bound `F: PrimeField` because `Algorithm` requires that `F` implements +// `Clone`. +impl Algorithm> for Blake2sFunction { + fn hash(&mut self) -> Blake2sDomain { + self.hasher.clone().finalize().into() + } + + fn reset(&mut self) { + self.hasher = Blake2sBuilder::new().hash_length(32).to_state(); + } + + fn leaf(&mut self, leaf: Blake2sDomain) -> Blake2sDomain { + leaf + } + + fn node( + &mut self, + left: Blake2sDomain, + right: Blake2sDomain, + _height: usize, + ) -> Blake2sDomain { + left.hash(self); + right.hash(self); + self.hash() + } + + fn multi_node(&mut self, parts: &[Blake2sDomain], _height: usize) -> Blake2sDomain { + for part in parts { + part.hash(self); + } + self.hash() } } -impl HashFunction for Blake2sFunction { - fn hash(data: &[u8]) -> Blake2sDomain { - Blake2s::new() +// Specialized implementation of `HashFunction` over the BLS12-381 scalar field `Fr` because that +// field is the only one which is compatible with `HashFunction`'s Groth16 circuit interfaces. 
+impl HashFunction> for Blake2sFunction { + fn hash(data: &[u8]) -> Blake2sDomain { + Blake2sBuilder::new() .hash_length(32) .to_state() .update(data) @@ -176,8 +290,8 @@ impl HashFunction for Blake2sFunction { .into() } - fn hash2(a: &Blake2sDomain, b: &Blake2sDomain) -> Blake2sDomain { - Blake2s::new() + fn hash2(a: &Blake2sDomain, b: &Blake2sDomain) -> Blake2sDomain { + Blake2sBuilder::new() .hash_length(32) .to_state() .update(a.as_ref()) @@ -234,14 +348,11 @@ impl HashFunction for Blake2sFunction { multipack::pack_bits(cs.namespace(|| "pack"), &alloc_bits) } - fn hash2_circuit( + fn hash2_circuit>( mut cs: CS, a_num: &AllocatedNum, b_num: &AllocatedNum, - ) -> Result, SynthesisError> - where - CS: ConstraintSystem, - { + ) -> Result, SynthesisError> { // Allocate as booleans let a = a_num.to_bits_le(cs.namespace(|| "a_bits"))?; let b = b_num.to_bits_le(cs.namespace(|| "b_bits"))?; @@ -262,45 +373,173 @@ impl HashFunction for Blake2sFunction { } } -impl Algorithm for Blake2sFunction { - #[inline] - fn hash(&mut self) -> Blake2sDomain { - self.0.clone().finalize().into() +// Specialized implementation of `HashFunction` over the Pasta scalar fields `Fp` and `Fq` because +// those fields are incompatible with `HashFunction`'s circuit Groth16 interfaces. 
+impl HashFunction> for Blake2sFunction { + fn hash(data: &[u8]) -> Blake2sDomain { + Blake2sBuilder::new() + .hash_length(32) + .to_state() + .update(data) + .finalize() + .into() } - #[inline] - fn reset(&mut self) { - self.0 = Blake2s::new().hash_length(32).to_state() + fn hash2(a: &Blake2sDomain, b: &Blake2sDomain) -> Blake2sDomain { + Blake2sBuilder::new() + .hash_length(32) + .to_state() + .update(a.as_ref()) + .update(b.as_ref()) + .finalize() + .into() } - fn leaf(&mut self, leaf: Blake2sDomain) -> Blake2sDomain { - leaf + fn hash_leaf_circuit>( + mut _cs: CS, + _left: &AllocatedNum, + _right: &AllocatedNum, + _height: usize, + ) -> Result, SynthesisError> { + unimplemented!("Blake2sFunction cannot be used within Groth16 circuits") } - fn node(&mut self, left: Blake2sDomain, right: Blake2sDomain, _height: usize) -> Blake2sDomain { - left.hash(self); - right.hash(self); - self.hash() + fn hash_multi_leaf_circuit>( + mut _cs: CS, + _leaves: &[AllocatedNum], + _height: usize, + ) -> Result, SynthesisError> { + unimplemented!("Blake2sFunction cannot be used within Groth16 circuits") } - fn multi_node(&mut self, parts: &[Blake2sDomain], _height: usize) -> Blake2sDomain { - for part in parts { - part.hash(self) - } - self.hash() + fn hash_md_circuit>( + _cs: &mut CS, + _elements: &[AllocatedNum], + ) -> Result, SynthesisError> { + unimplemented!("Blake2sFunction cannot be used within Groth16 circuits") + } + + fn hash_leaf_bits_circuit>( + _cs: CS, + _left: &[Boolean], + _right: &[Boolean], + _height: usize, + ) -> Result, SynthesisError> { + unimplemented!("Blake2sFunction cannot be used within Groth16 circuits") + } + + fn hash_circuit>( + mut _cs: CS, + _bits: &[Boolean], + ) -> Result, SynthesisError> { + unimplemented!("Blake2sFunction cannot be used within Groth16 circuits") + } + + fn hash2_circuit>( + mut _cs: CS, + _a_num: &AllocatedNum, + _b_num: &AllocatedNum, + ) -> Result, SynthesisError> { + unimplemented!("Blake2sFunction cannot be used within 
Groth16 circuits") } } +impl HashFunction> for Blake2sFunction { + fn hash(data: &[u8]) -> Blake2sDomain { + Blake2sBuilder::new() + .hash_length(32) + .to_state() + .update(data) + .finalize() + .into() + } -impl From<[u8; 32]> for Blake2sDomain { - #[inline] - fn from(val: [u8; 32]) -> Self { - Blake2sDomain(val) + fn hash2(a: &Blake2sDomain, b: &Blake2sDomain) -> Blake2sDomain { + Blake2sBuilder::new() + .hash_length(32) + .to_state() + .update(a.as_ref()) + .update(b.as_ref()) + .finalize() + .into() + } + + fn hash_leaf_circuit>( + mut _cs: CS, + _left: &AllocatedNum, + _right: &AllocatedNum, + _height: usize, + ) -> Result, SynthesisError> { + unimplemented!("Blake2sFunction cannot be used within Groth16 circuits") + } + + fn hash_multi_leaf_circuit>( + mut _cs: CS, + _leaves: &[AllocatedNum], + _height: usize, + ) -> Result, SynthesisError> { + unimplemented!("Blake2sFunction cannot be used within Groth16 circuits") + } + + fn hash_md_circuit>( + _cs: &mut CS, + _elements: &[AllocatedNum], + ) -> Result, SynthesisError> { + unimplemented!("Blake2sFunction cannot be used within Groth16 circuits") + } + + fn hash_leaf_bits_circuit>( + _cs: CS, + _left: &[Boolean], + _right: &[Boolean], + _height: usize, + ) -> Result, SynthesisError> { + unimplemented!("Blake2sFunction cannot be used within Groth16 circuits") + } + + fn hash_circuit>( + mut _cs: CS, + _bits: &[Boolean], + ) -> Result, SynthesisError> { + unimplemented!("Blake2sFunction cannot be used within Groth16 circuits") + } + + fn hash2_circuit>( + mut _cs: CS, + _a_num: &AllocatedNum, + _b_num: &AllocatedNum, + ) -> Result, SynthesisError> { + unimplemented!("Blake2sFunction cannot be used within Groth16 circuits") } } -impl From for [u8; 32] { - #[inline] - fn from(val: Blake2sDomain) -> Self { - val.0 +#[derive(Default, Copy, Clone, PartialEq, Eq, Debug)] +pub struct Blake2sHasher { + _f: PhantomData, +} + +// Implementing `Hasher` for specific fields (rather than blanket implementing for all `F`) 
restricts +// users to using the fields which are compatible with `rust-fil-proofs`. +impl Hasher for Blake2sHasher { + type Domain = Blake2sDomain; + type Function = Blake2sFunction; + + fn name() -> String { + "Blake2sHasher".into() + } +} +impl Hasher for Blake2sHasher { + type Domain = Blake2sDomain; + type Function = Blake2sFunction; + + fn name() -> String { + "Blake2sHasher_pallas".into() + } +} +impl Hasher for Blake2sHasher { + type Domain = Blake2sDomain; + type Function = Blake2sFunction; + + fn name() -> String { + "Blake2sHasher_vesta".into() } } diff --git a/filecoin-hashers/src/poseidon.rs b/filecoin-hashers/src/poseidon.rs index 86c69b30b5..57d1d30ba1 100644 --- a/filecoin-hashers/src/poseidon.rs +++ b/filecoin-hashers/src/poseidon.rs @@ -1,240 +1,368 @@ use std::cmp::Ordering; -use std::hash::{Hash as StdHash, Hasher as StdHasher}; -use std::panic::panic_any; +use std::marker::PhantomData; -use anyhow::ensure; use bellperson::{ gadgets::{boolean::Boolean, num::AllocatedNum}, ConstraintSystem, SynthesisError, }; use blstrs::Scalar as Fr; use ff::{Field, PrimeField}; -use generic_array::typenum::{marker_traits::Unsigned, U2}; +use generic_array::typenum::{Unsigned, U2, U4, U8}; use merkletree::{ - hash::{Algorithm as LightAlgorithm, Hashable}, + hash::{Algorithm, Hashable}, merkle::Element, }; -use neptune::{circuit::poseidon_hash, poseidon::Poseidon}; -use rand::RngCore; -use serde::{Deserialize, Serialize}; - -use crate::types::{ - Domain, HashFunction, Hasher, PoseidonArity, PoseidonMDArity, POSEIDON_CONSTANTS_16, - POSEIDON_CONSTANTS_2, POSEIDON_CONSTANTS_4, POSEIDON_CONSTANTS_8, POSEIDON_MD_CONSTANTS, +use neptune::{circuit::poseidon_hash, Poseidon}; +use pasta_curves::{Fp, Fq}; +use serde::{Deserialize, Deserializer, Serialize, Serializer}; + +use crate::{ + Domain, FieldArity, HashFunction, Hasher, PoseidonArity, PoseidonMDArity, POSEIDON_CONSTANTS, + POSEIDON_CONSTANTS_2, POSEIDON_CONSTANTS_2_PALLAS, POSEIDON_CONSTANTS_2_VESTA, + 
POSEIDON_MD_CONSTANTS, POSEIDON_MD_CONSTANTS_PALLAS, POSEIDON_MD_CONSTANTS_VESTA, }; -#[derive(Default, Copy, Clone, Debug, PartialEq, Eq, Serialize, Deserialize)] -pub struct PoseidonHasher {} - -impl Hasher for PoseidonHasher { - type Domain = PoseidonDomain; - type Function = PoseidonFunction; +#[derive(Default, Copy, Clone, Debug)] +pub struct PoseidonDomain { + repr: [u8; 32], + _f: PhantomData, +} - fn name() -> String { - "poseidon_hasher".into() +// Can't blanket `impl From for PoseidonDomain where F: PrimeField` because it can conflict +// with `impl From<[u8; 32]> for PoseidonDomain`, i.e. `[u8; 32]` is an external type which +// may already implement the external trait `PrimeField`, which causes a +// "conflicting implementation" compiler error. +impl From for PoseidonDomain { + fn from(f: Fr) -> Self { + PoseidonDomain { + repr: f.to_repr(), + _f: PhantomData, + } } } - -#[derive(Copy, Clone, Debug, PartialEq, Eq)] -pub struct PoseidonFunction(Fr); - -impl Default for PoseidonFunction { - fn default() -> PoseidonFunction { - PoseidonFunction(Fr::zero()) +impl From for PoseidonDomain { + fn from(f: Fp) -> Self { + PoseidonDomain { + repr: f.to_repr(), + _f: PhantomData, + } } } - -impl Hashable for Fr { - fn hash(&self, state: &mut PoseidonFunction) { - state.write(&self.to_repr()); +impl From for PoseidonDomain { + fn from(f: Fq) -> Self { + PoseidonDomain { + repr: f.to_repr(), + _f: PhantomData, + } } } -impl Hashable for PoseidonDomain { - fn hash(&self, state: &mut PoseidonFunction) { - state.write(&self.0); +#[allow(clippy::from_over_into)] +impl Into for PoseidonDomain { + fn into(self) -> Fr { + Fr::from_repr_vartime(self.repr).expect("from_repr failure") } } - -#[derive(Copy, Clone, Debug, Serialize, Deserialize)] -pub struct PoseidonDomain(pub ::Repr); - -impl AsRef for PoseidonDomain { - fn as_ref(&self) -> &PoseidonDomain { - self +#[allow(clippy::from_over_into)] +impl Into for PoseidonDomain { + fn into(self) -> Fp { + 
Fp::from_repr_vartime(self.repr).expect("from_repr failure") } } - -impl StdHash for PoseidonDomain { - fn hash(&self, state: &mut H) { - StdHash::hash(&self.0, state); +#[allow(clippy::from_over_into)] +impl Into for PoseidonDomain { + fn into(self) -> Fq { + Fq::from_repr_vartime(self.repr).expect("from_repr failure") } } -impl PartialEq for PoseidonDomain { - fn eq(&self, other: &Self) -> bool { - self.0 == other.0 +// Currently, these panics serve as a stopgap to prevent accidental conversions of a Pasta field +// domains to/from a BLS12-381 scalar field domain. +impl From for PoseidonDomain { + fn from(_f: Fr) -> Self { + panic!("cannot convert BLS12-381 scalar into PoseidonDomain") } } - -impl Eq for PoseidonDomain {} - -impl Default for PoseidonDomain { - fn default() -> PoseidonDomain { - PoseidonDomain(::Repr::default()) +#[allow(clippy::from_over_into)] +impl Into for PoseidonDomain { + fn into(self) -> Fr { + panic!("cannot convert PoseidonDomain into BLS12-381 scalar"); } } - -impl Ord for PoseidonDomain { - #[inline(always)] - fn cmp(&self, other: &PoseidonDomain) -> Ordering { - (self.0).cmp(&other.0) +impl From for PoseidonDomain { + fn from(_f: Fr) -> Self { + panic!("cannot convert BLS12-381 scalar into PoseidonDomain") + } +} +#[allow(clippy::from_over_into)] +impl Into for PoseidonDomain { + fn into(self) -> Fr { + panic!("cannot convert PoseidonDomain into BLS12-381 scalar"); } } -impl PartialOrd for PoseidonDomain { - #[inline(always)] - fn partial_cmp(&self, other: &PoseidonDomain) -> Option { - Some((self.0).cmp(&other.0)) +impl From<[u8; 32]> for PoseidonDomain { + fn from(bytes: [u8; 32]) -> Self { + PoseidonDomain { + repr: bytes, + _f: PhantomData, + } } } -impl AsRef<[u8]> for PoseidonDomain { - #[inline] +impl AsRef<[u8]> for PoseidonDomain { fn as_ref(&self) -> &[u8] { - &self.0 + &self.repr } } -impl Domain for PoseidonDomain { - fn into_bytes(&self) -> Vec { - self.0.to_vec() +impl AsRef for PoseidonDomain { + fn as_ref(&self) -> 
&Self { + self } +} - fn try_from_bytes(raw: &[u8]) -> anyhow::Result { - ensure!( - raw.len() == PoseidonDomain::byte_len(), - "invalid amount of bytes" - ); - let mut repr = ::Repr::default(); - repr.copy_from_slice(raw); - Ok(PoseidonDomain(repr)) +// Implement comparison traits by hand because we have not bound `F` to have those traits. +impl PartialEq for PoseidonDomain { + fn eq(&self, other: &Self) -> bool { + self.repr == other.repr } +} - fn write_bytes(&self, dest: &mut [u8]) -> anyhow::Result<()> { - ensure!( - dest.len() == PoseidonDomain::byte_len(), - "invalid amount of bytes" - ); - dest.copy_from_slice(&self.0); - Ok(()) +impl Eq for PoseidonDomain {} + +impl PartialOrd for PoseidonDomain { + fn partial_cmp(&self, other: &Self) -> Option { + self.repr.partial_cmp(&other.repr) } +} - fn random(rng: &mut R) -> Self { - // generating an Fr and converting it, to ensure we stay in the field - Fr::random(rng).into() +impl Ord for PoseidonDomain { + fn cmp(&self, other: &Self) -> Ordering { + self.repr.cmp(&other.repr) } } -impl Element for PoseidonDomain { +// Must add the trait bound `F: PrimeField` because `Element` requires that `F` implements `Clone`, +// `Send`, and `Sync`. 
+impl Element for PoseidonDomain { fn byte_len() -> usize { 32 } fn from_slice(bytes: &[u8]) -> Self { - match PoseidonDomain::try_from_bytes(bytes) { - Ok(res) => res, - Err(err) => panic_any(err), - } + assert_eq!(bytes.len(), Self::byte_len(), "invalid number of bytes"); + let mut repr = [0u8; 32]; + repr.copy_from_slice(bytes); + repr.into() } fn copy_to_slice(&self, bytes: &mut [u8]) { - bytes.copy_from_slice(&self.0); + bytes.copy_from_slice(&self.repr); } } -impl StdHasher for PoseidonFunction { - #[inline] - fn write(&mut self, msg: &[u8]) { - self.0 = Fr::from_repr_vartime(shared_hash(msg).0).expect("from_repr failure"); +impl std::hash::Hash for PoseidonDomain { + fn hash(&self, hasher: &mut H) { + std::hash::Hash::hash(&self.repr, hasher); } +} - #[inline] - fn finish(&self) -> u64 { - unimplemented!() +// Implement `serde` traits by hand because we have not bound `F` to have those traits. +impl Serialize for PoseidonDomain { + fn serialize(&self, s: S) -> Result { + self.repr.serialize(s) + } +} +impl<'de, F> Deserialize<'de> for PoseidonDomain { + fn deserialize>(d: D) -> Result { + <[u8; 32]>::deserialize(d).map(Into::into) + } +} + +// Implementing `Domain` for specific fields (rather than blanket implementing for all `F`) restricts +// users to using the fields which are compatible with `rust-fil-proofs`. +impl Domain for PoseidonDomain { + type Field = Fr; +} +impl Domain for PoseidonDomain { + type Field = Fp; +} +impl Domain for PoseidonDomain { + type Field = Fq; +} + +impl PoseidonDomain { + pub fn repr(&self) -> [u8; 32] { + self.repr } } -fn shared_hash(data: &[u8]) -> PoseidonDomain { - // FIXME: We shouldn't unwrap here, but doing otherwise will require an interface change. - // We could truncate so `bytes_into_frs` cannot fail, then ensure `data` is always `fr_safe`. 
- let preimage = data +fn shared_hash(data: &[u8]) -> PoseidonDomain +where + F: PrimeField, + PoseidonDomain: Domain, +{ + let preimage: Vec = data .chunks(32) .map(|chunk| { - Fr::from_repr_vartime(PoseidonDomain::from_slice(chunk).0).expect("from_repr failure") + let mut repr = F::Repr::default(); + // FIXME: We shouldn't panic here, but doing otherwise will require an interface change. + // We could truncate so `bytes_into_frs` cannot fail, then ensure `data` is always + // `fr_safe`. + repr.as_mut().copy_from_slice(chunk); + F::from_repr_vartime(repr).expect("from_repr failure") }) - .collect::>(); + .collect(); shared_hash_frs(&preimage).into() } -fn shared_hash_frs(preimage: &[Fr]) -> Fr { +// Must add trait bound `F: PrimeField` because `FieldArity` requires `F: PrimeField`. +fn shared_hash_frs(preimage: &[F]) -> F { match preimage.len() { 2 => { - let mut p = Poseidon::new_with_preimage(preimage, &POSEIDON_CONSTANTS_2); - p.hash() + let consts = &POSEIDON_CONSTANTS + .get::>() + .expect("arity-2 Poseidon constants not found for field"); + Poseidon::new_with_preimage(preimage, consts).hash() } 4 => { - let mut p = Poseidon::new_with_preimage(preimage, &POSEIDON_CONSTANTS_4); - p.hash() + let consts = &POSEIDON_CONSTANTS + .get::>() + .expect("arity-4 Poseidon constants not found for field"); + Poseidon::new_with_preimage(preimage, consts).hash() } 8 => { - let mut p = Poseidon::new_with_preimage(preimage, &POSEIDON_CONSTANTS_8); - p.hash() - } - 16 => { - let mut p = Poseidon::new_with_preimage(preimage, &POSEIDON_CONSTANTS_16); - p.hash() + let consts = &POSEIDON_CONSTANTS + .get::>() + .expect("arity-8 Poseidon constants not found for field"); + Poseidon::new_with_preimage(preimage, consts).hash() } + n => panic!("unsupported arity for Poseidon hasher: {}", n), + } +} + +#[derive(Default, Clone, Debug)] +pub struct PoseidonFunction(F); + +impl std::hash::Hasher for PoseidonFunction +where + F: PrimeField, + PoseidonDomain: Domain, +{ + fn write(&mut self, 
preimage: &[u8]) { + self.0 = shared_hash(preimage).into(); + } - _ => panic_any(format!( - "Unsupported arity for Poseidon hasher: {}", - preimage.len() - )), + fn finish(&self) -> u64 { + unreachable!("unused by Function -- should never be called") + } +} + +impl Hashable> for PoseidonDomain +where + F: PrimeField, + PoseidonDomain: Domain, +{ + fn hash(&self, hasher: &mut PoseidonFunction) { + as std::hash::Hasher>::write(hasher, self.as_ref()) + } +} + +// We can't blanket `impl Hashable> for F where F: PrimeField` because we can't +// implement an external trait `Hashable` for an external type `F: PrimeField`. +impl Hashable> for Fr { + fn hash(&self, hasher: &mut PoseidonFunction) { + as std::hash::Hasher>::write(hasher, &self.to_repr()) + } +} +impl Hashable> for Fp { + fn hash(&self, hasher: &mut PoseidonFunction) { + as std::hash::Hasher>::write(hasher, &self.to_repr()) + } +} +impl Hashable> for Fq { + fn hash(&self, hasher: &mut PoseidonFunction) { + as std::hash::Hasher>::write(hasher, &self.to_repr()) + } +} + +impl Algorithm> for PoseidonFunction +where + // Must add the trait bounds `F: PrimeField` and `PoseidonDomain: Domain` because + // they are required by `shared_hash_frs`. 
+ F: PrimeField, + PoseidonDomain: Domain, +{ + fn hash(&mut self) -> PoseidonDomain { + self.0.into() + } + + fn reset(&mut self) { + self.0 = F::zero(); + } + + fn leaf(&mut self, leaf: PoseidonDomain) -> PoseidonDomain { + leaf + } + + fn node( + &mut self, + left: PoseidonDomain, + right: PoseidonDomain, + _height: usize, + ) -> PoseidonDomain { + shared_hash_frs::(&[left.into(), right.into()]).into() + } + + fn multi_node(&mut self, preimage: &[PoseidonDomain], _height: usize) -> PoseidonDomain { + match preimage.len() { + 2 | 4 | 8 => { + let preimage: Vec = preimage.iter().map(|domain| (*domain).into()).collect(); + shared_hash_frs(&preimage).into() + } + arity => panic!("unsupported Halo Poseidon hasher arity: {}", arity), + } } } -impl HashFunction for PoseidonFunction { - fn hash(data: &[u8]) -> PoseidonDomain { +// Specialized implementation of `HashFunction` over the BLS12-381 scalar field `Fr` because `Fr` +// is the only field which is compatible with `HashFunction`'s Groth16 circuit interfaces. 
+impl HashFunction> for PoseidonFunction { + fn hash(data: &[u8]) -> PoseidonDomain { shared_hash(data) } - fn hash2(a: &PoseidonDomain, b: &PoseidonDomain) -> PoseidonDomain { - let mut p = - Poseidon::new_with_preimage(&[(*a).into(), (*b).into()][..], &*POSEIDON_CONSTANTS_2); - let fr: Fr = p.hash(); - fr.into() + fn hash2(a: &PoseidonDomain, b: &PoseidonDomain) -> PoseidonDomain { + let preimage = [(*a).into(), (*b).into()]; + Poseidon::new_with_preimage(&preimage, &*POSEIDON_CONSTANTS_2) + .hash() + .into() } - fn hash_md(input: &[PoseidonDomain]) -> PoseidonDomain { - assert!(input.len() > 1, "hash_md needs more than one element."); - let arity = PoseidonMDArity::to_usize(); + fn hash_md(input: &[PoseidonDomain]) -> PoseidonDomain { + assert!( + input.len() > 1, + "hash_md preimage must contain more than one element" + ); + let arity = PoseidonMDArity::to_usize(); let mut p = Poseidon::new(&*POSEIDON_MD_CONSTANTS); - let fr_input = input - .iter() - .map(|x| Fr::from_repr_vartime(x.0).expect("from_repr failure")) - .collect::>(); + let fr_input: Vec = input.iter().map(|domain| (*domain).into()).collect(); fr_input[1..] .chunks(arity - 1) - .fold(fr_input[0], |acc, elts| { + .fold(fr_input[0], |acc, frs| { p.reset(); - p.input(acc).expect("input failure"); // These unwraps will panic iff arity is incorrect, but it was checked above. - elts.iter().for_each(|elt| { - let _ = p.input(*elt).expect("input failure"); - }); + // Calling `.expect()` will panic iff we call `.input()` more that `arity` number + // of times prior to resetting the hasher (i.e. if we exceed the arity of the + // Poseidon constants) or if `preimge.len() == 1`; we prevent both scenarios. 
+ p.input(acc).expect("input failure"); + for fr in frs { + p.input(*fr).expect("input failure"); + } p.hash() }) .into() @@ -246,9 +374,7 @@ impl HashFunction for PoseidonFunction { right: &AllocatedNum, _height: usize, ) -> Result, SynthesisError> { - let preimage = vec![left.clone(), right.clone()]; - - poseidon_hash::(cs, preimage, U2::PARAMETERS()) + Self::hash2_circuit(cs, left, right) } fn hash_multi_leaf_circuit>( @@ -256,15 +382,21 @@ impl HashFunction for PoseidonFunction { leaves: &[AllocatedNum], _height: usize, ) -> Result, SynthesisError> { - let params = Arity::PARAMETERS(); - poseidon_hash::(cs, leaves.to_vec(), params) + let consts = &POSEIDON_CONSTANTS + .get::>() + .unwrap_or_else(|| { + panic!( + "arity-{} Poseidon constants not found for field", + Arity::to_usize(), + ) + }); + poseidon_hash::(cs, leaves.to_vec(), consts) } fn hash_md_circuit>( cs: &mut CS, elements: &[AllocatedNum], ) -> Result, SynthesisError> { - let params = PoseidonMDArity::PARAMETERS(); let arity = PoseidonMDArity::to_usize(); let mut hash = elements[0].clone(); @@ -284,7 +416,12 @@ impl HashFunction for PoseidonFunction { .expect("alloc failure"); } let cs = cs.namespace(|| format!("hash md {}", hash_num)); - hash = poseidon_hash::<_, Fr, PoseidonMDArity>(cs, preimage.clone(), params)?.clone(); + hash = poseidon_hash::<_, Fr, PoseidonMDArity>( + cs, + preimage.clone(), + &*POSEIDON_MD_CONSTANTS, + )? + .clone(); } Ok(hash) @@ -306,266 +443,215 @@ impl HashFunction for PoseidonFunction { CS: ConstraintSystem, { let preimage = vec![a.clone(), b.clone()]; - poseidon_hash::(cs, preimage, U2::PARAMETERS()) + poseidon_hash::(cs, preimage, &*POSEIDON_CONSTANTS_2) } } -impl LightAlgorithm for PoseidonFunction { - #[inline] - fn hash(&mut self) -> PoseidonDomain { - self.0.into() +// Specialized implementation of `HashFunction` over the Pasta scalar fields `Fp` and `Fq` because +// those fields are incompatible with `HashFunction`'s Groth16 circuit interfaces. 
+impl HashFunction> for PoseidonFunction { + fn hash(data: &[u8]) -> PoseidonDomain { + shared_hash(data) } - #[inline] - fn reset(&mut self) { - self.0 = Fr::zero(); + fn hash2(a: &PoseidonDomain, b: &PoseidonDomain) -> PoseidonDomain { + let preimage = [(*a).into(), (*b).into()]; + Poseidon::new_with_preimage(&preimage, &*POSEIDON_CONSTANTS_2_PALLAS) + .hash() + .into() } - fn leaf(&mut self, leaf: PoseidonDomain) -> PoseidonDomain { - leaf + fn hash_md(input: &[PoseidonDomain]) -> PoseidonDomain { + assert!( + input.len() > 1, + "hash_md preimage must contain more than one element" + ); + + let arity = PoseidonMDArity::to_usize(); + let mut p = Poseidon::new(&*POSEIDON_MD_CONSTANTS_PALLAS); + + let fr_input: Vec = input.iter().map(|domain| (*domain).into()).collect(); + + fr_input[1..] + .chunks(arity - 1) + .fold(fr_input[0], |acc, frs| { + p.reset(); + // Calling `.expect()` will panic iff we call `.input()` more that `arity` number + // of times prior to resetting the hasher (i.e. if we exceed the arity of the + // Poseidon constants) or if `preimge.len() == 1`; we prevent both scenarios. 
+ p.input(acc).expect("input failure"); + for fr in frs { + p.input(*fr).expect("input failure"); + } + p.hash() + }) + .into() } - fn node( - &mut self, - left: PoseidonDomain, - right: PoseidonDomain, + fn hash_leaf_circuit>( + _cs: CS, + _left: &AllocatedNum, + _right: &AllocatedNum, _height: usize, - ) -> PoseidonDomain { - shared_hash_frs(&[ - Fr::from_repr_vartime(left.0).expect("from_repr failure"), - Fr::from_repr_vartime(right.0).expect("from_repr failure"), - ]) - .into() - } - - fn multi_node(&mut self, parts: &[PoseidonDomain], _height: usize) -> PoseidonDomain { - match parts.len() { - 1 | 2 | 4 | 8 | 16 => shared_hash_frs( - &parts - .iter() - .enumerate() - .map(|(i, x)| { - if let Some(fr) = Fr::from_repr_vartime(x.0) { - fr - } else { - panic_any(format!("from_repr failure at {}", i)); - } - }) - .collect::>(), - ) - .into(), - arity => panic_any(format!("unsupported arity {}", arity)), - } + ) -> Result, SynthesisError> { + unimplemented!("PoseidonFunction cannot be used within Groth16 circuits") } -} -impl From for PoseidonDomain { - #[inline] - fn from(val: Fr) -> Self { - PoseidonDomain(val.to_repr()) + fn hash_multi_leaf_circuit>( + _cs: CS, + _leaves: &[AllocatedNum], + _height: usize, + ) -> Result, SynthesisError> { + unimplemented!("PoseidonFunction cannot be used within Groth16 circuits") } -} -impl From<[u8; 32]> for PoseidonDomain { - #[inline] - fn from(val: [u8; 32]) -> Self { - PoseidonDomain(val) + fn hash_md_circuit>( + _cs: &mut CS, + _elements: &[AllocatedNum], + ) -> Result, SynthesisError> { + unimplemented!("PoseidonFunction cannot be used within Groth16 circuits") } -} -impl From for Fr { - #[inline] - fn from(val: PoseidonDomain) -> Self { - Fr::from_repr_vartime(val.0).expect("from_repr failure") + fn hash_leaf_bits_circuit>( + _cs: CS, + _left: &[Boolean], + _right: &[Boolean], + _height: usize, + ) -> Result, SynthesisError> { + unimplemented!("PoseidonFunction cannot be used within Groth16 circuits") } -} - -#[cfg(test)] 
-mod tests { - use super::*; - use bellperson::util_cs::test_cs::TestConstraintSystem; - use merkletree::{merkle::MerkleTree, store::VecStore}; - - fn u64s_to_u8s(u64s: [u64; 4]) -> [u8; 32] { - let mut bytes = [0u8; 32]; - bytes[..8].copy_from_slice(&u64s[0].to_le_bytes()); - bytes[8..16].copy_from_slice(&u64s[1].to_le_bytes()); - bytes[16..24].copy_from_slice(&u64s[2].to_le_bytes()); - bytes[24..].copy_from_slice(&u64s[3].to_le_bytes()); - bytes + fn hash_circuit>( + _cs: CS, + _bits: &[Boolean], + ) -> Result, SynthesisError> { + unimplemented!("PoseidonFunction cannot be used within Groth16 circuits") } - #[test] - fn test_path() { - let values = [ - PoseidonDomain(Fr::one().to_repr()), - PoseidonDomain(Fr::one().to_repr()), - PoseidonDomain(Fr::one().to_repr()), - PoseidonDomain(Fr::one().to_repr()), - ]; - - let t = MerkleTree::, U2>::new( - values.iter().copied(), - ) - .expect("merkle tree new failure"); - - let p = t.gen_proof(0).expect("gen_proof failure"); // create a proof for the first value =k Fr::one() - - assert_eq!(*p.path(), vec![0, 0]); - assert!(p - .validate::() - .expect("failed to validate")); + fn hash2_circuit>( + _cs: CS, + _a: &AllocatedNum, + _b: &AllocatedNum, + ) -> Result, SynthesisError> { + unimplemented!("PoseidonFunction cannot be used within Groth16 circuits") + } +} +impl HashFunction> for PoseidonFunction { + fn hash(data: &[u8]) -> PoseidonDomain { + shared_hash(data) } - // #[test] - // fn test_poseidon_quad() { - // let leaves = [Fr::one(), Fr::zero(), Fr::zero(), Fr::one()]; - - // assert_eq!(Fr::zero().to_repr(), shared_hash_frs(&leaves[..]).0); - // } - - #[test] - fn test_poseidon_hasher() { - let leaves = [ - PoseidonDomain(Fr::one().to_repr()), - PoseidonDomain(Fr::zero().to_repr()), - PoseidonDomain(Fr::zero().to_repr()), - PoseidonDomain(Fr::one().to_repr()), - ]; - - let t = MerkleTree::, U2>::new( - leaves.iter().copied(), - ) - .expect("merkle tree new failure"); - - assert_eq!(t.leafs(), 4); - - let mut a = 
PoseidonFunction::default(); - - assert_eq!(t.read_at(0).expect("read_at failure"), leaves[0]); - assert_eq!(t.read_at(1).expect("read_at failure"), leaves[1]); - assert_eq!(t.read_at(2).expect("read_at failure"), leaves[2]); - assert_eq!(t.read_at(3).expect("read_at failure"), leaves[3]); - - let i1 = a.node(leaves[0], leaves[1], 0); - a.reset(); - let i2 = a.node(leaves[2], leaves[3], 0); - a.reset(); - - assert_eq!(t.read_at(4).expect("read_at failure"), i1); - assert_eq!(t.read_at(5).expect("read_at failure"), i2); - - let root = a.node(i1, i2, 1); - a.reset(); + fn hash2(a: &PoseidonDomain, b: &PoseidonDomain) -> PoseidonDomain { + let preimage = [(*a).into(), (*b).into()]; + Poseidon::new_with_preimage(&preimage, &*POSEIDON_CONSTANTS_2_VESTA) + .hash() + .into() + } - assert_eq!( - t.read_at(4).expect("read_at failure").0, - u64s_to_u8s([ - 0xb339ff6079800b5e, - 0xec5907b3dc3094af, - 0x93c003cc74a24f26, - 0x042f94ffbe786bc3, - ]), + fn hash_md(input: &[PoseidonDomain]) -> PoseidonDomain { + assert!( + input.len() > 1, + "hash_md preimage must contain more than one element" ); - let expected = u64s_to_u8s([ - 0xefbb8be3e291e671, - 0x77cc72b8cb2b5ad2, - 0x30eb6385ae6b74ae, - 0x1effebb7b26ad9eb, - ]); - let actual = t.read_at(6).expect("read_at failure").0; - - assert_eq!(actual, expected); - assert_eq!(t.read_at(6).expect("read_at failure"), root); - } - - #[test] - fn test_as_ref() { - let cases: Vec<[u64; 4]> = vec![ - [0, 0, 0, 0], - [ - 14963070332212552755, - 2414807501862983188, - 16116531553419129213, - 6357427774790868134, - ], - ]; - - for case in cases.into_iter() { - let val = PoseidonDomain(u64s_to_u8s(case)); - - for _ in 0..100 { - assert_eq!(val.into_bytes(), val.into_bytes()); - } + let arity = PoseidonMDArity::to_usize(); + let mut p = Poseidon::new(&*POSEIDON_MD_CONSTANTS_VESTA); - let raw: &[u8] = val.as_ref(); + let fr_input: Vec = input.iter().map(|domain| (*domain).into()).collect(); - for (limb, bytes) in case.iter().zip(raw.chunks(8)) { 
- assert_eq!(&limb.to_le_bytes(), bytes); - } - } + fr_input[1..] + .chunks(arity - 1) + .fold(fr_input[0], |acc, frs| { + p.reset(); + // Calling `.expect()` will panic iff we call `.input()` more that `arity` number + // of times prior to resetting the hasher (i.e. if we exceed the arity of the + // Poseidon constants) or if `preimge.len() == 1`; we prevent both scenarios. + p.input(acc).expect("input failure"); + for fr in frs { + p.input(*fr).expect("input failure"); + } + p.hash() + }) + .into() } - #[test] - fn test_serialize() { - let val = PoseidonDomain(u64s_to_u8s([1, 2, 3, 4])); + fn hash_leaf_circuit>( + _cs: CS, + _left: &AllocatedNum, + _right: &AllocatedNum, + _height: usize, + ) -> Result, SynthesisError> { + unimplemented!("PoseidonFunction cannot be used within Groth16 circuits") + } - let ser = serde_json::to_string(&val) - .expect("Failed to serialize `PoseidonDomain` element to JSON string"); - let val_back = serde_json::from_str(&ser) - .expect("Failed to deserialize JSON string to `PoseidonnDomain`"); + fn hash_multi_leaf_circuit>( + _cs: CS, + _leaves: &[AllocatedNum], + _height: usize, + ) -> Result, SynthesisError> { + unimplemented!("PoseidonFunction cannot be used within Groth16 circuits") + } - assert_eq!(val, val_back); + fn hash_md_circuit>( + _cs: &mut CS, + _elements: &[AllocatedNum], + ) -> Result, SynthesisError> { + unimplemented!("PoseidonFunction cannot be used within Groth16 circuits") } - #[test] - fn test_hash_md() { - // let arity = PoseidonMDArity::to_usize(); - let n = 71; - let data = vec![PoseidonDomain(Fr::one().to_repr()); n]; - let hashed = PoseidonFunction::hash_md(&data); + fn hash_leaf_bits_circuit>( + _cs: CS, + _left: &[Boolean], + _right: &[Boolean], + _height: usize, + ) -> Result, SynthesisError> { + unimplemented!("PoseidonFunction cannot be used within Groth16 circuits") + } - assert_eq!( - hashed, - PoseidonDomain(u64s_to_u8s([ - 0x351c54133b332c90, - 0xc26f6d625f4e8195, - 0x5fd9623643ed9622, - 
0x59f42220e09ff6f7, - ])) - ); + fn hash_circuit>( + _cs: CS, + _bits: &[Boolean], + ) -> Result, SynthesisError> { + unimplemented!("PoseidonFunction cannot be used within Groth16 circuits") } - #[test] - fn test_hash_md_circuit() { - // let arity = PoseidonMDArity::to_usize(); - let n = 71; - let data = vec![PoseidonDomain(Fr::one().to_repr()); n]; - - let mut cs = TestConstraintSystem::::new(); - let circuit_data = (0..n) - .map(|n| { - AllocatedNum::alloc(cs.namespace(|| format!("input {}", n)), || Ok(Fr::one())) - .expect("alloc failure") - }) - .collect::>(); - let hashed = PoseidonFunction::hash_md(&data); - let hashed_fr = Fr::from_repr_vartime(hashed.0).expect("from_repr failure"); + fn hash2_circuit>( + _cs: CS, + _a: &AllocatedNum, + _b: &AllocatedNum, + ) -> Result, SynthesisError> { + unimplemented!("PoseidonFunction cannot be used within Groth16 circuits") + } +} - let circuit_hashed = PoseidonFunction::hash_md_circuit(&mut cs, circuit_data.as_slice()) - .expect("hash_md_circuit failure"); +#[derive(Default, Copy, Clone, Debug, PartialEq, Eq)] +pub struct PoseidonHasher { + _f: PhantomData, +} - assert!(cs.is_satisfied()); - let expected_constraints = 2_770; - let actual_constraints = cs.num_constraints(); +// Implementing `Hasher` for specific fields (rather than blanket implementing for all `F`) restricts +// users to using the fields which are compatible with `rust-fil-proofs`. 
+impl Hasher for PoseidonHasher { + type Domain = PoseidonDomain; + type Function = PoseidonFunction; - assert_eq!(expected_constraints, actual_constraints); + fn name() -> String { + "poseidon_hasher".into() + } +} +impl Hasher for PoseidonHasher { + type Domain = PoseidonDomain; + type Function = PoseidonFunction; - assert_eq!( - hashed_fr, - circuit_hashed.get_value().expect("get_value failure") - ); + fn name() -> String { + "poseidon_hasher_pallas".into() + } +} +impl Hasher for PoseidonHasher { + type Domain = PoseidonDomain; + type Function = PoseidonFunction; + + fn name() -> String { + "poseidon_hasher_vesta".into() } } diff --git a/filecoin-hashers/src/poseidon_types.rs b/filecoin-hashers/src/poseidon_types.rs index 510f16c8e4..01401a235a 100644 --- a/filecoin-hashers/src/poseidon_types.rs +++ b/filecoin-hashers/src/poseidon_types.rs @@ -1,9 +1,13 @@ use std::fmt::Debug; +use std::marker::PhantomData; use blstrs::Scalar as Fr; +use ff::PrimeField; use generic_array::typenum::{U0, U11, U16, U2, U24, U36, U4, U8}; use lazy_static::lazy_static; use neptune::{poseidon::PoseidonConstants, Arity}; +use pasta_curves::{Fp, Fq}; +use typemap::ShareMap; pub type PoseidonBinaryArity = U2; pub type PoseidonQuadArity = U4; @@ -26,6 +30,60 @@ lazy_static! 
{ pub static ref POSEIDON_CONSTANTS_11: PoseidonConstants:: = PoseidonConstants::new(); pub static ref POSEIDON_MD_CONSTANTS: PoseidonConstants:: = PoseidonConstants::new(); + pub static ref POSEIDON_CONSTANTS_2_PALLAS: PoseidonConstants:: = + PoseidonConstants::new(); + pub static ref POSEIDON_CONSTANTS_4_PALLAS: PoseidonConstants:: = + PoseidonConstants::new(); + pub static ref POSEIDON_CONSTANTS_8_PALLAS: PoseidonConstants:: = + PoseidonConstants::new(); + pub static ref POSEIDON_CONSTANTS_11_PALLAS: PoseidonConstants:: = + PoseidonConstants::new(); + pub static ref POSEIDON_MD_CONSTANTS_PALLAS: PoseidonConstants:: = + PoseidonConstants::new(); + pub static ref POSEIDON_CONSTANTS_2_VESTA: PoseidonConstants:: = + PoseidonConstants::new(); + pub static ref POSEIDON_CONSTANTS_4_VESTA: PoseidonConstants:: = + PoseidonConstants::new(); + pub static ref POSEIDON_CONSTANTS_8_VESTA: PoseidonConstants:: = + PoseidonConstants::new(); + pub static ref POSEIDON_CONSTANTS_11_VESTA: PoseidonConstants:: = + PoseidonConstants::new(); + pub static ref POSEIDON_MD_CONSTANTS_VESTA: PoseidonConstants:: = + PoseidonConstants::new(); + pub static ref POSEIDON_CONSTANTS: ShareMap = { + let mut tm = ShareMap::custom(); + + tm.insert::>(&*POSEIDON_CONSTANTS_2); + tm.insert::>(&*POSEIDON_CONSTANTS_4); + tm.insert::>(&*POSEIDON_CONSTANTS_8); + tm.insert::>(&*POSEIDON_CONSTANTS_11); + + tm.insert::>(&*POSEIDON_CONSTANTS_2_PALLAS); + tm.insert::>(&*POSEIDON_CONSTANTS_4_PALLAS); + tm.insert::>(&*POSEIDON_CONSTANTS_8_PALLAS); + tm.insert::>(&*POSEIDON_CONSTANTS_11_PALLAS); + + tm.insert::>(&*POSEIDON_CONSTANTS_2_VESTA); + tm.insert::>(&*POSEIDON_CONSTANTS_4_VESTA); + tm.insert::>(&*POSEIDON_CONSTANTS_8_VESTA); + tm.insert::>(&*POSEIDON_CONSTANTS_11_VESTA); + + tm + }; +} + +// Used as the key to lookup Poseidon constants for a field `F` and arity `A`. 
+pub struct FieldArity(PhantomData<(F, A)>) +where + F: PrimeField, + A: Arity; + +impl typemap::Key for FieldArity +where + F: PrimeField, + A: Arity, +{ + type Value = &'static PoseidonConstants; } pub trait PoseidonArity: Arity + Send + Sync + Clone + Debug { diff --git a/filecoin-hashers/src/sha256.rs b/filecoin-hashers/src/sha256.rs index 6e2d4d66e1..b08f6d2e24 100644 --- a/filecoin-hashers/src/sha256.rs +++ b/filecoin-hashers/src/sha256.rs @@ -1,164 +1,291 @@ +use std::cmp::Ordering; use std::fmt::{self, Debug, Formatter}; -use std::hash::Hasher as StdHasher; -use std::panic::panic_any; +use std::marker::PhantomData; -use anyhow::ensure; use bellperson::{ gadgets::{boolean::Boolean, multipack, num::AllocatedNum, sha256::sha256 as sha256_circuit}, ConstraintSystem, SynthesisError, }; use blstrs::Scalar as Fr; -use ff::{Field, PrimeField}; +use ff::PrimeField; use merkletree::{ hash::{Algorithm, Hashable}, merkle::Element, }; -use rand::RngCore; -use serde::{Deserialize, Serialize}; +use pasta_curves::{Fp, Fq}; +use serde::{Deserialize, Deserializer, Serialize, Serializer}; use sha2::{Digest, Sha256}; -use crate::types::{Domain, HashFunction, Hasher}; +use crate::{Domain, HashFunction, Hasher}; -#[derive(Default, Copy, Clone, Debug, PartialEq, Eq)] -pub struct Sha256Hasher {} - -impl Hasher for Sha256Hasher { - type Domain = Sha256Domain; - type Function = Sha256Function; +#[derive(Copy, Clone, Default)] +pub struct Sha256Domain { + pub state: [u8; 32], + _f: PhantomData, +} - fn name() -> String { - "sha256_hasher".into() +impl Debug for Sha256Domain { + fn fmt(&self, f: &mut Formatter<'_>) -> fmt::Result { + write!(f, "Sha256Domain({})", hex::encode(&self.state)) } } -#[derive(Default, Clone, Debug)] -pub struct Sha256Function(Sha256); +// Can't blanket `impl From for Sha256Domain where F: PrimeField` because it can conflict +// with `impl From<[u8; 32]> for Sha256Domain`, i.e. 
`[u8; 32]` is an external type which may +// already implement the external trait `PrimeField`, which causes a "conflicting implementation" +// compiler error. +impl From for Sha256Domain { + fn from(f: Fr) -> Self { + Sha256Domain { + state: f.to_repr(), + _f: PhantomData, + } + } +} +impl From for Sha256Domain { + fn from(f: Fp) -> Self { + Sha256Domain { + state: f.to_repr(), + _f: PhantomData, + } + } +} +impl From for Sha256Domain { + fn from(f: Fq) -> Self { + Sha256Domain { + state: f.to_repr(), + _f: PhantomData, + } + } +} -impl StdHasher for Sha256Function { - #[inline] - fn write(&mut self, msg: &[u8]) { - self.0.update(msg) +#[allow(clippy::from_over_into)] +impl Into for Sha256Domain { + fn into(self) -> Fr { + Fr::from_repr_vartime(self.state).expect("from_repr failure") + } +} +#[allow(clippy::from_over_into)] +impl Into for Sha256Domain { + fn into(self) -> Fp { + Fp::from_repr_vartime(self.state).expect("from_repr failure") } +} +#[allow(clippy::from_over_into)] +impl Into for Sha256Domain { + fn into(self) -> Fq { + Fq::from_repr_vartime(self.state).expect("from_repr failure") + } +} - #[inline] - fn finish(&self) -> u64 { - unreachable!("unused by Function -- should never be called") +// Currently, these panics serve as a stopgap to prevent accidental conversions of a Pasta field +// domains to/from a BLS12-381 scalar field domain. 
+impl From for Sha256Domain { + fn from(_f: Fr) -> Self { + panic!("cannot convert BLS12-381 scalar into Sha256Domain") + } +} +#[allow(clippy::from_over_into)] +impl Into for Sha256Domain { + fn into(self) -> Fr { + panic!("cannot convert Sha256Domain into BLS12-381 scalar"); + } +} +impl From for Sha256Domain { + fn from(_f: Fr) -> Self { + panic!("cannot convert BLS12-381 scalar into Sha256Domain") + } +} +#[allow(clippy::from_over_into)] +impl Into for Sha256Domain { + fn into(self) -> Fr { + panic!("cannot convert Sha256Domain into BLS12-381 scalar"); } } -#[derive(Copy, Clone, PartialEq, Eq, PartialOrd, Ord, Default, Serialize, Deserialize, Hash)] -pub struct Sha256Domain(pub [u8; 32]); +impl From<[u8; 32]> for Sha256Domain { + fn from(bytes: [u8; 32]) -> Self { + Sha256Domain { + state: bytes, + _f: PhantomData, + } + } +} -impl Debug for Sha256Domain { - fn fmt(&self, f: &mut Formatter<'_>) -> fmt::Result { - write!(f, "Sha256Domain({})", hex::encode(&self.0)) +impl AsRef<[u8]> for Sha256Domain { + fn as_ref(&self) -> &[u8] { + &self.state } } -impl AsRef for Sha256Domain { +impl AsRef for Sha256Domain { fn as_ref(&self) -> &Self { self } } -impl Sha256Domain { - fn trim_to_fr32(&mut self) { - // strip last two bits, to ensure result is in Fr. - self.0[31] &= 0b0011_1111; +// Implement comparison traits by hand because we have not bound `F` to have those traits. +impl PartialEq for Sha256Domain { + fn eq(&self, other: &Self) -> bool { + self.state == other.state } } -impl AsRef<[u8]> for Sha256Domain { - fn as_ref(&self) -> &[u8] { - &self.0[..] 
+impl Eq for Sha256Domain {} + +impl PartialOrd for Sha256Domain { + fn partial_cmp(&self, other: &Self) -> Option { + self.state.partial_cmp(&other.state) } } -impl Hashable for Sha256Domain { - fn hash(&self, state: &mut Sha256Function) { - state.write(self.as_ref()) +impl Ord for Sha256Domain { + fn cmp(&self, other: &Self) -> Ordering { + self.state.cmp(&other.state) } } -impl From for Sha256Domain { - fn from(val: Fr) -> Self { - Sha256Domain(val.to_repr()) +// Must add the trait bound `F: PrimeField` because `Element` requires that `F` implements `Clone`, +// `Send`, and `Sync`. +impl Element for Sha256Domain { + fn byte_len() -> usize { + 32 + } + + fn from_slice(bytes: &[u8]) -> Self { + assert_eq!(bytes.len(), Self::byte_len(), "invalid number of bytes"); + let mut state = [0u8; 32]; + state.copy_from_slice(bytes); + state.into() + } + + fn copy_to_slice(&self, bytes: &mut [u8]) { + bytes.copy_from_slice(&self.state); } } -impl From for Fr { - fn from(val: Sha256Domain) -> Self { - Fr::from_repr_vartime(val.0).expect("from_repr failure") +impl std::hash::Hash for Sha256Domain { + fn hash(&self, hasher: &mut H) { + std::hash::Hash::hash(&self.state, hasher); } } -impl Domain for Sha256Domain { - fn into_bytes(&self) -> Vec { - self.0.to_vec() +// Implement `serde` traits by hand because we have not bound `F` to have those traits. +impl Serialize for Sha256Domain { + fn serialize(&self, s: S) -> Result { + self.state.serialize(s) + } +} +impl<'de, F> Deserialize<'de> for Sha256Domain { + fn deserialize>(d: D) -> Result { + <[u8; 32]>::deserialize(d).map(Into::into) } +} - fn try_from_bytes(raw: &[u8]) -> anyhow::Result { - ensure!( - raw.len() == Sha256Domain::byte_len(), - "invalid number of bytes" - ); +// Implementing `Domain` for specific fields (rather than blanket implementing for all `F`) restricts +// users to using the fields which are compatible with `rust-fil-proofs`. 
+impl Domain for Sha256Domain { + type Field = Fr; +} +impl Domain for Sha256Domain { + type Field = Fp; +} +impl Domain for Sha256Domain { + type Field = Fq; +} - let mut res = Sha256Domain::default(); - res.0.copy_from_slice(&raw[0..Sha256Domain::byte_len()]); - Ok(res) +impl Sha256Domain { + // Strip the last (most-significant) two bits to ensure that we state within the ~256-bit field + // `F`; note the fields `Fr`, `Fp`, and `Fq` are each 255-bit fields which fully utilize 254 + // bits, i.e. `254 < log2(field_modulus) < 255`. + fn trim_to_fr32(&mut self) { + self.state[31] &= 0b0011_1111; } +} - fn write_bytes(&self, dest: &mut [u8]) -> anyhow::Result<()> { - ensure!( - dest.len() >= Sha256Domain::byte_len(), - "invalid number of bytes" - ); +#[derive(Clone, Debug, Default)] +pub struct Sha256Function { + hasher: Sha256, + _f: PhantomData, +} - dest[0..Sha256Domain::byte_len()].copy_from_slice(&self.0[..]); - Ok(()) +impl std::hash::Hasher for Sha256Function { + fn write(&mut self, msg: &[u8]) { + self.hasher.update(msg); } - fn random(rng: &mut R) -> Self { - // generating an Fr and converting it, to ensure we stay in the field - Fr::random(rng).into() + fn finish(&self) -> u64 { + unreachable!("unused by Function -- should never be called"); } } -impl Element for Sha256Domain { - fn byte_len() -> usize { - 32 +impl Hashable> for Sha256Domain { + fn hash(&self, hasher: &mut Sha256Function) { + as std::hash::Hasher>::write(hasher, self.as_ref()); } +} - fn from_slice(bytes: &[u8]) -> Self { - match Sha256Domain::try_from_bytes(bytes) { - Ok(res) => res, - Err(err) => panic_any(err), - } +// Must add the trait bound `F: PrimeField` because `Algorithm` requires that `F` implements `Clone` +// and `Default`. 
+impl Algorithm> for Sha256Function { + fn hash(&mut self) -> Sha256Domain { + let mut digest = [0u8; 32]; + digest.copy_from_slice(self.hasher.clone().finalize().as_ref()); + let mut trimmed = Sha256Domain { + state: digest, + _f: PhantomData, + }; + trimmed.trim_to_fr32(); + trimmed } - fn copy_to_slice(&self, bytes: &mut [u8]) { - bytes.copy_from_slice(&self.0); + fn reset(&mut self) { + self.hasher.reset(); + } + + fn leaf(&mut self, leaf: Sha256Domain) -> Sha256Domain { + leaf + } + + fn node( + &mut self, + left: Sha256Domain, + right: Sha256Domain, + _height: usize, + ) -> Sha256Domain { + left.hash(self); + right.hash(self); + self.hash() + } + + fn multi_node(&mut self, parts: &[Sha256Domain], _height: usize) -> Sha256Domain { + for part in parts { + part.hash(self); + } + self.hash() } } -impl HashFunction for Sha256Function { - fn hash(data: &[u8]) -> Sha256Domain { - let hashed = Sha256::digest(data); - let mut res = Sha256Domain::default(); - res.0.copy_from_slice(&hashed[..]); - res.trim_to_fr32(); - res +// Specialized implementation of `HashFunction` over the BLS12-381 scalar field `Fr` because `Fr` +// is the only field which is compatible with `HashFunction`'s Groth16 circuit interfaces. 
+impl HashFunction> for Sha256Function { + fn hash(data: &[u8]) -> Sha256Domain { + let mut digest = [0u8; 32]; + digest.copy_from_slice(Sha256::digest(data).as_ref()); + let mut trimmed: Sha256Domain = digest.into(); + trimmed.trim_to_fr32(); + trimmed } - fn hash2(a: &Sha256Domain, b: &Sha256Domain) -> Sha256Domain { - let hashed = Sha256::new() + fn hash2(a: &Sha256Domain, b: &Sha256Domain) -> Sha256Domain { + let mut digest = [0u8; 32]; + let hasher = Sha256::new() .chain(AsRef::<[u8]>::as_ref(a)) - .chain(AsRef::<[u8]>::as_ref(b)) - .finalize(); - let mut res = Sha256Domain::default(); - res.0.copy_from_slice(&hashed[..]); - res.trim_to_fr32(); - res + .chain(AsRef::<[u8]>::as_ref(b)); + digest.copy_from_slice(hasher.finalize().as_ref()); + let mut trimmed: Sha256Domain = digest.into(); + trimmed.trim_to_fr32(); + trimmed } fn hash_multi_leaf_circuit>( @@ -274,49 +401,173 @@ impl HashFunction for Sha256Function { } } -impl Algorithm for Sha256Function { - #[inline] - fn hash(&mut self) -> Sha256Domain { - let mut h = [0u8; 32]; - h.copy_from_slice(self.0.clone().finalize().as_ref()); - let mut dd = Sha256Domain::from(h); - dd.trim_to_fr32(); - dd +// Specialized implementation of `HashFunction` over the Pasta scalar fields `Fp` and `Fq` because +// those fields are incompatible with `HashFunction`'s Groth16 circuit interfaces. 
+impl HashFunction> for Sha256Function { + fn hash(data: &[u8]) -> Sha256Domain { + let mut digest = [0u8; 32]; + digest.copy_from_slice(Sha256::digest(data).as_ref()); + let mut trimmed: Sha256Domain = digest.into(); + trimmed.trim_to_fr32(); + trimmed } - #[inline] - fn reset(&mut self) { - self.0.reset(); + fn hash2(a: &Sha256Domain, b: &Sha256Domain) -> Sha256Domain { + let mut digest = [0u8; 32]; + let hasher = Sha256::new() + .chain(AsRef::<[u8]>::as_ref(a)) + .chain(AsRef::<[u8]>::as_ref(b)); + digest.copy_from_slice(hasher.finalize().as_ref()); + let mut trimmed: Sha256Domain = digest.into(); + trimmed.trim_to_fr32(); + trimmed } - fn leaf(&mut self, leaf: Sha256Domain) -> Sha256Domain { - leaf + fn hash_leaf_circuit>( + mut _cs: CS, + _left: &AllocatedNum, + _right: &AllocatedNum, + _height: usize, + ) -> Result, SynthesisError> { + unimplemented!("Sha256Function cannot be used within Groth16 circuits") } - fn node(&mut self, left: Sha256Domain, right: Sha256Domain, _height: usize) -> Sha256Domain { - left.hash(self); - right.hash(self); - self.hash() + fn hash_multi_leaf_circuit>( + mut _cs: CS, + _leaves: &[AllocatedNum], + _height: usize, + ) -> Result, SynthesisError> { + unimplemented!("Sha256Function cannot be used within Groth16 circuits") } - fn multi_node(&mut self, parts: &[Sha256Domain], _height: usize) -> Sha256Domain { - for part in parts { - part.hash(self) - } - self.hash() + fn hash_md_circuit>( + _cs: &mut CS, + _elements: &[AllocatedNum], + ) -> Result, SynthesisError> { + unimplemented!("Sha256Function cannot be used within Groth16 circuits") + } + + fn hash_leaf_bits_circuit>( + _cs: CS, + _left: &[Boolean], + _right: &[Boolean], + _height: usize, + ) -> Result, SynthesisError> { + unimplemented!("Sha256Function cannot be used within Groth16 circuits") + } + + fn hash_circuit>( + mut _cs: CS, + _bits: &[Boolean], + ) -> Result, SynthesisError> { + unimplemented!("Sha256Function cannot be used within Groth16 circuits") + } + + fn 
hash2_circuit>( + mut _cs: CS, + _a_num: &AllocatedNum, + _b_num: &AllocatedNum, + ) -> Result, SynthesisError> { + unimplemented!("Sha256Function cannot be used within Groth16 circuits") + } +} +impl HashFunction> for Sha256Function { + fn hash(data: &[u8]) -> Sha256Domain { + let mut digest = [0u8; 32]; + digest.copy_from_slice(Sha256::digest(data).as_ref()); + let mut trimmed: Sha256Domain = digest.into(); + trimmed.trim_to_fr32(); + trimmed + } + + fn hash2(a: &Sha256Domain, b: &Sha256Domain) -> Sha256Domain { + let mut digest = [0u8; 32]; + let hasher = Sha256::new() + .chain(AsRef::<[u8]>::as_ref(a)) + .chain(AsRef::<[u8]>::as_ref(b)); + digest.copy_from_slice(hasher.finalize().as_ref()); + let mut trimmed: Sha256Domain = digest.into(); + trimmed.trim_to_fr32(); + trimmed + } + + fn hash_leaf_circuit>( + mut _cs: CS, + _left: &AllocatedNum, + _right: &AllocatedNum, + _height: usize, + ) -> Result, SynthesisError> { + unimplemented!("Sha256Function cannot be used within Groth16 circuits") + } + + fn hash_multi_leaf_circuit>( + mut _cs: CS, + _leaves: &[AllocatedNum], + _height: usize, + ) -> Result, SynthesisError> { + unimplemented!("Sha256Function cannot be used within Groth16 circuits") + } + + fn hash_md_circuit>( + _cs: &mut CS, + _elements: &[AllocatedNum], + ) -> Result, SynthesisError> { + unimplemented!("Sha256Function cannot be used within Groth16 circuits") + } + + fn hash_leaf_bits_circuit>( + _cs: CS, + _left: &[Boolean], + _right: &[Boolean], + _height: usize, + ) -> Result, SynthesisError> { + unimplemented!("Sha256Function cannot be used within Groth16 circuits") } + + fn hash_circuit>( + mut _cs: CS, + _bits: &[Boolean], + ) -> Result, SynthesisError> { + unimplemented!("Sha256Function cannot be used within Groth16 circuits") + } + + fn hash2_circuit>( + mut _cs: CS, + _a_num: &AllocatedNum, + _b_num: &AllocatedNum, + ) -> Result, SynthesisError> { + unimplemented!("Sha256Function cannot be used within Groth16 circuits") + } +} + 
+#[derive(Default, Copy, Clone, Debug, PartialEq, Eq)] +pub struct Sha256Hasher { + _f: PhantomData, } -impl From<[u8; 32]> for Sha256Domain { - #[inline] - fn from(val: [u8; 32]) -> Self { - Sha256Domain(val) +// Implementing `Hasher` for specific fields (rather than blanket implementing for all `F`) +// restricts users to using the fields which are compatible with `rust-fil-proofs`. +impl Hasher for Sha256Hasher { + type Domain = Sha256Domain; + type Function = Sha256Function; + + fn name() -> String { + "sha256_hasher".into() } } +impl Hasher for Sha256Hasher { + type Domain = Sha256Domain; + type Function = Sha256Function; -impl From for [u8; 32] { - #[inline] - fn from(val: Sha256Domain) -> Self { - val.0 + fn name() -> String { + "sha256_hasher_pallas".into() + } +} +impl Hasher for Sha256Hasher { + type Domain = Sha256Domain; + type Function = Sha256Function; + + fn name() -> String { + "sha256_hasher_vesta".into() } } diff --git a/filecoin-hashers/src/types.rs b/filecoin-hashers/src/types.rs index 115c1d0db4..f0bd86de8c 100644 --- a/filecoin-hashers/src/types.rs +++ b/filecoin-hashers/src/types.rs @@ -4,12 +4,13 @@ use std::hash::Hash as StdHash; #[cfg(feature = "poseidon")] pub use crate::poseidon_types::*; +use anyhow::ensure; use bellperson::{ gadgets::{boolean::Boolean, num::AllocatedNum}, ConstraintSystem, SynthesisError, }; use blstrs::Scalar as Fr; -use ff::PrimeField; +use ff::{Field, PrimeField}; use merkletree::{ hash::{Algorithm as LightAlgorithm, Hashable as LightHashable}, merkle::Element, @@ -27,21 +28,48 @@ pub trait Domain: + Eq + Send + Sync + // TODO(jake): currently the `From + Into` trait bounds are used as a stopgap to prevent + // Pasta field domains from being used in GPU code, e.g. currently converting a + // `Sha256Domain` into an `Fr` panics. Remove theses trait bounds once Pasta fields are + // fully supported in `rust-fil-proofs`, e.g. GPU code. 
+ From - + From<::Repr> + Into + // Note that `Self::Field` may be `Fr`, in which case the trait bounds + // `From + Into` are redundant. + + From + + Into + + From<[u8; 32]> + Serialize + DeserializeOwned + Element + StdHash { + type Field: PrimeField; + #[allow(clippy::wrong_self_convention)] - fn into_bytes(&self) -> Vec; - fn try_from_bytes(raw: &[u8]) -> anyhow::Result; + fn into_bytes(&self) -> Vec { + self.as_ref().to_vec() + } + + fn try_from_bytes(bytes: &[u8]) -> anyhow::Result { + ensure!(bytes.len() == Self::byte_len(), "invalid number of bytes"); + let mut array = [0u8; 32]; + array.copy_from_slice(bytes); + Ok(array.into()) + } + /// Write itself into the given slice, LittleEndian bytes. - fn write_bytes(&self, _: &mut [u8]) -> anyhow::Result<()>; + fn write_bytes(&self, dest: &mut [u8]) -> anyhow::Result<()> { + let n = Self::byte_len(); + ensure!(dest.len() >= n, "invalid number of bytes"); + dest[..n].copy_from_slice(self.as_ref()); + Ok(()) + } - fn random(rng: &mut R) -> Self; + fn random(rng: &mut R) -> Self { + // Generating a field element then converting it ensures that we stay within the field. 
+ Self::Field::random(rng).into() + } } pub trait HashFunction: Clone + Debug + Send + Sync + LightAlgorithm { diff --git a/filecoin-proofs/src/api/fake_seal.rs b/filecoin-proofs/src/api/fake_seal.rs index d69b7ce7c4..71a541852f 100644 --- a/filecoin-proofs/src/api/fake_seal.rs +++ b/filecoin-proofs/src/api/fake_seal.rs @@ -4,6 +4,7 @@ use std::path::Path; use anyhow::{Context, Result}; use bincode::serialize; +use blstrs::Scalar as Fr; use filecoin_hashers::{Domain, Hasher}; use rand::{thread_rng, Rng}; use storage_proofs_core::{cache_key::CacheKey, merkle::MerkleTreeTrait}; @@ -18,7 +19,10 @@ pub fn fauxrep, S: AsRef, Tree: 'static + MerkleTreeTrait>( porep_config: PoRepConfig, cache_path: R, out_path: S, -) -> Result { +) -> Result +where + ::Domain: Domain, +{ let mut rng = thread_rng(); fauxrep_aux::<_, R, S, Tree>(&mut rng, porep_config, cache_path, out_path) } @@ -28,7 +32,10 @@ pub fn fauxrep_aux, T: AsRef, Tree: 'static + Merkl porep_config: PoRepConfig, cache_path: S, out_path: T, -) -> Result { +) -> Result +where + ::Domain: Domain, +{ let sector_bytes = PaddedBytesAmount::from(porep_config).0; { @@ -61,7 +68,10 @@ pub fn fauxrep_aux, T: AsRef, Tree: 'static + Merkl pub fn fauxrep2, S: AsRef, Tree: 'static + MerkleTreeTrait>( cache_path: R, existing_p_aux_path: S, -) -> Result { +) -> Result +where + ::Domain: Domain, +{ let mut rng = thread_rng(); let fake_comm_c = ::Domain::random(&mut rng); diff --git a/filecoin-proofs/src/api/mod.rs b/filecoin-proofs/src/api/mod.rs index 0af9b53bfb..f6600ee1da 100644 --- a/filecoin-proofs/src/api/mod.rs +++ b/filecoin-proofs/src/api/mod.rs @@ -4,7 +4,8 @@ use std::path::{Path, PathBuf}; use anyhow::{ensure, Context, Result}; use bincode::deserialize; -use filecoin_hashers::Hasher; +use blstrs::Scalar as Fr; +use filecoin_hashers::{Domain, Hasher}; use fr32::{write_unpadded, Fr32Reader}; use log::{info, trace}; use memmap::MmapOptions; @@ -86,7 +87,10 @@ pub fn get_unsealed_range + AsRef, Tree: 'static + Merkle 
ticket: Ticket, offset: UnpaddedByteIndex, num_bytes: UnpaddedBytesAmount, -) -> Result { +) -> Result +where + ::Domain: Domain, +{ info!("get_unsealed_range:start"); let f_out = File::create(&output_path) @@ -146,6 +150,7 @@ where R: Read, W: Write, Tree: 'static + MerkleTreeTrait, + ::Domain: Domain, { info!("unseal_range:start"); ensure!(comm_d != [0; 32], "Invalid all zero commitment (comm_d)"); @@ -213,6 +218,7 @@ where P: Into + AsRef, W: Write, Tree: 'static + MerkleTreeTrait, + ::Domain: Domain, { info!("unseal_range_mapped:start"); ensure!(comm_d != [0; 32], "Invalid all zero commitment (comm_d)"); @@ -279,6 +285,7 @@ where P: Into + AsRef, W: Write, Tree: 'static + MerkleTreeTrait, + ::Domain: Domain, { trace!("unseal_range_inner:start"); @@ -543,7 +550,10 @@ fn verify_store(config: &StoreConfig, arity: usize, required_configs: usize) -> } // Verifies if a LevelCacheStore specified by a config is consistent. -fn verify_level_cache_store(config: &StoreConfig) -> Result<()> { +fn verify_level_cache_store(config: &StoreConfig) -> Result<()> +where + ::Domain: Domain, +{ let store_path = StoreConfig::data_path(&config.path, &config.id); if !Path::new(&store_path).exists() { let required_configs = get_base_tree_count::(); @@ -630,6 +640,7 @@ pub fn validate_cache_for_precommit_phase2( where R: AsRef, T: AsRef, + ::Domain: Domain, { info!("validate_cache_for_precommit_phase2:start"); @@ -674,6 +685,7 @@ pub fn validate_cache_for_commit( where R: AsRef, T: AsRef, + ::Domain: Domain, { info!("validate_cache_for_commit:start"); diff --git a/filecoin-proofs/src/api/post_util.rs b/filecoin-proofs/src/api/post_util.rs index c734df82e0..ac681b802b 100644 --- a/filecoin-proofs/src/api/post_util.rs +++ b/filecoin-proofs/src/api/post_util.rs @@ -4,7 +4,8 @@ use std::path::Path; use anyhow::{anyhow, ensure, Context, Result}; use bincode::deserialize; -use filecoin_hashers::Hasher; +use blstrs::Scalar as Fr; +use filecoin_hashers::{Domain, Hasher}; use log::{debug, info}; 
use storage_proofs_core::{ cache_key::CacheKey, merkle::MerkleTreeTrait, proof::ProofScheme, sector::SectorId, @@ -22,7 +23,10 @@ use crate::{ }; // Ensure that any associated cached data persisted is discarded. -pub fn clear_cache(cache_dir: &Path) -> Result<()> { +pub fn clear_cache(cache_dir: &Path) -> Result<()> +where + ::Domain: Domain, +{ info!("clear_cache:start"); let t_aux = { @@ -43,7 +47,10 @@ pub fn clear_cache(cache_dir: &Path) -> Result<()> { // Ensure that any associated cached data persisted is discarded. pub fn clear_caches( replicas: &BTreeMap>, -) -> Result<()> { +) -> Result<()> +where + ::Domain: Domain, +{ info!("clear_caches:start"); for replica in replicas.values() { @@ -62,7 +69,10 @@ pub fn generate_fallback_sector_challenges( randomness: &ChallengeSeed, pub_sectors: &[SectorId], _prover_id: ProverId, -) -> Result>> { +) -> Result>> +where + ::Domain: Domain, +{ info!("generate_sector_challenges:start"); ensure!( post_config.typ == PoStType::Window || post_config.typ == PoStType::Winning, @@ -127,7 +137,10 @@ pub fn generate_single_vanilla_proof( sector_id: SectorId, replica: &PrivateReplicaInfo, challenges: &[u64], -) -> Result> { +) -> Result> +where + ::Domain: Domain, +{ info!("generate_single_vanilla_proof:start: {:?}", sector_id); let tree = &replica @@ -183,7 +196,10 @@ pub fn partition_vanilla_proofs( pub_inputs: &fallback::PublicInputs<::Domain>, partition_count: usize, vanilla_proofs: &[FallbackPoStSectorProof], -) -> Result>> { +) -> Result>> +where + ::Domain: Domain, +{ info!("partition_vanilla_proofs:start"); ensure!( post_config.typ == PoStType::Window || post_config.typ == PoStType::Winning, @@ -263,7 +279,10 @@ pub fn single_partition_vanilla_proofs( pub_params: &fallback::PublicParams, pub_inputs: &fallback::PublicInputs<::Domain>, vanilla_proofs: &[FallbackPoStSectorProof], -) -> Result> { +) -> Result> +where + ::Domain: Domain, +{ info!("single_partition_vanilla_proofs:start"); ensure!(pub_inputs.k.is_some(), "must 
have a partition index"); let partition_index = pub_inputs.k.expect("prechecked"); diff --git a/filecoin-proofs/src/api/seal.rs b/filecoin-proofs/src/api/seal.rs index e12a56564b..e12d6a162a 100644 --- a/filecoin-proofs/src/api/seal.rs +++ b/filecoin-proofs/src/api/seal.rs @@ -64,6 +64,7 @@ where R: AsRef, S: AsRef, T: AsRef, + ::Domain: Domain, { info!("seal_pre_commit_phase1:start: {:?}", sector_id); @@ -207,6 +208,7 @@ pub fn seal_pre_commit_phase2( where R: AsRef, S: AsRef, + ::Domain: Domain, { info!("seal_pre_commit_phase2:start"); @@ -332,7 +334,10 @@ pub fn seal_commit_phase1, Tree: 'static + MerkleTreeTrait>( seed: Ticket, pre_commit: SealPreCommitOutput, piece_infos: &[PieceInfo], -) -> Result> { +) -> Result> +where + ::Domain: Domain, +{ info!("seal_commit_phase1:start: {:?}", sector_id); // Sanity check all input path types. @@ -455,7 +460,10 @@ pub fn seal_commit_phase2( phase1_output: SealCommitPhase1Output, prover_id: ProverId, sector_id: SectorId, -) -> Result { +) -> Result +where + ::Domain: Domain, +{ info!("seal_commit_phase2:start: {:?}", sector_id); let SealCommitPhase1Output { @@ -568,7 +576,10 @@ pub fn get_seal_inputs( sector_id: SectorId, ticket: Ticket, seed: Ticket, -) -> Result>> { +) -> Result>> +where + ::Domain: Domain, +{ trace!("get_seal_inputs:start"); ensure!(comm_d != [0; 32], "Invalid all zero commitment (comm_d)"); @@ -724,7 +735,10 @@ pub fn aggregate_seal_commit_proofs( comm_rs: &[[u8; 32]], seeds: &[[u8; 32]], commit_outputs: &[SealCommitOutput], -) -> Result { +) -> Result +where + ::Domain: Domain, +{ info!("aggregate_seal_commit_proofs:start"); ensure!( @@ -809,7 +823,10 @@ pub fn verify_aggregate_seal_commit_proofs( comm_rs: &[[u8; 32]], seeds: &[[u8; 32]], commit_inputs: Vec>, -) -> Result { +) -> Result +where + ::Domain: Domain, +{ info!("verify_aggregate_seal_commit_proofs:start"); let aggregate_proof = @@ -923,7 +940,10 @@ pub fn verify_seal( ticket: Ticket, seed: Ticket, proof_vec: &[u8], -) -> Result { +) -> 
Result +where + ::Domain: Domain, +{ info!("verify_seal:start: {:?}", sector_id); ensure!(comm_d_in != [0; 32], "Invalid all zero commitment (comm_d)"); @@ -1020,7 +1040,10 @@ pub fn verify_batch_seal( tickets: &[Ticket], seeds: &[Ticket], proof_vecs: &[&[u8]], -) -> Result { +) -> Result +where + ::Domain: Domain, +{ info!("verify_batch_seal:start"); ensure!(!comm_r_ins.is_empty(), "Cannot prove empty batch"); let l = comm_r_ins.len(); diff --git a/filecoin-proofs/src/api/update.rs b/filecoin-proofs/src/api/update.rs index 340c85597a..66e224b1a6 100644 --- a/filecoin-proofs/src/api/update.rs +++ b/filecoin-proofs/src/api/update.rs @@ -4,6 +4,7 @@ use std::path::Path; use anyhow::{ensure, Context, Result}; use bincode::{deserialize, serialize}; +use blstrs::Scalar as Fr; use filecoin_hashers::{Domain, Hasher}; use generic_array::typenum::Unsigned; use log::{info, trace}; @@ -18,8 +19,9 @@ use storage_proofs_core::{ }; use storage_proofs_porep::stacked::{PersistentAux, TemporaryAux}; use storage_proofs_update::{ - constants::TreeDArity, constants::TreeRHasher, EmptySectorUpdate, EmptySectorUpdateCompound, - PartitionProof, PrivateInputs, PublicInputs, PublicParams, SetupParams, + constants::{TreeDArity, TreeRHasher}, + EmptySectorUpdate, EmptySectorUpdateCompound, PrivateInputs, PublicInputs, PublicParams, + SetupParams, }; use crate::{ @@ -27,15 +29,16 @@ use crate::{ constants::{DefaultPieceDomain, DefaultPieceHasher}, pieces::verify_pieces, types::{ - Commitment, EmptySectorUpdateEncoded, EmptySectorUpdateProof, PieceInfo, PoRepConfig, - SectorUpdateConfig, + Commitment, EmptySectorUpdateEncoded, EmptySectorUpdateProof, PartitionProof, PieceInfo, + PoRepConfig, SectorUpdateConfig, }, }; // Instantiates p_aux from the specified cache_dir for access to comm_c and comm_r_last -fn get_p_aux>( - cache_path: &Path, -) -> Result::Domain>> { +fn get_p_aux(cache_path: &Path) -> Result::Domain>> +where + Tree: 'static + MerkleTreeTrait>, +{ let p_aux_path = 
cache_path.join(CacheKey::PAux.to_string()); let p_aux_bytes = fs::read(&p_aux_path) .with_context(|| format!("could not read file p_aux={:?}", p_aux_path))?; @@ -45,10 +48,13 @@ fn get_p_aux>( Ok(p_aux) } -fn persist_p_aux>( +fn persist_p_aux( p_aux: &PersistentAux<::Domain>, cache_path: &Path, -) -> Result<()> { +) -> Result<()> +where + Tree: 'static + MerkleTreeTrait>, +{ let p_aux_path = cache_path.join(CacheKey::PAux.to_string()); let mut f_p_aux = File::create(&p_aux_path) .with_context(|| format!("could not create file p_aux={:?}", p_aux_path))?; @@ -62,9 +68,10 @@ fn persist_p_aux>( // Instantiates t_aux from the specified cache_dir for access to // labels and tree_d, tree_c, tree_r_last store configs -fn get_t_aux>( - cache_path: &Path, -) -> Result> { +fn get_t_aux(cache_path: &Path) -> Result> +where + Tree: 'static + MerkleTreeTrait>, +{ let t_aux_path = cache_path.join(CacheKey::TAux.to_string()); trace!("Instantiating TemporaryAux from {:?}", cache_path); let t_aux_bytes = fs::read(&t_aux_path) @@ -77,10 +84,13 @@ fn get_t_aux>( Ok(res) } -fn persist_t_aux>( +fn persist_t_aux( t_aux: &TemporaryAux, cache_path: &Path, -) -> Result<()> { +) -> Result<()> +where + Tree: 'static + MerkleTreeTrait>, +{ let t_aux_path = cache_path.join(CacheKey::TAux.to_string()); let mut f_t_aux = File::create(&t_aux_path) .with_context(|| format!("could not create file t_aux={:?}", t_aux_path))?; @@ -103,11 +113,14 @@ fn persist_t_aux>( // ...) 
// // Returns a pair of the new tree_d_config and tree_r_last configs -fn get_new_configs_from_t_aux_old>( +fn get_new_configs_from_t_aux_old( t_aux_old: &TemporaryAux, new_cache_path: &Path, nodes_count: usize, -) -> Result<(StoreConfig, StoreConfig)> { +) -> Result<(StoreConfig, StoreConfig)> +where + Tree: 'static + MerkleTreeTrait>, +{ let mut t_aux_new = t_aux_old.clone(); t_aux_new.set_cache_path(new_cache_path); @@ -139,7 +152,7 @@ fn get_new_configs_from_t_aux_old>( +pub fn encode_into( porep_config: PoRepConfig, new_replica_path: &Path, new_cache_path: &Path, @@ -147,7 +160,10 @@ pub fn encode_into>( sector_key_cache_path: &Path, staged_data_path: &Path, piece_infos: &[PieceInfo], -) -> Result { +) -> Result +where + Tree: 'static + MerkleTreeTrait>, +{ info!("encode_into:start"); let config = SectorUpdateConfig::from_porep_config(porep_config); @@ -158,7 +174,7 @@ pub fn encode_into>( get_new_configs_from_t_aux_old::(&t_aux, new_cache_path, config.nodes_count)?; let (comm_r_domain, comm_r_last_domain, comm_d_domain) = - EmptySectorUpdate::::encode_into( + EmptySectorUpdate::::encode_into( config.nodes_count, tree_d_new_config, tree_r_last_new_config, @@ -208,20 +224,23 @@ pub fn encode_into>( /// Reverses the encoding process and outputs the data into out_data_path. #[allow(clippy::too_many_arguments)] -pub fn decode_from>( +pub fn decode_from( config: SectorUpdateConfig, out_data_path: &Path, replica_path: &Path, sector_key_path: &Path, sector_key_cache_path: &Path, comm_d_new: Commitment, -) -> Result<()> { +) -> Result<()> +where + Tree: 'static + MerkleTreeTrait>, +{ info!("decode_from:start"); let p_aux = get_p_aux::(sector_key_cache_path)?; let nodes_count = config.nodes_count; - EmptySectorUpdate::::decode_from( + EmptySectorUpdate::::decode_from( nodes_count, out_data_path, replica_path, @@ -239,7 +258,7 @@ pub fn decode_from>( /// Removes encoded data and outputs the sector key. 
#[allow(clippy::too_many_arguments)] -pub fn remove_encoded_data>( +pub fn remove_encoded_data( config: SectorUpdateConfig, sector_key_path: &Path, sector_key_cache_path: &Path, @@ -247,7 +266,10 @@ pub fn remove_encoded_data replica_cache_path: &Path, data_path: &Path, comm_d_new: Commitment, -) -> Result<()> { +) -> Result<()> +where + Tree: 'static + MerkleTreeTrait>, +{ info!("remove_data:start"); let p_aux = get_p_aux::(replica_cache_path)?; @@ -257,7 +279,12 @@ pub fn remove_encoded_data get_new_configs_from_t_aux_old::(&t_aux, sector_key_cache_path, config.nodes_count)?; let nodes_count = config.nodes_count; - let tree_r_last_new = EmptySectorUpdate::::remove_encoded_data( + let tree_r_last_new = EmptySectorUpdate::< + Fr, + Tree::Arity, + Tree::SubTreeArity, + Tree::TopTreeArity, + >::remove_encoded_data( nodes_count, sector_key_path, sector_key_cache_path, @@ -283,7 +310,7 @@ pub fn remove_encoded_data /// Generate a single vanilla partition proof for a specified partition. #[allow(clippy::too_many_arguments)] -pub fn generate_single_partition_proof>( +pub fn generate_single_partition_proof( config: SectorUpdateConfig, partition_index: usize, comm_r_old: Commitment, @@ -293,11 +320,14 @@ pub fn generate_single_partition_proof Result> { +) -> Result> +where + Tree: 'static + MerkleTreeTrait>, +{ info!("generate_single_partition_proof:start"); - let comm_r_old_safe = ::Domain::try_from_bytes(&comm_r_old)?; - let comm_r_new_safe = ::Domain::try_from_bytes(&comm_r_new)?; + let comm_r_old_safe = as Hasher>::Domain::try_from_bytes(&comm_r_old)?; + let comm_r_new_safe = as Hasher>::Domain::try_from_bytes(&comm_r_new)?; let comm_d_new_safe = DefaultPieceDomain::try_from_bytes(&comm_d_new)?; @@ -309,7 +339,7 @@ pub fn generate_single_partition_proof(&t_aux_old, replica_cache_path, config.nodes_count)?; - let private_inputs: PrivateInputs = PrivateInputs { + let private_inputs = PrivateInputs { comm_c: p_aux_old.comm_c, tree_r_old_config: 
t_aux_old.tree_r_last_config, old_replica_path: sector_key_path.to_path_buf(), @@ -331,8 +361,12 @@ pub fn generate_single_partition_proof::prove(&public_params, &public_inputs, &private_inputs)?; + let partition_proof = EmptySectorUpdate::< + Fr, + Tree::Arity, + Tree::SubTreeArity, + Tree::TopTreeArity, + >::prove(&public_params, &public_inputs, &private_inputs)?; info!("generate_single_partition_proof:finish"); @@ -341,18 +375,21 @@ pub fn generate_single_partition_proof>( +pub fn verify_single_partition_proof( config: SectorUpdateConfig, partition_index: usize, - proof: PartitionProof, + proof: PartitionProof, comm_r_old: Commitment, comm_r_new: Commitment, comm_d_new: Commitment, -) -> Result { +) -> Result +where + Tree: 'static + MerkleTreeTrait>, +{ info!("verify_single_partition_proof:start"); - let comm_r_old_safe = ::Domain::try_from_bytes(&comm_r_old)?; - let comm_r_new_safe = ::Domain::try_from_bytes(&comm_r_new)?; + let comm_r_old_safe = as Hasher>::Domain::try_from_bytes(&comm_r_old)?; + let comm_r_new_safe = as Hasher>::Domain::try_from_bytes(&comm_r_new)?; let comm_d_new_safe = DefaultPieceDomain::try_from_bytes(&comm_d_new)?; @@ -362,7 +399,7 @@ pub fn verify_single_partition_proof::verify(&public_params, &public_inputs, &proof)?; + let valid = + EmptySectorUpdate::::verify( + &public_params, + &public_inputs, + &proof, + )?; info!("verify_single_partition_proof:finish"); @@ -379,7 +421,7 @@ pub fn verify_single_partition_proof>( +pub fn generate_partition_proofs( config: SectorUpdateConfig, comm_r_old: Commitment, comm_r_new: Commitment, @@ -388,11 +430,14 @@ pub fn generate_partition_proofs Result>> { +) -> Result>> +where + Tree: 'static + MerkleTreeTrait>, +{ info!("generate_partition_proofs:start"); - let comm_r_old_safe = ::Domain::try_from_bytes(&comm_r_old)?; - let comm_r_new_safe = ::Domain::try_from_bytes(&comm_r_new)?; + let comm_r_old_safe = as Hasher>::Domain::try_from_bytes(&comm_r_old)?; + let comm_r_new_safe = as 
Hasher>::Domain::try_from_bytes(&comm_r_new)?; let comm_d_new_safe = DefaultPieceDomain::try_from_bytes(&comm_d_new)?; @@ -401,7 +446,7 @@ pub fn generate_partition_proofs(sector_key_cache_path)?; - let public_inputs: storage_proofs_update::PublicInputs = PublicInputs { + let public_inputs = PublicInputs { k: usize::from(config.update_partitions), comm_r_old: comm_r_old_safe, comm_d_new: comm_d_new_safe, @@ -414,7 +459,7 @@ pub fn generate_partition_proofs(&t_aux_old, replica_cache_path, config.nodes_count)?; - let private_inputs: PrivateInputs = PrivateInputs { + let private_inputs = PrivateInputs { comm_c: p_aux_old.comm_c, tree_r_old_config: t_aux_old.tree_r_last_config, old_replica_path: sector_key_path.to_path_buf(), @@ -423,7 +468,12 @@ pub fn generate_partition_proofs::prove_all_partitions( + let partition_proofs = EmptySectorUpdate::< + Fr, + Tree::Arity, + Tree::SubTreeArity, + Tree::TopTreeArity, + >::prove_all_partitions( &public_params, &public_inputs, &private_inputs, @@ -436,24 +486,27 @@ pub fn generate_partition_proofs>( +pub fn verify_partition_proofs( config: SectorUpdateConfig, - proofs: &[PartitionProof], + proofs: &[PartitionProof], comm_r_old: Commitment, comm_r_new: Commitment, comm_d_new: Commitment, -) -> Result { +) -> Result +where + Tree: 'static + MerkleTreeTrait>, +{ info!("verify_partition_proofs:start"); - let comm_r_old_safe = ::Domain::try_from_bytes(&comm_r_old)?; - let comm_r_new_safe = ::Domain::try_from_bytes(&comm_r_new)?; + let comm_r_old_safe = as Hasher>::Domain::try_from_bytes(&comm_r_old)?; + let comm_r_new_safe = as Hasher>::Domain::try_from_bytes(&comm_r_new)?; let comm_d_new_safe = DefaultPieceDomain::try_from_bytes(&comm_d_new)?; let public_params: storage_proofs_update::PublicParams = PublicParams::from_sector_size(u64::from(config.sector_size)); - let public_inputs: storage_proofs_update::PublicInputs = PublicInputs { + let public_inputs = PublicInputs { k: usize::from(config.update_partitions), comm_r_old: 
comm_r_old_safe, comm_d_new: comm_d_new_safe, @@ -462,7 +515,7 @@ pub fn verify_partition_proofs::verify_all_partitions(&public_params, &public_inputs, proofs)?; + EmptySectorUpdate::::verify_all_partitions(&public_params, &public_inputs, proofs)?; info!("verify_partition_proofs:finish"); @@ -470,26 +523,27 @@ pub fn verify_partition_proofs, ->( +pub fn generate_empty_sector_update_proof_with_vanilla( porep_config: PoRepConfig, - vanilla_proofs: Vec>, + vanilla_proofs: Vec>, comm_r_old: Commitment, comm_r_new: Commitment, comm_d_new: Commitment, -) -> Result { +) -> Result +where + Tree: 'static + MerkleTreeTrait>, +{ info!("generate_empty_sector_update_proof_with_vanilla:start"); - let comm_r_old_safe = ::Domain::try_from_bytes(&comm_r_old)?; - let comm_r_new_safe = ::Domain::try_from_bytes(&comm_r_new)?; + let comm_r_old_safe = as Hasher>::Domain::try_from_bytes(&comm_r_old)?; + let comm_r_new_safe = as Hasher>::Domain::try_from_bytes(&comm_r_new)?; let comm_d_new_safe = DefaultPieceDomain::try_from_bytes(&comm_d_new)?; let config = SectorUpdateConfig::from_porep_config(porep_config); let partitions = usize::from(config.update_partitions); - let public_inputs: storage_proofs_update::PublicInputs = PublicInputs { + let public_inputs = PublicInputs { k: partitions, comm_r_old: comm_r_old_safe, comm_d_new: comm_d_new_safe, @@ -504,7 +558,10 @@ pub fn generate_empty_sector_update_proof_with_vanilla< partitions: Some(partitions), priority: false, }; - let pub_params_compound = EmptySectorUpdateCompound::::setup(&setup_params_compound)?; + let pub_params_compound = + EmptySectorUpdateCompound::::setup( + &setup_params_compound, + )?; let groth_params = get_empty_sector_update_params::(porep_config)?; let multi_proof = EmptySectorUpdateCompound::prove_with_vanilla( @@ -520,7 +577,7 @@ pub fn generate_empty_sector_update_proof_with_vanilla< } #[allow(clippy::too_many_arguments)] -pub fn generate_empty_sector_update_proof>( +pub fn generate_empty_sector_update_proof( 
porep_config: PoRepConfig, comm_r_old: Commitment, comm_r_new: Commitment, @@ -529,11 +586,14 @@ pub fn generate_empty_sector_update_proof Result { +) -> Result +where + Tree: 'static + MerkleTreeTrait>, +{ info!("generate_empty_sector_update_proof:start"); - let comm_r_old_safe = ::Domain::try_from_bytes(&comm_r_old)?; - let comm_r_new_safe = ::Domain::try_from_bytes(&comm_r_new)?; + let comm_r_old_safe = as Hasher>::Domain::try_from_bytes(&comm_r_old)?; + let comm_r_new_safe = as Hasher>::Domain::try_from_bytes(&comm_r_new)?; let comm_d_new_safe = DefaultPieceDomain::try_from_bytes(&comm_d_new)?; @@ -542,7 +602,7 @@ pub fn generate_empty_sector_update_proof(sector_key_cache_path)?; let partitions = usize::from(config.update_partitions); - let public_inputs: storage_proofs_update::PublicInputs = PublicInputs { + let public_inputs = PublicInputs { k: partitions, comm_r_old: comm_r_old_safe, comm_d_new: comm_d_new_safe, @@ -555,7 +615,7 @@ pub fn generate_empty_sector_update_proof(&t_aux_old, replica_cache_path, config.nodes_count)?; - let private_inputs: PrivateInputs = PrivateInputs { + let private_inputs = PrivateInputs { comm_c: p_aux_old.comm_c, tree_r_old_config: t_aux_old.tree_r_last_config, old_replica_path: sector_key_path.to_path_buf(), @@ -571,7 +631,10 @@ pub fn generate_empty_sector_update_proof::setup(&setup_params_compound)?; + let pub_params_compound = + EmptySectorUpdateCompound::::setup( + &setup_params_compound, + )?; let groth_params = get_empty_sector_update_params::(porep_config)?; let multi_proof = EmptySectorUpdateCompound::prove( @@ -586,23 +649,26 @@ pub fn generate_empty_sector_update_proof>( +pub fn verify_empty_sector_update_proof( porep_config: PoRepConfig, proof_bytes: &[u8], comm_r_old: Commitment, comm_r_new: Commitment, comm_d_new: Commitment, -) -> Result { +) -> Result +where + Tree: 'static + MerkleTreeTrait>, +{ info!("verify_empty_sector_update_proof:start"); - let comm_r_old_safe = ::Domain::try_from_bytes(&comm_r_old)?; - let 
comm_r_new_safe = ::Domain::try_from_bytes(&comm_r_new)?; + let comm_r_old_safe = as Hasher>::Domain::try_from_bytes(&comm_r_old)?; + let comm_r_new_safe = as Hasher>::Domain::try_from_bytes(&comm_r_new)?; let comm_d_new_safe = DefaultPieceDomain::try_from_bytes(&comm_d_new)?; let config = SectorUpdateConfig::from_porep_config(porep_config); let partitions = usize::from(config.update_partitions); - let public_inputs: storage_proofs_update::PublicInputs = PublicInputs { + let public_inputs = PublicInputs { k: partitions, comm_r_old: comm_r_old_safe, comm_d_new: comm_d_new_safe, @@ -616,7 +682,10 @@ pub fn verify_empty_sector_update_proof::setup(&setup_params_compound)?; + let pub_params_compound = + EmptySectorUpdateCompound::::setup( + &setup_params_compound, + )?; let verifying_key = get_empty_sector_update_verifying_key::(porep_config)?; let multi_proof = MultiProof::new_from_bytes(Some(partitions), proof_bytes, &verifying_key)?; diff --git a/filecoin-proofs/src/api/window_post.rs b/filecoin-proofs/src/api/window_post.rs index 0a3214b87f..d3d79b7a54 100644 --- a/filecoin-proofs/src/api/window_post.rs +++ b/filecoin-proofs/src/api/window_post.rs @@ -1,7 +1,8 @@ use std::collections::BTreeMap; use anyhow::{ensure, Context, Result}; -use filecoin_hashers::Hasher; +use blstrs::Scalar as Fr; +use filecoin_hashers::{Domain, Hasher}; use log::info; use storage_proofs_core::{ compound_proof::{self, CompoundProof}, @@ -33,7 +34,10 @@ pub fn generate_window_post_with_vanilla( randomness: &ChallengeSeed, prover_id: ProverId, vanilla_proofs: Vec>, -) -> Result { +) -> Result +where + ::Domain: Domain, +{ info!("generate_window_post_with_vanilla:start"); ensure!( post_config.typ == PoStType::Window, @@ -101,7 +105,10 @@ pub fn generate_window_post( randomness: &ChallengeSeed, replicas: &BTreeMap>, prover_id: ProverId, -) -> Result { +) -> Result +where + ::Domain: Domain, +{ info!("generate_window_post:start"); ensure!( post_config.typ == PoStType::Window, @@ -182,7 +189,10 
@@ pub fn verify_window_post( replicas: &BTreeMap, prover_id: ProverId, proof: &[u8], -) -> Result { +) -> Result +where + ::Domain: Domain, +{ info!("verify_window_post:start"); ensure!( @@ -253,7 +263,10 @@ pub fn generate_single_window_post_with_vanilla prover_id: ProverId, vanilla_proofs: Vec>, partition_index: usize, -) -> Result { +) -> Result +where + ::Domain: Domain, +{ info!("generate_single_window_post_with_vanilla:start"); ensure!( post_config.typ == PoStType::Window, diff --git a/filecoin-proofs/src/api/winning_post.rs b/filecoin-proofs/src/api/winning_post.rs index 6ab4d91fe2..0c7ee9d179 100644 --- a/filecoin-proofs/src/api/winning_post.rs +++ b/filecoin-proofs/src/api/winning_post.rs @@ -1,5 +1,6 @@ use anyhow::{ensure, Context, Result}; -use filecoin_hashers::Hasher; +use blstrs::Scalar as Fr; +use filecoin_hashers::{Domain, Hasher}; use log::info; use storage_proofs_core::{ compound_proof::{self, CompoundProof}, @@ -29,7 +30,10 @@ pub fn generate_winning_post_with_vanilla( randomness: &ChallengeSeed, prover_id: ProverId, vanilla_proofs: Vec>, -) -> Result { +) -> Result +where + ::Domain: Domain, +{ info!("generate_winning_post_with_vanilla:start"); ensure!( post_config.typ == PoStType::Winning, @@ -100,7 +104,10 @@ pub fn generate_winning_post( randomness: &ChallengeSeed, replicas: &[(SectorId, PrivateReplicaInfo)], prover_id: ProverId, -) -> Result { +) -> Result +where + ::Domain: Domain, +{ info!("generate_winning_post:start"); ensure!( post_config.typ == PoStType::Winning, @@ -192,7 +199,10 @@ pub fn generate_winning_post_sector_challenge( randomness: &ChallengeSeed, sector_set_size: u64, prover_id: Commitment, -) -> Result> { +) -> Result> +where + ::Domain: Domain, +{ info!("generate_winning_post_sector_challenge:start"); ensure!(sector_set_size != 0, "empty sector set is invalid"); ensure!( @@ -228,7 +238,10 @@ pub fn verify_winning_post( replicas: &[(SectorId, PublicReplicaInfo)], prover_id: ProverId, proof: &[u8], -) -> Result { +) -> 
Result +where + ::Domain: Domain, +{ info!("verify_winning_post:start"); ensure!( diff --git a/filecoin-proofs/src/caches.rs b/filecoin-proofs/src/caches.rs index c294de268b..75741313f2 100644 --- a/filecoin-proofs/src/caches.rs +++ b/filecoin-proofs/src/caches.rs @@ -3,7 +3,8 @@ use std::sync::{Arc, Mutex}; use anyhow::Result; use bellperson::groth16::{self, prepare_verifying_key}; -use blstrs::Bls12; +use blstrs::{Bls12, Scalar as Fr}; +use filecoin_hashers::{Domain, Hasher}; use lazy_static::lazy_static; use log::{info, trace}; use once_cell::sync::OnceCell; @@ -12,8 +13,8 @@ use storage_proofs_core::{compound_proof::CompoundProof, merkle::MerkleTreeTrait use storage_proofs_porep::stacked::{StackedCompound, StackedDrg}; use storage_proofs_post::fallback::{FallbackPoSt, FallbackPoStCircuit, FallbackPoStCompound}; use storage_proofs_update::{ - circuit::EmptySectorUpdateCircuit, compound::EmptySectorUpdateCompound, constants::TreeRHasher, - EmptySectorUpdate, PublicParams, + constants::TreeRHasher, EmptySectorUpdate, EmptySectorUpdateCircuit, EmptySectorUpdateCompound, + PublicParams, }; use crate::{ @@ -195,9 +196,11 @@ where ) } -pub fn get_stacked_params( - porep_config: PoRepConfig, -) -> Result> { +pub fn get_stacked_params(porep_config: PoRepConfig) -> Result> +where + Tree: 'static + MerkleTreeTrait, + ::Domain: Domain, +{ let public_params = public_params::( PaddedBytesAmount::from(porep_config), usize::from(PoRepProofPartitions::from(porep_config)), @@ -222,9 +225,11 @@ pub fn get_stacked_params( ) } -pub fn get_post_params( - post_config: &PoStConfig, -) -> Result> { +pub fn get_post_params(post_config: &PoStConfig) -> Result> +where + Tree: 'static + MerkleTreeTrait, + ::Domain: Domain, +{ match post_config.typ { PoStType::Winning => { let post_public_params = winning_post_public_params::(post_config)?; @@ -267,16 +272,19 @@ pub fn get_post_params( } } -pub fn get_empty_sector_update_params>( +pub fn get_empty_sector_update_params( porep_config: 
PoRepConfig, -) -> Result> { +) -> Result> +where + Tree: 'static + MerkleTreeTrait>, +{ let public_params: storage_proofs_update::PublicParams = PublicParams::from_sector_size(u64::from(porep_config.sector_size)); let parameters_generator = || { - as CompoundProof< - EmptySectorUpdate, - EmptySectorUpdateCircuit, + as CompoundProof< + EmptySectorUpdate, + EmptySectorUpdateCircuit, >>::groth_params::(None, &public_params) .map_err(Into::into) }; @@ -290,9 +298,13 @@ pub fn get_empty_sector_update_params( +pub fn get_stacked_verifying_key( porep_config: PoRepConfig, -) -> Result> { +) -> Result> +where + Tree: 'static + MerkleTreeTrait, + ::Domain: Domain, +{ let public_params = public_params( PaddedBytesAmount::from(porep_config), usize::from(PoRepProofPartitions::from(porep_config)), @@ -317,9 +329,13 @@ pub fn get_stacked_verifying_key( ) } -pub fn get_post_verifying_key( +pub fn get_post_verifying_key( post_config: &PoStConfig, -) -> Result> { +) -> Result> +where + Tree: 'static + MerkleTreeTrait, + ::Domain: Domain, +{ match post_config.typ { PoStType::Winning => { let post_public_params = winning_post_public_params::(post_config)?; @@ -362,10 +378,14 @@ pub fn get_post_verifying_key( } } -pub fn get_stacked_srs_key( +pub fn get_stacked_srs_key( porep_config: PoRepConfig, num_proofs_to_aggregate: usize, -) -> Result> { +) -> Result> +where + Tree: 'static + MerkleTreeTrait, + ::Domain: Domain, +{ let public_params = public_params( PaddedBytesAmount::from(porep_config), usize::from(PoRepProofPartitions::from(porep_config)), @@ -395,10 +415,14 @@ pub fn get_stacked_srs_key( ) } -pub fn get_stacked_srs_verifier_key( +pub fn get_stacked_srs_verifier_key( porep_config: PoRepConfig, num_proofs_to_aggregate: usize, -) -> Result> { +) -> Result> +where + Tree: 'static + MerkleTreeTrait, + ::Domain: Domain, +{ let public_params = public_params( PaddedBytesAmount::from(porep_config), usize::from(PoRepProofPartitions::from(porep_config)), @@ -430,18 +454,19 @@ pub fn 
get_stacked_srs_verifier_key( ) } -pub fn get_empty_sector_update_verifying_key< - Tree: 'static + MerkleTreeTrait, ->( +pub fn get_empty_sector_update_verifying_key( porep_config: PoRepConfig, -) -> Result> { +) -> Result> +where + Tree: 'static + MerkleTreeTrait>, +{ let public_params: storage_proofs_update::PublicParams = PublicParams::from_sector_size(u64::from(porep_config.sector_size)); let vk_generator = || { - let vk = as CompoundProof< - EmptySectorUpdate, - EmptySectorUpdateCircuit, + let vk = as CompoundProof< + EmptySectorUpdate, + EmptySectorUpdateCircuit, >>::verifying_key::(None, &public_params)?; Ok(prepare_verifying_key(&vk)) }; diff --git a/filecoin-proofs/src/constants.rs b/filecoin-proofs/src/constants.rs index 1a436cac60..4b9243a442 100644 --- a/filecoin-proofs/src/constants.rs +++ b/filecoin-proofs/src/constants.rs @@ -4,6 +4,7 @@ use std::sync::RwLock; pub use storage_proofs_core::drgraph::BASE_DEGREE as DRG_DEGREE; pub use storage_proofs_porep::stacked::EXP_DEGREE; +use blstrs::Scalar as Fr; use filecoin_hashers::{poseidon::PoseidonHasher, sha256::Sha256Hasher, Hasher}; use lazy_static::lazy_static; use storage_proofs_core::{ @@ -134,11 +135,11 @@ pub const MINIMUM_RESERVED_BYTES_FOR_PIECE_IN_FULLY_ALIGNED_SECTOR: u64 = pub const MIN_PIECE_SIZE: UnpaddedBytesAmount = UnpaddedBytesAmount(127); /// The hasher used for creating comm_d. -pub type DefaultPieceHasher = Sha256Hasher; +pub type DefaultPieceHasher = Sha256Hasher; pub type DefaultPieceDomain = ::Domain; /// The default hasher for merkle trees currently in use. 
-pub type DefaultTreeHasher = PoseidonHasher; +pub type DefaultTreeHasher = PoseidonHasher; pub type DefaultTreeDomain = ::Domain; pub type DefaultBinaryTree = BinaryMerkleTree; diff --git a/filecoin-proofs/src/parameters.rs b/filecoin-proofs/src/parameters.rs index bc3a31f9c1..1a5227d0bf 100644 --- a/filecoin-proofs/src/parameters.rs +++ b/filecoin-proofs/src/parameters.rs @@ -1,4 +1,6 @@ use anyhow::{ensure, Result}; +use blstrs::Scalar as Fr; +use filecoin_hashers::{Domain, Hasher}; use storage_proofs_core::{api_version::ApiVersion, proof::ProofScheme}; use storage_proofs_porep::stacked::{self, LayerChallenges, StackedDrg}; use storage_proofs_post::fallback::{self, FallbackPoSt}; @@ -19,7 +21,10 @@ pub fn public_params( partitions: usize, porep_id: [u8; 32], api_version: ApiVersion, -) -> Result> { +) -> Result> +where + ::Domain: Domain, +{ StackedDrg::::setup(&setup_params( sector_bytes, partitions, @@ -30,7 +35,10 @@ pub fn public_params( pub fn winning_post_public_params( post_config: &PoStConfig, -) -> Result { +) -> Result +where + ::Domain: Domain, +{ FallbackPoSt::::setup(&winning_post_setup_params(post_config)?) 
} @@ -61,7 +69,10 @@ pub fn winning_post_setup_params(post_config: &PoStConfig) -> Result( post_config: &PoStConfig, -) -> Result { +) -> Result +where + ::Domain: Domain, +{ FallbackPoSt::::setup(&window_post_setup_params(post_config)) } diff --git a/filecoin-proofs/src/types/mod.rs b/filecoin-proofs/src/types/mod.rs index 499121350d..8e215f78cc 100644 --- a/filecoin-proofs/src/types/mod.rs +++ b/filecoin-proofs/src/types/mod.rs @@ -2,7 +2,8 @@ pub use merkletree::store::StoreConfig; pub use storage_proofs_core::merkle::{MerkleProof, MerkleTreeTrait}; pub use storage_proofs_porep::stacked::{Labels, PersistentAux, TemporaryAux}; -use filecoin_hashers::Hasher; +use blstrs::Scalar as Fr; +use filecoin_hashers::{Domain, Hasher}; use serde::{Deserialize, Serialize}; use storage_proofs_core::{merkle::BinaryMerkleTree, sector::SectorId}; use storage_proofs_porep::stacked; @@ -59,7 +60,10 @@ pub struct SealPreCommitOutput { pub type VanillaSealProof = stacked::Proof; #[derive(Clone, Debug, Serialize, Deserialize)] -pub struct SealCommitPhase1Output { +pub struct SealCommitPhase1Output +where + ::Domain: Domain, +{ #[serde(bound( serialize = "VanillaSealProof: Serialize", deserialize = "VanillaSealProof: Deserialize<'de>" @@ -95,7 +99,7 @@ pub struct PartitionSnarkProof(pub Vec); pub type SnarkProof = Vec; pub type AggregateSnarkProof = Vec; pub type VanillaProof = fallback::Proof<::Proof>; -pub type PartitionProof = storage_proofs_update::vanilla::PartitionProof; +pub type PartitionProof = storage_proofs_update::vanilla::PartitionProof; #[derive(Debug, Clone, PartialEq)] #[repr(transparent)] diff --git a/filecoin-proofs/src/types/porep_config.rs b/filecoin-proofs/src/types/porep_config.rs index 4660c946b8..9c9f28ac69 100644 --- a/filecoin-proofs/src/types/porep_config.rs +++ b/filecoin-proofs/src/types/porep_config.rs @@ -1,6 +1,8 @@ use std::path::PathBuf; use anyhow::Result; +use blstrs::Scalar as Fr; +use filecoin_hashers::{Domain, Hasher}; use storage_proofs_core::{ 
api_version::ApiVersion, merkle::MerkleTreeTrait, @@ -55,7 +57,10 @@ impl From for SectorSize { impl PoRepConfig { /// Returns the cache identifier as used by `storage-proofs::parameter_cache`. - pub fn get_cache_identifier(&self) -> Result { + pub fn get_cache_identifier(&self) -> Result + where + ::Domain: Domain, + { let params = public_params::( self.sector_size.into(), self.partitions.into(), @@ -71,17 +76,26 @@ impl PoRepConfig { ) } - pub fn get_cache_metadata_path(&self) -> Result { + pub fn get_cache_metadata_path(&self) -> Result + where + ::Domain: Domain, + { let id = self.get_cache_identifier::()?; Ok(parameter_cache_metadata_path(&id)) } - pub fn get_cache_verifying_key_path(&self) -> Result { + pub fn get_cache_verifying_key_path(&self) -> Result + where + ::Domain: Domain, + { let id = self.get_cache_identifier::()?; Ok(parameter_cache_verifying_key_path(&id)) } - pub fn get_cache_params_path(&self) -> Result { + pub fn get_cache_params_path(&self) -> Result + where + ::Domain: Domain, + { let id = self.get_cache_identifier::()?; Ok(parameter_cache_params_path(&id)) } diff --git a/filecoin-proofs/src/types/post_config.rs b/filecoin-proofs/src/types/post_config.rs index e5828b6edb..02e91bb8d1 100644 --- a/filecoin-proofs/src/types/post_config.rs +++ b/filecoin-proofs/src/types/post_config.rs @@ -1,6 +1,8 @@ use std::path::PathBuf; use anyhow::Result; +use blstrs::Scalar as Fr; +use filecoin_hashers::{Domain, Hasher}; use storage_proofs_core::{ api_version::ApiVersion, merkle::MerkleTreeTrait, @@ -57,7 +59,10 @@ impl PoStConfig { } /// Returns the cache identifier as used by `storage-proofs::paramater_cache`. 
- pub fn get_cache_identifier(&self) -> Result { + pub fn get_cache_identifier(&self) -> Result + where + ::Domain: Domain, + { match self.typ { PoStType::Winning => { let params = winning_post_public_params::(self)?; @@ -78,17 +83,26 @@ impl PoStConfig { } } - pub fn get_cache_metadata_path(&self) -> Result { + pub fn get_cache_metadata_path(&self) -> Result + where + ::Domain: Domain, + { let id = self.get_cache_identifier::()?; Ok(parameter_cache_metadata_path(&id)) } - pub fn get_cache_verifying_key_path(&self) -> Result { + pub fn get_cache_verifying_key_path(&self) -> Result + where + ::Domain: Domain, + { let id = self.get_cache_identifier::()?; Ok(parameter_cache_verifying_key_path(&id)) } - pub fn get_cache_params_path(&self) -> Result { + pub fn get_cache_params_path(&self) -> Result + where + ::Domain: Domain, + { let id = self.get_cache_identifier::()?; Ok(parameter_cache_params_path(&id)) } diff --git a/filecoin-proofs/tests/api.rs b/filecoin-proofs/tests/api.rs index bbde1c5e66..1943a96f38 100644 --- a/filecoin-proofs/tests/api.rs +++ b/filecoin-proofs/tests/api.rs @@ -9,7 +9,7 @@ use bellperson::groth16; use bincode::serialize; use blstrs::{Bls12, Scalar as Fr}; use ff::Field; -use filecoin_hashers::Hasher; +use filecoin_hashers::{Domain, Hasher}; use filecoin_proofs::{ add_piece, aggregate_seal_commit_proofs, clear_cache, compute_comm_d, decode_from, encode_into, fauxrep_aux, generate_empty_sector_update_proof, @@ -293,7 +293,10 @@ fn seal_lifecycle( sector_size: u64, porep_id: &[u8; 32], api_version: ApiVersion, -) -> Result<()> { +) -> Result<()> +where + ::Domain: Domain, +{ let mut rng = XorShiftRng::from_seed(TEST_SEED); let prover_fr: DefaultTreeDomain = Fr::random(&mut rng).into(); let mut prover_id = [0u8; 32]; @@ -312,11 +315,14 @@ fn seal_lifecycle( Ok(()) } -fn seal_lifecycle_upgrade>( +fn seal_lifecycle_upgrade( sector_size: u64, porep_id: &[u8; 32], api_version: ApiVersion, -) -> Result<()> { +) -> Result<()> +where + Tree: 'static + 
MerkleTreeTrait>, +{ let mut rng = &mut XorShiftRng::from_seed(TEST_SEED); let prover_fr: DefaultTreeDomain = Fr::random(&mut rng).into(); let mut prover_id = [0u8; 32]; @@ -518,7 +524,10 @@ fn aggregate_proofs( porep_id: &[u8; 32], api_version: ApiVersion, num_proofs_to_aggregate: usize, -) -> Result { +) -> Result +where + ::Domain: Domain, +{ let mut rng = XorShiftRng::from_seed(TEST_SEED); let prover_fr: DefaultTreeDomain = Fr::random(&mut rng).into(); let mut prover_id = [0u8; 32]; @@ -645,7 +654,9 @@ fn run_resumable_seal( layer_to_delete: usize, porep_id: &[u8; 32], api_version: ApiVersion, -) { +) where + ::Domain: Domain, +{ init_logger(); let sector_size = SECTOR_SIZE_2_KIB; @@ -832,7 +843,10 @@ fn winning_post( sector_size: u64, fake: bool, api_version: ApiVersion, -) -> Result<()> { +) -> Result<()> +where + ::Domain: Domain, +{ let mut rng = XorShiftRng::from_seed(TEST_SEED); let prover_fr: DefaultTreeDomain = Fr::random(&mut rng).into(); @@ -1246,7 +1260,10 @@ fn partition_window_post( sector_count: usize, fake: bool, api_version: ApiVersion, -) -> Result<()> { +) -> Result<()> +where + ::Domain: Domain, +{ use anyhow::anyhow; let mut rng = XorShiftRng::from_seed(TEST_SEED); @@ -1373,7 +1390,10 @@ fn window_post( sector_count: usize, fake: bool, api_version: ApiVersion, -) -> Result<()> { +) -> Result<()> +where + ::Domain: Domain, +{ let mut rng = XorShiftRng::from_seed(TEST_SEED); let mut sectors = Vec::with_capacity(total_sector_count); @@ -1505,7 +1525,10 @@ fn run_seal_pre_commit_phase1( cache_dir: &TempDir, mut piece_file: &mut NamedTempFile, sealed_sector_file: &NamedTempFile, -) -> Result<(Vec, SealPreCommitPhase1Output)> { +) -> Result<(Vec, SealPreCommitPhase1Output)> +where + ::Domain: Domain, +{ let number_of_bytes_in_piece = UnpaddedBytesAmount::from(PaddedBytesAmount(config.sector_size.into())); @@ -1553,7 +1576,10 @@ fn generate_proof( seed: [u8; 32], pre_commit_output: &SealPreCommitOutput, piece_infos: &[PieceInfo], -) -> 
Result<(SealCommitOutput, Vec>, [u8; 32], [u8; 32])> { +) -> Result<(SealCommitOutput, Vec>, [u8; 32], [u8; 32])> +where + ::Domain: Domain, +{ let phase1_output = seal_commit_phase1::<_, Tree>( config, cache_dir_path, @@ -1605,7 +1631,10 @@ fn unseal( piece_infos: &[PieceInfo], piece_bytes: &[u8], commit_output: &SealCommitOutput, -) -> Result<()> { +) -> Result<()> +where + ::Domain: Domain, +{ let comm_d = pre_commit_output.comm_d; let comm_r = pre_commit_output.comm_r; @@ -1666,7 +1695,10 @@ fn proof_and_unseal( pre_commit_output: SealPreCommitOutput, piece_infos: &[PieceInfo], piece_bytes: &[u8], -) -> Result<()> { +) -> Result<()> +where + ::Domain: Domain, +{ let (commit_output, _commit_inputs, _seed, _comm_r) = generate_proof::( config, cache_dir_path, @@ -1701,7 +1733,10 @@ fn create_seal( skip_proof: bool, porep_id: &[u8; 32], api_version: ApiVersion, -) -> Result<(SectorId, NamedTempFile, Commitment, TempDir)> { +) -> Result<(SectorId, NamedTempFile, Commitment, TempDir)> +where + ::Domain: Domain, +{ init_logger(); let (mut piece_file, piece_bytes) = generate_piece_file(sector_size)?; @@ -1761,7 +1796,10 @@ fn create_seal_for_aggregation( prover_id: ProverId, porep_id: &[u8; 32], api_version: ApiVersion, -) -> Result<(SealCommitOutput, Vec>, [u8; 32], [u8; 32])> { +) -> Result<(SealCommitOutput, Vec>, [u8; 32], [u8; 32])> +where + ::Domain: Domain, +{ init_logger(); let (mut piece_file, _piece_bytes) = generate_piece_file(sector_size)?; @@ -1843,13 +1881,17 @@ fn compare_elements(path1: &Path, path2: &Path) -> Result<(), Error> { Ok(()) } -fn create_seal_for_upgrade>( +fn create_seal_for_upgrade( rng: &mut R, sector_size: u64, prover_id: ProverId, porep_id: &[u8; 32], api_version: ApiVersion, -) -> Result<(SectorId, NamedTempFile, Commitment, TempDir)> { +) -> Result<(SectorId, NamedTempFile, Commitment, TempDir)> +where + R: Rng, + Tree: 'static + MerkleTreeTrait>, +{ init_logger(); let (mut piece_file, _piece_bytes) = 
generate_piece_file(sector_size)?; @@ -2079,7 +2121,10 @@ fn create_fake_seal( sector_size: u64, porep_id: &[u8; 32], api_version: ApiVersion, -) -> Result<(SectorId, NamedTempFile, Commitment, TempDir)> { +) -> Result<(SectorId, NamedTempFile, Commitment, TempDir)> +where + ::Domain: Domain, +{ init_logger(); let sealed_sector_file = NamedTempFile::new()?; diff --git a/fr32/Cargo.toml b/fr32/Cargo.toml index 3c9b3fa329..2eac95f9db 100644 --- a/fr32/Cargo.toml +++ b/fr32/Cargo.toml @@ -20,6 +20,7 @@ blstrs = "0.4.0" bitvec = "0.17" criterion = "0.3" itertools = "0.9" +pasta_curves = "0.3.0" pretty_assertions = "0.6.1" rand = "0.8" rand_xorshift = "0.3" diff --git a/fr32/src/reader.rs b/fr32/src/reader.rs index 78ea9239bc..159ce56fcd 100644 --- a/fr32/src/reader.rs +++ b/fr32/src/reader.rs @@ -192,7 +192,9 @@ mod tests { use std::io::Cursor; use bitvec::{order::Lsb0 as LittleEndian, vec::BitVec}; + use ff::PrimeField; use itertools::Itertools; + use pasta_curves::{Fp, Fq}; use pretty_assertions::assert_eq; use rand::random; @@ -356,14 +358,29 @@ mod tests { fn validate_fr32(bytes: &[u8]) { let chunks = (bytes.len() as f64 / 32_f64).ceil() as usize; for (i, chunk) in bytes.chunks(32).enumerate() { - let _ = bytes_into_fr(chunk).unwrap_or_else(|_| { - panic!( - "chunk {}/{} cannot be converted to valid Fr: {:?}", - i + 1, - chunks, - chunk - ) - }); + assert!( + bytes_into_fr(chunk).is_ok(), + "chunk {}/{} cannot be converted to valid Fr: {:?}", + i + 1, + chunks, + chunk, + ); + let mut repr = [0u8; 32]; + repr.copy_from_slice(chunk); + assert!( + Fp::from_repr_vartime(repr).is_some(), + "chunk {}/{} cannot be converted to valid Fp (Pallas): {:?}", + i + i, + chunks, + chunk, + ); + assert!( + Fq::from_repr_vartime(repr).is_some(), + "chunk {}/{} cannot be converted to valid Fq (Vesta): {:?}", + i + i, + chunks, + chunk, + ); } } diff --git a/storage-proofs-core/Cargo.toml b/storage-proofs-core/Cargo.toml index dc475daa4f..b23cd02d22 100644 --- 
a/storage-proofs-core/Cargo.toml +++ b/storage-proofs-core/Cargo.toml @@ -56,6 +56,7 @@ rand_xorshift = "0.3.0" pretty_assertions = "0.6.1" sha2raw = { path = "../sha2raw", version = "^6.0.0"} filecoin-hashers = { path = "../filecoin-hashers", version = "^6.0.0", default-features = false, features = ["blake2s", "sha256", "poseidon"] } +pasta_curves = "0.3.0" [features] default = ["opencl"] diff --git a/storage-proofs-core/benches/drgraph.rs b/storage-proofs-core/benches/drgraph.rs index 2866c98461..df0821ee71 100644 --- a/storage-proofs-core/benches/drgraph.rs +++ b/storage-proofs-core/benches/drgraph.rs @@ -1,30 +1,46 @@ +use blstrs::Scalar as Fr; use criterion::{black_box, criterion_group, criterion_main, Criterion}; -use filecoin_hashers::poseidon::PoseidonHasher; +use filecoin_hashers::{poseidon::PoseidonHasher, Hasher}; +use pasta_curves::{Fp, Fq}; use storage_proofs_core::{ api_version::ApiVersion, drgraph::{BucketGraph, Graph, BASE_DEGREE}, }; #[allow(clippy::unit_arg)] -fn drgraph(c: &mut Criterion) { - let params = vec![12, 24, 128, 1024]; +fn bench_for_hasher(c: &mut Criterion, hasher_name: &str) { + // Graph sizes to bench. + let nodes = vec![12, 24, 128, 1024]; + + // The node to generate parents for; DRG parent-gen for the first and second nodes (node + // indexes `0` and `1`) is different than parent-gen for all other nodes (node-indexes `>= 2`). 
+ let child: usize = 2; - let mut group = c.benchmark_group("sample"); - for n in params { - group.bench_function(format!("bucket/m=6-{}", n), |b| { - let graph = - BucketGraph::::new(n, BASE_DEGREE, 0, [32; 32], ApiVersion::V1_1_0) - .unwrap(); + let mut group = c.benchmark_group("drg-parent-gen"); + for n in nodes { + group.bench_function( + format!("deg={}-nodes={}-{}", BASE_DEGREE, n, hasher_name), + |b| { + let graph = + BucketGraph::::new(n, BASE_DEGREE, 0, [32; 32], ApiVersion::V1_1_0).unwrap(); - b.iter(|| { - let mut parents = vec![0; 6]; - black_box(graph.parents(2, &mut parents).unwrap()); - }) - }); + b.iter(|| { + let mut parents = vec![0; BASE_DEGREE]; + black_box(graph.parents(child, &mut parents).unwrap()); + }) + }, + ); } group.finish(); } +#[allow(clippy::unit_arg)] +fn drgraph(c: &mut Criterion) { + bench_for_hasher::>(c, "bls"); + bench_for_hasher::>(c, "pallas"); + bench_for_hasher::>(c, "vesta"); +} + criterion_group!(benches, drgraph); criterion_main!(benches); diff --git a/storage-proofs-core/benches/merkle.rs b/storage-proofs-core/benches/merkle.rs index fc09497185..82d9f87d73 100644 --- a/storage-proofs-core/benches/merkle.rs +++ b/storage-proofs-core/benches/merkle.rs @@ -1,12 +1,11 @@ -use anyhow::Result; +use blstrs::Scalar as Fr; use criterion::{black_box, criterion_group, criterion_main, Criterion}; -use filecoin_hashers::{ - poseidon::PoseidonDomain, poseidon::PoseidonHasher, sha256::Sha256Hasher, Domain, -}; -use rand::{thread_rng, Rng}; +use filecoin_hashers::{poseidon::PoseidonHasher, sha256::Sha256Hasher, Domain, Hasher}; +use pasta_curves::{Fp, Fq}; +use rand::thread_rng; use storage_proofs_core::merkle::{create_base_merkle_tree, BinaryMerkleTree}; -fn merkle_benchmark_sha256(c: &mut Criterion) { +fn bench_with_hasher(c: &mut Criterion, hasher_name: &str) { let params = if cfg!(feature = "big-sector-sizes-bench") { vec![128, 1024, 1_048_576] } else { @@ -15,13 +14,14 @@ fn merkle_benchmark_sha256(c: &mut Criterion) { let mut 
group = c.benchmark_group("merkletree-binary"); for n_nodes in params { - group.bench_function(format!("sha256-{}", n_nodes), |b| { + group.bench_function(format!("nodes={}-{}", n_nodes, hasher_name), |b| { let mut rng = thread_rng(); - let data: Vec = (0..32 * n_nodes).map(|_| rng.gen()).collect(); + let data: Vec = (0..n_nodes) + .flat_map(|_| H::Domain::random(&mut rng).into_bytes()) + .collect(); b.iter(|| { black_box( - create_base_merkle_tree::>(None, n_nodes, &data) - .unwrap(), + create_base_merkle_tree::>(None, n_nodes, &data).unwrap(), ) }) }); @@ -30,40 +30,15 @@ fn merkle_benchmark_sha256(c: &mut Criterion) { group.finish(); } -fn merkle_benchmark_poseidon(c: &mut Criterion) { - let params = if cfg!(feature = "big-sector-sizes-bench") { - vec![64, 128, 1024, 1_048_576] - } else { - vec![64, 128, 1024] - }; - - let mut group = c.benchmark_group("merkletree-binary"); - for n_nodes in params { - group.bench_function(format!("poseidon-{}", n_nodes), |b| { - let mut rng = thread_rng(); - let mut data: Vec = Vec::with_capacity(32 * n_nodes); - (0..n_nodes) - .into_iter() - .try_for_each(|_| -> Result<()> { - let node = PoseidonDomain::random(&mut rng); - data.extend(node.into_bytes()); - Ok(()) - }) - .expect("failed to generate data"); +fn merkle_benchmark(c: &mut Criterion) { + bench_with_hasher::>(c, "sha256-bls"); + bench_with_hasher::>(c, "sha256-pallas"); + bench_with_hasher::>(c, "sha256-vesta"); - b.iter(|| { - black_box( - create_base_merkle_tree::>( - None, n_nodes, &data, - ) - .unwrap(), - ) - }) - }); - } - - group.finish(); + bench_with_hasher::>(c, "poseidon-bls"); + bench_with_hasher::>(c, "poseidon-pallas"); + bench_with_hasher::>(c, "poseidon-vesta"); } -criterion_group!(benches, merkle_benchmark_sha256, merkle_benchmark_poseidon); +criterion_group!(benches, merkle_benchmark); criterion_main!(benches); diff --git a/storage-proofs-core/src/drgraph.rs b/storage-proofs-core/src/drgraph.rs index 27757139c0..fe52952524 100644 --- 
a/storage-proofs-core/src/drgraph.rs +++ b/storage-proofs-core/src/drgraph.rs @@ -255,12 +255,14 @@ pub fn derive_drg_seed(porep_id: PoRepID) -> [u8; 28] { mod tests { use super::*; + use blstrs::Scalar as Fr; use filecoin_hashers::{ blake2s::Blake2sHasher, poseidon::PoseidonHasher, sha256::Sha256Hasher, }; use generic_array::typenum::{U0, U2, U4, U8}; use memmap::{MmapMut, MmapOptions}; use merkletree::store::StoreConfig; + use pasta_curves::{Fp, Fq}; use crate::merkle::{ create_base_merkle_tree, DiskStore, MerkleProofTrait, MerkleTreeTrait, MerkleTreeWrapper, @@ -351,12 +353,16 @@ mod tests { #[test] fn graph_bucket_sha256() { - graph_bucket::(); + graph_bucket::>(); + graph_bucket::>(); + graph_bucket::>(); } #[test] fn graph_bucket_blake2s() { - graph_bucket::(); + graph_bucket::>(); + graph_bucket::>(); + graph_bucket::>(); } fn gen_proof(config: Option) { @@ -381,36 +387,50 @@ mod tests { #[test] fn gen_proof_poseidon_binary() { - gen_proof::(None); + gen_proof::, U2>(None); + gen_proof::, U2>(None); + gen_proof::, U2>(None); } #[test] fn gen_proof_sha256_binary() { - gen_proof::(None); + gen_proof::, U2>(None); + gen_proof::, U2>(None); + gen_proof::, U2>(None); } #[test] fn gen_proof_blake2s_binary() { - gen_proof::(None); + gen_proof::, U2>(None); + gen_proof::, U2>(None); + gen_proof::, U2>(None); } #[test] fn gen_proof_poseidon_quad() { - gen_proof::(None); + gen_proof::, U4>(None); + gen_proof::, U4>(None); + gen_proof::, U4>(None); } #[test] fn gen_proof_sha256_quad() { - gen_proof::(None); + gen_proof::, U4>(None); + gen_proof::, U4>(None); + gen_proof::, U4>(None); } #[test] fn gen_proof_blake2s_quad() { - gen_proof::(None); + gen_proof::, U4>(None); + gen_proof::, U4>(None); + gen_proof::, U4>(None); } #[test] fn gen_proof_poseidon_oct() { - gen_proof::(None); + gen_proof::, U8>(None); + gen_proof::, U8>(None); + gen_proof::, U8>(None); } } diff --git a/storage-proofs-core/src/gadgets/por.rs b/storage-proofs-core/src/gadgets/por.rs index 
dfa3aa0be5..d09e0326e1 100644 --- a/storage-proofs-core/src/gadgets/por.rs +++ b/storage-proofs-core/src/gadgets/por.rs @@ -11,7 +11,7 @@ use bellperson::{ Circuit, ConstraintSystem, SynthesisError, }; use blstrs::Scalar as Fr; -use filecoin_hashers::{HashFunction, Hasher, PoseidonArity}; +use filecoin_hashers::{Domain, HashFunction, Hasher, PoseidonArity}; use generic_array::typenum::Unsigned; use crate::{ @@ -33,7 +33,10 @@ use crate::{ /// * `auth_path` - The authentication path of the leaf in the tree. /// * `root` - The merkle root of the tree. /// -pub struct PoRCircuit { +pub struct PoRCircuit +where + ::Domain: Domain, +{ value: Root, auth_path: AuthPath, root: Root, @@ -47,7 +50,9 @@ pub struct AuthPath< U: 'static + PoseidonArity, V: 'static + PoseidonArity, W: 'static + PoseidonArity, -> { +> where + H::Domain: Domain, +{ base: SubPath, sub: SubPath, top: SubPath, @@ -59,6 +64,8 @@ impl< V: 'static + PoseidonArity, W: 'static + PoseidonArity, > From>, Option)>> for AuthPath +where + H::Domain: Domain, { fn from(mut base_opts: Vec<(Vec>, Option)>) -> Self { let has_top = W::to_usize() > 0; @@ -119,19 +126,28 @@ impl< } #[derive(Debug, Clone)] -struct SubPath { +struct SubPath +where + H::Domain: Domain, +{ path: Vec>, } #[derive(Debug, Clone)] -struct PathElement { +struct PathElement +where + H::Domain: Domain, +{ hashes: Vec>, index: Option, _a: PhantomData, _h: PhantomData, } -impl SubPath { +impl SubPath +where + H::Domain: Domain, +{ fn synthesize>( self, mut cs: CS, @@ -193,7 +209,10 @@ impl SubPath { } } -impl AuthPath { +impl AuthPath +where + H::Domain: Domain, +{ pub fn blank(leaves: usize) -> Self { let has_sub = V::to_usize() > 0; let has_top = W::to_usize() > 0; @@ -239,11 +258,17 @@ impl AuthPath CircuitComponent for PoRCircuit { +impl CircuitComponent for PoRCircuit +where + ::Domain: Domain, +{ type ComponentPrivateInputs = Option>; } -pub struct PoRCompound { +pub struct PoRCompound +where + ::Domain: Domain, +{ _tree: PhantomData, } @@ 
-259,6 +284,8 @@ pub fn challenge_into_auth_path_bits(challenge: usize, leaves: usize) -> Vec, P: ParameterSetMetadata, Tree: MerkleTreeTrait> CacheableParameters for PoRCompound +where + ::Domain: Domain, { fn cache_prefix() -> String { format!("proof-of-retrievability-{}", Tree::display()) @@ -268,6 +295,8 @@ impl, P: ParameterSetMetadata, Tree: MerkleTreeTrait> CacheablePa // can only implment for Bls12 because por is not generic over the engine. impl<'a, Tree: 'static + MerkleTreeTrait> CompoundProof<'a, PoR, PoRCircuit> for PoRCompound +where + ::Domain: Domain, { fn circuit<'b>( public_inputs: & as ProofScheme<'a>>::PublicInputs, @@ -336,7 +365,10 @@ impl<'a, Tree: 'static + MerkleTreeTrait> CompoundProof<'a, PoR, PoRCircui } } -impl<'a, Tree: MerkleTreeTrait> Circuit for PoRCircuit { +impl<'a, Tree: MerkleTreeTrait> Circuit for PoRCircuit +where + ::Domain: Domain, +{ /// # Public Inputs /// /// This circuit expects the following public inputs. @@ -418,7 +450,10 @@ impl<'a, Tree: MerkleTreeTrait> Circuit for PoRCircuit { } } -impl<'a, Tree: MerkleTreeTrait> PoRCircuit { +impl<'a, Tree: MerkleTreeTrait> PoRCircuit +where + ::Domain: Domain, +{ pub fn new(proof: Tree::Proof, private: bool) -> Self { PoRCircuit:: { value: Root::Val(Some(proof.leaf().into())), @@ -464,6 +499,7 @@ pub fn por_no_challenge_input( ) -> Result<(), SynthesisError> where Tree: MerkleTreeTrait, + ::Domain: Domain, CS: ConstraintSystem, { let base_arity = Tree::Arity::to_usize(); @@ -634,10 +670,13 @@ mod tests { #[test] #[ignore] // Slow test – run only when compiled for release. 
fn por_test_compound_poseidon_base_8() { - por_compound::>(); + por_compound::, U8>>(); } - fn por_compound() { + fn por_compound() + where + ::Domain: Domain, + { let mut rng = XorShiftRng::from_seed(TEST_SEED); let leaves = 64 * get_base_tree_count::(); @@ -699,81 +738,81 @@ mod tests { #[test] fn test_por_circuit_blake2s_base_2() { - test_por_circuit::>(3, 129_135); + test_por_circuit::, U2>>(3, 129_135); } #[test] fn test_por_circuit_sha256_base_2() { - test_por_circuit::>(3, 272_295); + test_por_circuit::, U2>>(3, 272_295); } #[test] fn test_por_circuit_poseidon_base_2() { - test_por_circuit::>(3, 1_887); + test_por_circuit::, U2>>(3, 1_887); } #[test] fn test_por_circuit_blake2s_base_4() { - test_por_circuit::>(3, 130_296); + test_por_circuit::, U4>>(3, 130_296); } #[test] fn test_por_circuit_sha256_base_4() { - test_por_circuit::>(3, 216_258); + test_por_circuit::, U4>>(3, 216_258); } #[test] fn test_por_circuit_poseidon_base_4() { - test_por_circuit::>(3, 1_164); + test_por_circuit::, U4>>(3, 1_164); } #[test] fn test_por_circuit_blake2s_base_8() { - test_por_circuit::>(3, 174_503); + test_por_circuit::, U8>>(3, 174_503); } #[test] fn test_por_circuit_sha256_base_8() { - test_por_circuit::>(3, 250_987); + test_por_circuit::, U8>>(3, 250_987); } #[test] fn test_por_circuit_poseidon_base_8() { - test_por_circuit::>(3, 1_063); + test_por_circuit::, U8>>(3, 1_063); } #[test] fn test_por_circuit_poseidon_sub_8_2() { - test_por_circuit::>(3, 1_377); + test_por_circuit::, U8, U2>>(3, 1_377); } #[test] fn test_por_circuit_poseidon_top_8_4_2() { - test_por_circuit::>(3, 1_764); + test_por_circuit::, U8, U4, U2>>(3, 1_764); } #[test] fn test_por_circuit_poseidon_top_8_8() { // This is the shape we want for 32GiB sectors. - test_por_circuit::>(3, 1_593); + test_por_circuit::, U8, U8>>(3, 1_593); } #[test] fn test_por_circuit_poseidon_top_8_8_2() { // This is the shape we want for 64GiB secotrs. 
- test_por_circuit::>(3, 1_907); + test_por_circuit::, U8, U8, U2>>(3, 1_907); } #[test] fn test_por_circuit_poseidon_top_8_2_4() { // We can handle top-heavy trees with a non-zero subtree arity. // These should never be produced, though. - test_por_circuit::>(3, 1_764); + test_por_circuit::, U8, U2, U4>>(3, 1_764); } - fn test_por_circuit( - num_inputs: usize, - num_constraints: usize, - ) { + fn test_por_circuit(num_inputs: usize, num_constraints: usize) + where + ::Domain: Domain, + { let rng = &mut XorShiftRng::from_seed(TEST_SEED); // Ensure arity will evenly fill tree. @@ -865,46 +904,49 @@ mod tests { #[ignore] // Slow test – run only when compiled for release. #[test] fn test_private_por_compound_poseidon_base_2() { - private_por_test_compound::>(); + private_por_test_compound::, U2>>(); } #[ignore] // Slow test – run only when compiled for release. #[test] fn test_private_por_compound_poseidon_base_4() { - private_por_test_compound::>(); + private_por_test_compound::, U4>>(); } #[ignore] // Slow test – run only when compiled for release. #[test] fn test_private_por_compound_poseidon_sub_8_2() { - private_por_test_compound::>(); + private_por_test_compound::, U8, U2>>(); } #[ignore] // Slow test – run only when compiled for release. #[test] fn test_private_por_compound_poseidon_top_8_4_2() { - private_por_test_compound::>(); + private_por_test_compound::, U8, U4, U2>>(); } #[ignore] // Slow test – run only when compiled for release. #[test] fn test_private_por_compound_poseidon_top_8_8() { - private_por_test_compound::>(); + private_por_test_compound::, U8, U8>>(); } #[ignore] // Slow test – run only when compiled for release. #[test] fn test_private_por_compound_poseidon_top_8_8_2() { - private_por_test_compound::>(); + private_por_test_compound::, U8, U8, U2>>(); } #[ignore] // Slow test – run only when compiled for release. 
#[test] fn test_private_por_compound_poseidon_top_8_2_4() { - private_por_test_compound::>(); + private_por_test_compound::, U8, U2, U4>>(); } - fn private_por_test_compound() { + fn private_por_test_compound() + where + ::Domain: Domain, + { let rng = &mut XorShiftRng::from_seed(TEST_SEED); // Ensure arity will evenly fill tree. @@ -1009,20 +1051,23 @@ mod tests { #[test] fn test_private_por_input_circuit_poseidon_binary() { - test_private_por_input_circuit::>(1_886); + test_private_por_input_circuit::, U2>>(1_886); } #[test] fn test_private_por_input_circuit_poseidon_quad() { - test_private_por_input_circuit::>(1_163); + test_private_por_input_circuit::, U4>>(1_163); } #[test] fn test_private_por_input_circuit_poseidon_oct() { - test_private_por_input_circuit::>(1_062); + test_private_por_input_circuit::, U8>>(1_062); } - fn test_private_por_input_circuit(num_constraints: usize) { + fn test_private_por_input_circuit(num_constraints: usize) + where + ::Domain: Domain, + { let mut rng = XorShiftRng::from_seed(TEST_SEED); let leaves = 64 * get_base_tree_count::(); diff --git a/storage-proofs-core/src/merkle/proof.rs b/storage-proofs-core/src/merkle/proof.rs index 0e48d288e3..2257ed1832 100644 --- a/storage-proofs-core/src/merkle/proof.rs +++ b/storage-proofs-core/src/merkle/proof.rs @@ -703,6 +703,7 @@ mod tests { blake2s::Blake2sHasher, poseidon::PoseidonHasher, sha256::Sha256Hasher, Domain, }; use generic_array::typenum::{U2, U4, U8}; + use pasta_curves::{Fp, Fq}; use rand::thread_rng; use crate::merkle::{ @@ -737,8 +738,8 @@ mod tests { fn merklepath_poseidon_2() { merklepath::< MerkleTreeWrapper< - PoseidonHasher, - DiskStore<::Domain>, + PoseidonHasher, + DiskStore< as Hasher>::Domain>, U2, U0, U0, @@ -750,8 +751,8 @@ mod tests { fn merklepath_poseidon_4() { merklepath::< MerkleTreeWrapper< - PoseidonHasher, - DiskStore<::Domain>, + PoseidonHasher, + DiskStore< as Hasher>::Domain>, U4, U0, U0, @@ -763,8 +764,8 @@ mod tests { fn merklepath_poseidon_8() { 
merklepath::< MerkleTreeWrapper< - PoseidonHasher, - DiskStore<::Domain>, + PoseidonHasher, + DiskStore< as Hasher>::Domain>, U8, U0, U0, @@ -776,8 +777,8 @@ mod tests { fn merklepath_poseidon_8_2() { merklepath::< MerkleTreeWrapper< - PoseidonHasher, - DiskStore<::Domain>, + PoseidonHasher, + DiskStore< as Hasher>::Domain>, U8, U2, U0, @@ -789,8 +790,8 @@ mod tests { fn merklepath_poseidon_8_4() { merklepath::< MerkleTreeWrapper< - PoseidonHasher, - DiskStore<::Domain>, + PoseidonHasher, + DiskStore< as Hasher>::Domain>, U8, U4, U0, @@ -802,8 +803,8 @@ mod tests { fn merklepath_poseidon_8_4_2() { merklepath::< MerkleTreeWrapper< - PoseidonHasher, - DiskStore<::Domain>, + PoseidonHasher, + DiskStore< as Hasher>::Domain>, U8, U4, U2, @@ -815,8 +816,8 @@ mod tests { fn merklepath_sha256_2() { merklepath::< MerkleTreeWrapper< - Sha256Hasher, - DiskStore<::Domain>, + Sha256Hasher, + DiskStore< as Hasher>::Domain>, U2, U0, U0, @@ -828,8 +829,8 @@ mod tests { fn merklepath_sha256_4() { merklepath::< MerkleTreeWrapper< - Sha256Hasher, - DiskStore<::Domain>, + Sha256Hasher, + DiskStore< as Hasher>::Domain>, U4, U0, U0, @@ -841,8 +842,8 @@ mod tests { fn merklepath_sha256_2_4() { merklepath::< MerkleTreeWrapper< - Sha256Hasher, - DiskStore<::Domain>, + Sha256Hasher, + DiskStore< as Hasher>::Domain>, U2, U4, U0, @@ -854,8 +855,8 @@ mod tests { fn merklepath_sha256_top_2_4_2() { merklepath::< MerkleTreeWrapper< - Sha256Hasher, - DiskStore<::Domain>, + Sha256Hasher, + DiskStore< as Hasher>::Domain>, U2, U4, U2, @@ -867,8 +868,8 @@ mod tests { fn merklepath_blake2s_2() { merklepath::< MerkleTreeWrapper< - Blake2sHasher, - DiskStore<::Domain>, + Blake2sHasher, + DiskStore< as Hasher>::Domain>, U2, U0, U0, @@ -880,8 +881,8 @@ mod tests { fn merklepath_blake2s_4() { merklepath::< MerkleTreeWrapper< - Blake2sHasher, - DiskStore<::Domain>, + Blake2sHasher, + DiskStore< as Hasher>::Domain>, U4, U0, U0, @@ -893,12 +894,103 @@ mod tests { fn merklepath_blake2s_8_4_2() { merklepath::< 
MerkleTreeWrapper< - Blake2sHasher, - DiskStore<::Domain>, + Blake2sHasher, + DiskStore< as Hasher>::Domain>, U8, U4, U2, >, >(); } + + #[test] + fn merklepath_poseidon_2_halo() { + type Tree = MerkleTreeWrapper::Domain>, U2, U0, U0>; + merklepath::>>(); + merklepath::>>(); + } + + #[test] + fn merklepath_poseidon_4_halo() { + type Tree = MerkleTreeWrapper::Domain>, U4, U0, U0>; + merklepath::>>(); + merklepath::>>(); + } + + #[test] + fn merklepath_poseidon_8_halo() { + type Tree = MerkleTreeWrapper::Domain>, U8, U0, U0>; + merklepath::>>(); + merklepath::>>(); + } + + #[test] + fn merklepath_poseidon_8_2_halo() { + type Tree = MerkleTreeWrapper::Domain>, U8, U2, U0>; + merklepath::>>(); + merklepath::>>(); + } + + #[test] + fn merklepath_poseidon_8_4_halo() { + type Tree = MerkleTreeWrapper::Domain>, U8, U4, U0>; + merklepath::>>(); + merklepath::>>(); + } + + #[test] + fn merklepath_poseidon_8_4_2_halo() { + type Tree = MerkleTreeWrapper::Domain>, U8, U4, U2>; + merklepath::>>(); + merklepath::>>(); + } + + #[test] + fn merklepath_sha256_2_halo() { + type Tree = MerkleTreeWrapper::Domain>, U2, U0, U0>; + merklepath::>>(); + merklepath::>>(); + } + + #[test] + fn merklepath_sha256_4_halo() { + type Tree = MerkleTreeWrapper::Domain>, U4, U0, U0>; + merklepath::>>(); + merklepath::>>(); + } + + #[test] + fn merklepath_sha256_2_4_halo() { + type Tree = MerkleTreeWrapper::Domain>, U2, U4, U0>; + merklepath::>>(); + merklepath::>>(); + } + + #[test] + fn merklepath_sha256_2_4_2_halo() { + type Tree = MerkleTreeWrapper::Domain>, U2, U4, U2>; + merklepath::>>(); + merklepath::>>(); + } + + #[test] + fn merklepath_blake2s_2_halo() { + type Tree = MerkleTreeWrapper::Domain>, U2, U0, U0>; + merklepath::>>(); + merklepath::>>(); + } + + #[test] + fn merklepath_blake2s_4_halo() { + type Tree = MerkleTreeWrapper::Domain>, U4, U0, U0>; + merklepath::>>(); + merklepath::>>(); + } + + #[test] + fn merklepath_blake2s_8_4_2_halo() { + type Tree = MerkleTreeWrapper::Domain>, U8, 
U4, U2>; + merklepath::>>(); + merklepath::>>(); + } } diff --git a/storage-proofs-core/src/pieces.rs b/storage-proofs-core/src/pieces.rs index 5c62796241..c55143f1a9 100644 --- a/storage-proofs-core/src/pieces.rs +++ b/storage-proofs-core/src/pieces.rs @@ -106,6 +106,7 @@ fn subtree_capacity(pos: usize, total: usize) -> Result { mod tests { use super::*; + use blstrs::Scalar as Fr; use filecoin_hashers::poseidon::PoseidonHasher; #[test] @@ -153,13 +154,16 @@ mod tests { fn test_generate_piece_commitment_bytes_from_source() -> Result<()> { let some_bytes: Vec = vec![0; 64]; let mut some_bytes_slice: &[u8] = &some_bytes; - generate_piece_commitment_bytes_from_source::(&mut some_bytes_slice, 64) - .expect("threshold for sufficient bytes is 32"); + generate_piece_commitment_bytes_from_source::>( + &mut some_bytes_slice, + 64, + ) + .expect("threshold for sufficient bytes is 32"); let not_enough_bytes: Vec = vec![0; 7]; let mut not_enough_bytes_slice: &[u8] = ¬_enough_bytes; assert!( - generate_piece_commitment_bytes_from_source::( + generate_piece_commitment_bytes_from_source::>( &mut not_enough_bytes_slice, 7 ) diff --git a/storage-proofs-core/src/util.rs b/storage-proofs-core/src/util.rs index 823814a1d8..229a807ccc 100644 --- a/storage-proofs-core/src/util.rs +++ b/storage-proofs-core/src/util.rs @@ -311,7 +311,7 @@ mod tests { .expect("right bits failure") }; - let out = Sha256Function::hash_leaf_bits_circuit( + let out = Sha256Function::::hash_leaf_bits_circuit( cs.namespace(|| "hash_leaf_circuit"), &left_bits, &right_bits, @@ -322,7 +322,7 @@ mod tests { assert!(cs.is_satisfied(), "constraints not satisfied"); assert_eq!(cs.num_constraints(), 45_387); - let expected: Fr = Sha256Function::default() + let expected: Fr = Sha256Function::::default() .node(left_fr.into(), right_fr.into(), height) .into(); diff --git a/storage-proofs-core/tests/por_circuit.rs b/storage-proofs-core/tests/por_circuit.rs index ab59fa09dc..3baf25ff4f 100644 --- 
a/storage-proofs-core/tests/por_circuit.rs +++ b/storage-proofs-core/tests/por_circuit.rs @@ -41,78 +41,81 @@ type TreeTop = MerkleTreeWrapper::Domain>, #[test] fn test_por_circuit_blake2s_base_2() { - test_por_circuit::>(3, 129_135); + test_por_circuit::, U2>>(3, 129_135); } #[test] fn test_por_circuit_sha256_base_2() { - test_por_circuit::>(3, 272_295); + test_por_circuit::, U2>>(3, 272_295); } #[test] fn test_por_circuit_poseidon_base_2() { - test_por_circuit::>(3, 1_887); + test_por_circuit::, U2>>(3, 1_887); } #[test] fn test_por_circuit_blake2s_base_4() { - test_por_circuit::>(3, 130_296); + test_por_circuit::, U4>>(3, 130_296); } #[test] fn test_por_circuit_sha256_base_4() { - test_por_circuit::>(3, 216_258); + test_por_circuit::, U4>>(3, 216_258); } #[test] fn test_por_circuit_poseidon_base_4() { - test_por_circuit::>(3, 1_164); + test_por_circuit::, U4>>(3, 1_164); } #[test] fn test_por_circuit_blake2s_base_8() { - test_por_circuit::>(3, 174_503); + test_por_circuit::, U8>>(3, 174_503); } #[test] fn test_por_circuit_sha256_base_8() { - test_por_circuit::>(3, 250_987); + test_por_circuit::, U8>>(3, 250_987); } #[test] fn test_por_circuit_poseidon_base_8() { - test_por_circuit::>(3, 1_063); + test_por_circuit::, U8>>(3, 1_063); } #[test] fn test_por_circuit_poseidon_sub_8_2() { - test_por_circuit::>(3, 1_377); + test_por_circuit::, U8, U2>>(3, 1_377); } #[test] fn test_por_circuit_poseidon_top_8_4_2() { - test_por_circuit::>(3, 1_764); + test_por_circuit::, U8, U4, U2>>(3, 1_764); } #[test] fn test_por_circuit_poseidon_sub_8_8() { // This is the shape we want for 32GiB sectors. - test_por_circuit::>(3, 1_593); + test_por_circuit::, U8, U8>>(3, 1_593); } #[test] fn test_por_circuit_poseidon_top_8_8_2() { // This is the shape we want for 64GiB secotrs. - test_por_circuit::>(3, 1_907); + test_por_circuit::, U8, U8, U2>>(3, 1_907); } #[test] fn test_por_circuit_poseidon_top_8_2_4() { // We can handle top-heavy trees with a non-zero subtree arity. 
// These should never be produced, though. - test_por_circuit::>(3, 1_764); + test_por_circuit::, U8, U2, U4>>(3, 1_764); } -fn test_por_circuit(num_inputs: usize, num_constraints: usize) { +fn test_por_circuit(num_inputs: usize, num_constraints: usize) +where + ::Domain: Domain, +{ let mut rng = XorShiftRng::from_seed(TEST_SEED); // Ensure arity will evenly fill tree. @@ -190,20 +193,23 @@ fn test_por_circuit(num_inputs: usize, num_cons #[test] fn test_por_circuit_poseidon_base_2_private_root() { - test_por_circuit_private_root::>(1_886); + test_por_circuit_private_root::, U2>>(1_886); } #[test] fn test_por_circuit_poseidon_base_4_private_root() { - test_por_circuit_private_root::>(1_163); + test_por_circuit_private_root::, U4>>(1_163); } #[test] fn test_por_circuit_poseidon_base_8_private_root() { - test_por_circuit_private_root::>(1_062); + test_por_circuit_private_root::, U8>>(1_062); } -fn test_por_circuit_private_root(num_constraints: usize) { +fn test_por_circuit_private_root(num_constraints: usize) +where + ::Domain: Domain, +{ let mut rng = XorShiftRng::from_seed(TEST_SEED); let leaves = 64 * get_base_tree_count::(); @@ -285,6 +291,8 @@ fn create_tree( labels: &[<::Hasher as Hasher>::Domain], tmp_path: &Path, ) -> MerkleTreeWrapper +where + ::Domain: Domain, { let sector_nodes = labels.len(); let tree_name = Tree::display(); @@ -387,10 +395,10 @@ where let tmp_path = tmp_dir.path(); // Create random TreeROld. 
- let leafs: Vec = (0..sector_nodes) + let leafs: Vec> = (0..sector_nodes) .map(|_| PoseidonDomain::random(&mut rng)) .collect(); - let tree = create_tree::>(&leafs, tmp_path); + let tree = create_tree::, U, V, W>>(&leafs, tmp_path); let root = tree.root(); let mut cs = TestConstraintSystem::::new(); @@ -412,11 +420,14 @@ where commitment: None, }; let priv_inputs = - por::PrivateInputs::> { leaf, tree: &tree }; + por::PrivateInputs::, U, V, W>> { leaf, tree: &tree }; let proof = PoR::prove(&pub_params, &pub_inputs, &priv_inputs).expect("proving failed"); - let is_valid = - PoR::>::verify(&pub_params, &pub_inputs, &proof) - .expect("verification failed"); + let is_valid = PoR::, U, V, W>>::verify( + &pub_params, + &pub_inputs, + &proof, + ) + .expect("verification failed"); assert!(is_valid, "failed to verify por proof"); proof.proof }; @@ -463,7 +474,7 @@ where }) .collect(); - por_no_challenge_input::, _>( + por_no_challenge_input::, U, V, W>, _>( cs.namespace(|| format!("por (c_index={})", c_index)), c_bits, leaf, diff --git a/storage-proofs-core/tests/por_compound.rs b/storage-proofs-core/tests/por_compound.rs index 94c8028d51..1bf6ae8d75 100644 --- a/storage-proofs-core/tests/por_compound.rs +++ b/storage-proofs-core/tests/por_compound.rs @@ -4,7 +4,7 @@ use bellperson::{ }; use blstrs::Scalar as Fr; use ff::Field; -use filecoin_hashers::{poseidon::PoseidonHasher, Hasher}; +use filecoin_hashers::{poseidon::PoseidonHasher, Domain, Hasher}; use fr32::{bytes_into_fr, fr_into_bytes}; use generic_array::typenum::{U0, U2, U4, U8}; use merkletree::store::VecStore; @@ -31,10 +31,14 @@ type TreeTop = MerkleTreeWrapper::Domain>, #[test] #[ignore] fn test_por_compound_poseidon_base_8() { - por_compound::>(); + por_compound::, U8>>(); } -fn por_compound() { +fn por_compound() +where + Tree: 'static + MerkleTreeTrait, + ::Domain: Domain, +{ let mut rng = XorShiftRng::from_seed(TEST_SEED); let leaves = 64 * get_base_tree_count::(); @@ -95,46 +99,50 @@ fn por_compound() { 
#[ignore] #[test] fn test_por_compound_poseidon_base_2_private_root() { - por_compound_private_root::>(); + por_compound_private_root::, U2>>(); } #[ignore] #[test] fn test_por_compound_poseidon_base_4_private_root() { - por_compound_private_root::>(); + por_compound_private_root::, U4>>(); } #[ignore] #[test] fn test_por_compound_poseidon_sub_8_2_private_root() { - por_compound_private_root::>(); + por_compound_private_root::, U8, U2>>(); } #[ignore] #[test] fn test_por_compound_poseidon_top_8_4_2_private_root() { - por_compound_private_root::>(); + por_compound_private_root::, U8, U4, U2>>(); } #[ignore] #[test] fn test_por_compound_poseidon_sub_8_8_private_root() { - por_compound_private_root::>(); + por_compound_private_root::, U8, U8>>(); } #[ignore] #[test] fn test_por_compound_poseidon_top_8_8_2_private_root() { - por_compound_private_root::>(); + por_compound_private_root::, U8, U8, U2>>(); } #[ignore] #[test] fn test_por_compound_poseidon_top_8_2_4_private_root() { - por_compound_private_root::>(); + por_compound_private_root::, U8, U2, U4>>(); } -fn por_compound_private_root() { +fn por_compound_private_root() +where + Tree: 'static + MerkleTreeTrait, + ::Domain: Domain, +{ let mut rng = XorShiftRng::from_seed(TEST_SEED); // Ensure arity will evenly fill tree. 
diff --git a/storage-proofs-core/tests/por_vanilla.rs b/storage-proofs-core/tests/por_vanilla.rs index 27c8a4e29b..0650b952f6 100644 --- a/storage-proofs-core/tests/por_vanilla.rs +++ b/storage-proofs-core/tests/por_vanilla.rs @@ -5,8 +5,8 @@ use ff::Field; use filecoin_hashers::{ blake2s::Blake2sHasher, poseidon::PoseidonHasher, sha256::Sha256Hasher, Domain, Hasher, }; -use fr32::fr_into_bytes; use generic_array::typenum::{U0, U2, U4}; +use pasta_curves::{Fp, Fq}; use rand::SeedableRng; use rand_xorshift::XorShiftRng; use storage_proofs_core::{ @@ -23,32 +23,68 @@ type TreeBase = MerkleTreeWrapper::Domain>, U, #[test] fn test_por_poseidon_base_2() { - test_por::>(); + test_por::, U2>>(); } #[test] fn test_por_sha256_base_2() { - test_por::>(); + test_por::, U2>>(); } #[test] fn test_por_blake2s_base_2() { - test_por::>(); + test_por::, U2>>(); +} + +#[test] +fn test_por_poseidon_base_2_halo() { + test_por::, U2>>(); + test_por::, U2>>(); +} + +#[test] +fn test_por_sha256_base_2_halo() { + test_por::, U2>>(); + test_por::, U2>>(); +} + +#[test] +fn test_por_blake2s_base_2_halo() { + test_por::, U2>>(); + test_por::, U2>>(); } #[test] fn test_por_poseidon_base_4() { - test_por::>(); + test_por::, U4>>(); } #[test] fn test_por_sha256_base_4() { - test_por::>(); + test_por::, U4>>(); } #[test] fn test_por_blake2s_base_4() { - test_por::>(); + test_por::, U4>>(); +} + +#[test] +fn test_por_poseidon_base_4_halo() { + test_por::, U4>>(); + test_por::, U4>>(); +} + +#[test] +fn test_por_sha256_base_4_halo() { + test_por::, U4>>(); + test_por::, U4>>(); +} + +#[test] +fn test_por_blake2s_base_4_halo() { + test_por::, U4>>(); + test_por::, U4>>(); } fn test_por() { @@ -61,7 +97,7 @@ fn test_por() { }; let data: Vec = (0..leaves) - .flat_map(|_| fr_into_bytes(&Fr::random(&mut rng))) + .flat_map(|_| ::Domain::random(&mut rng).into_bytes()) .collect(); let porep_id = [3; 32]; let graph = @@ -91,32 +127,68 @@ fn test_por() { #[test] fn test_por_validates_proof_sha256_base_2() { 
- test_por_validates_proof::>(); + test_por_validates_proof::, U2>>(); } #[test] fn test_por_validates_proof_blake2s_base_2() { - test_por_validates_proof::>(); + test_por_validates_proof::, U2>>(); } #[test] fn test_por_validates_proof_poseidon_base_2() { - test_por_validates_proof::>(); + test_por_validates_proof::, U2>>(); +} + +#[test] +fn test_por_validates_proof_sha256_base_2_halo() { + test_por_validates_proof::, U2>>(); + test_por_validates_proof::, U2>>(); +} + +#[test] +fn test_por_validates_proof_blake2s_base_2_halo() { + test_por_validates_proof::, U2>>(); + test_por_validates_proof::, U2>>(); +} + +#[test] +fn test_por_validates_proof_poseidon_base_2_halo() { + test_por_validates_proof::, U2>>(); + test_por_validates_proof::, U2>>(); } #[test] fn test_por_validates_proof_sha256_base_4() { - test_por_validates_proof::>(); + test_por_validates_proof::, U4>>(); } #[test] fn test_por_validates_proof_blake2s_base_4() { - test_por_validates_proof::>(); + test_por_validates_proof::, U4>>(); } #[test] fn test_por_validates_proof_poseidon_base_4() { - test_por_validates_proof::>(); + test_por_validates_proof::, U4>>(); +} + +#[test] +fn test_por_validates_proof_sha256_base_4_halo() { + test_por_validates_proof::, U4>>(); + test_por_validates_proof::, U4>>(); +} + +#[test] +fn test_por_validates_proof_blake2s_base_4_halo() { + test_por_validates_proof::, U4>>(); + test_por_validates_proof::, U4>>(); +} + +#[test] +fn test_por_validates_proof_poseidon_base_4_halo() { + test_por_validates_proof::, U4>>(); + test_por_validates_proof::, U4>>(); } fn test_por_validates_proof() { @@ -129,7 +201,7 @@ fn test_por_validates_proof() { }; let data: Vec = (0..leaves) - .flat_map(|_| fr_into_bytes(&Fr::random(&mut rng))) + .flat_map(|_| ::Domain::random(&mut rng).into_bytes()) .collect(); let porep_id = [99; 32]; @@ -161,8 +233,8 @@ fn test_por_validates_proof() { // Invalidate the proof. 
let bad_proof = { let mut proof = good_proof; - let mut bad_leaf = Into::::into(proof.data); - bad_leaf += Fr::one(); + let mut bad_leaf: <::Domain as Domain>::Field = proof.data.into(); + bad_leaf += <::Domain as Domain>::Field::one(); proof.data = bad_leaf.into(); proof }; @@ -175,32 +247,68 @@ fn test_por_validates_proof() { #[test] fn test_por_validates_challenge_sha256_base_2() { - test_por_validates_challenge::>(); + test_por_validates_challenge::, U2>>(); } #[test] fn test_por_validates_challenge_blake2s_base_2() { - test_por_validates_challenge::>(); + test_por_validates_challenge::, U2>>(); } #[test] fn test_por_validates_challenge_poseidon_base_2() { - test_por_validates_challenge::>(); + test_por_validates_challenge::, U2>>(); +} + +#[test] +fn test_por_validates_challenge_sha256_base_2_halo() { + test_por_validates_challenge::, U2>>(); + test_por_validates_challenge::, U2>>(); +} + +#[test] +fn test_por_validates_challenge_blake2s_base_2_halo() { + test_por_validates_challenge::, U2>>(); + test_por_validates_challenge::, U2>>(); +} + +#[test] +fn test_por_validates_challenge_poseidon_base_2_halo() { + test_por_validates_challenge::, U2>>(); + test_por_validates_challenge::, U2>>(); } #[test] fn test_por_validates_challenge_sha256_base_4() { - test_por_validates_challenge::>(); + test_por_validates_challenge::, U4>>(); } #[test] fn test_por_validates_challenge_blake2s_base_4() { - test_por_validates_challenge::>(); + test_por_validates_challenge::, U4>>(); } #[test] fn test_por_validates_challenge_poseidon_base_4() { - test_por_validates_challenge::>(); + test_por_validates_challenge::, U4>>(); +} + +#[test] +fn test_por_validates_challenge_sha256_base_4_halo() { + test_por_validates_challenge::, U4>>(); + test_por_validates_challenge::, U4>>(); +} + +#[test] +fn test_por_validates_challenge_blake2s_base_4_halo() { + test_por_validates_challenge::, U4>>(); + test_por_validates_challenge::, U4>>(); +} + +#[test] +fn 
test_por_validates_challenge_poseidon_base_4_halo() { + test_por_validates_challenge::, U4>>(); + test_por_validates_challenge::, U4>>(); } fn test_por_validates_challenge() { @@ -214,7 +322,7 @@ fn test_por_validates_challenge() { }; let data: Vec = (0..leaves) - .flat_map(|_| fr_into_bytes(&Fr::random(&mut rng))) + .flat_map(|_| ::Domain::random(&mut rng).into_bytes()) .collect(); let porep_id = [32; 32]; diff --git a/storage-proofs-porep/Cargo.toml b/storage-proofs-porep/Cargo.toml index 2bb1f133f5..5bbedee1f5 100644 --- a/storage-proofs-porep/Cargo.toml +++ b/storage-proofs-porep/Cargo.toml @@ -43,6 +43,8 @@ yastl = "0.1.2" fil_logger = "0.1" pairing = "0.21" blstrs = "0.4.0" +pasta_curves = "0.3.0" +typemap = "0.3.3" [target."cfg(target_arch = \"aarch64\")".dependencies] sha2 = { version = "0.9.3", features = ["compress", "asm"] } diff --git a/storage-proofs-porep/benches/encode.rs b/storage-proofs-porep/benches/encode.rs index 9ef8a336c9..1ea4426bd8 100644 --- a/storage-proofs-porep/benches/encode.rs +++ b/storage-proofs-porep/benches/encode.rs @@ -41,7 +41,7 @@ fn kdf_benchmark(c: &mut Criterion) { data, replica_id, graph, - } = pregenerate_data::(degree); + } = pregenerate_data::>(degree); let mut group = c.benchmark_group("kdf"); group.sample_size(10); diff --git a/storage-proofs-porep/benches/parents.rs b/storage-proofs-porep/benches/parents.rs index 9c2ad4bb09..9c900c8701 100644 --- a/storage-proofs-porep/benches/parents.rs +++ b/storage-proofs-porep/benches/parents.rs @@ -1,7 +1,9 @@ +use blstrs::Scalar as Fr; use criterion::{black_box, criterion_group, criterion_main, Criterion}; use filecoin_hashers::{blake2s::Blake2sHasher, sha256::Sha256Hasher, Hasher}; #[cfg(feature = "cpu-profile")] use gperftools::profiler::PROFILER; +use pasta_curves::Fp; use storage_proofs_core::{ api_version::ApiVersion, drgraph::{Graph, BASE_DEGREE}, @@ -50,16 +52,28 @@ fn parents_loop_benchmark(c: &mut Criterion) { let mut group = c.benchmark_group("parents in a loop"); for 
size in sizes { group.bench_function(format!("Blake2s-{}", size), |b| { - let graph = pregenerate_graph::(size, ApiVersion::V1_1_0); + let graph = pregenerate_graph::>(size, ApiVersion::V1_1_0); let mut parents = vec![0; graph.degree()]; start_profile(&format!("parents-blake2s-{}", size)); - b.iter(|| black_box(parents_loop::(&graph, &mut parents))); + b.iter(|| black_box(parents_loop::, _>(&graph, &mut parents))); stop_profile(); }); group.bench_function(format!("Sha256-{}", size), |b| { - let graph = pregenerate_graph::(size, ApiVersion::V1_1_0); + let graph = pregenerate_graph::>(size, ApiVersion::V1_1_0); let mut parents = vec![0; graph.degree()]; - b.iter(|| black_box(parents_loop::(&graph, &mut parents))) + b.iter(|| black_box(parents_loop::, _>(&graph, &mut parents))) + }); + group.bench_function(format!("Blake2s-pallas-{}", size), |b| { + let graph = pregenerate_graph::>(size, ApiVersion::V1_1_0); + let mut parents = vec![0; graph.degree()]; + start_profile(&format!("parents-blake2s-{}", size)); + b.iter(|| black_box(parents_loop::, _>(&graph, &mut parents))); + stop_profile(); + }); + group.bench_function(format!("Sha256-pallas-{}", size), |b| { + let graph = pregenerate_graph::>(size, ApiVersion::V1_1_0); + let mut parents = vec![0; graph.degree()]; + b.iter(|| black_box(parents_loop::, _>(&graph, &mut parents))) }); } diff --git a/storage-proofs-porep/src/drg/circuit.rs b/storage-proofs-porep/src/drg/circuit.rs index 34e29ffec4..3f265fc922 100644 --- a/storage-proofs-porep/src/drg/circuit.rs +++ b/storage-proofs-porep/src/drg/circuit.rs @@ -6,7 +6,7 @@ use bellperson::{ }; use blstrs::Scalar as Fr; use ff::PrimeField; -use filecoin_hashers::Hasher; +use filecoin_hashers::{Domain, Hasher}; use storage_proofs_core::{ compound_proof::CircuitComponent, error::Result, @@ -37,7 +37,10 @@ use storage_proofs_core::{ /// * `replica_id` - The id of the replica. 
/// -pub struct DrgPoRepCircuit<'a, H: Hasher> { +pub struct DrgPoRepCircuit<'a, H: Hasher> +where + H::Domain: Domain, +{ pub replica_nodes: Vec>, #[allow(clippy::type_complexity)] pub replica_nodes_paths: Vec>, Option)>>, @@ -54,7 +57,10 @@ pub struct DrgPoRepCircuit<'a, H: Hasher> { pub _h: PhantomData<&'a H>, } -impl<'a, H: 'static + Hasher> DrgPoRepCircuit<'a, H> { +impl<'a, H: 'static + Hasher> DrgPoRepCircuit<'a, H> +where + H::Domain: Domain, +{ #[allow(clippy::type_complexity, clippy::too_many_arguments)] pub fn synthesize( mut cs: CS, @@ -95,7 +101,10 @@ pub struct ComponentPrivateInputs { pub comm_d: Option>, } -impl<'a, H: Hasher> CircuitComponent for DrgPoRepCircuit<'a, H> { +impl<'a, H: Hasher> CircuitComponent for DrgPoRepCircuit<'a, H> +where + H::Domain: Domain, +{ type ComponentPrivateInputs = ComponentPrivateInputs; } @@ -122,7 +131,10 @@ impl<'a, H: Hasher> CircuitComponent for DrgPoRepCircuit<'a, H> { /// /// Total = 2 + replica_parents.len() /// -impl<'a, H: 'static + Hasher> Circuit for DrgPoRepCircuit<'a, H> { +impl<'a, H: 'static + Hasher> Circuit for DrgPoRepCircuit<'a, H> +where + H::Domain: Domain, +{ fn synthesize>(self, cs: &mut CS) -> Result<(), SynthesisError> { let replica_id = self.replica_id; let replica_root = self.replica_root; diff --git a/storage-proofs-porep/src/drg/compound.rs b/storage-proofs-porep/src/drg/compound.rs index 848d5c7459..aa81298e2e 100644 --- a/storage-proofs-porep/src/drg/compound.rs +++ b/storage-proofs-porep/src/drg/compound.rs @@ -3,7 +3,7 @@ use std::marker::PhantomData; use anyhow::{ensure, Context}; use bellperson::Circuit; use blstrs::Scalar as Fr; -use filecoin_hashers::Hasher; +use filecoin_hashers::{Domain, Hasher}; use generic_array::typenum; use storage_proofs_core::{ compound_proof::{CircuitComponent, CompoundProof}, @@ -45,6 +45,7 @@ use crate::drg::{DrgPoRep, DrgPoRepCircuit}; pub struct DrgPoRepCompound where H: Hasher, + H::Domain: Domain, G::Key: AsRef, G: Graph, { @@ -56,6 +57,7 @@ where 
impl, H: Hasher, G: Graph, P: ParameterSetMetadata> CacheableParameters for DrgPoRepCompound where + H::Domain: Domain, G::Key: AsRef, { fn cache_prefix() -> String { @@ -67,6 +69,7 @@ impl<'a, H, G> CompoundProof<'a, DrgPoRep<'a, H, G>, DrgPoRepCircuit<'a, H>> for DrgPoRepCompound where H: 'static + Hasher, + H::Domain: Domain, G::Key: AsRef<::Domain>, G: 'a + Graph + ParameterSetMetadata + Sync + Send, { diff --git a/storage-proofs-porep/src/encode.rs b/storage-proofs-porep/src/encode.rs index 556ecd28ec..e9ad4541a7 100644 --- a/storage-proofs-porep/src/encode.rs +++ b/storage-proofs-porep/src/encode.rs @@ -1,21 +1,21 @@ -use blstrs::Scalar as Fr; +use ff::PrimeField; use filecoin_hashers::Domain; pub fn encode(key: T, value: T) -> T { - let value: Fr = value.into(); - let mut result: Fr = key.into(); + let value: T::Field = value.into(); + let mut result: T::Field = key.into(); encode_fr(&mut result, value); result.into() } -pub fn encode_fr(key: &mut Fr, value: Fr) { +pub fn encode_fr(key: &mut F, value: F) { *key += value; } pub fn decode(key: T, value: T) -> T { - let mut result: Fr = value.into(); - let key: Fr = key.into(); + let mut result: T::Field = value.into(); + let key: T::Field = key.into(); result -= key; result.into() diff --git a/storage-proofs-porep/src/lib.rs b/storage-proofs-porep/src/lib.rs index d00d8e69cd..d45d99c5c6 100644 --- a/storage-proofs-porep/src/lib.rs +++ b/storage-proofs-porep/src/lib.rs @@ -5,7 +5,7 @@ use std::path::PathBuf; -use filecoin_hashers::Hasher; +use filecoin_hashers::{Domain, Hasher}; use merkletree::store::StoreConfig; use storage_proofs_core::{error::Result, merkle::BinaryMerkleTree, proof::ProofScheme, Data}; @@ -16,7 +16,10 @@ mod encode; pub const MAX_LEGACY_POREP_REGISTERED_PROOF_ID: u64 = 4; -pub trait PoRep<'a, H: Hasher, G: Hasher>: ProofScheme<'a> { +pub trait PoRep<'a, H: Hasher, G: Hasher>: ProofScheme<'a> +where + H::Domain: Domain::Field>, +{ type Tau; type ProverAux; diff --git 
a/storage-proofs-porep/src/stacked/circuit/column.rs b/storage-proofs-porep/src/stacked/circuit/column.rs index 7f2dd5a7d3..c575136c73 100644 --- a/storage-proofs-porep/src/stacked/circuit/column.rs +++ b/storage-proofs-porep/src/stacked/circuit/column.rs @@ -1,6 +1,6 @@ use bellperson::{gadgets::num::AllocatedNum, ConstraintSystem, SynthesisError}; use blstrs::Scalar as Fr; -use filecoin_hashers::Hasher; +use filecoin_hashers::{Domain, Hasher}; use storage_proofs_core::merkle::MerkleTreeTrait; use crate::stacked::{circuit::hash::hash_single_column, Column as VanillaColumn, PublicParams}; @@ -15,7 +15,10 @@ pub struct AllocatedColumn { rows: Vec>, } -impl From> for Column { +impl From> for Column +where + H::Domain: Domain, +{ fn from(other: VanillaColumn) -> Self { let VanillaColumn { rows, .. } = other; diff --git a/storage-proofs-porep/src/stacked/circuit/column_proof.rs b/storage-proofs-porep/src/stacked/circuit/column_proof.rs index 327d41e4b5..b64554eeb5 100644 --- a/storage-proofs-porep/src/stacked/circuit/column_proof.rs +++ b/storage-proofs-porep/src/stacked/circuit/column_proof.rs @@ -1,6 +1,6 @@ use bellperson::{ConstraintSystem, SynthesisError}; use blstrs::Scalar as Fr; -use filecoin_hashers::{Hasher, PoseidonArity}; +use filecoin_hashers::{Domain, Hasher, PoseidonArity}; use storage_proofs_core::{ drgraph::Graph, gadgets::por::AuthPath, @@ -18,7 +18,9 @@ pub struct ColumnProof< U: 'static + PoseidonArity, V: 'static + PoseidonArity, W: 'static + PoseidonArity, -> { +> where + H::Domain: Domain, +{ column: Column, inclusion_path: AuthPath, } @@ -29,6 +31,8 @@ impl< V: 'static + PoseidonArity, W: 'static + PoseidonArity, > ColumnProof +where + H::Domain: Domain, { /// Create an empty `ColumnProof`, used in `blank_circuit`s. 
pub fn empty< @@ -61,6 +65,8 @@ impl< impl From> for ColumnProof +where + ::Domain: Domain, { fn from(vanilla_proof: VanillaColumnProof) -> Self { let VanillaColumnProof { diff --git a/storage-proofs-porep/src/stacked/circuit/create_label.rs b/storage-proofs-porep/src/stacked/circuit/create_label.rs index ae1dbdcd34..9befb26619 100644 --- a/storage-proofs-porep/src/stacked/circuit/create_label.rs +++ b/storage-proofs-porep/src/stacked/circuit/create_label.rs @@ -96,7 +96,7 @@ mod tests { let size = 64; let porep_id = [32; 32]; - let graph = StackedBucketGraph::::new_stacked( + let graph = StackedBucketGraph::>::new_stacked( size, BASE_DEGREE, EXP_DEGREE, diff --git a/storage-proofs-porep/src/stacked/circuit/hash.rs b/storage-proofs-porep/src/stacked/circuit/hash.rs index 3423b05a42..fc9b43090f 100644 --- a/storage-proofs-porep/src/stacked/circuit/hash.rs +++ b/storage-proofs-porep/src/stacked/circuit/hash.rs @@ -52,7 +52,7 @@ mod tests { AllocatedNum::alloc(&mut cs, || Ok(b)).expect("alloc failed") }; - let out = ::Function::hash2_circuit( + let out = as Hasher>::Function::hash2_circuit( cs.namespace(|| "hash2"), &a_num, &b_num, @@ -63,7 +63,7 @@ mod tests { assert_eq!(cs.num_constraints(), 311); let expected: Fr = - ::Function::hash2(&a.into(), &b.into()).into(); + as Hasher>::Function::hash2(&a.into(), &b.into()).into(); assert_eq!( expected, diff --git a/storage-proofs-porep/src/stacked/circuit/params.rs b/storage-proofs-porep/src/stacked/circuit/params.rs index 696a484541..5831bd57ef 100644 --- a/storage-proofs-porep/src/stacked/circuit/params.rs +++ b/storage-proofs-porep/src/stacked/circuit/params.rs @@ -5,7 +5,7 @@ use bellperson::{ ConstraintSystem, SynthesisError, }; use blstrs::Scalar as Fr; -use filecoin_hashers::{Hasher, PoseidonArity}; +use filecoin_hashers::{Domain, Hasher, PoseidonArity}; use generic_array::typenum::{U0, U2}; use storage_proofs_core::{ drgraph::Graph, @@ -38,7 +38,11 @@ type TreeColumnProof = ColumnProof< /// Proof for a single 
challenge. #[derive(Debug)] -pub struct Proof { +pub struct Proof +where + ::Domain: Domain, + G::Domain: Domain, +{ /// Inclusion path for the challenged data node in tree D. pub comm_d_path: AuthPath, /// The value of the challenged data node. @@ -60,7 +64,11 @@ pub struct Proof { // #[derive(Clone)]) because derive(Clone) will only expand for MerkleTreeTrait types that also // implement Clone. Not every MerkleTreeTrait type is Clone-able because not all merkel Store's are // Clone-able, therefore deriving Clone would impl Clone for less than all possible Tree types. -impl Clone for Proof { +impl Clone for Proof +where + ::Domain: Domain, + G::Domain: Domain, +{ fn clone(&self) -> Self { Proof { comm_d_path: self.comm_d_path.clone(), @@ -75,7 +83,11 @@ impl Clone for Proof { } } -impl Proof { +impl Proof +where + ::Domain: Domain, + G::Domain: Domain, +{ /// Create an empty proof, used in `blank_circuit`s. pub fn empty(params: &PublicParams) -> Self { Proof { @@ -281,6 +293,8 @@ impl Proof { impl From> for Proof where Tree::Hasher: 'static, + ::Domain: Domain, + G::Domain: Domain, { fn from(vanilla_proof: VanillaProof) -> Self { let VanillaProof { @@ -320,6 +334,7 @@ fn enforce_inclusion>( ) -> Result<(), SynthesisError> where H: 'static + Hasher, + H::Domain: Domain, U: 'static + PoseidonArity, V: 'static + PoseidonArity, W: 'static + PoseidonArity, diff --git a/storage-proofs-porep/src/stacked/circuit/proof.rs b/storage-proofs-porep/src/stacked/circuit/proof.rs index 03b2cc9200..6b9fcf5fab 100644 --- a/storage-proofs-porep/src/stacked/circuit/proof.rs +++ b/storage-proofs-porep/src/stacked/circuit/proof.rs @@ -3,7 +3,7 @@ use std::marker::PhantomData; use anyhow::ensure; use bellperson::{gadgets::num::AllocatedNum, Circuit, ConstraintSystem, SynthesisError}; use blstrs::Scalar as Fr; -use filecoin_hashers::{HashFunction, Hasher}; +use filecoin_hashers::{Domain, HashFunction, Hasher}; use fr32::u64_into_fr; use storage_proofs_core::{ 
compound_proof::{CircuitComponent, CompoundProof}, @@ -25,7 +25,11 @@ use crate::stacked::{circuit::params::Proof, StackedDrg}; /// /// * `params` - parameters for the curve /// -pub struct StackedCircuit<'a, Tree: 'static + MerkleTreeTrait, G: 'static + Hasher> { +pub struct StackedCircuit<'a, Tree: 'static + MerkleTreeTrait, G: 'static + Hasher> +where + ::Domain: Domain, + G::Domain: Domain, +{ public_params: as ProofScheme<'a>>::PublicParams, replica_id: Option<::Domain>, comm_d: Option, @@ -41,7 +45,11 @@ pub struct StackedCircuit<'a, Tree: 'static + MerkleTreeTrait, G: 'static + Hash // #[derive(Clone)]) because derive(Clone) will only expand for MerkleTreeTrait types that also // implement Clone. Not every MerkleTreeTrait type is Clone-able because not all merkel Store's are // Clone-able, therefore deriving Clone would impl Clone for less than all possible Tree types. -impl<'a, Tree: MerkleTreeTrait, G: Hasher> Clone for StackedCircuit<'a, Tree, G> { +impl<'a, Tree: MerkleTreeTrait, G: Hasher> Clone for StackedCircuit<'a, Tree, G> +where + ::Domain: Domain, + G::Domain: Domain, +{ fn clone(&self) -> Self { StackedCircuit { public_params: self.public_params.clone(), @@ -55,11 +63,19 @@ impl<'a, Tree: MerkleTreeTrait, G: Hasher> Clone for StackedCircuit<'a, Tree, G> } } -impl<'a, Tree: MerkleTreeTrait, G: Hasher> CircuitComponent for StackedCircuit<'a, Tree, G> { +impl<'a, Tree: MerkleTreeTrait, G: Hasher> CircuitComponent for StackedCircuit<'a, Tree, G> +where + ::Domain: Domain, + G::Domain: Domain, +{ type ComponentPrivateInputs = (); } -impl<'a, Tree: 'static + MerkleTreeTrait, G: 'static + Hasher> StackedCircuit<'a, Tree, G> { +impl<'a, Tree: 'static + MerkleTreeTrait, G: 'static + Hasher> StackedCircuit<'a, Tree, G> +where + ::Domain: Domain, + G::Domain: Domain, +{ #[allow(clippy::too_many_arguments)] pub fn synthesize( mut cs: CS, @@ -88,7 +104,11 @@ impl<'a, Tree: 'static + MerkleTreeTrait, G: 'static + Hasher> StackedCircuit<'a } } -impl<'a, Tree: 
MerkleTreeTrait, G: Hasher> Circuit for StackedCircuit<'a, Tree, G> { +impl<'a, Tree: MerkleTreeTrait, G: Hasher> Circuit for StackedCircuit<'a, Tree, G> +where + ::Domain: Domain, + G::Domain: Domain, +{ fn synthesize>(self, cs: &mut CS) -> Result<(), SynthesisError> { let StackedCircuit { public_params, @@ -181,7 +201,11 @@ impl<'a, Tree: MerkleTreeTrait, G: Hasher> Circuit for StackedCircuit<'a, Tr } #[allow(dead_code)] -pub struct StackedCompound { +pub struct StackedCompound +where + ::Domain: Domain, + G::Domain: Domain, +{ partitions: Option, _t: PhantomData, _g: PhantomData, @@ -189,6 +213,9 @@ pub struct StackedCompound { impl, P: ParameterSetMetadata, Tree: MerkleTreeTrait, G: Hasher> CacheableParameters for StackedCompound +where + ::Domain: Domain, + G::Domain: Domain, { fn cache_prefix() -> String { format!( @@ -202,6 +229,9 @@ impl, P: ParameterSetMetadata, Tree: MerkleTreeTrait, G: Hasher> impl<'a, Tree: 'static + MerkleTreeTrait, G: 'static + Hasher> CompoundProof<'a, StackedDrg<'a, Tree, G>, StackedCircuit<'a, Tree, G>> for StackedCompound +where + ::Domain: Domain, + G::Domain: Domain, { fn generate_public_inputs( pub_in: & as ProofScheme<'_>>::PublicInputs, @@ -343,7 +373,10 @@ fn generate_inclusion_inputs( por_params: &por::PublicParams, challenge: usize, k: Option, -) -> Result> { +) -> Result> +where + ::Domain: Domain, +{ let pub_inputs = por::PublicInputs::<::Domain> { challenge, commitment: None, diff --git a/storage-proofs-porep/src/stacked/vanilla/cache.rs b/storage-proofs-porep/src/stacked/vanilla/cache.rs index ca7f99af70..5758432bb6 100644 --- a/storage-proofs-porep/src/stacked/vanilla/cache.rs +++ b/storage-proofs-porep/src/stacked/vanilla/cache.rs @@ -453,6 +453,7 @@ mod tests { use std::sync::Once; + use blstrs::Scalar as Fr; use filecoin_hashers::poseidon::PoseidonHasher; use storage_proofs_core::api_version::ApiVersion; @@ -469,7 +470,7 @@ mod tests { fn test_read_full_range() { init_logger(); let nodes = 24u32; - let graph = 
StackedBucketGraph::::new_stacked( + let graph = StackedBucketGraph::>::new_stacked( nodes as usize, BASE_DEGREE, EXP_DEGREE, @@ -523,7 +524,7 @@ mod tests { init_logger(); let pool = Pool::new(3); let nodes = 48u32; - let graph = StackedBucketGraph::::new_stacked( + let graph = StackedBucketGraph::>::new_stacked( nodes as usize, BASE_DEGREE, EXP_DEGREE, @@ -541,7 +542,7 @@ mod tests { pool.scoped(|s| { for _ in 0..3 { s.execute(move || { - let graph = StackedBucketGraph::::new_stacked( + let graph = StackedBucketGraph::>::new_stacked( nodes as usize, BASE_DEGREE, EXP_DEGREE, @@ -573,7 +574,7 @@ mod tests { fn test_read_partial_range(api_version: ApiVersion, porep_id: &[u8; 32]) { init_logger(); let nodes = 48u32; - let graph = StackedBucketGraph::::new_stacked( + let graph = StackedBucketGraph::>::new_stacked( nodes as usize, BASE_DEGREE, EXP_DEGREE, diff --git a/storage-proofs-porep/src/stacked/vanilla/challenges.rs b/storage-proofs-porep/src/stacked/vanilla/challenges.rs index 389c76589f..a7dfec7f89 100644 --- a/storage-proofs-porep/src/stacked/vanilla/challenges.rs +++ b/storage-proofs-porep/src/stacked/vanilla/challenges.rs @@ -81,6 +81,7 @@ mod test { use std::collections::HashMap; + use blstrs::Scalar as Fr; use filecoin_hashers::sha256::Sha256Domain; use rand::{thread_rng, Rng}; @@ -101,7 +102,7 @@ mod test { let challenges = LayerChallenges::new(layers, n); let leaves = 1 << 30; let rng = &mut thread_rng(); - let replica_id: Sha256Domain = Sha256Domain::random(rng); + let replica_id = Sha256Domain::::random(rng); let seed: [u8; 32] = rng.gen(); let partitions = 5; let total_challenges = partitions * n; @@ -138,7 +139,7 @@ mod test { let n = 40; let leaves = 1 << 30; let rng = &mut thread_rng(); - let replica_id: Sha256Domain = Sha256Domain::random(rng); + let replica_id = Sha256Domain::::random(rng); let seed: [u8; 32] = rng.gen(); let partitions = 5; let layers = 100; diff --git a/storage-proofs-porep/src/stacked/vanilla/column.rs 
b/storage-proofs-porep/src/stacked/vanilla/column.rs index 6e51a25202..32a93deb4d 100644 --- a/storage-proofs-porep/src/stacked/vanilla/column.rs +++ b/storage-proofs-porep/src/stacked/vanilla/column.rs @@ -1,7 +1,6 @@ use std::marker::PhantomData; -use blstrs::Scalar as Fr; -use filecoin_hashers::Hasher; +use filecoin_hashers::{Domain, Hasher}; use serde::{Deserialize, Serialize}; use storage_proofs_core::{ error::Result, @@ -39,7 +38,7 @@ impl Column { } /// Calculate the column hashes `C_i = H(E_i, O_i)` for the passed in column. - pub fn hash(&self) -> Fr { + pub fn hash(&self) -> ::Field { hash_single_column( &self .rows diff --git a/storage-proofs-porep/src/stacked/vanilla/column_proof.rs b/storage-proofs-porep/src/stacked/vanilla/column_proof.rs index 17bf4e4fbc..184a50e715 100644 --- a/storage-proofs-porep/src/stacked/vanilla/column_proof.rs +++ b/storage-proofs-porep/src/stacked/vanilla/column_proof.rs @@ -1,5 +1,4 @@ -use blstrs::Scalar as Fr; -use filecoin_hashers::Hasher; +use filecoin_hashers::{Domain, Hasher}; use log::trace; use serde::{de::DeserializeOwned, Deserialize, Serialize}; use storage_proofs_core::{error::Result, merkle::MerkleProofTrait}; @@ -40,7 +39,7 @@ impl ColumnProof { self.column().get_node_at_layer(layer) } - pub fn column_hash(&self) -> Fr { + pub fn column_hash(&self) -> <::Domain as Domain>::Field { self.column.hash() } diff --git a/storage-proofs-porep/src/stacked/vanilla/create_label/multi.rs b/storage-proofs-porep/src/stacked/vanilla/create_label/multi.rs index dc7641e3a6..1cca171741 100644 --- a/storage-proofs-porep/src/stacked/vanilla/create_label/multi.rs +++ b/storage-proofs-porep/src/stacked/vanilla/create_label/multi.rs @@ -730,7 +730,7 @@ mod tests { nodes.trailing_zeros() as usize, ); - let graph = StackedBucketGraph::::new( + let graph = StackedBucketGraph::>::new( None, nodes, BASE_DEGREE, @@ -741,7 +741,7 @@ mod tests { .expect("stacked bucket graph new failed"); let cache = 
graph.parent_cache().expect("parent_cache failed"); - let labels = create_labels_for_decoding::, _>( + let labels = create_labels_for_decoding::, U8, U0, U2>, _>( &graph, &cache, layers, replica_id, config, ) .expect("create_labels_for_decoding failed"); @@ -753,6 +753,6 @@ mod tests { .read_at(final_labels.len() - 1) .expect("read_at"); dbg!(&last_label); - assert_eq!(expected_last_label.to_repr(), last_label.0); + assert_eq!(expected_last_label.to_repr(), last_label.repr()); } } diff --git a/storage-proofs-porep/src/stacked/vanilla/encoding_proof.rs b/storage-proofs-porep/src/stacked/vanilla/encoding_proof.rs index 256a58762e..07580682b9 100644 --- a/storage-proofs-porep/src/stacked/vanilla/encoding_proof.rs +++ b/storage-proofs-porep/src/stacked/vanilla/encoding_proof.rs @@ -1,7 +1,6 @@ use std::marker::PhantomData; -use blstrs::Scalar as Fr; -use filecoin_hashers::Hasher; +use filecoin_hashers::{Domain, Hasher}; use fr32::bytes_into_fr_repr_safe; use log::trace; use serde::{Deserialize, Serialize}; @@ -55,10 +54,13 @@ impl EncodingProof { replica_id: &H::Domain, exp_encoded_node: &H::Domain, decoded_node: &G::Domain, - ) -> bool { + ) -> bool + where + G::Domain: Domain::Field>, + { let key = self.create_key(replica_id); - let fr: Fr = (*decoded_node).into(); + let fr: ::Field = (*decoded_node).into(); let encoded_node = encode(key, fr.into()); check_eq!(exp_encoded_node, &encoded_node); diff --git a/storage-proofs-porep/src/stacked/vanilla/graph.rs b/storage-proofs-porep/src/stacked/vanilla/graph.rs index f57dec79eb..371f842174 100644 --- a/storage-proofs-porep/src/stacked/vanilla/graph.rs +++ b/storage-proofs-porep/src/stacked/vanilla/graph.rs @@ -454,6 +454,7 @@ mod tests { use std::collections::HashSet; + use blstrs::Scalar as Fr; use filecoin_hashers::poseidon::PoseidonHasher; // Test that 3 (or more) rounds of the Feistel cipher can be used @@ -534,7 +535,7 @@ mod tests { ApiVersion::V1_1_0 => false, }; - let graph = StackedBucketGraph::::new_stacked( + 
let graph = StackedBucketGraph::>::new_stacked( nodes as usize, BASE_DEGREE, EXP_DEGREE, @@ -601,7 +602,7 @@ mod tests { let mut porep_id = [0u8; 32]; porep_id[..8].copy_from_slice(&5u64.to_le_bytes()); - let graph = StackedBucketGraph::::new_stacked( + let graph = StackedBucketGraph::>::new_stacked( N_NODES, BASE_DEGREE, EXP_DEGREE, @@ -651,7 +652,7 @@ mod tests { let mut porep_id = [0u8; 32]; porep_id[..8].copy_from_slice(&5u64.to_le_bytes()); - let graph = StackedBucketGraph::::new_stacked( + let graph = StackedBucketGraph::>::new_stacked( N_NODES, BASE_DEGREE, EXP_DEGREE, diff --git a/storage-proofs-porep/src/stacked/vanilla/hash.rs b/storage-proofs-porep/src/stacked/vanilla/hash.rs index 519e5b7662..5600587f43 100644 --- a/storage-proofs-porep/src/stacked/vanilla/hash.rs +++ b/storage-proofs-porep/src/stacked/vanilla/hash.rs @@ -1,17 +1,22 @@ -use blstrs::Scalar as Fr; -use filecoin_hashers::{POSEIDON_CONSTANTS_11, POSEIDON_CONSTANTS_2}; +use ff::PrimeField; +use filecoin_hashers::{FieldArity, POSEIDON_CONSTANTS}; +use generic_array::typenum::{U11, U2}; use neptune::poseidon::Poseidon; /// Hash all elements in the given column. 
-pub fn hash_single_column(column: &[Fr]) -> Fr { +pub fn hash_single_column(column: &[F]) -> F { match column.len() { 2 => { - let mut hasher = Poseidon::new_with_preimage(column, &*POSEIDON_CONSTANTS_2); - hasher.hash() + let consts = &POSEIDON_CONSTANTS + .get::>() + .expect("Poseidon constants not found for field and arity-2"); + Poseidon::new_with_preimage(column, consts).hash() } 11 => { - let mut hasher = Poseidon::new_with_preimage(column, &*POSEIDON_CONSTANTS_11); - hasher.hash() + let consts = &POSEIDON_CONSTANTS + .get::>() + .expect("Poseidon constants not found for field and arity-11"); + Poseidon::new_with_preimage(column, consts).hash() } _ => panic!("unsupported column size: {}", column.len()), } diff --git a/storage-proofs-porep/src/stacked/vanilla/params.rs b/storage-proofs-porep/src/stacked/vanilla/params.rs index bb1b03dc19..ada688da82 100644 --- a/storage-proofs-porep/src/stacked/vanilla/params.rs +++ b/storage-proofs-porep/src/stacked/vanilla/params.rs @@ -113,7 +113,10 @@ where } #[derive(Debug, Clone, PartialEq, Serialize, Deserialize)] -pub struct PublicInputs { +pub struct PublicInputs +where + T: Domain, +{ #[serde(bound = "")] pub replica_id: T, pub seed: [u8; 32], @@ -123,7 +126,10 @@ pub struct PublicInputs { pub k: Option, } -impl PublicInputs { +impl PublicInputs +where + T: Domain, +{ pub fn challenges( &self, layer_challenges: &LayerChallenges, @@ -137,13 +143,19 @@ impl PublicInputs { } #[derive(Debug)] -pub struct PrivateInputs { +pub struct PrivateInputs +where + ::Domain: Domain::Field>, +{ pub p_aux: PersistentAux<::Domain>, pub t_aux: TemporaryAuxCache, } #[derive(Debug, Serialize, Deserialize)] -pub struct Proof { +pub struct Proof +where + ::Domain: Domain::Field>, +{ #[serde(bound( serialize = "MerkleProof: Serialize", deserialize = "MerkleProof: Deserialize<'de>" @@ -175,7 +187,10 @@ pub struct Proof { pub encoding_proof: EncodingProof, } -impl Clone for Proof { +impl Clone for Proof +where + ::Domain: Domain::Field>, +{ 
fn clone(&self) -> Self { Self { comm_d_proofs: self.comm_d_proofs.clone(), @@ -187,7 +202,10 @@ impl Clone for Proof { } } -impl Proof { +impl Proof +where + ::Domain: Domain::Field>, +{ pub fn comm_r_last(&self) -> ::Domain { self.comm_r_last_proof.root() } @@ -345,7 +363,10 @@ pub struct PersistentAux { } #[derive(Debug, Serialize, Deserialize)] -pub struct TemporaryAux { +pub struct TemporaryAux +where + ::Domain: Domain::Field>, +{ /// The encoded nodes for 1..layers. #[serde(bound( serialize = "StoreConfig: Serialize", @@ -358,7 +379,10 @@ pub struct TemporaryAux { pub _g: PhantomData, } -impl Clone for TemporaryAux { +impl Clone for TemporaryAux +where + ::Domain: Domain::Field>, +{ fn clone(&self) -> Self { Self { labels: self.labels.clone(), @@ -370,7 +394,10 @@ impl Clone for TemporaryAux { } } -impl TemporaryAux { +impl TemporaryAux +where + ::Domain: Domain::Field>, +{ pub fn set_cache_path>(&mut self, cache_path: P) { let cp = cache_path.as_ref().to_path_buf(); for label in self.labels.labels.iter_mut() { @@ -484,7 +511,10 @@ impl TemporaryAux { } #[derive(Debug)] -pub struct TemporaryAuxCache { +pub struct TemporaryAuxCache +where + ::Domain: Domain::Field>, +{ /// The encoded nodes for 1..layers. 
pub labels: LabelsCache, pub tree_d: BinaryMerkleTree, @@ -501,7 +531,10 @@ pub struct TemporaryAuxCache { pub replica_path: PathBuf, } -impl TemporaryAuxCache { +impl TemporaryAuxCache +where + ::Domain: Domain::Field>, +{ pub fn new(t_aux: &TemporaryAux, replica_path: PathBuf) -> Result { // tree_d_size stored in the config is the base tree size let tree_d_size = t_aux.tree_d_config.size.expect("config size failure"); diff --git a/storage-proofs-porep/src/stacked/vanilla/porep.rs b/storage-proofs-porep/src/stacked/vanilla/porep.rs index 38e9600c6d..74fbc26d52 100644 --- a/storage-proofs-porep/src/stacked/vanilla/porep.rs +++ b/storage-proofs-porep/src/stacked/vanilla/porep.rs @@ -1,6 +1,6 @@ use std::path::PathBuf; -use filecoin_hashers::Hasher; +use filecoin_hashers::{Domain, Hasher}; use merkletree::store::StoreConfig; use storage_proofs_core::{ error::Result, @@ -18,6 +18,8 @@ use crate::{ impl<'a, 'c, Tree: 'static + MerkleTreeTrait, G: 'static + Hasher> PoRep<'a, Tree::Hasher, G> for StackedDrg<'a, Tree, G> +where + ::Domain: Domain::Field>, { type Tau = Tau<::Domain, ::Domain>; type ProverAux = ( diff --git a/storage-proofs-porep/src/stacked/vanilla/proof.rs b/storage-proofs-porep/src/stacked/vanilla/proof.rs index 795c56cfef..752f028f0e 100644 --- a/storage-proofs-porep/src/stacked/vanilla/proof.rs +++ b/storage-proofs-porep/src/stacked/vanilla/proof.rs @@ -68,7 +68,10 @@ lazy_static! 
{ } #[derive(Debug)] -pub struct StackedDrg<'a, Tree: MerkleTreeTrait, G: Hasher> { +pub struct StackedDrg<'a, Tree: MerkleTreeTrait, G: Hasher> +where + ::Domain: Domain::Field>, +{ _a: PhantomData<&'a Tree>, _b: PhantomData<&'a G>, } @@ -93,7 +96,10 @@ pub type PrepareTreeRDataCallback = end: usize, ) -> Result>; -impl<'a, Tree: 'static + MerkleTreeTrait, G: 'static + Hasher> StackedDrg<'a, Tree, G> { +impl<'a, Tree: 'static + MerkleTreeTrait, G: 'static + Hasher> StackedDrg<'a, Tree, G> +where + ::Domain: Domain::Field>, +{ #[allow(clippy::too_many_arguments)] pub(crate) fn prove_layers( graph: &StackedBucketGraph, @@ -440,15 +446,21 @@ impl<'a, Tree: 'static + MerkleTreeTrait, G: 'static + Hasher> StackedDrg<'a, Tr // Even if the column builder is enabled, the GPU column builder // only supports Poseidon hashes. pub fn use_gpu_column_builder() -> bool { + // TODO (jake): change `PoseidonHasher` to + // `PoseidonHasher || PoseidonHasher || PoseidonHasher` once `neptune` supports + // Pasta fields in GPU. SETTINGS.use_gpu_column_builder - && TypeId::of::() == TypeId::of::() + && TypeId::of::() == TypeId::of::>() } // Even if the tree builder is enabled, the GPU tree builder // only supports Poseidon hashes. pub fn use_gpu_tree_builder() -> bool { + // TODO (jake): change `PoseidonHasher` to + // `PoseidonHasher || PoseidonHasher || PoseidonHasher` once `neptune` supports + // Pasta fields in GPU. 
SETTINGS.use_gpu_tree_builder - && TypeId::of::() == TypeId::of::() + && TypeId::of::() == TypeId::of::>() } #[cfg(any(feature = "cuda", feature = "opencl"))] @@ -800,15 +812,16 @@ impl<'a, Tree: 'static + MerkleTreeTrait, G: 'static + Hasher> StackedDrg<'a, Tr s.execute(move || { for (j, hash) in hashes_chunk.iter_mut().enumerate() { - let data: Vec<_> = (1..=layers) - .map(|layer| { - let store = labels.labels_for_layer(layer); - let el: ::Domain = store - .read_at((i * nodes_count) + j + chunk * chunk_size) - .expect("store read_at failure"); - el.into() - }) - .collect(); + let data: Vec<<::Domain as Domain>::Field> = + (1..=layers) + .map(|layer| { + let store = labels.labels_for_layer(layer); + let el: ::Domain = store + .read_at((i * nodes_count) + j + chunk * chunk_size) + .expect("store read_at failure"); + el.into() + }) + .collect(); *hash = hash_single_column(&data).into(); } diff --git a/storage-proofs-porep/src/stacked/vanilla/proof_scheme.rs b/storage-proofs-porep/src/stacked/vanilla/proof_scheme.rs index 78389371c7..5117d2f170 100644 --- a/storage-proofs-porep/src/stacked/vanilla/proof_scheme.rs +++ b/storage-proofs-porep/src/stacked/vanilla/proof_scheme.rs @@ -1,5 +1,5 @@ use anyhow::ensure; -use filecoin_hashers::{HashFunction, Hasher}; +use filecoin_hashers::{Domain, HashFunction, Hasher}; use log::trace; use rayon::prelude::{IndexedParallelIterator, IntoParallelRefIterator, ParallelIterator}; use storage_proofs_core::{ @@ -15,6 +15,8 @@ use crate::stacked::vanilla::{ impl<'a, 'c, Tree: 'static + MerkleTreeTrait, G: 'static + Hasher> ProofScheme<'a> for StackedDrg<'c, Tree, G> +where + ::Domain: Domain::Field>, { type PublicParams = PublicParams; type SetupParams = SetupParams; diff --git a/storage-proofs-porep/tests/drg_circuit.rs b/storage-proofs-porep/tests/drg_circuit.rs index 1ed9d9c8f5..6c59eaf6f6 100644 --- a/storage-proofs-porep/tests/drg_circuit.rs +++ b/storage-proofs-porep/tests/drg_circuit.rs @@ -73,9 +73,9 @@ fn 
test_drg_porep_circuit() { api_version: ApiVersion::V1_1_0, }; - let pp = DrgPoRep::>::setup(&sp) + let pp = DrgPoRep::, BucketGraph<_>>::setup(&sp) .expect("failed to create drgporep setup"); - let (tau, aux) = DrgPoRep::::replicate( + let (tau, aux) = DrgPoRep::, _>::replicate( &pp, &replica_id.into(), (mmapped_data.as_mut()).into(), @@ -91,17 +91,17 @@ fn test_drg_porep_circuit() { tau: Some(tau), }; - let priv_inputs = drg::PrivateInputs:: { + let priv_inputs = drg::PrivateInputs::> { tree_d: &aux.tree_d, tree_r: &aux.tree_r, tree_r_config_rows_to_discard: default_rows_to_discard(nodes, BINARY_ARITY), }; - let proof_nc = DrgPoRep::::prove(&pp, &pub_inputs, &priv_inputs) + let proof_nc = DrgPoRep::, _>::prove(&pp, &pub_inputs, &priv_inputs) .expect("failed to prove"); assert!( - DrgPoRep::::verify(&pp, &pub_inputs, &proof_nc) + DrgPoRep::, _>::verify(&pp, &pub_inputs, &proof_nc) .expect("failed to verify"), "failed to verify (non circuit)" ); @@ -145,7 +145,7 @@ fn test_drg_porep_circuit() { ); let mut cs = TestConstraintSystem::::new(); - DrgPoRepCircuit::::synthesize( + DrgPoRepCircuit::>::synthesize( cs.namespace(|| "drgporep"), vec![replica_node], vec![replica_node_path], @@ -212,7 +212,7 @@ fn test_drg_porep_circuit_inputs_and_constraints() { let tree_depth = graph_height::(n); let mut cs = TestConstraintSystem::::new(); - DrgPoRepCircuit::::synthesize( + DrgPoRepCircuit::>::synthesize( cs.namespace(|| "drgporep"), vec![Some(Fr::random(&mut rng)); 1], vec![vec![(vec![Some(Fr::random(&mut rng))], Some(0)); tree_depth]; 1], diff --git a/storage-proofs-porep/tests/drg_compound.rs b/storage-proofs-porep/tests/drg_compound.rs index 8046d46a0c..67e4eef5ed 100644 --- a/storage-proofs-porep/tests/drg_compound.rs +++ b/storage-proofs-porep/tests/drg_compound.rs @@ -4,7 +4,7 @@ use bellperson::{ }; use blstrs::Scalar as Fr; use ff::Field; -use filecoin_hashers::{poseidon::PoseidonHasher, Hasher}; +use filecoin_hashers::{poseidon::PoseidonHasher, Domain, Hasher}; use 
fr32::fr_into_bytes; use merkletree::store::StoreConfig; use pretty_assertions::assert_eq; @@ -31,10 +31,14 @@ use tempfile::tempdir; #[test] #[ignore] fn test_drg_porep_compound_poseidon() { - drg_porep_compound::>(); + drg_porep_compound::>>(); } -fn drg_porep_compound() { +fn drg_porep_compound() +where + Tree: 'static + MerkleTreeTrait, + ::Domain: Domain, +{ // femme::pretty::Logger::new() // .start(log::LevelFilter::Trace) // .ok(); diff --git a/storage-proofs-porep/tests/drg_vanilla.rs b/storage-proofs-porep/tests/drg_vanilla.rs index 6b959a189a..f52fbb1501 100644 --- a/storage-proofs-porep/tests/drg_vanilla.rs +++ b/storage-proofs-porep/tests/drg_vanilla.rs @@ -26,12 +26,12 @@ use tempfile::tempdir; #[test] fn text_drg_porep_extract_all_sha256() { - test_extract_all::>(); + test_extract_all::>>(); } #[test] fn text_drg_porep_extract_all_blake2s() { - test_extract_all::>(); + test_extract_all::>>(); } fn test_extract_all() { @@ -96,12 +96,12 @@ fn test_extract_all() { #[test] fn test_drg_porep_extract_sha256() { - test_extract::>(); + test_extract::>>(); } #[test] fn test_drg_porep_extract_blake2s() { - test_extract::>(); + test_extract::>>(); } fn test_extract() { @@ -185,8 +185,8 @@ table_tests! 
{ } fn test_prove_verify(n: usize, i: usize) { - test_prove_verify_aux::>(n, i, false, false); - test_prove_verify_aux::>(n, i, false, false); + test_prove_verify_aux::>>(n, i, false, false); + test_prove_verify_aux::>>(n, i, false, false); } fn test_prove_verify_aux( @@ -372,12 +372,12 @@ fn test_prove_verify_aux( #[test] fn test_drg_porep_verify_fails_on_wrong_challenge() { - test_prove_verify_aux::>(8, 1, true, false); - test_prove_verify_aux::>(8, 1, true, false); + test_prove_verify_aux::>>(8, 1, true, false); + test_prove_verify_aux::>>(8, 1, true, false); } #[test] fn test_drg_porep_verify_fails_on_wrong_parents() { - test_prove_verify_aux::>(8, 5, false, true); - test_prove_verify_aux::>(8, 5, false, true); + test_prove_verify_aux::>>(8, 5, false, true); + test_prove_verify_aux::>>(8, 5, false, true); } diff --git a/storage-proofs-porep/tests/stacked_circuit.rs b/storage-proofs-porep/tests/stacked_circuit.rs index 4a7b7d85e6..0507765731 100644 --- a/storage-proofs-porep/tests/stacked_circuit.rs +++ b/storage-proofs-porep/tests/stacked_circuit.rs @@ -4,7 +4,7 @@ use bellperson::{ }; use blstrs::Scalar as Fr; use ff::Field; -use filecoin_hashers::{poseidon::PoseidonHasher, sha256::Sha256Hasher, Hasher}; +use filecoin_hashers::{poseidon::PoseidonHasher, sha256::Sha256Hasher, Domain, Hasher}; use fr32::fr_into_bytes; use generic_array::typenum::{U0, U2, U4, U8}; use merkletree::store::StoreConfig; @@ -32,28 +32,29 @@ use tempfile::tempdir; #[test] fn test_stacked_porep_circuit_poseidon_base_2() { - test_stacked_porep_circuit::>(22, 1_206_212); + test_stacked_porep_circuit::, U2, U0, U0>>(22, 1_206_212); } #[test] fn test_stacked_input_circuit_poseidon_base_8() { - test_stacked_porep_circuit::>(22, 1_199_620); + test_stacked_porep_circuit::, U8, U0, U0>>(22, 1_199_620); } #[test] fn test_stacked_input_circuit_poseidon_sub_8_4() { - test_stacked_porep_circuit::>(22, 1_296_576); + test_stacked_porep_circuit::, U8, U4, U0>>(22, 1_296_576); } #[test] fn 
test_stacked_input_circuit_poseidon_top_8_4_2() { - test_stacked_porep_circuit::>(22, 1_346_982); + test_stacked_porep_circuit::, U8, U4, U2>>(22, 1_346_982); } -fn test_stacked_porep_circuit( - expected_inputs: usize, - expected_constraints: usize, -) { +fn test_stacked_porep_circuit(expected_inputs: usize, expected_constraints: usize) +where + Tree: 'static + MerkleTreeTrait, + ::Domain: Domain, +{ let nodes = 8 * get_base_tree_count::(); let degree = BASE_DEGREE; let expansion_degree = EXP_DEGREE; @@ -90,8 +91,8 @@ fn test_stacked_porep_circuit( api_version: ApiVersion::V1_1_0, }; - let pp = StackedDrg::::setup(&sp).expect("setup failed"); - let (tau, (p_aux, t_aux)) = StackedDrg::::replicate( + let pp = StackedDrg::>::setup(&sp).expect("setup failed"); + let (tau, (p_aux, t_aux)) = StackedDrg::>::replicate( &pp, &replica_id.into(), (mmapped_data.as_mut()).into(), @@ -107,7 +108,7 @@ fn test_stacked_porep_circuit( let seed = rng.gen(); let pub_inputs = - PublicInputs::<::Domain, ::Domain> { + PublicInputs::<::Domain, as Hasher>::Domain> { replica_id: replica_id.into(), seed, tau: Some(tau), @@ -119,29 +120,33 @@ fn test_stacked_porep_circuit( // Convert TemporaryAux to TemporaryAuxCache, which instantiates all // elements based on the configs stored in TemporaryAux. 
- let t_aux = TemporaryAuxCache::::new(&t_aux, replica_path) + let t_aux = TemporaryAuxCache::>::new(&t_aux, replica_path) .expect("failed to restore contents of t_aux"); - let priv_inputs = PrivateInputs:: { p_aux, t_aux }; + let priv_inputs = PrivateInputs::> { p_aux, t_aux }; - let proofs = - StackedDrg::::prove_all_partitions(&pp, &pub_inputs, &priv_inputs, 1) - .expect("failed to generate partition proofs"); + let proofs = StackedDrg::>::prove_all_partitions( + &pp, + &pub_inputs, + &priv_inputs, + 1, + ) + .expect("failed to generate partition proofs"); let proofs_are_valid = - StackedDrg::::verify_all_partitions(&pp, &pub_inputs, &proofs) + StackedDrg::>::verify_all_partitions(&pp, &pub_inputs, &proofs) .expect("failed while trying to verify partition proofs"); assert!(proofs_are_valid); // Discard cached MTs that are no longer needed. - TemporaryAux::::clear_temp(t_aux_orig).expect("t_aux delete failed"); + TemporaryAux::>::clear_temp(t_aux_orig).expect("t_aux delete failed"); { // Verify that MetricCS returns the same metrics as TestConstraintSystem. 
let mut cs = MetricCS::::new(); - StackedCompound::::circuit(&pub_inputs, (), &proofs[0], &pp, None) + StackedCompound::>::circuit(&pub_inputs, (), &proofs[0], &pp, None) .expect("circuit failed") .synthesize(&mut cs.namespace(|| "stacked drgporep")) .expect("failed to synthesize circuit"); @@ -155,7 +160,7 @@ fn test_stacked_porep_circuit( } let mut cs = TestConstraintSystem::::new(); - StackedCompound::::circuit(&pub_inputs, (), &proofs[0], &pp, None) + StackedCompound::>::circuit(&pub_inputs, (), &proofs[0], &pp, None) .expect("circuit failed") .synthesize(&mut cs.namespace(|| "stacked drgporep")) .expect("failed to synthesize circuit"); @@ -170,8 +175,8 @@ fn test_stacked_porep_circuit( assert_eq!(cs.get_input(0, "ONE"), Fr::one()); - let generated_inputs = as CompoundProof< - StackedDrg<'_, Tree, Sha256Hasher>, + let generated_inputs = > as CompoundProof< + StackedDrg<'_, Tree, Sha256Hasher>, _, >>::generate_public_inputs(&pub_inputs, &pp, None) .expect("failed to generate public inputs"); diff --git a/storage-proofs-porep/tests/stacked_compound.rs b/storage-proofs-porep/tests/stacked_compound.rs index dfc75c16ec..647e3f5d98 100644 --- a/storage-proofs-porep/tests/stacked_compound.rs +++ b/storage-proofs-porep/tests/stacked_compound.rs @@ -4,7 +4,7 @@ use bellperson::{ }; use blstrs::Scalar as Fr; use ff::Field; -use filecoin_hashers::{poseidon::PoseidonHasher, sha256::Sha256Hasher, Hasher}; +use filecoin_hashers::{poseidon::PoseidonHasher, sha256::Sha256Hasher, Domain, Hasher}; use fr32::fr_into_bytes; use generic_array::typenum::{U0, U2, U4, U8}; use merkletree::store::StoreConfig; @@ -32,22 +32,26 @@ use tempfile::tempdir; #[test] #[ignore] fn test_stacked_compound_poseidon_base_8() { - test_stacked_compound::>(); + test_stacked_compound::, U8, U0, U0>>(); } #[test] #[ignore] fn test_stacked_compound_poseidon_sub_8_4() { - test_stacked_compound::>(); + test_stacked_compound::, U8, U4, U0>>(); } #[test] #[ignore] fn test_stacked_compound_poseidon_top_8_4_2() 
{ - test_stacked_compound::>(); + test_stacked_compound::, U8, U4, U2>>(); } -fn test_stacked_compound() { +fn test_stacked_compound() +where + Tree: 'static + MerkleTreeTrait, + ::Domain: Domain, +{ let nodes = 8 * get_base_tree_count::(); let degree = BASE_DEGREE; @@ -107,7 +111,7 @@ fn test_stacked_compound() { let seed = rng.gen(); let public_inputs = - PublicInputs::<::Domain, ::Domain> { + PublicInputs::<::Domain, as Hasher>::Domain> { replica_id: replica_id.into(), seed, tau: Some(tau), @@ -122,7 +126,7 @@ fn test_stacked_compound() { let t_aux = TemporaryAuxCache::::new(&t_aux, replica_path) .expect("failed to restore contents of t_aux"); - let private_inputs = PrivateInputs:: { p_aux, t_aux }; + let private_inputs = PrivateInputs::> { p_aux, t_aux }; { let (circuit, inputs) = @@ -150,8 +154,8 @@ fn test_stacked_compound() { let (circuit1, _inputs) = StackedCompound::circuit_for_test(&public_params, &public_inputs, &private_inputs) .unwrap(); - let blank_circuit = as CompoundProof< - StackedDrg<'_, Tree, Sha256Hasher>, + let blank_circuit = > as CompoundProof< + StackedDrg<'_, Tree, Sha256Hasher>, _, >>::blank_circuit(&public_params.vanilla_params); @@ -171,14 +175,14 @@ fn test_stacked_compound() { } } - let blank_groth_params = as CompoundProof< - StackedDrg<'_, Tree, Sha256Hasher>, + let blank_groth_params = > as CompoundProof< + StackedDrg<'_, Tree, Sha256Hasher>, _, >>::groth_params(Some(&mut rng), &public_params.vanilla_params) .expect("failed to generate groth params"); // Discard cached MTs that are no longer needed. 
- TemporaryAux::::clear_temp(t_aux_orig).expect("t_aux delete failed"); + TemporaryAux::>::clear_temp(t_aux_orig).expect("t_aux delete failed"); let proof = StackedCompound::prove( &public_params, diff --git a/storage-proofs-porep/tests/stacked_vanilla.rs b/storage-proofs-porep/tests/stacked_vanilla.rs index d9126880eb..aa45bc0918 100644 --- a/storage-proofs-porep/tests/stacked_vanilla.rs +++ b/storage-proofs-porep/tests/stacked_vanilla.rs @@ -1,14 +1,14 @@ use std::fs::remove_file; use blstrs::Scalar as Fr; -use ff::{Field, PrimeField}; +use ff::PrimeField; use filecoin_hashers::{ blake2s::Blake2sHasher, poseidon::PoseidonHasher, sha256::Sha256Hasher, Domain, Hasher, }; -use fr32::fr_into_bytes; use generic_array::typenum::{U0, U2, U4, U8}; use glob::glob; use merkletree::store::{Store, StoreConfig}; +use pasta_curves::{Fp, Fq}; use rand::{Rng, SeedableRng}; use rand_xorshift::XorShiftRng; use storage_proofs_core::{ @@ -35,50 +35,108 @@ const DEFAULT_STACKED_LAYERS: usize = 11; #[test] fn test_stacked_porep_extract_all_sha256_base_8() { - test_extract_all::>(); + test_extract_all::, U8, U0, U0>, Blake2sHasher>(); +} + +#[test] +fn test_stacked_porep_extract_all_sha256_base_8_halo() { + test_extract_all::, U8, U0, U0>, Sha256Hasher>(); + test_extract_all::, U8, U0, U0>, Sha256Hasher>(); } #[test] fn test_stacked_porep_extract_all_sha256_sub_8_8() { - test_extract_all::>(); + test_extract_all::, U8, U8, U0>, Blake2sHasher>(); +} + +#[test] +fn test_stacked_porep_extract_all_sha256_sub_8_8_halo() { + test_extract_all::, U8, U8, U0>, Sha256Hasher>(); + test_extract_all::, U8, U8, U0>, Sha256Hasher>(); } #[test] fn test_stacked_porep_extract_all_sha256_top_8_8_2() { - test_extract_all::>(); + test_extract_all::, U8, U8, U2>, Blake2sHasher>(); +} + +#[test] +fn test_stacked_porep_extract_all_sha256_top_8_8_2_halo() { + test_extract_all::, U8, U8, U2>, Sha256Hasher>(); + test_extract_all::, U8, U8, U2>, Sha256Hasher>(); } #[test] fn 
test_stacked_porep_extract_all_blake2s_base_8() { - test_extract_all::>(); + test_extract_all::, U8, U0, U0>, Blake2sHasher>(); } #[test] fn test_stacked_porep_extract_all_blake2s_sub_8_8() { - test_extract_all::>(); + test_extract_all::, U8, U8, U0>, Blake2sHasher>(); } #[test] fn test_stacked_porep_extract_all_blake2s_top_8_8_2() { - test_extract_all::>(); + test_extract_all::, U8, U8, U2>, Blake2sHasher>(); +} + +#[test] +fn test_stacked_porep_extract_all_blake2s_base_8_halo() { + test_extract_all::, U8, U0, U0>, Blake2sHasher>(); + test_extract_all::, U8, U0, U0>, Blake2sHasher>(); +} + +#[test] +fn test_stacked_porep_extract_all_blake2s_sub_8_8_halo() { + test_extract_all::, U8, U8, U0>, Blake2sHasher>(); + test_extract_all::, U8, U8, U0>, Blake2sHasher>(); +} + +#[test] +fn test_stacked_porep_extract_all_blake2s_top_8_8_2_halo() { + test_extract_all::, U8, U8, U2>, Blake2sHasher>(); + test_extract_all::, U8, U8, U2>, Blake2sHasher>(); } #[test] fn test_stacked_porep_extract_all_poseidon_base_8() { - test_extract_all::>(); + test_extract_all::, U8, U0, U0>, Blake2sHasher>(); +} + +#[test] +fn test_stacked_porep_extract_all_poseidon_base_8_halo() { + test_extract_all::, U8, U0, U0>, Sha256Hasher>(); + test_extract_all::, U8, U0, U0>, Sha256Hasher>(); } #[test] fn test_stacked_porep_extract_all_poseidon_sub_8_2() { - test_extract_all::>(); + test_extract_all::, U8, U2, U0>, Blake2sHasher>(); +} + +#[test] +fn test_stacked_porep_extract_all_poseidon_sub_8_2_halo() { + test_extract_all::, U8, U2, U0>, Sha256Hasher>(); + test_extract_all::, U8, U2, U0>, Sha256Hasher>(); } #[test] fn test_stacked_porep_extract_all_poseidon_top_8_8_2() { - test_extract_all::>(); + test_extract_all::, U8, U8, U2>, Blake2sHasher>(); +} + +#[test] +fn test_stacked_porep_extract_all_poseidon_top_8_8_2_halo() { + test_extract_all::, U8, U8, U2>, Sha256Hasher>(); + test_extract_all::, U8, U8, U2>, Sha256Hasher>(); } -fn test_extract_all() { +fn test_extract_all() +where + G: 'static + 
Hasher, + G::Domain: Domain::Domain as Domain>::Field>, +{ // pretty_env_logger::try_init(); let mut rng = XorShiftRng::from_seed(TEST_SEED); @@ -117,9 +175,9 @@ fn test_extract_all() { api_version: ApiVersion::V1_1_0, }; - let pp = StackedDrg::::setup(&sp).expect("setup failed"); + let pp = StackedDrg::::setup(&sp).expect("setup failed"); - StackedDrg::::replicate( + StackedDrg::::replicate( &pp, &replica_id, (mmapped_data.as_mut()).into(), @@ -132,7 +190,7 @@ fn test_extract_all() { // The layers are still in the cache dir, so rerunning the label generation should // not do any work. - let (_, label_states) = StackedDrg::::generate_labels_for_encoding( + let (_, label_states) = StackedDrg::::generate_labels_for_encoding( &pp.graph, &layer_challenges, &replica_id, @@ -150,7 +208,7 @@ fn test_extract_all() { remove_file(data_path).expect("failed to delete layer cache"); } - let (_, label_states) = StackedDrg::::generate_labels_for_encoding( + let (_, label_states) = StackedDrg::::generate_labels_for_encoding( &pp.graph, &layer_challenges, &replica_id, @@ -166,13 +224,8 @@ fn test_extract_all() { assert_ne!(data, &mmapped_data[..], "replication did not change data"); - StackedDrg::::extract_all( - &pp, - &replica_id, - mmapped_data.as_mut(), - Some(config), - ) - .expect("failed to extract data"); + StackedDrg::::extract_all(&pp, &replica_id, mmapped_data.as_mut(), Some(config)) + .expect("failed to extract data"); assert_eq!(data, mmapped_data.as_ref()); @@ -183,15 +236,15 @@ fn test_extract_all() { fn test_stacked_porep_resume_seal() { // pretty_env_logger::try_init().ok(); - type Tree = DiskTree; + type Tree = DiskTree, U8, U8, U2>; let mut rng = XorShiftRng::from_seed(TEST_SEED); - let replica_id = ::Domain::random(&mut rng); + let replica_id = as Hasher>::Domain::random(&mut rng); let nodes = 64 * get_base_tree_count::(); let data: Vec = (0..nodes) .flat_map(|_| { - let v = ::Domain::random(&mut rng); + let v = as Hasher>::Domain::random(&mut rng); 
v.into_bytes() }) .collect(); @@ -224,7 +277,7 @@ fn test_stacked_porep_resume_seal() { api_version: ApiVersion::V1_1_0, }; - let pp = StackedDrg::::setup(&sp).expect("setup failed"); + let pp = StackedDrg::>::setup(&sp).expect("setup failed"); let clear_temp = || { for entry in glob(&(cache_dir.path().to_string_lossy() + "/*.dat")).unwrap() { @@ -239,7 +292,7 @@ fn test_stacked_porep_resume_seal() { }; // first replicaton - StackedDrg::::replicate( + StackedDrg::>::replicate( &pp, &replica_id, (mmapped_data1.as_mut()).into(), @@ -251,7 +304,7 @@ fn test_stacked_porep_resume_seal() { clear_temp(); // replicate a second time - StackedDrg::::replicate( + StackedDrg::>::replicate( &pp, &replica_id, (mmapped_data2.as_mut()).into(), @@ -263,7 +316,7 @@ fn test_stacked_porep_resume_seal() { clear_temp(); // delete last 2 layers - let (_, label_states) = StackedDrg::::generate_labels_for_encoding( + let (_, label_states) = StackedDrg::>::generate_labels_for_encoding( &pp.graph, &layer_challenges, &replica_id, @@ -278,7 +331,7 @@ fn test_stacked_porep_resume_seal() { } // replicate a third time - StackedDrg::::replicate( + StackedDrg::>::replicate( &pp, &replica_id, (mmapped_data3.as_mut()).into(), @@ -294,7 +347,7 @@ fn test_stacked_porep_resume_seal() { assert_eq!(&mmapped_data1[..], &mmapped_data2[..]); assert_eq!(&mmapped_data2[..], &mmapped_data3[..]); - StackedDrg::::extract_all( + StackedDrg::>::extract_all( &pp, &replica_id, mmapped_data1.as_mut(), @@ -316,32 +369,166 @@ table_tests! 
{ fn test_prove_verify_fixed(n: usize) { let challenges = LayerChallenges::new(DEFAULT_STACKED_LAYERS, 5); - test_prove_verify::>(n, challenges.clone()); - test_prove_verify::>(n, challenges.clone()); - test_prove_verify::>(n, challenges.clone()); + test_prove_verify::, U8, U0, U0>, Blake2sHasher>( + n, + challenges.clone(), + ); + test_prove_verify::, U8, U2, U0>, Blake2sHasher>( + n, + challenges.clone(), + ); + test_prove_verify::, U8, U8, U2>, Blake2sHasher>( + n, + challenges.clone(), + ); - test_prove_verify::>(n, challenges.clone()); - test_prove_verify::>(n, challenges.clone()); - test_prove_verify::>(n, challenges.clone()); + test_prove_verify::, U4, U0, U0>, Blake2sHasher>( + n, + challenges.clone(), + ); + test_prove_verify::, U4, U2, U0>, Blake2sHasher>( + n, + challenges.clone(), + ); + test_prove_verify::, U4, U8, U2>, Blake2sHasher>( + n, + challenges.clone(), + ); - test_prove_verify::>(n, challenges.clone()); - test_prove_verify::>(n, challenges.clone()); - test_prove_verify::>(n, challenges.clone()); + test_prove_verify::, U4, U0, U0>, Blake2sHasher>( + n, + challenges.clone(), + ); + test_prove_verify::, U4, U2, U0>, Blake2sHasher>( + n, + challenges.clone(), + ); + test_prove_verify::, U4, U8, U2>, Blake2sHasher>( + n, + challenges.clone(), + ); + + test_prove_verify::, U8, U0, U0>, Blake2sHasher>( + n, + challenges.clone(), + ); + test_prove_verify::, U8, U2, U0>, Blake2sHasher>( + n, + challenges.clone(), + ); + test_prove_verify::, U8, U8, U2>, Blake2sHasher>( + n, + challenges.clone(), + ); + + test_prove_verify::, U4, U0, U0>, Blake2sHasher>( + n, + challenges.clone(), + ); + test_prove_verify::, U4, U2, U0>, Blake2sHasher>( + n, + challenges.clone(), + ); + test_prove_verify::, U4, U8, U2>, Blake2sHasher>( + n, + challenges.clone(), + ); + + test_prove_verify::, U8, U0, U0>, Blake2sHasher>( + n, + challenges.clone(), + ); + test_prove_verify::, U8, U2, U0>, Blake2sHasher>( + n, + challenges.clone(), + ); + test_prove_verify::, U8, U8, U2>, 
Blake2sHasher>( + n, + challenges.clone(), + ); - test_prove_verify::>(n, challenges.clone()); - test_prove_verify::>(n, challenges.clone()); - test_prove_verify::>(n, challenges.clone()); + // Alternate the Pasta fields rather than run each test for both fields. + test_prove_verify::, U8, U0, U0>, Blake2sHasher>( + n, + challenges.clone(), + ); + test_prove_verify::, U8, U2, U0>, Blake2sHasher>( + n, + challenges.clone(), + ); + test_prove_verify::, U8, U8, U2>, Blake2sHasher>( + n, + challenges.clone(), + ); - test_prove_verify::>(n, challenges.clone()); - test_prove_verify::>(n, challenges.clone()); - test_prove_verify::>(n, challenges.clone()); + test_prove_verify::, U4, U0, U0>, Blake2sHasher>( + n, + challenges.clone(), + ); + test_prove_verify::, U4, U2, U0>, Blake2sHasher>( + n, + challenges.clone(), + ); + test_prove_verify::, U4, U8, U2>, Blake2sHasher>( + n, + challenges.clone(), + ); - test_prove_verify::>(n, challenges.clone()); - test_prove_verify::>(n, challenges.clone()); - test_prove_verify::>(n, challenges); + test_prove_verify::, U8, U0, U0>, Blake2sHasher>( + n, + challenges.clone(), + ); + test_prove_verify::, U8, U2, U0>, Blake2sHasher>( + n, + challenges.clone(), + ); + test_prove_verify::, U8, U8, U2>, Blake2sHasher>( + n, + challenges.clone(), + ); + + test_prove_verify::, U4, U0, U0>, Blake2sHasher>( + n, + challenges.clone(), + ); + test_prove_verify::, U4, U2, U0>, Blake2sHasher>( + n, + challenges.clone(), + ); + test_prove_verify::, U4, U8, U2>, Blake2sHasher>( + n, + challenges.clone(), + ); + + test_prove_verify::, U8, U0, U0>, Blake2sHasher>( + n, + challenges.clone(), + ); + test_prove_verify::, U8, U2, U0>, Blake2sHasher>( + n, + challenges.clone(), + ); + test_prove_verify::, U8, U8, U2>, Blake2sHasher>( + n, + challenges.clone(), + ); + + test_prove_verify::, U4, U0, U0>, Blake2sHasher>( + n, + challenges.clone(), + ); + test_prove_verify::, U4, U2, U0>, Blake2sHasher>( + n, + challenges.clone(), + ); + test_prove_verify::, U4, 
U8, U2>, Blake2sHasher>(n, challenges); } -fn test_prove_verify(n: usize, challenges: LayerChallenges) { +fn test_prove_verify(n: usize, challenges: LayerChallenges) +where + G: 'static + Hasher, + G::Domain: Domain::Domain as Domain>::Field>, +{ // This will be called multiple times, only the first one succeeds, and that is ok. // femme::pretty::Logger::new() // .start(log::LevelFilter::Trace) @@ -355,7 +542,7 @@ fn test_prove_verify(n: usize, challenges: Laye let replica_id: ::Domain = ::Domain::random(&mut rng); let data: Vec = (0..nodes) - .flat_map(|_| fr_into_bytes(&Fr::random(&mut rng))) + .flat_map(|_| ::Domain::random(&mut rng).into_bytes()) .collect(); // MT for original data is always named tree-d, and it will be @@ -383,8 +570,8 @@ fn test_prove_verify(n: usize, challenges: Laye api_version: ApiVersion::V1_1_0, }; - let pp = StackedDrg::::setup(&sp).expect("setup failed"); - let (tau, (p_aux, t_aux)) = StackedDrg::::replicate( + let pp = StackedDrg::::setup(&sp).expect("setup failed"); + let (tau, (p_aux, t_aux)) = StackedDrg::::replicate( &pp, &replica_id, (mmapped_data.as_mut()).into(), @@ -399,41 +586,33 @@ fn test_prove_verify(n: usize, challenges: Laye assert_ne!(data, copied, "replication did not change data"); let seed = rng.gen(); - let pub_inputs = - PublicInputs::<::Domain, ::Domain> { - replica_id, - seed, - tau: Some(tau), - k: None, - }; + let pub_inputs = PublicInputs::<::Domain, ::Domain> { + replica_id, + seed, + tau: Some(tau), + k: None, + }; // Store a copy of the t_aux for later resource deletion. let t_aux_orig = t_aux.clone(); // Convert TemporaryAux to TemporaryAuxCache, which instantiates all // elements based on the configs stored in TemporaryAux. 
- let t_aux = TemporaryAuxCache::::new(&t_aux, replica_path) + let t_aux = TemporaryAuxCache::::new(&t_aux, replica_path) .expect("failed to restore contents of t_aux"); let priv_inputs = PrivateInputs { p_aux, t_aux }; - let all_partition_proofs = &StackedDrg::::prove_all_partitions( - &pp, - &pub_inputs, - &priv_inputs, - partitions, - ) - .expect("failed to generate partition proofs"); + let all_partition_proofs = + &StackedDrg::::prove_all_partitions(&pp, &pub_inputs, &priv_inputs, partitions) + .expect("failed to generate partition proofs"); - let proofs_are_valid = StackedDrg::::verify_all_partitions( - &pp, - &pub_inputs, - all_partition_proofs, - ) - .expect("failed to verify partition proofs"); + let proofs_are_valid = + StackedDrg::::verify_all_partitions(&pp, &pub_inputs, all_partition_proofs) + .expect("failed to verify partition proofs"); // Discard cached MTs that are no longer needed. - TemporaryAux::::clear_temp(t_aux_orig).expect("t_aux delete failed"); + TemporaryAux::::clear_temp(t_aux_orig).expect("t_aux delete failed"); assert!(proofs_are_valid); @@ -459,7 +638,7 @@ fn test_stacked_porep_setup_terminates() { // When this fails, the call to setup should panic, but seems to actually hang (i.e. neither return nor panic) for some reason. // When working as designed, the call to setup returns without error. - let _pp = StackedDrg::, Blake2sHasher>::setup(&sp) + let _pp = StackedDrg::, U8, U0, U0>, Blake2sHasher>::setup(&sp) .expect("setup failed"); } @@ -549,7 +728,7 @@ fn test_generate_labels_aux( nodes.trailing_zeros() as usize, ); - let graph = StackedBucketGraph::::new( + let graph = StackedBucketGraph::>::new( None, nodes, BASE_DEGREE, @@ -564,12 +743,12 @@ fn test_generate_labels_aux( let labels = StackedDrg::< // Although not generally correct for every size, the hasher shape is not used, // so for purposes of testing label creation, it is safe to supply a dummy. 
- DiskTree, - Sha256Hasher, + DiskTree, U8, U8, U2>, + Sha256Hasher, >::generate_labels_for_decoding( &graph, &unused_layer_challenges, - &::Domain::try_from_bytes(&replica_id).unwrap(), + & as Hasher>::Domain::try_from_bytes(&replica_id).unwrap(), config, ) .unwrap(); @@ -577,5 +756,5 @@ fn test_generate_labels_aux( let final_labels = labels.labels_for_last_layer().unwrap(); let last_label = final_labels.read_at(nodes - 1).unwrap(); - assert_eq!(expected_last_label.to_repr(), last_label.0); + assert_eq!(expected_last_label.to_repr(), last_label.repr()); } diff --git a/storage-proofs-post/Cargo.toml b/storage-proofs-post/Cargo.toml index 06c320d376..5a3509e3d8 100644 --- a/storage-proofs-post/Cargo.toml +++ b/storage-proofs-post/Cargo.toml @@ -36,6 +36,7 @@ tempfile = "3" pretty_assertions = "0.6.1" filecoin-hashers = { path = "../filecoin-hashers", version = "^6.0.0", default-features = false, features = ["poseidon", "sha256", "blake2s"]} rand_xorshift = "0.3.0" +pasta_curves = "0.3.0" [features] default = ["opencl"] diff --git a/storage-proofs-post/src/election/circuit.rs b/storage-proofs-post/src/election/circuit.rs index ae37ee39b1..7d1c396946 100644 --- a/storage-proofs-post/src/election/circuit.rs +++ b/storage-proofs-post/src/election/circuit.rs @@ -3,7 +3,7 @@ use std::marker::PhantomData; use bellperson::{gadgets::num::AllocatedNum, Circuit, ConstraintSystem, SynthesisError}; use blstrs::Scalar as Fr; use ff::Field; -use filecoin_hashers::{poseidon::PoseidonFunction, HashFunction, Hasher, PoseidonMDArity}; +use filecoin_hashers::{poseidon::PoseidonFunction, Domain, HashFunction, Hasher, PoseidonMDArity}; use generic_array::typenum::Unsigned; use storage_proofs_core::{ compound_proof::CircuitComponent, @@ -12,7 +12,10 @@ use storage_proofs_core::{ }; /// This is the `ElectionPoSt` circuit. 
-pub struct ElectionPoStCircuit { +pub struct ElectionPoStCircuit +where + ::Domain: Domain, +{ pub comm_r: Option, pub comm_c: Option, pub comm_r_last: Option, @@ -29,11 +32,17 @@ pub struct ElectionPoStCircuit { #[derive(Clone, Default)] pub struct ComponentPrivateInputs {} -impl<'a, Tree: MerkleTreeTrait> CircuitComponent for ElectionPoStCircuit { +impl<'a, Tree: MerkleTreeTrait> CircuitComponent for ElectionPoStCircuit +where + ::Domain: Domain, +{ type ComponentPrivateInputs = ComponentPrivateInputs; } -impl<'a, Tree: 'static + MerkleTreeTrait> Circuit for ElectionPoStCircuit { +impl<'a, Tree: 'static + MerkleTreeTrait> Circuit for ElectionPoStCircuit +where + ::Domain: Domain, +{ fn synthesize>(self, cs: &mut CS) -> Result<(), SynthesisError> { let comm_r = self.comm_r; let comm_c = self.comm_c; @@ -139,7 +148,7 @@ impl<'a, Tree: 'static + MerkleTreeTrait> Circuit for ElectionPoStCircuit( + let partial_ticket_num = PoseidonFunction::::hash_md_circuit::<_>( &mut cs.namespace(|| "partial_ticket_hash"), &partial_ticket_nums, )?; diff --git a/storage-proofs-post/src/election/compound.rs b/storage-proofs-post/src/election/compound.rs index 5e4069d1ed..277f984b58 100644 --- a/storage-proofs-post/src/election/compound.rs +++ b/storage-proofs-post/src/election/compound.rs @@ -2,6 +2,8 @@ use std::marker::PhantomData; use bellperson::Circuit; use blstrs::Scalar as Fr; +use ff::PrimeField; +use filecoin_hashers::{Domain, Hasher}; use generic_array::typenum::Unsigned; use storage_proofs_core::{ compound_proof::{CircuitComponent, CompoundProof}, @@ -20,12 +22,15 @@ use crate::election::{generate_leaf_challenge, ElectionPoSt, ElectionPoStCircuit pub struct ElectionPoStCompound where Tree: MerkleTreeTrait, + ::Domain: Domain, { _t: PhantomData, } impl, P: ParameterSetMetadata, Tree: MerkleTreeTrait> CacheableParameters for ElectionPoStCompound +where + ::Domain: Domain, { fn cache_prefix() -> String { format!("proof-of-spacetime-election-{}", Tree::display()) @@ -36,6 
+41,7 @@ impl<'a, Tree> CompoundProof<'a, ElectionPoSt<'a, Tree>, ElectionPoStCircuit where Tree: 'static + MerkleTreeTrait, + ::Domain: Domain, { fn generate_public_inputs( pub_inputs: & as ProofScheme<'a>>::PublicInputs, @@ -78,7 +84,7 @@ where } // 3. Inputs for verifying partial_ticket generation - inputs.push(pub_inputs.partial_ticket); + inputs.push(Fr::from_repr_vartime(pub_inputs.partial_ticket).expect("from_repr failure")); Ok(inputs) } @@ -121,7 +127,9 @@ where comm_c: Some(comm_c), comm_r_last: Some(comm_r_last), paths, - partial_ticket: Some(pub_in.partial_ticket), + partial_ticket: Some( + Fr::from_repr_vartime(pub_in.partial_ticket).expect("from_repr failure"), + ), randomness: Some(pub_in.randomness.into()), prover_id: Some(pub_in.prover_id.into()), sector_id: Some(pub_in.sector_id.into()), diff --git a/storage-proofs-post/src/election/vanilla.rs b/storage-proofs-post/src/election/vanilla.rs index 529c15dc95..17697ae2b0 100644 --- a/storage-proofs-post/src/election/vanilla.rs +++ b/storage-proofs-post/src/election/vanilla.rs @@ -3,13 +3,11 @@ use std::fmt::{self, Debug, Formatter}; use std::marker::PhantomData; use anyhow::{bail, ensure, Context}; -use blstrs::Scalar as Fr; use byteorder::{ByteOrder, LittleEndian}; use filecoin_hashers::{ poseidon::{PoseidonDomain, PoseidonFunction}, Domain, HashFunction, Hasher, PoseidonMDArity, }; -use fr32::fr_into_bytes; use generic_array::typenum::Unsigned; use log::trace; use rayon::prelude::{ @@ -67,7 +65,7 @@ pub struct PublicInputs { pub prover_id: T, #[serde(bound = "")] pub comm_r: T, - pub partial_ticket: Fr, + pub partial_ticket: [u8; 32], pub sector_challenge_index: u64, } @@ -88,7 +86,7 @@ pub struct PrivateInputs { #[derive(Clone, Serialize, Deserialize)] pub struct Candidate { pub sector_id: SectorId, - pub partial_ticket: Fr, + pub partial_ticket: [u8; 32], pub ticket: [u8; 32], pub sector_challenge_index: u64, } @@ -97,7 +95,7 @@ impl Debug for Candidate { fn fmt(&self, f: &mut Formatter<'_>) -> 
fmt::Result { f.debug_struct("Candidate") .field("sector_id", &self.sector_id) - .field("partial_ticket", &self.partial_ticket) + .field("partial_ticket", &hex::encode(&self.partial_ticket)) .field("ticket", &hex::encode(&self.ticket)) .field("sector_challenge_index", &self.sector_challenge_index) .finish() @@ -152,7 +150,7 @@ where } #[allow(clippy::type_complexity)] -pub fn generate_candidates( +pub fn generate_candidates( pub_params: &PublicParams, challenged_sectors: &[SectorId], trees: &BTreeMap< @@ -167,7 +165,14 @@ pub fn generate_candidates( >, prover_id: ::Domain, randomness: ::Domain, -) -> Result> { +) -> Result> +where + Tree: MerkleTreeTrait, + PoseidonDomain<<::Domain as Domain>::Field>: + Domain::Domain as Domain>::Field>, + PoseidonFunction<<::Domain as Domain>::Field>: + HashFunction::Domain as Domain>::Field>>, +{ challenged_sectors .par_iter() .enumerate() @@ -189,7 +194,7 @@ pub fn generate_candidates( .collect() } -fn generate_candidate( +fn generate_candidate( pub_params: &PublicParams, tree: &MerkleTreeWrapper< Tree::Hasher, @@ -202,23 +207,32 @@ fn generate_candidate( sector_id: SectorId, randomness: ::Domain, sector_challenge_index: u64, -) -> Result { - let randomness_fr: Fr = randomness.into(); - let prover_id_fr: Fr = prover_id.into(); - let mut data: Vec = vec![ +) -> Result +where + Tree: MerkleTreeTrait, + // Ensure that there exists a Poseidon hasher over the same field as `Tree`. 
+ PoseidonDomain<<::Domain as Domain>::Field>: + Domain::Domain as Domain>::Field>, + PoseidonFunction<<::Domain as Domain>::Field>: + HashFunction::Domain as Domain>::Field>>, +{ + let randomness_fr: <::Domain as Domain>::Field = randomness.into(); + let prover_id_fr: <::Domain as Domain>::Field = prover_id.into(); + let mut data: Vec::Domain as Domain>::Field>> = vec![ randomness_fr.into(), prover_id_fr.into(), - Fr::from(sector_id).into(), + <::Domain as Domain>::Field::from(sector_id.into()).into(), ]; for n in 0..pub_params.challenge_count { let challenge = generate_leaf_challenge(pub_params, randomness, sector_challenge_index, n as u64)?; - let val: Fr = measure_op(Operation::PostReadChallengedRange, || { - tree.read_at(challenge as usize) - })? - .into(); + let val: <::Domain as Domain>::Field = + measure_op(Operation::PostReadChallengedRange, || { + tree.read_at(challenge as usize) + })? + .into(); data.push(val.into()); } @@ -228,10 +242,10 @@ fn generate_candidate( data.push(PoseidonDomain::default()); } - let partial_ticket: Fr = measure_op(Operation::PostPartialTicketHash, || { + let partial_ticket = measure_op(Operation::PostPartialTicketHash, || { PoseidonFunction::hash_md(&data) }) - .into(); + .repr(); // ticket = sha256(partial_ticket) let ticket = finalize_ticket(&partial_ticket); @@ -244,9 +258,8 @@ fn generate_candidate( }) } -pub fn finalize_ticket(partial_ticket: &Fr) -> [u8; 32] { - let bytes = fr_into_bytes(partial_ticket); - let ticket_hash = Sha256::digest(&bytes); +pub fn finalize_ticket(partial_ticket: &[u8; 32]) -> [u8; 32] { + let ticket_hash = Sha256::digest(partial_ticket); let mut ticket = [0u8; 32]; ticket.copy_from_slice(&ticket_hash[..]); ticket diff --git a/storage-proofs-post/src/fallback/circuit.rs b/storage-proofs-post/src/fallback/circuit.rs index c436027463..22687e9002 100644 --- a/storage-proofs-post/src/fallback/circuit.rs +++ b/storage-proofs-post/src/fallback/circuit.rs @@ -1,7 +1,7 @@ use 
bellperson::{gadgets::num::AllocatedNum, Circuit, ConstraintSystem, SynthesisError}; use blstrs::Scalar as Fr; use ff::Field; -use filecoin_hashers::{HashFunction, Hasher}; +use filecoin_hashers::{Domain, HashFunction, Hasher}; use rayon::prelude::{ParallelIterator, ParallelSlice}; use storage_proofs_core::{ compound_proof::CircuitComponent, @@ -20,7 +20,10 @@ use storage_proofs_core::{ use crate::fallback::{PublicParams, PublicSector, SectorProof}; /// This is the `FallbackPoSt` circuit. -pub struct FallbackPoStCircuit { +pub struct FallbackPoStCircuit +where + ::Domain: Domain, +{ pub prover_id: Option, pub sectors: Vec>, } @@ -29,7 +32,10 @@ pub struct FallbackPoStCircuit { // #[derive(Clone)]) because derive(Clone) will only expand for MerkleTreeTrait types that also // implement Clone. Not every MerkleTreeTrait type is Clone-able because not all merkel Store's are // Clone-able, therefore deriving Clone would impl Clone for less than all possible Tree types. -impl Clone for FallbackPoStCircuit { +impl Clone for FallbackPoStCircuit +where + ::Domain: Domain, +{ fn clone(&self) -> Self { FallbackPoStCircuit { prover_id: self.prover_id, @@ -38,7 +44,10 @@ impl Clone for FallbackPoStCircuit { } } -pub struct Sector { +pub struct Sector +where + ::Domain: Domain, +{ pub comm_r: Option, pub comm_c: Option, pub comm_r_last: Option, @@ -49,7 +58,10 @@ pub struct Sector { // We must manually implement Clone for all types generic over MerkleTreeTrait (instead of using // #derive(Clone)). 
-impl Clone for Sector { +impl Clone for Sector +where + ::Domain: Domain, +{ fn clone(&self) -> Self { Sector { comm_r: self.comm_r, @@ -62,7 +74,10 @@ impl Clone for Sector { } } -impl Sector { +impl Sector +where + ::Domain: Domain, +{ pub fn circuit( sector: &PublicSector<::Domain>, vanilla_proof: &SectorProof, @@ -111,7 +126,10 @@ impl Sector { } } -impl Circuit for &Sector { +impl Circuit for &Sector +where + ::Domain: Domain, +{ fn synthesize>(self, cs: &mut CS) -> Result<(), SynthesisError> { let Sector { comm_r, @@ -180,11 +198,17 @@ impl Circuit for &Sector { #[derive(Clone, Default)] pub struct ComponentPrivateInputs {} -impl CircuitComponent for FallbackPoStCircuit { +impl CircuitComponent for FallbackPoStCircuit +where + ::Domain: Domain, +{ type ComponentPrivateInputs = ComponentPrivateInputs; } -impl Circuit for FallbackPoStCircuit { +impl Circuit for FallbackPoStCircuit +where + ::Domain: Domain, +{ fn synthesize>(self, cs: &mut CS) -> Result<(), SynthesisError> { if CS::is_extensible() { return self.synthesize_extendable(cs); @@ -194,7 +218,10 @@ impl Circuit for FallbackPoStCircuit } } -impl FallbackPoStCircuit { +impl FallbackPoStCircuit +where + ::Domain: Domain, +{ fn synthesize_default>( self, cs: &mut CS, diff --git a/storage-proofs-post/src/fallback/compound.rs b/storage-proofs-post/src/fallback/compound.rs index 4050ecb7c2..7b4e5731a2 100644 --- a/storage-proofs-post/src/fallback/compound.rs +++ b/storage-proofs-post/src/fallback/compound.rs @@ -3,7 +3,7 @@ use std::marker::PhantomData; use anyhow::{anyhow, ensure}; use bellperson::Circuit; use blstrs::Scalar as Fr; -use filecoin_hashers::Hasher; +use filecoin_hashers::{Domain, Hasher}; use sha2::{Digest, Sha256}; use storage_proofs_core::{ compound_proof::{CircuitComponent, CompoundProof}, @@ -21,12 +21,15 @@ use crate::fallback::{generate_leaf_challenge_inner, FallbackPoSt, FallbackPoStC pub struct FallbackPoStCompound where Tree: MerkleTreeTrait, + ::Domain: Domain, { _t: PhantomData, } 
impl, P: ParameterSetMetadata, Tree: MerkleTreeTrait> CacheableParameters for FallbackPoStCompound +where + ::Domain: Domain, { fn cache_prefix() -> String { format!("proof-of-spacetime-fallback-{}", Tree::display()) @@ -36,6 +39,8 @@ impl, P: ParameterSetMetadata, Tree: MerkleTreeTrait> CacheablePa impl<'a, Tree: 'static + MerkleTreeTrait> CompoundProof<'a, FallbackPoSt<'a, Tree>, FallbackPoStCircuit> for FallbackPoStCompound +where + ::Domain: Domain, { fn generate_public_inputs( pub_inputs: & as ProofScheme<'a>>::PublicInputs, diff --git a/storage-proofs-post/src/rational/circuit.rs b/storage-proofs-post/src/rational/circuit.rs index 14fbead053..f45dd40804 100644 --- a/storage-proofs-post/src/rational/circuit.rs +++ b/storage-proofs-post/src/rational/circuit.rs @@ -2,14 +2,17 @@ use std::marker::PhantomData; use bellperson::{gadgets::num::AllocatedNum, Circuit, ConstraintSystem, SynthesisError}; use blstrs::Scalar as Fr; -use filecoin_hashers::{HashFunction, Hasher}; +use filecoin_hashers::{Domain, HashFunction, Hasher}; use storage_proofs_core::{ compound_proof::CircuitComponent, error::Result, gadgets::constraint, gadgets::por::PoRCircuit, gadgets::variables::Root, merkle::MerkleTreeTrait, }; /// This is the `RationalPoSt` circuit. -pub struct RationalPoStCircuit { +pub struct RationalPoStCircuit +where + ::Domain: Domain, +{ /// Paramters for the engine. 
pub comm_rs: Vec>, pub comm_cs: Vec>, @@ -23,11 +26,17 @@ pub struct RationalPoStCircuit { #[derive(Clone, Default)] pub struct ComponentPrivateInputs {} -impl<'a, Tree: MerkleTreeTrait> CircuitComponent for RationalPoStCircuit { +impl<'a, Tree: MerkleTreeTrait> CircuitComponent for RationalPoStCircuit +where + ::Domain: Domain, +{ type ComponentPrivateInputs = ComponentPrivateInputs; } -impl<'a, Tree: 'static + MerkleTreeTrait> Circuit for RationalPoStCircuit { +impl<'a, Tree: 'static + MerkleTreeTrait> Circuit for RationalPoStCircuit +where + ::Domain: Domain, +{ fn synthesize>(self, cs: &mut CS) -> Result<(), SynthesisError> { let comm_rs = self.comm_rs; let comm_cs = self.comm_cs; diff --git a/storage-proofs-post/src/rational/compound.rs b/storage-proofs-post/src/rational/compound.rs index aaca7aa268..72b030a346 100644 --- a/storage-proofs-post/src/rational/compound.rs +++ b/storage-proofs-post/src/rational/compound.rs @@ -3,6 +3,7 @@ use std::marker::PhantomData; use anyhow::ensure; use bellperson::{Circuit, ConstraintSystem, SynthesisError}; use blstrs::Scalar as Fr; +use filecoin_hashers::{Domain, Hasher}; use generic_array::typenum::U2; use storage_proofs_core::{ compound_proof::{CircuitComponent, CompoundProof}, @@ -21,12 +22,15 @@ use crate::rational::{RationalPoSt, RationalPoStCircuit}; pub struct RationalPoStCompound where Tree: MerkleTreeTrait, + ::Domain: Domain, { _t: PhantomData, } impl, P: ParameterSetMetadata, Tree: MerkleTreeTrait> CacheableParameters for RationalPoStCompound +where + ::Domain: Domain, { fn cache_prefix() -> String { format!("proof-of-spacetime-rational-{}", Tree::display()) @@ -38,6 +42,7 @@ impl<'a, Tree: 'static + MerkleTreeTrait> for RationalPoStCompound where Tree: 'static + MerkleTreeTrait, + ::Domain: Domain, { fn generate_public_inputs( pub_in: & as ProofScheme<'a>>::PublicInputs, @@ -149,7 +154,10 @@ where } } -impl<'a, Tree: 'static + MerkleTreeTrait> RationalPoStCircuit { +impl<'a, Tree: 'static + MerkleTreeTrait> 
RationalPoStCircuit +where + ::Domain: Domain, +{ #[allow(clippy::type_complexity)] pub fn synthesize>( cs: &mut CS, diff --git a/storage-proofs-post/tests/election_circuit.rs b/storage-proofs-post/tests/election_circuit.rs index ce32bb8a5d..24ce159422 100644 --- a/storage-proofs-post/tests/election_circuit.rs +++ b/storage-proofs-post/tests/election_circuit.rs @@ -3,7 +3,7 @@ use std::marker::PhantomData; use bellperson::{util_cs::test_cs::TestConstraintSystem, Circuit}; use blstrs::Scalar as Fr; -use ff::Field; +use ff::{Field, PrimeField}; use filecoin_hashers::{poseidon::PoseidonHasher, Domain, HashFunction, Hasher}; use generic_array::typenum::{U0, U8}; use rand::SeedableRng; @@ -23,10 +23,13 @@ use tempfile::tempdir; #[test] fn test_election_post_circuit_poseidon() { - test_election_post_circuit::>(22_940); + test_election_post_circuit::, U8, U0, U0>>(22_940); } -fn test_election_post_circuit(expected_constraints: usize) { +fn test_election_post_circuit(expected_constraints: usize) +where + ::Domain: Domain, +{ let rng = &mut XorShiftRng::from_seed(TEST_SEED); let leaves = 64 * get_base_tree_count::(); @@ -110,7 +113,9 @@ fn test_election_post_circuit(expected_constrai comm_r: Some(comm_r.into()), comm_c: Some(comm_c.into()), comm_r_last: Some(comm_r_last.into()), - partial_ticket: Some(candidate.partial_ticket), + partial_ticket: Some( + Fr::from_repr_vartime(candidate.partial_ticket).expect("from_repr failure"), + ), randomness: Some(randomness.into()), prover_id: Some(prover_id.into()), sector_id: Some(candidate.sector_id.into()), diff --git a/storage-proofs-post/tests/election_compound.rs b/storage-proofs-post/tests/election_compound.rs index 18d77fdc56..7beef25b68 100644 --- a/storage-proofs-post/tests/election_compound.rs +++ b/storage-proofs-post/tests/election_compound.rs @@ -4,6 +4,7 @@ use bellperson::{ util_cs::{metric_cs::MetricCS, test_cs::TestConstraintSystem}, Circuit, }; +use blstrs::Scalar as Fr; use 
filecoin_hashers::{poseidon::PoseidonHasher, Domain, HashFunction, Hasher}; use generic_array::typenum::{U0, U8}; use rand::SeedableRng; @@ -24,10 +25,13 @@ use tempfile::tempdir; #[ignore] #[test] fn test_election_post_compound_poseidon() { - test_election_post_compound::>(); + test_election_post_compound::, U8, U0, U0>>(); } -fn test_election_post_compound() { +fn test_election_post_compound() +where + ::Domain: Domain, +{ let rng = &mut XorShiftRng::from_seed(TEST_SEED); let leaves = 64 * get_base_tree_count::(); diff --git a/storage-proofs-post/tests/election_vanilla.rs b/storage-proofs-post/tests/election_vanilla.rs index 2b78a2f46c..dfa9b074ee 100644 --- a/storage-proofs-post/tests/election_vanilla.rs +++ b/storage-proofs-post/tests/election_vanilla.rs @@ -1,7 +1,12 @@ use std::collections::BTreeMap; -use filecoin_hashers::{poseidon::PoseidonHasher, Domain, HashFunction, Hasher}; +use blstrs::Scalar as Fr; +use filecoin_hashers::{ + poseidon::{PoseidonDomain, PoseidonFunction, PoseidonHasher}, + Domain, HashFunction, Hasher, +}; use generic_array::typenum::{U0, U2, U8}; +use pasta_curves::{Fp, Fq}; use rand::SeedableRng; use rand_xorshift::XorShiftRng; use storage_proofs_core::{ @@ -18,20 +23,30 @@ use tempfile::tempdir; #[test] fn test_election_post_poseidon_base_8() { - test_election_post::>(); + test_election_post::, U8, U0, U0>>(); + test_election_post::, U8, U0, U0>>(); } #[test] fn test_election_post_poseidon_sub_8_8() { - test_election_post::>(); + test_election_post::, U8, U8, U0>>(); + test_election_post::, U8, U8, U0>>(); } #[test] fn test_election_post_poseidon_top_8_8_2() { - test_election_post::>(); + test_election_post::, U8, U8, U2>>(); + test_election_post::, U8, U8, U2>>(); } -fn test_election_post() { +fn test_election_post() +where + Tree: 'static + MerkleTreeTrait, + PoseidonDomain<<::Domain as Domain>::Field>: + Domain::Domain as Domain>::Field>, + PoseidonFunction<<::Domain as Domain>::Field>: + HashFunction::Domain as Domain>::Field>>, 
+{ let rng = &mut XorShiftRng::from_seed(TEST_SEED); let leaves = 64 * get_base_tree_count::(); diff --git a/storage-proofs-post/tests/fallback_circuit.rs b/storage-proofs-post/tests/fallback_circuit.rs index c0727be864..6dd920659a 100644 --- a/storage-proofs-post/tests/fallback_circuit.rs +++ b/storage-proofs-post/tests/fallback_circuit.rs @@ -25,32 +25,32 @@ use tempfile::tempdir; #[test] fn test_fallback_post_circuit_poseidon_single_partition_base_8() { - test_fallback_post::>(3, 3, 1, 19, 16_869); + test_fallback_post::, U8, U0, U0>>(3, 3, 1, 19, 16_869); } #[test] fn test_fallback_post_circuit_poseidon_single_partition_sub_8_4() { - test_fallback_post::>(3, 3, 1, 19, 22_674); + test_fallback_post::, U8, U4, U0>>(3, 3, 1, 19, 22_674); } #[test] fn test_fallback_post_circuit_poseidon_single_partition_top_8_4_2() { - test_fallback_post::>(3, 3, 1, 19, 27_384); + test_fallback_post::, U8, U4, U2>>(3, 3, 1, 19, 27_384); } #[test] fn test_fallback_post_circuit_poseidon_two_partitions_base_8() { - test_fallback_post::>(4, 2, 2, 13, 11_246); + test_fallback_post::, U8, U0, U0>>(4, 2, 2, 13, 11_246); } #[test] fn test_fallback_post_circuit_poseidon_single_partition_smaller_base_8() { - test_fallback_post::>(2, 3, 1, 19, 16_869); + test_fallback_post::, U8, U0, U0>>(2, 3, 1, 19, 16_869); } #[test] fn test_fallback_post_circuit_poseidon_two_partitions_smaller_base_8() { - test_fallback_post::>(5, 3, 2, 19, 16_869); + test_fallback_post::, U8, U0, U0>>(5, 3, 2, 19, 16_869); } fn test_fallback_post( @@ -61,6 +61,7 @@ fn test_fallback_post( expected_constraints: usize, ) where Tree::Store: 'static, + ::Domain: Domain, { let rng = &mut XorShiftRng::from_seed(TEST_SEED); @@ -211,11 +212,11 @@ fn test_fallback_post_circuit_poseidon_base_8_bench_cs() { api_version: ApiVersion::V1_1_0, }; - let pp = FallbackPoSt::>::setup(¶ms) + let pp = FallbackPoSt::>>::setup(¶ms) .expect("fallback post setup failure"); let mut cs = BenchCS::::new(); - 
FallbackPoStCompound::>::blank_circuit(&pp) + FallbackPoStCompound::>>::blank_circuit(&pp) .synthesize(&mut cs) .expect("blank circuit failure"); diff --git a/storage-proofs-post/tests/fallback_compound.rs b/storage-proofs-post/tests/fallback_compound.rs index fa57d40804..c3e0439a3d 100644 --- a/storage-proofs-post/tests/fallback_compound.rs +++ b/storage-proofs-post/tests/fallback_compound.rs @@ -2,6 +2,7 @@ use bellperson::{ util_cs::{metric_cs::MetricCS, test_cs::TestConstraintSystem}, Circuit, }; +use blstrs::Scalar as Fr; use filecoin_hashers::{poseidon::PoseidonHasher, Domain, HashFunction, Hasher}; use generic_array::typenum::{U0, U2, U4, U8}; use pretty_assertions::assert_eq; @@ -23,43 +24,43 @@ use tempfile::tempdir; #[ignore] #[test] fn test_fallback_post_compound_poseidon_single_partition_base_8() { - fallback_post::>(15, 15, 1, ApiVersion::V1_0_0); - fallback_post::>(15, 15, 1, ApiVersion::V1_1_0); + fallback_post::, U8, U0, U0>>(15, 15, 1, ApiVersion::V1_0_0); + fallback_post::, U8, U0, U0>>(15, 15, 1, ApiVersion::V1_1_0); } #[ignore] #[test] fn test_fallback_post_compound_poseidon_single_partition_sub_8_4() { - fallback_post::>(3, 3, 1, ApiVersion::V1_0_0); - fallback_post::>(3, 3, 1, ApiVersion::V1_1_0); + fallback_post::, U8, U4, U0>>(3, 3, 1, ApiVersion::V1_0_0); + fallback_post::, U8, U4, U0>>(3, 3, 1, ApiVersion::V1_1_0); } #[ignore] #[test] fn test_fallback_post_compound_poseidon_single_partition_top_8_4_2() { - fallback_post::>(3, 3, 1, ApiVersion::V1_0_0); - fallback_post::>(3, 3, 1, ApiVersion::V1_1_0); + fallback_post::, U8, U4, U2>>(3, 3, 1, ApiVersion::V1_0_0); + fallback_post::, U8, U4, U2>>(3, 3, 1, ApiVersion::V1_1_0); } #[ignore] #[test] fn test_fallback_post_compound_poseidon_single_partition_smaller_base_8() { - fallback_post::>(2, 3, 1, ApiVersion::V1_0_0); - fallback_post::>(2, 3, 1, ApiVersion::V1_1_0); + fallback_post::, U8, U0, U0>>(2, 3, 1, ApiVersion::V1_0_0); + fallback_post::, U8, U0, U0>>(2, 3, 1, ApiVersion::V1_1_0); } 
#[ignore] #[test] fn test_fallback_post_compound_poseidon_two_partitions_base_8() { - fallback_post::>(4, 2, 2, ApiVersion::V1_0_0); - fallback_post::>(4, 2, 2, ApiVersion::V1_1_0); + fallback_post::, U8, U0, U0>>(4, 2, 2, ApiVersion::V1_0_0); + fallback_post::, U8, U0, U0>>(4, 2, 2, ApiVersion::V1_1_0); } #[ignore] #[test] fn test_fallback_post_compound_poseidon_two_partitions_smaller_base_8() { - fallback_post::>(5, 3, 2, ApiVersion::V1_0_0); - fallback_post::>(5, 3, 2, ApiVersion::V1_1_0); + fallback_post::, U8, U0, U0>>(5, 3, 2, ApiVersion::V1_0_0); + fallback_post::, U8, U0, U0>>(5, 3, 2, ApiVersion::V1_1_0); } fn fallback_post( @@ -69,6 +70,7 @@ fn fallback_post( api_version: ApiVersion, ) where Tree::Store: 'static, + ::Domain: Domain, { let rng = &mut XorShiftRng::from_seed(TEST_SEED); diff --git a/storage-proofs-post/tests/fallback_vanilla.rs b/storage-proofs-post/tests/fallback_vanilla.rs index 11dd1af19b..259279c52e 100644 --- a/storage-proofs-post/tests/fallback_vanilla.rs +++ b/storage-proofs-post/tests/fallback_vanilla.rs @@ -1,5 +1,7 @@ +use blstrs::Scalar as Fr; use filecoin_hashers::{poseidon::PoseidonHasher, Domain, HashFunction, Hasher}; use generic_array::typenum::{U0, U2, U4, U8}; +use pasta_curves::{Fp, Fq}; use rand::SeedableRng; use rand_xorshift::XorShiftRng; use storage_proofs_core::{ @@ -16,98 +18,114 @@ use tempfile::tempdir; #[test] fn test_fallback_post_poseidon_single_partition_base_8() { - test_fallback_post::>(5, 5, 1, ApiVersion::V1_0_0); - test_fallback_post::>(5, 5, 1, ApiVersion::V1_1_0); + test_fallback_post::, U8, U0, U0>>(5, 5, 1, ApiVersion::V1_0_0); + test_fallback_post::, U8, U0, U0>>(5, 5, 1, ApiVersion::V1_1_0); + test_fallback_post::, U8, U0, U0>>(5, 5, 1, ApiVersion::V1_1_0); } #[test] fn test_fallback_post_poseidon_single_partition_smaller_base_8() { - test_fallback_post::>(3, 5, 1, ApiVersion::V1_0_0); - test_fallback_post::>(3, 5, 1, ApiVersion::V1_1_0); + test_fallback_post::, U8, U0, U0>>(3, 5, 1, 
ApiVersion::V1_0_0); + test_fallback_post::, U8, U0, U0>>(3, 5, 1, ApiVersion::V1_1_0); + test_fallback_post::, U8, U0, U0>>(3, 5, 1, ApiVersion::V1_1_0); } #[test] fn test_fallback_post_poseidon_two_partitions_base_8() { - test_fallback_post::>(4, 2, 2, ApiVersion::V1_0_0); - test_fallback_post::>(4, 2, 2, ApiVersion::V1_1_0); + test_fallback_post::, U8, U0, U0>>(4, 2, 2, ApiVersion::V1_0_0); + test_fallback_post::, U8, U0, U0>>(4, 2, 2, ApiVersion::V1_1_0); + test_fallback_post::, U8, U0, U0>>(4, 2, 2, ApiVersion::V1_1_0); } #[test] fn test_fallback_post_poseidon_two_partitions_smaller_base_8() { - test_fallback_post::>(5, 3, 2, ApiVersion::V1_0_0); - test_fallback_post::>(5, 3, 2, ApiVersion::V1_1_0); + test_fallback_post::, U8, U0, U0>>(5, 3, 2, ApiVersion::V1_0_0); + test_fallback_post::, U8, U0, U0>>(5, 3, 2, ApiVersion::V1_1_0); + test_fallback_post::, U8, U0, U0>>(5, 3, 2, ApiVersion::V1_1_0); } #[test] fn test_fallback_post_poseidon_single_partition_sub_8_4() { - test_fallback_post::>(5, 5, 1, ApiVersion::V1_0_0); - test_fallback_post::>(5, 5, 1, ApiVersion::V1_1_0); + test_fallback_post::, U8, U4, U0>>(5, 5, 1, ApiVersion::V1_0_0); + test_fallback_post::, U8, U4, U0>>(5, 5, 1, ApiVersion::V1_1_0); + test_fallback_post::, U8, U4, U0>>(5, 5, 1, ApiVersion::V1_1_0); } #[test] fn test_fallback_post_poseidon_single_partition_smaller_sub_8_4() { - test_fallback_post::>(3, 5, 1, ApiVersion::V1_0_0); - test_fallback_post::>(3, 5, 1, ApiVersion::V1_1_0); + test_fallback_post::, U8, U4, U0>>(3, 5, 1, ApiVersion::V1_0_0); + test_fallback_post::, U8, U4, U0>>(3, 5, 1, ApiVersion::V1_1_0); + test_fallback_post::, U8, U4, U0>>(3, 5, 1, ApiVersion::V1_1_0); } #[test] fn test_fallback_post_poseidon_two_partitions_sub_8_4() { - test_fallback_post::>(4, 2, 2, ApiVersion::V1_0_0); - test_fallback_post::>(4, 2, 2, ApiVersion::V1_1_0); + test_fallback_post::, U8, U4, U0>>(4, 2, 2, ApiVersion::V1_0_0); + test_fallback_post::, U8, U4, U0>>(4, 2, 2, ApiVersion::V1_1_0); + 
test_fallback_post::, U8, U4, U0>>(4, 2, 2, ApiVersion::V1_1_0); } #[test] fn test_fallback_post_poseidon_two_partitions_sub_8_8() { - test_fallback_post::>(4, 2, 2, ApiVersion::V1_0_0); - test_fallback_post::>(4, 2, 2, ApiVersion::V1_1_0); + test_fallback_post::, U8, U8, U0>>(4, 2, 2, ApiVersion::V1_0_0); + test_fallback_post::, U8, U8, U0>>(4, 2, 2, ApiVersion::V1_1_0); + test_fallback_post::, U8, U8, U0>>(4, 2, 2, ApiVersion::V1_1_0); } #[test] fn test_fallback_post_poseidon_two_partitions_smaller_sub_8_4() { - test_fallback_post::>(5, 3, 2, ApiVersion::V1_0_0); - test_fallback_post::>(5, 3, 2, ApiVersion::V1_1_0); + test_fallback_post::, U8, U4, U0>>(5, 3, 2, ApiVersion::V1_0_0); + test_fallback_post::, U8, U4, U0>>(5, 3, 2, ApiVersion::V1_1_0); + test_fallback_post::, U8, U4, U0>>(5, 3, 2, ApiVersion::V1_1_0); } #[test] fn test_fallback_post_poseidon_two_partitions_smaller_sub_8_8() { - test_fallback_post::>(5, 3, 2, ApiVersion::V1_0_0); - test_fallback_post::>(5, 3, 2, ApiVersion::V1_1_0); + test_fallback_post::, U8, U8, U0>>(5, 3, 2, ApiVersion::V1_0_0); + test_fallback_post::, U8, U8, U0>>(5, 3, 2, ApiVersion::V1_1_0); + test_fallback_post::, U8, U8, U0>>(5, 3, 2, ApiVersion::V1_1_0); } #[test] fn test_fallback_post_poseidon_single_partition_top_8_4_2() { - test_fallback_post::>(5, 5, 1, ApiVersion::V1_0_0); - test_fallback_post::>(5, 5, 1, ApiVersion::V1_1_0); + test_fallback_post::, U8, U4, U2>>(5, 5, 1, ApiVersion::V1_0_0); + test_fallback_post::, U8, U4, U2>>(5, 5, 1, ApiVersion::V1_1_0); + test_fallback_post::, U8, U4, U2>>(5, 5, 1, ApiVersion::V1_1_0); } #[test] fn test_fallback_post_poseidon_single_partition_top_8_8_2() { - test_fallback_post::>(5, 5, 1, ApiVersion::V1_0_0); - test_fallback_post::>(5, 5, 1, ApiVersion::V1_1_0); + test_fallback_post::, U8, U8, U2>>(5, 5, 1, ApiVersion::V1_0_0); + test_fallback_post::, U8, U8, U2>>(5, 5, 1, ApiVersion::V1_1_0); + test_fallback_post::, U8, U8, U2>>(5, 5, 1, ApiVersion::V1_1_0); } #[test] fn 
test_fallback_post_poseidon_single_partition_smaller_top_8_4_2() { - test_fallback_post::>(3, 5, 1, ApiVersion::V1_0_0); - test_fallback_post::>(3, 5, 1, ApiVersion::V1_1_0); + test_fallback_post::, U8, U4, U2>>(3, 5, 1, ApiVersion::V1_0_0); + test_fallback_post::, U8, U4, U2>>(3, 5, 1, ApiVersion::V1_1_0); + test_fallback_post::, U8, U4, U2>>(3, 5, 1, ApiVersion::V1_1_0); } #[test] fn test_fallback_post_poseidon_two_partitions_top_8_4_2() { - test_fallback_post::>(4, 2, 2, ApiVersion::V1_0_0); - test_fallback_post::>(4, 2, 2, ApiVersion::V1_1_0); + test_fallback_post::, U8, U4, U2>>(4, 2, 2, ApiVersion::V1_0_0); + test_fallback_post::, U8, U4, U2>>(4, 2, 2, ApiVersion::V1_1_0); + test_fallback_post::, U8, U4, U2>>(4, 2, 2, ApiVersion::V1_1_0); } #[test] fn test_fallback_post_poseidon_two_partitions_smaller_top_8_4_2() { - test_fallback_post::>(5, 3, 2, ApiVersion::V1_0_0); - test_fallback_post::>(5, 3, 2, ApiVersion::V1_1_0); + test_fallback_post::, U8, U4, U2>>(5, 3, 2, ApiVersion::V1_0_0); + test_fallback_post::, U8, U4, U2>>(5, 3, 2, ApiVersion::V1_1_0); + test_fallback_post::, U8, U4, U2>>(5, 3, 2, ApiVersion::V1_1_0); } #[test] fn test_fallback_post_poseidon_two_partitions_smaller_top_8_8_2() { - test_fallback_post::>(5, 3, 2, ApiVersion::V1_0_0); - test_fallback_post::>(5, 3, 2, ApiVersion::V1_1_0); + test_fallback_post::, U8, U8, U2>>(5, 3, 2, ApiVersion::V1_0_0); + test_fallback_post::, U8, U8, U2>>(5, 3, 2, ApiVersion::V1_1_0); + test_fallback_post::, U8, U8, U2>>(5, 3, 2, ApiVersion::V1_1_0); } fn test_fallback_post( @@ -187,98 +205,354 @@ fn test_fallback_post( #[test] fn test_invalid_fallback_post_poseidon_single_partition_base_8() { - test_invalid_fallback_post::>(5, 5, 1, ApiVersion::V1_0_0); - test_invalid_fallback_post::>(5, 5, 1, ApiVersion::V1_1_0); + test_invalid_fallback_post::, U8, U0, U0>>( + 5, + 5, + 1, + ApiVersion::V1_0_0, + ); + test_invalid_fallback_post::, U8, U0, U0>>( + 5, + 5, + 1, + ApiVersion::V1_1_0, + ); + 
test_invalid_fallback_post::, U8, U0, U0>>( + 5, + 5, + 1, + ApiVersion::V1_1_0, + ); } #[test] fn test_invalid_fallback_post_poseidon_single_partition_smaller_base_8() { - test_invalid_fallback_post::>(3, 5, 1, ApiVersion::V1_0_0); - test_invalid_fallback_post::>(3, 5, 1, ApiVersion::V1_1_0); + test_invalid_fallback_post::, U8, U0, U0>>( + 3, + 5, + 1, + ApiVersion::V1_0_0, + ); + test_invalid_fallback_post::, U8, U0, U0>>( + 3, + 5, + 1, + ApiVersion::V1_1_0, + ); + test_invalid_fallback_post::, U8, U0, U0>>( + 3, + 5, + 1, + ApiVersion::V1_1_0, + ); } #[test] fn test_invalid_fallback_post_poseidon_two_partitions_base_8() { - test_invalid_fallback_post::>(4, 2, 2, ApiVersion::V1_0_0); - test_invalid_fallback_post::>(4, 2, 2, ApiVersion::V1_1_0); + test_invalid_fallback_post::, U8, U0, U0>>( + 4, + 2, + 2, + ApiVersion::V1_0_0, + ); + test_invalid_fallback_post::, U8, U0, U0>>( + 4, + 2, + 2, + ApiVersion::V1_1_0, + ); + test_invalid_fallback_post::, U8, U0, U0>>( + 4, + 2, + 2, + ApiVersion::V1_1_0, + ); } #[test] fn test_invalid_fallback_post_poseidon_two_partitions_smaller_base_8() { - test_invalid_fallback_post::>(5, 3, 2, ApiVersion::V1_0_0); - test_invalid_fallback_post::>(5, 3, 2, ApiVersion::V1_1_0); + test_invalid_fallback_post::, U8, U0, U0>>( + 5, + 3, + 2, + ApiVersion::V1_0_0, + ); + test_invalid_fallback_post::, U8, U0, U0>>( + 5, + 3, + 2, + ApiVersion::V1_1_0, + ); + test_invalid_fallback_post::, U8, U0, U0>>( + 5, + 3, + 2, + ApiVersion::V1_1_0, + ); } #[test] fn test_invalid_fallback_post_poseidon_single_partition_sub_8_4() { - test_invalid_fallback_post::>(5, 5, 1, ApiVersion::V1_0_0); - test_invalid_fallback_post::>(5, 5, 1, ApiVersion::V1_1_0); + test_invalid_fallback_post::, U8, U4, U0>>( + 5, + 5, + 1, + ApiVersion::V1_0_0, + ); + test_invalid_fallback_post::, U8, U4, U0>>( + 5, + 5, + 1, + ApiVersion::V1_1_0, + ); + test_invalid_fallback_post::, U8, U4, U0>>( + 5, + 5, + 1, + ApiVersion::V1_1_0, + ); } #[test] fn 
test_invalid_fallback_post_poseidon_single_partition_smaller_sub_8_4() { - test_invalid_fallback_post::>(3, 5, 1, ApiVersion::V1_0_0); - test_invalid_fallback_post::>(3, 5, 1, ApiVersion::V1_1_0); + test_invalid_fallback_post::, U8, U4, U0>>( + 3, + 5, + 1, + ApiVersion::V1_0_0, + ); + test_invalid_fallback_post::, U8, U4, U0>>( + 3, + 5, + 1, + ApiVersion::V1_1_0, + ); + test_invalid_fallback_post::, U8, U4, U0>>( + 3, + 5, + 1, + ApiVersion::V1_1_0, + ); } #[test] fn test_invalid_fallback_post_poseidon_two_partitions_sub_8_4() { - test_invalid_fallback_post::>(4, 2, 2, ApiVersion::V1_0_0); - test_invalid_fallback_post::>(4, 2, 2, ApiVersion::V1_1_0); + test_invalid_fallback_post::, U8, U4, U0>>( + 4, + 2, + 2, + ApiVersion::V1_0_0, + ); + test_invalid_fallback_post::, U8, U4, U0>>( + 4, + 2, + 2, + ApiVersion::V1_1_0, + ); + test_invalid_fallback_post::, U8, U4, U0>>( + 4, + 2, + 2, + ApiVersion::V1_1_0, + ); } #[test] fn test_invalid_fallback_post_poseidon_two_partitions_sub_8_8() { - test_invalid_fallback_post::>(4, 2, 2, ApiVersion::V1_0_0); - test_invalid_fallback_post::>(4, 2, 2, ApiVersion::V1_1_0); + test_invalid_fallback_post::, U8, U8, U0>>( + 4, + 2, + 2, + ApiVersion::V1_0_0, + ); + test_invalid_fallback_post::, U8, U8, U0>>( + 4, + 2, + 2, + ApiVersion::V1_1_0, + ); + test_invalid_fallback_post::, U8, U8, U0>>( + 4, + 2, + 2, + ApiVersion::V1_1_0, + ); } #[test] fn test_invalid_fallback_post_poseidon_two_partitions_smaller_sub_8_4() { - test_invalid_fallback_post::>(5, 3, 2, ApiVersion::V1_0_0); - test_invalid_fallback_post::>(5, 3, 2, ApiVersion::V1_1_0); + test_invalid_fallback_post::, U8, U4, U0>>( + 5, + 3, + 2, + ApiVersion::V1_0_0, + ); + test_invalid_fallback_post::, U8, U4, U0>>( + 5, + 3, + 2, + ApiVersion::V1_1_0, + ); + test_invalid_fallback_post::, U8, U4, U0>>( + 5, + 3, + 2, + ApiVersion::V1_1_0, + ); } #[test] fn test_invalid_fallback_post_poseidon_two_partitions_smaller_sub_8_8() { - test_invalid_fallback_post::>(5, 3, 2, 
ApiVersion::V1_0_0); - test_invalid_fallback_post::>(5, 3, 2, ApiVersion::V1_1_0); + test_invalid_fallback_post::, U8, U8, U0>>( + 5, + 3, + 2, + ApiVersion::V1_0_0, + ); + test_invalid_fallback_post::, U8, U8, U0>>( + 5, + 3, + 2, + ApiVersion::V1_1_0, + ); + test_invalid_fallback_post::, U8, U8, U0>>( + 5, + 3, + 2, + ApiVersion::V1_1_0, + ); } #[test] fn test_invalid_fallback_post_poseidon_single_partition_top_8_4_2() { - test_invalid_fallback_post::>(5, 5, 1, ApiVersion::V1_0_0); - test_invalid_fallback_post::>(5, 5, 1, ApiVersion::V1_1_0); + test_invalid_fallback_post::, U8, U4, U2>>( + 5, + 5, + 1, + ApiVersion::V1_0_0, + ); + test_invalid_fallback_post::, U8, U4, U2>>( + 5, + 5, + 1, + ApiVersion::V1_1_0, + ); + test_invalid_fallback_post::, U8, U4, U2>>( + 5, + 5, + 1, + ApiVersion::V1_1_0, + ); } #[test] fn test_invalid_fallback_post_poseidon_single_partition_top_8_8_2() { - test_invalid_fallback_post::>(5, 5, 1, ApiVersion::V1_0_0); - test_invalid_fallback_post::>(5, 5, 1, ApiVersion::V1_1_0); + test_invalid_fallback_post::, U8, U8, U2>>( + 5, + 5, + 1, + ApiVersion::V1_0_0, + ); + test_invalid_fallback_post::, U8, U8, U2>>( + 5, + 5, + 1, + ApiVersion::V1_1_0, + ); + test_invalid_fallback_post::, U8, U8, U2>>( + 5, + 5, + 1, + ApiVersion::V1_1_0, + ); } #[test] fn test_invalid_fallback_post_poseidon_single_partition_smaller_top_8_4_2() { - test_invalid_fallback_post::>(3, 5, 1, ApiVersion::V1_0_0); - test_invalid_fallback_post::>(3, 5, 1, ApiVersion::V1_1_0); + test_invalid_fallback_post::, U8, U4, U2>>( + 3, + 5, + 1, + ApiVersion::V1_0_0, + ); + test_invalid_fallback_post::, U8, U4, U2>>( + 3, + 5, + 1, + ApiVersion::V1_1_0, + ); + test_invalid_fallback_post::, U8, U4, U2>>( + 3, + 5, + 1, + ApiVersion::V1_1_0, + ); } #[test] fn test_invalid_fallback_post_poseidon_two_partitions_top_8_4_2() { - test_invalid_fallback_post::>(4, 2, 2, ApiVersion::V1_0_0); - test_invalid_fallback_post::>(4, 2, 2, ApiVersion::V1_1_0); + test_invalid_fallback_post::, U8, 
U4, U2>>( + 4, + 2, + 2, + ApiVersion::V1_0_0, + ); + test_invalid_fallback_post::, U8, U4, U2>>( + 4, + 2, + 2, + ApiVersion::V1_1_0, + ); + test_invalid_fallback_post::, U8, U4, U2>>( + 4, + 2, + 2, + ApiVersion::V1_1_0, + ); } #[test] fn test_invalid_fallback_post_poseidon_two_partitions_smaller_top_8_4_2() { - test_invalid_fallback_post::>(5, 3, 2, ApiVersion::V1_0_0); - test_invalid_fallback_post::>(5, 3, 2, ApiVersion::V1_1_0); + test_invalid_fallback_post::, U8, U4, U2>>( + 5, + 3, + 2, + ApiVersion::V1_0_0, + ); + test_invalid_fallback_post::, U8, U4, U2>>( + 5, + 3, + 2, + ApiVersion::V1_1_0, + ); + test_invalid_fallback_post::, U8, U4, U2>>( + 5, + 3, + 2, + ApiVersion::V1_1_0, + ); } #[test] fn test_invalid_fallback_post_poseidon_two_partitions_smaller_top_8_8_2() { - test_invalid_fallback_post::>(5, 3, 2, ApiVersion::V1_0_0); - test_invalid_fallback_post::>(5, 3, 2, ApiVersion::V1_1_0); + test_invalid_fallback_post::, U8, U8, U2>>( + 5, + 3, + 2, + ApiVersion::V1_0_0, + ); + test_invalid_fallback_post::, U8, U8, U2>>( + 5, + 3, + 2, + ApiVersion::V1_1_0, + ); + test_invalid_fallback_post::, U8, U8, U2>>( + 5, + 3, + 2, + ApiVersion::V1_1_0, + ); } fn test_invalid_fallback_post( diff --git a/storage-proofs-post/tests/rational_circuit.rs b/storage-proofs-post/tests/rational_circuit.rs index 125ab3fb06..6cf6509eff 100644 --- a/storage-proofs-post/tests/rational_circuit.rs +++ b/storage-proofs-post/tests/rational_circuit.rs @@ -22,10 +22,13 @@ use tempfile::tempdir; #[test] fn test_rational_post_circuit_poseidon() { - test_rational_post_circuit::>(3_770); + test_rational_post_circuit::>>(3_770); } -fn test_rational_post_circuit(expected_constraints: usize) { +fn test_rational_post_circuit(expected_constraints: usize) +where + ::Domain: Domain, +{ let rng = &mut XorShiftRng::from_seed(TEST_SEED); let leaves = 32 * get_base_tree_count::(); diff --git a/storage-proofs-post/tests/rational_compound.rs b/storage-proofs-post/tests/rational_compound.rs index 
c8426af8b8..e27d40fbd8 100644 --- a/storage-proofs-post/tests/rational_compound.rs +++ b/storage-proofs-post/tests/rational_compound.rs @@ -1,6 +1,7 @@ use std::collections::BTreeMap; use bellperson::{util_cs::test_cs::TestConstraintSystem, Circuit}; +use blstrs::Scalar as Fr; use filecoin_hashers::{poseidon::PoseidonHasher, Domain, HashFunction, Hasher}; use rand::{Rng, SeedableRng}; use rand_xorshift::XorShiftRng; @@ -18,10 +19,13 @@ use tempfile::tempdir; #[ignore] #[test] fn test_rational_post_compound_poseidon() { - test_rational_post_compound::>(); + test_rational_post_compound::>>(); } -fn test_rational_post_compound() { +fn test_rational_post_compound() +where + ::Domain: Domain, +{ let rng = &mut XorShiftRng::from_seed(TEST_SEED); let leaves = 32 * get_base_tree_count::(); diff --git a/storage-proofs-post/tests/rational_vanilla.rs b/storage-proofs-post/tests/rational_vanilla.rs index f3698ff2b2..10dfab40b4 100644 --- a/storage-proofs-post/tests/rational_vanilla.rs +++ b/storage-proofs-post/tests/rational_vanilla.rs @@ -1,10 +1,12 @@ use std::collections::BTreeMap; +use blstrs::Scalar as Fr; use filecoin_hashers::{ blake2s::Blake2sHasher, poseidon::PoseidonHasher, sha256::Sha256Hasher, Domain, HashFunction, Hasher, }; use generic_array::typenum::{U0, U2, U8}; +use pasta_curves::{Fp, Fq}; use rand::{Rng, SeedableRng}; use rand_xorshift::XorShiftRng; use storage_proofs_core::{ @@ -18,27 +20,32 @@ use tempfile::tempdir; #[test] fn test_rational_post_sha256_base_8() { - test_rational_post::>(); + test_rational_post::, U8, U0, U0>>(); + test_rational_post::, U8, U0, U0>>(); } #[test] fn test_rational_post_blake2s_base_8() { - test_rational_post::>(); + test_rational_post::, U8, U0, U0>>(); + test_rational_post::, U8, U0, U0>>(); } #[test] fn test_rational_post_poseidon_base_8() { - test_rational_post::>(); + test_rational_post::, U8, U0, U0>>(); + test_rational_post::, U8, U0, U0>>(); } #[test] fn test_rational_post_poseidon_sub_8_8() { - 
test_rational_post::>(); + test_rational_post::, U8, U8, U0>>(); + test_rational_post::, U8, U8, U0>>(); } #[test] fn test_rational_post_poseidon_top_8_8_2() { - test_rational_post::>(); + test_rational_post::, U8, U8, U2>>(); + test_rational_post::, U8, U8, U2>>(); } fn test_rational_post() @@ -127,27 +134,32 @@ where #[test] fn test_rational_post_validates_challenge_sha256_base_8() { - test_rational_post_validates_challenge::>(); + test_rational_post_validates_challenge::, U8, U0, U0>>(); + test_rational_post_validates_challenge::, U8, U0, U0>>(); } #[test] fn test_rational_post_validates_challenge_blake2s_base_8() { - test_rational_post_validates_challenge::>(); + test_rational_post_validates_challenge::, U8, U0, U0>>(); + test_rational_post_validates_challenge::, U8, U0, U0>>(); } #[test] fn test_rational_post_validates_challenge_poseidon_base_8() { - test_rational_post_validates_challenge::>(); + test_rational_post_validates_challenge::, U8, U0, U0>>(); + test_rational_post_validates_challenge::, U8, U0, U0>>(); } #[test] fn test_rational_post_validates_challenge_poseidon_sub_8_8() { - test_rational_post_validates_challenge::>(); + test_rational_post_validates_challenge::, U8, U8, U0>>(); + test_rational_post_validates_challenge::, U8, U8, U0>>(); } #[test] fn test_rational_post_validates_challenge_poseidon_top_8_8_2() { - test_rational_post_validates_challenge::>(); + test_rational_post_validates_challenge::, U8, U8, U2>>(); + test_rational_post_validates_challenge::, U8, U8, U2>>(); } fn test_rational_post_validates_challenge() { diff --git a/storage-proofs-update/Cargo.toml b/storage-proofs-update/Cargo.toml index aaa469f14d..4cdc7558c0 100644 --- a/storage-proofs-update/Cargo.toml +++ b/storage-proofs-update/Cargo.toml @@ -44,6 +44,8 @@ fr32 = { path = "../fr32", version = "^4.0.0", default-features = false } yastl = "0.1.2" fil_logger = "0.1" memmap = "0.7" +pasta_curves = "0.3.0" +typemap = "0.3.3" [target."cfg(target_arch = \"aarch64\")".dependencies] 
sha2 = { version = "0.9.3", features = ["compress", "asm"] } diff --git a/storage-proofs-update/src/challenges.rs b/storage-proofs-update/src/challenges.rs index 5e8b65373f..c224fd9d56 100644 --- a/storage-proofs-update/src/challenges.rs +++ b/storage-proofs-update/src/challenges.rs @@ -1,5 +1,6 @@ -use blstrs::Scalar as Fr; -use ff::{PrimeField, PrimeFieldBits}; +use ff::PrimeFieldBits; +use filecoin_hashers::{Domain, FieldArity}; +use generic_array::typenum::U2; use neptune::poseidon::Poseidon; use crate::constants::{ @@ -16,8 +17,12 @@ use crate::constants::{ // partition-index (`partition_bits`) is appended onto the most-significant end of the random bits. // Random bits are generated using the Poseidon hash function; each digest generates the random bits // for `challenges_per_digest` number challenges. -pub struct Challenges { - comm_r_new: TreeRDomain, +pub struct Challenges +where + F: PrimeFieldBits, + TreeRDomain: Domain, +{ + comm_r_new: TreeRDomain, // The partition-index bits which are appended onto each challenges random bits. partition_bits: u32, // The number of bits to generate per challenge. 
@@ -33,15 +38,19 @@ pub struct Challenges { challenges_remaining: usize, } -impl Challenges { - pub fn new(sector_nodes: usize, comm_r_new: TreeRDomain, k: usize) -> Self { +impl Challenges +where + F: PrimeFieldBits, + TreeRDomain: Domain, +{ + pub fn new(sector_nodes: usize, comm_r_new: TreeRDomain, k: usize) -> Self { let partitions = partition_count(sector_nodes); assert!(k < partitions); let challenge_bit_len = sector_nodes.trailing_zeros() as usize; let partition_bit_len = partitions.trailing_zeros() as usize; let random_bits_per_challenge = challenge_bit_len - partition_bit_len; - let challenges_per_digest = Fr::CAPACITY as usize / random_bits_per_challenge; + let challenges_per_digest = F::CAPACITY as usize / random_bits_per_challenge; let partition_bits = (k << random_bits_per_challenge) as u32; @@ -57,17 +66,17 @@ impl Challenges { challenges_per_digest, digest_index_all_partitions, i: 0, - digest_bits: Vec::with_capacity(Fr::NUM_BITS as usize), + digest_bits: Vec::with_capacity(F::NUM_BITS as usize), challenges_remaining: challenge_count, } } - pub fn new_poseidon(sector_nodes: usize, comm_r_new: TreeRDomain) -> Self { + pub fn new_poseidon(sector_nodes: usize, comm_r_new: TreeRDomain) -> Self { let repeats = partition_count(sector_nodes); let challenge_bit_len = sector_nodes.trailing_zeros() as usize; let random_bits_per_challenge = challenge_bit_len; - let challenges_per_digest = Fr::CAPACITY as usize / random_bits_per_challenge; + let challenges_per_digest = F::CAPACITY as usize / random_bits_per_challenge; let challenge_count = challenge_count(sector_nodes) * repeats; let digest_index_all_partitions = 0; @@ -79,13 +88,17 @@ impl Challenges { challenges_per_digest, digest_index_all_partitions, i: 0, - digest_bits: Vec::with_capacity(Fr::NUM_BITS as usize), + digest_bits: Vec::with_capacity(F::NUM_BITS as usize), challenges_remaining: challenge_count, } } } -impl Iterator for Challenges { +impl Iterator for Challenges +where + F: PrimeFieldBits, + 
TreeRDomain: Domain, +{ // All sector-sizes have challenges that fit within 32 bits. type Item = u32; @@ -96,12 +109,12 @@ impl Iterator for Challenges { // `digest = H(comm_r_new || digest_index)` where `digest_index` is across all partitions. if self.i == 0 { - let digest_index = Fr::from(self.digest_index_all_partitions as u64); - let digest = Poseidon::new_with_preimage( - &[self.comm_r_new.into(), digest_index], - &POSEIDON_CONSTANTS_GEN_RANDOMNESS, - ) - .hash(); + let comm_r_new: F = self.comm_r_new.into(); + let digest_index = F::from(self.digest_index_all_partitions as u64); + let consts = POSEIDON_CONSTANTS_GEN_RANDOMNESS + .get::>() + .expect("arity-2 Poseidon constants not found for field"); + let digest = Poseidon::new_with_preimage(&[comm_r_new, digest_index], consts).hash(); self.digest_bits = digest.to_le_bits().into_iter().collect(); } @@ -137,6 +150,7 @@ mod tests { use std::collections::HashMap; + use blstrs::Scalar as Fr; use filecoin_hashers::Domain; use rand::SeedableRng; use rand_xorshift::XorShiftRng; @@ -155,7 +169,7 @@ mod tests { type PartitionIndex = usize; let mut rng = XorShiftRng::from_seed(TEST_SEED); - let comm_r_new = TreeRDomain::random(&mut rng); + let comm_r_new = TreeRDomain::::random(&mut rng); let test_vectors: HashMap<(SectorNodes, PartitionIndex), [u32; 5]> = { let mut hm = HashMap::new(); @@ -254,7 +268,7 @@ mod tests { let mut rng = XorShiftRng::from_seed(TEST_SEED); for sector_nodes in ALLOWED_SECTOR_SIZES.iter().copied() { - let comm_r_new = TreeRDomain::random(&mut rng); + let comm_r_new = TreeRDomain::::random(&mut rng); let partitions = partition_count(sector_nodes); let partition_challenges = challenge_count(sector_nodes); diff --git a/storage-proofs-update/src/circuit.rs b/storage-proofs-update/src/circuit.rs index d353e26632..69ea23d775 100644 --- a/storage-proofs-update/src/circuit.rs +++ b/storage-proofs-update/src/circuit.rs @@ -10,20 +10,19 @@ use bellperson::{ }; use blstrs::Scalar as Fr; use ff::{Field, 
PrimeFieldBits}; -use filecoin_hashers::{HashFunction, Hasher}; -use generic_array::typenum::Unsigned; +use filecoin_hashers::{HashFunction, Hasher, PoseidonArity}; use neptune::circuit::poseidon_hash; use storage_proofs_core::{ compound_proof::CircuitComponent, gadgets::{insertion::select, por::por_no_challenge_input}, - merkle::{MerkleProof, MerkleProofTrait, MerkleTreeTrait}, + merkle::{MerkleProof, MerkleProofTrait}, }; use crate::{ constants::{ apex_leaf_count, challenge_count, hs, partition_count, validate_tree_r_shape, TreeD, - TreeDArity, TreeDDomain, TreeDHasher, TreeRDomain, TreeRHasher, - POSEIDON_CONSTANTS_GEN_RANDOMNESS, + TreeDArity, TreeDDomain, TreeDHasher, TreeR, TreeRDomain, TreeRHasher, + POSEIDON_CONSTANTS_GEN_RANDOMNESS_BLS, }, gadgets::{apex_por, gen_challenge_bits, get_challenge_high_bits, label_r_new}, vanilla, PublicParams, @@ -48,9 +47,9 @@ impl PublicInputs { sector_nodes: usize, k: usize, h: usize, - comm_r_old: TreeRDomain, - comm_d_new: TreeDDomain, - comm_r_new: TreeRDomain, + comm_r_old: TreeRDomain, + comm_d_new: TreeDDomain, + comm_r_new: TreeRDomain, ) -> Self { let partition_count = partition_count(sector_nodes); assert!( @@ -97,9 +96,12 @@ impl PublicInputs { } } -pub struct ChallengeProof +#[derive(Clone)] +pub struct ChallengeProof where - TreeR: MerkleTreeTrait, + U: PoseidonArity, + V: PoseidonArity, + W: PoseidonArity, { pub leaf_r_old: Option, pub path_r_old: Vec>>, @@ -107,59 +109,35 @@ where pub path_d_new: Vec>>, pub leaf_r_new: Option, pub path_r_new: Vec>>, - pub _tree_r: PhantomData, -} - -// Implement `Clone` by hand because `MerkleTreeTrait` does not implement `Clone`. 
-impl Clone for ChallengeProof -where - TreeR: MerkleTreeTrait, -{ - fn clone(&self) -> Self { - ChallengeProof { - leaf_r_old: self.leaf_r_old, - path_r_old: self.path_r_old.clone(), - leaf_d_new: self.leaf_d_new, - path_d_new: self.path_d_new.clone(), - leaf_r_new: self.leaf_r_new, - path_r_new: self.path_r_new.clone(), - _tree_r: PhantomData, - } - } + pub _tree_r: PhantomData<(U, V, W)>, } -impl From> for ChallengeProof +impl From> for ChallengeProof where - TreeR: MerkleTreeTrait, + U: PoseidonArity, + V: PoseidonArity, + W: PoseidonArity, { - fn from(vanilla_challenge_proof: vanilla::ChallengeProof) -> Self { + fn from(challenge_proof: vanilla::ChallengeProof) -> Self { let vanilla::ChallengeProof { proof_r_old, proof_d_new, proof_r_new, - } = vanilla_challenge_proof; + } = challenge_proof; ChallengeProof::from_merkle_proofs(proof_r_old, proof_d_new, proof_r_new) } } -impl ChallengeProof +impl ChallengeProof where - TreeR: MerkleTreeTrait, + U: PoseidonArity, + V: PoseidonArity, + W: PoseidonArity, { pub fn from_merkle_proofs( - proof_r_old: MerkleProof< - TreeRHasher, - TreeR::Arity, - TreeR::SubTreeArity, - TreeR::TopTreeArity, - >, - proof_d_new: MerkleProof, - proof_r_new: MerkleProof< - TreeRHasher, - TreeR::Arity, - TreeR::SubTreeArity, - TreeR::TopTreeArity, - >, + proof_r_old: MerkleProof, U, V, W>, + proof_d_new: MerkleProof, TreeDArity>, + proof_r_new: MerkleProof, U, V, W>, ) -> Self { let leaf_r_old = Some(proof_r_old.leaf().into()); let path_r_old: Vec>> = proof_r_old @@ -201,9 +179,9 @@ where // TreeROld and TreeRNew have the same shape, thus have the same Merkle path length. 
let path_r = { - let base_arity = TreeR::Arity::to_usize(); - let sub_arity = TreeR::SubTreeArity::to_usize(); - let top_arity = TreeR::TopTreeArity::to_usize(); + let base_arity = U::to_usize(); + let sub_arity = V::to_usize(); + let top_arity = W::to_usize(); let mut bits_remaining = challenge_bit_len; let mut sub_and_top_path = vec![]; @@ -237,9 +215,11 @@ where } #[derive(Clone)] -pub struct PrivateInputs +pub struct PrivateInputs where - TreeR: MerkleTreeTrait, + U: PoseidonArity, + V: PoseidonArity, + W: PoseidonArity, { // CommC created by running SDR-PoRep on the old/un-updated data. pub comm_c: Option, @@ -253,17 +233,19 @@ where pub apex_leafs: Vec>, // Generate three Merkle proofs (TreeROld, TreeDNew, TreeRNew) for each of this partition's // challenges. - pub challenge_proofs: Vec>, + pub challenge_proofs: Vec>, } -impl PrivateInputs +impl PrivateInputs where - TreeR: MerkleTreeTrait, + U: PoseidonArity, + V: PoseidonArity, + W: PoseidonArity, { pub fn new( - comm_c: TreeRDomain, - apex_leafs: &[TreeDDomain], - challenge_proofs: &[vanilla::ChallengeProof], + comm_c: TreeRDomain, + apex_leafs: &[TreeDDomain], + challenge_proofs: &[vanilla::ChallengeProof], ) -> Self { let root_r_old: Fr = challenge_proofs[0].proof_r_old.root().into(); let root_r_new: Fr = challenge_proofs[0].proof_r_new.root().into(); @@ -274,7 +256,7 @@ where .map(|leaf| Some(leaf.into())) .collect(); - let challenge_proofs: Vec> = challenge_proofs + let challenge_proofs: Vec> = challenge_proofs .iter() .cloned() .map(ChallengeProof::from) @@ -302,25 +284,31 @@ where } } -pub struct EmptySectorUpdateCircuit +pub struct EmptySectorUpdateCircuit where - TreeR: MerkleTreeTrait, + U: PoseidonArity, + V: PoseidonArity, + W: PoseidonArity, { pub pub_params: PublicParams, pub pub_inputs: PublicInputs, - pub priv_inputs: PrivateInputs, + pub priv_inputs: PrivateInputs, } -impl CircuitComponent for EmptySectorUpdateCircuit +impl CircuitComponent for EmptySectorUpdateCircuit where - TreeR: 
MerkleTreeTrait, + U: PoseidonArity, + V: PoseidonArity, + W: PoseidonArity, { type ComponentPrivateInputs = (); } -impl EmptySectorUpdateCircuit +impl EmptySectorUpdateCircuit where - TreeR: MerkleTreeTrait, + U: PoseidonArity, + V: PoseidonArity, + W: PoseidonArity, { pub fn blank(pub_params: PublicParams) -> Self { let sector_bytes = (pub_params.sector_nodes as u64) << 5; @@ -330,7 +318,7 @@ where "invalid public-params for sector-size", ); let pub_inputs = PublicInputs::empty(); - let priv_inputs = PrivateInputs::::empty(pub_params.sector_nodes); + let priv_inputs = PrivateInputs::::empty(pub_params.sector_nodes); EmptySectorUpdateCircuit { pub_params, pub_inputs, @@ -339,9 +327,11 @@ where } } -impl Circuit for EmptySectorUpdateCircuit +impl Circuit for EmptySectorUpdateCircuit where - TreeR: MerkleTreeTrait, + U: PoseidonArity, + V: PoseidonArity, + W: PoseidonArity, { fn synthesize>(self, cs: &mut CS) -> Result<(), SynthesisError> { let EmptySectorUpdateCircuit { @@ -372,7 +362,7 @@ where }, } = self; - validate_tree_r_shape::(sector_nodes); + validate_tree_r_shape::(sector_nodes); let hs = hs(sector_nodes); let h_select_bit_len = hs.len(); @@ -497,7 +487,7 @@ where let phi = poseidon_hash( cs.namespace(|| "phi"), vec![comm_d_new.clone(), comm_r_old.clone()], - &POSEIDON_CONSTANTS_GEN_RANDOMNESS, + &*POSEIDON_CONSTANTS_GEN_RANDOMNESS_BLS, )?; // Allocate private-inputs; excludes each challenge's Merkle proofs. @@ -538,7 +528,7 @@ where // Assert that the witnessed `root_r_old` and `root_r_new` are consistent with the // public `comm_r_old` and `comm_r_new` via `comm_r = H(comm_c || root_r)`. 
- let comm_r_old_calc = ::Function::hash2_circuit( + let comm_r_old_calc = as Hasher>::Function::hash2_circuit( cs.namespace(|| "comm_r_old_calc"), &comm_c, &root_r_old, @@ -549,7 +539,7 @@ where |lc| lc + CS::one(), |lc| lc + comm_r_old.get_variable(), ); - let comm_r_new_calc = ::Function::hash2_circuit( + let comm_r_new_calc = as Hasher>::Function::hash2_circuit( cs.namespace(|| "comm_r_new_calc"), &comm_c, &root_r_new, @@ -601,7 +591,7 @@ where let rho = poseidon_hash( cs.namespace(|| format!("rho (c_index={})", c_index)), vec![phi.clone(), c_high.clone()], - &POSEIDON_CONSTANTS_GEN_RANDOMNESS, + &*POSEIDON_CONSTANTS_GEN_RANDOMNESS_BLS, )?; // Validate this challenge's Merkle proofs. @@ -659,7 +649,7 @@ where }) .collect::>>, SynthesisError>>()?; - por_no_challenge_input::( + por_no_challenge_input::, _>( cs.namespace(|| format!("por tree_r_old (c_index={})", c_index)), c_bits.clone(), leaf_r_old.clone(), @@ -689,7 +679,7 @@ where }) .collect::>>, SynthesisError>>()?; - por_no_challenge_input::( + por_no_challenge_input::, _>( cs.namespace(|| format!("por tree_r_new (c_index={})", c_index)), c_bits.clone(), leaf_r_new.clone(), @@ -737,7 +727,7 @@ where }) .collect::>>, SynthesisError>>()?; - por_no_challenge_input::( + por_no_challenge_input::, _>( cs.namespace(|| format!("por to_apex_leaf (c_index={})", c_index)), c_bits_to_apex_leaf, leaf_d_new, diff --git a/storage-proofs-update/src/compound.rs b/storage-proofs-update/src/compound.rs index 23aeaac668..4ad92de03b 100644 --- a/storage-proofs-update/src/compound.rs +++ b/storage-proofs-update/src/compound.rs @@ -1,7 +1,7 @@ use std::marker::PhantomData; use blstrs::Scalar as Fr; - +use filecoin_hashers::PoseidonArity; use storage_proofs_core::{ compound_proof::{CircuitComponent, CompoundProof}, error::Result, @@ -10,37 +10,44 @@ use storage_proofs_core::{ }; use crate::{ - circuit, constants::TreeRHasher, EmptySectorUpdate, EmptySectorUpdateCircuit, PartitionProof, + circuit, constants::TreeR, 
EmptySectorUpdate, EmptySectorUpdateCircuit, PartitionProof, PublicInputs, PublicParams, }; -pub struct EmptySectorUpdateCompound +pub struct EmptySectorUpdateCompound where - TreeR: MerkleTreeTrait, + U: PoseidonArity, + V: PoseidonArity, + W: PoseidonArity, { - pub _tree_r: PhantomData, + pub _tree_r: PhantomData<(U, V, W)>, } -impl CacheableParameters, PublicParams> - for EmptySectorUpdateCompound +impl CacheableParameters, PublicParams> + for EmptySectorUpdateCompound where - TreeR: MerkleTreeTrait, + U: PoseidonArity, + V: PoseidonArity, + W: PoseidonArity, { fn cache_prefix() -> String { - format!("empty-sector-update-{}", TreeR::display()) + format!("empty-sector-update-{}", TreeR::::display()) } } -impl<'a, TreeR> CompoundProof<'a, EmptySectorUpdate, EmptySectorUpdateCircuit> - for EmptySectorUpdateCompound +impl<'a, U, V, W> + CompoundProof<'a, EmptySectorUpdate, EmptySectorUpdateCircuit> + for EmptySectorUpdateCompound where - TreeR: 'static + MerkleTreeTrait, + U: PoseidonArity, + V: PoseidonArity, + W: PoseidonArity, { // Generates a partition circuit's public-inputs. If the `k` argument is `Some` we overwrite // `pub_inputs.k` with the `k` argument's value, otherwise if the `k` argument is `None` we use // `pub_inputs.k` as the circuit's public-input. fn generate_public_inputs( - pub_inputs: &PublicInputs, + pub_inputs: &PublicInputs, pub_params: &PublicParams, k: Option, ) -> Result> { @@ -71,12 +78,12 @@ where // with the `k` argument's value, otherwise if the `k` argument is `None` we use `pub_inputs.k` // as the circuit's public-input. 
fn circuit( - pub_inputs: &PublicInputs, - _priv_inputs: as CircuitComponent>::ComponentPrivateInputs, - vanilla_proof: &PartitionProof, + pub_inputs: &PublicInputs, + _priv_inputs: as CircuitComponent>::ComponentPrivateInputs, + vanilla_proof: &PartitionProof, pub_params: &PublicParams, k: Option, - ) -> Result> { + ) -> Result> { // Prioritize the partition-index provided via the `k` argument; default to `pub_inputs.k`. let k = k.unwrap_or(pub_inputs.k); @@ -110,7 +117,7 @@ where }) } - fn blank_circuit(pub_params: &PublicParams) -> EmptySectorUpdateCircuit { + fn blank_circuit(pub_params: &PublicParams) -> EmptySectorUpdateCircuit { EmptySectorUpdateCircuit::blank(pub_params.clone()) } } diff --git a/storage-proofs-update/src/constants.rs b/storage-proofs-update/src/constants.rs index 7c45aeb903..946ed1cddb 100644 --- a/storage-proofs-update/src/constants.rs +++ b/storage-proofs-update/src/constants.rs @@ -2,8 +2,9 @@ use blstrs::Scalar as Fr; use filecoin_hashers::{ poseidon::{PoseidonDomain, PoseidonHasher}, sha256::{Sha256Domain, Sha256Hasher}, + FieldArity, PoseidonArity, }; -use generic_array::typenum::{Unsigned, U0, U2, U8}; +use generic_array::typenum::{U0, U2, U8}; use lazy_static::lazy_static; use merkletree::store::DiskStore; use neptune::{ @@ -11,14 +12,38 @@ use neptune::{ poseidon::PoseidonConstants, Strength, }; -use storage_proofs_core::merkle::{BinaryMerkleTree, LCStore, LCTree, MerkleTreeTrait}; - -// Use a custom domain separation tag when generating randomness phi, rho, and challenges bits. -pub const HASH_TYPE_GEN_RANDOMNESS: HashType = HashType::Custom(CType::Arbitrary(1)); +use pasta_curves::{Fp, Fq}; +use storage_proofs_core::merkle::{BinaryMerkleTree, LCTree}; +use typemap::ShareMap; lazy_static! 
{ - pub static ref POSEIDON_CONSTANTS_GEN_RANDOMNESS: PoseidonConstants:: = - PoseidonConstants::new_with_strength_and_type(Strength::Standard, HASH_TYPE_GEN_RANDOMNESS); + // Use a custom domain separation tag `HashType` when using Poseidon to generate randomness + // (i.e. phi, rho, and challenges bits). + pub static ref POSEIDON_CONSTANTS_GEN_RANDOMNESS_BLS: PoseidonConstants = + PoseidonConstants::new_with_strength_and_type( + Strength::Standard, + HashType::Custom(CType::Arbitrary(1)), + ); + + pub static ref POSEIDON_CONSTANTS_GEN_RANDOMNESS_PALLAS: PoseidonConstants = + PoseidonConstants::new_with_strength_and_type( + Strength::Standard, + HashType::Custom(CType::Arbitrary(1)), + ); + + pub static ref POSEIDON_CONSTANTS_GEN_RANDOMNESS_VESTA: PoseidonConstants = + PoseidonConstants::new_with_strength_and_type( + Strength::Standard, + HashType::Custom(CType::Arbitrary(1)), + ); + + pub static ref POSEIDON_CONSTANTS_GEN_RANDOMNESS: ShareMap = { + let mut tm = ShareMap::custom(); + tm.insert::>(&*POSEIDON_CONSTANTS_GEN_RANDOMNESS_BLS); + tm.insert::>(&*POSEIDON_CONSTANTS_GEN_RANDOMNESS_PALLAS); + tm.insert::>(&*POSEIDON_CONSTANTS_GEN_RANDOMNESS_VESTA); + tm + }; } // Sector-sizes measured in nodes. @@ -45,22 +70,24 @@ pub const ALLOWED_SECTOR_SIZES: [usize; 11] = [ SECTOR_SIZE_8_MIB, SECTOR_SIZE_16_MIB, SECTOR_SIZE_512_MIB, - // published sector-sizes + // production sector-sizes SECTOR_SIZE_32_GIB, SECTOR_SIZE_64_GIB, ]; -pub type TreeD = BinaryMerkleTree; -pub type TreeDHasher = Sha256Hasher; -pub type TreeDDomain = Sha256Domain; -pub type TreeDStore = DiskStore; +// Note: these TreeD constants are only valid for the non-Poseidon version of EmptySectorUpdate; +// EmptySectorUpdate-Poseidon uses TreeR for its TreeD. 
+pub type TreeD = BinaryMerkleTree>; pub type TreeDArity = U2; +pub type TreeDStore = DiskStore>; +pub type TreeDDomain = Sha256Domain; +pub type TreeDHasher = Sha256Hasher; -pub type TreeRHasher = PoseidonHasher; -pub type TreeRDomain = PoseidonDomain; -pub type TreeRStore = LCStore; -// All valid TreeR's have the same base-tree shape. -pub type TreeRBaseTree = LCTree; +pub type TreeR = LCTree, U, V, W>; +// All valid TreeR's shapes have the same base-tree shape. +pub type TreeRBase = LCTree, U8, U0, U0>; +pub type TreeRDomain = PoseidonDomain; +pub type TreeRHasher = PoseidonHasher; // The number of partitions for the given sector-size. pub const fn partition_count(sector_nodes: usize) -> usize { @@ -110,10 +137,15 @@ pub const fn apex_leaf_count(sector_nodes: usize) -> usize { } } -pub fn validate_tree_r_shape(sector_nodes: usize) { - let base_arity = TreeR::Arity::to_usize(); - let sub_arity = TreeR::SubTreeArity::to_usize(); - let top_arity = TreeR::TopTreeArity::to_usize(); +pub fn validate_tree_r_shape(sector_nodes: usize) +where + U: PoseidonArity, + V: PoseidonArity, + W: PoseidonArity, +{ + let base_arity = U::to_usize(); + let sub_arity = V::to_usize(); + let top_arity = W::to_usize(); let arities = (base_arity, sub_arity, top_arity); let arities_expected = match sector_nodes { @@ -128,7 +160,7 @@ pub fn validate_tree_r_shape(sector_nodes: usize) { SECTOR_SIZE_512_MIB => (8, 0, 0), SECTOR_SIZE_32_GIB => (8, 8, 0), SECTOR_SIZE_64_GIB => (8, 8, 2), - _ => unreachable!(), + _ => unimplemented!("sector-size not supported"), }; assert_eq!(arities, arities_expected); diff --git a/storage-proofs-update/src/gadgets.rs b/storage-proofs-update/src/gadgets.rs index d83004c448..e4b42727e1 100644 --- a/storage-proofs-update/src/gadgets.rs +++ b/storage-proofs-update/src/gadgets.rs @@ -12,7 +12,7 @@ use filecoin_hashers::{HashFunction, Hasher}; use neptune::circuit::poseidon_hash; use storage_proofs_core::gadgets::por::por_no_challenge_input; -use 
crate::constants::{TreeD, TreeDHasher, POSEIDON_CONSTANTS_GEN_RANDOMNESS}; +use crate::constants::{TreeD, TreeDHasher, POSEIDON_CONSTANTS_GEN_RANDOMNESS_BLS}; // Allocates `num` as `Fr::NUM_BITS` number of bits. pub fn allocated_num_to_allocated_bits>( @@ -61,7 +61,7 @@ pub fn apex_por>( .chunks(2) .enumerate() .map(|(i, siblings)| { - ::Function::hash2_circuit( + as Hasher>::Function::hash2_circuit( cs.namespace(|| { format!( "apex_tree generation hash (tree_row={}, siblings={})", @@ -79,7 +79,7 @@ pub fn apex_por>( // This partition's apex-tree root. let apex_root = apex_tree.last().unwrap()[0].clone(); - por_no_challenge_input::( + por_no_challenge_input::, _>( cs.namespace(|| "partition-tree por"), partition_bits, apex_root, @@ -131,7 +131,7 @@ pub fn gen_challenge_bits>( let digest = poseidon_hash( cs.namespace(|| format!("digest_{}", j)), vec![comm_r_new.clone(), digest_index.clone()], - &POSEIDON_CONSTANTS_GEN_RANDOMNESS, + &*POSEIDON_CONSTANTS_GEN_RANDOMNESS_BLS, )?; // Allocate `digest` as `Fr::NUM_BITS` bits. 
@@ -286,12 +286,16 @@ mod tests { use crate::{ challenges::Challenges, constants::{ - apex_leaf_count, challenge_count, partition_count, TreeDDomain, TreeDHasher, - TreeRDomain, ALLOWED_SECTOR_SIZES, SECTOR_SIZE_16_KIB, SECTOR_SIZE_1_KIB, - SECTOR_SIZE_2_KIB, SECTOR_SIZE_32_KIB, SECTOR_SIZE_4_KIB, SECTOR_SIZE_8_KIB, + self, apex_leaf_count, challenge_count, partition_count, ALLOWED_SECTOR_SIZES, + SECTOR_SIZE_16_KIB, SECTOR_SIZE_1_KIB, SECTOR_SIZE_2_KIB, SECTOR_SIZE_32_KIB, + SECTOR_SIZE_4_KIB, SECTOR_SIZE_8_KIB, }, }; + type TreeDDomain = constants::TreeDDomain; + type TreeDHasher = constants::TreeDHasher; + type TreeRDomain = constants::TreeRDomain; + #[test] fn test_gen_challenge_bits_gadget() { let mut rng = XorShiftRng::from_seed(TEST_SEED); @@ -314,7 +318,7 @@ mod tests { for k in 0..partition_count { let challenges = Challenges::new(sector_nodes, comm_r_new, k); - let mut cs = TestConstraintSystem::::new(); + let mut cs = TestConstraintSystem::new(); let comm_r_new = AllocatedNum::alloc(cs.namespace(|| "comm_r_new"), || Ok(comm_r_new.into())) .unwrap(); @@ -400,7 +404,7 @@ mod tests { .chunks(apex_leafs_per_partition) .enumerate() { - let mut cs = TestConstraintSystem::::new(); + let mut cs = TestConstraintSystem::new(); let comm_d = AllocatedNum::alloc(cs.namespace(|| "comm_d"), || Ok(comm_d.into())).unwrap(); diff --git a/storage-proofs-update/src/poseidon/circuit.rs b/storage-proofs-update/src/poseidon/circuit.rs index 9b0063583b..0e3e023be9 100644 --- a/storage-proofs-update/src/poseidon/circuit.rs +++ b/storage-proofs-update/src/poseidon/circuit.rs @@ -8,19 +8,18 @@ use bellperson::{ }; use blstrs::Scalar as Fr; use ff::{Field, PrimeFieldBits}; -use filecoin_hashers::{HashFunction, Hasher}; -use generic_array::typenum::Unsigned; +use filecoin_hashers::{HashFunction, Hasher, PoseidonArity}; use neptune::circuit::poseidon_hash; use storage_proofs_core::{ compound_proof::CircuitComponent, gadgets::por::por_no_challenge_input, - merkle::{MerkleProof, 
MerkleProofTrait, MerkleTreeTrait}, + merkle::{MerkleProof, MerkleProofTrait}, }; use crate::{ constants::{ - challenge_count_poseidon, hs, validate_tree_r_shape, TreeRDomain, TreeRHasher, - POSEIDON_CONSTANTS_GEN_RANDOMNESS, + challenge_count_poseidon, hs, validate_tree_r_shape, TreeR, TreeRDomain, TreeRHasher, + POSEIDON_CONSTANTS_GEN_RANDOMNESS_BLS, }, gadgets::{gen_challenge_bits, get_challenge_high_bits, label_r_new}, poseidon::vanilla, @@ -44,9 +43,9 @@ impl PublicInputs { pub fn new( sector_nodes: usize, h: usize, - comm_r_old: TreeRDomain, - comm_d_new: TreeRDomain, - comm_r_new: TreeRDomain, + comm_r_old: TreeRDomain, + comm_d_new: TreeRDomain, + comm_r_new: TreeRDomain, ) -> Self { let hs_index = hs(sector_nodes) .iter() @@ -84,9 +83,12 @@ impl PublicInputs { } } -pub struct ChallengeProof +#[derive(Clone)] +pub struct ChallengeProof where - TreeR: MerkleTreeTrait, + U: PoseidonArity, + V: PoseidonArity, + W: PoseidonArity, { pub leaf_r_old: Option, pub path_r_old: Vec>>, @@ -94,64 +96,35 @@ where pub path_d_new: Vec>>, pub leaf_r_new: Option, pub path_r_new: Vec>>, - pub _tree_r: PhantomData, + pub _tree_r: PhantomData<(U, V, W)>, } -// Implement `Clone` by hand because `MerkleTreeTrait` does not implement `Clone`. 
-impl Clone for ChallengeProof +impl From> for ChallengeProof where - TreeR: MerkleTreeTrait, + U: PoseidonArity, + V: PoseidonArity, + W: PoseidonArity, { - fn clone(&self) -> Self { - ChallengeProof { - leaf_r_old: self.leaf_r_old, - path_r_old: self.path_r_old.clone(), - leaf_d_new: self.leaf_d_new, - path_d_new: self.path_d_new.clone(), - leaf_r_new: self.leaf_r_new, - path_r_new: self.path_r_new.clone(), - _tree_r: PhantomData, - } - } -} - -impl From> for ChallengeProof -where - TreeR: MerkleTreeTrait, -{ - fn from(vanilla_challenge_proof: vanilla::ChallengeProof) -> Self { + fn from(challenge_proof: vanilla::ChallengeProof) -> Self { let vanilla::ChallengeProof { proof_r_old, proof_d_new, proof_r_new, - } = vanilla_challenge_proof; + } = challenge_proof; ChallengeProof::from_merkle_proofs(proof_r_old, proof_d_new, proof_r_new) } } -impl ChallengeProof +impl ChallengeProof where - TreeR: MerkleTreeTrait, + U: PoseidonArity, + V: PoseidonArity, + W: PoseidonArity, { pub fn from_merkle_proofs( - proof_r_old: MerkleProof< - TreeRHasher, - TreeR::Arity, - TreeR::SubTreeArity, - TreeR::TopTreeArity, - >, - proof_d_new: MerkleProof< - TreeRHasher, - TreeR::Arity, - TreeR::SubTreeArity, - TreeR::TopTreeArity, - >, - proof_r_new: MerkleProof< - TreeRHasher, - TreeR::Arity, - TreeR::SubTreeArity, - TreeR::TopTreeArity, - >, + proof_r_old: MerkleProof, U, V, W>, + proof_d_new: MerkleProof, U, V, W>, + proof_r_new: MerkleProof, U, V, W>, ) -> Self { let leaf_r_old = Some(proof_r_old.leaf().into()); let path_r_old: Vec>> = proof_r_old @@ -190,9 +163,9 @@ where // TreeROld and TreeRNew and TreeD have the same shape, thus have the same Merkle path length. 
let path_r = { - let base_arity = TreeR::Arity::to_usize(); - let sub_arity = TreeR::SubTreeArity::to_usize(); - let top_arity = TreeR::TopTreeArity::to_usize(); + let base_arity = U::to_usize(); + let sub_arity = V::to_usize(); + let top_arity = W::to_usize(); let mut bits_remaining = challenge_bit_len; let mut sub_and_top_path = vec![]; @@ -226,9 +199,11 @@ where } #[derive(Clone)] -pub struct PrivateInputs +pub struct PrivateInputs where - TreeR: MerkleTreeTrait, + U: PoseidonArity, + V: PoseidonArity, + W: PoseidonArity, { // CommC created by running SDR-PoRep on the old/un-updated data. pub comm_c: Option, @@ -239,18 +214,23 @@ where pub root_r_new: Option, // Generate three Merkle proofs (TreeROld, TreeDNew, TreeRNew) for each of this partition's // challenges. - pub challenge_proofs: Vec>, + pub challenge_proofs: Vec>, } -impl PrivateInputs +impl PrivateInputs where - TreeR: MerkleTreeTrait, + U: PoseidonArity, + V: PoseidonArity, + W: PoseidonArity, { - pub fn new(comm_c: TreeRDomain, challenge_proofs: &[vanilla::ChallengeProof]) -> Self { + pub fn new( + comm_c: TreeRDomain, + challenge_proofs: &[vanilla::ChallengeProof], + ) -> Self { let root_r_old: Fr = challenge_proofs[0].proof_r_old.root().into(); let root_r_new: Fr = challenge_proofs[0].proof_r_new.root().into(); - let challenge_proofs: Vec> = challenge_proofs + let challenge_proofs: Vec> = challenge_proofs .iter() .cloned() .map(ChallengeProof::from) @@ -277,25 +257,31 @@ where } } -pub struct EmptySectorUpdateCircuit +pub struct EmptySectorUpdateCircuit where - TreeR: MerkleTreeTrait, + U: PoseidonArity, + V: PoseidonArity, + W: PoseidonArity, { pub pub_params: PublicParams, pub pub_inputs: PublicInputs, - pub priv_inputs: PrivateInputs, + pub priv_inputs: PrivateInputs, } -impl CircuitComponent for EmptySectorUpdateCircuit +impl CircuitComponent for EmptySectorUpdateCircuit where - TreeR: MerkleTreeTrait, + U: PoseidonArity, + V: PoseidonArity, + W: PoseidonArity, { type ComponentPrivateInputs = 
(); } -impl EmptySectorUpdateCircuit +impl EmptySectorUpdateCircuit where - TreeR: MerkleTreeTrait, + U: PoseidonArity, + V: PoseidonArity, + W: PoseidonArity, { pub fn blank(pub_params: PublicParams) -> Self { let sector_bytes = (pub_params.sector_nodes as u64) << 5; @@ -305,7 +291,7 @@ where "invalid public-params for sector-size", ); let pub_inputs = PublicInputs::empty(); - let priv_inputs = PrivateInputs::::empty(pub_params.sector_nodes); + let priv_inputs = PrivateInputs::::empty(pub_params.sector_nodes); EmptySectorUpdateCircuit { pub_params, pub_inputs, @@ -314,9 +300,11 @@ where } } -impl Circuit for EmptySectorUpdateCircuit +impl Circuit for EmptySectorUpdateCircuit where - TreeR: MerkleTreeTrait, + U: PoseidonArity, + V: PoseidonArity, + W: PoseidonArity, { /// This circuit is NOT AUDITED, USE AT YOUR OWN RISK. fn synthesize>(self, cs: &mut CS) -> Result<(), SynthesisError> { @@ -341,7 +329,7 @@ where let challenge_bit_len = sector_nodes.trailing_zeros() as usize; let challenge_count = challenge_count_poseidon(sector_nodes); - validate_tree_r_shape::(sector_nodes); + validate_tree_r_shape::(sector_nodes); let hs = hs(sector_nodes); let h_select_bit_len = hs.len(); @@ -427,7 +415,7 @@ where let phi = poseidon_hash( cs.namespace(|| "phi"), vec![comm_d_new.clone(), comm_r_old.clone()], - &POSEIDON_CONSTANTS_GEN_RANDOMNESS, + &*POSEIDON_CONSTANTS_GEN_RANDOMNESS_BLS, )?; // Allocate private-inputs; excludes each challenge's Merkle proofs. @@ -446,7 +434,7 @@ where // Assert that the witnessed `root_r_old` and `root_r_new` are consistent with the // public `comm_r_old` and `comm_r_new` via `comm_r = H(comm_c || root_r)`. 
- let comm_r_old_calc = ::Function::hash2_circuit( + let comm_r_old_calc = as Hasher>::Function::hash2_circuit( cs.namespace(|| "comm_r_old_calc"), &comm_c, &root_r_old, @@ -457,7 +445,7 @@ where |lc| lc + CS::one(), |lc| lc + comm_r_old.get_variable(), ); - let comm_r_new_calc = ::Function::hash2_circuit( + let comm_r_new_calc = as Hasher>::Function::hash2_circuit( cs.namespace(|| "comm_r_new_calc"), &comm_c, &root_r_new, @@ -495,7 +483,7 @@ where let rho = poseidon_hash( cs.namespace(|| format!("rho (c_index={})", c_index)), vec![phi.clone(), c_high.clone()], - &POSEIDON_CONSTANTS_GEN_RANDOMNESS, + &*POSEIDON_CONSTANTS_GEN_RANDOMNESS_BLS, )?; // Validate this challenge's Merkle proofs. @@ -553,7 +541,7 @@ where }) .collect::>>, SynthesisError>>()?; - por_no_challenge_input::( + por_no_challenge_input::, _>( cs.namespace(|| format!("por tree_r_old (c_index={})", c_index)), c_bits.clone(), leaf_r_old.clone(), @@ -583,7 +571,7 @@ where }) .collect::>>, SynthesisError>>()?; - por_no_challenge_input::( + por_no_challenge_input::, _>( cs.namespace(|| format!("por tree_r_new (c_index={})", c_index)), c_bits.clone(), leaf_r_new.clone(), @@ -613,7 +601,7 @@ where }) .collect::>>, SynthesisError>>()?; - por_no_challenge_input::( + por_no_challenge_input::, _>( cs.namespace(|| format!("por tree_d_new (c_index={})", c_index)), c_bits.clone(), leaf_d_new.clone(), diff --git a/storage-proofs-update/src/poseidon/compound.rs b/storage-proofs-update/src/poseidon/compound.rs index 2ec850540d..02cfe89442 100644 --- a/storage-proofs-update/src/poseidon/compound.rs +++ b/storage-proofs-update/src/poseidon/compound.rs @@ -2,7 +2,7 @@ use std::marker::PhantomData; use anyhow::ensure; use blstrs::Scalar as Fr; - +use filecoin_hashers::PoseidonArity; use storage_proofs_core::{ compound_proof::{CircuitComponent, CompoundProof}, error::Result, @@ -11,38 +11,47 @@ use storage_proofs_core::{ }; use crate::{ - constants::TreeRHasher, + constants::TreeR, poseidon::{ - circuit::{self, 
EmptySectorUpdateCircuit}, - vanilla::{EmptySectorUpdate, PartitionProof, PublicInputs}, + circuit, EmptySectorUpdate, EmptySectorUpdateCircuit, PartitionProof, PublicInputs, }, PublicParams, }; -pub struct EmptySectorUpdateCompound +pub struct EmptySectorUpdateCompound where - TreeR: MerkleTreeTrait, + U: PoseidonArity, + V: PoseidonArity, + W: PoseidonArity, { - pub _tree_r: PhantomData, + pub _tree_r: PhantomData<(U, V, W)>, } -impl CacheableParameters, PublicParams> - for EmptySectorUpdateCompound +impl CacheableParameters, PublicParams> + for EmptySectorUpdateCompound where - TreeR: MerkleTreeTrait, + U: PoseidonArity, + V: PoseidonArity, + W: PoseidonArity, { fn cache_prefix() -> String { - format!("empty-sector-update-poseidon-{}", TreeR::display()) + format!( + "empty-sector-update-poseidon-{}", + TreeR::::display() + ) } } -impl<'a, TreeR> CompoundProof<'a, EmptySectorUpdate, EmptySectorUpdateCircuit> - for EmptySectorUpdateCompound +impl<'a, U, V, W> + CompoundProof<'a, EmptySectorUpdate, EmptySectorUpdateCircuit> + for EmptySectorUpdateCompound where - TreeR: 'static + MerkleTreeTrait, + U: PoseidonArity, + V: PoseidonArity, + W: PoseidonArity, { fn generate_public_inputs( - pub_inputs: &PublicInputs, + pub_inputs: &PublicInputs, pub_params: &PublicParams, k: Option, ) -> Result> { @@ -79,12 +88,12 @@ where } fn circuit( - pub_inputs: &PublicInputs, - _priv_inputs: as CircuitComponent>::ComponentPrivateInputs, - vanilla_proof: &PartitionProof, + pub_inputs: &PublicInputs, + _priv_inputs: as CircuitComponent>::ComponentPrivateInputs, + vanilla_proof: &PartitionProof, pub_params: &PublicParams, k: Option, - ) -> Result> { + ) -> Result> { // Ensure correctness of arguments. 
let sector_bytes = (pub_params.sector_nodes as u64) << 5; ensure!( @@ -128,7 +137,7 @@ where }) } - fn blank_circuit(pub_params: &PublicParams) -> EmptySectorUpdateCircuit { + fn blank_circuit(pub_params: &PublicParams) -> EmptySectorUpdateCircuit { EmptySectorUpdateCircuit::blank(pub_params.clone()) } } diff --git a/storage-proofs-update/src/poseidon/mod.rs b/storage-proofs-update/src/poseidon/mod.rs index a52afd3603..a052684d90 100644 --- a/storage-proofs-update/src/poseidon/mod.rs +++ b/storage-proofs-update/src/poseidon/mod.rs @@ -4,4 +4,4 @@ pub mod vanilla; pub use circuit::EmptySectorUpdateCircuit; pub use compound::EmptySectorUpdateCompound; -pub use vanilla::EmptySectorUpdate; +pub use vanilla::{ChallengeProof, EmptySectorUpdate, PartitionProof, PublicInputs}; diff --git a/storage-proofs-update/src/poseidon/vanilla.rs b/storage-proofs-update/src/poseidon/vanilla.rs index 6ed573f136..2944d342fd 100644 --- a/storage-proofs-update/src/poseidon/vanilla.rs +++ b/storage-proofs-update/src/poseidon/vanilla.rs @@ -1,11 +1,9 @@ use std::marker::PhantomData; +use ff::PrimeFieldBits; +use filecoin_hashers::{Hasher, PoseidonArity}; use serde::{Deserialize, Serialize}; -use storage_proofs_core::{ - error::Result, - merkle::{MerkleProof, MerkleTreeTrait}, - proof::ProofScheme, -}; +use storage_proofs_core::{error::Result, merkle::MerkleProof, proof::ProofScheme}; use crate::{ constants::{TreeRDomain, TreeRHasher}, @@ -13,99 +11,76 @@ use crate::{ }; #[derive(Clone, Serialize, Deserialize)] -pub struct PublicInputs { - pub comm_r_old: TreeRDomain, - pub comm_d_new: TreeRDomain, - pub comm_r_new: TreeRDomain, +pub struct PublicInputs { + #[serde(bound(serialize = "TreeRDomain: Serialize"))] + #[serde(bound(deserialize = "TreeRDomain: Deserialize<'de>"))] + pub comm_r_old: TreeRDomain, + #[serde(bound(serialize = "TreeRDomain: Serialize"))] + #[serde(bound(deserialize = "TreeRDomain: Deserialize<'de>"))] + pub comm_d_new: TreeRDomain, + #[serde(bound(serialize = 
"TreeRDomain: Serialize"))] + #[serde(bound(deserialize = "TreeRDomain: Deserialize<'de>"))] + pub comm_r_new: TreeRDomain, // The number of high bits to take from each challenge's bits. Used to verify replica encoding // in the vanilla proof. `h` is only a public-input for the vanilla proof; the circuit takes // `h_select` as a public-input rather than `h`. pub h: usize, } -#[derive(Serialize, Deserialize)] -pub struct ChallengeProof -where - TreeR: MerkleTreeTrait, -{ - #[serde(bound( - serialize = "MerkleProof: Serialize", - deserialize = "MerkleProof: Deserialize<'de>" - ))] - pub proof_r_old: - MerkleProof, - #[serde(bound( - serialize = "MerkleProof: Serialize", - deserialize = "MerkleProof: Deserialize<'de>" - ))] - pub proof_d_new: - MerkleProof, - #[serde(bound( - serialize = "MerkleProof: Serialize", - deserialize = "MerkleProof: Deserialize<'de>" - ))] - pub proof_r_new: - MerkleProof, -} - -// Implement `Clone` by hand because `MerkleTreeTrait` does not implement `Clone`. -impl Clone for ChallengeProof -where - TreeR: MerkleTreeTrait, -{ - fn clone(&self) -> Self { - ChallengeProof { - proof_r_old: self.proof_r_old.clone(), - proof_d_new: self.proof_d_new.clone(), - proof_r_new: self.proof_r_new.clone(), - } - } -} - -#[derive(Serialize, Deserialize)] -pub struct PartitionProof +#[derive(Clone, Serialize, Deserialize)] +pub struct ChallengeProof where - TreeR: MerkleTreeTrait, + TreeRHasher: Hasher, + U: PoseidonArity, + V: PoseidonArity, + W: PoseidonArity, { - pub comm_c: TreeRDomain, - #[serde(bound( - serialize = "ChallengeProof: Serialize", - deserialize = "ChallengeProof: Deserialize<'de>" - ))] - pub challenge_proofs: Vec>, + #[serde(bound(serialize = "MerkleProof, U, V, W>: Serialize"))] + #[serde(bound(deserialize = "MerkleProof, U, V, W>: Deserialize<'de>"))] + pub proof_r_old: MerkleProof, U, V, W>, + #[serde(bound(serialize = "MerkleProof, U, V, W>: Serialize"))] + #[serde(bound(deserialize = "MerkleProof, U, V, W>: Deserialize<'de>"))] + 
pub proof_d_new: MerkleProof, U, V, W>, + #[serde(bound(serialize = "MerkleProof, U, V, W>: Serialize"))] + #[serde(bound(deserialize = "MerkleProof, U, V, W>: Deserialize<'de>"))] + pub proof_r_new: MerkleProof, U, V, W>, } -// Implement `Clone` by hand because `MerkleTreeTrait` does not implement `Clone`. -impl Clone for PartitionProof +#[derive(Clone, Serialize, Deserialize)] +pub struct PartitionProof where - TreeR: MerkleTreeTrait, + TreeRHasher: Hasher, + U: PoseidonArity, + V: PoseidonArity, + W: PoseidonArity, { - fn clone(&self) -> Self { - PartitionProof { - comm_c: self.comm_c, - challenge_proofs: self.challenge_proofs.clone(), - } - } + #[serde(bound(serialize = "TreeRDomain: Serialize"))] + #[serde(bound(deserialize = "TreeRDomain: Deserialize<'de>"))] + pub comm_c: TreeRDomain, + #[serde(bound(serialize = "ChallengeProof: Serialize"))] + #[serde(bound(deserialize = "ChallengeProof: Deserialize<'de>"))] + pub challenge_proofs: Vec>, } #[derive(Debug)] -#[allow(clippy::upper_case_acronyms)] -pub struct EmptySectorUpdate -where - TreeR: MerkleTreeTrait, -{ - _tree_r: PhantomData, +pub struct EmptySectorUpdate { + _f: PhantomData, + _tree_r: PhantomData<(U, V, W)>, } -impl<'a, TreeR> ProofScheme<'a> for EmptySectorUpdate +impl<'a, F, U, V, W> ProofScheme<'a> for EmptySectorUpdate where - TreeR: 'static + MerkleTreeTrait, + F: PrimeFieldBits, + TreeRHasher: Hasher>, + U: PoseidonArity, + V: PoseidonArity, + W: PoseidonArity, { type SetupParams = SetupParams; type PublicParams = PublicParams; - type PublicInputs = PublicInputs; - type PrivateInputs = PrivateInputs; - type Proof = PartitionProof; + type PublicInputs = PublicInputs; + type PrivateInputs = PrivateInputs; + type Proof = PartitionProof; type Requirements = (); fn setup(setup_params: &Self::SetupParams) -> Result { diff --git a/storage-proofs-update/src/vanilla.rs b/storage-proofs-update/src/vanilla.rs index 1c9456c386..d6d5b289de 100644 --- a/storage-proofs-update/src/vanilla.rs +++ 
b/storage-proofs-update/src/vanilla.rs @@ -5,10 +5,9 @@ use std::path::{Path, PathBuf}; use anyhow::{ensure, Context, Error}; use blstrs::Scalar as Fr; -use ff::Field; -use filecoin_hashers::{Domain, HashFunction, Hasher}; -use fr32::{bytes_into_fr, fr_into_bytes_slice}; -use generic_array::typenum::Unsigned; +use ff::{PrimeField, PrimeFieldBits}; +use filecoin_hashers::{Domain, FieldArity, HashFunction, Hasher, PoseidonArity}; +use generic_array::typenum::{Unsigned, U2}; use log::{info, trace}; use memmap::{Mmap, MmapMut, MmapOptions}; use merkletree::{ @@ -26,7 +25,7 @@ use storage_proofs_core::{ error::Result, merkle::{ create_base_merkle_tree, create_lc_tree, get_base_tree_count, split_config_and_replica, - BinaryMerkleTree, LCTree, MerkleProof, MerkleProofTrait, MerkleTreeTrait, + MerkleProof, MerkleProofTrait, MerkleTreeTrait, }, parameter_cache::ParameterSetMetadata, proof::ProofScheme, @@ -36,7 +35,7 @@ use storage_proofs_porep::stacked::{StackedDrg, TreeRElementData}; use crate::{ constants::{ apex_leaf_count, challenge_count, challenge_count_poseidon, hs, partition_count, TreeD, - TreeDArity, TreeDDomain, TreeDHasher, TreeDStore, TreeRDomain, TreeRHasher, + TreeDArity, TreeDDomain, TreeDHasher, TreeDStore, TreeR, TreeRDomain, TreeRHasher, ALLOWED_SECTOR_SIZES, POSEIDON_CONSTANTS_GEN_RANDOMNESS, }, Challenges, @@ -140,19 +139,25 @@ impl PublicParams { } #[derive(Clone, Serialize, Deserialize)] -pub struct PublicInputs { +pub struct PublicInputs { pub k: usize, - pub comm_r_old: TreeRDomain, - pub comm_d_new: TreeDDomain, - pub comm_r_new: TreeRDomain, + #[serde(bound(serialize = "TreeRDomain: Serialize"))] + #[serde(bound(deserialize = "TreeRDomain: Deserialize<'de>"))] + pub comm_r_old: TreeRDomain, + #[serde(bound(serialize = "TreeDDomain: Serialize"))] + #[serde(bound(deserialize = "TreeDDomain: Deserialize<'de>"))] + pub comm_d_new: TreeDDomain, + #[serde(bound(serialize = "TreeRDomain: Serialize"))] + #[serde(bound(deserialize = "TreeRDomain: 
Deserialize<'de>"))] + pub comm_r_new: TreeRDomain, // The number of high bits to take from each challenge's bits. Used to verify replica encoding // in the vanilla proof. `h` is only a public-input for the vanilla proof; the circuit takes // `h_select` as a public-input rather than `h`. pub h: usize, } -pub struct PrivateInputs { - pub comm_c: TreeRDomain, +pub struct PrivateInputs { + pub comm_c: TreeRDomain, pub tree_r_old_config: StoreConfig, // Path to old replica. pub old_replica_path: PathBuf, @@ -162,54 +167,41 @@ pub struct PrivateInputs { pub replica_path: PathBuf, } -#[derive(Serialize, Deserialize)] -pub struct ChallengeProof -where - TreeR: MerkleTreeTrait, -{ - #[serde(bound( - serialize = "MerkleProof: Serialize", - deserialize = "MerkleProof: Deserialize<'de>" - ))] - pub proof_r_old: - MerkleProof, - #[serde(bound( - serialize = "MerkleProof: Serialize", - deserialize = "MerkleProof: Deserialize<'de>" - ))] - pub proof_d_new: MerkleProof, - #[serde(bound( - serialize = "MerkleProof: Serialize", - deserialize = "MerkleProof: Deserialize<'de>" - ))] - pub proof_r_new: - MerkleProof, -} - -// Implement `Clone` by hand because `MerkleTreeTrait` does not implement `Clone`. 
-impl Clone for ChallengeProof +#[derive(Clone, Serialize, Deserialize)] +pub struct ChallengeProof where - TreeR: MerkleTreeTrait, + TreeDHasher: Hasher, + TreeRHasher: Hasher, + U: PoseidonArity, + V: PoseidonArity, + W: PoseidonArity, { - fn clone(&self) -> Self { - ChallengeProof { - proof_r_old: self.proof_r_old.clone(), - proof_d_new: self.proof_d_new.clone(), - proof_r_new: self.proof_r_new.clone(), - } - } + #[serde(bound(serialize = "MerkleProof, U, V, W>: Serialize"))] + #[serde(bound(deserialize = "MerkleProof, U, V, W>: Deserialize<'de>"))] + pub proof_r_old: MerkleProof, U, V, W>, + #[serde(bound(serialize = "MerkleProof, U, V, W>: Serialize"))] + #[serde(bound(deserialize = "MerkleProof, U, V, W>: Deserialize<'de>"))] + pub proof_d_new: MerkleProof, TreeDArity>, + #[serde(bound(serialize = "MerkleProof, U, V, W>: Serialize"))] + #[serde(bound(deserialize = "MerkleProof, U, V, W>: Deserialize<'de>"))] + pub proof_r_new: MerkleProof, U, V, W>, } -impl ChallengeProof +impl ChallengeProof where - TreeR: MerkleTreeTrait, + F: PrimeField, + TreeDHasher: Hasher>, + TreeRHasher: Hasher>, + U: PoseidonArity, + V: PoseidonArity, + W: PoseidonArity, { pub fn verify_merkle_proofs( &self, c: u32, - root_r_old: &TreeRDomain, - comm_d_new: &TreeDDomain, - root_r_new: &TreeRDomain, + root_r_old: &TreeRDomain, + comm_d_new: &TreeDDomain, + root_r_new: &TreeRDomain, ) -> bool { let c = c as usize; self.proof_r_old.path_index() == c @@ -224,52 +216,47 @@ where } } -#[derive(Serialize, Deserialize)] -pub struct PartitionProof -where - TreeR: MerkleTreeTrait, -{ - pub comm_c: TreeRDomain, - pub apex_leafs: Vec, - #[serde(bound( - serialize = "ChallengeProof: Serialize", - deserialize = "ChallengeProof: Deserialize<'de>" - ))] - pub challenge_proofs: Vec>, -} - -// Implement `Clone` by hand because `MerkleTreeTrait` does not implement `Clone`. 
-impl Clone for PartitionProof +#[derive(Clone, Serialize, Deserialize)] +pub struct PartitionProof where - TreeR: MerkleTreeTrait, + TreeDHasher: Hasher, + TreeRHasher: Hasher, + U: PoseidonArity, + V: PoseidonArity, + W: PoseidonArity, { - fn clone(&self) -> Self { - PartitionProof { - comm_c: self.comm_c, - apex_leafs: self.apex_leafs.clone(), - challenge_proofs: self.challenge_proofs.clone(), - } - } + #[serde(bound(serialize = "TreeRDomain: Serialize"))] + #[serde(bound(deserialize = "TreeRDomain: Deserialize<'de>"))] + pub comm_c: TreeRDomain, + #[serde(bound(serialize = "TreeDDomain: Serialize"))] + #[serde(bound(deserialize = "TreeDDomain: Deserialize<'de>"))] + pub apex_leafs: Vec>, + #[serde(bound(serialize = "ChallengeProof: Serialize"))] + #[serde(bound(deserialize = "ChallengeProof: Deserialize<'de>"))] + pub challenge_proofs: Vec>, } -#[derive(Debug)] -#[allow(clippy::upper_case_acronyms)] -pub struct EmptySectorUpdate -where - TreeR: MerkleTreeTrait, -{ - _tree_r: PhantomData, +pub struct EmptySectorUpdate { + _f: PhantomData, + _tree_r: PhantomData<(U, V, W)>, } -impl<'a, TreeR> ProofScheme<'a> for EmptySectorUpdate +impl<'a, F, U, V, W> ProofScheme<'a> for EmptySectorUpdate where - TreeR: 'static + MerkleTreeTrait, + F: PrimeFieldBits, + TreeDHasher: Hasher>, + TreeRHasher: Hasher>, + TreeDDomain: Domain, + TreeRDomain: Domain, + U: PoseidonArity, + V: PoseidonArity, + W: PoseidonArity, { type SetupParams = SetupParams; type PublicParams = PublicParams; - type PublicInputs = PublicInputs; - type PrivateInputs = PrivateInputs; - type Proof = PartitionProof; + type PublicInputs = PublicInputs; + type PrivateInputs = PrivateInputs; + type Proof = PartitionProof; type Requirements = (); fn setup(setup_params: &Self::SetupParams) -> Result { @@ -344,7 +331,7 @@ where &tree_r_new, ) }) - .collect::>>>()?; + .collect::>>>()?; info!("Finished generating all partition proofs"); @@ -398,14 +385,14 @@ where ); // Compute apex-tree. 
- let mut apex_tree: Vec> = vec![apex_leafs.clone()]; + let mut apex_tree: Vec>> = vec![apex_leafs.clone()]; for _ in 0..apex_leaf_bit_len { - let tree_row: Vec = apex_tree + let tree_row: Vec> = apex_tree .last() .unwrap() .chunks(2) .map(|siblings| { - ::Function::hash2(&siblings[0], &siblings[1]) + as Hasher>::Function::hash2(&siblings[0], &siblings[1]) }) .collect(); apex_tree.push(tree_row); @@ -421,8 +408,8 @@ where // Verify that the TreeROld and TreeRNew Merkle proofs roots agree with the public CommC, // CommROld, and CommRNew. - let comm_r_old_calc = ::Function::hash2(comm_c, &root_r_old); - let comm_r_new_calc = ::Function::hash2(comm_c, &root_r_new); + let comm_r_old_calc = as Hasher>::Function::hash2(comm_c, &root_r_old); + let comm_r_new_calc = as Hasher>::Function::hash2(comm_c, &root_r_new); if comm_r_old_calc != comm_r_old || comm_r_new_calc != comm_r_new { return Ok(false); } @@ -442,12 +429,12 @@ where } // Verify replica encoding. - let label_r_old: Fr = challenge_proof.proof_r_old.leaf().into(); - let label_d_new: Fr = challenge_proof.proof_d_new.leaf().into(); + let label_r_old: F = challenge_proof.proof_r_old.leaf().into(); + let label_d_new: F = challenge_proof.proof_d_new.leaf().into(); let label_r_new = challenge_proof.proof_r_new.leaf(); let c_high = c >> get_high_bits_shr; let rho = rho(&phi, c_high); - let label_r_new_calc: TreeRDomain = (label_r_old + label_d_new * rho).into(); + let label_r_new_calc: TreeRDomain = (label_r_old + label_d_new * rho).into(); if label_r_new_calc != label_r_new { return false; } @@ -494,31 +481,42 @@ where // `phi = H(comm_d_new, comm_r_old)` where Poseidon uses the custom "gen randomness" domain // separation tag. 
-#[inline] -pub fn phi(comm_d_new: &TreeDDomain, comm_r_old: &TreeRDomain) -> TreeRDomain { - let comm_d_new: Fr = (*comm_d_new).into(); - let comm_r_old: Fr = (*comm_r_old).into(); - Poseidon::new_with_preimage( - &[comm_d_new, comm_r_old], - &POSEIDON_CONSTANTS_GEN_RANDOMNESS, - ) - .hash() - .into() +pub fn phi(comm_d_new: &D, comm_r_old: &TreeRDomain) -> TreeRDomain +where + // TreeD domain. + D: Domain, + // TreeD and TreeR Domains must have the same field. + TreeRDomain: Domain, +{ + let preimage: [D::Field; 2] = [(*comm_d_new).into(), (*comm_r_old).into()]; + let consts = POSEIDON_CONSTANTS_GEN_RANDOMNESS + .get::>() + .expect("arity-2 Poseidon constants not found for field"); + Poseidon::new_with_preimage(&preimage, consts).hash().into() } // `rho = H(phi, high)` where `high` is the `h` high bits of a node-index and Poseidon uses the // custom "gen randomness" domain separation tag. -#[inline] -pub fn rho(phi: &TreeRDomain, high: u32) -> Fr { - let phi: Fr = (*phi).into(); - let high = Fr::from(high as u64); - Poseidon::new_with_preimage(&[phi, high], &POSEIDON_CONSTANTS_GEN_RANDOMNESS).hash() +pub fn rho(phi: &TreeRDomain, high: u32) -> F +where + F: PrimeField, + TreeRDomain: Domain, +{ + let preimage: [F; 2] = [(*phi).into(), F::from(high as u64)]; + let consts = POSEIDON_CONSTANTS_GEN_RANDOMNESS + .get::>() + .expect("arity-2 Poseidon constants not found for field"); + Poseidon::new_with_preimage(&preimage, consts).hash() } // Computes all `2^h` rho values for the given `phi`. Each rho corresponds to one of the `2^h` // possible `high` values where `high` is the `h` high bits of a node-index. 
#[inline] -pub fn rhos(h: usize, phi: &TreeRDomain) -> Vec { +pub fn rhos(h: usize, phi: &TreeRDomain) -> Vec +where + F: PrimeField, + TreeRDomain: Domain, +{ (0..1 << h).map(|high| rho(phi, high)).collect() } @@ -549,14 +547,21 @@ fn mmap_write(path: &Path) -> Result { #[allow(clippy::too_many_arguments)] #[allow(clippy::from_iter_instead_of_collect)] -impl EmptySectorUpdate +impl EmptySectorUpdate where - TreeR: 'static + MerkleTreeTrait, + F: PrimeFieldBits, + TreeDHasher: Hasher>, + TreeRHasher: Hasher>, + TreeDDomain: Domain, + TreeRDomain: Domain, + U: PoseidonArity, + V: PoseidonArity, + W: PoseidonArity, { pub fn instantiate_tree_d( tree_d_leafs: usize, tree_d_new_config: &StoreConfig, - ) -> Result { + ) -> Result> { // Instantiate TreeD new from the replica cache path. Note that this is similar to what // we do when going from t_aux to t_aux cache. let tree_d_arity = TreeDArity::to_usize(); @@ -575,16 +580,16 @@ where tree_r_config: &StoreConfig, replica_path: &Path, name: &str, - ) -> Result> { - let tree_r_base_arity = TreeR::Arity::to_usize(); - let tree_r_sub_arity = TreeR::SubTreeArity::to_usize(); - let tree_r_top_arity = TreeR::TopTreeArity::to_usize(); + ) -> Result> { + let tree_r_base_arity = U::to_usize(); + let tree_r_sub_arity = V::to_usize(); + let tree_r_top_arity = W::to_usize(); // Instantiate TreeR new from the replica_cache_path. Note that this is similar to what we // do when going from t_aux to t_aux cache. 
let tree_r_base_tree_nodes = tree_r_config.size.expect("tree_r config size failure"); let tree_r_base_tree_leafs = get_merkle_tree_leafs(tree_r_base_tree_nodes, tree_r_base_arity)?; - let tree_r_base_tree_count = get_base_tree_count::(); + let tree_r_base_tree_count = get_base_tree_count::>(); let (tree_r_configs, replica_config) = split_config_and_replica( tree_r_config.clone(), replica_path.to_path_buf(), @@ -609,7 +614,7 @@ where StoreConfig::data_path(&config.path, &config.id) ); } - create_lc_tree::>( + create_lc_tree::>( tree_r_base_tree_nodes, &tree_r_configs, &replica_config, @@ -619,12 +624,12 @@ where // Generates a partition proof given instantiated trees TreeROld, TreeDNew, and TreeRNew. pub fn prove_inner( pub_params: &PublicParams, - pub_inputs: &PublicInputs, - priv_inputs: &PrivateInputs, - tree_r_old: &LCTree, - tree_d_new: &TreeD, - tree_r_new: &LCTree, - ) -> Result> { + pub_inputs: &PublicInputs, + priv_inputs: &PrivateInputs, + tree_r_old: &TreeR, + tree_d_new: &TreeD, + tree_r_new: &TreeR, + ) -> Result> { let PublicParams { sector_nodes, partition_count, @@ -666,8 +671,9 @@ where // Re-instantiate TreeD's store for reading apex leafs. 
let tree_d_nodes = tree_d_new_config.size.expect("config size failure"); - let tree_d_store = TreeDStore::new_from_disk(tree_d_nodes, tree_d_arity, tree_d_new_config) - .context("tree_d_store")?; + let tree_d_store = + TreeDStore::::new_from_disk(tree_d_nodes, tree_d_arity, tree_d_new_config) + .context("tree_d_store")?; ensure!( tree_d_nodes == Store::len(&tree_d_store), "TreeD store size mismatch" @@ -693,7 +699,7 @@ where apex_leafs_start, k ); - let apex_leafs: Vec = + let apex_leafs: Vec> = tree_d_store.read_range(apex_leafs_start..apex_leafs_stop)?; info!( "Finished reading apex-leafs from TreeD for partition k={}", @@ -734,7 +740,7 @@ where proof_r_new, }) }) - .collect::>>>()?; + .collect::>>>()?; info!("finished generating challenge-proofs for partition k={}", k); @@ -748,16 +754,16 @@ where #[cfg(any(feature = "cuda", feature = "opencl"))] #[allow(clippy::unnecessary_wraps)] fn prepare_tree_r_data( - source: &DiskStore, + source: &DiskStore>, _data: Option<&mut Data<'_>>, start: usize, end: usize, - ) -> Result> { - let tree_data: Vec = source + ) -> Result>> { + let tree_data: Vec> = source .read_range(start..end) .expect("failed to read from source"); - if StackedDrg::::use_gpu_tree_builder() { + if StackedDrg::, TreeDHasher>::use_gpu_tree_builder() { Ok(TreeRElementData::FrList( tree_data.into_par_iter().map(|x| x.into()).collect(), )) @@ -769,12 +775,12 @@ where #[cfg(not(any(feature = "cuda", feature = "opencl")))] #[allow(clippy::unnecessary_wraps)] fn prepare_tree_r_data( - source: &DiskStore, + source: &DiskStore>, _data: Option<&mut Data<'_>>, start: usize, end: usize, - ) -> Result> { - let tree_data: Vec = source + ) -> Result>> { + let tree_data: Vec> = source .read_range(start..end) .expect("failed to read from source"); @@ -786,15 +792,15 @@ where nodes_count: usize, tree_d_new_config: StoreConfig, tree_r_last_new_config: StoreConfig, - comm_c: TreeRDomain, - comm_r_last_old: TreeRDomain, + comm_c: TreeRDomain, + comm_r_last_old: 
TreeRDomain, new_replica_path: &Path, new_cache_path: &Path, sector_key_path: &Path, sector_key_cache_path: &Path, staged_data_path: &Path, h: usize, - ) -> Result<(TreeRDomain, TreeRDomain, TreeDDomain)> { + ) -> Result<(TreeRDomain, TreeRDomain, TreeDDomain)> { // Sanity check all input path types. ensure!( metadata(new_cache_path)?.is_dir(), @@ -805,7 +811,7 @@ where "sector_key_cache_path must be a directory" ); - let tree_count = get_base_tree_count::(); + let tree_count = get_base_tree_count::>(); let base_tree_nodes_count = nodes_count / tree_count; let new_replica_path_metadata = metadata(new_replica_path)?; @@ -861,7 +867,7 @@ where new_data.ensure_data_of_len(sector_key_path_metadata.len() as usize)?; // Generate tree_d over the staged_data. - let tree_d = create_base_merkle_tree::>( + let tree_d = create_base_merkle_tree::>( Some(tree_d_new_config), tree_count * base_tree_nodes_count, new_data.as_ref(), @@ -869,7 +875,7 @@ where let comm_d_new = tree_d.root(); - let comm_r_old = ::Function::hash2(&comm_c, &comm_r_last_old); + let comm_r_old = as Hasher>::Function::hash2(&comm_c, &comm_r_last_old); let phi = phi(&comm_d_new, &comm_r_old); let end = staged_data_path_metadata.len() as u64; @@ -901,16 +907,26 @@ where let high = node_index >> get_high_bits_shr; let rho = rhos[high]; - let sector_key_fr = - bytes_into_fr(§or_key_data[input_index..input_index + FR_SIZE])?; - let staged_data_fr = - bytes_into_fr(&staged_data[input_index..input_index + FR_SIZE])?; + let sector_key_fr = { + let mut repr = F::Repr::default(); + repr.as_mut() + .copy_from_slice(§or_key_data[input_index..input_index + FR_SIZE]); + let opt = F::from_repr_vartime(repr); + ensure!(opt.is_some(), "bytes are invalid field repr"); + opt.unwrap() + }; + let staged_data_fr = { + let mut repr = F::Repr::default(); + repr.as_mut() + .copy_from_slice(&staged_data[input_index..input_index + FR_SIZE]); + let opt = F::from_repr_vartime(repr); + ensure!(opt.is_some(), "bytes are invalid field 
repr"); + opt.unwrap() + }; let new_replica_fr = sector_key_fr + (staged_data_fr * rho); - fr_into_bytes_slice( - &new_replica_fr, - &mut replica_data[output_index..output_index + FR_SIZE], - ); + replica_data[output_index..output_index + FR_SIZE] + .copy_from_slice(new_replica_fr.to_repr().as_ref()); } Ok(()) @@ -918,13 +934,13 @@ where new_replica_data.flush()?; // Open the new written replica data as a DiskStore. - let new_replica_store: DiskStore = + let new_replica_store: DiskStore> = DiskStore::new_from_slice(nodes_count, &new_replica_data[0..])?; // This argument is currently unused by this invocation, but required for the API. let mut unused_data = Data::empty(); - let tree_r_last = StackedDrg::::generate_tree_r_last::( + let tree_r_last = StackedDrg::, TreeDHasher>::generate_tree_r_last::( &mut unused_data, base_tree_nodes_count, tree_count, @@ -935,7 +951,7 @@ where )?; let comm_r_last_new = tree_r_last.root(); - let comm_r_new = ::Function::hash2(&comm_c, &comm_r_last_new); + let comm_r_new = as Hasher>::Function::hash2(&comm_c, &comm_r_last_new); Ok((comm_r_new, comm_r_last_new, comm_d_new)) } @@ -947,9 +963,9 @@ where replica_path: &Path, sector_key_path: &Path, sector_key_cache_path: &Path, - comm_c: TreeRDomain, - comm_d_new: TreeDDomain, - comm_sector_key: TreeRDomain, + comm_c: TreeRDomain, + comm_d_new: TreeDDomain, + comm_sector_key: TreeRDomain, h: usize, ) -> Result<()> { // Sanity check all input path types. 
@@ -958,7 +974,7 @@ where "sector_key_cache_path must be a directory" ); - let tree_count = get_base_tree_count::(); + let tree_count = get_base_tree_count::>(); let base_tree_nodes_count = nodes_count / tree_count; let out_data_path_metadata = metadata(out_data_path)?; @@ -1006,7 +1022,7 @@ where let replica_data = mmap_read(replica_path)?; let sector_key_data = mmap_read(sector_key_path)?; - let comm_r_old = ::Function::hash2(&comm_c, &comm_sector_key); + let comm_r_old = as Hasher>::Function::hash2(&comm_c, &comm_sector_key); let phi = phi(&comm_d_new, &comm_r_old); let end = replica_path_metadata.len() as u64; @@ -1023,7 +1039,7 @@ where let get_high_bits_shr = node_index_bit_len - h; // Precompute all rho^-1 values. - let rho_invs: Vec = rhos(h, &phi) + let rho_invs: Vec = rhos(h, &phi) .into_iter() .map(|rho| rho.invert().unwrap()) .collect(); @@ -1041,16 +1057,26 @@ where let high = node_index >> get_high_bits_shr; let rho_inv = rho_invs[high]; - let sector_key_fr = - bytes_into_fr(§or_key_data[input_index..input_index + FR_SIZE])?; - let replica_data_fr = - bytes_into_fr(&replica_data[input_index..input_index + FR_SIZE])?; + let sector_key_fr = { + let mut repr = F::Repr::default(); + repr.as_mut() + .copy_from_slice(§or_key_data[input_index..input_index + FR_SIZE]); + let opt = F::from_repr_vartime(repr); + ensure!(opt.is_some(), "bytes are invalid field repr"); + opt.unwrap() + }; + let replica_data_fr = { + let mut repr = F::Repr::default(); + repr.as_mut() + .copy_from_slice(&replica_data[input_index..input_index + FR_SIZE]); + let opt = F::from_repr_vartime(repr); + ensure!(opt.is_some(), "bytes are invalid field repr"); + opt.unwrap() + }; let out_data_fr = (replica_data_fr - sector_key_fr) * rho_inv; - fr_into_bytes_slice( - &out_data_fr, - &mut output_data[output_index..output_index + FR_SIZE], - ); + output_data[output_index..output_index + FR_SIZE] + .copy_from_slice(out_data_fr.to_repr().as_ref()); } Ok(()) @@ -1069,11 +1095,11 @@ where 
replica_cache_path: &Path, data_path: &Path, tree_r_last_new_config: StoreConfig, - comm_c: TreeRDomain, - comm_d_new: TreeDDomain, - comm_sector_key: TreeRDomain, + comm_c: TreeRDomain, + comm_d_new: TreeDDomain, + comm_sector_key: TreeRDomain, h: usize, - ) -> Result { + ) -> Result> { // Sanity check all input path types. ensure!( metadata(sector_key_cache_path)?.is_dir(), @@ -1084,7 +1110,7 @@ where "replica_cache_path must be a directory" ); - let tree_count = get_base_tree_count::(); + let tree_count = get_base_tree_count::>(); let base_tree_nodes_count = nodes_count / tree_count; let data_path_metadata = metadata(data_path)?; @@ -1133,7 +1159,7 @@ where let replica_data = mmap_read(replica_path)?; let data = mmap_read(data_path)?; - let comm_r_old = ::Function::hash2(&comm_c, &comm_sector_key); + let comm_r_old = as Hasher>::Function::hash2(&comm_c, &comm_sector_key); let phi = phi(&comm_d_new, &comm_r_old); let end = replica_path_metadata.len() as u64; @@ -1165,15 +1191,26 @@ where let high = node_index >> get_high_bits_shr; let rho = rhos[high]; - let data_fr = bytes_into_fr(&data[input_index..input_index + FR_SIZE])?; - let replica_data_fr = - bytes_into_fr(&replica_data[input_index..input_index + FR_SIZE])?; + let data_fr = { + let mut repr = F::Repr::default(); + repr.as_mut() + .copy_from_slice(&data[input_index..input_index + FR_SIZE]); + let opt = F::from_repr_vartime(repr); + ensure!(opt.is_some(), "bytes are invalid field repr"); + opt.unwrap() + }; + let replica_data_fr = { + let mut repr = F::Repr::default(); + repr.as_mut() + .copy_from_slice(&replica_data[input_index..input_index + FR_SIZE]); + let opt = F::from_repr_vartime(repr); + ensure!(opt.is_some(), "bytes are invalid field repr"); + opt.unwrap() + }; let sector_key_fr = replica_data_fr - (data_fr * rho); - fr_into_bytes_slice( - §or_key_fr, - &mut skey_data[output_index..output_index + FR_SIZE], - ); + skey_data[output_index..output_index + FR_SIZE] + 
.copy_from_slice(sector_key_fr.to_repr().as_ref()); } Ok(()) @@ -1181,13 +1218,13 @@ where sector_key_data.flush()?; // Open the new written sector_key data as a DiskStore. - let sector_key_store: DiskStore = + let sector_key_store: DiskStore> = DiskStore::new_from_slice(nodes_count, §or_key_data[0..])?; // This argument is currently unused by this invocation, but required for the API. let mut unused_data = Data::empty(); - let tree_r_last = StackedDrg::::generate_tree_r_last::( + let tree_r_last = StackedDrg::, TreeDHasher>::generate_tree_r_last::( &mut unused_data, base_tree_nodes_count, tree_count, diff --git a/storage-proofs-update/tests/circuit.rs b/storage-proofs-update/tests/circuit.rs index d42d0942af..0480a3a7e8 100644 --- a/storage-proofs-update/tests/circuit.rs +++ b/storage-proofs-update/tests/circuit.rs @@ -1,11 +1,14 @@ #![allow(unused_imports)] #![allow(dead_code)] + use std::path::Path; -use bellperson::util_cs::bench_cs::BenchCS; -use bellperson::{util_cs::test_cs::TestConstraintSystem, Circuit}; +use bellperson::{ + util_cs::{bench_cs::BenchCS, test_cs::TestConstraintSystem}, + Circuit, +}; use blstrs::Scalar as Fr; -use filecoin_hashers::{Domain, HashFunction, Hasher}; +use filecoin_hashers::{Domain, HashFunction, Hasher, PoseidonArity}; use generic_array::typenum::{Unsigned, U0, U2, U4, U8}; use merkletree::store::{DiskStore, StoreConfig}; use rand::SeedableRng; @@ -18,9 +21,9 @@ use storage_proofs_core::{ use storage_proofs_update::{ circuit, constants::{ - apex_leaf_count, hs, partition_count, validate_tree_r_shape, TreeD, TreeDDomain, - TreeRDomain, TreeRHasher, SECTOR_SIZE_16_KIB, SECTOR_SIZE_1_KIB, SECTOR_SIZE_2_KIB, - SECTOR_SIZE_32_GIB, SECTOR_SIZE_32_KIB, SECTOR_SIZE_4_KIB, SECTOR_SIZE_8_KIB, + self, apex_leaf_count, hs, partition_count, validate_tree_r_shape, SECTOR_SIZE_16_KIB, + SECTOR_SIZE_1_KIB, SECTOR_SIZE_2_KIB, SECTOR_SIZE_32_GIB, SECTOR_SIZE_32_KIB, + SECTOR_SIZE_4_KIB, SECTOR_SIZE_8_KIB, }, phi, rho, vanilla, Challenges, 
EmptySectorUpdateCircuit, PublicParams, }; @@ -28,16 +31,14 @@ use tempfile::tempdir; mod common; -fn get_apex_leafs( - tree_d_new: &MerkleTreeWrapper< - ::Hasher, - ::Store, - ::Arity, - ::SubTreeArity, - ::TopTreeArity, - >, - k: usize, -) -> Vec { +type TreeD = constants::TreeD; +type TreeDDomain = constants::TreeDDomain; + +type TreeR = constants::TreeR; +type TreeRDomain = constants::TreeRDomain; +type TreeRHasher = constants::TreeRHasher; + +fn get_apex_leafs(tree_d_new: &TreeD, k: usize) -> Vec { let sector_nodes = tree_d_new.leafs(); let tree_d_height = sector_nodes.trailing_zeros() as usize; let partition_count = partition_count(sector_nodes); @@ -62,11 +63,13 @@ fn get_apex_leafs( }) } -fn test_empty_sector_update_circuit(sector_nodes: usize, constraints_expected: usize) +fn test_empty_sector_update_circuit(sector_nodes: usize, constraints_expected: usize) where - TreeR: MerkleTreeTrait, + U: PoseidonArity, + V: PoseidonArity, + W: PoseidonArity, { - validate_tree_r_shape::(sector_nodes); + validate_tree_r_shape::(sector_nodes); let sector_bytes = sector_nodes << 5; let hs = hs(sector_nodes); @@ -82,7 +85,7 @@ where let labels_r_old: Vec = (0..sector_nodes) .map(|_| TreeRDomain::random(&mut rng)) .collect(); - let tree_r_old = common::create_tree::(&labels_r_old, tmp_path, "tree-r-old"); + let tree_r_old = common::create_tree::>(&labels_r_old, tmp_path, "tree-r-old"); let root_r_old = tree_r_old.root(); let comm_c = TreeRDomain::random(&mut rng); let comm_r_old = ::Function::hash2(&comm_c, &root_r_old); @@ -99,7 +102,7 @@ where // Encode `labels_d_new` into `labels_r_new` and create TreeRNew. 
let labels_r_new = common::encode_new_replica(&labels_r_old, &labels_d_new, &phi, h); - let tree_r_new = common::create_tree::(&labels_r_new, tmp_path, "tree-r-new"); + let tree_r_new = common::create_tree::>(&labels_r_new, tmp_path, "tree-r-new"); let root_r_new = tree_r_new.root(); let comm_r_new = ::Function::hash2(&comm_c, &root_r_new); @@ -108,7 +111,7 @@ where for k in 0..pub_params.partition_count { // Generate vanilla-proof. let apex_leafs = get_apex_leafs(&tree_d_new, k); - let challenge_proofs: Vec> = + let challenge_proofs: Vec> = Challenges::new(sector_nodes, comm_r_new, k) .enumerate() .take(pub_params.challenge_count) @@ -140,13 +143,13 @@ where let priv_inputs = circuit::PrivateInputs::new(comm_c, &apex_leafs, &challenge_proofs); - let circuit = EmptySectorUpdateCircuit:: { + let circuit = EmptySectorUpdateCircuit { pub_params: pub_params.clone(), pub_inputs, priv_inputs, }; - let mut cs = TestConstraintSystem::::new(); + let mut cs = TestConstraintSystem::new(); circuit.synthesize(&mut cs).expect("failed to synthesize"); assert!(cs.is_satisfied()); assert!(cs.verify(&pub_inputs_vec)); @@ -154,63 +157,56 @@ where } } -#[test] #[cfg(feature = "isolated-testing")] +#[test] fn test_empty_sector_update_circuit_1kib() { - type TreeR = MerkleTreeWrapper, U8, U4, U0>; - test_empty_sector_update_circuit::(SECTOR_SIZE_1_KIB, 1248389); + test_empty_sector_update_circuit::(SECTOR_SIZE_1_KIB, 1248389); } -#[test] #[cfg(feature = "isolated-testing")] +#[test] fn test_empty_sector_update_circuit_2kib() { - type TreeR = MerkleTreeWrapper, U8, U0, U0>; - test_empty_sector_update_circuit::(SECTOR_SIZE_2_KIB, 1705039); + test_empty_sector_update_circuit::(SECTOR_SIZE_2_KIB, 1705039); } -#[test] #[cfg(feature = "isolated-testing")] +#[test] fn test_empty_sector_update_circuit_4kib() { - type TreeR = MerkleTreeWrapper, U8, U2, U0>; - test_empty_sector_update_circuit::(SECTOR_SIZE_4_KIB, 2165109); + test_empty_sector_update_circuit::(SECTOR_SIZE_4_KIB, 2165109); } 
-#[test] #[cfg(feature = "isolated-testing")] +#[test] fn test_empty_sector_update_circuit_8kib() { - type TreeR = MerkleTreeWrapper, U8, U4, U0>; - test_empty_sector_update_circuit::(SECTOR_SIZE_8_KIB, 2620359); + test_empty_sector_update_circuit::(SECTOR_SIZE_8_KIB, 2620359); } -#[test] #[cfg(feature = "isolated-testing")] +#[test] fn test_empty_sector_update_circuit_16kib() { - type TreeR = MerkleTreeWrapper, U8, U8, U0>; - test_empty_sector_update_circuit::(SECTOR_SIZE_16_KIB, 6300021); + test_empty_sector_update_circuit::(SECTOR_SIZE_16_KIB, 6300021); } -#[test] #[cfg(feature = "isolated-testing")] +#[test] fn test_empty_sector_update_circuit_32kib() { - type TreeR = MerkleTreeWrapper, U8, U8, U2>; - test_empty_sector_update_circuit::(SECTOR_SIZE_32_KIB, 6760091); + test_empty_sector_update_circuit::(SECTOR_SIZE_32_KIB, 6760091); } #[test] #[ignore] fn test_empty_sector_update_constraints_32gib() { - type TreeR = MerkleTreeWrapper, U8, U8, U0>; + let pub_params = PublicParams::from_sector_size(SECTOR_SIZE_32_GIB as u64 * 32); let pub_inputs = circuit::PublicInputs::empty(); + let priv_inputs = circuit::PrivateInputs::::empty(SECTOR_SIZE_32_GIB); - let priv_inputs = circuit::PrivateInputs::empty(SECTOR_SIZE_32_GIB); - - let circuit = EmptySectorUpdateCircuit:: { - pub_params: PublicParams::from_sector_size(SECTOR_SIZE_32_GIB as u64 * 32), + let circuit = EmptySectorUpdateCircuit { + pub_params, pub_inputs, priv_inputs, }; - let mut cs = BenchCS::::new(); + let mut cs = BenchCS::new(); circuit.synthesize(&mut cs).expect("failed to synthesize"); assert_eq!(cs.num_constraints(), 81049499) } diff --git a/storage-proofs-update/tests/circuit_poseidon.rs b/storage-proofs-update/tests/circuit_poseidon.rs index 95e0acc3d4..8c5cd0c2cd 100644 --- a/storage-proofs-update/tests/circuit_poseidon.rs +++ b/storage-proofs-update/tests/circuit_poseidon.rs @@ -1,10 +1,11 @@ #![allow(unused_imports)] #![allow(dead_code)] + use std::path::Path; use 
bellperson::{util_cs::bench_cs::BenchCS, util_cs::test_cs::TestConstraintSystem, Circuit}; use blstrs::Scalar as Fr; -use filecoin_hashers::{Domain, HashFunction, Hasher}; +use filecoin_hashers::{Domain, HashFunction, Hasher, PoseidonArity}; use generic_array::typenum::{Unsigned, U0, U2, U4, U8}; use merkletree::store::{DiskStore, StoreConfig}; use rand::SeedableRng; @@ -16,9 +17,9 @@ use storage_proofs_core::{ }; use storage_proofs_update::{ constants::{ - apex_leaf_count, hs, partition_count, validate_tree_r_shape, TreeD, TreeDDomain, - TreeRDomain, TreeRHasher, SECTOR_SIZE_16_KIB, SECTOR_SIZE_1_KIB, SECTOR_SIZE_2_KIB, - SECTOR_SIZE_32_GIB, SECTOR_SIZE_32_KIB, SECTOR_SIZE_4_KIB, SECTOR_SIZE_8_KIB, + self, apex_leaf_count, hs, partition_count, validate_tree_r_shape, SECTOR_SIZE_16_KIB, + SECTOR_SIZE_1_KIB, SECTOR_SIZE_2_KIB, SECTOR_SIZE_32_GIB, SECTOR_SIZE_32_KIB, + SECTOR_SIZE_4_KIB, SECTOR_SIZE_8_KIB, }, phi, poseidon::{circuit, vanilla, EmptySectorUpdateCircuit}, @@ -28,11 +29,17 @@ use tempfile::tempdir; mod common; -fn test_empty_sector_update_circuit(sector_nodes: usize, constraints_expected: usize) +type TreeR = constants::TreeR; +type TreeRDomain = constants::TreeRDomain; +type TreeRHasher = constants::TreeRHasher; + +fn test_empty_sector_update_circuit(sector_nodes: usize, constraints_expected: usize) where - TreeR: MerkleTreeTrait, + U: PoseidonArity, + V: PoseidonArity, + W: PoseidonArity, { - validate_tree_r_shape::(sector_nodes); + validate_tree_r_shape::(sector_nodes); let sector_bytes = sector_nodes << 5; let hs = hs(sector_nodes); @@ -48,7 +55,7 @@ where let labels_r_old: Vec = (0..sector_nodes) .map(|_| TreeRDomain::random(&mut rng)) .collect(); - let tree_r_old = common::create_tree::(&labels_r_old, tmp_path, "tree-r-old"); + let tree_r_old = common::create_tree::>(&labels_r_old, tmp_path, "tree-r-old"); let root_r_old = tree_r_old.root(); let comm_c = TreeRDomain::random(&mut rng); let comm_r_old = ::Function::hash2(&comm_c, &root_r_old); @@ 
-57,7 +64,7 @@ where let labels_d_new: Vec = (0..sector_nodes) .map(|_| TreeRDomain::random(&mut rng)) .collect(); - let tree_d_new = common::create_tree::(&labels_d_new, tmp_path, "tree-d-new"); + let tree_d_new = common::create_tree::>(&labels_d_new, tmp_path, "tree-d-new"); let comm_d_new = tree_d_new.root(); // `phi = H(comm_d_new || comm_r_old)` @@ -65,7 +72,7 @@ where // Encode `labels_d_new` into `labels_r_new` and create TreeRNew. let labels_r_new = common::encode_new_replica(&labels_r_old, &labels_d_new, &phi, h); - let tree_r_new = common::create_tree::(&labels_r_new, tmp_path, "tree-r-new"); + let tree_r_new = common::create_tree::>(&labels_r_new, tmp_path, "tree-r-new"); let root_r_new = tree_r_new.root(); let comm_r_new = ::Function::hash2(&comm_c, &root_r_new); @@ -73,7 +80,7 @@ where { // Generate vanilla-proof. - let challenge_proofs: Vec> = + let challenge_proofs: Vec> = Challenges::new_poseidon(sector_nodes, comm_r_new) .enumerate() .take(pub_params.challenge_count) @@ -106,7 +113,7 @@ where let priv_inputs = circuit::PrivateInputs::new(comm_c, &challenge_proofs); - let circuit = EmptySectorUpdateCircuit:: { + let circuit = EmptySectorUpdateCircuit:: { pub_params, pub_inputs, priv_inputs, @@ -123,26 +130,23 @@ where #[test] #[cfg(feature = "isolated-testing")] fn test_empty_sector_update_circuit_1kib() { - type TreeR = MerkleTreeWrapper, U8, U4, U0>; - test_empty_sector_update_circuit::(SECTOR_SIZE_1_KIB, 32164); //old 1248389 + test_empty_sector_update_circuit::(SECTOR_SIZE_1_KIB, 32164); //old 1248389 } #[test] #[cfg(feature = "isolated-testing")] fn test_empty_sector_update_circuit_8kib() { - type TreeR = MerkleTreeWrapper, U8, U4, U0>; - test_empty_sector_update_circuit::(SECTOR_SIZE_8_KIB, 47974); //old 2620359 + test_empty_sector_update_circuit::(SECTOR_SIZE_8_KIB, 47974); //old 2620359 } #[test] #[ignore] fn test_empty_sector_update_constraints_32gib() { - type TreeR = MerkleTreeWrapper, U8, U8, U0>; let pub_inputs = 
circuit::PublicInputs::empty(); let priv_inputs = circuit::PrivateInputs::empty(SECTOR_SIZE_32_GIB); - let circuit = EmptySectorUpdateCircuit:: { + let circuit = EmptySectorUpdateCircuit:: { pub_params: PublicParams::from_sector_size_poseidon(SECTOR_SIZE_32_GIB as u64 * 32), pub_inputs, priv_inputs, diff --git a/storage-proofs-update/tests/common/mod.rs b/storage-proofs-update/tests/common/mod.rs index e48d793977..f8be5540ce 100644 --- a/storage-proofs-update/tests/common/mod.rs +++ b/storage-proofs-update/tests/common/mod.rs @@ -1,6 +1,5 @@ use std::path::Path; -use blstrs::Scalar as Fr; use filecoin_hashers::{Domain, Hasher}; use generic_array::typenum::Unsigned; use merkletree::store::StoreConfig; @@ -107,12 +106,18 @@ pub fn create_tree( } } -pub fn encode_new_replica( - labels_r_old: &[TreeRDomain], - labels_d_new: &[TreeD], - phi: &TreeRDomain, +pub fn encode_new_replica( + labels_r_old: &[TreeRDomain], + labels_d_new: &[D], + phi: &TreeRDomain, h: usize, -) -> Vec { +) -> Vec> +where + // TreeD domain. + D: Domain, + // TreeD and TreeR Domains must have the same field. 
+ TreeRDomain: Domain, +{ let sector_nodes = labels_r_old.len(); assert_eq!(sector_nodes, labels_d_new.len()); @@ -127,8 +132,8 @@ pub fn encode_new_replica( let rho = rho(phi, high); // `label_r_new = label_r_old + label_d_new * rho` - let label_r_old: Fr = labels_r_old[node].into(); - let label_d_new: Fr = labels_d_new[node].into(); + let label_r_old: D::Field = labels_r_old[node].into(); + let label_d_new: D::Field = labels_d_new[node].into(); (label_r_old + label_d_new * rho).into() }) .collect() diff --git a/storage-proofs-update/tests/compound.rs b/storage-proofs-update/tests/compound.rs index b3736c1d98..6042feddf4 100644 --- a/storage-proofs-update/tests/compound.rs +++ b/storage-proofs-update/tests/compound.rs @@ -1,6 +1,7 @@ use std::fs; use std::path::Path; +use blstrs::Scalar as Fr; use filecoin_hashers::{Domain, HashFunction, Hasher, PoseidonArity}; use generic_array::typenum::{Unsigned, U0, U2, U4, U8}; use merkletree::{merkle::get_merkle_tree_len, store::StoreConfig}; @@ -8,17 +9,15 @@ use rand::SeedableRng; use rand_xorshift::XorShiftRng; use storage_proofs_core::{ compound_proof::{self, CompoundProof}, - merkle::{ - create_lc_tree, get_base_tree_count, split_config_and_replica, LCTree, MerkleTreeTrait, - }, + merkle::{create_lc_tree, get_base_tree_count, split_config_and_replica, MerkleTreeTrait}, util::default_rows_to_discard, TEST_SEED, }; use storage_proofs_update::{ constants::{ - hs, partition_count, validate_tree_r_shape, TreeD, TreeDArity, TreeDDomain, TreeRBaseTree, - TreeRDomain, TreeRHasher, SECTOR_SIZE_16_KIB, SECTOR_SIZE_1_KIB, SECTOR_SIZE_2_KIB, - SECTOR_SIZE_32_KIB, SECTOR_SIZE_4_KIB, SECTOR_SIZE_8_KIB, + self, hs, partition_count, validate_tree_r_shape, TreeDArity, SECTOR_SIZE_16_KIB, + SECTOR_SIZE_1_KIB, SECTOR_SIZE_2_KIB, SECTOR_SIZE_32_KIB, SECTOR_SIZE_4_KIB, + SECTOR_SIZE_8_KIB, }, phi, EmptySectorUpdateCompound, PrivateInputs, PublicInputs, SetupParams, }; @@ -28,7 +27,13 @@ mod common; const HS_INDEX: usize = 2; -type TreeR = 
LCTree; +type TreeD = constants::TreeD; +type TreeDDomain = constants::TreeDDomain; + +type TreeR = constants::TreeR; +type TreeRBase = constants::TreeRBase; +type TreeRDomain = constants::TreeRDomain; +type TreeRHasher = constants::TreeRHasher; fn test_empty_sector_update_compound(sector_nodes: usize) where @@ -36,7 +41,7 @@ where V: PoseidonArity, W: PoseidonArity, { - validate_tree_r_shape::>(sector_nodes); + validate_tree_r_shape::(sector_nodes); let base_arity = U::to_usize(); @@ -89,9 +94,8 @@ where .zip(replica_old_config.offsets.iter().copied()) { let leafs = &replica_old[leafs_offset..leafs_offset + tree_r_base_tree_leafs_byte_len]; - let _base_tree = - TreeRBaseTree::from_byte_slice_with_config(leafs, base_tree_config.clone()) - .expect("failed to create base-tree"); + let _base_tree = TreeRBase::from_byte_slice_with_config(leafs, base_tree_config.clone()) + .expect("failed to create base-tree"); } let tree_r_old = create_lc_tree::>( tree_r_base_tree_nodes, @@ -152,9 +156,8 @@ where .zip(replica_new_config.offsets.iter().copied()) { let leafs = &replica_new[leafs_offset..leafs_offset + tree_r_base_tree_leafs_byte_len]; - let _base_tree = - TreeRBaseTree::from_byte_slice_with_config(leafs, base_tree_config.clone()) - .expect("failed to create base-tree"); + let _base_tree = TreeRBase::from_byte_slice_with_config(leafs, base_tree_config.clone()) + .expect("failed to create base-tree"); } let tree_r_new = create_lc_tree::>( tree_r_base_tree_nodes, @@ -174,7 +177,7 @@ where priority: true, }; let pub_params_compound = - EmptySectorUpdateCompound::>::setup(&setup_params_compound).unwrap(); + EmptySectorUpdateCompound::::setup(&setup_params_compound).unwrap(); // Prove generate vanilla and circuit proofs for all partitions. 
let pub_inputs = PublicInputs { @@ -196,13 +199,13 @@ where replica_path: replica_new_path, }; - let blank_groth_params = EmptySectorUpdateCompound::>::groth_params( + let blank_groth_params = EmptySectorUpdateCompound::::groth_params( Some(&mut rng), &pub_params_compound.vanilla_params, ) .expect("failed to generate groth params"); - let multi_proof = EmptySectorUpdateCompound::>::prove( + let multi_proof = EmptySectorUpdateCompound::::prove( &pub_params_compound, &pub_inputs, &priv_inputs, @@ -210,7 +213,7 @@ where ) .expect("failed while proving"); - let is_valid = EmptySectorUpdateCompound::>::verify( + let is_valid = EmptySectorUpdateCompound::::verify( &pub_params_compound, &pub_inputs, &multi_proof,