diff --git a/src/backends/arkworks/ark_pairing.rs b/src/backends/arkworks/ark_pairing.rs
index d208c39..6e86b1d 100644
--- a/src/backends/arkworks/ark_pairing.rs
+++ b/src/backends/arkworks/ark_pairing.rs
@@ -16,6 +16,8 @@ use ark_ff::One;
 pub struct BN254;
 
 mod pairing_helpers {
+    use ark_ec::pairing::{MillerLoopOutput, Pairing};
+
     use super::*;
     use super::{ArkG1, ArkG2, ArkGT};
 
@@ -36,8 +38,11 @@ mod pairing_helpers {
     /// Sequential multi-pairing
     #[allow(dead_code)]
-    #[tracing::instrument(skip_all, name = "multi_pair_sequential", fields(len = ps.len()))]
-    pub(super) fn multi_pair_sequential(ps: &[ArkG1], qs: &[ArkG2]) -> ArkGT {
+    #[tracing::instrument(skip_all, name = "multi_pair_sequential_miller_loop", fields(len = ps.len()))]
+    pub(super) fn multi_pair_sequential_miller_loop(
+        ps: &[ArkG1],
+        qs: &[ArkG2],
+    ) -> MillerLoopOutput<Bn254> {
         use ark_bn254::{G1Affine, G2Affine};
 
         let ps_prep: Vec<<Bn254 as Pairing>::G1Prepared> = ps
@@ -62,7 +67,10 @@ mod pairing_helpers {
     /// Sequential multi-pairing with G2 from setup (uses cache if available)
     #[allow(dead_code)]
     #[tracing::instrument(skip_all, name = "multi_pair_g2_setup_sequential", fields(len = ps.len()))]
-    pub(super) fn multi_pair_g2_setup_sequential(ps: &[ArkG1], qs: &[ArkG2]) -> ArkGT {
+    pub(super) fn multi_pair_g2_setup_sequential(
+        ps: &[ArkG1],
+        qs: &[ArkG2],
+    ) -> MillerLoopOutput<Bn254> {
         use ark_bn254::G1Affine;
 
         let ps_prep: Vec<<Bn254 as Pairing>::G1Prepared> = ps
@@ -94,7 +102,10 @@ mod pairing_helpers {
     /// Sequential multi-pairing with G1 from setup (uses cache if available)
     #[allow(dead_code)]
     #[tracing::instrument(skip_all, name = "multi_pair_g1_setup_sequential", fields(len = ps.len()))]
-    pub(super) fn multi_pair_g1_setup_sequential(ps: &[ArkG1], qs: &[ArkG2]) -> ArkGT {
+    pub(super) fn multi_pair_g1_setup_sequential(
+        ps: &[ArkG1],
+        qs: &[ArkG2],
+    ) -> MillerLoopOutput<Bn254> {
         use ark_bn254::G2Affine;
 
         let qs_prep: Vec<<Bn254 as Pairing>::G2Prepared> = qs
@@ -131,24 +142,23 @@ mod pairing_helpers {
     fn multi_pair_with_prepared(
         ps_prep: Vec<<Bn254 as Pairing>::G1Prepared>,
         qs_prep: &[<Bn254 as Pairing>::G2Prepared],
-    ) -> ArkGT {
-        let miller_output = Bn254::multi_miller_loop(ps_prep, qs_prep.to_vec());
-        let result = Bn254::final_exponentiation(miller_output)
-            .expect("Final exponentiation should not fail");
-        ArkGT(result.0)
+    ) -> MillerLoopOutput<Bn254> {
+        Bn254::multi_miller_loop(ps_prep, qs_prep.to_vec())
     }
 
     /// Parallel multi-pairing with chunked Miller loops (no caching assumptions)
     #[cfg(feature = "parallel")]
-    #[tracing::instrument(skip_all, name = "multi_pair_parallel", fields(len = ps.len(), chunk_size = determine_chunk_size(ps.len())))]
-    pub(super) fn multi_pair_parallel(ps: &[ArkG1], qs: &[ArkG2]) -> ArkGT {
+    #[tracing::instrument(skip_all, name = "multi_pair_parallel_miller_loop", fields(len = ps.len(), chunk_size = determine_chunk_size(ps.len())))]
+    pub(super) fn multi_pair_parallel_miller_loop(
+        ps: &[ArkG1],
+        qs: &[ArkG2],
+    ) -> MillerLoopOutput<Bn254> {
         use ark_bn254::{G1Affine, G2Affine};
         use rayon::prelude::*;
 
         let chunk_size = determine_chunk_size(ps.len());
 
-        let combined = ps
-            .par_chunks(chunk_size)
+        ps.par_chunks(chunk_size)
             .zip(qs.par_chunks(chunk_size))
             .map(|(ps_chunk, qs_chunk)| {
                 let ps_prep: Vec<<Bn254 as Pairing>::G1Prepared> = ps_chunk
@@ -172,17 +182,16 @@ mod pairing_helpers {
             .reduce(
                 || ark_ec::pairing::MillerLoopOutput(<<Bn254 as Pairing>::TargetField>::one()),
                 |a, b| ark_ec::pairing::MillerLoopOutput(a.0 * b.0),
-            );
-
-        let result =
-            Bn254::final_exponentiation(combined).expect("Final exponentiation should not fail");
-        ArkGT(result.0)
+            )
     }
 
     /// Parallel multi-pairing with G2 from setup (uses cache if available)
     #[cfg(feature = "parallel")]
     #[tracing::instrument(skip_all, name = "multi_pair_g2_setup_parallel", fields(len = ps.len(), chunk_size = determine_chunk_size(ps.len())))]
-    pub(super) fn multi_pair_g2_setup_parallel(ps: &[ArkG1], qs: &[ArkG2]) -> ArkGT {
+    pub(super) fn multi_pair_g2_setup_parallel(
+        ps: &[ArkG1],
+        qs: &[ArkG2],
+    ) -> MillerLoopOutput<Bn254> {
         use ark_bn254::G1Affine;
         use rayon::prelude::*;
 
@@ -193,8 +202,7 @@ mod pairing_helpers {
         #[cfg(not(feature = "cache"))]
         let cached_g2: Option<&[_]> = None;
 
-        let combined = ps
-            .par_chunks(chunk_size)
+        ps.par_chunks(chunk_size)
             .enumerate()
             .map(|(chunk_idx, ps_chunk)| {
                 let start_idx = chunk_idx * chunk_size;
@@ -226,17 +234,16 @@ mod pairing_helpers {
             .reduce(
                 || ark_ec::pairing::MillerLoopOutput(<<Bn254 as Pairing>::TargetField>::one()),
                 |a, b| ark_ec::pairing::MillerLoopOutput(a.0 * b.0),
-            );
-
-        let result =
-            Bn254::final_exponentiation(combined).expect("Final exponentiation should not fail");
-        ArkGT(result.0)
+            )
     }
 
     /// Parallel multi-pairing with G1 from setup (uses cache if available)
     #[cfg(feature = "parallel")]
     #[tracing::instrument(skip_all, name = "multi_pair_g1_setup_parallel", fields(len = ps.len(), chunk_size = determine_chunk_size(ps.len())))]
-    pub(super) fn multi_pair_g1_setup_parallel(ps: &[ArkG1], qs: &[ArkG2]) -> ArkGT {
+    pub(super) fn multi_pair_g1_setup_parallel(
+        ps: &[ArkG1],
+        qs: &[ArkG2],
+    ) -> MillerLoopOutput<Bn254> {
         use ark_bn254::G2Affine;
         use rayon::prelude::*;
 
@@ -247,8 +254,7 @@ mod pairing_helpers {
         #[cfg(not(feature = "cache"))]
         let cached_g1: Option<&[_]> = None;
 
-        let combined = qs
-            .par_chunks(chunk_size)
+        qs.par_chunks(chunk_size)
             .enumerate()
             .map(|(chunk_idx, qs_chunk)| {
                 let start_idx = chunk_idx * chunk_size;
@@ -280,47 +286,43 @@ mod pairing_helpers {
             .reduce(
                 || ark_ec::pairing::MillerLoopOutput(<<Bn254 as Pairing>::TargetField>::one()),
                 |a, b| ark_ec::pairing::MillerLoopOutput(a.0 * b.0),
-            );
-
-        let result =
-            Bn254::final_exponentiation(combined).expect("Final exponentiation should not fail");
-        ArkGT(result.0)
+            )
     }
 
     /// Optimized multi-pairing dispatch
     pub(super) fn multi_pair_optimized(ps: &[ArkG1], qs: &[ArkG2]) -> ArkGT {
         #[cfg(feature = "parallel")]
-        {
-            multi_pair_parallel(ps, qs)
-        }
+        let multi_miller_loop_output = multi_pair_parallel_miller_loop(ps, qs);
         #[cfg(not(feature = "parallel"))]
-        {
-            multi_pair_sequential(ps, qs)
-        }
+        let multi_miller_loop_output = multi_pair_sequential_miller_loop(ps, qs);
+
+        let result = Bn254::final_exponentiation(multi_miller_loop_output)
+            .expect("Final exponentiation should not fail");
+        ArkGT(result.0)
     }
 
     /// Optimized multi-pairing dispatch for G2 from setup
     pub(super) fn multi_pair_g2_setup_optimized(ps: &[ArkG1], qs: &[ArkG2]) -> ArkGT {
         #[cfg(feature = "parallel")]
-        {
-            multi_pair_g2_setup_parallel(ps, qs)
-        }
+        let combined = multi_pair_g2_setup_parallel(ps, qs);
         #[cfg(not(feature = "parallel"))]
-        {
-            multi_pair_g2_setup_sequential(ps, qs)
-        }
+        let combined = multi_pair_g2_setup_sequential(ps, qs);
+
+        let result =
+            Bn254::final_exponentiation(combined).expect("Final exponentiation should not fail");
+        ArkGT(result.0)
    }
 
     /// Optimized multi-pairing dispatch for G1 from setup
     pub(super) fn multi_pair_g1_setup_optimized(ps: &[ArkG1], qs: &[ArkG2]) -> ArkGT {
         #[cfg(feature = "parallel")]
-        {
-            multi_pair_g1_setup_parallel(ps, qs)
-        }
+        let combined = multi_pair_g1_setup_parallel(ps, qs);
         #[cfg(not(feature = "parallel"))]
-        {
-            multi_pair_g1_setup_sequential(ps, qs)
-        }
+        let combined = multi_pair_g1_setup_sequential(ps, qs);
+
+        let result =
+            Bn254::final_exponentiation(combined).expect("Final exponentiation should not fail");
+        ArkGT(result.0)
     }
 }
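Editor's note on the refactor above: returning raw `MillerLoopOutput` values lets callers multiply several partial Miller-loop products in the target field and pay for a single final exponentiation at the end, which is exactly what the `*_optimized` dispatchers now do. A self-contained sketch of that identity using only public arkworks APIs (nothing from this PR), in case a reviewer wants to check the algebra:

```rust
use ark_bn254::{Bn254, G1Projective, G2Projective};
use ark_ec::pairing::{MillerLoopOutput, Pairing};
use ark_std::UniformRand;

fn main() {
    let mut rng = ark_std::test_rng();
    let (p1, p2) = (G1Projective::rand(&mut rng), G1Projective::rand(&mut rng));
    let (q1, q2) = (G2Projective::rand(&mut rng), G2Projective::rand(&mut rng));

    // Run the two Miller loops separately and multiply in the target field...
    let m1 = Bn254::multi_miller_loop([p1], [q1]);
    let m2 = Bn254::multi_miller_loop([p2], [q2]);
    let combined = MillerLoopOutput(m1.0 * m2.0);

    // ...then pay for the final exponentiation once.
    let batched = Bn254::final_exponentiation(combined).expect("non-degenerate input");

    // This equals the product of the two full pairings (additive notation).
    assert_eq!(batched, Bn254::pairing(p1, q1) + Bn254::pairing(p2, q2));
}
```

The chunked parallel paths combine their per-chunk outputs the same way, via `MillerLoopOutput(a.0 * b.0)` in the rayon `reduce`.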
diff --git a/src/backends/arkworks/ark_poly.rs b/src/backends/arkworks/ark_poly.rs
index f8f9a8b..c38e91f 100644
--- a/src/backends/arkworks/ark_poly.rs
+++ b/src/backends/arkworks/ark_poly.rs
@@ -4,7 +4,9 @@ use super::ark_field::ArkFr;
 use crate::error::DoryError;
-use crate::primitives::arithmetic::{DoryRoutines, Field, Group, PairingCurve};
+use crate::primitives::arithmetic::{
+    CompressedPairingCurve, DoryRoutines, Field, Group, PairingCurve,
+};
 use crate::primitives::poly::{MultilinearLagrange, Polynomial};
 use crate::setup::ProverSetup;
 
@@ -93,6 +95,48 @@ impl Polynomial<ArkFr> for ArkworksPolynomial {
         Ok((commitment, row_commitments))
     }
+
+    #[tracing::instrument(skip_all, name = "ArkworksPolynomial::commit_compressed", fields(nu, sigma, num_rows = 1 << nu, num_cols = 1 << sigma))]
+    fn commit_compressed<E, M1>(
+        &self,
+        nu: usize,
+        sigma: usize,
+        setup: &ProverSetup<E>,
+    ) -> Result<(E::CompressedGT, Vec<E::G1>), DoryError>
+    where
+        E: CompressedPairingCurve,
+        M1: DoryRoutines<E::G1>,
+        E::G1: Group<Scalar = ArkFr>,
+    {
+        let expected_len = 1 << (nu + sigma);
+        if self.coefficients.len() != expected_len {
+            return Err(DoryError::InvalidSize {
+                expected: expected_len,
+                actual: self.coefficients.len(),
+            });
+        }
+
+        let num_rows = 1 << nu;
+        let num_cols = 1 << sigma;
+
+        // Tier 1: Compute row commitments
+        let mut row_commitments = Vec::with_capacity(num_rows);
+        for i in 0..num_rows {
+            let row_start = i * num_cols;
+            let row_end = row_start + num_cols;
+            let row = &self.coefficients[row_start..row_end];
+
+            let g1_bases = &setup.g1_vec[..num_cols];
+            let row_commit = M1::msm(g1_bases, row);
+            row_commitments.push(row_commit);
+        }
+
+        // Tier 2: Compute final commitment via multi-pairing (g2_bases from setup)
+        let g2_bases = &setup.g2_vec[..num_rows];
+        let commitment = E::multi_pair_g2_setup_compressed(&row_commitments, g2_bases);
+
+        Ok((commitment, row_commitments))
+    }
 }
 
 impl MultilinearLagrange<ArkFr> for ArkworksPolynomial {
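For reviewers new to the two-tier (AFGHO) structure that `commit_compressed` mirrors: tier 1 is one MSM per matrix row, tier 2 is a single multi-pairing of the row commitments against the G2 generators. A hedged, standalone sketch with raw arkworks operations and random stand-in generators (the real code takes `g1_vec`/`g2_vec` from `ProverSetup` and keeps the tier-2 result in compressed form):

```rust
use ark_bn254::{Bn254, Fr, G1Projective, G2Projective};
use ark_ec::pairing::Pairing;
use ark_ec::{CurveGroup, VariableBaseMSM};
use ark_std::UniformRand;

fn main() {
    let mut rng = ark_std::test_rng();
    let (nu, sigma) = (1usize, 2usize);
    let (rows, cols) = (1usize << nu, 1usize << sigma);

    // Coefficient matrix (row-major) and stand-in generators.
    let coeffs: Vec<Fr> = (0..rows * cols).map(|_| Fr::rand(&mut rng)).collect();
    let g1: Vec<_> = (0..cols)
        .map(|_| G1Projective::rand(&mut rng).into_affine())
        .collect();
    let g2: Vec<_> = (0..rows).map(|_| G2Projective::rand(&mut rng)).collect();

    // Tier 1: one MSM per row of the matrix.
    let row_commitments: Vec<G1Projective> = (0..rows)
        .map(|i| G1Projective::msm(&g1, &coeffs[i * cols..(i + 1) * cols]).unwrap())
        .collect();

    // Tier 2: a single multi-pairing of the row commitments with G2 generators.
    let _commitment = Bn254::multi_pairing(row_commitments, g2);
}
```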
diff --git a/src/evaluation_proof.rs b/src/evaluation_proof.rs
index d1bbbcf..72780a8 100644
--- a/src/evaluation_proof.rs
+++ b/src/evaluation_proof.rs
@@ -27,7 +27,9 @@ use crate::error::DoryError;
 use crate::messages::VMVMessage;
-use crate::primitives::arithmetic::{DoryRoutines, Field, Group, PairingCurve};
+use crate::primitives::arithmetic::{
+    CompressedPairingCurve, DoryRoutines, Field, Group, PairingCurve,
+};
 use crate::primitives::poly::MultilinearLagrange;
 use crate::primitives::transcript::Transcript;
 use crate::proof::DoryProof;
@@ -236,6 +238,209 @@ where
     })
 }
 
+/// Create an evaluation proof for a polynomial at a point.
+/// Same as `create_evaluation_proof`, but the result is compressed.
+///
+/// Implements the Eval-VMV-RE protocol from Dory Section 5.
+/// The protocol proves that polynomial(point) = evaluation via the VMV relation:
+/// evaluation = L^T × M × R
+///
+/// # Algorithm
+/// 1. Compute or use provided row commitments (Tier 1 commitment)
+/// 2. Split evaluation point into left and right vectors
+/// 3. Compute v_vec (column evaluations)
+/// 4. Create VMV message (C, D2, E1)
+/// 5. Initialize prover state for inner product / reduce-and-fold protocol
+/// 6. Run max(nu, sigma) rounds of reduce-and-fold (with automatic padding for non-square):
+///    - First reduce: compute message and apply beta challenge (reduce)
+///    - Second reduce: compute message and apply alpha challenge (fold)
+/// 7. Compute final scalar product message
+///
+/// # Parameters
+/// - `polynomial`: Polynomial to prove evaluation for
+/// - `point`: Evaluation point (length nu + sigma)
+/// - `row_commitments`: Optional precomputed row commitments from `polynomial.commit()`
+/// - `nu`: Log₂ of number of rows (constraint: nu ≤ sigma)
+/// - `sigma`: Log₂ of number of columns
+/// - `setup`: Prover setup
+/// - `transcript`: Fiat-Shamir transcript for challenge generation
+///
+/// # Returns
+/// Complete Dory proof containing VMV message, reduce messages, and final message
+///
+/// # Errors
+/// Returns an error if dimensions are invalid (nu > sigma) or the protocol fails
+///
+/// # Matrix Layout
+/// Supports both square (nu = sigma) and non-square (nu < sigma) matrices.
+/// For non-square matrices, vectors are automatically padded to length 2^sigma.
+#[allow(clippy::type_complexity)]
+#[tracing::instrument(skip_all, name = "create_evaluation_proof_compressed")]
+pub fn create_evaluation_proof_compressed<F, E, M1, M2, T, P>(
+    polynomial: &P,
+    point: &[F],
+    row_commitments: Option<Vec<E::G1>>,
+    nu: usize,
+    sigma: usize,
+    setup: &ProverSetup<E>,
+    transcript: &mut T,
+) -> Result<DoryProof<E::CompressedGT, E::G1, E::G2>, DoryError>
+where
+    F: Field,
+    E: CompressedPairingCurve,
+    E::G1: Group<Scalar = F>,
+    E::G2: Group<Scalar = F>,
+    E::GT: Group<Scalar = F>,
+    M1: DoryRoutines<E::G1>,
+    M2: DoryRoutines<E::G2>,
+    T: Transcript,
+    P: MultilinearLagrange<F>,
+{
+    if point.len() != nu + sigma {
+        return Err(DoryError::InvalidPointDimension {
+            expected: nu + sigma,
+            actual: point.len(),
+        });
+    }
+
+    // Validate matrix dimensions: nu must be ≤ sigma (rows ≤ columns)
+    if nu > sigma {
+        return Err(DoryError::InvalidSize {
+            expected: sigma,
+            actual: nu,
+        });
+    }
+
+    let row_commitments = if let Some(rc) = row_commitments {
+        rc
+    } else {
+        let (_commitment, rc) = polynomial.commit::<E, M1>(nu, sigma, setup)?;
+        rc
+    };
+
+    let _span_eval_vecs = tracing::span!(
+        tracing::Level::DEBUG,
+        "compute_evaluation_vectors",
+        nu,
+        sigma
+    )
+    .entered();
+    let (left_vec, right_vec) = polynomial.compute_evaluation_vectors(point, nu, sigma);
+    drop(_span_eval_vecs);
+
+    let v_vec = polynomial.vector_matrix_product(&left_vec, nu, sigma);
+
+    let mut padded_row_commitments = row_commitments.clone();
+    if nu < sigma {
+        padded_row_commitments.resize(1 << sigma, E::G1::identity());
+    }
+
+    let _span_vmv =
+        tracing::span!(tracing::Level::DEBUG, "compute_vmv_message", nu, sigma).entered();
+
+    // C = e(⟨row_commitments, v_vec⟩, h₂)
+    let t_vec_v = M1::msm(&padded_row_commitments, &v_vec);
+    let c = E::pair_compressed(&t_vec_v, &setup.h2);
+
+    // D₂ = e(⟨Γ₁[sigma], v_vec⟩, h₂)
+    let g1_bases_at_sigma = &setup.g1_vec[..1 << sigma];
+    let gamma1_v = M1::msm(g1_bases_at_sigma, &v_vec);
+    let d2 = E::pair_compressed(&gamma1_v, &setup.h2);
+
+    // E₁ = ⟨row_commitments, left_vec⟩
+    let e1 = M1::msm(&row_commitments, &left_vec);
+
+    let vmv_message = VMVMessage { c, d2, e1 };
+    drop(_span_vmv);
+
+    let _span_transcript = tracing::span!(tracing::Level::DEBUG, "vmv_transcript").entered();
+    transcript.append_serde(b"vmv_c", &vmv_message.c);
+    transcript.append_serde(b"vmv_d2", &vmv_message.d2);
+    transcript.append_serde(b"vmv_e1", &vmv_message.e1);
+    drop(_span_transcript);
+
+    let _span_init = tracing::span!(
+        tracing::Level::DEBUG,
+        "fixed_base_vector_scalar_mul_h2",
+        nu,
+        sigma
+    )
+    .entered();
+
+    // v₂ = v_vec · Γ₂,fin (each scalar scales g_fin)
+    let v2 = {
+        let _span =
+            tracing::span!(tracing::Level::DEBUG, "fixed_base_vector_scalar_mul_h2").entered();
+        M2::fixed_base_vector_scalar_mul(&setup.h2, &v_vec)
+    };
+
+    let mut padded_right_vec = right_vec.clone();
+    let mut padded_left_vec = left_vec.clone();
+    if nu < sigma {
+        padded_right_vec.resize(1 << sigma, F::zero());
+        padded_left_vec.resize(1 << sigma, F::zero());
+    }
+
+    let mut prover_state = DoryProverState::new(
+        padded_row_commitments, // v1 = T_vec_prime (row commitments, padded)
+        v2,                     // v2 = v_vec · g_fin
+        Some(v_vec),            // v2_scalars for first-round MSM+pair optimization
+        padded_right_vec,       // s1 = right_vec (padded)
+        padded_left_vec,        // s2 = left_vec (padded)
+        setup,
+    );
+    drop(_span_init);
+
+    let num_rounds = nu.max(sigma);
+    let mut first_messages = Vec::with_capacity(num_rounds);
+    let mut second_messages = Vec::with_capacity(num_rounds);
+
+    for _round in 0..num_rounds {
+        let first_msg = prover_state.compute_first_message_compressed::<M1, M2>();
+
+        transcript.append_serde(b"d1_left", &first_msg.d1_left);
+        transcript.append_serde(b"d1_right", &first_msg.d1_right);
+        transcript.append_serde(b"d2_left", &first_msg.d2_left);
+        transcript.append_serde(b"d2_right", &first_msg.d2_right);
+        transcript.append_serde(b"e1_beta", &first_msg.e1_beta);
+        transcript.append_serde(b"e2_beta", &first_msg.e2_beta);
+
+        let beta = transcript.challenge_scalar(b"beta");
+        prover_state.apply_first_challenge::<M1, M2>(&beta);
+
+        first_messages.push(first_msg);
+
+        let second_msg = prover_state.compute_second_message_compressed::<M1, M2>();
+
+        transcript.append_serde(b"c_plus", &second_msg.c_plus);
+        transcript.append_serde(b"c_minus", &second_msg.c_minus);
+        transcript.append_serde(b"e1_plus", &second_msg.e1_plus);
+        transcript.append_serde(b"e1_minus", &second_msg.e1_minus);
+        transcript.append_serde(b"e2_plus", &second_msg.e2_plus);
+        transcript.append_serde(b"e2_minus", &second_msg.e2_minus);
+
+        let alpha = transcript.challenge_scalar(b"alpha");
+        prover_state.apply_second_challenge::<M1, M2>(&alpha);
+
+        second_messages.push(second_msg);
+    }
+
+    let gamma = transcript.challenge_scalar(b"gamma");
+    let final_message = prover_state.compute_final_message::<M1, M2>(&gamma);
+
+    // We grab the d challenge at the end (despite it being unused) to keep transcript states in sync post-proof.
+    let _d = transcript.challenge_scalar(b"d");
+
+    Ok(DoryProof {
+        vmv_message,
+        first_messages,
+        second_messages,
+        final_message,
+        nu,
+        sigma,
+    })
+}
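The VMV relation driving the prover above can be checked numerically: with `L` and `R` the multilinear Lagrange vectors for the row and column halves of the point, `evaluation = Lᵀ·M·R`, and `v_vec = Lᵀ·M` is the per-column evaluation vector fed into `C` and `D₂`. A small self-contained check over `Fr` (the 2×2 layout and coordinate ordering here are illustrative):

```rust
use ark_bn254::Fr;
use ark_ff::{One, UniformRand, Zero};

fn main() {
    let mut rng = ark_std::test_rng();

    // 2x2 coefficient matrix over the Lagrange basis (nu = sigma = 1).
    let m = [
        [Fr::rand(&mut rng), Fr::rand(&mut rng)],
        [Fr::rand(&mut rng), Fr::rand(&mut rng)],
    ];
    let (xr, xc) = (Fr::rand(&mut rng), Fr::rand(&mut rng));

    // Multilinear Lagrange vectors for the row bit and the column bit.
    let l = [Fr::one() - xr, xr];
    let r = [Fr::one() - xc, xc];

    // evaluation = L^T · M · R
    let mut eval = Fr::zero();
    for i in 0..2 {
        for j in 0..2 {
            eval += l[i] * m[i][j] * r[j];
        }
    }

    // v_vec = L^T · M, the per-column evaluations computed by the prover.
    let v = [
        l[0] * m[0][0] + l[1] * m[1][0],
        l[0] * m[0][1] + l[1] * m[1][1],
    ];
    assert_eq!(eval, v[0] * r[0] + v[1] * r[1]);
}
```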
 
 /// Verify an evaluation proof
 ///
 /// Verifies that a committed polynomial evaluates to the claimed value at the given point.
@@ -366,3 +571,143 @@ where
 
     verifier_state.verify_final(&proof.final_message, &gamma, &d)
 }
+
+/// Verify a compressed evaluation proof
+///
+/// This is the same as `verify_evaluation_proof`, but the proof contains compressed GT elements.
+/// This separate function is needed because the compressed GT elements are appended to the transcript
+/// by both the prover and the verifier; in particular, the verifier must append the compressed
+/// GT elements to the transcript before uncompressing them for verification.
+///
+/// Verifies that a committed polynomial evaluates to the claimed value at the given point.
+/// Works with both square and non-square matrix layouts (nu ≤ sigma).
+///
+/// # Algorithm
+/// 1. Extract VMV message from proof
+/// 2. Check sigma protocol 2: d2 = e(e1, h2)
+/// 3. Compute e2 = h2 * evaluation
+/// 4. Initialize verifier state with commitment and VMV message
+/// 5. Run max(nu, sigma) rounds of reduce-and-fold verification (with automatic padding)
+/// 6. Derive gamma and d challenges
+/// 7. Verify final scalar product message
+///
+/// # Parameters
+/// - `commitment`: Polynomial commitment (in GT) - can be a homomorphically combined commitment
+/// - `evaluation`: Claimed evaluation result
+/// - `point`: Evaluation point (length must equal proof.nu + proof.sigma)
+/// - `proof`: Evaluation proof to verify (contains nu and sigma dimensions)
+/// - `setup`: Verifier setup
+/// - `transcript`: Fiat-Shamir transcript for challenge generation
+///
+/// # Returns
+/// `Ok(())` if proof is valid, `Err(DoryError)` otherwise
+///
+/// # Homomorphic Verification
+/// This function can verify proofs for homomorphically combined polynomials.
+/// The commitment parameter should be the combined commitment, and the evaluation
+/// should be the evaluation of the combined polynomial.
+///
+/// # Errors
+/// Returns `DoryError::InvalidProof` if verification fails, or other variants
+/// if the input parameters are incorrect (e.g., point dimension mismatch).
+#[tracing::instrument(skip_all, name = "verify_evaluation_proof_compressed")]
+pub fn verify_evaluation_proof_compressed<F, E, M1, M2, T>(
+    commitment: E::CompressedGT,
+    evaluation: F,
+    point: &[F],
+    proof: &DoryProof<E::CompressedGT, E::G1, E::G2>,
+    setup: VerifierSetup<E>,
+    transcript: &mut T,
+) -> Result<(), DoryError>
+where
+    F: Field,
+    E: CompressedPairingCurve,
+    E::G1: Group<Scalar = F>,
+    E::G2: Group<Scalar = F>,
+    E::GT: Group<Scalar = F>,
+    M1: DoryRoutines<E::G1>,
+    M2: DoryRoutines<E::G2>,
+    T: Transcript,
+    E::CompressedGT: Into<E::GT>,
+{
+    let nu = proof.nu;
+    let sigma = proof.sigma;
+
+    if point.len() != nu + sigma {
+        return Err(DoryError::InvalidPointDimension {
+            expected: nu + sigma,
+            actual: point.len(),
+        });
+    }
+
+    let vmv_message = &proof.vmv_message;
+    transcript.append_serde(b"vmv_c", &vmv_message.c);
+    transcript.append_serde(b"vmv_d2", &vmv_message.d2);
+    transcript.append_serde(b"vmv_e1", &vmv_message.e1);
+
+    let pairing_check = E::pair_compressed(&vmv_message.e1, &setup.h2);
+    if vmv_message.d2 != pairing_check {
+        return Err(DoryError::InvalidProof);
+    }
+
+    let e2 = setup.h2.scale(&evaluation);
+
+    // Folded-scalar accumulation with per-round coordinates.
+    // num_rounds = sigma (we fold column dimensions).
+    let num_rounds = sigma;
+    // s1 (right/prover): the σ column coordinates in natural order (LSB→MSB).
+    // No padding here: the verifier folds across the σ column dimensions.
+    // With MSB-first folding, these coordinates are only consumed after the first σ−ν rounds,
+    // which correspond to the padded MSB dimensions on the left tensor, matching the prover.
+    let col_coords = &point[..sigma];
+    let s1_coords: Vec<F> = col_coords.to_vec();
+    // s2 (left/prover): the ν row coordinates in natural order, followed by zeros for the extra
+    // MSB dimensions. Conceptually this is s ⊗ [1,0]^(σ−ν): under MSB-first folds, the first
+    // σ−ν rounds multiply s2 by α⁻¹ while contributing no right halves (since those entries are 0).
+    let mut s2_coords: Vec<F> = vec![F::zero(); sigma];
+    let row_coords = &point[sigma..sigma + nu];
+    s2_coords[..nu].copy_from_slice(&row_coords[..nu]);
+
+    let mut verifier_state = DoryVerifierState::new(
+        vmv_message.c.into(),  // c from VMV message
+        commitment.into(),     // d1 = commitment
+        vmv_message.d2.into(), // d2 from VMV message
+        vmv_message.e1,        // e1 from VMV message
+        e2,                    // e2 computed from evaluation
+        s1_coords,             // s1: columns c0..c_{σ−1} (LSB→MSB), no padding; folded across σ dims
+        s2_coords,             // s2: rows r0..r_{ν−1} then zeros in MSB dims (emulates s ⊗ [1,0]^(σ−ν))
+        num_rounds,
+        setup.clone(),
+    );
+
+    for round in 0..num_rounds {
+        let compressed_first_msg = &proof.first_messages[round];
+        let compressed_second_msg = &proof.second_messages[round];
+
+        transcript.append_serde(b"d1_left", &compressed_first_msg.d1_left);
+        transcript.append_serde(b"d1_right", &compressed_first_msg.d1_right);
+        transcript.append_serde(b"d2_left", &compressed_first_msg.d2_left);
+        transcript.append_serde(b"d2_right", &compressed_first_msg.d2_right);
+        transcript.append_serde(b"e1_beta", &compressed_first_msg.e1_beta);
+        transcript.append_serde(b"e2_beta", &compressed_first_msg.e2_beta);
+        let beta = transcript.challenge_scalar(b"beta");
+
+        transcript.append_serde(b"c_plus", &compressed_second_msg.c_plus);
+        transcript.append_serde(b"c_minus", &compressed_second_msg.c_minus);
+        transcript.append_serde(b"e1_plus", &compressed_second_msg.e1_plus);
+        transcript.append_serde(b"e1_minus", &compressed_second_msg.e1_minus);
+        transcript.append_serde(b"e2_plus", &compressed_second_msg.e2_plus);
+        transcript.append_serde(b"e2_minus", &compressed_second_msg.e2_minus);
+        let alpha = transcript.challenge_scalar(b"alpha");
+
+        let first_msg = compressed_first_msg.convert_gt::<E::GT>();
+        let second_msg = compressed_second_msg.convert_gt::<E::GT>();
+
+        verifier_state.process_round(&first_msg, &second_msg, &alpha, &beta);
+    }
+
+    let gamma = transcript.challenge_scalar(b"gamma");
+    let d = transcript.challenge_scalar(b"d");
+
+    verifier_state.verify_final(&proof.final_message, &gamma, &d)
+}
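The `s ⊗ [1,0]^(σ−ν)` remark in the comments above is easy to sanity-check: zero-padding the length-2^ν row tensor out to 2^σ entries is exactly a tensor product with `[1, 0]` on each extra MSB dimension. A minimal check for ν = 1, σ = 2 (index ordering illustrative):

```rust
use ark_bn254::Fr;
use ark_ff::{One, UniformRand, Zero};

fn main() {
    let mut rng = ark_std::test_rng();
    let r0 = Fr::rand(&mut rng);

    // Length-2 row tensor for nu = 1.
    let left = [Fr::one() - r0, r0];

    // Zero-padding to length 4 (sigma = 2)...
    let padded = vec![left[0], left[1], Fr::zero(), Fr::zero()];

    // ...equals tensoring [1, 0] onto the extra MSB dimension.
    let msb = [Fr::one(), Fr::zero()];
    let tensored: Vec<Fr> = msb
        .iter()
        .flat_map(|&hi| left.iter().map(move |&lo| hi * lo))
        .collect();

    assert_eq!(padded, tensored);
}
```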
diff --git a/src/lib.rs b/src/lib.rs
index 37940e5..52135d0 100644
--- a/src/lib.rs
+++ b/src/lib.rs
@@ -110,6 +110,8 @@ pub use proof::DoryProof;
 pub use reduce_and_fold::{DoryProverState, DoryVerifierState};
 pub use setup::{ProverSetup, VerifierSetup};
 
+use crate::primitives::arithmetic::CompressedPairingCurve;
+
 /// Generate or load prover and verifier setups from disk
 ///
 /// Creates or loads the transparent setup parameters for Dory PCS.
@@ -293,6 +295,77 @@ where
     )
 }
 
+/// Evaluate a polynomial at a point and create a compressed proof
+///
+/// Creates an evaluation proof for a polynomial at a given point using precomputed
+/// tier-1 commitments (row commitments).
+///
+/// # Workflow
+/// 1. Call `polynomial.commit(nu, sigma, setup)` to get `(tier_2, row_commitments)`
+/// 2. Call this function with the `row_commitments` to create the proof
+/// 3. Use `tier_2` for verification via the `verify_compressed()` function
+///
+/// # Parameters
+/// - `polynomial`: Polynomial implementing the MultilinearLagrange trait
+/// - `point`: Evaluation point (length must equal nu + sigma)
+/// - `row_commitments`: Tier-1 commitments (row commitments in G1) from `polynomial.commit()`
+/// - `nu`: Log₂ of number of rows (constraint: nu ≤ sigma for non-square matrices)
+/// - `sigma`: Log₂ of number of columns
+/// - `setup`: Prover setup
+/// - `transcript`: Fiat-Shamir transcript
+///
+/// # Returns
+/// The evaluation proof containing VMV message, reduce messages, and final message
+///
+/// # Homomorphic Properties
+/// Proofs can be created for homomorphically combined polynomials. If you have
+/// commitments Com(P₁), Com(P₂), ..., Com(Pₙ) and want to prove evaluation of
+/// r₁·P₁ + r₂·P₂ + ... + rₙ·Pₙ, you can:
+/// 1. Combine tier-2 commitments: r₁·Com(P₁) + r₂·Com(P₂) + ... + rₙ·Com(Pₙ)
+/// 2. Combine tier-1 commitments element-wise
+/// 3. Generate the proof using this function with the combined polynomial
+///
+/// See `examples/homomorphic.rs` for a complete demonstration, and the sketch below
+/// for why the combination works.
+///
+/// # Errors
+/// Returns `DoryError` if:
+/// - Point dimension doesn't match nu + sigma
+/// - Polynomial size doesn't match 2^(nu + sigma)
+/// - Number of row commitments doesn't match 2^nu
+#[allow(clippy::type_complexity)]
+#[tracing::instrument(skip_all, name = "prove_compressed")]
+pub fn prove_compressed<F, E, M1, M2, P, T>(
+    polynomial: &P,
+    point: &[F],
+    row_commitments: Vec<E::G1>,
+    nu: usize,
+    sigma: usize,
+    setup: &ProverSetup<E>,
+    transcript: &mut T,
+) -> Result<DoryProof<E::CompressedGT, E::G1, E::G2>, DoryError>
+where
+    F: Field,
+    E: CompressedPairingCurve,
+    E::G1: Group<Scalar = F>,
+    E::G2: Group<Scalar = F>,
+    E::GT: Group<Scalar = F>,
+    M1: DoryRoutines<E::G1>,
+    M2: DoryRoutines<E::G2>,
+    P: MultilinearLagrange<F>,
+    T: primitives::transcript::Transcript,
+{
+    // Create evaluation proof using row_commitments
+    evaluation_proof::create_evaluation_proof_compressed::<F, E, M1, M2, T, P>(
+        polynomial,
+        point,
+        Some(row_commitments),
+        nu,
+        sigma,
+        setup,
+        transcript,
+    )
+}
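The homomorphic recipe in the doc comment above follows from bilinearity of the pairing. A hedged standalone check with raw arkworks types and random stand-in generators (2×2 matrices; `PairingOutput` stands in for the uncompressed GT commitment):

```rust
use ark_bn254::{Bn254, Fr, G1Projective, G2Projective};
use ark_ec::pairing::{Pairing, PairingOutput};
use ark_ec::{CurveGroup, VariableBaseMSM};
use ark_std::UniformRand;

// Two-tier commitment: one MSM per row, then one multi-pairing.
fn commit(coeffs: &[Fr], g1: &[ark_bn254::G1Affine], g2: &[G2Projective]) -> PairingOutput<Bn254> {
    let cols = g1.len();
    let rows: Vec<G1Projective> = coeffs
        .chunks(cols)
        .map(|row| G1Projective::msm(g1, row).unwrap())
        .collect();
    Bn254::multi_pairing(rows, g2.to_vec())
}

fn main() {
    let mut rng = ark_std::test_rng();
    let g1: Vec<_> = (0..2).map(|_| G1Projective::rand(&mut rng).into_affine()).collect();
    let g2: Vec<_> = (0..2).map(|_| G2Projective::rand(&mut rng)).collect();

    let p1: Vec<Fr> = (0..4).map(|_| Fr::rand(&mut rng)).collect();
    let p2: Vec<Fr> = (0..4).map(|_| Fr::rand(&mut rng)).collect();
    let (r1, r2) = (Fr::rand(&mut rng), Fr::rand(&mut rng));

    // Combine the polynomials coefficient-wise...
    let combined: Vec<Fr> = p1.iter().zip(&p2).map(|(a, b)| r1 * a + r2 * b).collect();

    // ...and the tier-2 commitments with the same scalars.
    let lhs = commit(&combined, &g1, &g2);
    let rhs = commit(&p1, &g1, &g2) * r1 + commit(&p2, &g1, &g2) * r2;
    assert_eq!(lhs, rhs);
}
```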
 
 /// Verify an evaluation proof
 ///
 /// Verifies that a committed polynomial evaluates to the claimed value at the given point.
@@ -338,3 +411,50 @@ where
         commitment, evaluation, point, proof, setup, transcript,
     )
 }
+
+/// Verify a compressed evaluation proof
+///
+/// Verifies that a committed polynomial evaluates to the claimed value at the given point.
+/// The matrix dimensions (nu, sigma) are extracted from the proof.
+///
+/// Works with both square and non-square matrix layouts (nu ≤ sigma), and can verify
+/// proofs for homomorphically combined polynomials.
+///
+/// # Parameters
+/// - `commitment`: Polynomial commitment (in GT) - can be a combined commitment for homomorphic proofs
+/// - `evaluation`: Claimed evaluation result
+/// - `point`: Evaluation point (length must equal proof.nu + proof.sigma)
+/// - `proof`: Evaluation proof to verify (contains nu and sigma)
+/// - `setup`: Verifier setup
+/// - `transcript`: Fiat-Shamir transcript
+///
+/// # Returns
+/// `Ok(())` if proof is valid, `Err(DoryError)` otherwise
+///
+/// # Errors
+/// Returns `DoryError::InvalidProof` if the proof is invalid, or other variants
+/// if the input parameters are incorrect (e.g., point dimension mismatch).
+#[tracing::instrument(skip_all, name = "verify_compressed")]
+pub fn verify_compressed<F, E, M1, M2, T>(
+    commitment: E::CompressedGT,
+    evaluation: F,
+    point: &[F],
+    proof: &DoryProof<E::CompressedGT, E::G1, E::G2>,
+    setup: VerifierSetup<E>,
+    transcript: &mut T,
+) -> Result<(), DoryError>
+where
+    F: Field,
+    E: CompressedPairingCurve + Clone,
+    E::G1: Group<Scalar = F>,
+    E::G2: Group<Scalar = F>,
+    E::GT: Group<Scalar = F>,
+    M1: DoryRoutines<E::G1>,
+    M2: DoryRoutines<E::G2>,
+    T: primitives::transcript::Transcript,
+    E::CompressedGT: Into<E::GT>,
+{
+    evaluation_proof::verify_evaluation_proof_compressed::<F, E, M1, M2, T>(
+        commitment, evaluation, point, proof, setup, transcript,
+    )
+}
diff --git a/src/messages.rs b/src/messages.rs
index dd877ee..e60ec58 100644
--- a/src/messages.rs
+++ b/src/messages.rs
@@ -64,3 +64,45 @@ pub struct ScalarProductMessage {
     /// E₂ - final G2 element
     pub e2: G2,
 }
+
+impl<GT, G1, G2> FirstReduceMessage<GT, G1, G2> {
+    /// Convert the FirstReduceMessage to a different GT type
+    ///
+    /// Currently used for compression.
+    pub fn convert_gt<GT2>(&self) -> FirstReduceMessage<GT2, G1, G2>
+    where
+        GT: Into<GT2> + Clone,
+        G1: Clone,
+        G2: Clone,
+    {
+        FirstReduceMessage {
+            d1_left: self.d1_left.clone().into(),
+            d1_right: self.d1_right.clone().into(),
+            d2_left: self.d2_left.clone().into(),
+            d2_right: self.d2_right.clone().into(),
+            e1_beta: self.e1_beta.clone(),
+            e2_beta: self.e2_beta.clone(),
+        }
+    }
+}
+
+impl<GT, G1, G2> SecondReduceMessage<GT, G1, G2> {
+    /// Convert the SecondReduceMessage to a different GT type
+    ///
+    /// Currently used for compression.
+    pub fn convert_gt<GT2>(&self) -> SecondReduceMessage<GT2, G1, G2>
+    where
+        GT: Into<GT2> + Clone,
+        G1: Clone,
+        G2: Clone,
+    {
+        SecondReduceMessage {
+            c_plus: self.c_plus.clone().into(),
+            c_minus: self.c_minus.clone().into(),
+            e1_plus: self.e1_plus.clone(),
+            e1_minus: self.e1_minus.clone(),
+            e2_plus: self.e2_plus.clone(),
+            e2_minus: self.e2_minus.clone(),
+        }
+    }
+}
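Both `convert_gt` helpers are instances of one generic pattern: map only the GT type parameter through `Into`, carrying the G1/G2 fields across unchanged. A self-contained illustration — `Packed`, `Unpacked`, and `Msg` are hypothetical stand-ins, not types from this crate, and this variant consumes `self` for brevity where the crate's version borrows and clones:

```rust
#[derive(Clone, Debug, PartialEq)]
struct Packed(u32);
#[derive(Clone, Debug, PartialEq)]
struct Unpacked(u64);

impl From<Packed> for Unpacked {
    fn from(p: Packed) -> Self {
        Unpacked(u64::from(p.0))
    }
}

#[derive(Debug, PartialEq)]
struct Msg<GT, G1> {
    c: GT,  // GT-typed field: converted
    e1: G1, // group-typed field: carried through unchanged
}

impl<GT, G1> Msg<GT, G1> {
    fn convert_gt<GT2>(self) -> Msg<GT2, G1>
    where
        GT: Into<GT2>,
    {
        Msg { c: self.c.into(), e1: self.e1 }
    }
}

fn main() {
    let m = Msg { c: Packed(7), e1: [0u8; 4] };
    let m2: Msg<Unpacked, _> = m.convert_gt();
    assert_eq!(m2.c, Unpacked(7));
}
```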
diff --git a/src/primitives/arithmetic.rs b/src/primitives/arithmetic.rs
index 94dc54c..a1579ac 100644
--- a/src/primitives/arithmetic.rs
+++ b/src/primitives/arithmetic.rs
@@ -36,6 +36,11 @@
     fn from_i64(val: i64) -> Self;
 }
 
+pub trait DoryElement:
+    Sized + Clone + Copy + PartialEq + Send + Sync + DorySerialize + DoryDeserialize + Default
+{
+}
+
 pub trait Group:
     Sized
     + Clone
@@ -63,6 +68,54 @@
     fn random<R: rand::RngCore>(rng: &mut R) -> Self;
 }
 
+pub trait CompressedPairingCurve: PairingCurve {
+    type CompressedGT: DoryElement;
+
+    fn pair_compressed(p: &Self::G1, q: &Self::G2) -> Self::CompressedGT {
+        Self::multi_pair_compressed(std::slice::from_ref(p), std::slice::from_ref(q))
+    }
+
+    fn multi_pair_compressed(ps: &[Self::G1], qs: &[Self::G2]) -> Self::CompressedGT;
+
+    /// Optimized multi-pairing when G2 points come from setup/generators
+    ///
+    /// This variant should be used when the G2 points are from the prover setup
+    /// (e.g., g2_vec generators). Backend implementations can optimize this by
+    /// caching prepared G2 points.
+    ///
+    /// # Parameters
+    /// - `ps`: G1 points (typically computed values like row commitments or v-vectors)
+    /// - `qs`: G2 points from setup (e.g., `setup.g2_vec[..n]`)
+    ///
+    /// # Returns
+    /// Product of pairings: Π e(p_i, q_i)
+    ///
+    /// # Default Implementation
+    /// Delegates to `multi_pair_compressed`
+    fn multi_pair_g2_setup_compressed(ps: &[Self::G1], qs: &[Self::G2]) -> Self::CompressedGT {
+        Self::multi_pair_compressed(ps, qs)
+    }
+
+    /// Optimized multi-pairing when G1 points are from the prover setup
+    ///
+    /// This variant should be used when the G1 points are from the prover setup
+    /// (e.g., g1_vec generators). Backend implementations can optimize this by
+    /// caching prepared G1 points.
+    ///
+    /// # Parameters
+    /// - `ps`: G1 points from setup (e.g., `setup.g1_vec[..n]`)
+    /// - `qs`: G2 points (typically computed values like v-vectors)
+    ///
+    /// # Returns
+    /// Product of pairings: Π e(p_i, q_i)
+    ///
+    /// # Default Implementation
+    /// Delegates to `multi_pair_compressed`
+    fn multi_pair_g1_setup_compressed(ps: &[Self::G1], qs: &[Self::G2]) -> Self::CompressedGT {
+        Self::multi_pair_compressed(ps, qs)
+    }
+}
+
 pub trait PairingCurve: Clone {
     type G1: Group;
     type G2: Group;
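A small point worth calling out in `pair_compressed` above: the single pairing delegates to the batched method over one-element slices via `std::slice::from_ref`, which borrows without cloning or allocating. The same provided-method shape in isolation (the `MultiOp` trait here is a toy stand-in):

```rust
trait MultiOp {
    type Out;

    // Required: the batched operation.
    fn multi(xs: &[u64], ys: &[u64]) -> Self::Out;

    // Provided: the single-element case delegates to the batch,
    // borrowing one-element slices without any allocation.
    fn single(x: &u64, y: &u64) -> Self::Out {
        Self::multi(std::slice::from_ref(x), std::slice::from_ref(y))
    }
}

struct Sum;

impl MultiOp for Sum {
    type Out = u64;
    fn multi(xs: &[u64], ys: &[u64]) -> u64 {
        xs.iter().zip(ys).map(|(a, b)| a * b).sum()
    }
}

fn main() {
    assert_eq!(Sum::single(&3, &4), 12);
    assert_eq!(Sum::multi(&[1, 2], &[10, 20]), 50);
}
```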
diff --git a/src/primitives/poly.rs b/src/primitives/poly.rs
index 4ac5118..3a4aba7 100644
--- a/src/primitives/poly.rs
+++ b/src/primitives/poly.rs
@@ -1,7 +1,7 @@
 //! Polynomial trait for multilinear polynomials
 
-use crate::error::DoryError;
 use crate::setup::ProverSetup;
+use crate::{error::DoryError, primitives::arithmetic::CompressedPairingCurve};
 
 use super::arithmetic::{DoryRoutines, Field, Group, PairingCurve};
 
@@ -88,6 +88,34 @@ pub trait Polynomial<F: Field> {
         E: PairingCurve,
         M1: DoryRoutines<E::G1>,
         E::G1: Group<Scalar = F>;
+
+    /// Commit to the polynomial using Dory's 2-tier (AFGHO) homomorphic commitment, where the commitment is compressed.
+    ///
+    /// Same as `commit`, but the commitment is compressed.
+    /// NOTE: Currently the compressed pairing curve E also needs to implement the pairing curve trait, because the outputs of the tier-1 commitments are needed to compute the compressed commitment in tier 2. In the future, it is possible to make the compressed pairing curve trait inherit from the pairing curve trait.
+    ///
+    /// # Parameters
+    /// - `nu`: Log₂ of number of rows
+    /// - `sigma`: Log₂ of number of columns
+    /// - `setup`: Prover setup containing generators
+    ///
+    /// # Returns
+    /// `(commitment, row_commitments)` where:
+    /// - `commitment`: Final commitment in compressed GT
+    /// - `row_commitments`: Intermediate row commitments in G1 (used in opening proof)
+    ///
+    /// # Errors
+    /// Returns an error if the coefficient length doesn't match 2^(nu + sigma) or if the setup is insufficient.
+    fn commit_compressed<E, M1>(
+        &self,
+        nu: usize,
+        sigma: usize,
+        setup: &ProverSetup<E>,
+    ) -> Result<(E::CompressedGT, Vec<<E as PairingCurve>::G1>), DoryError>
+    where
+        E: CompressedPairingCurve,
+        M1: DoryRoutines<<E as PairingCurve>::G1>,
+        <E as PairingCurve>::G1: Group<Scalar = F>;
 }
 
 /// Compute multilinear Lagrange basis evaluations at a point
diff --git a/src/proof.rs b/src/proof.rs
index cffd36f..42345b0 100644
--- a/src/proof.rs
+++ b/src/proof.rs
@@ -35,3 +35,68 @@ pub struct DoryProof<GT, G1, G2> {
     /// Log₂ of number of columns in the coefficient matrix
     pub sigma: usize,
 }
+
+impl<GT, G1, G2> DoryProof<GT, G1, G2> {
+    /// Return all GT elements in the proof (for testing)
+    pub fn gt_elements(&self) -> Vec<GT>
+    where
+        GT: Clone,
+    {
+        // Return all GT elements in the proof
+        let mut elements = Vec::new();
+        elements.push(self.vmv_message.c.clone());
+        elements.push(self.vmv_message.d2.clone());
+        for msg in &self.first_messages {
+            elements.push(msg.d1_left.clone());
+            elements.push(msg.d1_right.clone());
+            elements.push(msg.d2_left.clone());
+            elements.push(msg.d2_right.clone());
+        }
+        for msg in &self.second_messages {
+            elements.push(msg.c_plus.clone());
+            elements.push(msg.c_minus.clone());
+        }
+        elements
+    }
+
+    /// Convert the proof's GT type to another type
+    pub fn convert_gt<GT2>(self) -> DoryProof<GT2, G1, G2>
+    where
+        GT: Into<GT2>,
+    {
+        DoryProof {
+            vmv_message: VMVMessage {
+                c: self.vmv_message.c.into(),
+                d2: self.vmv_message.d2.into(),
+                e1: self.vmv_message.e1,
+            },
+            first_messages: self
+                .first_messages
+                .into_iter()
+                .map(|msg| FirstReduceMessage {
+                    d1_left: msg.d1_left.into(),
+                    d1_right: msg.d1_right.into(),
+                    d2_left: msg.d2_left.into(),
+                    d2_right: msg.d2_right.into(),
+                    e1_beta: msg.e1_beta,
+                    e2_beta: msg.e2_beta,
+                })
+                .collect(),
+            second_messages: self
+                .second_messages
+                .into_iter()
+                .map(|msg| SecondReduceMessage {
+                    c_plus: msg.c_plus.into(),
+                    c_minus: msg.c_minus.into(),
+                    e1_plus: msg.e1_plus,
+                    e1_minus: msg.e1_minus,
+                    e2_plus: msg.e2_plus,
+                    e2_minus: msg.e2_minus,
+                })
+                .collect(),
+            final_message: self.final_message,
+            nu: self.nu,
+            sigma: self.sigma,
+        }
+    }
+}
diff --git a/src/reduce_and_fold.rs b/src/reduce_and_fold.rs
index 0cc4521..c2d9128 100644
--- a/src/reduce_and_fold.rs
+++ b/src/reduce_and_fold.rs
@@ -10,7 +10,9 @@
 use crate::error::DoryError;
 use crate::messages::*;
-use crate::primitives::arithmetic::{DoryRoutines, Field, Group, PairingCurve};
+use crate::primitives::arithmetic::{
+    CompressedPairingCurve, DoryRoutines, Field, Group, PairingCurve,
+};
 use crate::setup::{ProverSetup, VerifierSetup};
 
 /// Prover state for the Dory opening protocol
@@ -185,6 +187,74 @@ impl<'a, E: PairingCurve> DoryProverState<'a, E> {
     }
 
+    /// Compute first reduce message for current round
+    /// Same as `compute_first_message`, but the result is compressed.
+    ///
+    /// Computes D1L, D1R, D2L, D2R, E1β, E2β based on current state.
+    #[tracing::instrument(skip_all, name = "DoryProverState::compute_first_message_compressed")]
+    pub fn compute_first_message_compressed<M1, M2>(
+        &self,
+    ) -> FirstReduceMessage<E::CompressedGT, E::G1, E::G2>
+    where
+        E: CompressedPairingCurve,
+        M1: DoryRoutines<E::G1>,
+        M2: DoryRoutines<E::G2>,
+        E::G2: Group<Scalar = <E::G1 as Group>::Scalar>,
+    {
+        assert!(
+            self.num_rounds > 0,
+            "Not enough rounds left in prover state"
+        );
+
+        let n2 = 1 << (self.num_rounds - 1); // n/2
+
+        // Split vectors into left and right halves
+        let (v1_l, v1_r) = self.v1.split_at(n2);
+        let (v2_l, v2_r) = self.v2.split_at(n2);
+
+        // Get collapsed generator vectors of length n/2
+        let g1_prime = &self.setup.g1_vec[..n2];
+        let g2_prime = &self.setup.g2_vec[..n2];
+
+        // Compute D values: multi-pairings between v-vectors and generators
+        // D₁L = ⟨v₁L, Γ₂'⟩, D₁R = ⟨v₁R, Γ₂'⟩ - g2_prime is from setup, use cached version
+        let d1_left = E::multi_pair_g2_setup_compressed(v1_l, g2_prime);
+        let d1_right = E::multi_pair_g2_setup_compressed(v1_r, g2_prime);
+
+        // D₂L = ⟨Γ₁', v₂L⟩, D₂R = ⟨Γ₁', v₂R⟩
+        // If v2 was constructed as h2 * scalars (first round), compute MSM(Γ₁', scalars) then one pairing.
+        let (d2_left, d2_right) = if let Some(scalars) = self.v2_scalars.as_ref() {
+            let (s_l, s_r) = scalars.split_at(n2);
+            let sum_left = M1::msm(g1_prime, s_l);
+            let sum_right = M1::msm(g1_prime, s_r);
+            (
+                E::pair_compressed(&sum_left, &self.setup.h2),
+                E::pair_compressed(&sum_right, &self.setup.h2),
+            )
+        } else {
+            (
+                E::multi_pair_g1_setup_compressed(g1_prime, v2_l),
+                E::multi_pair_g1_setup_compressed(g1_prime, v2_r),
+            )
+        };
+
+        // Compute E values for extended protocol: MSMs with scalar vectors
+        // E₁β = ⟨Γ₁, s₂⟩
+        let e1_beta = M1::msm(&self.setup.g1_vec[..1 << self.num_rounds], &self.s2[..]);
+
+        // E₂β = ⟨Γ₂, s₁⟩
+        let e2_beta = M2::msm(&self.setup.g2_vec[..1 << self.num_rounds], &self.s1[..]);
+
+        FirstReduceMessage {
+            d1_left,
+            d1_right,
+            d2_left,
+            d2_right,
+            e1_beta,
+            e2_beta,
+        }
+    }
+
     /// Apply first challenge (beta) and combine vectors
     ///
     /// Updates the state by combining with generators scaled by beta.
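The first-round shortcut above (when `v2_scalars` is present, one MSM plus a single pairing replaces a multi-pairing) is plain bilinearity: Π e(sᵢ·g₁ᵢ, h₂) = e(Σ sᵢ·g₁ᵢ, h₂). A runnable arkworks check of that identity, with random stand-in points:

```rust
use ark_bn254::{Bn254, Fr, G1Projective, G2Projective};
use ark_ec::pairing::Pairing;
use ark_ec::{CurveGroup, VariableBaseMSM};
use ark_std::UniformRand;

fn main() {
    let mut rng = ark_std::test_rng();
    let n = 4;
    let g1: Vec<_> = (0..n)
        .map(|_| G1Projective::rand(&mut rng).into_affine())
        .collect();
    let s: Vec<Fr> = (0..n).map(|_| Fr::rand(&mut rng)).collect();
    let h2 = G2Projective::rand(&mut rng);

    // Fast path: one MSM, then a single pairing.
    let fast = Bn254::pairing(G1Projective::msm(&g1, &s).unwrap(), h2);

    // Slow path: scale each base, then an n-fold multi-pairing against copies of h2.
    let scaled: Vec<G1Projective> = g1.iter().zip(&s).map(|(g, si)| *g * *si).collect();
    let slow = Bn254::multi_pairing(scaled, vec![h2; n]);

    assert_eq!(fast, slow);
}
```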
@@ -254,6 +324,54 @@ impl<'a, E: PairingCurve> DoryProverState<'a, E> {
     }
 
+    /// Compute second reduce message for current round
+    /// Same as `compute_second_message`, but the result is compressed.
+    ///
+    /// Computes C+, C-, E1+, E1-, E2+, E2- based on current state.
+    #[tracing::instrument(skip_all, name = "DoryProverState::compute_second_message_compressed")]
+    pub fn compute_second_message_compressed<M1, M2>(
+        &self,
+    ) -> SecondReduceMessage<E::CompressedGT, E::G1, E::G2>
+    where
+        E: CompressedPairingCurve,
+        M1: DoryRoutines<E::G1>,
+        M2: DoryRoutines<E::G2>,
+        E::G2: Group<Scalar = <E::G1 as Group>::Scalar>,
+    {
+        let n2 = 1 << (self.num_rounds - 1); // n/2
+
+        // Split all vectors into left and right halves
+        let (v1_l, v1_r) = self.v1.split_at(n2);
+        let (v2_l, v2_r) = self.v2.split_at(n2);
+        let (s1_l, s1_r) = self.s1.split_at(n2);
+        let (s2_l, s2_r) = self.s2.split_at(n2);
+
+        // Compute C terms: cross products of v-vectors
+        // C₊ = ⟨v₁L, v₂R⟩
+        let c_plus = E::multi_pair_compressed(v1_l, v2_r);
+        // C₋ = ⟨v₁R, v₂L⟩
+        let c_minus = E::multi_pair_compressed(v1_r, v2_l);
+
+        // Compute E terms for extended protocol: cross products with scalars
+        // E₁₊ = ⟨v₁L, s₂R⟩
+        let e1_plus = M1::msm(v1_l, s2_r);
+        // E₁₋ = ⟨v₁R, s₂L⟩
+        let e1_minus = M1::msm(v1_r, s2_l);
+        // E₂₊ = ⟨s₁L, v₂R⟩
+        let e2_plus = M2::msm(v2_r, s1_l);
+        // E₂₋ = ⟨s₁R, v₂L⟩
+        let e2_minus = M2::msm(v2_l, s1_r);
+
+        SecondReduceMessage {
+            c_plus,
+            c_minus,
+            e1_plus,
+            e1_minus,
+            e2_plus,
+            e2_minus,
+        }
+    }
+
     /// Apply second challenge (alpha) and fold vectors
     ///
     /// Reduces the vector size by half using the alpha challenge.
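The cross terms computed above are what make the fold verifiable: if the prover folds v₁' = α·v₁L + v₁R and v₂' = α⁻¹·v₂L + v₂R, then ⟨v₁', v₂'⟩ = C + α·C₊ + α⁻¹·C₋, where C is the pre-fold inner product. A self-contained check over `Fr`, with the scalar inner product standing in for the GT-valued pairing product:

```rust
use ark_bn254::Fr;
use ark_ff::{Field, UniformRand};

fn ip(a: &[Fr], b: &[Fr]) -> Fr {
    a.iter().zip(b).map(|(x, y)| *x * y).sum()
}

fn main() {
    let mut rng = ark_std::test_rng();
    let n = 4;
    let v1: Vec<Fr> = (0..n).map(|_| Fr::rand(&mut rng)).collect();
    let v2: Vec<Fr> = (0..n).map(|_| Fr::rand(&mut rng)).collect();
    let (v1l, v1r) = v1.split_at(n / 2);
    let (v2l, v2r) = v2.split_at(n / 2);

    // Cross terms sent in the second reduce message.
    let c_plus = ip(v1l, v2r);
    let c_minus = ip(v1r, v2l);
    let c = ip(&v1, &v2);

    // Fold with challenge alpha: v1' = α·v1L + v1R, v2' = α⁻¹·v2L + v2R.
    let alpha = Fr::rand(&mut rng);
    let ainv = alpha.inverse().unwrap();
    let v1f: Vec<Fr> = v1l.iter().zip(v1r).map(|(l, r)| alpha * l + r).collect();
    let v2f: Vec<Fr> = v2l.iter().zip(v2r).map(|(l, r)| ainv * l + r).collect();

    // Folded inner product equals C + α·C₊ + α⁻¹·C₋.
    assert_eq!(ip(&v1f, &v2f), c + alpha * c_plus + ainv * c_minus);
}
```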