diff --git a/.github/workflows/release-plz.yml b/.github/workflows/release-plz.yml new file mode 100644 index 000000000..2b635de8f --- /dev/null +++ b/.github/workflows/release-plz.yml @@ -0,0 +1,39 @@ +name: Release-plz + +on: + pull_request: + types: [closed] + branches: + - main + # Allow manual re-runs if publish fails (e.g., crates.io outage) + workflow_dispatch: + +jobs: + release-plz-release: + name: Release-plz release + runs-on: ubuntu-latest + if: >- + ${{ + github.repository_owner == 'Plonky3' && + ( + github.event_name == 'workflow_dispatch' || + (github.event.pull_request.merged == true && contains(github.event.pull_request.labels.*.name, 'release')) + ) + }} + permissions: + contents: write + steps: + - name: Checkout repository + uses: actions/checkout@v4 + with: + fetch-depth: 0 + token: ${{ secrets.RELEASE_PLZ_TOKEN }} + - name: Install Rust toolchain + uses: dtolnay/rust-toolchain@stable + - name: Run release-plz + uses: release-plz/action@v0.5 + with: + command: release + env: + GITHUB_TOKEN: ${{ secrets.RELEASE_PLZ_TOKEN }} + CARGO_REGISTRY_TOKEN: ${{ secrets.CARGO_REGISTRY_TOKEN }} diff --git a/Cargo.toml b/Cargo.toml index d32bcfa49..e12017bc1 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -62,11 +62,10 @@ transmute_undefined_repr = "allow" cognitive_complexity = "allow" [workspace.dependencies] -bincode = { version = "2.0.0", default-features = false } blake3 = { version = "1.5", default-features = false } clap = { version = "4.5.23", features = ["derive"] } clap_derive = "4.5.18" -criterion = "0.7" +criterion = "0.8" hashbrown = "0.16.0" hex-literal = "1.0.0" itertools = { version = "0.14.0", default-features = false, features = [ @@ -84,6 +83,7 @@ serde_json = "1.0.113" sha2 = { version = "0.10.8", default-features = false } sha3 = { version = "0.10.8", default-features = false } spin = "0.10.0" +thiserror = { version = "2.0", default-features = false } tiny-keccak = "2.0.2" tracing = { version = "0.1.37", default-features = false, features = [ "attributes", @@ -95,45 +95,45 @@ tracing-subscriber = { version = "0.3.17", default-features = false, features = transpose = "0.2.3" # Local dependencies -p3-air = { path = "air", version = "0.3.0" } -p3-baby-bear = { path = "baby-bear", version = "0.3.0" } -p3-blake3 = { path = "blake3", version = "0.3.0" } -p3-blake3-air = { path = "blake3-air", version = "0.3.0" } -p3-bn254 = { path = "bn254", version = "0.3.0" } -p3-challenger = { path = "challenger", version = "0.3.0" } -p3-circle = { path = "circle", version = "0.3.0" } -p3-commit = { path = "commit", version = "0.3.0" } -p3-dft = { path = "dft", version = "0.3.0" } +p3-air = { path = "air", version = "0.4.2" } +p3-baby-bear = { path = "baby-bear", version = "0.4.2" } +p3-blake3 = { path = "blake3", version = "0.4.2" } +p3-blake3-air = { path = "blake3-air", version = "0.4.2" } +p3-bn254 = { path = "bn254", version = "0.4.2" } +p3-challenger = { path = "challenger", version = "0.4.2" } +p3-circle = { path = "circle", version = "0.4.2" } +p3-commit = { path = "commit", version = "0.4.2" } +p3-dft = { path = "dft", version = "0.4.2" } p3-examples = { path = "examples", version = "0.3.0" } -p3-field = { path = "field", version = "0.3.0" } -p3-field-testing = { path = "field-testing", version = "0.3.0" } -p3-fri = { path = "fri", version = "0.3.0" } -p3-goldilocks = { path = "goldilocks", version = "0.3.0" } -p3-interpolation = { path = "interpolation", version = "0.3.0" } -p3-keccak = { path = "keccak", version = "0.3.0" } -p3-keccak-air = { path = "keccak-air", version = 
"0.3.0" } -p3-koala-bear = { path = "koala-bear", version = "0.3.0" } -p3-lookup = { path = "lookup", version = "0.3.0" } -p3-matrix = { path = "matrix", version = "0.3.0" } -p3-maybe-rayon = { path = "maybe-rayon", version = "0.3.0" } -p3-mds = { path = "mds", version = "0.3.0" } -p3-merkle-tree = { path = "merkle-tree", version = "0.3.0" } -p3-mersenne-31 = { path = "mersenne-31", version = "0.3.0" } -p3-monty-31 = { path = "monty-31", version = "0.3.0" } -p3-multilinear-util = { path = "multilinear-util", version = "0.3.0" } -p3-poseidon = { path = "poseidon", version = "0.3.0" } -p3-poseidon2 = { path = "poseidon2", version = "0.3.0" } -p3-poseidon2-air = { path = "poseidon2-air", version = "0.3.0" } -p3-rescue = { path = "rescue", version = "0.3.0" } -p3-sha256 = { path = "sha256", version = "0.3.0" } -p3-symmetric = { path = "symmetric", version = "0.3.0" } -p3-uni-stark = { path = "uni-stark", version = "0.3.0" } -p3-util = { path = "util", version = "0.3.0" } +p3-field = { path = "field", version = "0.4.2" } +p3-field-testing = { path = "field-testing", version = "0.4.2" } +p3-fri = { path = "fri", version = "0.4.2" } +p3-goldilocks = { path = "goldilocks", version = "0.4.2" } +p3-interpolation = { path = "interpolation", version = "0.4.2" } +p3-keccak = { path = "keccak", version = "0.4.2" } +p3-keccak-air = { path = "keccak-air", version = "0.4.2" } +p3-koala-bear = { path = "koala-bear", version = "0.4.2" } +p3-lookup = { path = "lookup", version = "0.4.2" } +p3-matrix = { path = "matrix", version = "0.4.2" } +p3-maybe-rayon = { path = "maybe-rayon", version = "0.4.2" } +p3-mds = { path = "mds", version = "0.4.2" } +p3-merkle-tree = { path = "merkle-tree", version = "0.4.2" } +p3-mersenne-31 = { path = "mersenne-31", version = "0.4.2" } +p3-monty-31 = { path = "monty-31", version = "0.4.2" } +p3-multilinear-util = { path = "multilinear-util", version = "0.4.2" } +p3-poseidon = { path = "poseidon", version = "0.4.2" } +p3-poseidon2 = { path = "poseidon2", version = "0.4.2" } +p3-poseidon2-air = { path = "poseidon2-air", version = "0.4.2" } +p3-rescue = { path = "rescue", version = "0.4.2" } +p3-sha256 = { path = "sha256", version = "0.4.2" } +p3-symmetric = { path = "symmetric", version = "0.4.2" } +p3-uni-stark = { path = "uni-stark", version = "0.4.2" } +p3-util = { path = "util", version = "0.4.2" } [workspace.package] # General description field used for the sub-crates that are currently missing a description. description = "Plonky3 is a toolkit for implementing polynomial IOPs (PIOPs), such as PLONK and STARKs." -version = "0.3.0" +version = "0.4.2" edition = "2024" license = "MIT OR Apache-2.0" repository = "https://github.com/Plonky3/Plonky3" diff --git a/README.md b/README.md index ccfae9268..ab36d61f7 100644 --- a/README.md +++ b/README.md @@ -57,6 +57,7 @@ Hashes - [x] BLAKE3 - [ ] modifications to tune BLAKE3 for hashing small leaves - [x] Keccak-256 +- [x] SHA-256 - [x] Monolith diff --git a/RELEASING.md b/RELEASING.md new file mode 100644 index 000000000..21725d4ec --- /dev/null +++ b/RELEASING.md @@ -0,0 +1,50 @@ +# Releasing Plonky3 + +This document describes how to create a new release of Plonky3. + +## Prerequisites + +- Install [release-plz](https://release-plz.dev/docs/usage/installation) +- Set the `GIT_TOKEN` environment variable with a GitHub token that has permission to create PRs + +## Creating a Release + +1. Ensure your local `main` branch is up-to-date with the remote: + ```bash + git checkout main + git pull origin main + ``` + +2. 
Run the release script:
+   ```bash
+   ./create_release.sh
+   ```
+
+3. This creates a PR with:
+   - Version bumps for all crates (in lock-step)
+   - Updated changelogs based on conventional commits
+
+4. Review and merge the PR. Once merged, CI automatically publishes all crates to crates.io.
+
+## How It Works
+
+- **Version grouping**: All crates share the same version via `version_group = "plonky3"` in `release-plz.toml`
+- **Changelog generation**: Uses [git-cliff](https://git-cliff.org/) configured in `cliff.toml`
+- **Version bump detection**: `release-plz` uses [cargo-semver-checks](https://github.com/obi1kenobi/cargo-semver-checks) to automatically detect the appropriate version bump:
+  - Breaking API changes (removed/changed public items) → major bump
+  - New public API additions → minor bump
+  - Bug fixes, internal changes, docs → patch bump
+- **Conventional commits** (optional): If used, commit prefixes like `feat!:` can also signal breaking changes
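For reference, a minimal `release-plz.toml` consistent with the behavior described above might look like the following sketch. Only `version_group = "plonky3"` and the use of `cliff.toml` are confirmed by this document; the key layout (`[workspace]`, `changelog_config`, per-package sections) is an assumption about release-plz's configuration format, and the repository's actual file may differ.

```toml
# Hypothetical sketch of release-plz.toml; see the repository for the real file.
[workspace]
# Changelogs are generated by git-cliff, configured in cliff.toml.
changelog_config = "cliff.toml"

# Each crate is assigned to the same version group, so release-plz bumps the
# whole workspace in lock-step. Repeated (or templated) for every crate.
[[package]]
name = "p3-air"
version_group = "plonky3"
```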
+
+## Troubleshooting
+
+### CI publish failed
+
+If the publish step fails (e.g., crates.io outage), you can manually re-run the workflow:
+
+1. Go to Actions → Release-plz
+2. Click "Run workflow" on the main branch
+
+### release-plz says "no changes to release"
+
+This means there are no releasable commits since the last release tag. If you rely on conventional commits for changelog entries, ensure they follow the [conventional commits](https://www.conventionalcommits.org/) format.
diff --git a/air/CHANGELOG.md b/air/CHANGELOG.md
new file mode 100644
index 000000000..76e90d5c3
--- /dev/null
+++ b/air/CHANGELOG.md
@@ -0,0 +1,35 @@
+# Changelog
+
+All notable changes to this project will be documented in this file.
+
+The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/),
+and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0.html).
+
+## [Unreleased]
+## [0.4.2] - 2026-01-05
+### Authors
+
+## [0.4.1] - 2025-12-18
+### Authors
+
+## [0.4.0] - 2025-12-12
+### Merged PRs
+- Chore: add descriptions to all sub-crate manifests (#906) (Himess)
+- Air: more unit tests for air utils (#936) (Thomas Coratger)
+- Replace `Copy` with `Clone` in `AirBuilder`'s `Var` (#930) (Linda Guiga)
+- Air: better doc for traits (#935) (Thomas Coratger)
+- Chore: various small changes (#944) (Thomas Coratger)
+- Weaken the trait bound of AirBuilder to allow `F` to be merely a Ring. (#977) (AngusG)
+- Doc: add better doc in air and fix TODO (#1061) (Thomas Coratger)
+- Clippy: small step (#1102) (Thomas Coratger)
+- Clippy: add semicolon_if_nothing_returned (#1107) (Thomas Coratger)
+- Add preprocessed/transparent columns to uni-stark (#1114) (o-k-d)
+- Integrate lookups to prover and verifier (#1165) (Linda Guiga)
+
+### Authors
+- AngusG
+- Himess
+- Linda Guiga
+- Thomas Coratger
+- o-k-d
+
diff --git a/air/Cargo.toml b/air/Cargo.toml
index 0d363f36a..fe6cd6781 100644
--- a/air/Cargo.toml
+++ b/air/Cargo.toml
@@ -12,9 +12,11 @@ categories.workspace = true
 [dependencies]
 p3-field.workspace = true
 p3-matrix.workspace = true
+serde = { workspace = true, features = ["derive", "alloc"] }
+tracing.workspace = true
 
 [dev-dependencies]
-p3-baby-bear.workspace = true
+p3-baby-bear = { path = "../baby-bear" }
 
 [lints]
 workspace = true
diff --git a/air/src/air.rs b/air/src/air.rs
index 2f0958a0b..028f4eab2 100644
--- a/air/src/air.rs
+++ b/air/src/air.rs
@@ -1,17 +1,33 @@
+use alloc::vec;
+use alloc::vec::Vec;
 use core::ops::{Add, Mul, Sub};
 
 use p3_field::{Algebra, ExtensionField, Field, PrimeCharacteristicRing};
 use p3_matrix::Matrix;
 use p3_matrix::dense::RowMajorMatrix;
 
+use crate::lookup::{Kind, Lookup, LookupData, LookupEvaluator, LookupInput};
+
 /// The underlying structure of an AIR.
 pub trait BaseAir<F>: Sync {
     /// The number of columns (a.k.a. registers) in this AIR.
     fn width(&self) -> usize;
 
+    /// Return an optional preprocessed trace matrix to be included in the prover's trace.
     fn preprocessed_trace(&self) -> Option<RowMajorMatrix<F>> {
         None
     }
+
+    /// Return the periodic table data.
+    ///
+    /// Periodic columns are columns whose values repeat with a period that divides the trace
+    /// length. Each inner `Vec` represents one periodic column. The length of the inner
+    /// vector is the period of that column (it must be a power of 2 that divides the trace length).
+    ///
+    /// By default returns an empty table (no periodic columns).
+    fn periodic_table(&self) -> Vec<Vec<F>> {
+        vec![]
+    }
 }
 
 /// An extension of `BaseAir` that includes support for public values.
@@ -29,15 +45,89 @@ pub trait BaseAirWithPublicValues<F>: BaseAir<F> {
 /// constraint will compute a particular value or it can be applied symbolically
 /// with each constraint computing a symbolic expression.
 pub trait Air<AB: AirBuilder>: BaseAir<AB::F> {
+    /// Update the number of auxiliary columns to account for a new lookup column,
+    /// and return its index (or indices).
+    ///
+    /// The default implementation returns an empty vector, indicating no lookup columns.
+    /// Override this method for AIRs that use lookups.
+    fn add_lookup_columns(&mut self) -> Vec<usize> {
+        vec![]
+    }
+
+    /// Register all lookups for the current AIR and return them.
+    ///
+    /// The default implementation returns an empty vector, indicating no lookups.
+    /// Override this method for AIRs that use lookups.
+    fn get_lookups(&mut self) -> Vec<Lookup<AB::F>>
+    where
+        AB: PermutationAirBuilder + AirBuilderWithPublicValues,
+    {
+        vec![]
+    }
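To make the intended flow concrete, here is a hedged sketch of how a lookup-enabled AIR might build a `LookupInput` and hand it to `register_lookup` (defined just below). The helper name, the choice of columns, and the `Field` bound are illustrative assumptions, not code from this PR.

```rust
use p3_air::lookup::{Direction, LookupInput};
use p3_air::{Entry, SymbolicExpression, SymbolicVariable};
use p3_field::Field;

/// Hypothetical helper: a lookup input that sends the tuple
/// (main[0], main[1]) on each row with multiplicity 1.
fn send_main_pair<F: Field>() -> LookupInput<F> {
    // A symbolic reference to main-trace column `i` on the current row.
    let col =
        |i: usize| SymbolicExpression::from(SymbolicVariable::new(Entry::Main { offset: 0 }, i));
    (
        vec![col(0), col(1)],                 // the bundled tuple of elements
        SymbolicExpression::Constant(F::ONE), // multiplicity
        Direction::Send,                      // contributed to the lookup
    )
}
```

An AIR's `get_lookups` override could then return something like `vec![self.register_lookup(Kind::Local, &[send_main_pair::<AB::F>()])]`.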
+    /// Register a lookup to be used in this AIR.
+    /// This method can be used before proving or verifying, as the resulting
+    /// data is shared between the prover and the verifier.
+    fn register_lookup(
+        &mut self,
+        kind: Kind,
+        lookup_inputs: &[LookupInput<AB::F>],
+    ) -> Lookup<AB::F>
+    where
+        AB: PermutationAirBuilder + AirBuilderWithPublicValues,
+    {
+        let (element_exprs, multiplicities_exprs) = lookup_inputs
+            .iter()
+            .map(|(elems, mult, dir)| {
+                let multiplicity = dir.multiplicity(mult.clone());
+                (elems.clone(), multiplicity)
+            })
+            .unzip();
+
+        Lookup {
+            kind,
+            element_exprs,
+            multiplicities_exprs,
+            columns: self.add_lookup_columns(),
+        }
+    }
+
     /// Evaluate all AIR constraints using the provided builder.
     ///
     /// The builder provides both the trace on which the constraints
     /// are evaluated, as well as the method of accumulating the
     /// constraint evaluations.
     ///
+    /// **Note**: Users do not need to specify lookup constraint evaluation in this method;
+    /// they only specify the AIR constraints and rely on `eval_with_lookups` to evaluate
+    /// both AIR and lookup constraints.
+    ///
     /// # Arguments
     /// - `builder`: Mutable reference to an `AirBuilder` for defining constraints.
     fn eval(&self, builder: &mut AB);
+
+    /// Evaluate all AIR and lookup constraints using the provided builder.
+    ///
+    /// The default implementation calls `eval` and then evaluates lookups if any are provided,
+    /// using the provided lookup evaluator.
+    /// Users typically don't need to override this method unless they need custom behavior.
+    ///
+    /// # Arguments
+    /// - `builder`: Mutable reference to an `AirBuilder` for defining constraints.
+    /// - `lookups`: References to the lookups to be evaluated.
+    /// - `lookup_data`: References to the lookup data to be used for evaluation.
+    /// - `lookup_evaluator`: Reference to the lookup evaluator to be used for evaluation.
+    fn eval_with_lookups<LE: LookupEvaluator>(
+        &self,
+        builder: &mut AB,
+        lookups: &[Lookup<AB::F>],
+        lookup_data: &[LookupData<AB::ExprEF>],
+        lookup_evaluator: &LE,
+    ) where
+        AB: PermutationAirBuilder + AirBuilderWithPublicValues + PeriodicAirBuilder,
+    {
+        self.eval(builder);
+
+        if !lookups.is_empty() {
+            lookup_evaluator.eval_lookups(builder, lookups, lookup_data);
+        }
+    }
 }
 
 /// A builder which contains both a trace on which AIR constraints can be evaluated as well as a method of accumulating the AIR constraint evaluations.
@@ -77,6 +167,13 @@ pub trait AirBuilder: Sized {
     /// Return the matrix representing the main (primary) trace registers.
     fn main(&self) -> Self::M;
 
+    /// Return an optional matrix of preprocessed registers.
+    /// The default implementation returns `None`.
+    /// Override this for builders that provide preprocessed columns.
+    fn preprocessed(&self) -> Option<Self::M> {
+        None
+    }
+
     /// Expression evaluating to 1 on the first row, 0 elsewhere.
     fn is_first_row(&self) -> Self::Expr;
@@ -177,19 +274,13 @@ pub trait AirBuilderWithPublicValues: AirBuilder {
     fn public_values(&self) -> &[Self::PublicVar];
 }
 
-/// Trait for `AirBuilder` variants that include preprocessed data columns.
-pub trait PairBuilder: AirBuilder {
-    /// Return a matrix of preprocessed registers.
-    fn preprocessed(&self) -> Self::M;
-}
-
 /// Extension of `AirBuilder` for working over extension fields.
 pub trait ExtensionBuilder: AirBuilder {
     /// Extension field type.
     type EF: ExtensionField<Self::F>;
 
     /// Expression type over extension field elements.
-    type ExprEF: Algebra<Self::Expr> + Algebra<Self::EF>;
+    type ExprEF: From<Self::Expr> + Algebra<Self::EF>;
 
     /// Variable type over extension field elements.
     type VarEF: Into<Self::ExprEF> + Copy + Send + Sync;
@@ -232,6 +323,20 @@ pub trait PermutationAirBuilder: ExtensionBuilder {
     fn permutation_randomness(&self) -> &[Self::RandomVar];
 }
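Tying the periodic-column pieces together: an AIR publishes its repeating columns through `BaseAir::periodic_table` (above) and reads their per-row evaluations back through `PeriodicAirBuilder::periodic_values`, the trait introduced just below. A hedged sketch with a hypothetical AIR; the struct name, constraint, and use of `assert_bool` are illustrative:

```rust
use p3_air::{Air, AirBuilder, BaseAir, PeriodicAirBuilder};
use p3_field::Field;

/// Hypothetical AIR with a single periodic column [1, 0, 1, 0, ...] of period 2.
struct AlternatingAir;

impl<F: Field> BaseAir<F> for AlternatingAir {
    fn width(&self) -> usize {
        1
    }

    /// One periodic column; the inner length (2) is the period and must
    /// divide the (power-of-two) trace length.
    fn periodic_table(&self) -> Vec<Vec<F>> {
        vec![vec![F::ONE, F::ZERO]]
    }
}

impl<AB: PeriodicAirBuilder> Air<AB> for AlternatingAir {
    fn eval(&self, builder: &mut AB) {
        // Neither side commits to this column: prover and verifier both
        // evaluate it from the periodic table and read it back here.
        let sel = builder.periodic_values()[0];
        // Illustrative constraint on the selector (it is 0/1 by construction).
        builder.assert_bool(sel);
    }
}
```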
+/// Trait for builders supporting periodic columns.
+///
+/// Periodic columns are columns whose values repeat with a period dividing the trace length.
+/// They are never committed to the proof - instead, both prover and verifier compute them
+/// from the periodic table data provided by the AIR.
+pub trait PeriodicAirBuilder: AirBuilder {
+    /// Variable type for periodic column values.
+    /// For the prover, this is the base field; for the verifier, this is the extension field.
+    type PeriodicVar: Into<Self::Expr> + Copy;
+
+    /// Return the evaluations of periodic columns at the current row.
+    fn periodic_values(&self) -> &[Self::PeriodicVar];
+}
+
 /// A wrapper around an [`AirBuilder`] that enforces constraints only when a specified condition is met.
 ///
 /// This struct allows selectively applying constraints to certain rows or under certain conditions in the AIR,
@@ -264,6 +369,10 @@ impl<AB: AirBuilder> AirBuilder for FilteredAirBuilder<'_, AB> {
         self.inner.main()
     }
 
+    fn preprocessed(&self) -> Option<Self::M> {
+        self.inner.preprocessed()
+    }
+
     fn is_first_row(&self) -> Self::Expr {
         self.inner.is_first_row()
     }
@@ -281,12 +390,15 @@
     }
 }
 
-impl<AB: PairBuilder> PairBuilder for FilteredAirBuilder<'_, AB> {
-    fn preprocessed(&self) -> Self::M {
-        self.inner.preprocessed()
+impl<AB: AirBuilderWithPublicValues> AirBuilderWithPublicValues for FilteredAirBuilder<'_, AB> {
+    type PublicVar = AB::PublicVar;
+
+    fn public_values(&self) -> &[Self::PublicVar] {
+        self.inner.public_values()
     }
 }
 
+
 impl<AB: ExtensionBuilder> ExtensionBuilder for FilteredAirBuilder<'_, AB> {
     type EF = AB::EF;
     type ExprEF = AB::ExprEF;
@@ -296,7 +408,10 @@
     where
         I: Into<Self::ExprEF>,
     {
-        self.inner.assert_zero_ext(x.into() * self.condition());
+        let ext_x = x.into();
+        let condition: Self::ExprEF = self.condition().into();
+
+        self.inner.assert_zero_ext(ext_x * condition);
     }
 }
 
@@ -313,3 +428,11 @@ impl<AB: PermutationAirBuilder> PermutationAirBuilder for FilteredAirBuilder<'_, AB> {
         self.inner.permutation_randomness()
     }
 }
+
+impl<AB: PeriodicAirBuilder> PeriodicAirBuilder for FilteredAirBuilder<'_, AB> {
+    type PeriodicVar = AB::PeriodicVar;
+
+    fn periodic_values(&self) -> &[Self::PeriodicVar] {
+        self.inner.periodic_values()
+    }
+}
diff --git a/air/src/lib.rs b/air/src/lib.rs
index 31a10af8c..99e70520c 100644
--- a/air/src/lib.rs
+++ b/air/src/lib.rs
@@ -5,8 +5,11 @@ extern crate alloc;
 
 mod air;
+pub mod lookup;
+pub mod symbolic;
 pub mod utils;
 mod virtual_column;
 
 pub use air::*;
+pub use symbolic::*;
 pub use virtual_column::*;
diff --git a/air/src/lookup/mod.rs b/air/src/lookup/mod.rs
new file mode 100644
index 000000000..37a9cfa5d
--- /dev/null
+++ b/air/src/lookup/mod.rs
@@ -0,0 +1,203 @@
+//! Lookup Arguments for STARKs
+
+use alloc::string::String;
+use alloc::vec::Vec;
+use core::ops::Neg;
+
+use p3_field::Field;
+use serde::{Deserialize, Serialize};
+
+use crate::{
+    AirBuilderWithPublicValues, PeriodicAirBuilder, PermutationAirBuilder, SymbolicExpression,
+};
+
+/// Defines errors that can occur during lookup verification.
+#[derive(Debug)]
+pub enum LookupError {
+    /// Error indicating that the global cumulative sum is incorrect.
+    GlobalCumulativeMismatch(Option<String>),
+}
+
+/// Specifies whether a lookup is local to an AIR or part of a global interaction.
+#[derive(Debug, Clone, PartialEq, Eq)]
+pub enum Kind {
+    /// A lookup where all entries are contained within a single AIR.
+    Local,
+    /// A lookup that spans multiple AIRs, identified by a unique interaction name.
+    ///
+    /// The interaction name is used to identify all elements that are part of the same interaction.
+    Global(String),
+}
+
+/// Indicates the direction of data flow in a global lookup.
+#[derive(Clone, Copy)]
+pub enum Direction {
+    /// Indicates that elements are being sent (contributed) to the lookup.
+    Send,
+    /// Indicates that elements are being received (removed) from the lookup.
+    Receive,
+}
+
+impl Direction {
+    /// Helper method to compute the signed multiplicity based on the direction.
+    pub fn multiplicity<T: Neg<Output = T>>(&self, mult: T) -> T {
+        match self {
+            Self::Send => -mult,
+            Self::Receive => mult,
+        }
+    }
+}
+
+/// Data required for global lookup arguments in a multi-STARK proof.
+#[derive(Debug, Default, Clone, Serialize, Deserialize)]
+pub struct LookupData<F> {
+    /// Name of the global lookup interaction.
+    pub name: String,
+    /// Index of the auxiliary column (if there are multiple auxiliary columns, this is the first one).
+    pub aux_idx: usize,
+    /// Expected cumulated value for a global lookup argument.
+    pub expected_cumulated: F,
+}
+
+/// A type alias for a lookup input tuple.
+///
+/// The tuple contains:
+/// - a vector of symbolic expressions representing the elements involved in the lookup,
+/// - a symbolic expression representing the multiplicity of the lookup,
+/// - a direction indicating whether the elements are being sent or received.
+///
+/// # Example
+/// ```ignore
+/// use p3_air::lookup::{LookupInput, Direction};
+/// use p3_air::SymbolicExpression;
+///
+/// let lookup_input: LookupInput<F> = (
+///     vec![SymbolicExpression::Constant(F::ONE)],
+///     SymbolicExpression::Constant(F::ONE),
+///     Direction::Send
+/// );
+/// ```
+pub type LookupInput<F> = (Vec<SymbolicExpression<F>>, SymbolicExpression<F>, Direction);
+
+/// A structure that holds the lookup data necessary to generate lookup contexts
+/// via [`LookupTraceBuilder`]. It is shared between the prover and the verifier.
+#[derive(Clone, Debug)]
+pub struct Lookup<F> {
+    /// Type of lookup: local or global.
+    pub kind: Kind,
+    /// Elements being read (consumed from the table). Each `Vec<SymbolicExpression<F>>`
+    /// actually represents a tuple of elements that are bundled together to make one lookup.
+    pub element_exprs: Vec<Vec<SymbolicExpression<F>>>,
+    /// Multiplicities for the elements.
+    /// Note that Lagrange selectors may not be normalized, and so cannot be used as proper
+    /// filters in the multiplicities.
+    pub multiplicities_exprs: Vec<SymbolicExpression<F>>,
+    /// The column indices in the permutation trace for this lookup's running sum.
+    pub columns: Vec<usize>,
+}
+
+impl<F: Field> Lookup<F> {
+    /// Creates a new lookup from its parts.
+    ///
+    /// # Arguments
+    /// * `kind` - Whether the lookup is local to one AIR or part of a global interaction.
+    /// * `element_exprs` - Elements from either the main execution trace or a lookup table.
+    /// * `multiplicities_exprs` - How many times each element tuple should appear.
+    /// * `columns` - The column indices in the permutation trace for this lookup.
+    pub const fn new(
+        kind: Kind,
+        element_exprs: Vec<Vec<SymbolicExpression<F>>>,
+        multiplicities_exprs: Vec<SymbolicExpression<F>>,
+        columns: Vec<usize>,
+    ) -> Self {
+        Self {
+            kind,
+            element_exprs,
+            multiplicities_exprs,
+            columns,
+        }
+    }
+}
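As a quick illustration of the signed-multiplicity convention used by `register_lookup`: `Send` negates the multiplicity and `Receive` keeps it positive, so a matched send/receive pair cancels in the cumulative sum. A minimal check, using BabyBear purely as an example field:

```rust
use p3_air::lookup::Direction;
use p3_baby_bear::BabyBear;
use p3_field::PrimeCharacteristicRing;

fn main() {
    let m = BabyBear::ONE;

    // Send contributes the negated multiplicity...
    assert_eq!(Direction::Send.multiplicity(m), -m);
    // ...Receive contributes it unchanged...
    assert_eq!(Direction::Receive.multiplicity(m), m);
    // ...so a matched send/receive pair sums to zero.
    assert_eq!(
        Direction::Send.multiplicity(m) + Direction::Receive.multiplicity(m),
        BabyBear::ZERO
    );
}
```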
+
+/// Trait for evaluating lookup constraints.
+/// This is the core interface needed by [`Air::eval_with_lookups`](crate::Air::eval_with_lookups).
+pub trait LookupEvaluator {
+    /// Returns the number of auxiliary columns needed by this lookup protocol.
+    ///
+    /// For example:
+    /// - LogUp needs 1 column (running sum)
+    fn num_aux_cols(&self) -> usize;
+
+    /// Returns the number of challenges for each lookup argument.
+    ///
+    /// For example, for LogUp, this is 2:
+    /// - one challenge for combining the lookup tuples,
+    /// - one challenge for the running sum.
+    fn num_challenges(&self) -> usize;
+
+    /// Evaluates a local lookup argument based on the provided context.
+    ///
+    /// For example, in LogUp:
+    /// - this checks that the running sum is updated correctly.
+    /// - it checks that the final value of the running sum is 0.
+    fn eval_local_lookup<AB>(&self, builder: &mut AB, context: &Lookup<AB::F>)
+    where
+        AB: PermutationAirBuilder + AirBuilderWithPublicValues + PeriodicAirBuilder;
+
+    /// Evaluates a global lookup update based on the provided context and the expected cumulated value.
+    /// This evaluation is carried out at the AIR level. We still need to check that the permutation argument holds
+    /// over all AIRs involved in the interaction.
+    ///
+    /// For example, in LogUp:
+    /// - this checks that the running sum is updated correctly.
+    /// - it checks that the local final value of the running sum is equal to the value provided by the prover.
+    fn eval_global_update<AB>(
+        &self,
+        builder: &mut AB,
+        context: &Lookup<AB::F>,
+        expected_cumulated: AB::ExprEF,
+    ) where
+        AB: PermutationAirBuilder + AirBuilderWithPublicValues + PeriodicAirBuilder;
+
+    /// Evaluates the lookup constraints for all provided contexts.
+    ///
+    /// For each context:
+    /// - if it is a local lookup, evaluates it with `eval_local_lookup`.
+    /// - if it is a global lookup, evaluates it with `eval_global_update`, using the expected cumulated value from `lookup_data`.
+    fn eval_lookups<AB>(
+        &self,
+        builder: &mut AB,
+        contexts: &[Lookup<AB::F>],
+        // Assumed to be sorted by auxiliary index.
+        lookup_data: &[LookupData<AB::ExprEF>],
+    ) where
+        AB: PermutationAirBuilder + AirBuilderWithPublicValues + PeriodicAirBuilder,
+    {
+        let mut lookup_data_iter = lookup_data.iter();
+        for context in contexts.iter() {
+            match &context.kind {
+                Kind::Local => {
+                    self.eval_local_lookup(builder, context);
+                }
+                Kind::Global(_) => {
+                    // Find the expected cumulated value for this context.
+                    let LookupData {
+                        name: _,
+                        aux_idx,
+                        expected_cumulated,
+                    } = lookup_data_iter
+                        .next()
+                        .expect("Expected cumulated value missing");
+
+                    if *aux_idx != context.columns[0] {
+                        panic!("Expected cumulated values not sorted by auxiliary index");
+                    }
+                    self.eval_global_update(builder, context, expected_cumulated.clone());
+                }
+            }
+        }
+        assert!(
+            lookup_data_iter.next().is_none(),
+            "Too many expected cumulated values provided"
+        );
+    }
+}
diff --git a/air/src/symbolic/expression.rs b/air/src/symbolic/expression.rs
new file mode 100644
index 000000000..04970952d
--- /dev/null
+++ b/air/src/symbolic/expression.rs
@@ -0,0 +1,877 @@
+use alloc::sync::Arc;
+use core::fmt::Debug;
+use core::iter::{Product, Sum};
+use core::ops::{Add, AddAssign, Mul, MulAssign, Neg, Sub, SubAssign};
+
+use p3_field::extension::BinomialExtensionField;
+use p3_field::{Algebra, ExtensionField, Field, InjectiveMonomial, PrimeCharacteristicRing};
+
+use crate::symbolic::SymbolicVariable;
+
+impl<F, const D: usize> From<SymbolicExpression<F>>
+    for SymbolicExpression<BinomialExtensionField<F, D>>
+where
+    F: Field,
+    BinomialExtensionField<F, D>: ExtensionField<F>,
+{
+    /// Generic implementation for ANY field `F` using a [`BinomialExtensionField`].
+    /// This works for BabyBear, KoalaBear, Mersenne31, and any future field
+    /// without modifying this crate.
+    ///
+    /// Since [`BinomialExtensionField`] is always a distinct type from `F`,
+    /// this implementation doesn't conflict with the blanket `From<T> for T`.
+    fn from(expr: SymbolicExpression<F>) -> Self {
+        match expr {
+            SymbolicExpression::Variable(v) => {
+                Self::Variable(SymbolicVariable::new(v.entry, v.index))
+            }
+            SymbolicExpression::IsFirstRow => Self::IsFirstRow,
+            SymbolicExpression::IsLastRow => Self::IsLastRow,
+            SymbolicExpression::IsTransition => Self::IsTransition,
+            SymbolicExpression::Constant(c) => {
+                // We convert the base constant 'c' into the extension field
+                Self::Constant(BinomialExtensionField::<F, D>::from(c))
+            }
+            SymbolicExpression::Add {
+                x,
+                y,
+                degree_multiple,
+            } => Self::Add {
+                x: Arc::new(Self::from((*x).clone())),
+                y: Arc::new(Self::from((*y).clone())),
+                degree_multiple,
+            },
+            SymbolicExpression::Sub {
+                x,
+                y,
+                degree_multiple,
+            } => Self::Sub {
+                x: Arc::new(Self::from((*x).clone())),
+                y: Arc::new(Self::from((*y).clone())),
+                degree_multiple,
+            },
+            SymbolicExpression::Neg { x, degree_multiple } => Self::Neg {
+                x: Arc::new(Self::from((*x).clone())),
+                degree_multiple,
+            },
+            SymbolicExpression::Mul {
+                x,
+                y,
+                degree_multiple,
+            } => Self::Mul {
+                x: Arc::new(Self::from((*x).clone())),
+                y: Arc::new(Self::from((*y).clone())),
+                degree_multiple,
+            },
+        }
+    }
+}
+
+/// A symbolic expression tree representing AIR constraint computations over [`SymbolicVariable`]s.
+///
+/// This enum forms an Abstract Syntax Tree (AST) for constraint expressions.
+///
+/// Each node represents either:
+/// - A leaf value (variable, constant, selector) or
+/// - An arithmetic operation combining sub-expressions.
+#[derive(Clone, Debug)]
+pub enum SymbolicExpression<F> {
+    /// A reference to a trace column or public input.
+    ///
+    /// Wraps a [`SymbolicVariable`] that identifies which column and row offset.
+    Variable(SymbolicVariable<F>),
+
+    /// Selector that is:
+    /// - 1 on the first row,
+    /// - 0 elsewhere.
+    ///
+    /// Evaluates to `L_0(x)`, the Lagrange basis polynomial for index 0.
+    IsFirstRow,
+
+    /// Selector that is:
+    /// - 1 on the last row,
+    /// - 0 elsewhere.
+    ///
+    /// Evaluates to `L_{n-1}(x)`, the Lagrange basis polynomial for the last index.
+    IsLastRow,
+
+    /// Selector that is:
+    /// - 1 on all rows except the last,
+    /// - 0 on the last row.
+    ///
+    /// Used for transition constraints that should not apply on the final row.
+    IsTransition,
+
+    /// A constant field element.
+    Constant(F),
+
+    /// Addition of two sub-expressions.
+    Add {
+        /// Left operand.
+        x: Arc<Self>,
+        /// Right operand.
+        y: Arc<Self>,
+        /// Cached degree multiple: `max(x.degree_multiple, y.degree_multiple)`.
+        degree_multiple: usize,
+    },
+
+    /// Subtraction of two sub-expressions.
+    Sub {
+        /// Left operand (minuend).
+        x: Arc<Self>,
+        /// Right operand (subtrahend).
+        y: Arc<Self>,
+        /// Cached degree multiple: `max(x.degree_multiple, y.degree_multiple)`.
+        degree_multiple: usize,
+    },
+
+    /// Negation of a sub-expression.
+    Neg {
+        /// The expression to negate.
+        x: Arc<Self>,
+        /// Cached degree multiple: same as `x.degree_multiple`.
+        degree_multiple: usize,
+    },
+
+    /// Multiplication of two sub-expressions.
+    Mul {
+        /// Left operand.
+        x: Arc<Self>,
+        /// Right operand.
+        y: Arc<Self>,
+        /// Cached degree multiple: `x.degree_multiple + y.degree_multiple`.
+        degree_multiple: usize,
+    },
+}
+
+impl<F: Field> SymbolicExpression<F> {
+    /// Returns the degree multiple of this expression.
+    ///
+    /// The degree multiple represents how many times the trace length `n`
+    /// appears in the expression's polynomial degree.
This determines: + /// - The quotient polynomial's degree + /// - The required FRI blowup factor + /// + /// # Degree Rules + /// + /// Degree 0 (constants): + /// - `Constant` + /// - `Variable` with public values or challenges + /// + /// Degree 1 (linear in trace length): + /// - `Variable` with trace columns (main, preprocessed, permutation) + /// - `IsFirstRow` + /// - `IsLastRow` + /// - `IsTransition` + /// + /// Composite expressions: + /// - `Add`, `Sub`: max of operands + /// - `Neg`: same as operand + /// - `Mul`: sum of operands + pub const fn degree_multiple(&self) -> usize { + match self { + Self::Variable(v) => v.degree_multiple(), + Self::IsFirstRow | Self::IsLastRow | Self::IsTransition => 1, + Self::Constant(_) => 0, + Self::Add { + degree_multiple, .. + } + | Self::Sub { + degree_multiple, .. + } + | Self::Neg { + degree_multiple, .. + } + | Self::Mul { + degree_multiple, .. + } => *degree_multiple, + } + } +} + +impl Default for SymbolicExpression { + fn default() -> Self { + Self::Constant(F::ZERO) + } +} + +impl> From> for SymbolicExpression { + fn from(var: SymbolicVariable) -> Self { + Self::Variable(SymbolicVariable::new(var.entry, var.index)) + } +} + +impl> From for SymbolicExpression { + fn from(var: F) -> Self { + Self::Constant(var.into()) + } +} + +impl PrimeCharacteristicRing for SymbolicExpression { + type PrimeSubfield = F::PrimeSubfield; + + const ZERO: Self = Self::Constant(F::ZERO); + const ONE: Self = Self::Constant(F::ONE); + const TWO: Self = Self::Constant(F::TWO); + const NEG_ONE: Self = Self::Constant(F::NEG_ONE); + + #[inline] + fn from_prime_subfield(f: Self::PrimeSubfield) -> Self { + F::from_prime_subfield(f).into() + } +} + +impl Algebra for SymbolicExpression {} + +impl Algebra> for SymbolicExpression {} + +// Note we cannot implement PermutationMonomial due to the degree_multiple part which makes +// operations non invertible. 
+impl, const N: u64> InjectiveMonomial for SymbolicExpression {} + +impl Add for SymbolicExpression +where + T: Into, +{ + type Output = Self; + + fn add(self, rhs: T) -> Self { + match (self, rhs.into()) { + (Self::Constant(lhs), Self::Constant(rhs)) => Self::Constant(lhs + rhs), + (lhs, rhs) => Self::Add { + degree_multiple: lhs.degree_multiple().max(rhs.degree_multiple()), + x: Arc::new(lhs), + y: Arc::new(rhs), + }, + } + } +} + +impl AddAssign for SymbolicExpression +where + T: Into, +{ + fn add_assign(&mut self, rhs: T) { + *self = self.clone() + rhs.into(); + } +} + +impl Sum for SymbolicExpression +where + T: Into, +{ + fn sum>(iter: I) -> Self { + iter.map(Into::into) + .reduce(|x, y| x + y) + .unwrap_or(Self::ZERO) + } +} + +impl> Sub for SymbolicExpression { + type Output = Self; + + fn sub(self, rhs: T) -> Self { + match (self, rhs.into()) { + (Self::Constant(lhs), Self::Constant(rhs)) => Self::Constant(lhs - rhs), + (lhs, rhs) => Self::Sub { + degree_multiple: lhs.degree_multiple().max(rhs.degree_multiple()), + x: Arc::new(lhs), + y: Arc::new(rhs), + }, + } + } +} + +impl SubAssign for SymbolicExpression +where + T: Into, +{ + fn sub_assign(&mut self, rhs: T) { + *self = self.clone() - rhs.into(); + } +} + +impl Neg for SymbolicExpression { + type Output = Self; + + fn neg(self) -> Self { + match self { + Self::Constant(c) => Self::Constant(-c), + expr => Self::Neg { + degree_multiple: expr.degree_multiple(), + x: Arc::new(expr), + }, + } + } +} + +impl> Mul for SymbolicExpression { + type Output = Self; + + fn mul(self, rhs: T) -> Self { + match (self, rhs.into()) { + (Self::Constant(lhs), Self::Constant(rhs)) => Self::Constant(lhs * rhs), + (lhs, rhs) => Self::Mul { + degree_multiple: lhs.degree_multiple() + rhs.degree_multiple(), + x: Arc::new(lhs), + y: Arc::new(rhs), + }, + } + } +} + +impl MulAssign for SymbolicExpression +where + T: Into, +{ + fn mul_assign(&mut self, rhs: T) { + *self = self.clone() * rhs.into(); + } +} + +impl> Product for SymbolicExpression { + fn product>(iter: I) -> Self { + iter.map(Into::into) + .reduce(|x, y| x * y) + .unwrap_or(Self::ONE) + } +} + +#[cfg(test)] +mod tests { + use alloc::vec; + use alloc::vec::Vec; + + use p3_baby_bear::BabyBear; + + use super::*; + use crate::symbolic::Entry; + + #[test] + fn test_symbolic_expression_degree_multiple() { + let constant_expr = SymbolicExpression::::Constant(BabyBear::new(5)); + assert_eq!( + constant_expr.degree_multiple(), + 0, + "Constant should have degree 0" + ); + + let variable_expr = + SymbolicExpression::Variable(SymbolicVariable::new(Entry::Main { offset: 0 }, 1)); + assert_eq!( + variable_expr.degree_multiple(), + 1, + "Main variable should have degree 1" + ); + + let preprocessed_var = SymbolicExpression::Variable(SymbolicVariable::new( + Entry::Preprocessed { offset: 0 }, + 2, + )); + assert_eq!( + preprocessed_var.degree_multiple(), + 1, + "Preprocessed variable should have degree 1" + ); + + let permutation_var = SymbolicExpression::Variable(SymbolicVariable::::new( + Entry::Permutation { offset: 0 }, + 3, + )); + assert_eq!( + permutation_var.degree_multiple(), + 1, + "Permutation variable should have degree 1" + ); + + let public_var = + SymbolicExpression::Variable(SymbolicVariable::::new(Entry::Public, 4)); + assert_eq!( + public_var.degree_multiple(), + 0, + "Public variable should have degree 0" + ); + + let challenge_var = + SymbolicExpression::Variable(SymbolicVariable::::new(Entry::Challenge, 5)); + assert_eq!( + challenge_var.degree_multiple(), + 0, + "Challenge variable 
should have degree 0" + ); + + let is_first_row = SymbolicExpression::::IsFirstRow; + assert_eq!( + is_first_row.degree_multiple(), + 1, + "IsFirstRow should have degree 1" + ); + + let is_last_row = SymbolicExpression::::IsLastRow; + assert_eq!( + is_last_row.degree_multiple(), + 1, + "IsLastRow should have degree 1" + ); + + let is_transition = SymbolicExpression::::IsTransition; + assert_eq!( + is_transition.degree_multiple(), + 1, + "IsTransition should have degree 1" + ); + + let add_expr = SymbolicExpression::::Add { + x: Arc::new(variable_expr.clone()), + y: Arc::new(preprocessed_var.clone()), + degree_multiple: 1, + }; + assert_eq!( + add_expr.degree_multiple(), + 1, + "Addition should take max degree of inputs" + ); + + let sub_expr = SymbolicExpression::::Sub { + x: Arc::new(variable_expr.clone()), + y: Arc::new(preprocessed_var.clone()), + degree_multiple: 1, + }; + assert_eq!( + sub_expr.degree_multiple(), + 1, + "Subtraction should take max degree of inputs" + ); + + let neg_expr = SymbolicExpression::::Neg { + x: Arc::new(variable_expr.clone()), + degree_multiple: 1, + }; + assert_eq!( + neg_expr.degree_multiple(), + 1, + "Negation should keep the degree" + ); + + let mul_expr = SymbolicExpression::::Mul { + x: Arc::new(variable_expr), + y: Arc::new(preprocessed_var), + degree_multiple: 2, + }; + assert_eq!( + mul_expr.degree_multiple(), + 2, + "Multiplication should sum degrees" + ); + } + + #[test] + fn test_addition_of_constants() { + let a = SymbolicExpression::Constant(BabyBear::new(3)); + let b = SymbolicExpression::Constant(BabyBear::new(4)); + let result = a + b; + match result { + SymbolicExpression::Constant(val) => assert_eq!(val, BabyBear::new(7)), + _ => panic!("Addition of constants did not simplify correctly"), + } + } + + #[test] + fn test_subtraction_of_constants() { + let a = SymbolicExpression::Constant(BabyBear::new(10)); + let b = SymbolicExpression::Constant(BabyBear::new(4)); + let result = a - b; + match result { + SymbolicExpression::Constant(val) => assert_eq!(val, BabyBear::new(6)), + _ => panic!("Subtraction of constants did not simplify correctly"), + } + } + + #[test] + fn test_negation() { + let a = SymbolicExpression::Constant(BabyBear::new(7)); + let result = -a; + match result { + SymbolicExpression::Constant(val) => { + assert_eq!(val, BabyBear::NEG_ONE * BabyBear::new(7)); + } + _ => panic!("Negation did not work correctly"), + } + } + + #[test] + fn test_multiplication_of_constants() { + let a = SymbolicExpression::Constant(BabyBear::new(3)); + let b = SymbolicExpression::Constant(BabyBear::new(5)); + let result = a * b; + match result { + SymbolicExpression::Constant(val) => assert_eq!(val, BabyBear::new(15)), + _ => panic!("Multiplication of constants did not simplify correctly"), + } + } + + #[test] + fn test_degree_multiple_for_addition() { + let a = SymbolicExpression::Variable::(SymbolicVariable::new( + Entry::Main { offset: 0 }, + 1, + )); + let b = SymbolicExpression::Variable::(SymbolicVariable::new( + Entry::Main { offset: 0 }, + 2, + )); + let result = a + b; + match result { + SymbolicExpression::Add { + degree_multiple, + x, + y, + } => { + assert_eq!(degree_multiple, 1); + assert!( + matches!(*x, SymbolicExpression::Variable(ref v) if v.index == 1 && matches!(v.entry, Entry::Main { offset: 0 })) + ); + assert!( + matches!(*y, SymbolicExpression::Variable(ref v) if v.index == 2 && matches!(v.entry, Entry::Main { offset: 0 })) + ); + } + _ => panic!("Addition did not create an Add expression"), + } + } + + #[test] + fn 
test_degree_multiple_for_multiplication() { + let a = SymbolicExpression::Variable::(SymbolicVariable::new( + Entry::Main { offset: 0 }, + 1, + )); + let b = SymbolicExpression::Variable::(SymbolicVariable::new( + Entry::Main { offset: 0 }, + 2, + )); + let result = a * b; + + match result { + SymbolicExpression::Mul { + degree_multiple, + x, + y, + } => { + assert_eq!(degree_multiple, 2, "Multiplication should sum degrees"); + + assert!( + matches!(*x, SymbolicExpression::Variable(ref v) + if v.index == 1 && matches!(v.entry, Entry::Main { offset: 0 }) + ), + "Left operand should match `a`" + ); + + assert!( + matches!(*y, SymbolicExpression::Variable(ref v) + if v.index == 2 && matches!(v.entry, Entry::Main { offset: 0 }) + ), + "Right operand should match `b`" + ); + } + _ => panic!("Multiplication did not create a `Mul` expression"), + } + } + + #[test] + fn test_sum_operator() { + let expressions = vec![ + SymbolicExpression::Constant(BabyBear::new(2)), + SymbolicExpression::Constant(BabyBear::new(3)), + SymbolicExpression::Constant(BabyBear::new(5)), + ]; + let result: SymbolicExpression = expressions.into_iter().sum(); + match result { + SymbolicExpression::Constant(val) => assert_eq!(val, BabyBear::new(10)), + _ => panic!("Sum did not produce correct result"), + } + } + + #[test] + fn test_product_operator() { + let expressions = vec![ + SymbolicExpression::Constant(BabyBear::new(2)), + SymbolicExpression::Constant(BabyBear::new(3)), + SymbolicExpression::Constant(BabyBear::new(4)), + ]; + let result: SymbolicExpression = expressions.into_iter().product(); + match result { + SymbolicExpression::Constant(val) => assert_eq!(val, BabyBear::new(24)), + _ => panic!("Product did not produce correct result"), + } + } + + #[test] + fn test_default_is_zero() { + // Default should produce ZERO constant. + let expr: SymbolicExpression = Default::default(); + + // Verify it matches the zero constant. + assert!(matches!( + expr, + SymbolicExpression::Constant(c) if c == BabyBear::ZERO + )); + } + + #[test] + fn test_ring_constants() { + // ZERO is a Constant variant wrapping the field's zero element. + assert!(matches!( + SymbolicExpression::::ZERO, + SymbolicExpression::Constant(c) if c == BabyBear::ZERO + )); + + // ONE is a Constant variant wrapping the field's one element. + assert!(matches!( + SymbolicExpression::::ONE, + SymbolicExpression::Constant(c) if c == BabyBear::ONE + )); + + // TWO is a Constant variant wrapping the field's two element. + assert!(matches!( + SymbolicExpression::::TWO, + SymbolicExpression::Constant(c) if c == BabyBear::TWO + )); + + // NEG_ONE is a Constant variant wrapping the field's -1 element. + assert!(matches!( + SymbolicExpression::::NEG_ONE, + SymbolicExpression::Constant(c) if c == BabyBear::NEG_ONE + )); + } + + #[test] + fn test_from_symbolic_variable() { + // Create a main trace variable at column index 3. + let var = SymbolicVariable::::new(Entry::Main { offset: 0 }, 3); + + // Convert to expression. + let expr: SymbolicExpression = var.into(); + + // Verify the variable is preserved with correct entry and index. + match expr { + SymbolicExpression::Variable(v) => { + assert!(matches!(v.entry, Entry::Main { offset: 0 })); + assert_eq!(v.index, 3); + } + _ => panic!("Expected Variable variant"), + } + } + + #[test] + fn test_from_field_element() { + // Convert a field element directly to expression. + let field_val = BabyBear::new(42); + let expr: SymbolicExpression = field_val.into(); + + // Verify it becomes a Constant with the same value. 
+ assert!(matches!( + expr, + SymbolicExpression::Constant(c) if c == field_val + )); + } + + #[test] + fn test_from_prime_subfield() { + // Create expression from prime subfield element. + let prime_subfield_val = ::PrimeSubfield::new(7); + let expr = SymbolicExpression::::from_prime_subfield(prime_subfield_val); + + // Verify it produces a constant with the converted value. + assert!(matches!( + expr, + SymbolicExpression::Constant(c) if c == BabyBear::new(7) + )); + } + + #[test] + fn test_assign_operators() { + // Test AddAssign with constants (should simplify). + let mut expr = SymbolicExpression::Constant(BabyBear::new(5)); + expr += SymbolicExpression::Constant(BabyBear::new(3)); + assert!(matches!( + expr, + SymbolicExpression::Constant(c) if c == BabyBear::new(8) + )); + + // Test SubAssign with constants (should simplify). + let mut expr = SymbolicExpression::Constant(BabyBear::new(10)); + expr -= SymbolicExpression::Constant(BabyBear::new(4)); + assert!(matches!( + expr, + SymbolicExpression::Constant(c) if c == BabyBear::new(6) + )); + + // Test MulAssign with constants (should simplify). + let mut expr = SymbolicExpression::Constant(BabyBear::new(6)); + expr *= SymbolicExpression::Constant(BabyBear::new(7)); + assert!(matches!( + expr, + SymbolicExpression::Constant(c) if c == BabyBear::new(42) + )); + } + + #[test] + fn test_subtraction_creates_sub_node() { + // Create two trace variables. + let a = SymbolicExpression::Variable::(SymbolicVariable::new( + Entry::Main { offset: 0 }, + 0, + )); + let b = SymbolicExpression::Variable::(SymbolicVariable::new( + Entry::Main { offset: 0 }, + 1, + )); + + // Subtract them. + let result = a - b; + + // Should create Sub node (not simplified). + match result { + SymbolicExpression::Sub { + x, + y, + degree_multiple, + } => { + // Both operands have degree 1, so max is 1. + assert_eq!(degree_multiple, 1); + + // Verify left operand is main trace variable at index 0, offset 0. + assert!(matches!( + x.as_ref(), + SymbolicExpression::Variable(v) + if v.index == 0 && matches!(v.entry, Entry::Main { offset: 0 }) + )); + + // Verify right operand is main trace variable at index 1, offset 0. + assert!(matches!( + y.as_ref(), + SymbolicExpression::Variable(v) + if v.index == 1 && matches!(v.entry, Entry::Main { offset: 0 }) + )); + } + _ => panic!("Expected Sub variant"), + } + } + + #[test] + fn test_negation_creates_neg_node() { + // Create a trace variable. + let var = SymbolicExpression::Variable::(SymbolicVariable::new( + Entry::Main { offset: 0 }, + 0, + )); + + // Negate it. + let result = -var; + + // Should create Neg node (not simplified). + match result { + SymbolicExpression::Neg { x, degree_multiple } => { + // Degree is preserved from operand. + assert_eq!(degree_multiple, 1); + + // Verify operand is main trace variable at index 0, offset 0. + assert!(matches!( + x.as_ref(), + SymbolicExpression::Variable(v) + if v.index == 0 && matches!(v.entry, Entry::Main { offset: 0 }) + )); + } + _ => panic!("Expected Neg variant"), + } + } + + #[test] + fn test_empty_sum_returns_zero() { + // Sum of empty iterator should be additive identity. + let empty: Vec> = vec![]; + let result: SymbolicExpression = empty.into_iter().sum(); + + assert!(matches!( + result, + SymbolicExpression::Constant(c) if c == BabyBear::ZERO + )); + } + + #[test] + fn test_empty_product_returns_one() { + // Product of empty iterator should be multiplicative identity. 
+ let empty: Vec> = vec![]; + let result: SymbolicExpression = empty.into_iter().product(); + + assert!(matches!( + result, + SymbolicExpression::Constant(c) if c == BabyBear::ONE + )); + } + + #[test] + fn test_mixed_degree_addition() { + // Constant has degree 0. + let constant = SymbolicExpression::Constant(BabyBear::new(5)); + + // Variable has degree 1. + let var = SymbolicExpression::Variable::(SymbolicVariable::new( + Entry::Main { offset: 0 }, + 0, + )); + + // Add them: max(0, 1) = 1. + let result = constant + var; + + match result { + SymbolicExpression::Add { + x, + y, + degree_multiple, + } => { + // Degree is max(0, 1) = 1. + assert_eq!(degree_multiple, 1); + + // Verify left operand is the constant 5. + assert!(matches!( + x.as_ref(), + SymbolicExpression::Constant(c) if *c == BabyBear::new(5) + )); + + // Verify right operand is main trace variable at index 0, offset 0. + assert!(matches!( + y.as_ref(), + SymbolicExpression::Variable(v) + if v.index == 0 && matches!(v.entry, Entry::Main { offset: 0 }) + )); + } + _ => panic!("Expected Add variant"), + } + } + + #[test] + fn test_chained_multiplication_degree() { + // Create three variables, each with degree 1. + let a = SymbolicExpression::Variable::(SymbolicVariable::new( + Entry::Main { offset: 0 }, + 0, + )); + let b = SymbolicExpression::Variable::(SymbolicVariable::new( + Entry::Main { offset: 0 }, + 1, + )); + let c = SymbolicExpression::Variable::(SymbolicVariable::new( + Entry::Main { offset: 0 }, + 2, + )); + + // a * b has degree 1 + 1 = 2. + let ab = a * b; + assert_eq!(ab.degree_multiple(), 2); + + // (a * b) * c has degree 2 + 1 = 3. + let abc = ab * c; + assert_eq!(abc.degree_multiple(), 3); + } +} diff --git a/air/src/symbolic/mod.rs b/air/src/symbolic/mod.rs new file mode 100644 index 000000000..6596d6c1f --- /dev/null +++ b/air/src/symbolic/mod.rs @@ -0,0 +1,7 @@ +//! Symbolic expression types for AIR constraint representation. + +mod expression; +mod variable; + +pub use expression::SymbolicExpression; +pub use variable::{Entry, SymbolicVariable}; diff --git a/uni-stark/src/symbolic_variable.rs b/air/src/symbolic/variable.rs similarity index 79% rename from uni-stark/src/symbolic_variable.rs rename to air/src/symbolic/variable.rs index a5552107e..4b36437c9 100644 --- a/uni-stark/src/symbolic_variable.rs +++ b/air/src/symbolic/variable.rs @@ -3,13 +3,14 @@ use core::ops::{Add, Mul, Sub}; use p3_field::Field; -use crate::symbolic_expression::SymbolicExpression; +use crate::SymbolicExpression; #[derive(Copy, Clone, Debug, PartialEq, Eq, Hash)] pub enum Entry { Preprocessed { offset: usize }, Main { offset: usize }, Permutation { offset: usize }, + Periodic, Public, Challenge, } @@ -33,18 +34,17 @@ impl SymbolicVariable { pub const fn degree_multiple(&self) -> usize { match self.entry { - Entry::Preprocessed { .. } | Entry::Main { .. } | Entry::Permutation { .. } => 1, + Entry::Preprocessed { .. } + | Entry::Main { .. } + | Entry::Permutation { .. 
} + // Degree 1 is an approximation; see Winterfell's TransitionConstraintDegree for + // a more precise model: https://github.com/facebook/winterfell/blob/main/air/src/air/transition/degree.rs + | Entry::Periodic => 1, Entry::Public | Entry::Challenge => 0, } } } -impl From> for SymbolicExpression { - fn from(value: SymbolicVariable) -> Self { - Self::Variable(value) - } -} - impl Add for SymbolicVariable where T: Into>, diff --git a/baby-bear/CHANGELOG.md b/baby-bear/CHANGELOG.md new file mode 100644 index 000000000..bab2d20af --- /dev/null +++ b/baby-bear/CHANGELOG.md @@ -0,0 +1,46 @@ +# Changelog + +All notable changes to this project will be documented in this file. + +The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/), +and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0.html). + +## [Unreleased] +## [0.4.2] - 2026-01-05 +### Authors + +## [0.4.1] - 2025-12-18 +### Authors + +## [0.4.0] - 2025-12-12 +### Merged PRs +- Chore: add descriptions to all sub-crate manifests (#906) (Himess) +- GCD based inversion for 31 bit fields (#921) (AngusG) +- Adding Degree 8 extensions for KoalaBear and BabyBear. (#954) (AngusG) +- Packing Trick for Field Extensions (#958) (AngusG) +- Refactor to packed add methods (#972) (AngusG) +- Remove Nightly Features (#932) (AngusG) +- Move div_2_exp_u64 to ring (#970) (AngusG) +- Speed Up Base-Extension Multiplication (#998) (AngusG) +- Generic Poseidon2 Simplifications (#987) (AngusG) +- Poseidon2: add Neon implementation for Monty31 (#1023) (Thomas Coratger) +- Monty31: add aarch64 neon custom `exp_5` and `exp_7` (#1033) (Thomas Coratger) +- Fix: remove unused alloc::format imports (#1066) (Skylar Ray) +- Monty 31: more efficient aarch64 neon `quartic_mul_packed` (#1060) (Thomas Coratger) +- Refactor: remove redundant clones in crypto modules (#1080) (Skylar Ray) +- Refactor: remove redundant clones in crypto modules (#1086) (Skylar Ray) +- Clippy: small step (#1102) (Thomas Coratger) +- Feat: add thread safety to dft implementations (#999) (Jeremi Do Dinh) +- Clippy: add semicolon_if_nothing_returned (#1107) (Thomas Coratger) +- Refactor: deduplicate field JSON serialization tests (#1162) (andrewshab) +- Implement uniform sampling of bits from field elements (#1050) (Sebastian) + +### Authors +- AngusG +- Himess +- Jeremi Do Dinh +- Sebastian +- Skylar Ray +- Thomas Coratger +- andrewshab + diff --git a/baby-bear/Cargo.toml b/baby-bear/Cargo.toml index 409a51b39..967707ca4 100644 --- a/baby-bear/Cargo.toml +++ b/baby-bear/Cargo.toml @@ -10,6 +10,7 @@ keywords.workspace = true categories.workspace = true [dependencies] +p3-challenger.workspace = true p3-field.workspace = true p3-mds.workspace = true p3-monty-31.workspace = true @@ -19,9 +20,9 @@ p3-symmetric.workspace = true rand.workspace = true [dev-dependencies] -p3-dft.workspace = true -p3-field-testing.workspace = true -p3-util.workspace = true +p3-dft = { path = "../dft" } +p3-field-testing = { path = "../field-testing" } +p3-util = { path = "../util" } criterion.workspace = true num-bigint.workspace = true diff --git a/baby-bear/src/baby_bear.rs b/baby-bear/src/baby_bear.rs index e6fb9b3ef..838704f44 100644 --- a/baby-bear/src/baby_bear.rs +++ b/baby-bear/src/baby_bear.rs @@ -1,3 +1,4 @@ +use p3_challenger::UniformSamplingField; use p3_field::exponentiation::exp_1725656503; use p3_field::{Algebra, PrimeCharacteristicRing}; use p3_monty_31::{ @@ -102,6 +103,28 @@ impl BinomialExtensionData<8> for BabyBearParameters { ]); } +impl 
UniformSamplingField for BabyBearParameters {
+    const MAX_SINGLE_SAMPLE_BITS: usize = 27;
+    // NOTE: We only include `0` to avoid off-by-one indexing. `k` must be > 0.
+    // Also, we don't care about k > 30 for BabyBear.
+    const SAMPLING_BITS_M: [u64; 64] = {
+        let prime: u64 = Self::PRIME as u64;
+        let mut a = [0u64; 64];
+        let mut k = 0;
+        while k < 64 {
+            if k == 0 {
+                a[k] = prime; // This value is irrelevant in practice: `bits = 0` always returns 0.
+            } else {
+                // Create a mask to zero out the last k bits
+                let mask = !((1u64 << k) - 1);
+                a[k] = prime & mask;
+            }
+            k += 1;
+        }
+        a
+    };
+}
+
 #[cfg(test)]
 mod tests {
     use core::array;
@@ -111,7 +134,8 @@
     use p3_field::{InjectiveMonomial, PermutationMonomial, PrimeField64, TwoAdicField};
     use p3_field_testing::{
         test_field, test_field_dft, test_field_dft_consistency, test_field_dft_large,
-        test_prime_field, test_prime_field_32, test_prime_field_64, test_two_adic_field,
+        test_field_json_serialization, test_prime_field, test_prime_field_32, test_prime_field_64,
+        test_two_adic_field,
     };
 
     use super::*;
@@ -157,36 +181,7 @@
         assert_eq!(m2.injective_exp_n().injective_exp_root_n(), m2);
         assert_eq!(F::TWO.injective_exp_n().injective_exp_root_n(), F::TWO);
 
-        let f_serialized = serde_json::to_string(&f).unwrap();
-        let f_deserialized: F = serde_json::from_str(&f_serialized).unwrap();
-        assert_eq!(f, f_deserialized);
-
-        let f_1_serialized = serde_json::to_string(&f_1).unwrap();
-        let f_1_deserialized: F = serde_json::from_str(&f_1_serialized).unwrap();
-        let f_1_serialized_again = serde_json::to_string(&f_1_deserialized).unwrap();
-        let f_1_deserialized_again: F = serde_json::from_str(&f_1_serialized_again).unwrap();
-        assert_eq!(f_1, f_1_deserialized);
-        assert_eq!(f_1, f_1_deserialized_again);
-
-        let f_2_serialized = serde_json::to_string(&f_2).unwrap();
-        let f_2_deserialized: F = serde_json::from_str(&f_2_serialized).unwrap();
-        assert_eq!(f_2, f_2_deserialized);
-
-        let f_p_minus_1_serialized = serde_json::to_string(&f_p_minus_1).unwrap();
-        let f_p_minus_1_deserialized: F = serde_json::from_str(&f_p_minus_1_serialized).unwrap();
-        assert_eq!(f_p_minus_1, f_p_minus_1_deserialized);
-
-        let f_p_minus_2_serialized = serde_json::to_string(&f_p_minus_2).unwrap();
-        let f_p_minus_2_deserialized: F = serde_json::from_str(&f_p_minus_2_serialized).unwrap();
-        assert_eq!(f_p_minus_2, f_p_minus_2_deserialized);
-
-        let m1_serialized = serde_json::to_string(&m1).unwrap();
-        let m1_deserialized: F = serde_json::from_str(&m1_serialized).unwrap();
-        assert_eq!(m1, m1_deserialized);
-
-        let m2_serialized = serde_json::to_string(&m2).unwrap();
-        let m2_deserialized: F = serde_json::from_str(&m2_serialized).unwrap();
-        assert_eq!(m2, m2_deserialized);
+        test_field_json_serialization(&[f, f_1, f_2, f_p_minus_1, f_p_minus_2, m1, m2]);
     }
 
     // MontyField31's have no redundant representations.
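To make the `SAMPLING_BITS_M` construction above concrete: entry `k` is the BabyBear prime with its low `k` bits cleared, i.e. `p & !((1 << k) - 1)`. A small worked check; the constant `0x7800_0001` is BabyBear's modulus (2^31 - 2^27 + 1 = 2013265921), and the helper function is purely illustrative:

```rust
/// Illustrative recomputation of SAMPLING_BITS_M[k] for BabyBear.
const P: u64 = 0x7800_0001; // BabyBear modulus, 2013265921

fn sampling_bits_m(k: u32) -> u64 {
    P & !((1u64 << k) - 1) // clear the low k bits of p
}

fn main() {
    // p's only set bit below bit 27 is bit 0, so entries 1..=27 all agree.
    assert_eq!(sampling_bits_m(1), 0x7800_0000);
    assert_eq!(sampling_bits_m(27), 0x7800_0000);
    // Clearing bit 27 as well drops the 0x0800_0000 component of p.
    assert_eq!(sampling_bits_m(28), 0x7000_0000);
}
```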
diff --git a/batch-stark/CHANGELOG.md b/batch-stark/CHANGELOG.md
new file mode 100644
index 000000000..c9109f3b0
--- /dev/null
+++ b/batch-stark/CHANGELOG.md
@@ -0,0 +1,46 @@
+# Changelog
+
+All notable changes to this project will be documented in this file.
+
+The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/),
+and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0.html).
+
+## [Unreleased]
+## [0.4.2] - 2026-01-05
+### Merged PRs
+- Refactor(field): Add packed field extraction helpers and FieldArray utilities (#1211) (Adrian Hamelink)
+- Enable ZK for preprocessing and in batch-stark (#1178) (Linda Guiga)
+- Small changes for recursive lookups (#1229) (Linda Guiga)
+- Avoid change of Pcs's `open` method signature (#1230) (Linda Guiga)
+
+### Authors
+- Adrian Hamelink
+- Linda Guiga
+
+## [0.4.1] - 2025-12-18
+### Authors
+
+## [0.4.0] - 2025-12-12
+### Merged PRs
+- Rename multi-stark crate to batch-stark (#1122) (Sai)
+- Add preprocessed/transparent columns to uni-stark (#1114) (o-k-d)
+- Challenger: add `observe_base_as_algebra_element` to `FieldChallenger` trait (#1152) (Thomas Coratger)
+- Add preprocessed column support in batch-STARK (#1151) (Sai)
+- Update lookup traits and add folders with lookups (#1160) (Linda Guiga)
+- Derive Clone for PreprocessedInstanceMeta (#1166) (Linda Guiga)
+- Clarify quotient degree vs quotient chunks naming (#1156) (Sai)
+- Doc: add intra-doc links (#1174) (Robin Salen)
+- Integrate lookups to prover and verifier (#1165) (Linda Guiga)
+- Core: small touchups (#1186) (Thomas Coratger)
+- Feat: add PoW phase for batching in FRI commit phase (#1164) (Zach Langley)
+- Implement uniform sampling of bits from field elements (#1050) (Sebastian)
+
+### Authors
+- Linda Guiga
+- Robin Salen
+- Sai
+- Sebastian
+- Thomas Coratger
+- Zach Langley
+- o-k-d
+
diff --git a/batch-stark/Cargo.toml b/batch-stark/Cargo.toml
index bc76b33e9..41c6319e7 100644
--- a/batch-stark/Cargo.toml
+++ b/batch-stark/Cargo.toml
@@ -14,26 +14,29 @@
 p3-air.workspace = true
 p3-challenger.workspace = true
 p3-commit.workspace = true
 p3-field.workspace = true
+p3-lookup.workspace = true
 p3-matrix.workspace = true
 p3-maybe-rayon.workspace = true
 p3-uni-stark.workspace = true
 p3-util.workspace = true
 
+hashbrown.workspace = true
 itertools.workspace = true
 serde = { workspace = true, features = ["derive", "alloc"] }
 tracing.workspace = true
 
 [dev-dependencies]
-p3-baby-bear.workspace = true
-p3-circle.workspace = true
-p3-commit.workspace = true
-p3-dft.workspace = true
-p3-fri.workspace = true
-p3-keccak.workspace = true
-p3-matrix.workspace = true
-p3-merkle-tree.workspace = true
-p3-mersenne-31.workspace = true
-p3-symmetric.workspace = true
+p3-baby-bear = { path = "../baby-bear" }
+p3-circle = { path = "../circle" }
+p3-commit = { path = "../commit" }
+p3-dft = { path = "../dft" }
+p3-fri = { path = "../fri" }
+p3-keccak = { path = "../keccak" }
+p3-matrix = { path = "../matrix" }
+p3-merkle-tree = { path = "../merkle-tree" }
+p3-mersenne-31 = { path = "../mersenne-31" }
+p3-symmetric = { path = "../symmetric" }
+postcard = { workspace = true, features = ["alloc"] }
 rand.workspace = true
 
 [features]
diff --git a/batch-stark/src/check_constraints.rs b/batch-stark/src/check_constraints.rs
new file mode 100644
index 000000000..dea8ca794
--- /dev/null
+++ b/batch-stark/src/check_constraints.rs
@@ -0,0 +1,254 @@
+use p3_air::{
+    Air, AirBuilder, AirBuilderWithPublicValues, ExtensionBuilder, PeriodicAirBuilder,
+    PermutationAirBuilder,
+};
+use p3_field::{ExtensionField, Field};
+use p3_lookup::lookup_traits::{Lookup, LookupData, LookupGadget};
+use p3_matrix::Matrix;
+use p3_matrix::dense::{RowMajorMatrix, RowMajorMatrixView};
+use p3_matrix::stack::{VerticalPair, ViewPair};
+use tracing::instrument;
+
+/// Type alias for the inputs to lookup constraint checking.
+/// - The first element is a slice of [`Lookup`] values (generic over a field `F`) representing the symbolic lookups to be performed. +/// - The second element is a slice of [`LookupData`] values (generic over an extension field `EF`) representing the lookup data for global lookups. +/// - The third element is a reference to the [`LookupGadget`] implementation. +#[allow(unused)] +type LookupConstraintsInputs<'a, F, EF, LG> = (&'a [Lookup], &'a [LookupData], &'a LG); + +/// Runs constraint checks using a given [AIR](`p3_air::Air`) implementation and trace matrix. +/// +/// Iterates over every row in `main`, providing both the current and next row +/// (with wraparound) to the [AIR](`p3_air::Air`) logic. Also injects public values into the builder +/// for first/last row assertions. +/// +/// # Arguments +/// - `air`: The [AIR](`p3_air::Air`) logic to run. +/// - `main`: The [`RowMajorMatrix`] containing rows of witness values. +/// - `permutation`: The permutation [`RowMajorMatrix`] (rows of permutation values). +/// - `permutation_challenges`: The challenges used for the permutation argument. +/// - `public_values`: Public values provided to the builder. +/// - `lookup_constraints_inputs`: Inputs necessary to check lookup constraints: +/// - the symbolic representation of the [`Lookup`] values, +/// - the [`LookupData`] for global lookups, +/// - the [`LookupGadget`] implementation. +#[instrument(name = "check constraints", skip_all)] +#[allow(unused)] +pub(crate) fn check_constraints<'b, F, EF, A, LG>( + air: &A, + main: &RowMajorMatrix, + preprocessed: &Option>, + permutation: &RowMajorMatrix, + permutation_challenges: &[EF], + public_values: &[F], + lookup_constraints_inputs: LookupConstraintsInputs<'b, F, EF, LG>, +) where + F: Field, + EF: ExtensionField, + A: for<'a> Air>, + LG: LookupGadget, +{ + let height = main.height(); + + let (lookups, lookup_data, lookup_gadget) = lookup_constraints_inputs; + + (0..height).for_each(|row_index| { + let row_index_next = (row_index + 1) % height; + + // Safety: + // - row_index < height so we can use unchecked indexing. + // - row_index_next < height so we can use unchecked indexing. 
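The safety argument here rests on a simple row-pairing pattern: every row is evaluated together with its successor, with the last row wrapping around to the first. A minimal standalone sketch of that iteration (not the crate's code):

```rust
/// Pair each row with its successor, wrapping the final row back to row 0
/// so transition constraints are well-defined on every row. Both indices
/// stay strictly below `height`, which is what justifies the unchecked
/// row accesses that follow.
fn row_pairs(height: usize) -> impl Iterator<Item = (usize, usize)> {
    (0..height).map(move |r| (r, (r + 1) % height))
}

fn main() {
    let pairs: Vec<_> = row_pairs(4).collect();
    assert_eq!(pairs, [(0, 1), (1, 2), (2, 3), (3, 0)]);
}
```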
+ let (local, next, prep_local, prep_next, perm_local, perm_next) = unsafe { + ( + main.row_slice_unchecked(row_index), + main.row_slice_unchecked(row_index_next), + preprocessed + .as_ref() + .map(|p| p.row_slice_unchecked(row_index)), + preprocessed + .as_ref() + .map(|p| p.row_slice_unchecked(row_index_next)), + permutation.row_slice_unchecked(row_index), + permutation.row_slice_unchecked(row_index_next), + ) + }; + let main = VerticalPair::new( + RowMajorMatrixView::new_row(&*local), + RowMajorMatrixView::new_row(&*next), + ); + + let preprocessed_rows_data = prep_local.as_ref().zip(prep_next.as_ref()); + let preprocessed = preprocessed_rows_data.map(|(prep_local, prep_next)| { + VerticalPair::new( + RowMajorMatrixView::new_row(&**prep_local), + RowMajorMatrixView::new_row(&**prep_next), + ) + }); + + let permutation = VerticalPair::new( + RowMajorMatrixView::new_row(&*perm_local), + RowMajorMatrixView::new_row(&*perm_next), + ); + + let mut builder = DebugConstraintBuilderWithLookups { + row_index, + main, + preprocessed, + permutation, + permutation_challenges, + public_values, + is_first_row: F::from_bool(row_index == 0), + is_last_row: F::from_bool(row_index == height - 1), + is_transition: F::from_bool(row_index != height - 1), + }; + + >>::eval_with_lookups( + air, + &mut builder, + lookups, + lookup_data, + lookup_gadget, + ); + }); +} + +/// A builder that runs constraint assertions during testing. +/// +/// Used in conjunction with [`check_constraints`] to simulate +/// an execution trace and verify that the [AIR](`p3_air::Air`) logic enforces all constraints. +#[derive(Debug)] +#[allow(unused)] +pub struct DebugConstraintBuilderWithLookups<'a, F: Field, EF: ExtensionField> { + /// The index of the row currently being evaluated. + row_index: usize, + /// A view of the current and next row as a vertical pair. + main: ViewPair<'a, F>, + /// A view of the current and next preprocessed row as a vertical pair. + preprocessed: Option>, + /// The public values provided for constraint validation (e.g. inputs or outputs). + public_values: &'a [F], + /// A flag indicating whether this is the first row. + is_first_row: F, + /// A flag indicating whether this is the last row. + is_last_row: F, + /// A flag indicating whether this is a transition row (not the last row). + is_transition: F, + /// A view of the current and next permutation rows as a vertical pair. + permutation: ViewPair<'a, EF>, + /// The challenges used for the permutation argument. + permutation_challenges: &'a [EF], +} + +impl<'a, F, EF> AirBuilder for DebugConstraintBuilderWithLookups<'a, F, EF> +where + F: Field, + EF: ExtensionField, +{ + type F = F; + type Expr = F; + type Var = F; + type M = ViewPair<'a, F>; + + fn main(&self) -> Self::M { + self.main + } + + fn preprocessed(&self) -> Option { + self.preprocessed + } + + fn is_first_row(&self) -> Self::Expr { + self.is_first_row + } + + fn is_last_row(&self) -> Self::Expr { + self.is_last_row + } + + /// # Panics + /// This function panics if `size` is not `2`. 
+ fn is_transition_window(&self, size: usize) -> Self::Expr { + if size == 2 { + self.is_transition + } else { + panic!("only supports a window size of 2") + } + } + + fn assert_zero>(&mut self, x: I) { + assert_eq!( + x.into(), + F::ZERO, + "constraints had nonzero value on row {}", + self.row_index + ); + } + + fn assert_eq, I2: Into>(&mut self, x: I1, y: I2) { + let x = x.into(); + let y = y.into(); + assert_eq!( + x, y, + "values didn't match on row {}: {} != {}", + self.row_index, x, y + ); + } +} + +impl> AirBuilderWithPublicValues + for DebugConstraintBuilderWithLookups<'_, F, EF> +{ + type PublicVar = Self::F; + + fn public_values(&self) -> &[Self::F] { + self.public_values + } +} + +impl<'a, F: Field, EF: ExtensionField> ExtensionBuilder + for DebugConstraintBuilderWithLookups<'a, F, EF> +{ + type EF = EF; + + type ExprEF = EF; + + type VarEF = EF; + + fn assert_zero_ext(&mut self, x: I) + where + I: Into, + { + assert_eq!( + x.into(), + EF::ZERO, + "constraints had nonzero value on row {}", + self.row_index + ); + } +} + +impl<'a, F: Field, EF: ExtensionField> PermutationAirBuilder + for DebugConstraintBuilderWithLookups<'a, F, EF> +{ + type MP = VerticalPair, RowMajorMatrixView<'a, EF>>; + + type RandomVar = EF; + + fn permutation(&self) -> Self::MP { + self.permutation + } + + fn permutation_randomness(&self) -> &[Self::RandomVar] { + self.permutation_challenges + } +} + +impl<'a, F: Field, EF: ExtensionField> PeriodicAirBuilder + for DebugConstraintBuilderWithLookups<'a, F, EF> +{ + type PeriodicVar = F; + + fn periodic_values(&self) -> &[Self::PeriodicVar] { + &[] + } +} diff --git a/batch-stark/src/common.rs b/batch-stark/src/common.rs new file mode 100644 index 000000000..2df4d5851 --- /dev/null +++ b/batch-stark/src/common.rs @@ -0,0 +1,267 @@ +//! Shared data between batch-STARK prover and verifier. +//! +//! This module is intended to store per-instance data that is common to both +//! proving and verification, such as lookup tables and preprocessed traces. +//! +//! The preprocessed support integrates with `p3-uni-stark`'s transparent +//! preprocessed columns API, but batches all preprocessed traces into a single +//! global commitment (one matrix per instance that uses preprocessed columns). + +use alloc::vec; +use alloc::vec::Vec; + +use hashbrown::HashMap; +use p3_air::Air; +use p3_challenger::FieldChallenger; +use p3_commit::Pcs; +use p3_field::BasedVectorSpace; +use p3_lookup::lookup_traits::{Kind, Lookup, LookupGadget}; +use p3_matrix::Matrix; +use p3_uni_stark::{SymbolicAirBuilder, SymbolicExpression, Val}; +use p3_util::log2_strict_usize; + +use crate::config::{Challenge, Commitment, Domain, StarkGenericConfig as SGC}; +use crate::prover::StarkInstance; + +/// Per-instance metadata for a preprocessed trace that lives inside a +/// global preprocessed commitment. +#[derive(Clone)] +pub struct PreprocessedInstanceMeta { + /// Index of this instance's preprocessed matrix inside the global [`Pcs`] + /// commitment / prover data. + pub matrix_index: usize, + /// Width (number of columns) of the preprocessed trace. + pub width: usize, + /// Log2 of the base trace degree for this instance's preprocessed trace. + /// + /// This matches the log2 of the main trace degree (without ZK padding) + /// for that instance. + pub degree_bits: usize, +} + +/// Global preprocessed data shared by all batch-STARK instances. 
+/// +/// This batches all per-instance preprocessed traces into a single [`Pcs`] +/// commitment and prover data object, while keeping a mapping from instance +/// index to matrix index and per-matrix metadata. +pub struct GlobalPreprocessed { + /// Single [`Pcs`] commitment to all preprocessed traces (one matrix per + /// instance that defines preprocessed columns). + pub commitment: Commitment, + /// [`Pcs`] prover data for the batched preprocessed commitment. + pub prover_data: , SC::Challenger>>::ProverData, + /// For each STARK instance, optional metadata describing its preprocessed + /// trace inside the global commitment. + /// + /// `instances[i] == None` means instance `i` has no preprocessed columns. + pub instances: Vec>, + /// Mapping from preprocessed matrix index to the corresponding instance index. + /// + /// This allows building per-matrix opening schedules and routing opened + /// values back to instances. + pub matrix_to_instance: Vec, +} + +// TODO: Local-only preprocessed +// Some AIRs only need local preprocessed openings and never use the "next" +// row for preprocessed columns. At the moment we always open both [zeta, zeta_next] +// per preprocessed matrix, which is sound but wastes openings. + +/// Struct storing data common to both the prover and verifier. +// TODO: Optionally cache a single challenger seed for transparent +// preprocessed data (per-instance widths + global root), so +// prover and verifier don't have to recompute/rehash it each run. +pub struct CommonData { + /// Optional global preprocessed commitment shared by all instances. + /// + /// When `None`, no instance uses preprocessed columns. + pub preprocessed: Option>, + /// The lookups used by each STARK instance. + /// There is one `Vec>>` per STARK instance. + /// They are stored in the same order as the STARK instance inputs provided to `new`. + pub lookups: Vec>>>, +} + +impl CommonData { + pub const fn new( + preprocessed: Option>, + lookups: Vec>>>, + ) -> Self { + Self { + preprocessed, + lookups, + } + } + + /// Create [`CommonData`] with no preprocessed columns or lookups. + /// + /// Use this when none of your [`Air`] implementations have preprocessed columns or lookups. + pub fn empty(num_instances: usize) -> Self { + let lookups = vec![Vec::new(); num_instances]; + Self { + preprocessed: None, + lookups, + } + } +} + +impl CommonData +where + SC: SGC, + Challenge: BasedVectorSpace>, +{ + /// Build [`CommonData`] directly from STARK instances. + /// + /// This automatically: + /// - Derives trace degrees from trace heights + /// - Computes extended degrees (base + ZK padding) + /// - Sets up preprocessed columns for [`Air`] implementations that define them, committing + /// to them in a single global [`Pcs`] commitment. + /// - Deduces symbolic lookups from the STARKs + /// + /// This is a convenience function mainly used for tests. + pub fn from_instances(config: &SC, instances: &[StarkInstance<'_, SC, A>]) -> Self + where + SymbolicExpression: From>>, + A: Air, SC::Challenge>> + Clone, + { + let degrees: Vec = instances.iter().map(|i| i.trace.height()).collect(); + let log_ext_degrees: Vec = degrees + .iter() + .map(|&d| log2_strict_usize(d) + config.is_zk()) + .collect(); + let mut airs: Vec = instances.iter().map(|i| i.air.clone()).collect(); + Self::from_airs_and_degrees(config, &mut airs, &log_ext_degrees) + } + + /// Build [`CommonData`] from [`Air`] implementations and their extended trace degree bits. 
+ /// + /// # Arguments + /// + /// * `trace_ext_degree_bits` - Log2 of extended trace degrees (including ZK padding) + /// + /// # Returns + /// + /// Global preprocessed data shared by all instances. The global commitment + /// is present only if at least one [`Air`] defines preprocessed columns. + pub fn from_airs_and_degrees( + config: &SC, + airs: &mut [A], + trace_ext_degree_bits: &[usize], + ) -> Self + where + SymbolicExpression: From>>, + A: Air, SC::Challenge>>, + { + assert_eq!( + airs.len(), + trace_ext_degree_bits.len(), + "airs and trace_ext_degree_bits must have the same length" + ); + + let pcs = config.pcs(); + let is_zk = config.is_zk(); + + let mut instances_meta: Vec> = + Vec::with_capacity(airs.len()); + let mut matrix_to_instance: Vec = Vec::new(); + let mut domains_and_traces: Vec<(Domain, _)> = Vec::new(); + + for (i, (air, &ext_db)) in airs.iter().zip(trace_ext_degree_bits.iter()).enumerate() { + // Derive base trace degree bits from extended degree bits. + let base_db = ext_db - is_zk; + let maybe_prep = air.preprocessed_trace(); + + let Some(preprocessed) = maybe_prep else { + instances_meta.push(None); + continue; + }; + + let width = preprocessed.width(); + if width == 0 { + instances_meta.push(None); + continue; + } + + let degree = 1 << base_db; + let ext_degree = 1 << ext_db; + assert_eq!( + preprocessed.height(), + degree, + "preprocessed trace height must equal trace degree for instance {}", + i + ); + + let domain = pcs.natural_domain_for_degree(ext_degree); + let matrix_index = domains_and_traces.len(); + + domains_and_traces.push((domain, preprocessed)); + matrix_to_instance.push(i); + + instances_meta.push(Some(PreprocessedInstanceMeta { + matrix_index, + width, + degree_bits: ext_db, + })); + } + + let preprocessed = if domains_and_traces.is_empty() { + None + } else { + let (commitment, prover_data) = pcs.commit_preprocessing(domains_and_traces); + Some(GlobalPreprocessed { + commitment, + prover_data, + instances: instances_meta, + matrix_to_instance, + }) + }; + + let lookups = airs.iter_mut().map(|air| air.get_lookups()).collect(); + + Self { + preprocessed, + lookups, + } + } +} + +pub fn get_perm_challenges( + challenger: &mut SC::Challenger, + all_lookups: &[Vec>>], + lookup_gadget: &LG, +) -> Vec> { + let num_challenges_per_lookup = lookup_gadget.num_challenges(); + let mut global_perm_challenges = HashMap::new(); + + all_lookups + .iter() + .map(|contexts| { + // Pre-allocate for the instance's challenges. + let num_challenges = contexts.len() * num_challenges_per_lookup; + let mut instance_challenges = Vec::with_capacity(num_challenges); + + for context in contexts { + match &context.kind { + Kind::Global(name) => { + // Get or create the global challenges. + let challenges: &mut Vec = + global_perm_challenges.entry(name).or_insert_with(|| { + (0..num_challenges_per_lookup) + .map(|_| challenger.sample_algebra_element()) + .collect() + }); + instance_challenges.extend_from_slice(challenges); + } + Kind::Local => { + instance_challenges.extend( + (0..num_challenges_per_lookup) + .map(|_| challenger.sample_algebra_element::()), + ); + } + } + } + instance_challenges + }) + .collect() +} diff --git a/batch-stark/src/config.rs b/batch-stark/src/config.rs index d91f10668..440c397ba 100644 --- a/batch-stark/src/config.rs +++ b/batch-stark/src/config.rs @@ -10,38 +10,18 @@ pub use p3_uni_stark::{Domain, PackedChallenge, PackedVal, PcsError, StarkGeneri /// The challenge (extension field) type. 
pub type Challenge = ::Challenge; -/// The PCS commitment type for a STARK configuration. +/// The [`Pcs`] commitment type for a STARK configuration. pub type Commitment = <::Pcs as Pcs< ::Challenge, ::Challenger, >>::Commitment; -/// The PCS proof type for a STARK configuration. +/// The [`Pcs`] proof type for a STARK configuration. pub type PcsProof = <::Pcs as Pcs< ::Challenge, ::Challenger, >>::Proof; -/// Helper to observe base field elements as extension field elements for recursion-friendly transcripts. -/// -/// This simplifies recursive verifier circuits by using a uniform extension field challenger. -/// Instead of observing a mix of base and extension field elements, we convert all base field -/// observations (metadata, public values) to extension field elements before passing to the challenger. -/// -/// # Recursion Benefits -/// -/// In recursive proof systems, the verifier circuit needs to verify the inner proof. Since STARK -/// verification operates entirely in the extension field (challenges, opened values, constraint -/// evaluation), having a challenger that only observes extension field elements significantly -/// simplifies the recursive circuit implementation. -#[inline] -pub fn observe_base_as_ext(challenger: &mut SC::Challenger, val: Val) -where - Challenge: ExtensionField>, -{ - challenger.observe_algebra_element(Challenge::::from(val)); -} - #[inline] pub fn observe_instance_binding( ch: &mut SC::Challenger, @@ -52,8 +32,8 @@ pub fn observe_instance_binding( ) where Challenge: ExtensionField>, { - observe_base_as_ext::(ch, Val::::from_usize(log_ext_degree)); - observe_base_as_ext::(ch, Val::::from_usize(log_degree)); - observe_base_as_ext::(ch, Val::::from_usize(width)); - observe_base_as_ext::(ch, Val::::from_usize(n_quotient_chunks)); + ch.observe_base_as_algebra_element::>(Val::::from_usize(log_ext_degree)); + ch.observe_base_as_algebra_element::>(Val::::from_usize(log_degree)); + ch.observe_base_as_algebra_element::>(Val::::from_usize(width)); + ch.observe_base_as_algebra_element::>(Val::::from_usize(n_quotient_chunks)); } diff --git a/batch-stark/src/lib.rs b/batch-stark/src/lib.rs index ef1e24abb..de81e8944 100644 --- a/batch-stark/src/lib.rs +++ b/batch-stark/src/lib.rs @@ -1,46 +1,38 @@ //! Batch-STARK proving and verification. //! -//! This crate provides functionality for proving and verifying batched STARK instances -//! within a single proof, using a unified commitment scheme and shared transcript. -//! -//! # Overview -//! -//! The main workflow is: -//! 1. Create batched [`StarkInstance`]s, each with an AIR, trace, and public values -//! 2. Call [`prove_batch`] to generate a [`BatchProof`] -//! 3. Call [`verify_batch`] to verify the proof against the AIRs and public values -//! -//! # Example -//! //! ```ignore -//! use p3_batch_stark::{prove_batch, verify_batch, StarkInstance}; +//! use p3_batch_stark::{prove_batch_no_lookups, verify_batch_no_lookups, CommonData, StarkInstance}; //! -//! // Create instances for different computations //! let instances = vec![ -//! StarkInstance { air: &air1, trace: trace1, public_values: pv1 }, -//! StarkInstance { air: &air2, trace: trace2, public_values: pv2 }, +//! StarkInstance { air: &air1, trace: trace1, public_values: pv1, lookups: vec![] }, +//! StarkInstance { air: &air2, trace: trace2, public_values: pv2, lookups: vec![] }, //! ]; //! -//! // Generate a unified proof -//! let proof = prove_batch(&config, instances); -//! -//! // Verify the proof -//!
verify_batch(&config, &[&air1, &air2], &proof, &[pv1, pv2])?; +//! let common = CommonData::from_instances(&config, &instances); +//! let proof = prove_batch_no_lookups(&config, instances, &common); +//! verify_batch_no_lookups(&config, &[air1, air2], &proof, &[pv1, pv2], &common)?; //! ``` #![no_std] extern crate alloc; +#[cfg(debug_assertions)] +mod check_constraints; +pub mod common; pub mod config; pub mod proof; pub mod prover; +pub mod symbolic; pub mod verifier; // Re-export main types and functions for convenience +#[cfg(debug_assertions)] +pub use check_constraints::*; +pub use common::{CommonData, get_perm_challenges}; pub use config::{ Challenge, Commitment, Domain, PackedChallenge, PackedVal, PcsError, PcsProof, - StarkGenericConfig, Val, observe_base_as_ext, + StarkGenericConfig, Val, }; pub use p3_uni_stark::{OpenedValues, VerificationError}; pub use proof::{BatchCommitments, BatchOpenedValues, BatchProof}; diff --git a/batch-stark/src/proof.rs b/batch-stark/src/proof.rs index 050d6b1c4..7a7eb972c 100644 --- a/batch-stark/src/proof.rs +++ b/batch-stark/src/proof.rs @@ -1,5 +1,6 @@ use alloc::vec::Vec; +use p3_lookup::lookup_traits::LookupData; use p3_uni_stark::OpenedValues; use serde::{Deserialize, Serialize}; @@ -15,6 +16,8 @@ pub struct BatchProof { pub opened_values: BatchOpenedValues>, /// PCS opening proof for all commitments. pub opening_proof: PcsProof, + /// Data necessary to verify the global lookup arguments across all instances. + pub global_lookup_data: Vec>>>, /// Per-instance log2 of the extended trace domain size. /// For instance i, this stores `log2(|extended trace domain|) = log2(N_i) + is_zk()`. pub degree_bits: Vec, @@ -25,13 +28,28 @@ pub struct BatchProof { pub struct BatchCommitments { /// Commitment to all main trace matrices (one per instance). pub main: Com, + /// Commitment to all permutation polynomials (one per instance). + pub permutation: Option, /// Commitment to all quotient polynomial chunks (across all instances). pub quotient_chunks: Com, + /// Commitment to all randomization polynomials (one per instance, if ZK is enabled). + pub random: Option, +} + +/// Opened values for a single instance in a batch-STARK proof, including lookup-related values. +#[derive(Debug, Serialize, Deserialize)] +pub struct OpenedValuesWithLookups { + /// Standard opened values (trace and quotient). + pub base_opened_values: OpenedValues, + /// Opened values for the permutation polynomials at the challenge `zeta`. + pub permutation_local: Vec, + /// Opened values for the permutation polynomials at the next row `g * zeta`. + pub permutation_next: Vec, } /// Opened values for all instances in a batch-STARK proof. #[derive(Debug, Serialize, Deserialize)] pub struct BatchOpenedValues { /// Opened values for each instance, in the same order as provided to the prover. 
- pub instances: Vec>, + pub instances: Vec>, } diff --git a/batch-stark/src/prover.rs b/batch-stark/src/prover.rs index 1f57754cb..fb1c98e0d 100644 --- a/batch-stark/src/prover.rs +++ b/batch-stark/src/prover.rs @@ -4,48 +4,108 @@ use alloc::vec::Vec; use p3_air::Air; use p3_challenger::{CanObserve, FieldChallenger}; use p3_commit::{Pcs, PolynomialSpace}; -use p3_field::PrimeCharacteristicRing; +use p3_field::{BasedVectorSpace, PackedFieldExtension, PackedValue, PrimeCharacteristicRing}; +use p3_lookup::folder::ProverConstraintFolderWithLookups; +use p3_lookup::logup::LogUpGadget; +use p3_lookup::lookup_traits::{Kind, Lookup, LookupData, LookupGadget, lookup_data_to_expr}; use p3_matrix::Matrix; use p3_matrix::dense::RowMajorMatrix; +use p3_maybe_rayon::prelude::*; use p3_uni_stark::{ - OpenedValues, ProverConstraintFolder, SymbolicAirBuilder, get_log_quotient_degree, - get_symbolic_constraints, quotient_values, + OpenedValues, PackedChallenge, PackedVal, ProverConstraintFolder, SymbolicAirBuilder, + SymbolicExpression, }; use p3_util::log2_strict_usize; -use tracing::instrument; +use tracing::{debug_span, instrument}; -use crate::config::{ - Challenge, Domain, StarkGenericConfig as SGC, Val, observe_base_as_ext, - observe_instance_binding, -}; -use crate::proof::{BatchCommitments, BatchOpenedValues, BatchProof}; +#[cfg(debug_assertions)] +use crate::check_constraints::DebugConstraintBuilderWithLookups; +use crate::common::{CommonData, get_perm_challenges}; +use crate::config::{Challenge, Domain, StarkGenericConfig as SGC, Val, observe_instance_binding}; +use crate::proof::{BatchCommitments, BatchOpenedValues, BatchProof, OpenedValuesWithLookups}; +use crate::symbolic::{get_log_num_quotient_chunks, get_symbolic_constraints}; #[derive(Debug)] pub struct StarkInstance<'a, SC: SGC, A> { pub air: &'a A, pub trace: RowMajorMatrix>, pub public_values: Vec>, + pub lookups: Vec>>, +} + +impl<'a, SC: SGC, A> StarkInstance<'a, SC, A> { + pub fn new_multiple( + airs: &'a [A], + traces: &[RowMajorMatrix>], + public_values: &[Vec>], + common_data: &CommonData, + ) -> Vec { + airs.iter() + .zip(traces.iter()) + .zip(public_values.iter()) + .zip(common_data.lookups.iter()) + .map(|(((air, trace), public_values), lookups)| Self { + air, + trace: trace.clone(), + public_values: public_values.clone(), + lookups: lookups.clone(), + }) + .collect() + } } #[instrument(skip_all)] -pub fn prove_batch(config: &SC, instances: Vec>) -> BatchProof +pub fn prove_batch< + SC, + #[cfg(debug_assertions)] A: for<'a> Air, SC::Challenge>> + + Air, SC::Challenge>> + + for<'a> Air> + + Clone, + #[cfg(not(debug_assertions))] A: for<'a> Air, SC::Challenge>> + + for<'a> Air> + + Clone, +>( + config: &SC, + instances: &[StarkInstance<'_, SC, A>], + common: &CommonData, +) -> BatchProof where SC: SGC, - A: Air>> + for<'a> Air>, + SymbolicExpression: From>>, { + // TODO: Extend if additional lookup gadgets are added. + let lookup_gadget = LogUpGadget::new(); + let pcs = config.pcs(); let mut challenger = config.initialise_challenger(); - // TODO: No ZK support for batch-stark yet. - if config.is_zk() != 0 { - panic!("p3-batch-stark: ZK mode is not supported yet"); - } - // Use instances in provided order. let degrees: Vec = instances.iter().map(|i| i.trace.height()).collect(); let log_degrees: Vec = degrees.iter().copied().map(log2_strict_usize).collect(); let log_ext_degrees: Vec = log_degrees.iter().map(|&d| d + config.is_zk()).collect(); + // Extract lookups and create lookup data in one pass. 
+ let (all_lookups, mut lookup_data): (Vec>, Vec>) = instances + .iter() + .map(|inst| { + ( + inst.lookups.clone(), + // We only get `LookupData` for global lookups, since we only need it for the expected cumulated value. + inst.lookups + .iter() + .filter_map(|lookup| match &lookup.kind { + Kind::Global(name) => Some(LookupData { + name: name.clone(), + aux_idx: lookup.columns[0], + expected_cumulated: SC::Challenge::ZERO, + }), + _ => None, + }) + .collect::>(), + ) + }) + .unzip(); + // Domains for each instance (base and extended) in one pass. let (trace_domains, ext_trace_domains): (Vec>, Vec>) = degrees .iter() @@ -61,21 +121,36 @@ where let airs: Vec<&A> = instances.iter().map(|i| i.air).collect(); let pub_vals: Vec>> = instances.iter().map(|i| i.public_values.clone()).collect(); - // Precompute per-instance log_quotient_degrees and quotient_degrees in one pass. - let (log_quotient_degrees, quotient_degrees): (Vec, Vec) = airs + let mut preprocessed_widths = Vec::with_capacity(airs.len()); + let (log_num_quotient_chunks, num_quotient_chunks): (Vec, Vec) = airs .iter() .zip(pub_vals.iter()) - .map(|(air, pv)| { - let lqd = get_log_quotient_degree::, A>(air, 0, pv.len(), config.is_zk()); - let qd = 1 << (lqd + config.is_zk()); - (lqd, qd) + .enumerate() + .map(|(i, (air, pv))| { + let pre_w = common + .preprocessed + .as_ref() + .and_then(|g| g.instances[i].as_ref().map(|m| m.width)) + .unwrap_or(0); + preprocessed_widths.push(pre_w); + let lq_chunks = get_log_num_quotient_chunks::, SC::Challenge, A, LogUpGadget>( + air, + pre_w, + pv.len(), + &all_lookups[i], + &lookup_data_to_expr(&lookup_data[i]), + config.is_zk(), + &lookup_gadget, + ); + let n_chunks = 1 << (lq_chunks + config.is_zk()); + (lq_chunks, n_chunks) }) .unzip(); // Observe the number of instances up front so the transcript can't be reinterpreted // with a different partitioning. let n_instances = airs.len(); - observe_base_as_ext::(&mut challenger, Val::::from_usize(n_instances)); + challenger.observe_base_as_algebra_element::>(Val::::from_usize(n_instances)); // Observe per-instance binding data: (log_ext_degree, log_degree), width, num quotient chunks. for i in 0..n_instances { @@ -84,15 +159,15 @@ where log_ext_degrees[i], log_degrees[i], A::width(airs[i]), - quotient_degrees[i], + num_quotient_chunks[i], ); } // Commit to all traces using a single batched commitment, preserving input order. let main_commit_inputs = instances - .into_iter() + .iter() .zip(ext_trace_domains.iter().cloned()) - .map(|(inst, dom)| (dom, inst.trace)) + .map(|(inst, dom)| (dom, inst.trace.clone())) .collect::>(); let (main_commit, main_data) = pcs.commit(main_commit_inputs); @@ -102,7 +177,73 @@ where challenger.observe_slice(pv); } - // Compute quotient degrees and domains per instance inline in the loop below. + // Observe preprocessed widths for each instance, to bind transparent + // preprocessed columns into the transcript. If a global preprocessed + // commitment exists, observe it once. + for &pre_w in preprocessed_widths.iter() { + challenger.observe_base_as_algebra_element::>(Val::::from_usize(pre_w)); + } + if let Some(global) = &common.preprocessed { + challenger.observe(global.commitment.clone()); + } + + // Sample the lookup challenges. 
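The next call samples the permutation challenges via `get_perm_challenges` (defined in `common.rs` above). The key property is that every instance participating in a global lookup of a given name receives identical challenges, so all tables contributing to the same named lookup argument are evaluated at the same random point, while local lookups always draw fresh ones. A simplified model of that sharing rule, with a counter standing in for the Fiat-Shamir challenger:

```rust
use std::collections::HashMap;

#[derive(Clone)]
enum Kind {
    Global(String),
    Local,
}

// Stand-in for challenger.sample_algebra_element().
fn sample(counter: &mut u64, n: usize) -> Vec<u64> {
    (0..n).map(|_| { *counter += 1; *counter }).collect()
}

/// One challenge vector per instance; global lookups sharing a name are
/// served from a single cached draw, local lookups get fresh draws.
fn challenges(instances: &[Vec<Kind>], per_lookup: usize) -> Vec<Vec<u64>> {
    let mut counter = 0;
    let mut shared: HashMap<String, Vec<u64>> = HashMap::new();
    instances
        .iter()
        .map(|kinds| {
            let mut out = Vec::with_capacity(kinds.len() * per_lookup);
            for kind in kinds {
                match kind {
                    Kind::Global(name) => out.extend_from_slice(
                        shared
                            .entry(name.clone())
                            .or_insert_with(|| sample(&mut counter, per_lookup)),
                    ),
                    Kind::Local => out.extend(sample(&mut counter, per_lookup)),
                }
            }
            out
        })
        .collect()
}

fn main() {
    let a = vec![Kind::Global("bus".into()), Kind::Local];
    let b = vec![Kind::Global("bus".into())];
    let ch = challenges(&[a, b], 2);
    assert_eq!(ch[0][..2], ch[1][..2]); // shared "bus" challenges match
    assert_ne!(ch[0][2..], ch[1][..2]); // local draws are fresh
}
```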
+ let challenges_per_instance = + get_perm_challenges::(&mut challenger, &all_lookups, &lookup_gadget); + + // Get permutation matrices, if any, along with their associated trace domain + let mut permutation_commit_inputs = Vec::with_capacity(n_instances); + instances + .iter() + .enumerate() + .zip(ext_trace_domains.iter().cloned()) + .for_each(|((i, inst), ext_domain)| { + if !all_lookups[i].is_empty() { + let generated_perm = lookup_gadget.generate_permutation::( + &inst.trace, + &inst.air.preprocessed_trace(), + &inst.public_values, + &all_lookups[i], + &mut lookup_data[i], + &challenges_per_instance[i], + ); + permutation_commit_inputs + .push((ext_domain, generated_perm.clone().flatten_to_base())); + + #[cfg(debug_assertions)] + { + use crate::check_constraints::check_constraints; + + let preprocessed_trace = inst.air.preprocessed_trace(); + + let lookup_constraints_inputs = ( + all_lookups[i].as_slice(), + lookup_data[i].as_slice(), + &lookup_gadget, + ); + check_constraints( + inst.air, + &inst.trace, + &preprocessed_trace, + &generated_perm, + &challenges_per_instance[i], + &inst.public_values, + lookup_constraints_inputs, + ); + } + } + }); + + // Commit to all traces in one multi-matrix commitment, preserving input order. + let permutation_commit_and_data = if !permutation_commit_inputs.is_empty() { + let commitment = pcs.commit(permutation_commit_inputs); + challenger.observe(commitment.0.clone()); + Some(commitment) + } else { + None + }; + + // Compute quotient chunk counts and domains per instance inline in the loop below. // Get the random alpha to fold constraints. let alpha: Challenge = challenger.sample_algebra_element(); @@ -113,98 +254,223 @@ where // Track ranges so we can map openings back to instances. let mut quotient_chunk_ranges: Vec<(usize, usize)> = Vec::with_capacity(n_instances); + let mut perm_counter = 0; + // TODO: Parallelize this loop for better performance with many instances. for (i, trace_domain) in trace_domains.iter().enumerate() { - let lqd = log_quotient_degrees[i]; - let quotient_degree = quotient_degrees[i]; - // Disjoint domain sized by extended degree + quotient degree; use ext domain for shift. + let log_chunks = log_num_quotient_chunks[i]; + let n_chunks = num_quotient_chunks[i]; + // Disjoint domain of size ext_degree * num_quotient_chunks + // (log size = log_ext_degrees[i] + log_num_quotient_chunks[i]); use ext domain for shift. let quotient_domain = - ext_trace_domains[i].create_disjoint_domain(1 << (log_ext_degrees[i] + lqd)); + ext_trace_domains[i].create_disjoint_domain(1 << (log_ext_degrees[i] + log_chunks)); // Count constraints to size alpha powers packing. - let constraint_cnt = get_symbolic_constraints(airs[i], 0, pub_vals[i].len()).len(); + let (base_constraints, extension_constraints) = get_symbolic_constraints( + airs[i], + preprocessed_widths[i], + pub_vals[i].len(), + &all_lookups[i], + &lookup_data_to_expr(&lookup_data[i]), + &lookup_gadget, + ); + let constraint_len = base_constraints.len() + extension_constraints.len(); // Get evaluations on quotient domain from the main commitment. let trace_on_quotient_domain = pcs.get_evaluations_on_domain(&main_data, i, quotient_domain); + let permutation_on_quotient_domain = permutation_commit_and_data + .as_ref() + .filter(|_| !all_lookups[i].is_empty()) + .map(|(_, perm_data)| { + let evals = pcs.get_evaluations_on_domain(perm_data, perm_counter, quotient_domain); + perm_counter += 1; + evals + }); + + // Get preprocessed evaluations if this instance has preprocessed columns. 
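One detail worth spelling out from the loop above: `constraint_len` is needed because constraint folding consumes exactly one power of `alpha` per constraint. A schematic of the folding, with plain `u64` wrapping arithmetic standing in for extension-field operations:

```rust
/// Fold n constraint evaluations c_0, ..., c_{n-1} into a single value
/// sum_i alpha^(n-1-i) * c_i. Written as Horner evaluation, this matches
/// precomputing `constraint_count` powers of alpha and reversing them,
/// as `quotient_values` does below.
fn fold_constraints(constraints: &[u64], alpha: u64) -> u64 {
    constraints
        .iter()
        .fold(0u64, |acc, &c| acc.wrapping_mul(alpha).wrapping_add(c))
}

fn main() {
    // 3 constraints, alpha = 10: 1 * 10^2 + 2 * 10 + 3 = 123.
    assert_eq!(fold_constraints(&[1, 2, 3], 10), 123);
}
```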
+ let preprocessed_on_quotient_domain = common + .preprocessed + .as_ref() + .and_then(|g| g.instances[i].as_ref().map(|meta| (g, meta))) + .map(|(g, meta)| { + pcs.get_evaluations_on_domain_no_random( + &g.prover_data, + meta.matrix_index, + quotient_domain, + ) + }); + // Compute quotient(x) = constraints(x)/Z_H(x) over quotient_domain, as extension values. - let q_values = quotient_values::( + let q_values = quotient_values::( airs[i], &pub_vals[i], *trace_domain, quotient_domain, &trace_on_quotient_domain, - None, // multi-stark doesn't support preprocessed columns yet + permutation_on_quotient_domain.as_ref(), + &all_lookups[i], + &lookup_data[i], + &lookup_gadget, + &challenges_per_instance[i], + preprocessed_on_quotient_domain.as_ref(), alpha, - constraint_cnt, + constraint_len, ); // Flatten to base field and split into chunks. let q_flat = RowMajorMatrix::new_col(q_values).flatten_to_base(); - let chunk_mats = quotient_domain.split_evals(quotient_degree, q_flat); - let chunk_domains = quotient_domain.split_domains(quotient_degree); + let chunk_mats = quotient_domain.split_evals(n_chunks, q_flat); + let chunk_domains = quotient_domain.split_domains(n_chunks); + + let evals = chunk_domains + .iter() + .zip(chunk_mats.iter()) + .map(|(d, m)| (*d, m.clone())); + let ldes = pcs.get_quotient_ldes(evals, n_chunks); let start = quotient_chunk_domains.len(); quotient_chunk_domains.extend(chunk_domains); - quotient_chunk_mats.extend(chunk_mats); + quotient_chunk_mats.extend(ldes); let end = quotient_chunk_domains.len(); quotient_chunk_ranges.push((start, end)); } // Commit to all quotient chunks together. - let quotient_commit_inputs = quotient_chunk_domains - .iter() - .cloned() - .zip(quotient_chunk_mats.into_iter()) - .collect::>(); - let (quotient_commit, quotient_data) = pcs.commit(quotient_commit_inputs); + let (quotient_commit, quotient_data) = pcs.commit_ldes(quotient_chunk_mats); challenger.observe(quotient_commit.clone()); - // ZK disabled: no randomization round. + // If zk is enabled, we generate random extension field values of the size of the randomized trace. If `n` is the degree of the initial trace, + // then the randomized trace has degree `2n`. To randomize the FRI batch polynomial, we then need an extension field random polynomial of degree `2n -1`. + // So we can generate a random polynomial of degree `2n`, and provide it to `open` as is. + // Then the method will add `(R(X) - R(z)) / (X - z)` (which is of the desired degree `2n - 1`), to the batch of polynomials. + // Since we need a random polynomial defined over the extension field, and the `commit` method is over the base field, + // we actually need to commit to `SC::Challenge::D` base field random polynomials. + // This is similar to what is done for the quotient polynomials. + // TODO: This approach is only statistically zk. To make it perfectly zk, `R` would have to truly be an extension field polynomial. + let (opt_r_commit, opt_r_data) = if SC::Pcs::ZK { + let (r_commit, r_data) = pcs + .get_opt_randomization_poly_commitment(ext_trace_domains.iter().copied()) + .expect("ZK is enabled, so we should have randomization commitments"); + (Some(r_commit), Some(r_data)) + } else { + (None, None) + }; + + if let Some(r_commit) = &opt_r_commit { + challenger.observe(r_commit.clone()); + } // Sample OOD point. let zeta: Challenge = challenger.sample_algebra_element(); - // Build opening rounds. 
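A note on the randomization round above: since `commit` operates over the base field, an extension-field random polynomial is represented as `SC::Challenge::D` base-field columns, one per basis coefficient. A sketch of that representation (with `D` a stand-in for the extension degree):

```rust
const D: usize = 4; // stand-in for SC::Challenge::D, the extension degree

/// Flatten n extension elements, each stored as D base coefficients, into
/// a row-major n x D base matrix: one row per evaluation, one column per
/// basis coefficient. Committing to this matrix commits to the extension
/// polynomial coefficient-wise.
fn flatten_to_base(ext_values: &[[u64; D]]) -> Vec<u64> {
    ext_values.iter().flatten().copied().collect()
}

fn main() {
    let vals = [[1, 2, 3, 4], [5, 6, 7, 8]];
    assert_eq!(flatten_to_base(&vals), vec![1, 2, 3, 4, 5, 6, 7, 8]);
}
```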
- let round1_points = ext_trace_domains - .iter() - .map(|dom| { - vec![ - zeta, - dom.next_point(zeta) - .expect("domain should support next_point operation"), - ] - }) - .collect::>(); - let round1 = (&main_data, round1_points); - let round2_points = quotient_chunk_ranges - .iter() - .cloned() - .flat_map(|(s, e)| (s..e).map(|_| vec![zeta])) - .collect::>(); - let round2 = ("ient_data, round2_points); - let rounds = vec![round1, round2]; - - let (opened_values, opening_proof) = pcs.open(rounds, &mut challenger); - assert_eq!( - opened_values.len(), - 2, - "expected [main, quotient] opening groups from PCS" - ); - // Rely on open order: [main, quotient] since ZK is disabled. - let trace_idx = 0usize; - let quotient_idx = 1usize; + // Build opening rounds, including optional global preprocessed commitment. + let (opened_values, opening_proof) = { + let mut rounds = Vec::new(); + + let round0 = opt_r_data.as_ref().map(|r_data| { + let round0_points = trace_domains.iter().map(|_| vec![zeta]).collect::>(); + (r_data, round0_points) + }); + rounds.extend(round0); + // Main trace round: per instance, open at zeta and its next point. + let round1_points = trace_domains + .iter() + .map(|dom| { + vec![ + zeta, + dom.next_point(zeta) + .expect("domain should support next_point operation"), + ] + }) + .collect::>(); + rounds.push((&main_data, round1_points)); + + // Quotient chunks round: one point per chunk at zeta. + let round2_points = quotient_chunk_ranges + .iter() + .cloned() + .flat_map(|(s, e)| (s..e).map(|_| vec![zeta])) + .collect::>(); + rounds.push(("ient_data, round2_points)); + + // Optional global preprocessed round: one matrix per instance that + // has preprocessed columns. + if let Some(global) = &common.preprocessed { + let pre_points = global + .matrix_to_instance + .iter() + .map(|&inst_idx| { + let zeta_next_i = trace_domains[inst_idx] + .next_point(zeta) + .expect("domain should support next_point operation"); + vec![zeta, zeta_next_i] + }) + .collect::>(); + rounds.push((&global.prover_data, pre_points)); + } + + let lookup_points: Vec<_> = trace_domains + .iter() + .zip(&all_lookups) + .filter(|&(_, lookups)| !lookups.is_empty()) + .map(|(dom, _)| { + vec![ + zeta, + dom.next_point(zeta) + .expect("domain should support next_point operation"), + ] + }) + .collect(); + + if let Some((_, perm_data)) = &permutation_commit_and_data { + let lookup_round = (perm_data, lookup_points); + rounds.push(lookup_round); + } + + pcs.open_with_preprocessing(rounds, &mut challenger, common.preprocessed.is_some()) + }; + + // Rely on PCS indices for opened value groups: main trace, quotient, preprocessed. + let trace_idx = SC::Pcs::TRACE_IDX; + let quotient_idx = SC::Pcs::QUOTIENT_IDX; + let preprocessed_idx = SC::Pcs::PREPROCESSED_TRACE_IDX; + let permutation_idx = if common.preprocessed.is_some() { + preprocessed_idx + 1 + } else { + preprocessed_idx + }; // Parse trace opened values per instance. let trace_values_for_mats = &opened_values[trace_idx]; assert_eq!(trace_values_for_mats.len(), n_instances); // Parse quotient chunk opened values and map per instance. - let mut quotient_openings_iter = opened_values[quotient_idx].iter(); + let mut per_instance: Vec>> = + Vec::with_capacity(n_instances); + + // Preprocessed openings, if a global preprocessed commitment exists. 
+ let preprocessed_openings = common + .preprocessed + .as_ref() + .map(|_| &opened_values[SC::Pcs::PREPROCESSED_TRACE_IDX]); + + let is_lookup = permutation_commit_and_data.is_some(); + let permutation_values_for_mats = if is_lookup { + &opened_values[permutation_idx] + } else { + &vec![] + }; + let mut permutation_values_for_mats = permutation_values_for_mats.iter(); - let mut per_instance: Vec>> = Vec::with_capacity(n_instances); + let mut quotient_openings_iter = opened_values[quotient_idx].iter(); for (i, (s, e)) in quotient_chunk_ranges.iter().copied().enumerate() { + let random = if opt_r_data.is_some() { + Some(opened_values[0][i][0].clone()) + } else { + None + }; // Trace locals let tv = &trace_values_for_mats[i]; let trace_local = tv[0].clone(); @@ -219,25 +485,231 @@ where qcs.push(mat_vals[0].clone()); } - per_instance.push(OpenedValues { + // Preprocessed openings (if present). + let (preprocessed_local, preprocessed_next) = if let (Some(global), Some(pre_round)) = + (&common.preprocessed, preprocessed_openings) + { + global.instances[i].as_ref().map_or((None, None), |meta| { + let vals = &pre_round[meta.matrix_index]; + assert_eq!( + vals.len(), + 2, + "expected two opening points (zeta, zeta_next) for preprocessed trace" + ); + (Some(vals[0].clone()), Some(vals[1].clone())) + }) + } else { + (None, None) + }; + + // Not all AIRs have lookups, so for each instance, we first need to check whether it has lookups. + let (permutation_local, permutation_next) = if !all_lookups[i].is_empty() { + let perm_v = permutation_values_for_mats + .next() + .expect("instance should have permutation openings"); + (perm_v[0].clone(), perm_v[1].clone()) + } else { + (vec![], vec![]) + }; + + let base_opened = OpenedValues { trace_local, trace_next, - preprocessed_local: None, // multi-stark doesn't support preprocessed columns yet - preprocessed_next: None, + preprocessed_local, + preprocessed_next, quotient_chunks: qcs, - random: None, // ZK not supported in batch-stark yet + random, + }; + + per_instance.push(OpenedValuesWithLookups { + base_opened_values: base_opened, + permutation_local, + permutation_next, }); } + let permutation = permutation_commit_and_data + .as_ref() + .map(|(comm, _)| comm.clone()); + BatchProof { commitments: BatchCommitments { main: main_commit, quotient_chunks: quotient_commit, + random: opt_r_commit, + permutation, }, opened_values: BatchOpenedValues { instances: per_instance, }, opening_proof, + global_lookup_data: lookup_data, degree_bits: log_ext_degrees, } } + +#[instrument(name = "compute quotient polynomial", skip_all)] +// TODO: Group some arguments to remove the `allow`? 
+#[allow(clippy::too_many_arguments)] +pub fn quotient_values( + air: &A, + public_values: &[Val], + trace_domain: Domain, + quotient_domain: Domain, + trace_on_quotient_domain: &Mat, + opt_permutation_on_quotient_domain: Option<&Mat>, + lookups: &[Lookup>], + lookup_data: &[LookupData], + lookup_gadget: &LG, + permutation_challenges: &[SC::Challenge], + preprocessed_on_quotient_domain: Option<&Mat>, + alpha: SC::Challenge, + constraint_count: usize, +) -> Vec +where + SC: SGC, + A: for<'a> Air>, + Mat: Matrix> + Sync, + LG: LookupGadget + Sync, +{ + let quotient_size = quotient_domain.size(); + let main_width = trace_on_quotient_domain.width(); + let (perm_width, perm_height) = opt_permutation_on_quotient_domain + .as_ref() + .map_or((0, 0), |mat| (mat.width(), mat.height())); + + let ext_degree = SC::Challenge::DIMENSION; + + let mut sels = debug_span!("Compute Selectors") + .in_scope(|| trace_domain.selectors_on_coset(quotient_domain)); + + let qdb = log2_strict_usize(quotient_domain.size()) - log2_strict_usize(trace_domain.size()); + let next_step = 1 << qdb; + + // Pad selectors with default values if the domain is smaller than the packing width. + let pack_width = PackedVal::::WIDTH; + if quotient_size < pack_width { + let pad_len = pack_width; + // Helper to resize a specific selector vector + let pad = |v: &mut Vec<_>| v.resize(pad_len, Val::::default()); + pad(&mut sels.is_first_row); + pad(&mut sels.is_last_row); + pad(&mut sels.is_transition); + pad(&mut sels.inv_vanishing); + } + + let mut alpha_powers = alpha.powers().collect_n(constraint_count); + alpha_powers.reverse(); + // alpha powers looks like Vec ~ Vec<[F; D]> + // It's useful to also have access to the transpose of this of form [Vec; D]. + let decomposed_alpha_powers: Vec<_> = (0..SC::Challenge::DIMENSION) + .map(|i| { + alpha_powers + .iter() + .map(|x| x.as_basis_coefficients_slice()[i]) + .collect() + }) + .collect(); + (0..quotient_size) + .into_par_iter() + .step_by(PackedVal::::WIDTH) + .flat_map_iter(|i_start| { + let i_range = i_start..i_start + PackedVal::::WIDTH; + + let is_first_row = *PackedVal::::from_slice(&sels.is_first_row[i_range.clone()]); + let is_last_row = *PackedVal::::from_slice(&sels.is_last_row[i_range.clone()]); + let is_transition = *PackedVal::::from_slice(&sels.is_transition[i_range.clone()]); + let inv_vanishing = *PackedVal::::from_slice(&sels.inv_vanishing[i_range]); + + // Retrieve main trace as a matrix evaluated on the quotient domain. + let main = RowMajorMatrix::new( + trace_on_quotient_domain.vertically_packed_row_pair(i_start, next_step), + main_width, + ); + + let preprocessed = preprocessed_on_quotient_domain.map(|preprocessed| { + let preprocessed_width = preprocessed.width(); + RowMajorMatrix::new( + preprocessed.vertically_packed_row_pair(i_start, next_step), + preprocessed_width, + ) + }); + + // Retrieve permutation trace as a matrix evaluated on the quotient domain. 
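On the `decomposed_alpha_powers` layout built above: each extension element is a `D`-vector of base coefficients, and the transpose gives one base-field column per basis coefficient, which is the shape the packed folder consumes. A standalone sketch (hypothetical `D` and `u64` coefficients):

```rust
const D: usize = 4; // stand-in for SC::Challenge::DIMENSION

/// Transpose Vec<[F; D]> (one array of D base coefficients per alpha
/// power) into [Vec<F>; D] (one column per basis coefficient).
fn decompose(powers: &[[u64; D]]) -> [Vec<u64>; D] {
    core::array::from_fn(|i| powers.iter().map(|p| p[i]).collect())
}

fn main() {
    let powers = [[1, 2, 3, 4], [5, 6, 7, 8]];
    let cols = decompose(&powers);
    assert_eq!(cols[0], [1, 5]); // first basis coefficient of each power
    assert_eq!(cols[3], [4, 8]);
}
```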
+ let permutation = opt_permutation_on_quotient_domain.as_ref().map_or_else( + || RowMajorMatrix::new(vec![], 0), + |permutation_on_quotient_domain| { + let perms = (0..perm_width) + .step_by(ext_degree) + .map(|col| { + PackedChallenge::::from_basis_coefficients_fn(|i| { + PackedVal::::from_fn(|offset| { + permutation_on_quotient_domain + .get((i_start + offset) % perm_height, col + i) + .unwrap() + }) + }) + }) + .chain((0..perm_width).step_by(ext_degree).map(|col| { + PackedChallenge::::from_basis_coefficients_fn(|i| { + PackedVal::::from_fn(|offset| { + permutation_on_quotient_domain + .get((i_start + next_step + offset) % perm_height, col + i) + .unwrap() + }) + }) + })); + + RowMajorMatrix::new(perms.collect::>(), perm_width / ext_degree) + }, + ); + + let accumulator = PackedChallenge::::ZERO; + let inner_folder = ProverConstraintFolder { + main: main.as_view(), + preprocessed: preprocessed.as_ref().map(|m| m.as_view()), + public_values, + is_first_row, + is_last_row, + is_transition, + alpha_powers: &alpha_powers, + decomposed_alpha_powers: &decomposed_alpha_powers, + accumulator, + constraint_index: 0, + periodic_values: vec![], // batch-stark doesn't support periodic columns yet + }; + let packed_perm_challenges = permutation_challenges + .iter() + .map(|p_c| PackedChallenge::::from(*p_c)) + .collect::>(); + + let mut folder = ProverConstraintFolderWithLookups { + inner: inner_folder, + permutation: permutation.as_view(), + permutation_challenges: &packed_perm_challenges, + }; + A::eval_with_lookups( + air, + &mut folder, + lookups, + lookup_data + .iter() + .map(|ld| LookupData { + name: ld.name.clone(), + aux_idx: ld.aux_idx, + expected_cumulated: ld.expected_cumulated.into(), + }) + .collect::>() + .as_slice(), + lookup_gadget, + ); + + // quotient(x) = constraints(x) / Z_H(x) + let quotient = folder.inner.accumulator * inv_vanishing; + + // "Transpose" D packed base coefficients into WIDTH scalar extension coefficients. + (0..core::cmp::min(quotient_size, PackedVal::::WIDTH)) + .map(move |idx_in_packing| quotient.extract(idx_in_packing)) + }) + .collect() +} diff --git a/batch-stark/src/symbolic.rs b/batch-stark/src/symbolic.rs new file mode 100644 index 000000000..08c3cf2f8 --- /dev/null +++ b/batch-stark/src/symbolic.rs @@ -0,0 +1,110 @@ +use alloc::vec::Vec; + +use p3_air::Air; +use p3_field::{ExtensionField, Field}; +use p3_lookup::lookup_traits::{Lookup, LookupData, LookupGadget}; +use p3_uni_stark::{SymbolicAirBuilder, SymbolicExpression}; +use p3_util::log2_ceil_usize; +use tracing::instrument; + +#[instrument(name = "infer log of constraint degree", skip_all)] +pub fn get_log_num_quotient_chunks( + air: &A, + preprocessed_width: usize, + num_public_values: usize, + contexts: &[Lookup], + lookup_data: &[LookupData>], + is_zk: usize, + lookup_gadget: &LG, +) -> usize +where + F: Field, + EF: ExtensionField, + A: Air>, + SymbolicExpression: From>, + LG: LookupGadget, +{ + assert!(is_zk <= 1, "is_zk must be either 0 or 1"); + // We pad to at least degree 2, since a quotient argument doesn't make sense with smaller degrees. + let constraint_degree = (get_max_constraint_degree( + air, + preprocessed_width, + num_public_values, + contexts, + lookup_data, + lookup_gadget, + ) + is_zk) + .max(2); + + // The quotient's actual degree is approximately (max_constraint_degree - 1) n, + // where subtracting 1 comes from division by the vanishing polynomial. + // But we pad it to a power of two so that we can efficiently decompose the quotient. 
+ log2_ceil_usize(constraint_degree - 1) +} + +#[instrument(name = "infer constraint degree", skip_all, level = "debug")] +pub fn get_max_constraint_degree( + air: &A, + preprocessed_width: usize, + num_public_values: usize, + contexts: &[Lookup], + lookup_data: &[LookupData>], + lookup_gadget: &LG, +) -> usize +where + F: Field, + EF: ExtensionField, + A: Air>, + SymbolicExpression: From>, + LG: LookupGadget, +{ + let (base, extension) = get_symbolic_constraints( + air, + preprocessed_width, + num_public_values, + contexts, + lookup_data, + lookup_gadget, + ); + let base_degree = base.iter().map(|c| c.degree_multiple()).max().unwrap_or(0); + let extension_degree = extension + .iter() + .map(|c| c.degree_multiple()) + .max() + .unwrap_or(0); + base_degree.max(extension_degree) +} + +#[instrument(name = "evaluate constraints symbolically", skip_all, level = "debug")] +pub fn get_symbolic_constraints( + air: &A, + preprocessed_width: usize, + num_public_values: usize, + contexts: &[Lookup], + lookup_data: &[LookupData>], + lookup_gadget: &LG, +) -> (Vec>, Vec>) +where + F: Field, + EF: ExtensionField, + A: Air>, + SymbolicExpression: From>, + LG: LookupGadget, +{ + let num_lookups = contexts.len(); + let num_aux_cols = num_lookups * lookup_gadget.num_aux_cols(); + let num_challenges = num_lookups * lookup_gadget.num_challenges(); + let mut builder = SymbolicAirBuilder::new( + preprocessed_width, + air.width(), + num_public_values, + num_aux_cols, + num_challenges, + ); + + // Evaluate AIR and lookup constraints. + >::eval_with_lookups(air, &mut builder, contexts, lookup_data, lookup_gadget); + let base_constraints = builder.base_constraints(); + let extension_constraints = builder.extension_constraints(); + (base_constraints, extension_constraints) +} diff --git a/batch-stark/src/verifier.rs b/batch-stark/src/verifier.rs index 594655198..bd508131e 100644 --- a/batch-stark/src/verifier.rs +++ b/batch-stark/src/verifier.rs @@ -1,22 +1,33 @@ +use alloc::string::String; use alloc::vec; use alloc::vec::Vec; +use core::fmt::Debug; +use hashbrown::HashMap; use p3_air::Air; use p3_challenger::{CanObserve, FieldChallenger}; use p3_commit::{Pcs, PolynomialSpace}; use p3_field::{BasedVectorSpace, PrimeCharacteristicRing}; +use p3_lookup::folder::VerifierConstraintFolderWithLookups; +use p3_lookup::logup::LogUpGadget; +use p3_lookup::lookup_traits::{ + Lookup, LookupData, LookupError, LookupGadget, lookup_data_to_expr, +}; +use p3_matrix::dense::RowMajorMatrixView; +use p3_matrix::stack::VerticalPair; use p3_uni_stark::{ - SymbolicAirBuilder, VerificationError, VerifierConstraintFolder, get_log_quotient_degree, - recompose_quotient_from_chunks, verify_constraints, + SymbolicAirBuilder, SymbolicExpression, VerificationError, VerifierConstraintFolder, + recompose_quotient_from_chunks, }; use p3_util::zip_eq::zip_eq; use tracing::instrument; +use crate::common::{CommonData, get_perm_challenges}; use crate::config::{ - Challenge, Domain, PcsError, StarkGenericConfig as SGC, Val, observe_base_as_ext, - observe_instance_binding, + Challenge, Domain, PcsError, StarkGenericConfig as SGC, Val, observe_instance_binding, }; use crate::proof::BatchProof; +use crate::symbolic::get_log_num_quotient_chunks; #[instrument(skip_all)] pub fn verify_batch( @@ -24,79 +35,140 @@ pub fn verify_batch( airs: &[A], proof: &BatchProof, public_values: &[Vec>], + common: &CommonData, ) -> Result<(), VerificationError>> where SC: SGC, - A: Air>> + for<'a> Air>, + SymbolicExpression: From>>, + A: Air, SC::Challenge>> + + for<'a> 
Air>, Challenge: BasedVectorSpace>, { + // TODO: Extend if additional lookup gadgets are added. + let lookup_gadget = LogUpGadget::new(); + let BatchProof { commitments, opened_values, opening_proof, + global_lookup_data, degree_bits, } = proof; + let all_lookups = &common.lookups; + let pcs = config.pcs(); let mut challenger = config.initialise_challenger(); - // ZK mode is not supported yet - if config.is_zk() != 0 { - panic!("p3-batch-stark: ZK mode is not supported yet"); - } - // Sanity checks if airs.len() != opened_values.instances.len() || airs.len() != public_values.len() || airs.len() != degree_bits.len() + || airs.len() != global_lookup_data.len() { return Err(VerificationError::InvalidProofShape); } + // Check that the random commitments are/are not present depending on the ZK setting. + // - If ZK is enabled, the prover should have random commitments. + // - If ZK is not enabled, the prover should not have random commitments. + if (opened_values + .instances + .iter() + .any(|ov| ov.base_opened_values.random.is_some() != SC::Pcs::ZK)) + || (commitments.random.is_some() != SC::Pcs::ZK) + { + return Err(VerificationError::RandomizationError); + } + // Observe the number of instances up front to match the prover's transcript. let n_instances = airs.len(); - observe_base_as_ext::(&mut challenger, Val::::from_usize(n_instances)); + challenger.observe_base_as_algebra_element::>(Val::::from_usize(n_instances)); // Validate opened values shape per instance and observe per-instance binding data. - // Precompute per-instance log_quotient_degrees and quotient_degrees in one pass. - let (log_quotient_degrees, quotient_degrees): (Vec, Vec) = airs - .iter() - .zip(public_values.iter()) - .map(|(air, pv)| { - let lqd = get_log_quotient_degree::, A>(air, 0, pv.len(), config.is_zk()); - let qd = 1 << (lqd + config.is_zk()); - (lqd, qd) - }) - .unzip(); + // Precompute per-instance preprocessed widths and number of quotient chunks. + let mut preprocessed_widths = Vec::with_capacity(airs.len()); + // Number of quotient chunks per instance before ZK randomization. + let mut log_num_quotient_chunks = Vec::with_capacity(airs.len()); + // The total number of quotient chunks, including ZK randomization. 
+ let mut num_quotient_chunks = Vec::with_capacity(airs.len()); + + for (i, air) in airs.iter().enumerate() { + let pre_w = common + .preprocessed + .as_ref() + .and_then(|g| g.instances[i].as_ref().map(|m| m.width)) + .unwrap_or(0); + preprocessed_widths.push(pre_w); + + let log_num_chunks = get_log_num_quotient_chunks::, SC::Challenge, A, LogUpGadget>( + air, + pre_w, + public_values[i].len(), + &all_lookups[i], + &lookup_data_to_expr(&global_lookup_data[i]), + config.is_zk(), + &lookup_gadget, + ); + log_num_quotient_chunks.push(log_num_chunks); + + let n_chunks = 1 << (log_num_chunks + config.is_zk()); + num_quotient_chunks.push(n_chunks); + } for (i, air) in airs.iter().enumerate() { let air_width = A::width(air); let inst_opened_vals = &opened_values.instances[i]; + let inst_base_opened_vals = &inst_opened_vals.base_opened_values; // Validate trace widths match the AIR - if inst_opened_vals.trace_local.len() != air_width - || inst_opened_vals.trace_next.len() != air_width + if inst_base_opened_vals.trace_local.len() != air_width + || inst_base_opened_vals.trace_next.len() != air_width { return Err(VerificationError::InvalidProofShape); } // Validate quotient chunks structure - let quotient_degree = quotient_degrees[i]; - if inst_opened_vals.quotient_chunks.len() != quotient_degree { + let n_chunks = num_quotient_chunks[i]; + if inst_base_opened_vals.quotient_chunks.len() != n_chunks { return Err(VerificationError::InvalidProofShape); } - for chunk in &inst_opened_vals.quotient_chunks { + for chunk in &inst_base_opened_vals.quotient_chunks { if chunk.len() != Challenge::::DIMENSION { return Err(VerificationError::InvalidProofShape); } } + // Validate random commit + if inst_opened_vals + .base_opened_values + .random + .as_ref() + .is_some_and(|r_comm| r_comm.len() != SC::Challenge::DIMENSION) + { + return Err(VerificationError::RandomizationError); + } + + // Validate that any preprocessed width implied by CommonData matches the opened shapes. + let pre_w = preprocessed_widths[i]; + let pre_local_len = inst_base_opened_vals + .preprocessed_local + .as_ref() + .map_or(0, |v| v.len()); + let pre_next_len = inst_base_opened_vals + .preprocessed_next + .as_ref() + .map_or(0, |v| v.len()); + if pre_w != pre_local_len || pre_w != pre_next_len { + return Err(VerificationError::InvalidProofShape); + } + // Observe per-instance binding data: (log_ext_degree, log_degree), width, num quotient chunks. let ext_db = degree_bits[i]; let base_db = ext_db - config.is_zk(); let width = A::width(air); - observe_instance_binding::(&mut challenger, ext_db, base_db, width, quotient_degree); + observe_instance_binding::(&mut challenger, ext_db, base_db, width, n_chunks); } // Observe main commitment and public values (in instance order). @@ -105,12 +177,48 @@ where challenger.observe_slice(pv); } + // Observe preprocessed widths for each instance. If a global + // preprocessed commitment exists, observe it once. + for &pre_w in preprocessed_widths.iter() { + challenger.observe_base_as_algebra_element::>(Val::::from_usize(pre_w)); + } + if let Some(global) = &common.preprocessed { + challenger.observe(global.commitment.clone()); + } + + // Validate the shape of the lookup commitment. + let is_lookup = commitments.permutation.is_some(); + + if is_lookup != all_lookups.iter().any(|c| !c.is_empty()) { + return Err(VerificationError::InvalidProofShape); + } + + // Fetch lookups and sample their challenges. 
+ let challenges_per_instance = + get_perm_challenges::(&mut challenger, all_lookups, &lookup_gadget); + + // Then, observe the permutation tables, if any. + if is_lookup { + challenger.observe( + commitments + .permutation + .clone() + .expect("We checked that the commitment exists"), + ); + } + // Sample alpha for constraint folding let alpha = challenger.sample_algebra_element(); // Observe quotient chunks commitment challenger.observe(commitments.quotient_chunks.clone()); + // We've already checked that commitments.random is present if and only if ZK is enabled. + // Observe the random commitment if it is present. + if let Some(r_commit) = commitments.random.clone() { + challenger.observe(r_commit); + } + // Sample OOD point let zeta = challenger.sample_algebra_element(); @@ -128,18 +236,42 @@ where ) }) .unzip(); + + if let Some(random_commit) = &commitments.random { + coms_to_verify.push(( + random_commit.clone(), + ext_trace_domains + .iter() + .zip(opened_values.instances.iter()) + .map(|(domain, inst_opened_vals)| { + // We already checked that random is present for each instance when ZK is enabled. + let random_vals = inst_opened_vals.base_opened_values.random.as_ref().unwrap(); + (*domain, vec![(zeta, random_vals.clone())]) + }) + .collect::>(), + )); + } + let trace_round: Vec<_> = ext_trace_domains .iter() .zip(opened_values.instances.iter()) - .map(|(ext_dom, inst_opened_vals)| { - let zeta_next = ext_dom + .enumerate() + .map(|(i, (ext_dom, inst_opened_vals))| { + let zeta_next = trace_domains[i] .next_point(zeta) .ok_or(VerificationError::NextPointUnavailable)?; + Ok(( *ext_dom, vec![ - (zeta, inst_opened_vals.trace_local.clone()), - (zeta_next, inst_opened_vals.trace_next.clone()), + ( + zeta, + inst_opened_vals.base_opened_values.trace_local.clone(), + ), + ( + zeta_next, + inst_opened_vals.base_opened_values.trace_next.clone(), + ), ], )) }) @@ -147,23 +279,34 @@ where coms_to_verify.push((commitments.main.clone(), trace_round)); // Quotient chunks round: flatten per-instance chunks to match commit order. - // Use extended domains for the outer commit domain, with size 2^(base_db + lqd + zk), and split into 2^(lqd+zk) chunks. + // Use extended domains for the outer commit domain, with size = base_degree * num_quotient_chunks. let quotient_domains: Vec>> = (0..degree_bits.len()) .map(|i| { let ext_db = degree_bits[i]; - let base_db = ext_db - config.is_zk(); - let lqd = log_quotient_degrees[i]; - let quotient_degree = quotient_degrees[i]; + let log_num_chunks = log_num_quotient_chunks[i]; + let n_chunks = num_quotient_chunks[i]; let ext_dom = ext_trace_domains[i]; - let qdom = ext_dom.create_disjoint_domain(1 << (base_db + lqd + config.is_zk())); - qdom.split_domains(quotient_degree) + let qdom = ext_dom.create_disjoint_domain(1 << (ext_db + log_num_chunks)); + qdom.split_domains(n_chunks) }) .collect(); + // When ZK is enabled, the size of the quotient chunks' domains doubles. + let randomized_quotient_chunks_domains = quotient_domains + .iter() + .map(|doms| { + doms.iter() + .map(|dom| pcs.natural_domain_for_degree(dom.size() << (config.is_zk()))) + .collect::>() + }) + .collect::>(); + // Build the per-matrix openings for the aggregated quotient commitment. 
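// A worked size check for the chunk domains these openings are paired with,
// assuming is_zk() == 0, ext_db = 4 (a 16-row trace) and log_num_chunks = 1:
//
//     ext_dom.create_disjoint_domain(1 << (4 + 1)) // one domain of size 32
//            .split_domains(2)                     // two chunk domains of size 16
//
// With is_zk() == 1, each chunk domain is then re-derived at twice its size
// (dom.size() << 1), matching the randomized quotient chunks.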
let mut qc_round = Vec::new(); - for (i, domains) in quotient_domains.iter().enumerate() { - let inst_qcs = &opened_values.instances[i].quotient_chunks; + for (i, domains) in randomized_quotient_chunks_domains.iter().enumerate() { + let inst_qcs = &opened_values.instances[i] + .base_opened_values + .quotient_chunks; if inst_qcs.len() != domains.len() { return Err(VerificationError::InvalidProofShape); } @@ -177,6 +320,81 @@ where } coms_to_verify.push((commitments.quotient_chunks.clone(), qc_round)); + // Preprocessed rounds: a single global commitment with one matrix per + // instance that has preprocessed columns. + if let Some(global) = &common.preprocessed { + let mut pre_round = Vec::new(); + + for (matrix_index, &inst_idx) in global.matrix_to_instance.iter().enumerate() { + let pre_w = preprocessed_widths[inst_idx]; + if pre_w == 0 { + return Err(VerificationError::InvalidProofShape); + } + + let inst = &opened_values.instances[inst_idx]; + let local = inst + .base_opened_values + .preprocessed_local + .as_ref() + .ok_or(VerificationError::InvalidProofShape)?; + let next = inst + .base_opened_values + .preprocessed_next + .as_ref() + .ok_or(VerificationError::InvalidProofShape)?; + + // Validate that the preprocessed data's base degree matches what we expect. + let ext_db = degree_bits[inst_idx]; + + let meta = global.instances[inst_idx] + .as_ref() + .ok_or(VerificationError::InvalidProofShape)?; + if meta.matrix_index != matrix_index || meta.degree_bits != ext_db { + return Err(VerificationError::InvalidProofShape); + } + + let meta_db = meta.degree_bits; + let pre_domain = pcs.natural_domain_for_degree(1 << meta_db); + let zeta_next_i = trace_domains[inst_idx] + .next_point(zeta) + .ok_or(VerificationError::NextPointUnavailable)?; + + pre_round.push(( + pre_domain, + vec![(zeta, local.clone()), (zeta_next_i, next.clone())], + )); + } + + coms_to_verify.push((global.commitment.clone(), pre_round)); + } + + if is_lookup { + let permutation_commit = commitments.permutation.clone().unwrap(); + let mut permutation_round = Vec::new(); + for (i, (ext_dom, inst_opened_vals)) in ext_trace_domains + .iter() + .zip(opened_values.instances.iter()) + .enumerate() + { + if inst_opened_vals.permutation_local.len() != inst_opened_vals.permutation_next.len() { + return Err(VerificationError::InvalidProofShape); + } + if !inst_opened_vals.permutation_local.is_empty() { + let zeta_next = trace_domains[i] + .next_point(zeta) + .ok_or(VerificationError::NextPointUnavailable)?; + permutation_round.push(( + *ext_dom, + vec![ + (zeta, inst_opened_vals.permutation_local.clone()), + (zeta_next, inst_opened_vals.permutation_next.clone()), + ], + )); + } + } + coms_to_verify.push((permutation_commit, permutation_round)); + } + // Verify all openings via PCS. pcs.verify(coms_to_verify, opening_proof, &mut challenger) .map_err(VerificationError::InvalidOpeningArgument)?; @@ -189,23 +407,86 @@ where // Recompose quotient(zeta) from chunks using utility function. let quotient = recompose_quotient_from_chunks::( qc_domains, - &opened_values.instances[i].quotient_chunks, + &opened_values.instances[i] + .base_opened_values + .quotient_chunks, zeta, ); + // Recompose permutation openings from base-flattened columns into extension field columns. + // The permutation commitment is a base-flattened matrix with `width = aux_width * DIMENSION`. + // For constraint evaluation, we need an extension field matrix with width `aux_width`.
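// A small sketch of the flattening convention, assuming DIMENSION = 4 and
// aux_width = 2: the committed permutation matrix has 2 * 4 = 8 base columns,
// and an opened row `flat` of length 8 recomposes into two extension elements:
//
//     e0 = flat[0]*b0 + flat[1]*b1 + flat[2]*b2 + flat[3]*b3
//     e1 = flat[4]*b0 + flat[5]*b1 + flat[6]*b2 + flat[7]*b3
//
// where b_j is the j-th basis element, i.e. each group of DIMENSION
// consecutive base coefficients encodes one extension field element.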
+ let aux_width = all_lookups[i] + .iter() + .flat_map(|ctx| ctx.columns.iter().cloned()) + .max() + .map(|m| m + 1) + .unwrap_or(0); + + let recompose = |flat: &[Challenge]| -> Vec> { + if aux_width == 0 { + return vec![]; + } + let ext_degree = Challenge::::DIMENSION; + assert!( + flat.len() == aux_width * ext_degree, + "flattened permutation opening length ({}) must equal aux_width ({}) * DIMENSION ({})", + flat.len(), + aux_width, + ext_degree + ); + // Chunk the flattened coefficients into groups of size `dim`. + // Each chunk represents the coefficients of one extension field element. + flat.chunks_exact(ext_degree) + .map(|coeffs| { + // Dot product: sum(coeff_j * basis_j) + coeffs + .iter() + .enumerate() + .map(|(j, &coeff)| { + coeff + * Challenge::::ith_basis_element(j) + .expect("Basis element should exist") + }) + .sum() + }) + .collect() + }; + + let perm_local_ext = recompose(&opened_values.instances[i].permutation_local); + let perm_next_ext = recompose(&opened_values.instances[i].permutation_next); + // Verify constraints at zeta using utility function. let init_trace_domain = trace_domains[i]; - verify_constraints::>( - air, - &opened_values.instances[i].trace_local, - &opened_values.instances[i].trace_next, - opened_values.instances[i].preprocessed_local.as_deref(), - opened_values.instances[i].preprocessed_next.as_deref(), - &public_values[i], - init_trace_domain, + let verifier_data = VerifierData { + trace_local: &opened_values.instances[i].base_opened_values.trace_local, + trace_next: &opened_values.instances[i].base_opened_values.trace_next, + preprocessed_local: opened_values.instances[i] + .base_opened_values + .preprocessed_local + .as_ref() + .map_or(&[], |v| v), + preprocessed_next: opened_values.instances[i] + .base_opened_values + .preprocessed_next + .as_ref() + .map_or(&[], |v| v), + permutation_local: &perm_local_ext, + permutation_next: &perm_next_ext, + permutation_challenges: &challenges_per_instance[i], + lookup_data: &proof.global_lookup_data[i], + lookups: &all_lookups[i], + public_values: &public_values[i], + trace_domain: init_trace_domain, zeta, alpha, quotient, + }; + + verify_constraints_with_lookups::>( + air, + &verifier_data, + &lookup_gadget, ) .map_err(|e| match e { VerificationError::OodEvaluationMismatch { .. } => { @@ -215,5 +496,133 @@ where })?; } + let mut global_cumulative = HashMap::<&String, Vec<_>>::new(); + for data in global_lookup_data.iter().flatten() { + global_cumulative + .entry(&data.name) + .or_default() + .push(data.expected_cumulated); + } + + for (name, all_expected_cumulative) in global_cumulative { + lookup_gadget + .verify_global_final_value(&all_expected_cumulative) + .map_err(|_| { + VerificationError::LookupError(LookupError::GlobalCumulativeMismatch(Some( + name.clone(), + ))) + })?; + } + + Ok(()) +} + +/// Structure storing all data needed for verifying one instance's constraints at the out-of-domain point. 
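///
/// The verifier above builds one of these per instance, borrowing the opened
/// values at `zeta` and `g * zeta` together with the transcript challenges,
/// and hands it to `verify_constraints_with_lookups` below.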
+pub struct VerifierData<'a, SC: SGC> { + // Out-of-domain point at which constraints are evaluated + zeta: SC::Challenge, + // Challenge used to fold constraints + alpha: SC::Challenge, + // Main trace evaluated at `zeta` + trace_local: &'a [SC::Challenge], + // Main trace evaluated at the following point `g * zeta`, where `g` is the subgroup generator + trace_next: &'a [SC::Challenge], + // Preprocessed trace evaluated at `zeta` + preprocessed_local: &'a [SC::Challenge], + // Preprocessed trace evaluated at the following point `g * zeta`, where `g` is the subgroup generator + preprocessed_next: &'a [SC::Challenge], + // Permutation trace evaluated at `zeta` + permutation_local: &'a [SC::Challenge], + // Permutation trace evaluated at the following point `g * zeta`, where `g` is the subgroup generator + permutation_next: &'a [SC::Challenge], + // Challenges used for the lookup argument + permutation_challenges: &'a [SC::Challenge], + // Lookup data needed for global lookup verification + lookup_data: &'a [LookupData], + // Lookup contexts for this instance + lookups: &'a [Lookup>], + // Public values for this instance + public_values: &'a [Val], + // Trace domain for this instance + trace_domain: Domain, + // Quotient polynomial evaluated at `zeta` + quotient: SC::Challenge, +} + +/// Verifies that the folded constraints match the quotient polynomial at zeta. +/// +/// This evaluates the AIR constraints at the out-of-domain point and checks +/// that constraints(zeta) / Z_H(zeta) = quotient(zeta). +#[allow(clippy::too_many_arguments)] +pub fn verify_constraints_with_lookups<'a, SC, A, LG: LookupGadget, PcsErr: Debug>( + air: &A, + verifier_data: &VerifierData<'a, SC>, + lookup_gadget: &LG, +) -> Result<(), VerificationError> +where + SC: SGC, + A: for<'b> Air>, +{ + let VerifierData { + trace_local, + trace_next, + preprocessed_local, + preprocessed_next, + permutation_local, + permutation_next, + permutation_challenges, + lookup_data, + lookups, + public_values, + trace_domain, + zeta, + alpha, + quotient, + } = verifier_data; + + let sels = trace_domain.selectors_at_point(*zeta); + + let main = VerticalPair::new( + RowMajorMatrixView::new_row(trace_local), + RowMajorMatrixView::new_row(trace_next), + ); + + let preprocessed = VerticalPair::new( + RowMajorMatrixView::new_row(preprocessed_local), + RowMajorMatrixView::new_row(preprocessed_next), + ); + + let inner_folder = VerifierConstraintFolder { + main, + preprocessed: if preprocessed_local.is_empty() { + None + } else { + Some(preprocessed) + }, + public_values, + is_first_row: sels.is_first_row, + is_last_row: sels.is_last_row, + is_transition: sels.is_transition, + alpha: *alpha, + accumulator: SC::Challenge::ZERO, + periodic_values: vec![], // batch-stark doesn't support periodic columns yet + }; + let mut folder = VerifierConstraintFolderWithLookups { + inner: inner_folder, + permutation: VerticalPair::new( + RowMajorMatrixView::new_row(permutation_local), + RowMajorMatrixView::new_row(permutation_next), + ), + permutation_challenges, + }; + // Evaluate AIR and lookup constraints. 
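// The check that follows is the usual STARK identity at the out-of-domain
// point: with Z_H the vanishing polynomial of the trace domain,
//
//     quotient(zeta) == folded_constraints(zeta) / Z_H(zeta)
//
// so with illustrative values folded_constraints(zeta) = 6 and Z_H(zeta) = 2,
// only a claimed quotient(zeta) of 3 is accepted.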
+ A::eval_with_lookups(air, &mut folder, lookups, lookup_data, lookup_gadget); + let folded_constraints = folder.inner.accumulator; + + // Check that constraints(zeta) / Z_H(zeta) = quotient(zeta) + if folded_constraints * sels.inv_vanishing != *quotient { + return Err(VerificationError::OodEvaluationMismatch { index: None }); + } + Ok(()) } diff --git a/batch-stark/tests/fixtures/batch_stark_circle_v1.postcard b/batch-stark/tests/fixtures/batch_stark_circle_v1.postcard new file mode 100644 index 000000000..102acad49 Binary files /dev/null and b/batch-stark/tests/fixtures/batch_stark_circle_v1.postcard differ diff --git a/batch-stark/tests/fixtures/batch_stark_two_adic_v1.postcard b/batch-stark/tests/fixtures/batch_stark_two_adic_v1.postcard new file mode 100644 index 000000000..d1e2ad6eb Binary files /dev/null and b/batch-stark/tests/fixtures/batch_stark_two_adic_v1.postcard differ diff --git a/batch-stark/tests/simple.rs b/batch-stark/tests/simple.rs index 93b19dc92..0f11234e7 100644 --- a/batch-stark/tests/simple.rs +++ b/batch-stark/tests/simple.rs @@ -3,40 +3,68 @@ use core::fmt::Debug; use core::marker::PhantomData; use core::slice::from_ref; -use p3_air::{Air, AirBuilder, AirBuilderWithPublicValues, BaseAir}; +use p3_air::{Air, AirBuilder, AirBuilderWithPublicValues, BaseAir, PermutationAirBuilder}; use p3_baby_bear::{BabyBear, Poseidon2BabyBear}; -use p3_batch_stark::{StarkInstance, prove_batch, verify_batch}; +use p3_batch_stark::proof::{BatchProof, OpenedValuesWithLookups}; +use p3_batch_stark::{CommonData, StarkInstance, VerificationError, prove_batch, verify_batch}; use p3_challenger::{DuplexChallenger, HashChallenger, SerializingChallenger32}; use p3_circle::CirclePcs; use p3_commit::ExtensionMmcs; use p3_dft::Radix2DitParallel; use p3_field::extension::BinomialExtensionField; use p3_field::{Field, PrimeCharacteristicRing, PrimeField64}; -use p3_fri::{FriParameters, TwoAdicFriPcs, create_test_fri_params}; +use p3_fri::{FriParameters, HidingFriPcs, TwoAdicFriPcs, create_test_fri_params}; use p3_keccak::Keccak256Hash; +use p3_lookup::lookup_traits::{Direction, Kind, Lookup}; use p3_matrix::Matrix; use p3_matrix::dense::RowMajorMatrix; -use p3_merkle_tree::MerkleTreeMmcs; +use p3_merkle_tree::{MerkleTreeHidingMmcs, MerkleTreeMmcs}; use p3_mersenne_31::Mersenne31; use p3_symmetric::{ CompressionFunctionFromHasher, PaddingFreeSponge, SerializingHasher, TruncatedPermutation, }; -use p3_uni_stark::StarkConfig; +use p3_uni_stark::{StarkConfig, StarkGenericConfig, SymbolicAirBuilder, SymbolicExpression}; +use p3_util::log2_strict_usize; use rand::SeedableRng; use rand::rngs::SmallRng; +const TWO_ADIC_FIXTURE: &str = "tests/fixtures/batch_stark_two_adic_v1.postcard"; +const CIRCLE_FIXTURE: &str = "tests/fixtures/batch_stark_circle_v1.postcard"; + // --- Simple Fibonacci AIR and trace --- #[derive(Debug, Clone, Copy)] -struct FibonacciAir; +struct FibonacciAir { + /// log2 of the trace height; used to size preprocessed columns. + log_height: usize, + /// Index to tamper with in preprocessed trace (None = no tampering). 
+ tamper_index: Option, +} -impl BaseAir for FibonacciAir { +impl BaseAir for FibonacciAir { fn width(&self) -> usize { 2 } + + fn preprocessed_trace(&self) -> Option> { + let n = 1 << self.log_height; + let mut m = RowMajorMatrix::new(F::zero_vec(n), 1); + for (i, v) in m.values.iter_mut().enumerate().take(n) { + *v = F::from_u64(i as u64); + } + if let Some(idx) = self.tamper_index + && idx < n + { + m.values[idx] += F::ONE; + } + Some(m) + } } -impl Air for FibonacciAir { +impl Air for FibonacciAir +where + AB::F: Field, +{ fn eval(&self, builder: &mut AB) { let main = builder.main(); let pis = builder.public_values(); @@ -98,8 +126,12 @@ fn fib_trace(a: u64, b: u64, n: usize) -> RowMajorMatrix { } fn fib_n(n: usize) -> u64 { - let mut a = 0u64; - let mut b = 1u64; + fib_n_from(0, 1, n) +} + +fn fib_n_from(a0: u64, b0: u64, n: usize) -> u64 { + let mut a = a0; + let mut b = b0; for _ in 0..n { let t = a + b; a = b; @@ -109,20 +141,22 @@ fn fib_n(n: usize) -> u64 { } // --- Simple multiplication AIR and trace --- +// The AIR has 3 * `reps` columns: +// - for each rep, 3 columns: `a`, `b`, `c` where we enforce `a * b = c` +// - an extra column at the end which is a permutation of the first `a` column (used for local lookups in `MulAirLookups`) #[derive(Debug, Clone, Copy)] struct MulAir { reps: usize, - step: u64, } impl Default for MulAir { fn default() -> Self { - Self { reps: 2, step: 1 } + Self { reps: 2 } } } impl BaseAir for MulAir { fn width(&self) -> usize { - self.reps * 3 + self.reps * 3 + 1 } } impl Air for MulAir { @@ -136,29 +170,329 @@ impl Air for MulAir { let b = local[s + 1].clone(); let c = local[s + 2].clone(); builder.assert_eq(a.clone() * b.clone(), c); + + builder + .when_transition() + .assert_eq(b.clone(), next[s].clone()); builder .when_transition() - .assert_eq(a + AB::Expr::from_u64(self.step), next[s].clone()); + .assert_eq(a + b, next[s + 1].clone()); + } + } +} + +// --- MulAirLookups structure for local and global lookups --- +// This AIR is a `MulAir` that can register global lookups with `FibAirLookups`, as well as local lookups with a lookup column. Its inputs are the Fibonacci values. +// - when `is_local` is true, this AIR creates local lookups between its first column and its last column. +// The latter corresponds to a permutation of the first column: +// - it receives the first column with multiplicity 1 +// - it sends the last column (permuted values) with multiplicity 1 +// - when `is_global` is true, this AIR creates global lookups between its inputs and `FibAirLookups` AIR's inputs: +// - For each `rep`, it sends its first two columns (a,b) to the global lookup with name `global_names[rep]` and multiplicity 1 +// - `num_lookups` tracks the number of registered lookups. It is 0 when the structure is created, +// and increments every time a new lookup is registered. 
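// In LogUp terms, the local lookup described above is balanced exactly when the
// last column is a permutation of the first: with lookup challenge z (a sketch
// of the gadget's bookkeeping, not its exact API), each row contributes
//
//     + 1 / (z - a)      for the receive of column `a`, and
//     - 1 / (z - lut)    for the send of the permuted column,
//
// so the running sum over all rows cancels iff the two columns agree as multisets.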
+#[derive(Clone, Default)] +struct MulAirLookups { + air: MulAir, + is_local: bool, + is_global: bool, + num_lookups: usize, + global_names: Vec, +} + +impl MulAirLookups { + const fn new( + air: MulAir, + is_local: bool, + is_global: bool, + num_lookups: usize, + global_names: Vec, + ) -> Self { + Self { + air, + is_local, + is_global, + num_lookups, + global_names, } } } -fn mul_trace(rows: usize, reps: usize, _step: u64) -> RowMajorMatrix { +impl BaseAir for MulAirLookups { + fn width(&self) -> usize { + >::width(&self.air) + } +} + +impl Air for MulAirLookups +where + AB::Var: Debug, + AB: AirBuilder + PermutationAirBuilder + AirBuilderWithPublicValues, +{ + fn add_lookup_columns(&mut self) -> Vec { + let new_idx = self.num_lookups; + self.num_lookups += 1; + vec![new_idx] + } + + fn get_lookups(&mut self) -> Vec> { + let mut lookups = Vec::new(); + self.num_lookups = 0; + + // Create symbolic air builder to access symbolic variables + let symbolic_air_builder = + SymbolicAirBuilder::::new(0, BaseAir::::width(self), 0, 0, 0); + let symbolic_main = symbolic_air_builder.main(); + let symbolic_main_local = symbolic_main.row_slice(0).unwrap(); + + let last_idx = symbolic_air_builder.main().width() - 1; + let lut = symbolic_main_local[last_idx]; // Extra column that corresponds to a permutation of 'a' + + if self.is_global { + assert!(self.global_names.len() == self.air.reps); + } + // We add lookups rep by rep, so that we have a mix of local and global lookups, rather than having all local first then all global. + for rep in 0..self.air.reps { + if self.is_local { + let base_idx = rep * 3; + let a = symbolic_main_local[base_idx]; // First input + // Create lookup inputs for each multiplication input + // We'll create a local lookup table with integers 0 to height + let lookup_inputs = vec![ + // Lookup for 'a' against a permuted column. + ( + vec![a.into()], + SymbolicExpression::Constant(AB::F::ONE), + Direction::Receive, + ), + // Provide the range values (this would be done in the trace generation) + ( + vec![lut.into()], // This represents the range values + SymbolicExpression::Constant(AB::F::ONE), + Direction::Send, + ), + ]; + + let local_lookup = Air::::register_lookup(self, Kind::Local, &lookup_inputs); + lookups.push(local_lookup); + } + + // Global lookups: between MulAir inputs and FibAir inputs + if self.is_global { + let base_idx = rep * 3; + let a = symbolic_main_local[base_idx]; // First input + let b = symbolic_main_local[base_idx + 1]; // Second input + + // Global lookup between MulAir inputs and FibAir inputs + let lookup_inputs = vec![( + vec![a.into(), b.into()], + SymbolicExpression::Constant(AB::F::ONE), + Direction::Send, // MulAir sends data to the global lookup + )]; + + let global_lookup = Air::::register_lookup( + self, + Kind::Global(self.global_names[rep].clone()), + &lookup_inputs, + ); + lookups.push(global_lookup); + } + } + + lookups + } + + fn eval(&self, builder: &mut AB) { + self.air.eval(builder); + } +} + +fn mul_trace(rows: usize, reps: usize) -> RowMajorMatrix { assert!(rows.is_power_of_two()); - let w = reps * 3; + // The extra column corresponds to a permutation of the first column. 
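// For example, with rows = 4 and reps = 1 (so w = 4) the generated trace is:
//
//     a  b  c  lut
//     0  1  0   1
//     1  1  1   1
//     1  2  2   2
//     2  3  6   0   (the last row's lut entry is left at zero)
//
// and the lut column {1, 1, 2, 0} is a permutation of column a {0, 1, 1, 2}.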
+ let w = reps * 3 + 1; let mut v = F::zero_vec(rows * w); - // Keep a simple constant b and c = a*b - for i in 0..rows { - for rep in 0..reps { + let last_idx = w - 1; + + for rep in 0..reps { + let mut a = F::ZERO; + let mut b = F::ONE; + for i in 0..rows { let idx = i * w + rep * 3; - v[idx] = F::from_u64(i as u64); - v[idx + 1] = F::from_u64(3); + v[idx] = a; + v[idx + 1] = b; v[idx + 2] = v[idx] * v[idx + 1]; + if i != rows - 1 { + v[i * w + last_idx] = b; + } + let tmp = a + b; + a = b; + b = tmp; } } RowMajorMatrix::new(v, w) } +// --- FibAirLookups structure for global lookups --- +// This AIR is a `FibonacciAir` that can register global lookups with `MulAir` AIRs. +// - when `is_global` is true, this AIR creates global lookups between its inputs and MulAir AIR's inputs: +// - it receives its two columns (left,right) from the global lookup with name `name_and_mult.0` +// and multiplicity `name_and_mult.1`. The default for `name_and_mult` is ("MulFib", 2). +// - `num_lookups` tracks the number of registered lookups. It is 0 when the structure is created, +// and increments every time a new lookup is registered. +// - `name_and_mult` is used when `is_global` is true. If provided, it specifies the name and multiplicity of the global lookup. +// If not provided and `is_global` is true, a default name "MulFib" and multiplicity 2 is used. +#[derive(Debug, Clone)] +struct FibAirLookups { + air: FibonacciAir, + is_global: bool, + num_lookups: usize, + name_and_mult: Option<(String, u64)>, +} + +impl Default for FibAirLookups { + fn default() -> Self { + Self { + air: FibonacciAir { + log_height: 3, + tamper_index: None, + }, + is_global: false, + num_lookups: 0, + name_and_mult: None, + } + } +} + +impl FibAirLookups { + const fn new( + air: FibonacciAir, + is_global: bool, + num_lookups: usize, + name_and_mult: Option<(String, u64)>, + ) -> Self { + Self { + air, + is_global, + num_lookups, + name_and_mult, + } + } +} + +impl BaseAir for FibAirLookups { + fn width(&self) -> usize { + >::width(&self.air) + } + + fn preprocessed_trace(&self) -> Option> { + self.air.preprocessed_trace() + } +} + +impl Air for FibAirLookups { + fn add_lookup_columns(&mut self) -> Vec { + let new_idx = self.num_lookups; + self.num_lookups += 1; + vec![new_idx] + } + + fn get_lookups(&mut self) -> Vec> { + let mut lookups = Vec::new(); + self.num_lookups = 0; + + if self.is_global { + // Create symbolic air builder to access symbolic variables + let symbolic_air_builder = + SymbolicAirBuilder::::new(0, BaseAir::::width(self), 3, 0, 0); + let symbolic_main = symbolic_air_builder.main(); + let symbolic_main_local = symbolic_main.row_slice(0).unwrap(); + + // Global lookups: between FibAir inputs and MulAir inputs + // FibAir has 2 columns: left and right + let left = symbolic_main_local[0]; // left column + let right = symbolic_main_local[1]; // right column + + let (name, multiplicity) = match &self.name_and_mult { + Some((n, m)) => (n.clone(), *m), + None => ("MulFib".to_string(), 2), + }; + + // Global lookup between FibAir inputs and MulAir inputs + let lookup_inputs = vec![( + vec![left.into(), right.into()], + SymbolicExpression::Constant(AB::F::from_u64(multiplicity)), + Direction::Receive, // FibAir receives data from the global lookup + )]; + + let global_lookup = + Air::::register_lookup(self, Kind::Global(name), &lookup_inputs); + lookups.push(global_lookup); + } + + lookups + } + + fn eval(&self, builder: &mut AB) { + self.air.eval(builder); + } +} + +// --- Preprocessed multiplication AIR and trace 
--- + +#[derive(Debug, Clone, Copy)] +struct PreprocessedMulAir { + /// log2 of the trace height; used to size preprocessed columns. + log_height: usize, + /// Multiplier to use in constraint (2 for correct, 3 for incorrect test). + multiplier: u64, +} + +impl BaseAir for PreprocessedMulAir { + fn width(&self) -> usize { + 1 + } + + fn preprocessed_trace(&self) -> Option> { + let n = 1 << self.log_height; + let mut m = RowMajorMatrix::new(F::zero_vec(n), 1); + for (i, v) in m.values.iter_mut().enumerate().take(n) { + *v = F::from_u64(i as u64); + } + Some(m) + } +} + +impl Air for PreprocessedMulAir +where + AB: AirBuilder, + AB::F: Field, +{ + fn eval(&self, builder: &mut AB) { + let main = builder.main(); + let preprocessed = builder.preprocessed().expect("Preprocessed is empty?"); + + let local_main = main.row_slice(0).expect("Matrix is empty?"); + let local_prep = preprocessed.row_slice(0).expect("Preprocessed is empty?"); + + // Enforce: main[0] = multiplier * preprocessed[0] + builder.assert_eq( + local_main[0].clone(), + local_prep[0].clone() * AB::Expr::from_u64(self.multiplier), + ); + } +} + +fn preprocessed_mul_trace(rows: usize, multiplier: u64) -> RowMajorMatrix { + assert!(rows.is_power_of_two()); + let mut v = F::zero_vec(rows); + // main[0] = multiplier * preprocessed[0], where preprocessed[0] = row_index + for (i, val) in v.iter_mut().enumerate() { + *val = F::from_u64(i as u64 * multiplier); + } + RowMajorMatrix::new(v, 1) +} + // --- Config types --- type Val = BabyBear; @@ -168,11 +502,23 @@ type MyHash = PaddingFreeSponge; type MyCompress = TruncatedPermutation; type ValMmcs = MerkleTreeMmcs<::Packing, ::Packing, MyHash, MyCompress, 8>; +type HidingValMmcs = MerkleTreeHidingMmcs< + ::Packing, + ::Packing, + MyHash, + MyCompress, + SmallRng, + 8, + 4, +>; type ChallengeMmcs = ExtensionMmcs; +type HidingChallengeMmcs = ExtensionMmcs; type Challenger = DuplexChallenger; type Dft = Radix2DitParallel; -type Pcs = TwoAdicFriPcs; -type MyConfig = StarkConfig; +type MyPcs = TwoAdicFriPcs; +type HidingPcs = HidingFriPcs; +type MyConfig = StarkConfig; +type MyHidingConfig = StarkConfig; fn make_config(seed: u64) -> MyConfig { let mut rng = SmallRng::seed_from_u64(seed); @@ -183,55 +529,205 @@ fn make_config(seed: u64) -> MyConfig { let challenge_mmcs = ChallengeMmcs::new(val_mmcs.clone()); let dft = Dft::default(); let fri_params = create_test_fri_params(challenge_mmcs, 2); - let pcs = Pcs::new(dft, val_mmcs, fri_params); + let pcs = MyPcs::new(dft, val_mmcs, fri_params); let challenger = Challenger::new(perm); StarkConfig::new(pcs, challenger) } +fn make_two_adic_compat_config(seed: u64) -> MyConfig { + let mut rng = SmallRng::seed_from_u64(seed); + let perm = Perm::new_from_rng_128(&mut rng); + let hash = MyHash::new(perm.clone()); + let compress = MyCompress::new(perm.clone()); + let val_mmcs = ValMmcs::new(hash, compress); + let challenge_mmcs = ChallengeMmcs::new(val_mmcs.clone()); + let dft = Dft::default(); + let fri_params = FriParameters { + log_blowup: 2, + log_final_poly_len: 2, + num_queries: 2, + commit_proof_of_work_bits: 1, + query_proof_of_work_bits: 1, + mmcs: challenge_mmcs, + }; + let pcs = MyPcs::new(dft, val_mmcs, fri_params); + let challenger = Challenger::new(perm); + StarkConfig::new(pcs, challenger) +} + +fn make_config_zk(seed: u64) -> MyHidingConfig { + let mut rng = SmallRng::seed_from_u64(seed); + let perm = Perm::new_from_rng_128(&mut rng); + let hash = MyHash::new(perm.clone()); + let compress = MyCompress::new(perm.clone()); + let val_mmcs = 
HidingValMmcs::new(hash, compress, rng.clone()); + let challenge_mmcs = HidingChallengeMmcs::new(val_mmcs.clone()); + let dft = Dft::default(); + let fri_params = create_test_fri_params(challenge_mmcs, 2); + let pcs = HidingPcs::new(dft, val_mmcs, fri_params, 4, rng); + let challenger = Challenger::new(perm); + StarkConfig::new(pcs, challenger) +} + +type CircleVal = Mersenne31; +type CircleChallenge = BinomialExtensionField; +type CircleByteHash = Keccak256Hash; +type CircleFieldHash = SerializingHasher; +type CircleCompress = CompressionFunctionFromHasher; +type CircleValMmcs = MerkleTreeMmcs; +type CircleChallengeMmcs = ExtensionMmcs; +type CircleChallenger = SerializingChallenger32>; +type CirclePcsType = CirclePcs; +type CircleConfig = StarkConfig; + +fn make_circle_config() -> CircleConfig { + let byte_hash = CircleByteHash {}; + let field_hash = CircleFieldHash::new(byte_hash); + let compress = CircleCompress::new(byte_hash); + let val_mmcs = CircleValMmcs::new(field_hash, compress); + let challenge_mmcs = CircleChallengeMmcs::new(val_mmcs.clone()); + + let fri_params = FriParameters { + log_blowup: 1, + log_final_poly_len: 0, + num_queries: 40, + commit_proof_of_work_bits: 8, + query_proof_of_work_bits: 8, + mmcs: challenge_mmcs, + }; + + let pcs = CirclePcsType { + mmcs: val_mmcs, + fri_params, + _phantom: PhantomData, + }; + let challenger = CircleChallenger::from_hasher(vec![], byte_hash); + CircleConfig::new(pcs, challenger) +} + // Heterogeneous enum wrapper for batching #[derive(Clone, Copy)] enum DemoAir { Fib(FibonacciAir), Mul(MulAir), + PreprocessedMul(PreprocessedMulAir), } -impl BaseAir for DemoAir { +impl BaseAir for DemoAir { fn width(&self) -> usize { match self { Self::Fib(a) => >::width(a), Self::Mul(a) => >::width(a), + Self::PreprocessedMul(a) => >::width(a), + } + } + + fn preprocessed_trace(&self) -> Option> { + match self { + Self::Fib(a) => >::preprocessed_trace(a), + Self::Mul(_) => None, + Self::PreprocessedMul(a) => >::preprocessed_trace(a), + } + } +} + +// Heterogeneous enum wrapper for lookup-enabled AIRs +// `FibLookups` receives its inputs from `MulAirLookups` AIRs +// (see `FibAirLookups` and `MulAirLookups` definitions for more details) +#[derive(Clone)] +enum DemoAirWithLookups { + FibLookups(FibAirLookups), + MulLookups(MulAirLookups), +} + +impl BaseAir for DemoAirWithLookups { + fn width(&self) -> usize { + match self { + Self::FibLookups(a) => >::width(a), + Self::MulLookups(a) => >::width(a), + } + } + + fn preprocessed_trace(&self) -> Option> { + match self { + Self::FibLookups(a) => >::preprocessed_trace(a), + Self::MulLookups(a) => >::preprocessed_trace(a), } } } -impl Air for DemoAir { + +impl Air for DemoAirWithLookups +where + AB::Var: Debug, +{ + fn add_lookup_columns(&mut self) -> Vec { + match self { + Self::FibLookups(a) => >::add_lookup_columns(a), + Self::MulLookups(a) => >::add_lookup_columns(a), + } + } + + fn get_lookups(&mut self) -> Vec> { + match self { + Self::FibLookups(a) => >::get_lookups(a), + Self::MulLookups(a) => >::get_lookups(a), + } + } + + fn eval(&self, builder: &mut AB) { + match self { + Self::FibLookups(a) => >::eval(a, builder), + Self::MulLookups(a) => >::eval(a, builder), + } + } +} + +impl Air for DemoAir +where + AB::Var: Debug, + AB::F: PrimeField64, +{ fn eval(&self, b: &mut AB) { match self { Self::Fib(a) => a.eval(b), Self::Mul(a) => a.eval(b), + Self::PreprocessedMul(a) => a.eval(b), } } } -// --- Test Helper Functions --- - /// Creates a Fibonacci instance with specified log height. 
fn create_fib_instance(log_height: usize) -> (DemoAir, RowMajorMatrix, Vec) { let n = 1 << log_height; - let air = DemoAir::Fib(FibonacciAir); + let air = DemoAir::Fib(FibonacciAir { + log_height, + tamper_index: None, + }); let trace = fib_trace::(0, 1, n); let pis = vec![Val::from_u64(0), Val::from_u64(1), Val::from_u64(fib_n(n))]; (air, trace, pis) } /// Creates a multiplication instance with specified configuration. -fn create_mul_instance( +fn create_mul_instance(log_height: usize, reps: usize) -> (DemoAir, RowMajorMatrix, Vec) { + let n = 1 << log_height; + let mul = MulAir { reps }; + let air = DemoAir::Mul(mul); + let trace = mul_trace::(n, reps); + let pis = vec![]; + (air, trace, pis) +} + +/// Creates a preprocessed multiplication instance with specified configuration. +fn create_preprocessed_mul_instance( log_height: usize, - reps: usize, - step: u64, + multiplier: u64, ) -> (DemoAir, RowMajorMatrix, Vec) { let n = 1 << log_height; - let mul = MulAir { reps, step }; - let air = DemoAir::Mul(mul); - let trace = mul_trace::(n, reps, step); + let air = DemoAir::PreprocessedMul(PreprocessedMulAir { + log_height, + multiplier, + }); + let trace = preprocessed_mul_trace::(n, multiplier); let pis = vec![]; (air, trace, pis) } @@ -241,26 +737,58 @@ fn test_two_instances() -> Result<(), impl Debug> { let config = make_config(1337); let (air_fib, fib_trace, fib_pis) = create_fib_instance(4); // 16 rows - let (air_mul, mul_trace, mul_pis) = create_mul_instance(4, 2, 1); // 16 rows, 2 reps + let (air_mul, mul_trace, mul_pis) = create_mul_instance(4, 2); // 16 rows, 2 reps let instances = vec![ StarkInstance { air: &air_fib, trace: fib_trace, public_values: fib_pis.clone(), + lookups: vec![], }, StarkInstance { air: &air_mul, trace: mul_trace, public_values: mul_pis.clone(), + lookups: vec![], }, ]; - let proof = prove_batch(&config, instances); + let common = CommonData::from_instances(&config, &instances); + let proof = prove_batch(&config, &instances, &common); + + let airs = vec![air_fib, air_mul]; + let pvs = vec![fib_pis, mul_pis]; + verify_batch(&config, &airs, &proof, &pvs, &common) +} + +#[test] +fn test_two_instances_zk() -> Result<(), impl Debug> { + let config = make_config_zk(1337); + + let (air_fib, fib_trace, fib_pis) = create_fib_instance(4); // 16 rows + let (air_mul, mul_trace, mul_pis) = create_mul_instance(4, 2); // 16 rows, 2 reps + + let instances = vec![ + StarkInstance { + air: &air_fib, + trace: fib_trace, + public_values: fib_pis.clone(), + lookups: vec![], + }, + StarkInstance { + air: &air_mul, + trace: mul_trace, + public_values: mul_pis.clone(), + lookups: vec![], + }, + ]; + let common = CommonData::from_instances(&config, &instances); + let proof = prove_batch(&config, &instances, &common); let airs = vec![air_fib, air_mul]; let pvs = vec![fib_pis, mul_pis]; - verify_batch(&config, &airs, &proof, &pvs) + verify_batch(&config, &airs, &proof, &pvs, &common) } #[test] @@ -268,7 +796,7 @@ fn test_three_instances_mixed_sizes() -> Result<(), impl Debug> { let config = make_config(2025); let (air_fib16, fib16_trace, fib16_pis) = create_fib_instance(4); // 16 rows - let (air_mul8, mul8_trace, mul8_pis) = create_mul_instance(3, 2, 1); // 8 rows + let (air_mul8, mul8_trace, mul8_pis) = create_mul_instance(3, 2); // 8 rows let (air_fib8, fib8_trace, fib8_pis) = create_fib_instance(3); // 8 rows let instances = vec![ @@ -276,23 +804,27 @@ fn test_three_instances_mixed_sizes() -> Result<(), impl Debug> { air: &air_fib16, trace: fib16_trace, public_values: 
fib16_pis.clone(), + lookups: vec![], }, StarkInstance { air: &air_mul8, trace: mul8_trace, public_values: mul8_pis.clone(), + lookups: vec![], }, StarkInstance { air: &air_fib8, trace: fib8_trace, public_values: fib8_pis.clone(), + lookups: vec![], }, ]; - let proof = prove_batch(&config, instances); + let common: CommonData = CommonData::from_instances(&config, &instances); + let proof = prove_batch(&config, &instances, &common); let airs = vec![air_fib16, air_mul8, air_fib8]; let pvs = vec![fib16_pis, mul8_pis, fib8_pis]; - verify_batch(&config, &airs, &proof, &pvs) + verify_batch(&config, &airs, &proof, &pvs, &common) } #[test] @@ -306,16 +838,19 @@ fn test_invalid_public_values_rejected() -> Result<(), Box should reject + let airs = vec![air_fib]; let wrong_pvs = vec![vec![ Val::from_u64(0), Val::from_u64(1), Val::from_u64(correct_x + 1), ]]; - let res = verify_batch(&config, &[air_fib], &proof, &wrong_pvs); + let res = verify_batch(&config, &airs, &proof, &wrong_pvs, &common); assert!(res.is_err(), "Should reject wrong public values"); Ok::<_, Box>(()) } @@ -325,32 +860,143 @@ fn test_different_widths() -> Result<(), impl Debug> { let config = make_config(4242); // Mul with reps=2 (width=6) and reps=3 (width=9) - let (air_mul2, mul2_trace, mul2_pis) = create_mul_instance(3, 2, 1); // 8 rows, width=6 + let (air_mul2, mul2_trace, mul2_pis) = create_mul_instance(3, 2); // 8 rows, width=6 let (air_fib, fib_trace, fib_pis) = create_fib_instance(3); // 8 rows, width=2 - let (air_mul3, mul3_trace, mul3_pis) = create_mul_instance(4, 3, 1); // 16 rows, width=9 + let (air_mul3, mul3_trace, mul3_pis) = create_mul_instance(4, 3); // 16 rows, width=9 let instances = vec![ StarkInstance { air: &air_mul2, trace: mul2_trace, public_values: mul2_pis.clone(), + lookups: vec![], }, StarkInstance { air: &air_fib, trace: fib_trace, public_values: fib_pis.clone(), + lookups: vec![], }, StarkInstance { air: &air_mul3, trace: mul3_trace, public_values: mul3_pis.clone(), + lookups: vec![], }, ]; - let proof = prove_batch(&config, instances); + let common = CommonData::from_instances(&config, &instances); + let proof = prove_batch(&config, &instances, &common); let airs = vec![air_mul2, air_fib, air_mul3]; let pvs = vec![mul2_pis, fib_pis, mul3_pis]; - verify_batch(&config, &airs, &proof, &pvs) + verify_batch(&config, &airs, &proof, &pvs, &common) +} + +#[test] +fn test_preprocessed_tampered_fails() -> Result<(), Box> { + let config = make_config(9999); + + // Single Fibonacci instance with 8 rows and preprocessed index column. + let (air, trace, fib_pis) = create_fib_instance(3); + let instances = vec![StarkInstance { + air: &air, + trace, + public_values: fib_pis.clone(), + lookups: vec![], + }]; + + let common = CommonData::from_instances(&config, &instances); + let proof = prove_batch(&config, &instances, &common); + + // First, sanity-check that verification succeeds with matching preprocessed data. + let airs = vec![air]; + let ok_res = verify_batch(&config, &airs, &proof, from_ref(&fib_pis), &common); + assert!( + ok_res.is_ok(), + "Expected verification to succeed with matching preprocessed data" + ); + + // Now tamper with the preprocessed trace by modifying the tamper_index in the AIR + // used to derive the preprocessed commitment for verification. + // The proof was generated with the original AIR, but we verify with a tampered AIR + // that would produce different preprocessed columns. 
+ let air_tampered = DemoAir::Fib(FibonacciAir { + log_height: 3, + tamper_index: Some(2), + }); + // Create CommonData with tampered AIR to test verification failure + // Use the proof's degree_bits (equal to the log trace degrees, since this config has ZK disabled) + let degree_bits = proof.degree_bits.clone(); + let mut airs_tampered = vec![air_tampered]; + let verify_common_tampered = + CommonData::from_airs_and_degrees(&config, &mut airs_tampered, &degree_bits); + + let res = verify_batch( + &config, + &airs_tampered, + &proof, + &[fib_pis], + &verify_common_tampered, + ); + assert!( + res.is_err(), + "Verification should fail with tampered preprocessed columns" + ); + Ok(()) +} + +#[test] +fn test_preprocessed_reuse_common_multi_proofs() -> Result<(), Box> { + let config = make_config(2026); + + // Single Fibonacci instance with preprocessed index column, 8 rows. + let log_height = 3; + let n = 1 << log_height; + let air = DemoAir::Fib(FibonacciAir { + log_height, + tamper_index: None, + }); + + // First proof: standard Fibonacci trace starting from (0, 1). + let trace1 = fib_trace::(0, 1, n); + let fib_pis1 = vec![Val::from_u64(0), Val::from_u64(1), Val::from_u64(fib_n(n))]; + let instances1 = vec![StarkInstance { + air: &air, + trace: trace1, + public_values: fib_pis1.clone(), + lookups: vec![], + }]; + let common = CommonData::from_instances(&config, &instances1); + let proof1 = prove_batch(&config, &instances1, &common); + + // Verify the first proof. + let airs = vec![air]; + let res1 = verify_batch(&config, &airs, &proof1, from_ref(&fib_pis1), &common); + assert!(res1.is_ok(), "First verification should succeed"); + + // Second proof: DIFFERENT initial values (2, 3), which demonstrates CommonData is truly reusable + // across different traces with the same AIR and degree.
+ let trace2 = fib_trace::(2, 3, n); + let fib_pis2 = vec![ + Val::from_u64(2), + Val::from_u64(3), + Val::from_u64(fib_n_from(2, 3, n)), + ]; + let instances2 = vec![StarkInstance { + air: &airs[0], + trace: trace2, + public_values: fib_pis2.clone(), + lookups: vec![], + }]; + let proof2 = prove_batch(&config, &instances2, &common); + + let res2 = verify_batch(&config, &airs, &proof2, &[fib_pis2], &common); + assert!( + res2.is_ok(), + "Second verification should succeed with different trace values" + ); + + Ok(()) } #[test] @@ -364,10 +1010,45 @@ fn test_single_instance() -> Result<(), impl Debug> { air: &air_fib, trace: fib_trace, public_values: fib_pis.clone(), + lookups: vec![], }]; - let proof = prove_batch(&config, instances); - verify_batch(&config, &[air_fib], &proof, &[fib_pis]) + let common = CommonData::from_instances(&config, &instances); + let proof = prove_batch(&config, &instances, &common); + let airs = vec![air_fib]; + verify_batch(&config, &airs, &proof, &[fib_pis], &common) +} + +#[test] +fn test_mixed_preprocessed() -> Result<(), impl Debug> { + let config = make_config(8888); + + let (air_fib, fib_trace, fib_pis) = create_fib_instance(4); // 16 rows, has preprocessed + let (air_mul, mul_trace, mul_pis) = create_mul_instance(4, 2); // 16 rows, no preprocessed + + let instances = vec![ + StarkInstance { + air: &air_fib, + trace: fib_trace, + public_values: fib_pis.clone(), + lookups: vec![], + }, + StarkInstance { + air: &air_mul, + trace: mul_trace, + public_values: mul_pis.clone(), + lookups: vec![], + }, + ]; + + let common = CommonData::from_instances(&config, &instances); + + let proof = prove_batch(&config, &instances, &common); + + let airs = vec![air_fib, air_mul]; + let pvs = vec![fib_pis, mul_pis]; + + verify_batch(&config, &airs, &proof, &pvs, &common) } #[test] @@ -384,47 +1065,78 @@ fn test_invalid_trace_width_rejected() { air: &air_fib, trace: fib_trace, public_values: fib_pis.clone(), + lookups: vec![], }]; // Generate a valid proof - let valid_proof = prove_batch(&config, instances); + let common = CommonData::from_instances(&config, &instances); + let valid_proof = prove_batch(&config, &instances, &common); // Tamper with the proof: change trace_local to have wrong width let mut tampered_proof = p3_batch_stark::proof::BatchProof { commitments: BatchCommitments { main: valid_proof.commitments.main, quotient_chunks: valid_proof.commitments.quotient_chunks, + permutation: valid_proof.commitments.permutation, + random: valid_proof.commitments.random, }, opened_values: BatchOpenedValues { - instances: vec![OpenedValues { - trace_local: vec![valid_proof.opened_values.instances[0].trace_local[0]], // Wrong width: 1 instead of 2 - trace_next: valid_proof.opened_values.instances[0].trace_next.clone(), - preprocessed_local: None, - preprocessed_next: None, - quotient_chunks: valid_proof.opened_values.instances[0] - .quotient_chunks + instances: vec![OpenedValuesWithLookups { + base_opened_values: OpenedValues { + trace_local: vec![ + valid_proof.opened_values.instances[0] + .base_opened_values + .trace_local[0], + ], // Wrong width: 1 instead of 2 + trace_next: valid_proof.opened_values.instances[0] + .base_opened_values + .trace_next + .clone(), + preprocessed_local: None, + preprocessed_next: None, + quotient_chunks: valid_proof.opened_values.instances[0] + .base_opened_values + .quotient_chunks + .clone(), + random: None, + }, + permutation_local: valid_proof.opened_values.instances[0] + .permutation_local + .clone(), + permutation_next: 
valid_proof.opened_values.instances[0] + .permutation_next .clone(), - random: None, }], }, opening_proof: valid_proof.opening_proof.clone(), + global_lookup_data: valid_proof.global_lookup_data.clone(), degree_bits: valid_proof.degree_bits.clone(), }; // Verification should fail due to width mismatch - let res = verify_batch(&config, &[air_fib], &tampered_proof, from_ref(&fib_pis)); + let airs = vec![air_fib]; + let res = verify_batch(&config, &airs, &tampered_proof, from_ref(&fib_pis), &common); assert!( res.is_err(), "Verifier should reject trace with wrong width" ); // Also test wrong trace_next width - tampered_proof.opened_values.instances[0].trace_local = - valid_proof.opened_values.instances[0].trace_local.clone(); - tampered_proof.opened_values.instances[0].trace_next = - vec![valid_proof.opened_values.instances[0].trace_next[0]]; // Wrong width - - let res = verify_batch(&config, &[air_fib], &tampered_proof, from_ref(&fib_pis)); + tampered_proof.opened_values.instances[0] + .base_opened_values + .trace_local = valid_proof.opened_values.instances[0] + .base_opened_values + .trace_local + .clone(); + tampered_proof.opened_values.instances[0] + .base_opened_values + .trace_next = vec![ + valid_proof.opened_values.instances[0] + .base_opened_values + .trace_next[0], + ]; // Wrong width + + let res = verify_batch(&config, &airs, &tampered_proof, from_ref(&fib_pis), &common); assert!( res.is_err(), "Verifier should reject trace_next with wrong width" @@ -437,25 +1149,41 @@ fn test_reorder_instances_rejected() { let config = make_config(123); let (air_a, tr_a, pv_a) = create_fib_instance(4); - let (air_b, tr_b, pv_b) = create_mul_instance(4, 2, 1); + let (air_b, tr_b, pv_b) = create_mul_instance(4, 2); let instances = vec![ StarkInstance { air: &air_a, trace: tr_a, public_values: pv_a.clone(), + lookups: vec![], }, StarkInstance { air: &air_b, trace: tr_b, public_values: pv_b.clone(), + lookups: vec![], }, ]; - let proof = prove_batch(&config, instances); - - // Swap order at verify -> should fail - let res = verify_batch(&config, &[air_b, air_a], &proof, &[pv_b, pv_a]); + // DemoAir::Fib has preprocessed columns, so compute degrees for swapped verification + let degrees: Vec = instances.iter().map(|i| i.trace.height()).collect(); + let log_degrees: Vec = degrees.iter().copied().map(log2_strict_usize).collect(); + + let common = CommonData::from_instances(&config, &instances); + let proof = prove_batch(&config, &instances, &common); + + // Swap order at verify -> should fail (create new CommonData with swapped AIRs) + let mut airs_swapped = vec![air_b, air_a]; + let common_swapped = + CommonData::from_airs_and_degrees(&config, &mut airs_swapped, &log_degrees); + let res = verify_batch( + &config, + &airs_swapped, + &proof, + &[pv_b, pv_a], + &common_swapped, + ); assert!(res.is_err(), "Verifier should reject reordered instances"); } @@ -471,13 +1199,19 @@ fn test_quotient_chunk_element_len_rejected() { air: &air, trace: tr, public_values: pv.clone(), + lookups: vec![], }]; - let proof = prove_batch(&config, instances); + let common = CommonData::from_instances(&config, &instances); + let proof = prove_batch(&config, &instances, &common); let mut tampered = proof; - tampered.opened_values.instances[0].quotient_chunks[0].pop(); + tampered.opened_values.instances[0] + .base_opened_values + .quotient_chunks[0] + .pop(); - let res = verify_batch(&config, &[air], &tampered, from_ref(&pv)); + let airs = vec![air]; + let res = verify_batch(&config, &airs, &tampered, from_ref(&pv), &common); 
assert!( res.is_err(), "Verifier should reject truncated quotient chunk element" @@ -487,73 +1221,968 @@ fn test_quotient_chunk_element_len_rejected() { #[test] fn test_circle_stark_batch() -> Result<(), impl Debug> { // Test batch-stark with Circle PCS (non-two-adic field) - type Val = Mersenne31; - type Challenge = BinomialExtensionField; - - type ByteHash = Keccak256Hash; - type FieldHash = SerializingHasher; - let byte_hash = ByteHash {}; - let field_hash = FieldHash::new(byte_hash); - - type MyCompress = CompressionFunctionFromHasher; - let compress = MyCompress::new(byte_hash); - - type ValMmcs = MerkleTreeMmcs; - let val_mmcs = ValMmcs::new(field_hash, compress); - - type ChallengeMmcs = ExtensionMmcs; - let challenge_mmcs = ChallengeMmcs::new(val_mmcs.clone()); + let config = make_circle_config(); - type Challenger = SerializingChallenger32>; - - let fri_params = FriParameters { - log_blowup: 1, - log_final_poly_len: 0, - num_queries: 40, - proof_of_work_bits: 8, - mmcs: challenge_mmcs, + // Create two Fibonacci instances with different sizes. + // Here we don't use preprocessed columns (Circle PCS + plain FibonacciAir). + let air_fib1 = FibonacciAir { + log_height: 0, + tamper_index: None, }; - - type Pcs = CirclePcs; - let pcs = Pcs { - mmcs: val_mmcs, - fri_params, - _phantom: PhantomData, + let air_fib2 = FibonacciAir { + log_height: 0, + tamper_index: None, }; - let challenger = Challenger::from_hasher(vec![], byte_hash); - type MyConfig = StarkConfig; - let config = MyConfig::new(pcs, challenger); + let fib_pis1 = vec![ + CircleVal::from_u64(0), + CircleVal::from_u64(1), + CircleVal::from_u64(fib_n(8)), + ]; // F_8 = 21 + let fib_pis2 = vec![ + CircleVal::from_u64(0), + CircleVal::from_u64(1), + CircleVal::from_u64(fib_n(4)), + ]; // F_4 = 3 - // Create two Fibonacci instances with different sizes - let air_fib1 = FibonacciAir; - let air_fib2 = FibonacciAir; + let trace1 = fib_trace::(0, 1, 8); + let trace2 = fib_trace::(0, 1, 4); - let fib_pis1 = vec![Val::from_u64(0), Val::from_u64(1), Val::from_u64(fib_n(8))]; // F_8 = 21 - let fib_pis2 = vec![Val::from_u64(0), Val::from_u64(1), Val::from_u64(fib_n(4))]; // F_4 = 3 - - let trace1 = fib_trace::(0, 1, 8); - let trace2 = fib_trace::(0, 1, 4); + let airs = vec![air_fib1, air_fib2]; let instances = vec![ StarkInstance { - air: &air_fib1, + air: &airs[0], trace: trace1, public_values: fib_pis1.clone(), + lookups: vec![], }, StarkInstance { - air: &air_fib2, + air: &airs[1], trace: trace2, public_values: fib_pis2.clone(), + lookups: vec![], }, ]; // Generate batch-proof - let proof = prove_batch(&config, instances); + // Plain FibonacciAir doesn't have preprocessed columns + let common = CommonData::empty(airs.len()); + let proof = prove_batch(&config, &instances, &common); // Verify batch-proof - let airs = vec![air_fib1, air_fib2]; let public_values = vec![fib_pis1, fib_pis2]; - verify_batch(&config, &airs, &proof, &public_values) + verify_batch(&config, &airs, &proof, &public_values, &common) .map_err(|e| format!("Verification failed: {:?}", e)) } + +type CompatCase = ( + Config, + Vec, + Vec>, + Vec>, + Vec, +); + +fn two_adic_compat_case() -> CompatCase { + let config = make_two_adic_compat_config(777); + let reps = 2; + let log_n = 5; + let n = 1 << log_n; + + let mul_air = MulAir { reps }; + let mul_air_lookups = MulAirLookups::new( + mul_air, + false, + true, + 0, + vec!["MulFib".to_string(), "MulFib".to_string()], + ); + + let fibonacci_air = FibonacciAir { + log_height: log_n, + tamper_index: None, + }; + let 
fib_air_lookups = FibAirLookups::new(fibonacci_air, true, 0, None); + + let mul_trace = mul_trace::(n, reps); + let fib_trace = fib_trace::(0, 1, n); + let fib_pis = vec![Val::from_u64(0), Val::from_u64(1), Val::from_u64(fib_n(n))]; + + let air1 = DemoAirWithLookups::MulLookups(mul_air_lookups); + let air2 = DemoAirWithLookups::FibLookups(fib_air_lookups); + + let is_zk = config.is_zk(); + let log_degrees: Vec = vec![mul_trace.height(), fib_trace.height()] + .into_iter() + .map(|height| log2_strict_usize(height) + is_zk) + .collect(); + ( + config, + vec![air1, air2], + vec![mul_trace, fib_trace], + vec![vec![], fib_pis], + log_degrees, + ) +} + +fn circle_compat_case() -> CompatCase { + let config = make_circle_config(); + let reps = 2; + let log_n = 3; + let n = 1 << log_n; + + let mul_air = MulAir { reps }; + let mul_air_lookups = MulAirLookups::new( + mul_air, + false, + true, + 0, + vec!["MulFib".to_string(), "MulFib".to_string()], + ); + + let fibonacci_air = FibonacciAir { + log_height: log_n, + tamper_index: None, + }; + let fib_air_lookups = FibAirLookups::new(fibonacci_air, true, 0, None); + + let mul_trace = mul_trace::(n, reps); + let fib_trace = fib_trace::(0, 1, n); + let fib_pis = vec![ + CircleVal::from_u64(0), + CircleVal::from_u64(1), + CircleVal::from_u64(fib_n(n)), + ]; + + let air1 = DemoAirWithLookups::MulLookups(mul_air_lookups); + let air2 = DemoAirWithLookups::FibLookups(fib_air_lookups); + + let is_zk = config.is_zk(); + let log_degrees: Vec = vec![mul_trace.height(), fib_trace.height()] + .into_iter() + .map(|height| log2_strict_usize(height) + is_zk) + .collect(); + ( + config, + vec![air1, air2], + vec![mul_trace, fib_trace], + vec![vec![], fib_pis], + log_degrees, + ) +} + +fn write_fixture(path: &str, bytes: &[u8]) -> std::io::Result<()> { + let full_path = std::path::Path::new(env!("CARGO_MANIFEST_DIR")).join(path); + if let Some(parent) = full_path.parent() { + std::fs::create_dir_all(parent)?; + } + std::fs::write(full_path, bytes) +} + +fn read_fixture(path: &str) -> std::io::Result> { + let full_path = std::path::Path::new(env!("CARGO_MANIFEST_DIR")).join(path); + std::fs::read(full_path) +} + +#[test] +fn verify_two_adic_compat_fixture() -> Result<(), Box> { + let (config, mut airs, _traces, pvs, _log_degrees) = two_adic_compat_case(); + let proof_bytes = read_fixture(TWO_ADIC_FIXTURE) + .expect("Missing fixture. Run: cargo test -p p3-batch-stark --test simple -- --ignored"); + let proof: BatchProof = postcard::from_bytes(&proof_bytes)?; + let common = CommonData::from_airs_and_degrees(&config, &mut airs, &proof.degree_bits); + verify_batch(&config, &airs, &proof, &pvs, &common)?; + Ok(()) +} + +#[test] +fn verify_circle_compat_fixture() -> Result<(), Box> { + let (config, mut airs, _traces, pvs, _log_degrees) = circle_compat_case(); + let proof_bytes = read_fixture(CIRCLE_FIXTURE) + .expect("Missing fixture. 
Run: cargo test -p p3-batch-stark --test simple -- --ignored"); + let proof: BatchProof = postcard::from_bytes(&proof_bytes)?; + let common = CommonData::from_airs_and_degrees(&config, &mut airs, &proof.degree_bits); + verify_batch(&config, &airs, &proof, &pvs, &common)?; + Ok(()) +} + +#[test] +#[ignore] +fn generate_two_adic_fixture() -> Result<(), Box> { + // Regen: cargo test -p p3-batch-stark --test simple -- --ignored + let (config, mut airs, traces, pvs, log_degrees) = two_adic_compat_case(); + let common = CommonData::from_airs_and_degrees(&config, &mut airs, &log_degrees); + let instances = StarkInstance::new_multiple(&airs, &traces, &pvs, &common); + let proof = prove_batch(&config, &instances, &common); + let bytes = postcard::to_allocvec(&proof)?; + write_fixture(TWO_ADIC_FIXTURE, &bytes)?; + Ok(()) +} + +#[test] +#[ignore] +fn generate_circle_fixture() -> Result<(), Box> { + // Regen: cargo test -p p3-batch-stark --test simple -- --ignored + let (config, mut airs, traces, pvs, log_degrees) = circle_compat_case(); + let common = CommonData::from_airs_and_degrees(&config, &mut airs, &log_degrees); + let instances = StarkInstance::new_multiple(&airs, &traces, &pvs, &common); + let proof = prove_batch(&config, &instances, &common); + let bytes = postcard::to_allocvec(&proof)?; + write_fixture(CIRCLE_FIXTURE, &bytes)?; + Ok(()) +} + +#[test] +fn test_preprocessed_constraint_positive() -> Result<(), impl Debug> { + // Test that preprocessed columns are correctly used in constraints + // Enforces: main[0] = 2 * preprocessed[0] + let config = make_config(8888); + + let (air, trace, pis) = create_preprocessed_mul_instance(4, 2); // 16 rows, multiplier=2 + + let instances = vec![StarkInstance { + air: &air, + trace, + public_values: pis.clone(), + lookups: vec![], + }]; + + let common = CommonData::from_instances(&config, &instances); + let proof = prove_batch(&config, &instances, &common); + let airs = vec![air]; + verify_batch(&config, &airs, &proof, &[pis], &common) +} + +#[test] +fn test_preprocessed_constraint_negative() -> Result<(), Box> { + // Test that incorrect preprocessed constraints are caught via OOD evaluation mismatch + // Proof is generated with multiplier=2, but verification uses multiplier=3 + let config = make_config(9999); + + // Generate proof with multiplier=2 + let (air_prove, trace, pis) = create_preprocessed_mul_instance(4, 2); // 16 rows, multiplier=2 + + let instances = vec![StarkInstance { + air: &air_prove, + trace, + public_values: pis.clone(), + lookups: vec![], + }]; + + let common = CommonData::from_instances(&config, &instances); + let proof = prove_batch(&config, &instances, &common); + + // Verify with wrong multiplier=3 (should fail) + let air_verify = DemoAir::PreprocessedMul(PreprocessedMulAir { + log_height: 4, + multiplier: 3, // Wrong multiplier! + }); + let mut airs = vec![air_verify]; + let degree_bits = proof.degree_bits.clone(); + let verify_common = CommonData::from_airs_and_degrees(&config, &mut airs, &degree_bits); + + let res = verify_batch(&config, &airs, &proof, &[pis], &verify_common); + let err = res.expect_err( + "Verification should fail when preprocessed constraint multiplier doesn't match", + ); + match err { + VerificationError::OodEvaluationMismatch { ..
} => (), + _ => panic!("unexpected error: {err:?}"), + } + Ok(()) +} + +#[test] +fn test_mixed_preprocessed_constraints() -> Result<(), impl Debug> { + // Test batching PreprocessedMulAir (uses pp in constraints) with MulAir and FibonacciAir + // This exercises matrix_to_instance routing and point-scheduling with heterogeneous instances + // while preprocessed values actually affect constraints. + let config = make_config(1111); + + let (air_fib, fib_trace, fib_pis) = create_fib_instance(4); // 16 rows, has pp but doesn't use in constraints + let (air_mul, mul_trace, mul_pis) = create_mul_instance(4, 2); // 16 rows, no pp + let (air_pp_mul, pp_mul_trace, pp_mul_pis) = create_preprocessed_mul_instance(4, 2); // 16 rows, uses pp in constraints + + let instances = vec![ + StarkInstance { + air: &air_fib, + trace: fib_trace, + public_values: fib_pis.clone(), + lookups: vec![], + }, + StarkInstance { + air: &air_mul, + trace: mul_trace, + public_values: mul_pis.clone(), + lookups: vec![], + }, + StarkInstance { + air: &air_pp_mul, + trace: pp_mul_trace, + public_values: pp_mul_pis.clone(), + lookups: vec![], + }, + ]; + + let common = CommonData::from_instances(&config, &instances); + let proof = prove_batch(&config, &instances, &common); + + let airs = vec![air_fib, air_mul, air_pp_mul]; + let pvs = vec![fib_pis, mul_pis, pp_mul_pis]; + verify_batch(&config, &airs, &proof, &pvs, &common) +} + +// Tests for local and global lookup handling in multi-stark. + +/// Test with local lookups only using MulAirLookups +#[test] +fn test_batch_stark_one_instance_local_only() -> Result<(), impl Debug> { + let config = make_config(2024); + + let reps = 1; + // Create MulAir instance with local lookups configuration + let mul_air = MulAir { reps }; + let mul_air_lookups = MulAirLookups::new(mul_air, true, false, 0, vec![]); // local only + + let log_height = 3; // 8 rows + let mul_trace = mul_trace::(1 << log_height, reps); + + let mut airs = [DemoAirWithLookups::MulLookups(mul_air_lookups)]; + + // Get lookups from the lookup-enabled AIRs + let common_data = + CommonData::::from_airs_and_degrees(&config, &mut airs, &[log_height]); + + let instances = StarkInstance::new_multiple(&airs, &[mul_trace], &[vec![]], &common_data); + + let proof = prove_batch(&config, &instances, &common_data); + + let pvs = vec![vec![]]; + verify_batch(&config, &airs, &proof, &pvs, &common_data) +} + +/// Test with local lookups only, which fail due to wrong permutation column. +/// The failure occurs in `check_constraints` during proof generation, since it fails the last local constraint (the final local sum is not zero). +#[cfg(debug_assertions)] +#[test] +#[should_panic(expected = "constraints had nonzero value on row 7")] +fn test_batch_stark_one_instance_local_fails() { + let config = make_config(2024); + + let reps = 2; + // Create MulAir instance with local lookups configuration + let mul_air = MulAir { reps }; + let mul_air_lookups = MulAirLookups::new(mul_air, true, false, 0, vec![]); // local only + + let log_height = 3; // 8 rows + let mut mul_trace = mul_trace::(1 << log_height, reps); + + // Tamper with the permutation column to cause lookup failure. 
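+    // (A hedged note on the index below: the trace is row-major, and assuming MulAir
+    // lays out three columns per repetition, `reps * 3` is the first cell of row 1.
+    // A single corrupted cell is enough: the running lookup sum no longer telescopes
+    // to zero on the last row, which is exactly what the expected panic checks.)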
+ mul_trace.values[reps * 3] = Val::from_u64(9999); + + let mut airs = [DemoAirWithLookups::MulLookups(mul_air_lookups)]; + + // Get lookups from the lookup-enabled AIRs + let common_data = + CommonData::::from_airs_and_degrees(&config, &mut airs, &[log_height]); + + let instances = StarkInstance::new_multiple(&airs, &[mul_trace], &[vec![]], &common_data); + + prove_batch(&config, &instances, &common_data); +} + +/// Test with local lookups only, which fail due to wrong permutation column. +/// The verification fails, since the last local constraint fails (the final local sum is not zero). +#[cfg(not(debug_assertions))] +#[test] +#[should_panic(expected = "OodEvaluationMismatch")] +fn test_batch_stark_one_instance_local_fails() { + let config = make_config(2024); + + let reps = 2; + let log_height = 3; // 8 rows + // Create MulAir instance with local lookups configuration + let mul_air = MulAir { reps }; + let mul_air_lookups = MulAirLookups::new(mul_air, true, false, 0, vec![]); // local only + + let mut mul_trace = mul_trace::(1 << log_height, reps); + + // Tamper with the permutation column to cause lookup failure. + mul_trace.values[reps * 3] = Val::from_u64(9999); + + let mut airs = [DemoAirWithLookups::MulLookups(mul_air_lookups)]; + + // Get lookups from the lookup-enabled AIRs + let common_data = + CommonData::::from_airs_and_degrees(&config, &mut airs, &[log_height]); + + let instances = StarkInstance::new_multiple(&airs, &[mul_trace], &[vec![]], &common_data); + + let proof = prove_batch(&config, &instances, &common_data); + + verify_batch(&config, &airs, &proof, &[vec![]], &common_data).unwrap(); +} + +/// Test with local lookups only using MulAirLookups +#[test] +fn test_batch_stark_local_lookups_only() -> Result<(), impl Debug> { + let config = make_config(2024); + + let log_height = 4; // 16 rows + let height = 1 << log_height; + // Create MulAir instance with local lookups configuration + let mul_air = MulAir { reps: 2 }; + let mul_air_lookups = MulAirLookups::new(mul_air, true, false, 0, vec![]); // local only + let fib_air_lookups = FibAirLookups::new( + FibonacciAir { + log_height, + tamper_index: None, + }, + false, + 0, + None, + ); // no lookups + + let mul_trace = mul_trace::(height, 2); + let fib_trace = fib_trace::(0, 1, 16); + let fib_pis = vec![Val::from_u64(0), Val::from_u64(1), Val::from_u64(fib_n(16))]; + + // Use the enum wrapper for heterogeneous types + let air1 = DemoAirWithLookups::MulLookups(mul_air_lookups); + let air2 = DemoAirWithLookups::FibLookups(fib_air_lookups); + + let mut airs = [air1, air2]; + + // Get lookups from the lookup-enabled AIRs + let common_data = CommonData::::from_airs_and_degrees( + &config, + &mut airs, + &[log_height, log_height], + ); + + let instances = StarkInstance::new_multiple( + &airs, + &[mul_trace, fib_trace], + &[vec![], fib_pis.clone()], + &common_data, + ); + + let proof = prove_batch(&config, &instances, &common_data); + + let pvs = vec![vec![], fib_pis]; + verify_batch(&config, &airs, &proof, &pvs, &common_data) +} + +/// Test with global lookups only using MulAirLookups and FibAirLookups +#[test] +fn test_batch_stark_global_lookups_only() -> Result<(), impl Debug> { + let config = make_config(2025); + + let reps = 2; + // Create instances with global lookups configuration + let mul_air = MulAir { reps }; + // Both global lookups (for each rep) look into the same FibAir inputs, so they share the same name. 
+ let mul_air_lookups = MulAirLookups::new( + mul_air, + false, + true, + 0, + vec!["MulFib".to_string(), "MulFib".to_string()], + ); // global only + + let log_n = 3; + let n = 1 << log_n; + + let fibonacci_air = FibonacciAir { + log_height: log_n, + tamper_index: None, + }; + let fib_air_lookups = FibAirLookups::new(fibonacci_air, true, 0, None); // global lookups + + let mul_trace = mul_trace::(n, 2); + let fib_trace = fib_trace::(0, 1, n); + let fib_pis = vec![Val::from_u64(0), Val::from_u64(1), Val::from_u64(fib_n(n))]; + + // Use the enum wrapper for heterogeneous types + let air1 = DemoAirWithLookups::MulLookups(mul_air_lookups); + let air2 = DemoAirWithLookups::FibLookups(fib_air_lookups); + + // Get lookups from the lookup-enabled AIRs + let mut airs = [air1, air2]; + let common_data = + CommonData::::from_airs_and_degrees(&config, &mut airs, &[log_n, log_n]); + + let instances = StarkInstance::new_multiple( + &airs, + &[mul_trace, fib_trace], + &[vec![], fib_pis.clone()], + &common_data, + ); + + let proof = prove_batch(&config, &instances, &common_data); + + let pvs = vec![vec![], fib_pis]; + verify_batch(&config, &airs, &proof, &pvs, &common_data) +} + +/// Test with both local and global lookups using MulAirLookups and FibAirLookups +#[test] +fn test_batch_stark_both_lookups() -> Result<(), impl Debug> { + let config = make_config(2026); + + let reps = 2; + // Create instances with both local and global lookups configuration + let mul_air = MulAir { reps }; + let mul_air_lookups = MulAirLookups::new( + mul_air, + true, + true, + 0, + vec!["MulFib".to_string(), "MulFib".to_string()], + ); // both + + let log_height = 4; + let height = 1 << log_height; + + let fibonacci_air = FibonacciAir { + log_height, + tamper_index: None, + }; + let fib_air_lookups = FibAirLookups::new(fibonacci_air, true, 0, None); // global lookups + + let mul_trace = mul_trace::(height, 2); + let fib_trace = fib_trace::(0, 1, height); + let fib_pis = vec![Val::from_u64(0), Val::from_u64(1), Val::from_u64(fib_n(16))]; + + // Use the enum wrapper for heterogeneous types + let air1 = DemoAirWithLookups::MulLookups(mul_air_lookups); + let air2 = DemoAirWithLookups::FibLookups(fib_air_lookups); + + let mut airs = [air1, air2]; + // Get lookups from the lookup-enabled AIRs + let common_data = CommonData::::from_airs_and_degrees( + &config, + &mut airs, + &[log_height, log_height], + ); + + let instances = StarkInstance::new_multiple( + &airs, + &[mul_trace, fib_trace], + &[vec![], fib_pis.clone()], + &common_data, + ); + + let proof = prove_batch(&config, &instances, &common_data); + + let pvs = vec![vec![], fib_pis]; + verify_batch(&config, &airs, &proof, &pvs, &common_data) +} + +/// Test with both local and global lookups using MulAirLookups and FibAirLookups, with ZK mode activated +#[test] +fn test_batch_stark_both_lookups_zk() -> Result<(), impl Debug> { + let config = make_config_zk(2026); + + let reps = 2; + // Create instances with both local and global lookups configuration + let mul_air = MulAir { reps }; + let mul_air_lookups = MulAirLookups::new( + mul_air, + true, + true, + 0, + vec!["MulFib".to_string(), "MulFib".to_string()], + ); // both + + let log_height = 4; + let height = 1 << log_height; + + let fibonacci_air = FibonacciAir { + log_height, + tamper_index: None, + }; + let fib_air_lookups = FibAirLookups::new(fibonacci_air, true, 0, None); // global lookups + + let mul_trace = mul_trace::(height, 2); + let fib_trace = fib_trace::(0, 1, height); + let fib_pis = vec![Val::from_u64(0), 
Val::from_u64(1), Val::from_u64(fib_n(16))];
+
+    // Use the enum wrapper for heterogeneous types
+    let air1 = DemoAirWithLookups::MulLookups(mul_air_lookups);
+    let air2 = DemoAirWithLookups::FibLookups(fib_air_lookups);
+
+    let mut airs = [air1, air2];
+    // Get lookups from the lookup-enabled AIRs
+    let common_data = CommonData::::from_airs_and_degrees(
+        &config,
+        &mut airs,
+        &[log_height + config.is_zk(), log_height + config.is_zk()],
+    );
+
+    let instances = StarkInstance::new_multiple(
+        &airs,
+        &[mul_trace, fib_trace],
+        &[vec![], fib_pis.clone()],
+        &common_data,
+    );
+
+    let proof = prove_batch(&config, &instances, &common_data);
+
+    let pvs = vec![vec![], fib_pis];
+    verify_batch(&config, &airs, &proof, &pvs, &common_data)
+}
+
+#[test]
+#[should_panic(expected = "LookupError(GlobalCumulativeMismatch(Some(\"MulFib2\"))")]
+fn test_batch_stark_failed_global_lookup() {
+    let config = make_config(2025);
+
+    let reps = 2;
+    // Create instances with a global lookups configuration.
+    let mul_air = MulAir { reps };
+    // MulAir uses two different names for its reps, which will create two separate global lookups.
+    let mul_air_lookups = MulAirLookups::new(
+        mul_air,
+        false,
+        true,
+        0,
+        vec!["MulFib1".to_string(), "MulFib2".to_string()], // Different names!
+    );
+    // This creates a mismatch: MulAir sends to "MulFib1" and "MulFib2",
+    // but FibAir only receives from "MulFib1".
+    let log_n = 3;
+    let n = 1 << log_n;
+    let fibonacci_air = FibonacciAir {
+        log_height: log_n,
+        tamper_index: None,
+    };
+    let fib_air_lookups =
+        FibAirLookups::new(fibonacci_air, true, 0, Some(("MulFib1".to_string(), 1)));
+
+    let mul_trace = mul_trace::(n, 2);
+    let fib_trace = fib_trace::(0, 1, n);
+
+    let fib_pis = vec![Val::from_u64(0), Val::from_u64(1), Val::from_u64(fib_n(n))];
+    let traces = vec![mul_trace, fib_trace];
+    let pvs = vec![vec![], fib_pis];
+    // Use the enum wrapper for heterogeneous types
+    let air1 = DemoAirWithLookups::MulLookups(mul_air_lookups);
+    let air2 = DemoAirWithLookups::FibLookups(fib_air_lookups);
+
+    // Get lookups from the lookup-enabled AIRs
+    let mut airs = [air1, air2];
+    let common_data =
+        CommonData::::from_airs_and_degrees(&config, &mut airs, &[log_n, log_n]);
+
+    let instances = StarkInstance::new_multiple(&airs, &traces, &pvs, &common_data);
+
+    let proof = prove_batch(&config, &instances, &common_data);
+
+    // This should panic with GlobalCumulativeMismatch because:
+    // - MulAir sends values to the "MulFib1" and "MulFib2" lookups,
+    // - FibAir only receives from the "MulFib1" lookup,
+    // - so the global cumulative sums won't match.
+    verify_batch(&config, &airs, &proof, &pvs, &common_data).unwrap();
+}
+
+/// Test mixing instances with lookups and instances without lookups.
+/// We have the following instances:
+/// - MulAir with both local and global lookups (looking into two different FibAir instances, one per rep)
+/// - FibAir without lookups
+/// - FibAir with global lookups (sends values for the first rep of MulAir)
+/// - MulAir without lookups
+/// - FibAir with global lookups (sends values for the second rep of MulAir)
+/// - MulAir with local lookups only
+#[test]
+fn test_batch_stark_mixed_lookups() -> Result<(), impl Debug> {
+    let config = make_config(2027);
+
+    let reps = 2;
+
+    // Create instances with different lookup configurations (see the argument key below):
+    let mul_air_with_lookups = MulAir { reps };
+    // This AIR has two global lookups (one per rep) with two different names, plus two local lookups (one per rep).
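+    // (Argument key for MulAirLookups::new, inferred from its use throughout these
+    // tests: the inner air, whether local lookups are enabled, whether global lookups
+    // are enabled, an index/offset whose meaning is defined by the test helper (0
+    // everywhere here), and one global lookup name per rep.)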
+    let mul_air_lookups = MulAirLookups::new(
+        mul_air_with_lookups,
+        true,
+        true,
+        0,
+        vec!["MulFib1".to_string(), "MulFib2".to_string()],
+    );
+    // This AIR has no lookups.
+    let mul_air_no_lookups = MulAirLookups::new(mul_air_with_lookups, false, false, 0, vec![]);
+    // This AIR only has local lookups.
+    let mul_air_local_lookups = MulAirLookups::new(mul_air_with_lookups, true, false, 0, vec![]); // local lookups only
+
+    let log_n1 = 4; // 16 rows
+    let log_n2 = 3; // 8 rows
+    let n1 = 1 << log_n1;
+    let n2 = 1 << log_n2;
+
+    let fib_air_lookups = FibonacciAir {
+        log_height: log_n1,
+        tamper_index: None,
+    };
+
+    let fib_air_no_lookups = FibonacciAir {
+        log_height: log_n2,
+        tamper_index: None,
+    };
+
+    // The mul air with global lookups looks into two different Fibonacci instances,
+    // so we have to create two separate FibAir instances with different global lookup names.
+    let fib_air_with_lookups1 =
+        FibAirLookups::new(fib_air_lookups, true, 0, Some(("MulFib1".to_string(), 1))); // global lookups
+    let fib_air_with_lookups2 =
+        FibAirLookups::new(fib_air_lookups, true, 0, Some(("MulFib2".to_string(), 1))); // global lookups
+    let fib_air_no_lookups = FibAirLookups::new(fib_air_no_lookups, false, 0, None); // no lookups
+
+    // Generate traces. The airs with and without lookups have different heights.
+    let mul_with_lookups_trace = mul_trace::(n1, reps);
+    let fib_with_lookups_trace = fib_trace::(0, 1, n1);
+    let mul_no_lookups_trace = mul_trace::(n2, reps);
+    let fib_no_lookups_trace = fib_trace::(0, 1, n2);
+
+    // Public values
+    let fib_with_lookups_pis = vec![Val::from_u64(0), Val::from_u64(1), Val::from_u64(fib_n(16))];
+    let fib_no_lookups_pis = vec![Val::from_u64(0), Val::from_u64(1), Val::from_u64(fib_n(8))];
+
+    // Create lookup-enabled AIRs
+    let air_mul_with_lookups = DemoAirWithLookups::MulLookups(mul_air_lookups);
+    let air_fib_with_lookups1 = DemoAirWithLookups::FibLookups(fib_air_with_lookups1);
+    let air_fib_with_lookups2 = DemoAirWithLookups::FibLookups(fib_air_with_lookups2);
+    let air_mul_with_local_lookups = DemoAirWithLookups::MulLookups(mul_air_local_lookups);
+
+    // Create non-lookup AIRs
+    let air_mul_no_lookups = DemoAirWithLookups::MulLookups(mul_air_no_lookups);
+    let air_fib_no_lookups = DemoAirWithLookups::FibLookups(fib_air_no_lookups);
+
+    let mut all_airs = vec![
+        air_mul_with_lookups,
+        air_fib_no_lookups,
+        air_fib_with_lookups1,
+        air_mul_no_lookups,
+        air_fib_with_lookups2,
+        air_mul_with_local_lookups,
+    ];
+
+    // Get all lookups
+    let common_data = CommonData::::from_airs_and_degrees(
+        &config,
+        &mut all_airs,
+        &[log_n1, log_n2, log_n1, log_n2, log_n1, log_n1],
+    );
+
+    let traces = vec![
+        mul_with_lookups_trace.clone(),
+        fib_no_lookups_trace,
+        fib_with_lookups_trace.clone(),
+        mul_no_lookups_trace,
+        fib_with_lookups_trace,
+        mul_with_lookups_trace,
+    ];
+
+    // Get all public values
+    let all_pvs = vec![
+        vec![],                       // mul with lookups
+        fib_no_lookups_pis,           // fib no lookups
+        fib_with_lookups_pis.clone(), // fib with lookups
+        vec![],                       // mul no lookups
+        fib_with_lookups_pis,         // fib with lookups
+        vec![],                       // mul with local lookups
+    ];
+
+    // Create instances - mixing lookup and non-lookup instances
+    let instances = StarkInstance::new_multiple(&all_airs, &traces, &all_pvs, &common_data);
+
+    let proof = prove_batch(&config, &instances, &common_data);
+
+    // Verify with mixed AIRs
+    verify_batch(&config, &all_airs, &proof, &all_pvs, &common_data)
+}
+
+// Single table with local lookups involving the Lagrange selectors. Since the selectors are not normalized,
+// we need to add multiplicity columns and multiply them by the selectors.
+#[derive(Debug, Clone, Copy)]
+struct SingleTableLocalLookupAir {
+    num_lookups: usize,
+}
+
+impl SingleTableLocalLookupAir {
+    const fn new() -> Self {
+        Self { num_lookups: 0 }
+    }
+}
+
+impl BaseAir for SingleTableLocalLookupAir {
+    fn width(&self) -> usize {
+        7 // 7 columns: 3 sender columns (1 per selector type), the lookup table, and 3 multiplicity columns (1 per selector type)
+    }
+}
+
+impl Air for SingleTableLocalLookupAir
+where
+    AB::Var: Debug,
+    AB: AirBuilder + PermutationAirBuilder + AirBuilderWithPublicValues,
+{
+    fn add_lookup_columns(&mut self) -> Vec {
+        let new_idx = self.num_lookups;
+        self.num_lookups += 1;
+        vec![new_idx]
+    }
+
+    fn get_lookups(&mut self) -> Vec> {
+        let mut lookups = Vec::new();
+        self.num_lookups = 0;
+
+        // Create a symbolic air builder to access symbolic variables.
+        let symbolic_air_builder =
+            SymbolicAirBuilder::::new(0, BaseAir::::width(self), 0, 0, 0);
+        let symbolic_main = symbolic_air_builder.main();
+        let symbolic_main_local = symbolic_main.row_slice(0).unwrap();
+
+        let sender_col1 = symbolic_main_local[0]; // Column that sends values
+        let sender_col2 = symbolic_main_local[1]; // Column that sends values
+        let sender_col3 = symbolic_main_local[2]; // Column that sends values
+        let lookup_table_col = symbolic_main_local[3]; // Column that receives lookups
+        let mul1 = symbolic_main_local[4]; // Multiplicity column for the first selector
+        let mul2 = symbolic_main_local[5]; // Multiplicity column for the second selector
+        let mul3 = symbolic_main_local[6]; // Multiplicity column for the third selector
+
+        // Local lookups: each sender column looks up into the lookup table column,
+        // gated by one of the three Lagrange selectors.
+        let lookup_inputs1 = vec![
+            // Sender: send values from the sender column with `is_first_row` multiplicity.
+            (
+                vec![sender_col1.into()],
+                symbolic_air_builder.is_first_row(),
+                Direction::Receive,
+            ),
+            // Receiver: receive values in the lookup table column with multiplicity `mul1 * is_first_row`.
+            // Note that we need to multiply by `is_first_row` here because the Lagrange selectors are not normalized.
+            (
+                vec![lookup_table_col.into()],
+                symbolic_air_builder.is_first_row() * mul1,
+                Direction::Send,
+            ),
+        ];
+
+        let lookup_inputs2 = vec![
+            // Sender: send values from the sender column with `is_transition` multiplicity.
+            (
+                vec![sender_col2.into()],
+                symbolic_air_builder.is_transition(),
+                Direction::Receive,
+            ),
+            // Receiver: receive values in the lookup table column with multiplicity `mul2 * is_transition`.
+            // Note that we need to multiply by `is_transition` here because the Lagrange selectors are not normalized.
+            (
+                vec![lookup_table_col.into()],
+                symbolic_air_builder.is_transition() * mul2,
+                Direction::Send,
+            ),
+        ];
+
+        let lookup_inputs3 = vec![
+            // Sender: send values from the sender column with `is_last_row` multiplicity.
+            (
+                vec![sender_col3.into()],
+                symbolic_air_builder.is_last_row(),
+                Direction::Receive,
+            ),
+            // Receiver: receive values in the lookup table column with multiplicity `mul3 * is_last_row`.
+            // Note that we need to multiply by `is_last_row` here because the Lagrange selectors are not normalized.
+            (
+                vec![lookup_table_col.into()],
+                symbolic_air_builder.is_last_row() * mul3,
+                Direction::Send,
+            ),
+        ];
+
+        let all_lookup_inputs = vec![lookup_inputs1, lookup_inputs2, lookup_inputs3];
+
+        for lookup_inputs in all_lookup_inputs {
+            let local_lookup = Air::::register_lookup(self, Kind::Local, &lookup_inputs);
+            lookups.push(local_lookup);
+        }
+
+        lookups
+    }
+
+    fn eval(&self, _builder: &mut AB) {
+        // No additional constraints needed for this simple table.
+    }
+}
+
+// Trace generation function for the single table with local lookups.
+fn single_table_local_lookup_trace(height: usize) -> RowMajorMatrix {
+    assert!(height.is_power_of_two());
+    assert!(height >= 2); // Need at least some transition rows and a last row
+
+    let width = 7;
+    let mut v = F::zero_vec(height * width); // 7 columns
+    // Column 0: every row holds height - 1.
+    // Column 1: counts down from height - 1 to 1, and the last row holds 11.
+    // Column 2: every row holds 11 except the last row, which is 0.
+    // Column 3: lookup table column, counting down from height - 1 to 0.
+    // Column 4 (mult1): 1 at row 0, 0 elsewhere.
+    // Column 5 (mult2): 1 everywhere except the last row, which is 0.
+    // Column 6 (mult3): 1 at the last row, 0 elsewhere.
+    for i in 0..height {
+        // Sender columns:
+        // Column 0
+        v[i * width] = F::from_u64((height - 1) as u64);
+        // Column 1
+        v[i * width + 1] = if i < height - 1 {
+            F::from_u64((height - i - 1) as u64)
+        } else {
+            F::from_u64(11) // Last row value
+        };
+        // Column 2
+        if i != height - 1 {
+            v[i * width + 2] = F::from_u64(11);
+        }
+        // Column 3: lookup table column
+        v[i * width + 3] = F::from_u64((height - i - 1) as u64);
+        // Multiplicity columns
+        v[i * width + 4] = if i == 0 { F::ONE } else { F::ZERO }; // mult1: is_first_row
+        v[i * width + 5] = if i < height - 1 { F::ONE } else { F::ZERO }; // mult2: is_transition
+        v[i * width + 6] = if i == height - 1 { F::ONE } else { F::ZERO }; // mult3: is_last_row
+    }
+
+    RowMajorMatrix::new(v, width)
+}
+
+/// Test with a single table doing local lookups between its own columns.
+/// The goal of this test is to check that the use of (non-normalized) Lagrange selectors does not cause issues.
+#[test]
+fn test_single_table_local_lookup() -> Result<(), impl Debug> {
+    let config = make_config(2029);
+
+    let log_height = 3;
+    let height = 1 << log_height; // Single table with 8 rows
+
+    // Create the instance.
+    let air = SingleTableLocalLookupAir::new();
+
+    let mut airs = [air];
+
+    // Get lookups from the lookup-enabled AIR.
+    let common_data =
+        CommonData::::from_airs_and_degrees(&config, &mut airs, &[log_height]);
+
+    // Generate the trace.
+    let trace = single_table_local_lookup_trace::(height);
+
+    let traces = vec![trace];
+    let pvs = vec![vec![]]; // No public values
+
+    let instances = StarkInstance::new_multiple(&airs, &traces, &pvs, &common_data);
+
+    let proof = prove_batch(&config, &instances, &common_data);
+
+    verify_batch(&config, &airs, &proof, &pvs, &common_data)
+}
diff --git a/blake3-air/CHANGELOG.md b/blake3-air/CHANGELOG.md
new file mode 100644
index 000000000..246a3c19c
--- /dev/null
+++ b/blake3-air/CHANGELOG.md
@@ -0,0 +1,26 @@
+# Changelog
+
+All notable changes to this project will be documented in this file.
+
+The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/),
+and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0.html).
+ +## [Unreleased] +## [0.4.2] - 2026-01-05 +### Authors + +## [0.4.1] - 2025-12-18 +### Authors + +## [0.4.0] - 2025-12-12 +### Merged PRs +- Chore: add descriptions to all sub-crate manifests (#906) (Himess) +- Replace `Copy` with `Clone` in `AirBuilder`'s `Var` (#930) (Linda Guiga) +- Clippy: small step (#1102) (Thomas Coratger) +- Clippy: add semicolon_if_nothing_returned (#1107) (Thomas Coratger) + +### Authors +- Himess +- Linda Guiga +- Thomas Coratger + diff --git a/blake3/CHANGELOG.md b/blake3/CHANGELOG.md new file mode 100644 index 000000000..ccd796c9d --- /dev/null +++ b/blake3/CHANGELOG.md @@ -0,0 +1,23 @@ +# Changelog + +All notable changes to this project will be documented in this file. + +The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/), +and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0.html). + +## [Unreleased] +## [0.4.2] - 2026-01-05 +### Authors + +## [0.4.1] - 2025-12-18 +### Authors + +## [0.4.0] - 2025-12-12 +### Merged PRs +- Chore: add descriptions to all sub-crate manifests (#906) (Himess) +- Clippy: small step (#1102) (Thomas Coratger) + +### Authors +- Himess +- Thomas Coratger + diff --git a/bn254/CHANGELOG.md b/bn254/CHANGELOG.md new file mode 100644 index 000000000..3017e45f0 --- /dev/null +++ b/bn254/CHANGELOG.md @@ -0,0 +1,45 @@ +# Changelog + +All notable changes to this project will be documented in this file. + +The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/), +and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0.html). + +## [Unreleased] +## [0.4.2] - 2026-01-05 +### Merged PRs +- Refactor: add public const `new` and `new_array` for all fields (#1222) (Adrian Hamelink) + +### Authors +- Adrian Hamelink + +## [0.4.1] - 2025-12-18 +### Authors + +## [0.4.0] - 2025-12-12 +### Merged PRs +- Porting BN254 to our own code base (#913) (AngusG) +- Interleaved Montgomery Multiplication (#915) (AngusG) +- Chore: add descriptions to all sub-crate manifests (#906) (Himess) +- Custom halve impl for Bn254 (#919) (AngusG) +- From_biguint method for Bn254 (#914) (AngusG) +- Bn254 Binary Euclidean Inversion (#920) (AngusG) +- Fast GCD Inverse for Goldilocks (#925) (AngusG) +- Adding Macros to remove boilerplate impls (#943) (AngusG) +- Adding Degree 8 extensions for KoalaBear and BabyBear. 
(#954) (AngusG) +- Move halve to ring (#969) (AngusG) +- Must Use (#996) (AngusG) +- Refactor: remove redundant clones in crypto modules (#1086) (Skylar Ray) +- Clippy: small step (#1102) (Thomas Coratger) +- Clippy: add nursery (#1103) (Thomas Coratger) +- Clippy: add semicolon_if_nothing_returned (#1107) (Thomas Coratger) +- Clippy: add `needless_pass_by_value` (#1112) (Thomas Coratger) +- Refactor: deduplicate field JSON serialization tests (#1162) (andrewshab) + +### Authors +- AngusG +- Himess +- Skylar Ray +- Thomas Coratger +- andrewshab + diff --git a/bn254/Cargo.toml b/bn254/Cargo.toml index d1f3ef1ec..2a9660bb7 100644 --- a/bn254/Cargo.toml +++ b/bn254/Cargo.toml @@ -21,7 +21,7 @@ rand.workspace = true serde = { workspace = true, features = ["derive"] } [dev-dependencies] -p3-field-testing.workspace = true +p3-field-testing = { path = "../field-testing" } criterion.workspace = true serde_json.workspace = true diff --git a/bn254/src/bn254.rs b/bn254/src/bn254.rs index ee1b7c2bc..5cde66d20 100644 --- a/bn254/src/bn254.rs +++ b/bn254/src/bn254.rs @@ -61,6 +61,28 @@ pub struct Bn254 { } impl Bn254 { + /// Create a new field element from any `[u64; 4]` in little-endian limb order. + /// + /// Any value is accepted and automatically reduced modulo P. + #[inline] + pub const fn new(value: [u64; 4]) -> Self { + Self::new_monty(monty_mul(BN254_MONTY_R_SQ, value)) + } + + /// Convert a `[[u64; 4]; N]` array to an array of field elements. + /// + /// Const version of `input.map(Bn254::new)`. + #[inline] + pub const fn new_array(input: [[u64; 4]; N]) -> [Self; N] { + let mut output = [Self::ZERO; N]; + let mut i = 0; + while i < N { + output[i] = Self::new(input[i]); + i += 1; + } + output + } + /// Creates a new BN254 field element from an array of 4 u64's. 
/// /// The array is assumed to correspond to a 254-bit integer less than P and is interpreted as @@ -508,7 +530,7 @@ impl TwoAdicField for Bn254 { #[cfg(test)] mod tests { - use p3_field_testing::{test_field, test_prime_field}; + use p3_field_testing::{test_field, test_field_json_serialization, test_prime_field}; use super::*; @@ -557,28 +579,7 @@ mod tests { let f_r_minus_1 = F::NEG_ONE; let f_r_minus_2 = F::NEG_ONE + F::NEG_ONE; - let f_serialized = serde_json::to_string(&f_100).unwrap(); - let f_deserialized: F = serde_json::from_str(&f_serialized).unwrap(); - assert_eq!(f_100, f_deserialized); - - let f_1_serialized = serde_json::to_string(&f_1).unwrap(); - let f_1_deserialized: F = serde_json::from_str(&f_1_serialized).unwrap(); - let f_1_serialized_again = serde_json::to_string(&f_1_deserialized).unwrap(); - let f_1_deserialized_again: F = serde_json::from_str(&f_1_serialized_again).unwrap(); - assert_eq!(f_1, f_1_deserialized); - assert_eq!(f_1, f_1_deserialized_again); - - let f_2_serialized = serde_json::to_string(&f_2).unwrap(); - let f_2_deserialized: F = serde_json::from_str(&f_2_serialized).unwrap(); - assert_eq!(f_2, f_2_deserialized); - - let f_r_minus_1_serialized = serde_json::to_string(&f_r_minus_1).unwrap(); - let f_r_minus_1_deserialized: F = serde_json::from_str(&f_r_minus_1_serialized).unwrap(); - assert_eq!(f_r_minus_1, f_r_minus_1_deserialized); - - let f_r_minus_2_serialized = serde_json::to_string(&f_r_minus_2).unwrap(); - let f_r_minus_2_deserialized: F = serde_json::from_str(&f_r_minus_2_serialized).unwrap(); - assert_eq!(f_r_minus_2, f_r_minus_2_deserialized); + test_field_json_serialization(&[f_100, f_1, f_2, f_r_minus_1, f_r_minus_2]); } const ZERO: Bn254 = Bn254::ZERO; diff --git a/bn254/src/helpers.rs b/bn254/src/helpers.rs index b4aaccfae..aab2e98aa 100644 --- a/bn254/src/helpers.rs +++ b/bn254/src/helpers.rs @@ -5,6 +5,22 @@ use p3_util::gcd_inner; use crate::{BN254_MONTY_MU_64, BN254_PRIME}; +/// Const lexicographic comparison: returns true if a < b (little-endian limbs). +#[inline] +pub(crate) const fn const_lt(a: [u64; 4], b: [u64; 4]) -> bool { + // Compare from most significant limb to least significant + if a[3] != b[3] { + return a[3] < b[3]; + } + if a[2] != b[2] { + return a[2] < b[2]; + } + if a[1] != b[1] { + return a[1] < b[1]; + } + a[0] < b[0] +} + /// Convert a fixed-size array of u64s (little-endian) to a BigUint. #[inline] pub(crate) fn to_biguint(value: [u64; N]) -> BigUint { @@ -29,12 +45,14 @@ const fn carrying_add(lhs: u64, rhs: u64, carry: bool) -> (u64, bool) { /// Compute `lhs + rhs`, returning a bool if overflow occurred. #[inline] -pub(crate) fn wrapping_add(lhs: [u64; N], rhs: [u64; N]) -> ([u64; N], bool) { +pub(crate) const fn wrapping_add(lhs: [u64; 4], rhs: [u64; 4]) -> ([u64; 4], bool) { let mut carry = false; - let mut output = [0; N]; + let mut output = [0; 4]; - for i in 0..N { + let mut i = 0; + while i < 4 { (output[i], carry) = carrying_add(lhs[i], rhs[i], carry); + i += 1; } (output, carry) @@ -57,12 +75,15 @@ const fn borrowing_sub(lhs: u64, rhs: u64, borrow: bool) -> (u64, bool) { /// Compute `lhs - rhs`, returning a bool if underflow occurred. 
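/// For example (illustrative): `wrapping_sub([0; 4], [1, 0, 0, 0])` borrows through
/// every limb and returns `([u64::MAX; 4], true)`, i.e. the 2^256 wrap-around value
/// together with an underflow flag.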
#[inline] -pub(crate) fn wrapping_sub(lhs: [u64; N], rhs: [u64; N]) -> ([u64; N], bool) { +pub(crate) const fn wrapping_sub(lhs: [u64; 4], rhs: [u64; 4]) -> ([u64; 4], bool) { let mut borrow = false; - let mut output = [0; N]; + let mut output = [0; 4]; - for i in 0..N { + // use manual `while` loop to enable `const` + let mut i = 0; + while i < 4 { (output[i], borrow) = borrowing_sub(lhs[i], rhs[i], borrow); + i += 1; } (output, borrow) @@ -72,7 +93,7 @@ pub(crate) fn wrapping_sub(lhs: [u64; N], rhs: [u64; N]) -> ([u6 /// /// Returns the lowest output limb and the remaining limbs in a 4-limb array. #[inline] -pub(crate) fn mul_small(lhs: [u64; 4], rhs: u64) -> (u64, [u64; 4]) { +pub(crate) const fn mul_small(lhs: [u64; 4], rhs: u64) -> (u64, [u64; 4]) { let mut output = [0u64; 4]; let mut acc; @@ -84,13 +105,17 @@ pub(crate) fn mul_small(lhs: [u64; 4], rhs: u64) -> (u64, [u64; 4]) { acc >>= 64; // Process the remaining limbs. - for i in 1..4 { + // use manual `while` loop to enable `const` + let mut i = 1; + while i < 4 { // Product of u64's < 2^128 - 2^64 so this addition will not overflow. acc += (lhs[i] as u128) * (rhs as u128); output[i - 1] = acc as u64; // acc < 2^64 acc >>= 64; + + i += 1; } output[3] = acc as u64; @@ -101,7 +126,7 @@ pub(crate) fn mul_small(lhs: [u64; 4], rhs: u64) -> (u64, [u64; 4]) { /// /// Returns the lowest output limb and the remaining limbs in a 4-limb array. #[inline] -pub(crate) fn mul_small_and_acc(lhs: [u64; 4], rhs: u64, add: [u64; 4]) -> (u64, [u64; 4]) { +pub(crate) const fn mul_small_and_acc(lhs: [u64; 4], rhs: u64, add: [u64; 4]) -> (u64, [u64; 4]) { let mut output = [0u64; 4]; let mut acc; @@ -113,13 +138,17 @@ pub(crate) fn mul_small_and_acc(lhs: [u64; 4], rhs: u64, add: [u64; 4]) -> (u64, acc >>= 64; // Process the remaining limbs. - for i in 1..4 { + // use manual `while` loop to enable `const` + let mut i = 1; + while i < 4 { // Product of u64's < 2^128 - 2^64 so this addition will not overflow. acc += (lhs[i] as u128) * (rhs as u128) + (add[i] as u128); output[i - 1] = acc as u64; // acc < 2^64 acc >>= 64; + + i += 1; } output[3] = acc as u64; @@ -165,11 +194,11 @@ pub(crate) fn mul_small_and_acc(lhs: [u64; 4], rhs: u64, add: [u64; 4]) -> (u64, /// The incoming number is split into 5 64-bit limbs with the /// first limb separated out as it will be treated differently. #[inline] -fn interleaved_monty_reduction(acc0: u64, acc: [u64; 4]) -> [u64; 4] { +const fn interleaved_monty_reduction(acc0: u64, acc: [u64; 4]) -> [u64; 4] { let t = acc0.wrapping_mul(BN254_MONTY_MU_64); let (_, u) = mul_small(BN254_PRIME, t); - let (sub, under) = wrapping_sub::<4>(acc, u); + let (sub, under) = wrapping_sub(acc, u); if under { let (sub_corr, _) = wrapping_add(sub, BN254_PRIME); sub_corr @@ -185,10 +214,10 @@ fn interleaved_monty_reduction(acc0: u64, acc: [u64; 4]) -> [u64; 4] { /// The output is a 4-limb array representing the result of `lhs * rhs * 2^{-256} mod P` /// guaranteed to be in the range `[0, P)`. #[inline] -pub(crate) fn monty_mul(lhs: [u64; 4], rhs: [u64; 4]) -> [u64; 4] { +pub(crate) const fn monty_mul(lhs: [u64; 4], rhs: [u64; 4]) -> [u64; 4] { // We need to ensure that `lhs < P` otherwise it's possible for the // algorithm to fail and produce a value which is too large. 
- debug_assert!(lhs.iter().rev().cmp(BN254_PRIME.iter().rev()) == core::cmp::Ordering::Less); + debug_assert!(const_lt(lhs, BN254_PRIME)); // Our accumulator starts at 0 so we start with mul_small let (acc0, acc) = mul_small(lhs, rhs[0]); @@ -317,15 +346,20 @@ const fn conditional_neg(a: &mut [u64; 4], sign: u64) { /// The result is a 320-bit signed integer represented as 4 64-bit limbs of positive integers /// and an i64 for the highest limb. #[inline] -fn linear_comb_signed(a: [u64; 4], b: [u64; 4], f: i64, g: i64) -> ([u64; 4], i64) { +const fn linear_comb_signed(a: [u64; 4], b: [u64; 4], f: i64, g: i64) -> ([u64; 4], i64) { let mut output = [0_u64; 4]; let mut carry = (a[0] as i128) * (f as i128) + (b[0] as i128) * (g as i128); output[0] = carry as u64; carry >>= 64; - for i in 1..4 { + + // use manual `while` loop to enable `const` + let mut i = 1; + while i < 4 { carry += (a[i] as i128) * (f as i128) + (b[i] as i128) * (g as i128); output[i] = carry as u64; carry >>= 64; + + i += 1; } (output, carry as i64) @@ -334,15 +368,20 @@ fn linear_comb_signed(a: [u64; 4], b: [u64; 4], f: i64, g: i64) -> ([u64; 4], i6 /// Compute the linear combination `af + bg` where `a, b` are `256-bit` positive integers /// and `f, g` are `64-bit` positive integers. #[inline] -fn linear_comb_unsigned(a: [u64; 4], b: [u64; 4], f: u64, g: u64) -> [u64; 5] { +const fn linear_comb_unsigned(a: [u64; 4], b: [u64; 4], f: u64, g: u64) -> [u64; 5] { let mut output = [0_u64; 5]; let mut carry = (a[0] as u128) * (f as u128) + (b[0] as u128) * (g as u128); output[0] = carry as u64; carry >>= 64; - for i in 1..4 { + + // use manual `while` loop to enable `const` + let mut i = 1; + while i < 4 { carry += (a[i] as u128) * (f as u128) + (b[i] as u128) * (g as u128); output[i] = carry as u64; carry >>= 64; + + i += 1; } output[4] = carry as u64; @@ -355,7 +394,7 @@ fn linear_comb_unsigned(a: [u64; 4], b: [u64; 4], f: u64, g: u64) -> [u64; 5] { /// If the output would be negative, it is negated using 2's complement. A i64 is returned indicating /// if the negation was applied. The i64 is `-1` if the output was negated `0` otherwise. #[inline] -fn linear_comb_div(a: [u64; 4], b: [u64; 4], f: i64, g: i64, k: usize) -> ([u64; 4], i64) { +const fn linear_comb_div(a: [u64; 4], b: [u64; 4], f: i64, g: i64, k: usize) -> ([u64; 4], i64) { let (product, hi_limb) = linear_comb_signed(a, b, f, g); let mut output = [0_u64; 4]; @@ -373,7 +412,7 @@ fn linear_comb_div(a: [u64; 4], b: [u64; 4], f: i64, g: i64, k: usize) -> ([u64; } #[inline] -fn linear_comb_monty_red(a: [u64; 4], b: [u64; 4], f: i64, g: i64) -> [u64; 4] { +const fn linear_comb_monty_red(a: [u64; 4], b: [u64; 4], f: i64, g: i64) -> [u64; 4] { // Get the signs and absolute values of f and g let s_f = f >> 63; let s_g = g >> 63; @@ -387,7 +426,9 @@ fn linear_comb_monty_red(a: [u64; 4], b: [u64; 4], f: i64, g: i64) -> [u64; 4] { let b_signed = if s_g == -1 { b_sub } else { b }; let product = linear_comb_unsigned(a_signed, b_signed, abs_f, abs_g); - interleaved_monty_reduction(product[0], product[1..].try_into().unwrap()) + // manually construct `acc` for `const` + let acc = [product[1], product[2], product[3], product[4]]; + interleaved_monty_reduction(product[0], acc) } /// An adjustment factor equal to `2^{1030} mod P` @@ -414,7 +455,7 @@ pub(crate) const BN254_2_POW_1030: [u64; 4] = [ /// This implementation is also impervious to side-channel attacks as an added bonus. 
In principle we could make
 /// the average case a little faster if we didn't care about this property, but the worst case would be unchanged and
 /// potentially even slightly worse.
-pub(crate) fn gcd_inversion(input: [u64; 4]) -> [u64; 4] {
+pub(crate) const fn gcd_inversion(input: [u64; 4]) -> [u64; 4] {
     // The standard binary GCD inversion algorithm for a field `P` has
     // a single input `input` and
     // four internal variables: `a`, `u`, `b`, and `v`.
@@ -450,8 +491,11 @@
     const ROUND_SIZE: usize = 31; // If you want to change the round size, you will also need to modify the constant in get_approximation.
     const FINAL_ROUND_SIZE: usize = 41;
     const NUM_ROUNDS: usize = 15;
-    assert_eq!(NUM_ROUNDS * ROUND_SIZE + FINAL_ROUND_SIZE, 506);
-    for _ in 0..NUM_ROUNDS {
+    const { assert!(NUM_ROUNDS * ROUND_SIZE + FINAL_ROUND_SIZE == 506) };
+
+    // use manual `while` loop to enable `const`
+    let mut i = 0;
+    while i < NUM_ROUNDS {
         // Find the a and b approximations for this set of inner rounds.
         // If both a and b now fit in a u64, return those. Otherwise take the bottom
         // 31 bits and the top 33 bits and assemble into a u64.
@@ -489,6 +533,8 @@
         b = new_b;
         u = new_u;
         v = new_v;
+
+        i += 1;
     }
 
     // a and b are now guaranteed to fit in a u64 so we can just use the inner loop
diff --git a/challenger/CHANGELOG.md b/challenger/CHANGELOG.md
new file mode 100644
index 000000000..e40319379
--- /dev/null
+++ b/challenger/CHANGELOG.md
@@ -0,0 +1,37 @@
+# Changelog
+
+All notable changes to this project will be documented in this file.
+
+The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/),
+and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0.html).
+
+## [Unreleased]
+## [0.4.2] - 2026-01-05
+### Merged PRs
+- SIMD optimization for proof-of-work grinding in DuplexChallenger (#1208) (Utsav Sharma)
+
+### Authors
+- Utsav Sharma
+
+## [0.4.1] - 2025-12-18
+### Authors
+
+## [0.4.0] - 2025-12-12
+### Merged PRs
+- Chore: add descriptions to all sub-crate manifests (#906) (Himess)
+- Add a comment about non-uniformity in `CanSampleBits` (#1026) (Tom Wambsgans)
+- Clippy: small step (#1102) (Thomas Coratger)
+- Clippy: add semicolon_if_nothing_returned (#1107) (Thomas Coratger)
+- Challenger: add `observe_base_as_algebra_element` to `FieldChallenger` trait (#1152) (Thomas Coratger)
+- Challenger: add unit tests for `observe_base_as_algebra_element` (#1155) (Thomas Coratger)
+- Challenger: add `observe_algebra_elements` method (#1176) (Thomas Coratger)
+- Feat: add PoW phase for batching in FRI commit phase (#1164) (Zach Langley)
+- Implement uniform sampling of bits from field elements (#1050) (Sebastian)
+
+### Authors
+- Himess
+- Sebastian
+- Thomas Coratger
+- Tom Wambsgans
+- Zach Langley
+
diff --git a/challenger/Cargo.toml b/challenger/Cargo.toml
index 1c2533655..a6921a788 100644
--- a/challenger/Cargo.toml
+++ b/challenger/Cargo.toml
@@ -12,14 +12,15 @@
 [dependencies]
 p3-field.workspace = true
 p3-maybe-rayon.workspace = true
+p3-monty-31.workspace = true
 p3-symmetric.workspace = true
 p3-util.workspace = true
 tracing.workspace = true
 
 [dev-dependencies]
-p3-baby-bear.workspace = true
-p3-goldilocks.workspace = true
+p3-baby-bear = { path = "../baby-bear" }
+p3-goldilocks = { path = "../goldilocks" }
 
 [lints]
 workspace = true
diff --git a/challenger/src/duplex_challenger.rs b/challenger/src/duplex_challenger.rs
index 4800ada58..894b49709 100644
--- a/challenger/src/duplex_challenger.rs
+++ b/challenger/src/duplex_challenger.rs
@@ -1,10 +1,13 @@
 use alloc::vec;
 use alloc::vec::Vec;
+use core::error::Error;
+use core::fmt::{Display, Formatter};
 
 use p3_field::{BasedVectorSpace, Field, PrimeField64};
+use p3_monty_31::{MontyField31, MontyParameters};
 use p3_symmetric::{CryptographicPermutation, Hash};
 
-use crate::{CanObserve, CanSample, CanSampleBits, FieldChallenger};
+use crate::{CanObserve, CanSample, CanSampleBits, CanSampleUniformBits, FieldChallenger};
 
 /// A generic duplex sponge challenger over a finite field, used for generating deterministic
 /// challenges from absorbed inputs.
@@ -204,12 +207,183 @@
     }
 }
 
+/// Trait for fields that support uniform bit sampling optimizations.
+pub trait UniformSamplingField {
+    /// Maximum number of bits we can sample with negligible probability (about 1/P)
+    /// of triggering an error or requiring a resample.
+    const MAX_SINGLE_SAMPLE_BITS: usize;
+    /// An array storing the largest value `m_k` for each `k` in [0, 63], such that `m_k`
+    /// is a multiple of `2^k` and less than P. `m_k` is defined as:
+    ///
+    /// \( m_k = ⌊P / 2^k⌋ · 2^k \)
+    ///
+    /// This is used as a rejection sampling threshold (or error trigger) when sampling
+    /// random bits from uniformly sampled field elements. As long as we take the `k`
+    /// least significant bits of elements drawn from [0, m_k), we sample from exactly
+    /// `m_k` equally likely values. As `m_k` is divisible by 2^k, each of the least
+    /// significant `k` bits has exactly the same number of zeroes and ones, leading to
+    /// uniform sampling.
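+    ///
+    /// For example (illustrative, not the shipped constants): KoalaBear has
+    /// P = 2^31 - 2^24 + 1, so P ≡ 1 (mod 2^k) for every k <= 24 and m_k = P - 1 there.
+    /// A single sample is then rejected only for the one value P - 1, i.e. with
+    /// probability 1/P, which is why sampling up to 24 bits is essentially free.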
+    const SAMPLING_BITS_M: [u64; 64];
+}
+
+// Provide a blanket implementation for Monty31 fields here, which forwards the
+// implementation of the constants to the generic parameter `MP`,
+// for which we implement the trait (KoalaBear, BabyBear).
+impl UniformSamplingField for MontyField31
+where
+    MP: UniformSamplingField + MontyParameters,
+{
+    const MAX_SINGLE_SAMPLE_BITS: usize = MP::MAX_SINGLE_SAMPLE_BITS;
+    const SAMPLING_BITS_M: [u64; 64] = MP::SAMPLING_BITS_M;
+}
+
+// The set of strategies we currently support for sampling; implementations for each are below.
+/// A zero-sized struct representing the "resample" strategy.
+pub(super) struct ResampleOnRejection;
+/// A zero-sized struct representing the "error" strategy.
+pub(super) struct ErrorOnRejection;
+
+/// Custom error raised when resampling is required for uniform bits but disabled
+/// via the `ErrorOnRejection` strategy.
+#[derive(Debug)]
+pub struct ResamplingError {
+    /// The sampled value
+    value: u64,
+    /// The target value we need to be smaller than
+    m: u64,
+}
+
+impl Display for ResamplingError {
+    fn fmt(&self, f: &mut Formatter<'_>) -> core::fmt::Result {
+        write!(
+            f,
+            "Encountered value {0}, which requires resampling for uniform bits as it is not smaller than {1}, but resampling is not enabled.",
+            self.value, self.m
+        )
+    }
+}
+
+impl Error for ResamplingError {}
+
+/// A trait that defines a strategy for handling out-of-range samples.
+pub(super) trait BitSamplingStrategy
+where
+    F: PrimeField64,
+    P: CryptographicPermutation<[F; W]>,
+{
+    /// Whether to error instead of resampling when a drawn value is too large.
+    const ERROR_ON_REJECTION: bool;
+
+    #[inline]
+    fn sample_value(
+        challenger: &mut DuplexChallenger,
+        m: u64,
+    ) -> Result {
+        let mut result: F = challenger.sample();
+        if Self::ERROR_ON_REJECTION {
+            if result.as_canonical_u64() >= m {
+                return Err(ResamplingError {
+                    value: result.as_canonical_u64(),
+                    m,
+                });
+            }
+        } else {
+            while result.as_canonical_u64() >= m {
+                result = challenger.sample();
+            }
+        }
+        Ok(result)
+    }
+}
+
+/// Implement rejection sampling.
+impl BitSamplingStrategy for ResampleOnRejection
+where
+    F: PrimeField64,
+    P: CryptographicPermutation<[F; W]>,
+{
+    const ERROR_ON_REJECTION: bool = false;
+}
+
+/// Implement erroring on a required rejection.
+impl BitSamplingStrategy for ErrorOnRejection
+where
+    F: PrimeField64,
+    P: CryptographicPermutation<[F; W]>,
+{
+    const ERROR_ON_REJECTION: bool = true;
+}
+
+impl DuplexChallenger
+where
+    F: UniformSamplingField + PrimeField64,
+    P: CryptographicPermutation<[F; WIDTH]>,
+{
+    /// Generic implementation for uniform bit sampling, parameterized by a strategy.
+    #[inline]
+    fn sample_uniform_bits_with_strategy(
+        &mut self,
+        bits: usize,
+    ) -> Result
+    where
+        S: BitSamplingStrategy,
+    {
+        if bits == 0 {
+            return Ok(0);
+        }
+        assert!(bits < usize::BITS as usize, "bit count must be valid");
+        assert!(
+            (1u64 << bits) < F::ORDER_U64,
+            "bit count exceeds field order"
+        );
+        let m = F::SAMPLING_BITS_M[bits];
+        if bits <= F::MAX_SINGLE_SAMPLE_BITS {
+            // Fast path: only one sample is needed for sufficient uniformity.
+            let rand_f = S::sample_value(self, m);
+            Ok(rand_f?.as_canonical_u64() as usize & ((1 << bits) - 1))
+        } else {
+            // Slow path: sample twice to construct the required number of bits.
+            // This reduces the bias introduced by a single, larger sample.
+            let half_bits1 = bits / 2;
+            let half_bits2 = bits - half_bits1;
+            // Sample the first chunk of bits.
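+            // (Why this is uniform: each chunk is drawn against its own m_k threshold,
+            // so each is uniform over its half-range, and concatenating two independent
+            // uniform chunks yields a uniform `bits`-bit value.)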
+ let rand1 = S::sample_value(self, F::SAMPLING_BITS_M[half_bits1]); + let chunk1 = rand1?.as_canonical_u64() as usize & ((1 << half_bits1) - 1); + // Sample the second chunk of bits. + let rand2 = S::sample_value(self, F::SAMPLING_BITS_M[half_bits2]); + let chunk2 = rand2?.as_canonical_u64() as usize & ((1 << half_bits2) - 1); + + // Combine the chunks. + Ok(chunk1 | (chunk2 << half_bits1)) + } + } +} + +impl CanSampleUniformBits + for DuplexChallenger +where + F: UniformSamplingField + PrimeField64, + P: CryptographicPermutation<[F; WIDTH]>, +{ + fn sample_uniform_bits( + &mut self, + bits: usize, + ) -> Result { + if RESAMPLE { + self.sample_uniform_bits_with_strategy::(bits) + } else { + self.sample_uniform_bits_with_strategy::(bits) + } + } +} + #[cfg(test)] mod tests { use core::iter; use p3_baby_bear::BabyBear; use p3_field::PrimeCharacteristicRing; + use p3_field::extension::BinomialExtensionField; use p3_goldilocks::Goldilocks; use p3_symmetric::Permutation; @@ -220,6 +394,8 @@ mod tests { const RATE: usize = 16; type G = Goldilocks; + type EF2G = BinomialExtensionField; + type BB = BabyBear; #[derive(Clone)] @@ -471,4 +647,139 @@ mod tests { // Output buffer should match expected state from duplexing assert_eq!(chal.output_buffer, expected_output); } + + #[test] + fn test_observe_base_as_algebra_element_consistency_with_direct_observe() { + // Create two identical challengers to verify behavior equivalence + let mut chal1 = + DuplexChallenger::::new(TestPermutation {}); + let mut chal2 = + DuplexChallenger::::new(TestPermutation {}); + + let base_val = G::from_u8(99); + + // Method 1: Use the convenience method for base-to-extension observation + chal1.observe_base_as_algebra_element::(base_val); + + // Method 2: Manually convert to extension field then observe + let ext_val = EF2G::from(base_val); + chal2.observe_algebra_element(ext_val); + + // Both methods must produce identical internal state + assert_eq!(chal1.input_buffer, chal2.input_buffer); + assert_eq!(chal1.output_buffer, chal2.output_buffer); + assert_eq!(chal1.sponge_state, chal2.sponge_state); + } + + #[test] + fn test_observe_base_as_algebra_element_stream_consistency() { + // Create two identical challengers for stream observation test + let mut chal1 = + DuplexChallenger::::new(TestPermutation {}); + let mut chal2 = + DuplexChallenger::::new(TestPermutation {}); + + // Define a base value vector + let base_values: Vec<_> = (0u8..25).map(G::from_u8).collect(); + + // Method 1: Observe stream using convenience method + for &val in &base_values { + chal1.observe_base_as_algebra_element::(val); + } + + // Method 2: Manually convert each element before observing + for &val in &base_values { + let ext_val = EF2G::from(val); + chal2.observe_algebra_element(ext_val); + } + + // Verify identical state through sequential observations and duplexing. 
+ assert_eq!(chal1.input_buffer, chal2.input_buffer); + assert_eq!(chal1.output_buffer, chal2.output_buffer); + assert_eq!(chal1.sponge_state, chal2.sponge_state); + + // Verify sampling produces identical challenges + let sample1: EF2G = chal1.sample_algebra_element(); + let sample2: EF2G = chal2.sample_algebra_element(); + assert_eq!(sample1, sample2); + + // Verify state consistency is maintained after sampling + assert_eq!(chal1.input_buffer, chal2.input_buffer); + assert_eq!(chal1.output_buffer, chal2.output_buffer); + assert_eq!(chal1.sponge_state, chal2.sponge_state); + } + + #[test] + fn test_observe_algebra_elements_equivalence() { + // Test that the two following paths give the same results: + // - `observe_algebra_slice` + // - `observe_algebra_element` in a loop + let mut chal1 = + DuplexChallenger::::new(TestPermutation {}); + let mut chal2 = + DuplexChallenger::::new(TestPermutation {}); + + // Create a slice of extension field elements + let ext_values: Vec = (0u8..10).map(|i| EF2G::from(G::from_u8(i))).collect(); + + // Method 1: Use observe_algebra_slice with slice + chal1.observe_algebra_slice(&ext_values); + + // Method 2: Call observe_algebra_element individually + for ext_val in &ext_values { + chal2.observe_algebra_element(*ext_val); + } + + // Verify identical internal state + assert_eq!(chal1.input_buffer, chal2.input_buffer); + assert_eq!(chal1.output_buffer, chal2.output_buffer); + assert_eq!(chal1.sponge_state, chal2.sponge_state); + + // Verify sampling produces identical challenges + let sample1: EF2G = chal1.sample_algebra_element(); + let sample2: EF2G = chal2.sample_algebra_element(); + assert_eq!(sample1, sample2); + } + + #[test] + fn test_observe_algebra_elements_empty_slice() { + // Test that observing an empty slice does not change state + let mut chal1 = + DuplexChallenger::::new(TestPermutation {}); + let mut chal2 = + DuplexChallenger::::new(TestPermutation {}); + + // Observe some values first to have non-trivial state + chal1.observe(G::from_u8(42)); + chal2.observe(G::from_u8(42)); + + // Observe empty slice + let empty: Vec = vec![]; + chal1.observe_algebra_slice(&empty); + + // Verify state unchanged + assert_eq!(chal1.input_buffer, chal2.input_buffer); + assert_eq!(chal1.output_buffer, chal2.output_buffer); + assert_eq!(chal1.sponge_state, chal2.sponge_state); + } + + #[test] + fn test_observe_algebra_elements_triggers_duplexing() { + // Test that observing enough elements triggers duplexing + let mut chal = DuplexChallenger::::new(TestPermutation {}); + + // EF2G has dimension 2, so we need RATE/2 elements to fill the buffer + // + // With RATE=16, we need 8 EF2G elements to trigger duplexing + let ext_values: Vec = (0u8..8).map(|i| EF2G::from(G::from_u8(i))).collect(); + + assert!(chal.input_buffer.is_empty()); + assert!(chal.output_buffer.is_empty()); + + chal.observe_algebra_slice(&ext_values); + + // After observing 8 EF2G elements (16 base field elements), duplexing should occur + assert!(chal.input_buffer.is_empty()); + assert!(!chal.output_buffer.is_empty()); + } } diff --git a/challenger/src/grinding_challenger.rs b/challenger/src/grinding_challenger.rs index 30992cb86..7cf95dddc 100644 --- a/challenger/src/grinding_challenger.rs +++ b/challenger/src/grinding_challenger.rs @@ -1,9 +1,12 @@ -use p3_field::{Field, PrimeField, PrimeField32, PrimeField64}; +use p3_field::{Field, PackedValue, PrimeField, PrimeField32, PrimeField64}; use p3_maybe_rayon::prelude::*; use p3_symmetric::CryptographicPermutation; use tracing::instrument; -use 
crate::{CanObserve, CanSampleBits, DuplexChallenger, MultiField32Challenger}; +use crate::{ + CanObserve, CanSampleBits, CanSampleUniformBits, DuplexChallenger, MultiField32Challenger, + UniformSamplingField, +}; /// Trait for challengers that support proof-of-work (PoW) grinding. /// @@ -35,33 +38,245 @@ pub trait GrindingChallenger: /// Returns `true` if the witness passes the PoW check, `false` otherwise. #[must_use] fn check_witness(&mut self, bits: usize, witness: Self::Witness) -> bool { + if bits == 0 { + return true; + } self.observe(witness); self.sample_bits(bits) == 0 } } +/// Trait for challengers that support proof-of-work (PoW) grinding with +/// guaranteed uniformly sampled bits. +pub trait UniformGrindingChallenger: + GrindingChallenger + CanSampleUniformBits +{ + /// Grinds based on *uniformly sampled bits*. This variant is allowed to do rejection + /// sampling if a value is sampled that would violate our uniformity requirement + /// (chance of about 1/P). + /// + /// Use this together with `check_witness_uniform`. + fn grind_uniform(&mut self, bits: usize) -> Self::Witness; + + /// Grinds based on *uniformly sampled bits*. This variant errors if a value is + /// sampled, which would violate our uniformity requirement (chance of about 1/P). + /// See the `UniformSamplingField` trait implemented for each field for details. + /// + /// Use this together with `check_witness_uniform_may_error`. + fn grind_uniform_may_error(&mut self, bits: usize) -> Self::Witness; + + /// Check whether a given `witness` satisfies the PoW condition. + /// + /// After absorbing the witness, the challenger samples `bits` random bits + /// *uniformly* and verifies that all bits sampled are zero. The uniform + /// sampling implies we do rejection sampling in about ~1/P cases. + /// + /// Returns `true` if the witness passes the PoW check, `false` otherwise. + fn check_witness_uniform(&mut self, bits: usize, witness: Self::Witness) -> bool { + self.observe(witness); + self.sample_uniform_bits::(bits) + .expect("Error impossible here due to resampling strategy") + == 0 + } + + /// Check whether a given `witness` satisfies the PoW condition. + /// + /// After absorbing the witness, the challenger samples `bits` random bits + /// *uniformly* and verifies that all bits sampled are zero. In about ~1/P + /// cases this function may error if a sampled value lies outside a range + /// in which we can guarantee uniform bits. + /// + /// Returns `true` if the witness passes the PoW check, `false` otherwise. + fn check_witness_uniform_may_error(&mut self, bits: usize, witness: Self::Witness) -> bool { + self.observe(witness); + self.sample_uniform_bits::(bits) + .is_ok_and(|v| v == 0) + } +} + impl GrindingChallenger for DuplexChallenger where F: PrimeField64, - P: CryptographicPermutation<[F; WIDTH]>, + P: CryptographicPermutation<[F; WIDTH]> + + CryptographicPermutation<[::Packing; WIDTH]>, { type Witness = F; #[instrument(name = "grind for proof-of-work witness", skip_all)] fn grind(&mut self, bits: usize) -> Self::Witness { - assert!(bits < (usize::BITS as usize)); + // Ensure `bits` is small enough to be used in a shift. + assert!(bits < (usize::BITS as usize), "bit count must be valid"); + + // Ensure the PoW target 2^bits is smaller than the field order. + // Otherwise, the probability analysis for grinding would break. assert!((1 << bits) < F::ORDER_U64); + // Trivial case: 0 bits mean no PoW is required and any witness is valid. 
+ if bits == 0 { + return F::ZERO; + } + + // SIMD width: number of field elements processed in parallel. + // Each SIMD lane corresponds to one candidate witness. + let lanes = F::Packing::WIDTH; + + // Total number of batches needed to cover all field elements. + // Each batch tests `lanes` witnesses in parallel. + let num_batches = (F::ORDER_U64 as usize).div_ceil(lanes); + + // Cache the field order. + let order = F::ORDER_U64; + + // Bitmask used to check the PoW condition. eg. bits = 3 => mask = 0b111 + // We accept a witness if (sample & mask) == 0. This verifies 'bits' trailing zeros. + let mask = (1usize << bits) - 1; + + // In a duplex sponge, new inputs are absorbed sequentially at indices [0, 1, 2, ...]. + // The grinding witness is therefore absorbed at the next available position. + let witness_idx = self.input_buffer.len(); + + // Build the sponge state as packed field elements (SIMD vectors). + // + // The current transcript is split across: + // - `input_buffer`: recently observed transcript elements that have not yet been permuted + // - `sponge_state`: the internal sponge state after previous permutations + // + // Logically, the next permutation would act on: + // [input_buffer || sponge_state] + // + // This is invariant across batches, so we compute it once. + let base_packed_state: [_; WIDTH] = core::array::from_fn(|i| { + if i < self.input_buffer.len() { + // Broadcast buffered transcript elements (input_buffer) to all SIMD lanes. + F::Packing::from(self.input_buffer[i]) + } else { + // Broadcast existing sponge state (sponge_state) to all SIMD lanes. + F::Packing::from(self.sponge_state[i]) + } + }); + + // Grinding is implemented via parallel brute-force search over candidate witnesses. + // + // For efficiency, the search is vectorized using SIMD: + // It is semantically equivalent to serially trying witnesses until the PoW condition is met. + // + // - Each SIMD lane corresponds to a distinct candidate witness + // - All lanes share the same transcript prefix + // - A single permutation evaluates multiple candidates in parallel + let witness = (0..num_batches) + .into_par_iter() + .find_map_any(|batch| { + // Compute the starting candidate for this batch. + // + // Each batch processes `F::Packing::WIDTH` candidates: + // - Batch 0 -> candidates [0, 1, ..., F::Packing::WIDTH - 1] + // - Batch 1 -> candidates [F::Packing::WIDTH, ..., 2 * F::Packing::WIDTH - 1] + // - Batch k -> candidates [k * F::Packing::WIDTH, ..., (k+1) * F::Packing::WIDTH - 1] + let base = (batch * lanes) as u64; + + // Start with a copy of the precomputed base state. + let mut packed_state = base_packed_state; + + // Generate SIMD-packed candidate witnesses. + // Each lane receives a distinct field element. + // [base + 0, base + 1, ..., base + F::Packing::WIDTH - 1] + let packed_witnesses = F::Packing::from_fn(|lane| { + let candidate = base + lane as u64; + if candidate < order { + // SAFETY: candidate < field order, so this is a valid canonical field element. + unsafe { F::from_canonical_unchecked(candidate) } + } else { + // Values outside the field order can never satisfy PoW, so we repeat the last potential witness + F::NEG_ONE + } + }); + + // Insert the candidate witnesses at the next absorption position. + // + // This simulates absorbing `transcript || witness` before the Fiat–Shamir challenge is derived. + packed_state[witness_idx] = packed_witnesses; + + // Apply the cryptographic permutation (SIMD version) + // + // This permutes all `lanes` candidates simultaneously. 
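+            // (As an illustration: with a 31-bit field and AVX2, `F::Packing` typically
+            // holds 8 lanes, so each permutation call here tests 8 candidate witnesses;
+            // the exact factor depends on the platform's SIMD width.)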
+ self.permutation.permute_mut(&mut packed_state); + + // Check each lane for the PoW condition. + // + // - In a duplex sponge, output is read from position [RATE-1] (last rate element). + // - We check if the low `bits` bits of each sample are all zeros. + // + // We scan SIMD lanes to find the first candidate whose output satisfies the PoW condition. + packed_state[RATE - 1] + .as_slice() + .iter() + .zip(packed_witnesses.as_slice()) + .find(|(sample, _)| { + // Accept if the low `bits` bits are all zero. + (sample.as_canonical_u64() as usize & mask) == 0 + }) + .map(|(_, &witness)| witness) + }) + .expect("failed to find proof-of-work witness"); + + // Double-check the witness using the standard verifier logic and update the challenger state. + assert!(self.check_witness(bits, witness)); + + witness + } +} + +impl<F, P, const WIDTH: usize, const RATE: usize> UniformGrindingChallenger + for DuplexChallenger<F, P, WIDTH, RATE> +where + F: UniformSamplingField + PrimeField64, + P: CryptographicPermutation<[F; WIDTH]> + + CryptographicPermutation<[<F as Field>::Packing; WIDTH]>, +{ + #[instrument(name = "grind uniform for proof-of-work witness", skip_all)] + fn grind_uniform(&mut self, bits: usize) -> Self::Witness { + // Call the generic grinder with the "resample" checking logic. + self.grind_generic(bits, |challenger, witness| { + challenger.check_witness_uniform(bits, witness) + }) + } + + #[instrument(name = "grind uniform may error for proof-of-work witness", skip_all)] + fn grind_uniform_may_error(&mut self, bits: usize) -> Self::Witness { + // Call the generic grinder with the "error" checking logic. + self.grind_generic(bits, |challenger, witness| { + challenger.check_witness_uniform_may_error(bits, witness) + }) + } +} + +impl<F, P, const WIDTH: usize, const RATE: usize> DuplexChallenger<F, P, WIDTH, RATE> +where + F: PrimeField64, + P: CryptographicPermutation<[F; WIDTH]>, +{ + /// A generic, private helper for PoW grinding, parameterized by the checking function. + fn grind_generic(&mut self, bits: usize, check_fn: CHECK) -> F + where + CHECK: Fn(&mut Self, F) -> bool + Sync + Send, + { + // TODO: decide whether `bits == 0` should also be rejected here. + assert!(bits < (usize::BITS as usize), "bit count must be valid"); + assert!( + (1u64 << bits) < F::ORDER_U64, + "PoW target must be smaller than the field order" + ); + // The core parallel brute-force search logic. let witness = (0..F::ORDER_U64) .into_par_iter() .map(|i| unsafe { - // i < F::ORDER_U64 by construction so this is safe. + // SAFETY: `i < F::ORDER_U64` by construction, so this is a valid canonical field element. F::from_canonical_unchecked(i) }) - .find_any(|witness| self.clone().check_witness(bits, *witness)) - .expect("failed to find witness"); - assert!(self.check_witness(bits, witness)); + .find_any(|&witness| check_fn(&mut self.clone(), witness)) + .expect("failed to find proof-of-work witness"); + // Run the check one last time on the *original* challenger to update its state + // and confirm the witness is valid.
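The `grind_generic` helper relies on cloning the challenger per candidate so that failed attempts never mutate the real transcript, then re-running the check once on the original. A self-contained toy model of that pattern (names are illustrative, not the crate's API):

```rust
// Toy stateful checker standing in for a real challenger.
#[derive(Clone)]
struct ToyChallenger {
    state: u64,
}

impl ToyChallenger {
    fn check_witness(&mut self, bits: usize, witness: u64) -> bool {
        // Absorb the witness, then test the low `bits` bits of the new state.
        self.state = self.state.wrapping_mul(6364136223846793005).wrapping_add(witness);
        self.state & ((1u64 << bits) - 1) == 0
    }
}

fn grind(challenger: &mut ToyChallenger, bits: usize) -> u64 {
    // Each candidate is tried against a *clone*, so failed attempts do not
    // advance the real transcript state.
    let witness = (0u64..)
        .find(|&w| challenger.clone().check_witness(bits, w))
        .expect("failed to find proof-of-work witness");
    // One final check on the original challenger both validates the witness
    // and advances the real transcript, mirroring what the verifier will do.
    assert!(challenger.check_witness(bits, witness));
    witness
}

fn main() {
    let mut c = ToyChallenger { state: 42 };
    let w = grind(&mut c, 8);
    // A fresh challenger with the same starting state accepts the witness.
    assert!(ToyChallenger { state: 42 }.check_witness(8, w));
}
```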
+ assert!(check_fn(self, witness)); witness } } @@ -77,7 +292,7 @@ where #[instrument(name = "grind for proof-of-work witness", skip_all)] fn grind(&mut self, bits: usize) -> Self::Witness { - assert!(bits < (usize::BITS as usize)); + assert!(bits < (usize::BITS as usize), "bit count must be valid"); assert!((1 << bits) < F::ORDER_U32); let witness = (0..F::ORDER_U32) .into_par_iter() diff --git a/challenger/src/lib.rs index d7cf6fc10..afac8ef39 100644 --- a/challenger/src/lib.rs +++ b/challenger/src/lib.rs @@ -17,7 +17,7 @@ pub use duplex_challenger::*; pub use grinding_challenger::*; pub use hash_challenger::*; pub use multi_field_challenger::*; -use p3_field::{BasedVectorSpace, Field}; +use p3_field::{Algebra, BasedVectorSpace, Field, PrimeField64}; pub use serializing_challenger::*; /// A generic trait for absorbing elements into the transcript. @@ -70,6 +70,31 @@ pub trait CanSampleBits<T> { fn sample_bits(&mut self, bits: usize) -> T; } +/// Uniform bit sampling interface. +/// +/// This trait provides a method for drawing uniformly distributed bitstrings +/// from a Fiat–Shamir transcript. The goal is to obtain an integer supported +/// on the range $[0, 2^{bits})$ with each value having equal probability. +pub trait CanSampleUniformBits<F: PrimeField64> { + /// Sample a random `bits`-bit integer from the transcript with a guarantee of + /// uniformly sampled bits. + /// + /// Performance overhead depends on the field and number of bits requested. + /// E.g. for KoalaBear, sampling up to 24 bits uniformly is essentially free. + /// + /// If `REJECTION_SAMPLE` is set to `true`, then this function will sample multiple field + /// elements until it finds one which produces uniform bits. + /// If `REJECTION_SAMPLE` is set to `false`, then this function will sample a single field + /// element and produce an error if the value would produce non-uniform bits. + /// + /// The probability of a panic or a resample is about 1/P for most fields. + /// See the `UniformSamplingField` implementation for each field for details. + fn sample_uniform_bits<const REJECTION_SAMPLE: bool>( + &mut self, + bits: usize, + ) -> Result<usize, F>; +} + /// A high-level trait combining observation and sampling over a finite field. pub trait FieldChallenger<F: Field>: CanObserve<F> + CanSample<F> + CanSampleBits<usize> + Sync @@ -77,16 +102,48 @@ /// Absorb an element from a vector space over the base field. /// /// Decomposes the element into its basis coefficients and absorbs each. + #[inline(always)] fn observe_algebra_element<A: BasedVectorSpace<F>>(&mut self, alg_elem: A) { self.observe_slice(alg_elem.as_basis_coefficients_slice()); } + /// Absorb a slice of elements from a vector space over the base field. + /// + /// Decomposes each element into its basis coefficients and absorbs them. + #[inline(always)] + fn observe_algebra_slice<A: BasedVectorSpace<F> + Clone>(&mut self, alg_elems: &[A]) { + for alg_elem in alg_elems { + self.observe_algebra_element(alg_elem.clone()); + } + } + /// Sample an element of a vector space over the base field. /// /// Constructs the element by sampling basis coefficients. + #[inline(always)] fn sample_algebra_element<A: BasedVectorSpace<F>>(&mut self) -> A { A::from_basis_coefficients_fn(|_| self.sample()) } + + /// Observe base field elements as extension field elements for recursion-friendly transcripts. + /// + /// This simplifies recursive verifier circuits by using a uniform extension field challenger.
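For intuition on `CanSampleUniformBits`: the low `bits` of a field sample are only uniform if samples from the final, partial block of size 2^bits below the modulus are discarded. A hedged sketch of that rejection rule (hypothetical helper, not the crate's implementation):

```rust
// A sample `x` uniform in [0, P) only yields uniform low `bits` if we reject
// samples at or above the largest multiple of 2^bits that fits below P.
fn uniform_low_bits(sample: u64, p: u64, bits: usize) -> Result<u64, u64> {
    let block = 1u64 << bits;
    // Samples in [cutoff, P) belong to an incomplete block and are biased.
    let cutoff = (p / block) * block;
    if sample < cutoff {
        Ok(sample & (block - 1)) // uniform over [0, 2^bits)
    } else {
        Err(sample) // caller either resamples or reports an error
    }
}

fn main() {
    // e.g. the KoalaBear modulus: P = 2^31 - 2^24 + 1, so P ≡ 1 (mod 2^24).
    let p = (1u64 << 31) - (1 << 24) + 1;
    assert_eq!(uniform_low_bits(5, p, 3), Ok(5));
    // Here P mod 2^bits = 1 for any bits <= 24, so the single value P - 1 is
    // the only rejection: probability exactly 1/P, matching the docs above.
    assert!(uniform_low_bits(p - 1, p, 3).is_err());
}
```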
+ /// Instead of observing a mix of base and extension field elements, we convert all base field + /// observations (metadata, public values) to extension field elements before passing to the challenger. + /// + /// # Recursion Benefits + /// + /// In recursive proof systems, the verifier circuit needs to verify the inner proof. Since STARK + /// verification operates entirely in the extension field (challenges, opened values, constraint + /// evaluation), having a challenger that only observes extension field elements significantly + /// simplifies the recursive circuit implementation. + #[inline(always)] + fn observe_base_as_algebra_element<EF>(&mut self, val: F) + where + EF: Algebra<F> + BasedVectorSpace<F>, + { + self.observe_algebra_element(EF::from(val)); + } } impl<T, C> CanObserve<T> for &mut C @@ -137,17 +194,17 @@ where } } -impl<F: Field, C> FieldChallenger<F> for &mut C +impl<F: Field, C> FieldChallenger<F> for &mut C where C: FieldChallenger<F> {} + +impl<F, C> CanSampleUniformBits<F> for &mut C where - C: FieldChallenger<F>, + F: PrimeField64, + C: CanSampleUniformBits<F>, { - #[inline(always)] - fn observe_algebra_element<EF: BasedVectorSpace<F>>(&mut self, ext: EF) { - (*self).observe_algebra_element(ext); - } - - #[inline(always)] - fn sample_algebra_element<EF: BasedVectorSpace<F>>(&mut self) -> EF { - (*self).sample_algebra_element() + fn sample_uniform_bits<const REJECTION_SAMPLE: bool>( + &mut self, + bits: usize, + ) -> Result<usize, F> { + (*self).sample_uniform_bits::<REJECTION_SAMPLE>(bits) } } diff --git a/circle/CHANGELOG.md b/circle/CHANGELOG.md new file mode 100644 index 000000000..929bce152 --- /dev/null +++ b/circle/CHANGELOG.md @@ -0,0 +1,41 @@ +# Changelog + +All notable changes to this project will be documented in this file. + +The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/), +and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0.html).
+ +## [Unreleased] +## [0.4.2] - 2026-01-05 +### Merged PRs +- Enable ZK for preprocessing and in batch-stark (#1178) (Linda Guiga) +- Avoid change of Pcs's `open` method signature (#1230) (Linda Guiga) + +### Authors +- Linda Guiga + +## [0.4.1] - 2025-12-18 +### Authors + +## [0.4.0] - 2025-12-12 +### Merged PRs +- Chore: add descriptions to all sub-crate manifests (#906) (Himess) +- Remove Nightly Features (#932) (AngusG) +- Docs: improve documentation for Circle STARKs deep quotient algorithms (#1079) (Adrian) +- Clippy: small step (#1102) (Thomas Coratger) +- Clippy: add nursery (#1103) (Thomas Coratger) +- Clippy: add semicolon_if_nothing_returned (#1107) (Thomas Coratger) +- Clippy: add `needless_pass_by_value` (#1112) (Thomas Coratger) +- Circle: batch inverses in selectors_on_coset (#1068) (Forostovec) +- Core: add error messages to error enums via thiserror (#1168) (Thomas Coratger) +- Challenger: use `observe_algebra_slice` when possible (#1187) (Thomas Coratger) +- Feat: add PoW phase for batching in FRI commit phase (#1164) (Zach Langley) + +### Authors +- Adrian +- AngusG +- Forostovec +- Himess +- Thomas Coratger +- Zach Langley + diff --git a/circle/Cargo.toml b/circle/Cargo.toml index 10a7b9f24..68a37b912 100644 --- a/circle/Cargo.toml +++ b/circle/Cargo.toml @@ -21,14 +21,15 @@ p3-util.workspace = true itertools.workspace = true serde.workspace = true +thiserror.workspace = true tracing.workspace = true [dev-dependencies] -p3-baby-bear.workspace = true -p3-keccak.workspace = true -p3-merkle-tree.workspace = true -p3-mersenne-31.workspace = true -p3-symmetric.workspace = true +p3-baby-bear = { path = "../baby-bear" } +p3-keccak = { path = "../keccak" } +p3-merkle-tree = { path = "../merkle-tree" } +p3-mersenne-31 = { path = "../mersenne-31" } +p3-symmetric = { path = "../symmetric" } criterion.workspace = true hashbrown.workspace = true diff --git a/circle/src/pcs.rs index f7b0a7095..1e35aa1d8 100644 --- a/circle/src/pcs.rs +++ b/circle/src/pcs.rs @@ -17,6 +17,7 @@ use p3_maybe_rayon::prelude::*; use p3_util::log2_strict_usize; use p3_util::zip_eq::zip_eq; use serde::{Deserialize, Serialize}; +use thiserror::Error; use tracing::info_span; use crate::deep_quotient::{deep_quotient_reduce_row, extract_lambda}; @@ -57,10 +58,17 @@ pub struct CircleInputProof< first_layer_proof: FriMmcs::Proof, } -#[derive(Debug)] -pub enum InputError<InputMmcsError, FriMmcsError> { +#[derive(Debug, Error)] +pub enum InputError<InputMmcsError, FriMmcsError> +where + InputMmcsError: core::fmt::Debug, + FriMmcsError: core::fmt::Debug, +{ + #[error("input MMCS error: {0:?}")] InputMmcsError(InputMmcsError), + #[error("first layer MMCS error: {0:?}")] FirstLayerMmcsError(FriMmcsError), + #[error("input shape error: mismatched dimensions")] InputShapeError, } @@ -130,6 +138,32 @@ where (comm, mmcs_data) } + fn get_quotient_ldes( + &self, + evaluations: impl IntoIterator<Item = (Self::Domain, RowMajorMatrix<Val>)>, + _num_chunks: usize, + ) -> Vec<RowMajorMatrix<Val>> { + evaluations + .into_iter() + .map(|(domain, evals)| { + // We bivariate-fold one bit, and FRI needs one more bit, hence the minimum of 4 rows. + assert!( + domain.log_n >= 2, + "CirclePcs cannot commit to a matrix with fewer than 4 rows." + ); + CircleEvaluations::from_natural_order(domain, evals) + .extrapolate(CircleDomain::standard( + domain.log_n + self.fri_params.log_blowup, + )) + .to_cfft_order() + }) + .collect_vec() + } + + fn commit_ldes(&self, ldes: Vec<RowMajorMatrix<Val>>) -> (Self::Commitment, Self::ProverData) { + self.mmcs.commit(ldes) + } + fn get_evaluations_on_domain<'a>( &self, data: &'a Self::ProverData, @@ -187,9 +221,7 @@ where let ps_at_zeta =
info_span!("compute opened values with Lagrange interpolation") .in_scope(|| evals.evaluate_at_point(zeta)); - ps_at_zeta - .iter() - .for_each(|&p| challenger.observe_algebra_element(p)); + challenger.observe_algebra_slice(&ps_at_zeta); ps_at_zeta }) .collect() @@ -368,9 +400,7 @@ where for (_, round) in &rounds { for (_, mat) in round { for (_, point) in mat { - point - .iter() - .for_each(|&opening| challenger.observe_algebra_element(opening)); + challenger.observe_algebra_slice(point); } } } diff --git a/circle/src/prover.rs index 4cdcea6c7..b5c0e58fc 100644 --- a/circle/src/prover.rs +++ b/circle/src/prover.rs @@ -40,7 +40,7 @@ where let commit_phase_result = commit_phase(folding, params, inputs, challenger); - let pow_witness = challenger.grind(params.proof_of_work_bits); + let pow_witness = challenger.grind(params.query_proof_of_work_bits); let query_proofs = info_span!("query phase").in_scope(|| { iter::repeat_with(|| { diff --git a/circle/src/verifier.rs index 9e59acf4b..2fc6845c2 100644 --- a/circle/src/verifier.rs +++ b/circle/src/verifier.rs @@ -46,7 +46,7 @@ where } // Check PoW. - if !challenger.check_witness(params.proof_of_work_bits, proof.pow_witness) { + if !challenger.check_witness(params.query_proof_of_work_bits, proof.pow_witness) { return Err(FriError::InvalidPowWitness); } diff --git a/commit/CHANGELOG.md b/commit/CHANGELOG.md new file mode 100644 index 000000000..82ce4b6ae --- /dev/null +++ b/commit/CHANGELOG.md @@ -0,0 +1,37 @@ +# Changelog + +All notable changes to this project will be documented in this file. + +The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/), +and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0.html). + +## [Unreleased] +## [0.4.2] - 2026-01-05 +### Merged PRs +- Enable ZK for preprocessing and in batch-stark (#1178) (Linda Guiga) +- Avoid change of Pcs's `open` method signature (#1230) (Linda Guiga) + +### Authors +- Linda Guiga + +## [0.4.1] - 2025-12-18 +### Authors + +## [0.4.0] - 2025-12-12 +### Merged PRs +- Field.rs: `Powers::packed_collect_n` (#888) (Adrian Hamelink) +- Chore: add descriptions to all sub-crate manifests (#906) (Himess) +- Mmcs: better doc for `ExtensionMmcs` (#947) (Thomas Coratger) +- Optimize split_evals to reduce copying (#1043) (sashass1315) +- Clippy: small step (#1102) (Thomas Coratger) +- Clippy: add nursery (#1103) (Thomas Coratger) +- Clippy: add semicolon_if_nothing_returned (#1107) (Thomas Coratger) +- Add preprocessed/transparent columns to uni-stark (#1114) (o-k-d) + +### Authors +- Adrian Hamelink +- Himess +- Thomas Coratger +- o-k-d +- sashass1315 + diff --git a/commit/Cargo.toml b/commit/Cargo.toml index 6516fd56f..ecef09223 100644 --- a/commit/Cargo.toml +++ b/commit/Cargo.toml @@ -22,8 +22,8 @@ p3-challenger = { workspace = true, optional = true } p3-dft = { workspace = true, optional = true } [dev-dependencies] -p3-challenger.workspace = true -p3-dft.workspace = true +p3-challenger = { path = "../challenger" } +p3-dft = { path = "../dft" } [features] default = ["test-utils"] diff --git a/commit/src/pcs.rs index 694b80db9..d7c97d1cb 100644 --- a/commit/src/pcs.rs +++ b/commit/src/pcs.rs @@ -67,6 +67,19 @@ where evaluations: impl IntoIterator<Item = (Self::Domain, RowMajorMatrix<Val<Self::Domain>>)>, ) -> (Self::Commitment, Self::ProverData); + /// Same as `commit` but without randomization. This is used for preprocessed columns + /// which do not have to be randomized even when ZK is enabled.
Note that the preprocessed columns still + /// need to be padded to the extended domain height. + /// + /// Returns both the commitment which should be sent to the verifier + /// and the prover data which can be used to produce opening proofs. + fn commit_preprocessing( + &self, + evaluations: impl IntoIterator<Item = (Self::Domain, RowMajorMatrix<Val<Self::Domain>>)>, + ) -> (Self::Commitment, Self::ProverData) { + self.commit(evaluations) + } + /// Commit to the quotient polynomial. We first decompose the quotient polynomial into /// `num_chunks` many smaller polynomials each of degree `degree / num_chunks`. /// This can have minor performance benefits, but is not strictly necessary in the non `zk` case. @@ -93,13 +106,32 @@ where quotient_domain.split_evals(num_chunks, quotient_evaluations); let quotient_sub_domains = quotient_domain.split_domains(num_chunks); - self.commit( + let ldes = self.get_quotient_ldes( quotient_sub_domains .into_iter() .zip(quotient_sub_evaluations), - ) + num_chunks, + ); + self.commit_ldes(ldes) } + /// When committing to quotient polynomials in batch-STARK, + /// it is simpler to first compute the LDE evaluations before batch-committing to them. + /// + /// This corresponds to the first step of `commit_quotient`. When `zk` is enabled, + /// this will additionally add randomization. + fn get_quotient_ldes( + &self, + evaluations: impl IntoIterator<Item = (Self::Domain, RowMajorMatrix<Val<Self::Domain>>)>, + num_chunks: usize, + ) -> Vec<RowMajorMatrix<Val<Self::Domain>>>; + + /// Commits to a collection of LDE evaluation matrices. + fn commit_ldes( + &self, + ldes: Vec<RowMajorMatrix<Val<Self::Domain>>>, + ) -> (Self::Commitment, Self::ProverData); + /// Given prover data corresponding to a commitment to a collection of evaluation matrices, /// return the evaluations of those matrices on the given domain. /// @@ -112,6 +144,17 @@ where domain: Self::Domain, ) -> Self::EvaluationsOnDomain<'a>; + /// This is the same as `get_evaluations_on_domain` but without randomization. + /// This is used for preprocessed columns which do not have to be randomized even when ZK is enabled. + fn get_evaluations_on_domain_no_random<'a>( + &self, + prover_data: &'a Self::ProverData, + idx: usize, + domain: Self::Domain, + ) -> Self::EvaluationsOnDomain<'a> { + self.get_evaluations_on_domain(prover_data, idx, domain) + } + /// Open a collection of polynomial commitments at a set of points. Produce the values at those points along with a proof /// of correctness. /// @@ -145,6 +188,48 @@ where fiat_shamir_challenger: &mut Challenger, ) -> (OpenedValues<Challenge>, Self::Proof); + /// Open a collection of polynomial commitments at a set of points, when there is preprocessing data. + /// It is the same as `open` when `ZK` is disabled. + /// Produce the values at those points along with a proof of correctness. + /// + /// Arguments: + /// - `commitment_data_with_opening_points`: A vector whose elements are a pair: + /// - `data`: The prover data corresponding to a multi-matrix commitment. + /// - `opening_points`: A vector containing, for each matrix committed to, a vector of opening points. + /// - `fiat_shamir_challenger`: The challenger that will be used to generate the proof. + /// - `is_preprocessing`: Whether one of the committed matrices corresponds to preprocessed columns. + /// + /// Unwrapping the arguments further, each `data` contains a vector of the committed matrices (`matrices = Vec<M>`). + /// If the length of `matrices` is not equal to the length of `opening_points`, the function will error. Otherwise, for + /// each index `i`, the matrix `M = matrices[i]` will be opened at the points `opening_points[i]`.
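A toy model of why `commit_quotient` is now split into `get_quotient_ldes` plus `commit_ldes`: a batch-STARK prover can pool the LDEs of several quotient polynomials into a single commitment. Everything below is a simplified stand-in for the real `Pcs` machinery:

```rust
// Two-phase commit, modeled with Vec<u64> columns instead of matrices.
trait ToyPcs {
    fn get_quotient_ldes(&self, evals: Vec<Vec<u64>>) -> Vec<Vec<u64>>;
    fn commit_ldes(&self, ldes: Vec<Vec<u64>>) -> u64;

    // Mirrors the shape of the default `commit_quotient`: produce LDEs, then commit.
    fn commit_quotient(&self, evals: Vec<Vec<u64>>) -> u64 {
        let ldes = self.get_quotient_ldes(evals);
        self.commit_ldes(ldes)
    }
}

struct Doubler;

impl ToyPcs for Doubler {
    // "LDE" here just doubles each column; the real PCS extrapolates to a
    // blown-up domain and bit-reverses rows.
    fn get_quotient_ldes(&self, evals: Vec<Vec<u64>>) -> Vec<Vec<u64>> {
        evals.into_iter().map(|col| col.repeat(2)).collect()
    }
    // "Commitment" is a toy checksum standing in for a Merkle root.
    fn commit_ldes(&self, ldes: Vec<Vec<u64>>) -> u64 {
        ldes.iter().flatten().sum()
    }
}

fn main() {
    let pcs = Doubler;
    // A batch caller can pool LDEs from two quotients into one commitment:
    let mut all = pcs.get_quotient_ldes(vec![vec![1, 2]]);
    all.extend(pcs.get_quotient_ldes(vec![vec![3]]));
    assert_eq!(pcs.commit_ldes(all), 12); // (1+2)*2 + 3*2
}
```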
+ /// + /// This means that each column of `M` will be interpreted as the evaluation vector of some polynomial + /// and we will compute the value of all of those polynomials at `opening_points[i]`. + /// + /// The domains on which the evaluation vectors are defined are not part of the arguments here + /// but should be public information known to both the prover and verifier. + fn open_with_preprocessing( + &self, + // For each multi-matrix commitment, + commitment_data_with_opening_points: Vec<( + // The matrices and auxiliary prover data + &Self::ProverData, + // for each matrix, + Vec< + // the points to open + Vec<Challenge>, + >, + )>, + fiat_shamir_challenger: &mut Challenger, + _is_preprocessing: bool, + ) -> (OpenedValues<Challenge>, Self::Proof) { + debug_assert!( + !Self::ZK, + "open_with_preprocessing should have a different implementation when ZK is enabled" + ); + self.open(commitment_data_with_opening_points, fiat_shamir_challenger) + } + /// Verify that a collection of opened values is correct. /// /// Arguments: @@ -180,7 +265,7 @@ where fn get_opt_randomization_poly_commitment( &self, - _domain: Self::Domain, + _domain: impl IntoIterator<Item = Self::Domain>, ) -> Option<(Self::Commitment, Self::ProverData)> { None } diff --git a/commit/src/testing.rs index c7d50d401..7ac78e470 100644 --- a/commit/src/testing.rs +++ b/commit/src/testing.rs @@ -12,7 +12,7 @@ use p3_util::log2_strict_usize; use p3_util::zip_eq::zip_eq; use serde::{Deserialize, Serialize}; -use crate::{OpenedValues, Pcs}; +use crate::{OpenedValues, Pcs, PolynomialSpace}; /// A trivial PCS: its commitment is simply the coefficients of each poly. #[derive(Debug)] @@ -90,6 +90,36 @@ where ) } + fn commit_quotient( + &self, + quotient_domain: Self::Domain, + quotient_evaluations: RowMajorMatrix<Val>, + num_chunks: usize, + ) -> (Self::Commitment, Self::ProverData) { + let quotient_sub_evaluations = + quotient_domain.split_evals(num_chunks, quotient_evaluations); + let quotient_sub_domains = quotient_domain.split_domains(num_chunks); + + Pcs::<Challenge, Challenger>::commit( + self, + quotient_sub_domains + .into_iter() + .zip(quotient_sub_evaluations), + ) + } + + fn get_quotient_ldes( + &self, + _evaluations: impl IntoIterator<Item = (Self::Domain, RowMajorMatrix<Val>)>, + _num_chunks: usize, + ) -> Vec<RowMajorMatrix<Val>> { + unimplemented!("This PCS does not support computing LDEs"); + } + + fn commit_ldes(&self, _ldes: Vec<RowMajorMatrix<Val>>) -> (Self::Commitment, Self::ProverData) { + unimplemented!("This PCS does not support computing LDEs"); + } + fn get_evaluations_on_domain<'a>( &self, prover_data: &'a Self::ProverData, diff --git a/create_release.sh b/create_release.sh new file mode 100755 index 000000000..845bf0af2 --- /dev/null +++ b/create_release.sh @@ -0,0 +1,43 @@ +#!/bin/bash + +# Script to automate release generation for `Plonky3`. +# +# This script creates a release PR that, when merged, triggers CI to publish to crates.io. +# +# How it works: +# 1. `release-plz release-pr` analyzes commits since the last release +# 2. Determines version bumps based on conventional commits (respecting version_group for lock-step) +# 3. Generates changelogs using cliff.toml +# 4. Creates a PR with the "release" label containing all changes +# 5. When that PR is merged, CI runs `release-plz release` to publish to crates.io + +set -euo pipefail + +check_binary_installed() { + local binary_name="$1" + if ! command -v "$binary_name" &> /dev/null; then + echo "Error: $binary_name is not installed." + exit 1 + fi +} + +if [ -z "${GIT_TOKEN:-}" ]; then + echo "Error: GIT_TOKEN is not set. release-plz requires it to create PRs."
+ exit 1 +fi + +check_binary_installed "release-plz" + +# Ensure local main is up-to-date with remote +git fetch origin main +local_main=$(git rev-parse main) +remote_main=$(git rev-parse origin/main) + +if [ "$local_main" != "$remote_main" ]; then + echo "Error: Local 'main' is not up-to-date with 'origin/main'." + echo "Please run: git checkout main && git pull" + exit 1 +fi + +echo "Creating release PR..." +release-plz release-pr diff --git a/dft/CHANGELOG.md b/dft/CHANGELOG.md new file mode 100644 index 000000000..0228b1dcd --- /dev/null +++ b/dft/CHANGELOG.md @@ -0,0 +1,36 @@ +# Changelog + +All notable changes to this project will be documented in this file. + +The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/), +and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0.html). + +## [Unreleased] +## [0.4.2] - 2026-01-05 +### Authors + +## [0.4.1] - 2025-12-18 +### Authors + +## [0.4.0] - 2025-12-12 +### Merged PRs +- Field.rs: `Powers::packed_collect_n` (#888) (Adrian Hamelink) +- Chore: add descriptions to all sub-crate manifests (#906) (Himess) +- More Clippy Complaints (#931) (AngusG) +- Small refactor trying to clean up #897 (#900) (AngusG) +- Chore: use `collect_n` with powers when possible (#963) (Thomas Coratger) +- Remove Nightly Features (#932) (AngusG) +- Clippy: small step (#1102) (Thomas Coratger) +- Feat: add thread safety to dft implementations (#999) (Jeremi Do Dinh) +- Clippy: add semicolon_if_nothing_returned (#1107) (Thomas Coratger) +- Clippy: add `needless_pass_by_value` (#1112) (Thomas Coratger) +- Implement uniform sampling of bits from field elements (#1050) (Sebastian) + +### Authors +- Adrian Hamelink +- AngusG +- Himess +- Jeremi Do Dinh +- Sebastian +- Thomas Coratger + diff --git a/dft/Cargo.toml b/dft/Cargo.toml index f4ce8059b..e2eeb2b23 100644 --- a/dft/Cargo.toml +++ b/dft/Cargo.toml @@ -20,10 +20,10 @@ spin.workspace = true tracing.workspace = true [dev-dependencies] -p3-baby-bear.workspace = true -p3-goldilocks.workspace = true -p3-mersenne-31.workspace = true -p3-monty-31.workspace = true +p3-baby-bear = { path = "../baby-bear" } +p3-goldilocks = { path = "../goldilocks" } +p3-mersenne-31 = { path = "../mersenne-31" } +p3-monty-31 = { path = "../monty-31" } criterion.workspace = true rand.workspace = true diff --git a/dft/src/util.rs b/dft/src/util.rs index 28e139f5d..42dd33a79 100644 --- a/dft/src/util.rs +++ b/dft/src/util.rs @@ -21,7 +21,7 @@ pub fn divide_by_height + BorrowMut<[F]>>( // It's also cheaper to work in the PrimeSubfield whenever possible. let h_inv_subfield = F::PrimeSubfield::ONE.div_2exp_u64(log_h as u64); let h_inv = F::from_prime_subfield(h_inv_subfield); - mat.scale(h_inv) + mat.scale(h_inv); } /// Multiply each element of row `i` of `mat` by `shift**i`. 
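The `divide_by_height` change above rests on a simple fact: for a power-of-two height, dividing every entry by the height is just a multiplication by the inverse of `2^log_h`. A small sketch with integer arithmetic modulo a BabyBear-style prime (illustrative only, not the crate's field types):

```rust
const P: u64 = 2013265921; // e.g. the BabyBear modulus, 15 * 2^27 + 1

// Modular exponentiation, used to build the inverse of 2^log_h via Fermat's
// little theorem: a^(P-2) = a^(-1) mod P for prime P.
fn pow_mod(mut b: u64, mut e: u64) -> u64 {
    let mut acc = 1u64;
    while e > 0 {
        if e & 1 == 1 {
            acc = acc * b % P;
        }
        b = b * b % P;
        e >>= 1;
    }
    acc
}

fn main() {
    let log_h = 3;
    let h = 1u64 << log_h; // height 8
    // h_inv = (2^log_h)^(P-2) mod P, the multiplicative inverse of the height.
    let h_inv = pow_mod(h, P - 2);
    let x = 1234567u64;
    // Multiplying by h_inv undoes multiplication by the height.
    assert_eq!(x * h % P * h_inv % P, x);
}
```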
diff --git a/examples/Cargo.toml b/examples/Cargo.toml index 85c4c711e..f224233ad 100644 --- a/examples/Cargo.toml +++ b/examples/Cargo.toml @@ -8,6 +8,7 @@ repository.workspace = true homepage.workspace = true keywords.workspace = true categories.workspace = true +publish = false [dependencies] p3-air.workspace = true @@ -29,17 +30,17 @@ p3-poseidon2-air.workspace = true p3-symmetric.workspace = true p3-uni-stark.workspace = true -bincode = { workspace = true, features = ["serde", "alloc"] } clap.workspace = true +postcard = { workspace = true, features = ["alloc"] } rand.workspace = true [dev-dependencies] -p3-baby-bear.workspace = true -p3-challenger.workspace = true -p3-commit = { workspace = true, features = ["test-utils"] } -p3-dft.workspace = true -p3-koala-bear.workspace = true -p3-matrix.workspace = true +p3-baby-bear = { path = "../baby-bear" } +p3-challenger = { path = "../challenger" } +p3-commit = { path = "../commit", features = ["test-utils"] } +p3-dft = { path = "../dft" } +p3-koala-bear = { path = "../koala-bear" } +p3-matrix = { path = "../matrix" } tracing-forest = { workspace = true, features = ["ansi", "smallvec"] } tracing-subscriber = { workspace = true, features = ["std", "env-filter"] } diff --git a/examples/src/proofs.rs b/examples/src/proofs.rs index 52fa0881c..9c789d5c0 100644 --- a/examples/src/proofs.rs +++ b/examples/src/proofs.rs @@ -242,17 +242,13 @@ pub fn report_result(result: Result<(), impl Debug>) { /// Report the size of the serialized proof. /// -/// Serializes the given proof instance using bincode and prints the size in bytes. +/// Serializes the given proof instance using postcard and prints the size in bytes. /// Panics if serialization fails. #[inline] pub fn report_proof_size(proof: &Proof) where SC: StarkGenericConfig, { - let config = bincode::config::standard() - .with_little_endian() - .with_fixed_int_encoding(); - let proof_bytes = - bincode::serde::encode_to_vec(proof, config).expect("Failed to serialize proof"); + let proof_bytes = postcard::to_allocvec(proof).expect("Failed to serialize proof"); println!("Proof size: {} bytes", proof_bytes.len()); } diff --git a/field-testing/CHANGELOG.md b/field-testing/CHANGELOG.md new file mode 100644 index 000000000..3df92df52 --- /dev/null +++ b/field-testing/CHANGELOG.md @@ -0,0 +1,40 @@ +# Changelog + +All notable changes to this project will be documented in this file. + +The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/), +and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0.html). + +## [Unreleased] +## [0.4.2] - 2026-01-05 +### Authors + +## [0.4.1] - 2025-12-18 +### Authors + +## [0.4.0] - 2025-12-12 +### Merged PRs +- Field.rs: `Powers::packed_collect_n` (#888) (Adrian Hamelink) +- Clippy wants us to put things inside of fmt now instead of just extra arguments... (#916) (AngusG) +- Chore: add descriptions to all sub-crate manifests (#906) (Himess) +- Custom halve impl for Bn254 (#919) (AngusG) +- Adding custom mul/div_exp_2_u64 for the Goldilocks field. 
(#923) (AngusG) +- More Clippy Complaints (#931) (AngusG) +- Chore: use `collect_n` with powers when possible (#963) (Thomas Coratger) +- Move halve to ring (#969) (AngusG) +- Move div_2_exp_u64 to ring (#970) (AngusG) +- Speed Up Base-Extension Multiplication (#998) (AngusG) +- Monty31: add aarch64 neon custom `exp_5` and `exp_7` (#1033) (Thomas Coratger) +- Clippy: small step (#1102) (Thomas Coratger) +- Feat: add thread safety to dft implementations (#999) (Jeremi Do Dinh) +- Clippy: add semicolon_if_nothing_returned (#1107) (Thomas Coratger) +- Refactor: deduplicate field JSON serialization tests (#1162) (andrewshab) + +### Authors +- Adrian Hamelink +- AngusG +- Himess +- Jeremi Do Dinh +- Thomas Coratger +- andrewshab + diff --git a/field-testing/Cargo.toml b/field-testing/Cargo.toml index f6a41c7c7..c6544de5d 100644 --- a/field-testing/Cargo.toml +++ b/field-testing/Cargo.toml @@ -19,6 +19,9 @@ criterion.workspace = true num-bigint.workspace = true rand.workspace = true +serde.workspace = true +serde_json.workspace = true + [lints] workspace = true diff --git a/field-testing/src/lib.rs index 2d2999166..b18a46127 100644 --- a/field-testing/src/lib.rs +++ b/field-testing/src/lib.rs @@ -28,6 +28,8 @@ pub use packedfield_testing::*; use rand::distr::{Distribution, StandardUniform}; use rand::rngs::SmallRng; use rand::{Rng, SeedableRng}; +use serde::Serialize; +use serde::de::DeserializeOwned; #[allow(clippy::eq_op)] pub fn test_ring_with_eq<R>(zeros: &[R], ones: &[R]) where @@ -324,6 +326,41 @@ where } } +/// Test JSON serialization and deserialization for a set of field values. +/// +/// This function tests that: +/// 1. Each value can be serialized and deserialized correctly +/// 2. Double round-trip serialization is consistent +pub fn test_field_json_serialization<F>(values: &[F]) +where + F: PrimeCharacteristicRing + Serialize + DeserializeOwned + Eq, +{ + for value in values { + // Single round-trip + let serialized = serde_json::to_string(value).expect("Failed to serialize field element"); + let deserialized: F = + serde_json::from_str(&serialized).expect("Failed to deserialize field element"); + assert_eq!( + *value, deserialized, + "Single round-trip serialization failed" + ); + + // Double round-trip to ensure consistency + let serialized_again = serde_json::to_string(&deserialized) + .expect("Failed to serialize field element (second time)"); + let deserialized_again: F = serde_json::from_str(&serialized_again) + .expect("Failed to deserialize field element (second time)"); + assert_eq!( + *value, deserialized_again, + "Double round-trip serialization failed" + ); + assert_eq!( + deserialized, deserialized_again, + "Deserialized values should be equal" + ); + } +} + pub fn test_dot_product<R: PrimeCharacteristicRing>(u: &[R; 64], v: &[R; 64]) { let mut dot = R::ZERO; assert_eq!( diff --git a/field/CHANGELOG.md b/field/CHANGELOG.md new file mode 100644 index 000000000..24c38c15e --- /dev/null +++ b/field/CHANGELOG.md @@ -0,0 +1,68 @@ +# Changelog + +All notable changes to this project will be documented in this file. + +The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/), +and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0.html).
+ +## [Unreleased] +## [0.4.2] - 2026-01-05 +### Merged PRs +- Chore(field): Make `BinomialExtensionField::new` public (#1209) (Adrian Hamelink) +- Chore(field): revert making `BinomialExtensionField::new` public and replace with `From<[A; D]>` (#1210) (Adrian Hamelink) +- Refactor(field): Add packed field extraction helpers and FieldArray utilities (#1211) (Adrian Hamelink) +- Refactor: add public const `new` and `new_array` for all fields (#1222) (Adrian Hamelink) +- Feat: use compile-time asserts for const generic parameters (#1232) (Himess) + +### Authors +- Adrian Hamelink +- Himess + +## [0.4.1] - 2025-12-18 +### Merged PRs +- fix: remove undefined WIDTH const in interleave module (#1199) (Robin Salen) + +## [0.4.0] - 2025-12-12 +### Merged PRs +- Field.rs: `Powers::packed_collect_n` (#888) (Adrian Hamelink) +- Clippy wants us to put things inside of fmt now instead of just extra arguments... (#916) (AngusG) +- Chore: add descriptions to all sub-crate manifests (#906) (Himess) +- From_biguint method for Bn254 (#914) (AngusG) +- More Clippy Complaints (#931) (AngusG) +- Adding Macros to remove boilerplate impls (#943) (AngusG) +- Combining Interleave Code (#950) (AngusG) +- Add a macro for implying PackedValue for PackedFields (#949) (AngusG) +- Fast Octic inverse (#955) (AngusG) +- Fast Optic Square (#957) (AngusG) +- Fast Octic Multiplication (#956) (AngusG) +- Packing Trick for Field Extensions (#958) (AngusG) +- Refactor to packed add methods (#972) (AngusG) +- Speed up Extension Field Addition (#980) (AngusG) +- Remove Nightly Features (#932) (AngusG) +- Move halve to ring (#969) (AngusG) +- Packed Sub Refactor (#979) (AngusG) +- Move div_2_exp_u64 to ring (#970) (AngusG) +- Speed Up Extension Field Subtraction (#988) (AngusG) +- Must Use (#996) (AngusG) +- Move Interleave into the Packed submodule (#997) (AngusG) +- Speed Up Base-Extension Multiplication (#998) (AngusG) +- Compile Time asserts (#1015) (AngusG) +- Rename frobenius_inv -> pseudo_inv and fix doc (#1049) (Tom Wambsgans) +- Fix: Clarify NEON transmute conversions (#1073) (Skylar Ray) +- Refactor: remove redundant clones in crypto modules (#1080) (Skylar Ray) +- Clippy: small step (#1102) (Thomas Coratger) +- Clippy: add nursery (#1103) (Thomas Coratger) +- Clippy: add semicolon_if_nothing_returned (#1107) (Thomas Coratger) +- Challenger: add `observe_base_as_algebra_element ` to `FieldChallenger` trait (#1152) (Thomas Coratger) +- Feat: revert `builder.assert_bool` to previous impl (#1191) (Zach Langley) + +### Authors +- Adrian Hamelink +- AngusG +- Himess +- Robin Salen +- Skylar Ray +- Thomas Coratger +- Tom Wambsgans +- Zach Langley + diff --git a/field/Cargo.toml b/field/Cargo.toml index 5555ea4df..5d9ae6b34 100644 --- a/field/Cargo.toml +++ b/field/Cargo.toml @@ -21,8 +21,8 @@ serde = { workspace = true, features = ["derive"] } tracing.workspace = true [dev-dependencies] -p3-baby-bear.workspace = true -p3-goldilocks.workspace = true +p3-baby-bear = { path = "../baby-bear" } +p3-goldilocks = { path = "../goldilocks" } [lints] workspace = true diff --git a/field/src/array.rs b/field/src/array.rs index 1d6199dd3..9ea1de040 100644 --- a/field/src/array.rs +++ b/field/src/array.rs @@ -1,6 +1,8 @@ use core::array; use core::iter::{Product, Sum}; -use core::ops::{Add, AddAssign, Div, Mul, MulAssign, Neg, Sub, SubAssign}; +use core::ops::{Add, AddAssign, Div, Index, IndexMut, Mul, MulAssign, Neg, Sub, SubAssign}; + +use p3_util::{as_base_slice, as_base_slice_mut}; use 
crate::batch_inverse::batch_multiplicative_inverse_general; use crate::{Algebra, Field, PackedValue, PrimeCharacteristicRing}; @@ -11,11 +13,90 @@ use crate::{Algebra, Field, PackedValue, PrimeCharacteristicRing}; pub struct FieldArray(pub [F; N]); impl FieldArray { + /// Compute the element-wise multiplicative inverse using batched inversion. + /// + /// Uses Montgomery's batch inversion trick to compute all inverses with a + /// single field inversion, improving performance when N > 1. + #[inline] pub(crate) fn inverse(&self) -> Self { let mut result = Self::default(); batch_multiplicative_inverse_general(&self.0, &mut result.0, |x| x.inverse()); result } + + /// Apply a function to each element, returning a new `FieldArray`. + #[inline] + pub fn map(self, f: Func) -> FieldArray + where + Func: FnMut(F) -> U, + { + FieldArray(self.map_into_array(f)) + } + + /// Apply a function to each element, returning a raw array `[U; N]`. + /// + /// Unlike [`map`](Self::map), this does not require `U: Field`. + #[inline] + pub fn map_into_array(self, f: Func) -> [U; N] + where + Func: FnMut(F) -> U, + { + self.0.map(f) + } + + /// View as a slice of raw `[F; N]` arrays. + /// + /// This is a zero-cost transmute enabled by the `#[repr(transparent)]` layout. + #[inline] + pub const fn as_raw_slice(s: &[Self]) -> &[[F; N]] { + // SAFETY: `FieldArray` is `#[repr(transparent)]` over `[F; N]`, + // so `&[FieldArray]` and `&[[F; N]]` have identical layouts. + unsafe { as_base_slice(s) } + } + + /// View as a mutable slice of raw `[F; N]` arrays. + /// + /// This is a zero-cost transmute enabled by the `#[repr(transparent)]` layout. + #[inline] + pub const fn as_raw_slice_mut(s: &mut [Self]) -> &mut [[F; N]] { + // SAFETY: `FieldArray` is `#[repr(transparent)]` over `[F; N]`, + // so `&mut [FieldArray]` and `&mut [[F; N]]` have identical layouts. + unsafe { as_base_slice_mut(s) } + } + + /// Reinterpret a slice of `[F; N]` as a slice of `FieldArray`. + /// + /// This is a zero-cost transmute enabled by the `#[repr(transparent)]` layout. + #[inline] + pub const fn from_raw_slice(s: &[[F; N]]) -> &[Self] { + // SAFETY: `FieldArray` is `#[repr(transparent)]` over `[F; N]`, + // so `&[[F; N]]` and `&[FieldArray]` have identical layouts. + unsafe { as_base_slice(s) } + } + + /// Reinterpret a mutable slice of `[F; N]` as a mutable slice of `FieldArray`. + /// + /// This is a zero-cost transmute enabled by the `#[repr(transparent)]` layout. + #[inline] + pub const fn from_raw_slice_mut(s: &mut [[F; N]]) -> &mut [Self] { + // SAFETY: `FieldArray` is `#[repr(transparent)]` over `[F; N]`, + // so `&mut [[F; N]]` and `&mut [FieldArray]` have identical layouts. + unsafe { as_base_slice_mut(s) } + } +} + +impl IndexMut for FieldArray { + fn index_mut(&mut self, index: usize) -> &mut Self::Output { + self.0.index_mut(index) + } +} + +impl Index for FieldArray { + type Output = F; + + fn index(&self, index: usize) -> &Self::Output { + self.0.index(index) + } } impl Default for FieldArray { diff --git a/field/src/extension/binomial_extension.rs b/field/src/extension/binomial_extension.rs index b33bf6ce0..8387f513b 100644 --- a/field/src/extension/binomial_extension.rs +++ b/field/src/extension/binomial_extension.rs @@ -35,7 +35,12 @@ pub struct BinomialExtensionField { } impl BinomialExtensionField { - pub(crate) const fn new(value: [A; D]) -> Self { + /// Create an extension field element from an array of base elements. + /// + /// Any array is accepted. 
No reduction is required since + /// base elements are already valid field elements. + #[inline] + pub const fn new(value: [A; D]) -> Self { Self { value, _phantom: PhantomData, @@ -43,6 +48,26 @@ impl BinomialExtensionField { } } +impl BinomialExtensionField { + /// Convert a `[[F; D]; N]` array to an array of extension field elements. + /// + /// Const version of `input.map(BinomialExtensionField::new)`. + /// + /// # Panics + /// Panics if `N == 0`. + #[inline] + pub const fn new_array(input: [[F; D]; N]) -> [Self; N] { + const { assert!(N > 0) } + let mut output = [Self::new(input[0]); N]; + let mut i = 1; + while i < N { + output[i] = Self::new(input[i]); + i += 1; + } + output + } +} + impl, const D: usize> Default for BinomialExtensionField { fn default() -> Self { Self::new(array::from_fn(|_| A::ZERO)) @@ -55,6 +80,16 @@ impl, const D: usize> From for BinomialExtensionField } } +impl From<[A; D]> for BinomialExtensionField { + #[inline] + fn from(x: [A; D]) -> Self { + Self { + value: x, + _phantom: PhantomData, + } + } +} + impl, const D: usize> Packable for BinomialExtensionField {} impl, A: Algebra, const D: usize> BasedVectorSpace diff --git a/field/src/extension/packed_binomial_extension.rs b/field/src/extension/packed_binomial_extension.rs index d66772343..0d390d8ab 100644 --- a/field/src/extension/packed_binomial_extension.rs +++ b/field/src/extension/packed_binomial_extension.rs @@ -207,19 +207,6 @@ where Self::new(res) } - #[inline] - fn to_ext_iter( - iter: impl IntoIterator, - ) -> impl Iterator> { - let width = F::Packing::WIDTH; - iter.into_iter().flat_map(move |x| { - (0..width).map(move |i| { - let values = array::from_fn(|j| x.value[j].as_slice()[i]); - BinomialExtensionField::new(values) - }) - }) - } - #[inline] fn packed_ext_powers(base: BinomialExtensionField) -> crate::Powers { let width = F::Packing::WIDTH; diff --git a/field/src/field.rs b/field/src/field.rs index a6eebd7e6..cc8a1a661 100644 --- a/field/src/field.rs +++ b/field/src/field.rs @@ -197,15 +197,16 @@ pub trait PrimeCharacteristicRing: (Self::ONE - self.clone()) * y.clone() } - /// The vanishing polynomial for boolean values: `x * (1 - x)`. + /// The vanishing polynomial for boolean values: `x * (x - 1)`. /// /// This is a polynomial of degree `2` that evaluates to `0` if the input is `0` or `1`. /// If our space is a field, then this will be nonzero on all other inputs. #[must_use] #[inline(always)] fn bool_check(&self) -> Self { - // We use `x * (1 - x)` instead of `x * (x - 1)` as this lets us delegate to the `andn` function. - self.andn(self) + // Note: We could delegate to `andn`, but to maintain backwards + // compatible AIR definitions, we stick with `x * (x - 1)` here. + self.clone() * (self.clone() - Self::ONE) } /// Exponentiation by a `u64` power. diff --git a/field/src/packed/interleaves.rs b/field/src/packed/interleaves.rs index fc4dc7483..14b36da44 100644 --- a/field/src/packed/interleaves.rs +++ b/field/src/packed/interleaves.rs @@ -174,7 +174,7 @@ pub mod interleave { fn interleave1_antidiagonal(x: __m512i, y: __m512i) -> __m512i { const INTERLEAVE1_INDICES: __m512i = unsafe { // Safety: `[u32; 16]` is trivially transmutable to `__m512i`. 
- transmute::<[u32; WIDTH], _>([ + transmute::<[u32; 16], _>([ 0x01, 0x10, 0x03, 0x12, 0x05, 0x14, 0x07, 0x16, 0x09, 0x18, 0x0b, 0x1a, 0x0d, 0x1c, 0x0f, 0x1e, ]) diff --git a/field/src/packed/packed_traits.rs b/field/src/packed/packed_traits.rs index 34b70455f..dbf66e9e6 100644 --- a/field/src/packed/packed_traits.rs +++ b/field/src/packed/packed_traits.rs @@ -1,4 +1,3 @@ -use alloc::vec::Vec; use core::iter::{Product, Sum}; use core::mem::MaybeUninit; use core::ops::{Div, DivAssign}; @@ -165,6 +164,33 @@ pub unsafe trait PackedValue: 'static + Copy + Send + Sync { let n = buf.len() * Self::WIDTH; unsafe { slice::from_raw_parts(buf_ptr, n) } } + + /// Extract the scalar value at the given SIMD lane. + /// + /// This is equivalent to `self.as_slice()[lane]` but more explicit about the + /// SIMD extraction semantics. + #[inline] + #[must_use] + fn extract(&self, lane: usize) -> Self::Value { + self.as_slice()[lane] + } + + /// Unpack `N` packed values into `WIDTH` rows of `N` scalars. + /// + /// ## Inputs + /// - `packed`: An array of `N` packed values. + /// - `rows`: A mutable slice of exactly `WIDTH` arrays to write the unpacked values. + /// + /// ## Panics + /// Panics if `rows.len() != WIDTH`. + #[inline] + fn unpack_into(packed: &[Self; N], rows: &mut [[Self::Value; N]]) { + assert_eq!(rows.len(), Self::WIDTH); + #[allow(clippy::needless_range_loop)] + for lane in 0..Self::WIDTH { + rows[lane] = array::from_fn(|col| packed[col].extract(lane)); + } + } } unsafe impl PackedValue for [T; WIDTH] { @@ -319,19 +345,24 @@ pub trait PackedFieldExtension< #[must_use] fn from_ext_slice(ext_slice: &[ExtField]) -> Self; - /// Given a iterator of packed extension field elements, convert to an iterator of + /// Extract the extension field element at the given SIMD lane. + #[inline] + #[must_use] + fn extract(&self, lane: usize) -> ExtField { + ExtField::from_basis_coefficients_fn(|d| { + self.as_basis_coefficients_slice()[d].as_slice()[lane] + }) + } + + /// Convert an iterator of packed extension field elements to an iterator of /// extension field elements. /// /// This performs the inverse transformation to `from_ext_slice`. #[inline] #[must_use] fn to_ext_iter(iter: impl IntoIterator) -> impl Iterator { - iter.into_iter().flat_map(|x| { - let packed_coeffs = x.as_basis_coefficients_slice(); - (0..BaseField::Packing::WIDTH) - .map(|i| ExtField::from_basis_coefficients_fn(|j| packed_coeffs[j].as_slice()[i])) - .collect::>() // PackedFieldExtension's should reimplement this to avoid this allocation. - }) + iter.into_iter() + .flat_map(|x| (0..BaseField::Packing::WIDTH).map(move |i| x.extract(i))) } /// Similar to `packed_powers`, construct an iterator which returns diff --git a/field/tests/helpers_test.rs b/field/tests/helpers_test.rs index fe3ff8ca6..59d41c018 100644 --- a/field/tests/helpers_test.rs +++ b/field/tests/helpers_test.rs @@ -25,6 +25,7 @@ mod helpers { let expected = vec![x1 + s * y1, x2 + s * y2]; assert_eq!(x, expected); + assert_eq!(par_x, expected); } #[test] diff --git a/fri/CHANGELOG.md b/fri/CHANGELOG.md new file mode 100644 index 000000000..25218d2c4 --- /dev/null +++ b/fri/CHANGELOG.md @@ -0,0 +1,46 @@ +# Changelog + +All notable changes to this project will be documented in this file. + +The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/), +and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0.html). 
+ +## [Unreleased] +## [0.4.2] - 2026-01-05 +### Merged PRs +- Enable ZK for preprocessing and in batch-stark (#1178) (Linda Guiga) +- Avoid change of Pcs's `open` method signature (#1230) (Linda Guiga) + +### Authors +- Linda Guiga + +## [0.4.1] - 2025-12-18 +### Authors + +## [0.4.0] - 2025-12-12 +### Merged PRs +- Field.rs: `Powers::packed_collect_n` (#888) (Adrian Hamelink) +- Chore: add descriptions to all sub-crate manifests (#906) (Himess) +- More Clippy Complaints (#931) (AngusG) +- Shrink some test sizes (#524) (Daniel Lubarov) +- Update doc comment and some other comment fixes. (#959) (AngusG) +- Minor FRI refactor - make open input its own function (#961) (AngusG) +- Remove duplicated definition (#1031) (AngusG) +- Refactor: remove redundant clones in crypto modules (#1086) (Skylar Ray) +- Clippy: small step (#1102) (Thomas Coratger) +- Clippy: add nursery (#1103) (Thomas Coratger) +- Clippy: add semicolon_if_nothing_returned (#1107) (Thomas Coratger) +- Clippy: add `needless_pass_by_value` (#1112) (Thomas Coratger) +- Core: add error messages to error enums via thiserror (#1168) (Thomas Coratger) +- Challenger: use `observe_algebra_slice` when possible (#1187) (Thomas Coratger) +- Feat: add PoW phase for batching in FRI commit phase (#1164) (Zach Langley) + +### Authors +- Adrian Hamelink +- AngusG +- Daniel Lubarov +- Himess +- Skylar Ray +- Thomas Coratger +- Zach Langley + diff --git a/fri/Cargo.toml b/fri/Cargo.toml index 2c8d8528d..b92340aa4 100644 --- a/fri/Cargo.toml +++ b/fri/Cargo.toml @@ -22,17 +22,18 @@ p3-util.workspace = true itertools.workspace = true rand.workspace = true serde = { workspace = true, features = ["derive", "alloc"] } +thiserror.workspace = true tracing.workspace = true [dev-dependencies] -p3-baby-bear.workspace = true -p3-circle.workspace = true -p3-dft.workspace = true -p3-goldilocks.workspace = true -p3-keccak.workspace = true -p3-merkle-tree.workspace = true -p3-mersenne-31.workspace = true -p3-symmetric.workspace = true +p3-baby-bear = { path = "../baby-bear" } +p3-circle = { path = "../circle" } +p3-dft = { path = "../dft" } +p3-goldilocks = { path = "../goldilocks" } +p3-keccak = { path = "../keccak" } +p3-merkle-tree = { path = "../merkle-tree" } +p3-mersenne-31 = { path = "../mersenne-31" } +p3-symmetric = { path = "../symmetric" } criterion.workspace = true diff --git a/fri/src/config.rs b/fri/src/config.rs index bec65aa87..7ed6a405b 100644 --- a/fri/src/config.rs +++ b/fri/src/config.rs @@ -11,7 +11,10 @@ pub struct FriParameters { // TODO: This parameter and FRI early stopping are not yet implemented in `CirclePcs`. pub log_final_poly_len: usize, pub num_queries: usize, - pub proof_of_work_bits: usize, + /// Number of bits for the PoW phase before sampling _each_ batching challenge. + pub commit_proof_of_work_bits: usize, + /// Number of bits for the PoW phase before sampling the queries. + pub query_proof_of_work_bits: usize, pub mmcs: M, } @@ -30,7 +33,7 @@ impl FriParameters { /// Certain users may instead want to look at proven soundness, a more complex calculation which /// isn't currently supported by this crate. 
pub const fn conjectured_soundness_bits(&self) -> usize { - self.log_blowup * self.num_queries + self.proof_of_work_bits + self.log_blowup * self.num_queries + self.query_proof_of_work_bits } } @@ -69,7 +72,8 @@ pub const fn create_test_fri_params<Mmcs>( log_blowup: 2, log_final_poly_len, num_queries: 2, - proof_of_work_bits: 1, + commit_proof_of_work_bits: 1, + query_proof_of_work_bits: 1, mmcs, } } @@ -81,7 +85,8 @@ pub const fn create_test_fri_params_zk<Mmcs>(mmcs: Mmcs) -> FriParameters<Mmcs> log_blowup: 2, log_final_poly_len: 0, num_queries: 2, - proof_of_work_bits: 1, + commit_proof_of_work_bits: 1, + query_proof_of_work_bits: 1, mmcs, } } @@ -93,7 +98,8 @@ pub const fn create_benchmark_fri_params<Mmcs>(mmcs: Mmcs) -> FriParameters(mmcs: Mmcs) -> FriParameters::commit(&self.inner, randomized_evaluations) } - /// Commit to the quotient polynomial. We first decompose the quotient polynomial into + fn commit_preprocessing( + &self, + evaluations: impl IntoIterator<Item = (Self::Domain, RowMajorMatrix<Val>)>, + ) -> (Self::Commitment, Self::ProverData) { + // Pad values with zero columns instead of random columns. + let padded_evals = evaluations + .into_iter() + .map(|(domain, mat)| { + let mat_width = mat.width(); + // Let `w` and `h` be the width and height of the original matrix. The padded matrix should have height `2h` and width `w`. + // To generate it, we add `w` zero columns to the original matrix, then reshape it by setting the width to `w`. + // All columns are added on the right-hand side so, after reshaping, this has the net effect of interleaving the original trace with zero rows. + let mut padded_evaluation = add_zero_cols(&mat, mat_width); + padded_evaluation.width = mat_width; + (domain, padded_evaluation) + }) + .collect::<Vec<_>>(); + + Pcs::<Challenge, Challenger>::commit(&self.inner, padded_evals) + } + + /// Get the quotient polynomial LDEs. We first decompose the quotient polynomial into /// `num_chunks` many smaller polynomials each of degree `degree / num_chunks`. /// These quotient polynomials are then randomized as explained in Section 4.2 of /// https://eprint.iacr.org/2024/1037.pdf . @@ -125,19 +145,12 @@ where /// # Panics /// This function panics if `num_chunks` is either `0` or `1`. The first case makes no logical /// sense and in the second case, the resulting commitment would not be hiding. - fn commit_quotient( + fn get_quotient_ldes( &self, - quotient_domain: Self::Domain, - quotient_evaluations: RowMajorMatrix<Val>, + evaluations: impl IntoIterator<Item = (Self::Domain, RowMajorMatrix<Val>)>, num_chunks: usize, - ) -> (Self::Commitment, Self::ProverData) { - assert!(num_chunks > 1); - - // Given the evaluation vector of `Q_i(x)` over a domain, split it into evaluation vectors - // of `q_{i0}(x), ...` over subdomains.
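To see what the split PoW parameters do to the soundness estimate: only the query PoW enters `conjectured_soundness_bits`; the new commit-phase PoW instead hardens the per-round batching challenges. A worked example on illustrative numbers (the struct below mirrors `FriParameters` only loosely):

```rust
// Sketch of the conjectured soundness arithmetic; field values are examples.
struct Params {
    log_blowup: usize,
    num_queries: usize,
    commit_proof_of_work_bits: usize,
    query_proof_of_work_bits: usize,
}

const fn conjectured_soundness_bits(p: &Params) -> usize {
    // Each query contributes log_blowup bits; the query-phase grind adds the rest.
    p.log_blowup * p.num_queries + p.query_proof_of_work_bits
}

fn main() {
    let p = Params {
        log_blowup: 2,
        num_queries: 50,
        commit_proof_of_work_bits: 0, // not counted in this estimate
        query_proof_of_work_bits: 16,
    };
    assert_eq!(p.commit_proof_of_work_bits, 0);
    // 2 * 50 + 16 = 116 bits of conjectured soundness.
    assert_eq!(conjectured_soundness_bits(&p), 116);
}
```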
- let evaluations = quotient_domain.split_evals(num_chunks, quotient_evaluations); - let domains = quotient_domain.split_domains(num_chunks); - + ) -> Vec> { + let (domains, evaluations): (Vec<_>, Vec<_>) = evaluations.into_iter().unzip(); let cis = get_zp_cis(&domains); let last_chunk = num_chunks - 1; let last_chunk_ci_inv = cis[last_chunk].inverse(); @@ -169,7 +182,7 @@ where } } - let ldes: Vec<_> = domains + domains .into_iter() .zip(randomized_evaluations) .enumerate() @@ -215,9 +228,11 @@ where lde_evals.bit_reverse_rows().to_row_major_matrix() }) - .collect(); + .collect() + } - self.inner.mmcs.commit(ldes) + fn commit_ldes(&self, ldes: Vec>) -> (Self::Commitment, Self::ProverData) { + Pcs::::commit_ldes(&self.inner, ldes) } fn get_evaluations_on_domain<'a>( @@ -238,6 +253,23 @@ where HorizontallyTruncated::new(inner_evals, inner_width - self.num_random_codewords).unwrap() } + fn get_evaluations_on_domain_no_random<'a>( + &self, + prover_data: &'a Self::ProverData, + idx: usize, + domain: Self::Domain, + ) -> Self::EvaluationsOnDomain<'a> { + let inner_evals = as Pcs< + Challenge, + Challenger, + >>::get_evaluations_on_domain( + &self.inner, prover_data, idx, domain + ); + let inner_width = inner_evals.width(); + + HorizontallyTruncated::new(inner_evals, inner_width).unwrap() + } + fn open( &self, // For each round, @@ -251,21 +283,45 @@ where )>, challenger: &mut Challenger, ) -> (OpenedValues, Self::Proof) { - let (mut inner_opened_values, inner_proof) = self.inner.open(rounds, challenger); + self.open_with_preprocessing(rounds, challenger, false) + } + fn open_with_preprocessing( + &self, + // For each round, + rounds: Vec<( + &Self::ProverData, + // for each matrix, + Vec< + // points to open + Vec, + >, + )>, + challenger: &mut Challenger, + is_preprocessing: bool, + ) -> (OpenedValues, Self::Proof) { + let (mut inner_opened_values, inner_proof) = + self.inner + .open_with_preprocessing(rounds, challenger, is_preprocessing); // inner_opened_values includes opened values for the random codewords. Those should be // hidden from our caller, so we split them off and store them in the proof. 
let opened_values_rand = inner_opened_values .iter_mut() - .map(|opened_values_for_round| { + .enumerate() + .map(|(idx, opened_values_for_round)| { opened_values_for_round .iter_mut() .map(|opened_values_for_mat| { opened_values_for_mat .iter_mut() .map(|opened_values_for_point| { - let split = - opened_values_for_point.len() - self.num_random_codewords; + let num_random_codewords = + if is_preprocessing && idx == <Self as Pcs<Challenge, Challenger>>::PREPROCESSED_TRACE_IDX { + 0 + } else { + self.num_random_codewords + }; + let split = opened_values_for_point.len() - num_random_codewords; opened_values_for_point.drain(split..).collect() }) .collect() @@ -322,19 +378,23 @@ where fn get_opt_randomization_poly_commitment( &self, - ext_trace_domain: Self::Domain, + ext_trace_domains: impl IntoIterator<Item = Self::Domain>, ) -> Option<(Self::Commitment, Self::ProverData)> { - let random_vals = DenseMatrix::rand( - &mut *self.rng.borrow_mut(), - ext_trace_domain.size(), - self.num_random_codewords + Challenge::DIMENSION, - ); - let extended_domain = <Self as Pcs<Challenge, Challenger>>::natural_domain_for_degree( - self, - ext_trace_domain.size(), - ); + let random_input_vals = ext_trace_domains + .into_iter() + .map(|domain| { + let m = DenseMatrix::rand( + &mut *self.rng.borrow_mut(), + domain.size(), + self.num_random_codewords + Challenge::DIMENSION, + ); + + (domain, m) + }) + .collect::<Vec<_>>(); + let r_commit_and_data = - Pcs::<Challenge, Challenger>::commit(&self.inner, [(extended_domain, random_vals)]); + Pcs::<Challenge, Challenger>::commit(&self.inner, random_input_vals); Some(r_commit_and_data) } } @@ -368,6 +428,29 @@ where result } +/// Adds `num_extra_columns` zero columns to the right of `mat`, then reshapes it by setting the width to +/// `mat.width() + num_extra_columns`. +#[instrument(level = "debug", skip_all)] +fn add_zero_cols<Val>(mat: &RowMajorMatrix<Val>, num_extra_columns: usize) -> RowMajorMatrix<Val> +where + Val: Field, +{ + let old_w = mat.width(); + let new_w = old_w + num_extra_columns; + let h = mat.height(); + + let new_values = Val::zero_vec(new_w * h); + let mut result = RowMajorMatrix::new(new_values, new_w); + + result + .rows_mut() + .zip(mat.row_slices()) + .for_each(|(new_row, old_row)| { + new_row[..old_w].copy_from_slice(old_row); + }); + result +} + /// Compute the normalizing constants for the Lagrange selectors of the provided domains. /// See Section 4.2 of https://eprint.iacr.org/2024/1037.pdf for more details. fn get_zp_cis<D: PolynomialSpace>(qc_domains: &[D]) -> Vec<Val<D>> { diff --git a/fri/src/proof.rs index 584faee2a..ee385b955 100644 --- a/fri/src/proof.rs +++ b/fri/src/proof.rs @@ -11,9 +11,10 @@ use serde::{Deserialize, Serialize}; ))] pub struct FriProof<F: Field, M: Mmcs<F>, Witness, InputProof> { pub commit_phase_commits: Vec<M::Commitment>, + pub commit_pow_witnesses: Vec<Witness>, pub query_proofs: Vec<QueryProof<F, M, InputProof>>, pub final_poly: Vec<F>, - pub pow_witness: Witness, + pub query_pow_witness: Witness, } #[derive(Serialize, Deserialize, Clone)] diff --git a/fri/src/prover.rs index c237a989f..92f0e7b64 100644 --- a/fri/src/prover.rs +++ b/fri/src/prover.rs @@ -86,7 +86,7 @@ where // Produce a proof of work witness before receiving any query challenges. // This helps to prevent grinding attacks. - let pow_witness = challenger.grind(params.proof_of_work_bits); + let pow_witness = challenger.grind(params.query_proof_of_work_bits); let query_proofs = info_span!("query phase").in_scope(|| { // Sample num_queries indexes to check.
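The width-reshape trick in `add_zero_cols`/`commit_preprocessing` is easy to check on a tiny matrix; the sketch below walks a 2x2 row-major buffer through padding and re-reading (plain integers, no field types):

```rust
// Appending w zero columns to an h x w row-major matrix and then re-reading
// the same buffer with width w yields a 2h x w matrix whose odd rows are all
// zero, i.e. the trace interleaved with zero rows.
fn main() {
    let (w, h) = (2, 2);
    let mat: Vec<u64> = vec![1, 2, 3, 4]; // rows [1, 2] and [3, 4]

    // Append w zero columns on the right: each row becomes [a, b, 0, 0].
    let padded: Vec<u64> = mat
        .chunks(w)
        .flat_map(|row| row.iter().copied().chain(core::iter::repeat(0).take(w)))
        .collect();
    assert_eq!(padded, vec![1, 2, 0, 0, 3, 4, 0, 0]);

    // Reinterpreting the buffer with the original width w gives 2h rows,
    // with a zero row after every original row.
    let rows: Vec<&[u64]> = padded.chunks(w).collect();
    assert_eq!(rows.len(), 2 * h);
    assert_eq!(rows[0], &[1, 2][..]);
    assert_eq!(rows[1], &[0, 0][..]);
    assert_eq!(rows[2], &[3, 4][..]);
}
```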
@@ -122,15 +122,17 @@ where FriProof { commit_phase_commits: commit_phase_result.commits, + commit_pow_witnesses: commit_phase_result.pow_witnesses, query_proofs, final_poly: commit_phase_result.final_poly, - pow_witness, + query_pow_witness: pow_witness, } } -struct CommitPhaseResult> { +struct CommitPhaseResult, Witness> { commits: Vec, data: Vec>>, + pow_witnesses: Vec, final_poly: Vec, } @@ -158,18 +160,19 @@ fn commit_phase( params: &FriParameters, inputs: Vec>, challenger: &mut Challenger, -) -> CommitPhaseResult +) -> CommitPhaseResult::Witness> where Val: TwoAdicField, Challenge: ExtensionField, M: Mmcs, - Challenger: FieldChallenger + CanObserve, + Challenger: FieldChallenger + GrindingChallenger + CanObserve, Folding: FriFoldingStrategy, { let mut inputs_iter = inputs.into_iter().peekable(); let mut folded = inputs_iter.next().unwrap(); let mut commits = vec![]; let mut data = vec![]; + let mut pow_witnesses = vec![]; while folded.len() > params.blowup() * params.final_poly_len() { // As folded is in bit reversed order, it looks like: @@ -182,6 +185,11 @@ where challenger.observe(commit.clone()); commits.push(commit); + // Produce a proof of work witness after observing the commitment and + // before the Fiat-Shamir batching challenge. + let pow_witness = challenger.grind(params.commit_proof_of_work_bits); + pow_witnesses.push(pow_witness); + // Get the Fiat-Shamir challenge for this round. let beta: Challenge = challenger.sample_algebra_element(); @@ -211,13 +219,12 @@ where .in_scope(|| Radix2DFTSmallBatch::default().idft_algebra(folded)); // Observe all coefficients of the final polynomial. - for &x in &final_poly { - challenger.observe_algebra_element(x); - } + challenger.observe_algebra_slice(&final_poly); CommitPhaseResult { commits, data, + pow_witnesses, final_poly, } } diff --git a/fri/src/two_adic_pcs.rs b/fri/src/two_adic_pcs.rs index b4bf3b729..f253f6153 100644 --- a/fri/src/two_adic_pcs.rs +++ b/fri/src/two_adic_pcs.rs @@ -223,6 +223,34 @@ where self.mmcs.commit(ldes) } + fn get_quotient_ldes( + &self, + evaluations: impl IntoIterator)>, + _num_chunks: usize, + ) -> Vec> { + evaluations + .into_iter() + .map(|(domain, evals)| { + assert_eq!(domain.size(), evals.height()); + // coset_lde_batch converts from evaluations over `xH` to evaluations over `shift * x * K`. + // Hence, letting `shift = g/x` the output will be evaluations over `gK` as desired. + // When `x = g`, we could just use the standard LDE but currently this doesn't seem + // to give a meaningful performance boost. + let shift = Val::GENERATOR / domain.shift(); + // Compute the LDE with blowup factor fri.log_blowup. + // We bit reverse as this is required by our implementation of the FRI protocol. + self.dft + .coset_lde_batch(evals, self.fri.log_blowup, shift) + .bit_reverse_rows() + .to_row_major_matrix() + }) + .collect() + } + + fn commit_ldes(&self, ldes: Vec>) -> (Self::Commitment, Self::ProverData) { + self.mmcs.commit(ldes) + } + /// Given the evaluations on a domain `gH`, return the evaluations on a different domain `g'K`. /// /// Arguments: @@ -304,7 +332,11 @@ where // Contained in each `Self::ProverData` is a list of matrices which have been committed to. // We extract those matrices to be able to refer to them directly. 
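The new commit-phase ordering (observe the commitment, grind, only then sample beta) can be modeled with a toy transcript: any attempt to re-roll beta forces a fresh grind. All names below are hypothetical stand-ins for the challenger API:

```rust
// Toy transcript with observe/sample/grind, mirroring the per-round ordering
// in commit_phase: observe(commit) -> grind(commit_pow_bits) -> sample(beta).
#[derive(Default)]
struct ToyTranscript {
    state: u64,
}

impl ToyTranscript {
    fn observe(&mut self, x: u64) {
        self.state = self.state.rotate_left(7) ^ x;
    }
    fn sample(&mut self) -> u64 {
        self.state = self.state.wrapping_mul(0x9E3779B97F4A7C15).wrapping_add(1);
        self.state
    }
    fn grind(&mut self, bits: usize) -> u64 {
        // Try witnesses against a forked state; absorb the winner for real.
        let witness = (0u64..)
            .find(|&w| {
                let mut fork = ToyTranscript { state: self.state };
                fork.observe(w);
                fork.sample() & ((1u64 << bits) - 1) == 0
            })
            .unwrap();
        self.observe(witness);
        assert!(self.sample() & ((1u64 << bits) - 1) == 0);
        witness
    }
}

fn main() {
    let mut t = ToyTranscript::default();
    let mut pow_witnesses = Vec::new();
    for commitment in [11u64, 22, 33] {
        t.observe(commitment); // observe this round's Merkle root
        pow_witnesses.push(t.grind(4)); // commit-phase PoW for this round
        let _beta = t.sample(); // only now sample the folding challenge
    }
    assert_eq!(pow_witnesses.len(), 3); // one witness per folding round
}
```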
- let mats_and_points = commitment_data_with_opening_points + let commitment_data_with_opening_pts = commitment_data_with_opening_points + .iter() + .map(|(data, points)| (*data, points.clone())) + .collect::<Vec<_>>(); + let mats_and_points = commitment_data_with_opening_pts .iter() .map(|(data, points)| { let mats = self @@ -390,8 +422,8 @@ where inv_denoms, ) }); - ys.iter() - .for_each(|&y| challenger.observe_algebra_element(y)); + + challenger.observe_algebra_slice(&ys); ys }) .collect_vec() @@ -406,7 +438,7 @@ where // See the discussion in the doc comment of [`prove_fri`]. Essentially, the soundness error // for this sample is tightly tied to the soundness error of the FRI protocol. // Roughly speaking, at a minimum it is k/|EF| where `k` is the sum of, for each function, the number of - points it needs to be opened at. This comes from the fact that we are takeing a large linear combination + points it needs to be opened at. This comes from the fact that we are taking a large linear combination // of `(f(zeta) - f(x))/(zeta - x)` for each function `f` and all of `f`'s opening points. // In our setup, k is two times the trace width plus the number of quotient polynomials. let alpha: Challenge = challenger.sample_algebra_element(); @@ -514,7 +546,7 @@ where fri_input, challenger, log_global_max_height, - &commitment_data_with_opening_points, + &commitment_data_with_opening_pts, &self.mmcs, ); @@ -535,9 +567,7 @@ where for (_, round) in &commitments_with_opening_points { for (_, mat) in round { for (_, point) in mat { - point - .iter() - .for_each(|&opening| challenger.observe_algebra_element(opening)); + challenger.observe_algebra_slice(point); } } } diff --git a/fri/src/verifier.rs b/fri/src/verifier.rs index 1c2b9243b..d953a3314 100644 --- a/fri/src/verifier.rs +++ b/fri/src/verifier.rs @@ -10,19 +10,30 @@ use p3_field::{ExtensionField, Field, TwoAdicField}; use p3_matrix::Dimensions; use p3_util::zip_eq::zip_eq; use p3_util::{log2_strict_usize, reverse_bits_len}; +use thiserror::Error; use crate::{ CommitPhaseProofStep, CommitmentWithOpeningPoints, FriFoldingStrategy, FriParameters, FriProof, QueryProof, }; -#[derive(Debug)] -pub enum FriError<CommitMmcsErr, InputError> { +#[derive(Debug, Error)] +pub enum FriError<CommitMmcsErr, InputError> +where + CommitMmcsErr: core::fmt::Debug, + InputError: core::fmt::Debug, +{ + #[error("invalid proof shape")] InvalidProofShape, + #[error("commit phase MMCS error: {0:?}")] CommitPhaseMmcsError(CommitMmcsErr), + #[error("input error: {0:?}")] InputError(InputError), + #[error("final polynomial mismatch: evaluation does not match expected value")] FinalPolyMismatch, + #[error("invalid proof-of-work witness")] InvalidPowWitness, + #[error("missing input: required input is not present")] MissingInput, } @@ -78,17 +89,25 @@ where let log_global_max_height = proof.commit_phase_commits.len() + params.log_blowup + params.log_final_poly_len; - // Generate all of the random challenges for the FRI rounds. + if proof.commit_pow_witnesses.len() != proof.commit_phase_commits.len() { + return Err(FriError::InvalidProofShape); + } + + // Generate all of the random challenges for the FRI rounds, checking PoW per round. let betas: Vec<Challenge> = proof .commit_phase_commits .iter() - .map(|comm| { - // To match with the prover (and for security purposes), - // we observe the commitment before sampling the challenge. + .zip(&proof.commit_pow_witnesses) + .map(|(comm, witness)| { + // Observe the commitment, check the PoW witness, then sample the + // folding challenge.
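The `shift = g/x` choice in `get_quotient_ldes` above is plain coset arithmetic: a coset LDE with shift `s` maps evaluations over `x*H` to evaluations over `s*x*K`, so picking `s = g/x` lands every input domain on the common coset `g*K`. A numeric sanity check over the toy field Z/17 (the generator 3 and input shift 9 are illustrative choices, not Plonky3 values):

```rust
// Verify s * x == g in Z/17, i.e. that the composed shift is the coset generator.
fn pow_mod(mut b: u64, mut e: u64, p: u64) -> u64 {
    let mut acc = 1;
    while e > 0 {
        if e & 1 == 1 {
            acc = acc * b % p;
        }
        b = b * b % p;
        e >>= 1;
    }
    acc
}

fn main() {
    let p = 17u64;
    let g = 3u64; // generator of the multiplicative group of Z/17 (order 16)
    let x = 9u64; // shift of the input domain x*H
    let x_inv = pow_mod(x, p - 2, p); // Fermat inverse, x_inv = 2 here
    let s = g * x_inv % p; // s = g / x
    assert_eq!(s * x % p, g); // the output domain shift is exactly g
}
```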
challenger.observe(comm.clone()); - challenger.sample_algebra_element() + if !challenger.check_witness(params.commit_proof_of_work_bits, *witness) { + return Err(FriError::InvalidPowWitness); + } + Ok(challenger.sample_algebra_element()) }) - .collect(); + .collect::, _>>()?; // Ensure that the final polynomial has the expected degree. if proof.final_poly.len() != params.final_poly_len() { @@ -96,10 +115,7 @@ where } // Observe all coefficients of the final polynomial. - proof - .final_poly - .iter() - .for_each(|x| challenger.observe_algebra_element(*x)); + challenger.observe_algebra_slice(&proof.final_poly); // Ensure that we have the expected number of FRI query proofs. if proof.query_proofs.len() != params.num_queries { @@ -107,7 +123,7 @@ where } // Check PoW. - if !challenger.check_witness(params.proof_of_work_bits, proof.pow_witness) { + if !challenger.check_witness(params.query_proof_of_work_bits, proof.query_pow_witness) { return Err(FriError::InvalidPowWitness); } diff --git a/fri/tests/fri.rs b/fri/tests/fri.rs index 104a50ad5..f62dd9105 100644 --- a/fri/tests/fri.rs +++ b/fri/tests/fri.rs @@ -35,7 +35,8 @@ fn get_ldt_for_testing(rng: &mut R, log_final_poly_len: usize) -> (Perm, log_blowup: 1, log_final_poly_len, num_queries: 10, - proof_of_work_bits: 8, + commit_proof_of_work_bits: 0, + query_proof_of_work_bits: 8, mmcs: fri_mmcs, }; let dft = Radix2Dit::default(); diff --git a/fri/tests/pcs.rs b/fri/tests/pcs.rs index a7d3378ff..2db5867d7 100644 --- a/fri/tests/pcs.rs +++ b/fri/tests/pcs.rs @@ -180,7 +180,8 @@ mod babybear_fri_pcs { log_blowup, log_final_poly_len: 0, num_queries: 10, - proof_of_work_bits: 8, + commit_proof_of_work_bits: 0, + query_proof_of_work_bits: 8, mmcs: challenge_mmcs, }; @@ -233,7 +234,8 @@ mod m31_fri_pcs { log_blowup, log_final_poly_len: 0, num_queries: 10, - proof_of_work_bits: 8, + commit_proof_of_work_bits: 0, + query_proof_of_work_bits: 8, mmcs: challenge_mmcs, }; let pcs = Pcs { diff --git a/goldilocks/CHANGELOG.md b/goldilocks/CHANGELOG.md new file mode 100644 index 000000000..db69ac834 --- /dev/null +++ b/goldilocks/CHANGELOG.md @@ -0,0 +1,48 @@ +# Changelog + +All notable changes to this project will be documented in this file. + +The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/), +and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0.html). + +## [Unreleased] +## [0.4.2] - 2026-01-05 +### Merged PRs +- Refactor: add public const `new` and `new_array` for all fields (#1222) (Adrian Hamelink) + +### Authors +- Adrian Hamelink + +## [0.4.1] - 2025-12-18 +### Authors + +## [0.4.0] - 2025-12-12 +### Merged PRs +- Chore: add descriptions to all sub-crate manifests (#906) (Himess) +- Adding custom mul/div_exp_2_u64 for the Goldilocks field. (#923) (AngusG) +- Fast GCD Inverse for Goldilocks (#925) (AngusG) +- Packing: small touchups (#937) (Thomas Coratger) +- Use `#[derive(...)]` for Debug and Default for packed fields. 
(#945) (AngusG) +- Adding Macros to remove boilerplate impls (#943) (AngusG) +- Packed Goldilocks Small Refactor (#946) (AngusG) +- Combining Interleave Code (#950) (AngusG) +- Add a macro for implying PackedValue for PackedFields (#949) (AngusG) +- Packing Trick for Field Extensions (#958) (AngusG) +- Remove Nightly Features (#932) (AngusG) +- Move halve to ring (#969) (AngusG) +- Move div_2_exp_u64 to ring (#970) (AngusG) +- Must Use (#996) (AngusG) +- Make Assume unsafe and add a doc comment (#1005) (AngusG) +- Clippy: small step (#1102) (Thomas Coratger) +- Clippy: add nursery (#1103) (Thomas Coratger) +- Clippy: add semicolon_if_nothing_returned (#1107) (Thomas Coratger) +- Clippy: add `needless_pass_by_value` (#1112) (Thomas Coratger) +- Allow users to impl either permute or permute_mut (#1175) (AngusG) +- Implement uniform sampling of bits from field elements (#1050) (Sebastian) + +### Authors +- AngusG +- Himess +- Sebastian +- Thomas Coratger + diff --git a/goldilocks/Cargo.toml b/goldilocks/Cargo.toml index a14b35380..1aedb2c01 100644 --- a/goldilocks/Cargo.toml +++ b/goldilocks/Cargo.toml @@ -10,6 +10,7 @@ keywords.workspace = true categories.workspace = true [dependencies] +p3-challenger.workspace = true p3-dft.workspace = true p3-field.workspace = true p3-mds.workspace = true @@ -23,8 +24,8 @@ rand.workspace = true serde = { workspace = true, features = ["derive"] } [dev-dependencies] -p3-field-testing.workspace = true -p3-poseidon.workspace = true +p3-field-testing = { path = "../field-testing" } +p3-poseidon = { path = "../poseidon" } criterion.workspace = true rand.workspace = true diff --git a/goldilocks/src/goldilocks.rs b/goldilocks/src/goldilocks.rs index a92c7129e..f878197df 100644 --- a/goldilocks/src/goldilocks.rs +++ b/goldilocks/src/goldilocks.rs @@ -7,6 +7,7 @@ use core::ops::{Add, AddAssign, Div, DivAssign, Mul, MulAssign, Neg, Sub, SubAss use core::{array, fmt}; use num_bigint::BigUint; +use p3_challenger::UniformSamplingField; use p3_field::exponentiation::exp_10540996611094048183; use p3_field::integers::QuotientMap; use p3_field::op_assign_macros::{ @@ -37,15 +38,20 @@ pub struct Goldilocks { } impl Goldilocks { - pub(crate) const fn new(value: u64) -> Self { + /// Create a new field element from any `u64`. + /// + /// Any `u64` value is accepted. No reduction is performed since + /// Goldilocks uses a non-canonical internal representation. + #[inline] + pub const fn new(value: u64) -> Self { Self { value } } - /// Convert a constant u64 array into a constant Goldilocks array. + /// Convert a `[u64; N]` array to an array of field elements. /// - /// This is a const version of `.map(Goldilocks::new)`. + /// Const version of `input.map(Goldilocks::new)`. #[inline] - pub(crate) const fn new_array(input: [u64; N]) -> [Self; N] { + pub const fn new_array(input: [u64; N]) -> [Self; N] { let mut output = [Self::ZERO; N]; let mut i = 0; while i < N { @@ -171,6 +177,26 @@ impl Distribution for StandardUniform { } } +impl UniformSamplingField for Goldilocks { + const MAX_SINGLE_SAMPLE_BITS: usize = 24; + const SAMPLING_BITS_M: [u64; 64] = { + let prime: u64 = P; + let mut a = [0u64; 64]; + let mut k = 0; + while k < 64 { + if k == 0 { + a[k] = prime; // This value is irrelevant in practice. `bits = 0` returns 0 always. 
+ } else { + // Create a mask to zero out the last k bits + let mask = !((1u64 << k) - 1); + a[k] = prime & mask; + } + k += 1; + } + a + }; +} + impl PrimeCharacteristicRing for Goldilocks { type PrimeSubfield = Self; diff --git a/goldilocks/src/mds.rs b/goldilocks/src/mds.rs index f79605360..ef3a72c31 100644 --- a/goldilocks/src/mds.rs +++ b/goldilocks/src/mds.rs @@ -73,10 +73,6 @@ impl Permutation<[Goldilocks; 8]> for MdsMatrixGoldilocks { SmallConvolveGoldilocks::conv8, ) } - - fn permute_mut(&self, input: &mut [Goldilocks; 8]) { - *input = self.permute(*input); - } } impl MdsPermutation for MdsMatrixGoldilocks {} @@ -92,10 +88,6 @@ impl Permutation<[Goldilocks; 12]> for MdsMatrixGoldilocks { SmallConvolveGoldilocks::conv12, ) } - - fn permute_mut(&self, input: &mut [Goldilocks; 12]) { - *input = self.permute(*input); - } } impl MdsPermutation for MdsMatrixGoldilocks {} @@ -112,10 +104,6 @@ impl Permutation<[Goldilocks; 16]> for MdsMatrixGoldilocks { SmallConvolveGoldilocks::conv16, ) } - - fn permute_mut(&self, input: &mut [Goldilocks; 16]) { - *input = self.permute(*input); - } } impl MdsPermutation for MdsMatrixGoldilocks {} @@ -133,10 +121,6 @@ impl Permutation<[Goldilocks; 24]> for MdsMatrixGoldilocks { fn permute(&self, input: [Goldilocks; 24]) -> [Goldilocks; 24] { apply_circulant(&MATRIX_CIRC_MDS_24_GOLDILOCKS, &input) } - - fn permute_mut(&self, input: &mut [Goldilocks; 24]) { - *input = self.permute(*input); - } } impl MdsPermutation for MdsMatrixGoldilocks {} @@ -157,10 +141,6 @@ impl Permutation<[Goldilocks; 32]> for MdsMatrixGoldilocks { const ENTRIES: [u64; 32] = first_row_to_first_col(&MATRIX_CIRC_MDS_32_GOLDILOCKS); apply_circulant_fft(&FFT_ALGO, ENTRIES, &input) } - - fn permute_mut(&self, input: &mut [Goldilocks; 32]) { - *input = self.permute(*input); - } } impl MdsPermutation for MdsMatrixGoldilocks {} @@ -189,10 +169,6 @@ impl Permutation<[Goldilocks; 64]> for MdsMatrixGoldilocks { const ENTRIES: [u64; 64] = first_row_to_first_col(&MATRIX_CIRC_MDS_64_GOLDILOCKS); apply_circulant_fft(&FFT_ALGO, ENTRIES, &input) } - - fn permute_mut(&self, input: &mut [Goldilocks; 64]) { - *input = self.permute(*input); - } } impl MdsPermutation for MdsMatrixGoldilocks {} @@ -221,10 +197,6 @@ impl Permutation<[Goldilocks; 68]> for MdsMatrixGoldilocks { fn permute(&self, input: [Goldilocks; 68]) -> [Goldilocks; 68] { apply_circulant(&MATRIX_CIRC_MDS_68_GOLDILOCKS, &input) } - - fn permute_mut(&self, input: &mut [Goldilocks; 68]) { - *input = self.permute(*input); - } } impl MdsPermutation for MdsMatrixGoldilocks {} diff --git a/goldilocks/src/x86_64_avx2/mds.rs b/goldilocks/src/x86_64_avx2/mds.rs index c2fd6e81f..15cba3ecb 100644 --- a/goldilocks/src/x86_64_avx2/mds.rs +++ b/goldilocks/src/x86_64_avx2/mds.rs @@ -22,10 +22,6 @@ impl Permutation<[PackedGoldilocksAVX2; 8]> for MdsMatrixGoldilocks { const MATRIX_CIRC_MDS_8_SML_ROW_U64: [u64; 8] = convert_array(MATRIX_CIRC_MDS_8_SML_ROW); apply_circulant(&MATRIX_CIRC_MDS_8_SML_ROW_U64, &input) } - - fn permute_mut(&self, input: &mut [PackedGoldilocksAVX2; 8]) { - *input = self.permute(*input); - } } impl MdsPermutation for MdsMatrixGoldilocks {} @@ -35,10 +31,6 @@ impl Permutation<[PackedGoldilocksAVX2; 12]> for MdsMatrixGoldilocks { const MATRIX_CIRC_MDS_12_SML_ROW_U64: [u64; 12] = convert_array(MATRIX_CIRC_MDS_12_SML_ROW); apply_circulant(&MATRIX_CIRC_MDS_12_SML_ROW_U64, &input) } - - fn permute_mut(&self, input: &mut [PackedGoldilocksAVX2; 12]) { - *input = self.permute(*input); - } } impl MdsPermutation for MdsMatrixGoldilocks {} @@ 
-48,10 +40,6 @@ impl Permutation<[PackedGoldilocksAVX2; 16]> for MdsMatrixGoldilocks { const MATRIX_CIRC_MDS_16_SML_ROW_U64: [u64; 16] = convert_array(MATRIX_CIRC_MDS_16_SML_ROW); apply_circulant(&MATRIX_CIRC_MDS_16_SML_ROW_U64, &input) } - - fn permute_mut(&self, input: &mut [PackedGoldilocksAVX2; 16]) { - *input = self.permute(*input); - } } impl MdsPermutation for MdsMatrixGoldilocks {} @@ -60,10 +48,6 @@ impl Permutation<[PackedGoldilocksAVX2; 24]> for MdsMatrixGoldilocks { fn permute(&self, input: [PackedGoldilocksAVX2; 24]) -> [PackedGoldilocksAVX2; 24] { apply_circulant(&MATRIX_CIRC_MDS_24_GOLDILOCKS, &input) } - - fn permute_mut(&self, input: &mut [PackedGoldilocksAVX2; 24]) { - *input = self.permute(*input); - } } impl MdsPermutation for MdsMatrixGoldilocks {} diff --git a/goldilocks/src/x86_64_avx512/mds.rs b/goldilocks/src/x86_64_avx512/mds.rs index c58244ae7..e240fc656 100644 --- a/goldilocks/src/x86_64_avx512/mds.rs +++ b/goldilocks/src/x86_64_avx512/mds.rs @@ -22,10 +22,6 @@ impl Permutation<[PackedGoldilocksAVX512; 8]> for MdsMatrixGoldilocks { const MATRIX_CIRC_MDS_8_SML_ROW_U64: [u64; 8] = convert_array(MATRIX_CIRC_MDS_8_SML_ROW); apply_circulant(&MATRIX_CIRC_MDS_8_SML_ROW_U64, &input) } - - fn permute_mut(&self, input: &mut [PackedGoldilocksAVX512; 8]) { - *input = self.permute(*input); - } } impl MdsPermutation for MdsMatrixGoldilocks {} @@ -35,10 +31,6 @@ impl Permutation<[PackedGoldilocksAVX512; 12]> for MdsMatrixGoldilocks { const MATRIX_CIRC_MDS_12_SML_ROW_U64: [u64; 12] = convert_array(MATRIX_CIRC_MDS_12_SML_ROW); apply_circulant(&MATRIX_CIRC_MDS_12_SML_ROW_U64, &input) } - - fn permute_mut(&self, input: &mut [PackedGoldilocksAVX512; 12]) { - *input = self.permute(*input); - } } impl MdsPermutation for MdsMatrixGoldilocks {} @@ -48,10 +40,6 @@ impl Permutation<[PackedGoldilocksAVX512; 16]> for MdsMatrixGoldilocks { const MATRIX_CIRC_MDS_16_SML_ROW_U64: [u64; 16] = convert_array(MATRIX_CIRC_MDS_16_SML_ROW); apply_circulant(&MATRIX_CIRC_MDS_16_SML_ROW_U64, &input) } - - fn permute_mut(&self, input: &mut [PackedGoldilocksAVX512; 16]) { - *input = self.permute(*input); - } } impl MdsPermutation for MdsMatrixGoldilocks {} @@ -60,10 +48,6 @@ impl Permutation<[PackedGoldilocksAVX512; 24]> for MdsMatrixGoldilocks { fn permute(&self, input: [PackedGoldilocksAVX512; 24]) -> [PackedGoldilocksAVX512; 24] { apply_circulant(&MATRIX_CIRC_MDS_24_GOLDILOCKS, &input) } - - fn permute_mut(&self, input: &mut [PackedGoldilocksAVX512; 24]) { - *input = self.permute(*input); - } } impl MdsPermutation for MdsMatrixGoldilocks {} diff --git a/interpolation/CHANGELOG.md b/interpolation/CHANGELOG.md new file mode 100644 index 000000000..f2ca8af3d --- /dev/null +++ b/interpolation/CHANGELOG.md @@ -0,0 +1,28 @@ +# Changelog + +All notable changes to this project will be documented in this file. + +The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/), +and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0.html). 
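These `permute_mut` deletions lean on PR #1175 ("Allow users to impl either permute or permute_mut"): the trait now provides each method in terms of the other, so the hand-written forwarding impls removed above were redundant. A sketch of that cross-defaulting pattern (an assumed shape for illustration; the real trait lives in p3-symmetric):

```rust
// Each default method delegates to the other, so an impl only needs to
// provide one of them.
trait Permutation<T: Clone> {
    fn permute(&self, mut input: T) -> T {
        self.permute_mut(&mut input);
        input
    }

    fn permute_mut(&self, input: &mut T) {
        *input = self.permute(input.clone());
    }
}

struct Reverse;

impl Permutation<[u64; 4]> for Reverse {
    // Overriding just one method is enough; `permute` falls back to this.
    fn permute_mut(&self, input: &mut [u64; 4]) {
        input.reverse();
    }
}

fn main() {
    assert_eq!(Reverse.permute([1, 2, 3, 4]), [4, 3, 2, 1]);
}
```

The one caveat of this pattern is that an impl must override at least one of the two methods; otherwise the defaults recurse into each other forever.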
+ +## [Unreleased] +## [0.4.2] - 2026-01-05 +### Authors + +## [0.4.1] - 2025-12-18 +### Authors + +## [0.4.0] - 2025-12-12 +### Merged PRs +- Field.rs: `Powers::packed_collect_n` (#888) (Adrian Hamelink) +- Chore: add descriptions to all sub-crate manifests (#906) (Himess) +- Chore: use `collect_n` with powers when possible (#963) (Thomas Coratger) +- Refactor: remove redundant clones in crypto modules (#1086) (Skylar Ray) +- Clippy: small step (#1102) (Thomas Coratger) + +### Authors +- Adrian Hamelink +- Himess +- Skylar Ray +- Thomas Coratger + diff --git a/interpolation/Cargo.toml b/interpolation/Cargo.toml index 2ab93fe3e..b1ded097e 100644 --- a/interpolation/Cargo.toml +++ b/interpolation/Cargo.toml @@ -16,7 +16,7 @@ p3-maybe-rayon.workspace = true p3-util.workspace = true [dev-dependencies] -p3-baby-bear.workspace = true +p3-baby-bear = { path = "../baby-bear" } [lints] workspace = true diff --git a/keccak-air/CHANGELOG.md b/keccak-air/CHANGELOG.md new file mode 100644 index 000000000..7e9dbcb5b --- /dev/null +++ b/keccak-air/CHANGELOG.md @@ -0,0 +1,31 @@ +# Changelog + +All notable changes to this project will be documented in this file. + +The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/), +and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0.html). + +## [Unreleased] +## [0.4.2] - 2026-01-05 +### Authors + +## [0.4.1] - 2025-12-18 +### Authors + +## [0.4.0] - 2025-12-12 +### Merged PRs +- Chore: add descriptions to all sub-crate manifests (#906) (Himess) +- Replace `Copy` with `Clone` in `AirBuilder`'s `Var` (#930) (Linda Guiga) +- Keccak air: better doc and some touchups (#942) (Thomas Coratger) +- Remove Nightly Features (#932) (AngusG) +- Clippy: small step (#1102) (Thomas Coratger) +- Refactor: Replace &Vec with &[T] in function parameters (#1111) (Merkel Tranjes) +- Fix(keccak-air): align state indexing with Keccak specification (#1177) (Himess) + +### Authors +- AngusG +- Himess +- Linda Guiga +- Merkel Tranjes +- Thomas Coratger + diff --git a/keccak-air/Cargo.toml b/keccak-air/Cargo.toml index 3f1010deb..85ff08f7c 100644 --- a/keccak-air/Cargo.toml +++ b/keccak-air/Cargo.toml @@ -20,20 +20,20 @@ rand.workspace = true tracing.workspace = true [dev-dependencies] -p3-baby-bear.workspace = true -p3-challenger.workspace = true -p3-circle.workspace = true -p3-commit.workspace = true -p3-dft.workspace = true -p3-fri.workspace = true -p3-goldilocks.workspace = true -p3-keccak.workspace = true -p3-merkle-tree.workspace = true -p3-mersenne-31.workspace = true -p3-monty-31.workspace = true -p3-sha256.workspace = true -p3-symmetric.workspace = true -p3-uni-stark.workspace = true +p3-baby-bear = { path = "../baby-bear" } +p3-challenger = { path = "../challenger" } +p3-circle = { path = "../circle" } +p3-commit = { path = "../commit" } +p3-dft = { path = "../dft" } +p3-fri = { path = "../fri" } +p3-goldilocks = { path = "../goldilocks" } +p3-keccak = { path = "../keccak" } +p3-merkle-tree = { path = "../merkle-tree" } +p3-mersenne-31 = { path = "../mersenne-31" } +p3-monty-31 = { path = "../monty-31" } +p3-sha256 = { path = "../sha256" } +p3-symmetric = { path = "../symmetric" } +p3-uni-stark = { path = "../uni-stark" } tracing-forest = { workspace = true, features = ["ansi", "smallvec"] } tracing-subscriber = { workspace = true, features = ["std", "env-filter"] } diff --git a/keccak-air/src/generation.rs b/keccak-air/src/generation.rs index d05de9387..09563dc8c 100644 --- a/keccak-air/src/generation.rs +++ 
b/keccak-air/src/generation.rs @@ -47,8 +47,14 @@ pub fn generate_trace_rows( /// `rows` will normally consist of 24 rows, with an exception for the final row. fn generate_trace_rows_for_perm(rows: &mut [KeccakCols], input: [u64; 25]) { - let mut current_state: [[u64; 5]; 5] = unsafe { transmute(input) }; + // Convert flat input array to 5x5 matrix. + // The input uses standard Keccak indexing: input[x + 5*y] corresponds to state[x][y]. + // After transmute, we get row-major layout: transmuted[i][j] = input[i*5 + j]. + // To align with Keccak's state[x][y] = input[x + 5*y], we need to transpose. + let transmuted: [[u64; 5]; 5] = unsafe { transmute(input) }; + let mut current_state: [[u64; 5]; 5] = array::from_fn(|x| array::from_fn(|y| transmuted[y][x])); + // initial_state is stored in y-major order for the AIR columns (preimage[y][x]). let initial_state: [[[F; 4]; 5]; 5] = array::from_fn(|y| array::from_fn(|x| u64_to_16_bit_limbs(current_state[x][y]))); @@ -135,3 +141,189 @@ fn generate_trace_row_for_round( row.a_prime_prime_prime_0_0_limbs = u64_to_16_bit_limbs(current_state[0][0]); } + +#[cfg(test)] +mod tests { + use alloc::vec; + + use p3_goldilocks::Goldilocks; + use p3_keccak::KeccakF; + use p3_symmetric::Permutation; + + use super::*; + + /// Helper function to extract the output state from the trace after all 24 rounds. + /// The output is stored in `a_prime_prime_prime` for (0,0) and `a_prime_prime` for others. + fn extract_output_from_trace(rows: &[KeccakCols]) -> [u64; 25] { + let last_row = &rows[NUM_ROUNDS - 1]; + let mut output = [0u64; 25]; + + for y in 0..5 { + for x in 0..5 { + let mut value = 0u64; + for limb in 0..U64_LIMBS { + let limb_val = last_row.a_prime_prime_prime(y, x, limb).as_canonical_u64(); + value |= limb_val << (limb * 16); + } + // Standard Keccak indexing: state[x + 5*y] + output[x + 5 * y] = value; + } + } + output + } + + /// Helper function to extract the input preimage from the trace. 
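The test helpers below pack each 64-bit lane from four 16-bit limbs (least-significant limb first) and rely on the x/y transpose documented above. A self-contained check of both conventions; `to_limbs`/`from_limbs` are hypothetical names mirroring the test logic, not p3-keccak-air functions:

```rust
fn to_limbs(x: u64) -> [u16; 4] {
    core::array::from_fn(|i| (x >> (i * 16)) as u16)
}

fn from_limbs(limbs: [u16; 4]) -> u64 {
    limbs
        .iter()
        .enumerate()
        .fold(0u64, |acc, (i, &l)| acc | ((l as u64) << (i * 16)))
}

fn main() {
    let val = 0x0123_4567_89AB_CDEFu64;
    assert_eq!(from_limbs(to_limbs(val)), val);

    // The x/y transpose used when loading the flat state:
    let input: [u64; 25] = core::array::from_fn(|i| i as u64);
    let transmuted: [[u64; 5]; 5] =
        core::array::from_fn(|i| core::array::from_fn(|j| input[i * 5 + j]));
    let state: [[u64; 5]; 5] =
        core::array::from_fn(|x| core::array::from_fn(|y| transmuted[y][x]));
    for x in 0..5 {
        for y in 0..5 {
            assert_eq!(state[x][y], input[x + 5 * y]); // standard Keccak indexing
        }
    }
}
```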
+ fn extract_input_from_trace(rows: &[KeccakCols]) -> [u64; 25] { + let first_row = &rows[0]; + let mut input = [0u64; 25]; + + for y in 0..5 { + for x in 0..5 { + let mut value = 0u64; + for limb in 0..U64_LIMBS { + let limb_val = first_row.preimage[y][x][limb].as_canonical_u64(); + value |= limb_val << (limb * 16); + } + // Standard Keccak indexing: state[x + 5*y] + input[x + 5 * y] = value; + } + } + input + } + + #[test] + fn test_keccak_permutation_matches_p3_keccak() { + // Test with a non-trivial input state + let input: [u64; 25] = core::array::from_fn(|i| i as u64 * 0x0123456789ABCDEFu64); + + // Compute expected output using p3-keccak (reference implementation) + let mut expected_output = input; + KeccakF.permute_mut(&mut expected_output); + + // Generate trace using our implementation + let trace = generate_trace_rows::(vec![input], 0); + let (prefix, rows, suffix) = unsafe { trace.values.align_to::>() }; + assert!(prefix.is_empty()); + assert!(suffix.is_empty()); + + // Verify input was stored correctly + let stored_input = extract_input_from_trace(&rows[..NUM_ROUNDS]); + assert_eq!( + stored_input, input, + "Input state should match the provided input" + ); + + // Verify output matches p3-keccak + let our_output = extract_output_from_trace(&rows[..NUM_ROUNDS]); + assert_eq!( + our_output, expected_output, + "Keccak-f output should match p3-keccak reference implementation" + ); + } + + #[test] + fn test_keccak_permutation_zero_state() { + // Test with all-zero state + let input = [0u64; 25]; + + let mut expected_output = input; + KeccakF.permute_mut(&mut expected_output); + + let trace = generate_trace_rows::(vec![input], 0); + let (prefix, rows, suffix) = unsafe { trace.values.align_to::>() }; + assert!(prefix.is_empty()); + assert!(suffix.is_empty()); + + let our_output = extract_output_from_trace(&rows[..NUM_ROUNDS]); + assert_eq!( + our_output, expected_output, + "Keccak-f on zero state should match p3-keccak" + ); + } + + #[test] + fn test_keccak_permutation_known_vector() { + // Known test vector: state with only first element set to 1 + let mut input = [0u64; 25]; + input[0] = 1; + + let mut expected_output = input; + KeccakF.permute_mut(&mut expected_output); + + let trace = generate_trace_rows::(vec![input], 0); + let (prefix, rows, suffix) = unsafe { trace.values.align_to::>() }; + assert!(prefix.is_empty()); + assert!(suffix.is_empty()); + + let our_output = extract_output_from_trace(&rows[..NUM_ROUNDS]); + assert_eq!( + our_output, expected_output, + "Keccak-f with input[0]=1 should match p3-keccak" + ); + } + + #[test] + fn test_multiple_permutations() { + // Test multiple permutations in a single trace + let inputs: Vec<[u64; 25]> = (0..4) + .map(|i| core::array::from_fn(|j| (i * 25 + j) as u64)) + .collect(); + + let expected_outputs: Vec<[u64; 25]> = inputs + .iter() + .map(|input| { + let mut output = *input; + KeccakF.permute_mut(&mut output); + output + }) + .collect(); + + let trace = generate_trace_rows::(inputs, 0); + let (prefix, rows, suffix) = unsafe { trace.values.align_to::>() }; + assert!(prefix.is_empty()); + assert!(suffix.is_empty()); + + for (i, expected) in expected_outputs.iter().enumerate() { + let start = i * NUM_ROUNDS; + let our_output = extract_output_from_trace(&rows[start..start + NUM_ROUNDS]); + assert_eq!( + our_output, *expected, + "Permutation {} should match p3-keccak", + i + ); + } + } + + #[test] + fn test_input_output_limb_indexing() { + // Verify that input_limb and output_limb functions use correct indexing + // This tests the 
column mapping for preimage and output + + let input: [u64; 25] = core::array::from_fn(|i| i as u64 + 1); + let trace = generate_trace_rows::(vec![input], 0); + let (prefix, rows, suffix) = unsafe { trace.values.align_to::>() }; + assert!(prefix.is_empty()); + assert!(suffix.is_empty()); + + // Check that preimage is stored in y-major order as per Keccak spec + let first_row = &rows[0]; + for (i_u64, &expected_val) in input.iter().enumerate() { + let y = i_u64 / 5; + let x = i_u64 % 5; + + let mut stored_value = 0u64; + for limb in 0..U64_LIMBS { + let limb_val = first_row.preimage[y][x][limb].as_canonical_u64(); + stored_value |= limb_val << (limb * 16); + } + + // input[i_u64] should be stored at preimage[y][x] where i_u64 = x + 5*y + // So input[x + 5*y] should equal preimage[y][x] + assert_eq!( + stored_value, expected_val, + "preimage[{}][{}] should equal input[{}]", + y, x, i_u64 + ); + } + } +} diff --git a/keccak/CHANGELOG.md b/keccak/CHANGELOG.md new file mode 100644 index 000000000..d59a3d395 --- /dev/null +++ b/keccak/CHANGELOG.md @@ -0,0 +1,29 @@ +# Changelog + +All notable changes to this project will be documented in this file. + +The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/), +and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0.html). + +## [Unreleased] +## [0.4.2] - 2026-01-05 +### Authors + +## [0.4.1] - 2025-12-18 +### Authors + +## [0.4.0] - 2025-12-12 +### Merged PRs +- Chore: add descriptions to all sub-crate manifests (#906) (Himess) +- Remove Nightly Features (#932) (AngusG) +- Clippy: small step (#1102) (Thomas Coratger) +- Clippy: add nursery (#1103) (Thomas Coratger) +- Clippy: add semicolon_if_nothing_returned (#1107) (Thomas Coratger) +- Fixing a few clippy lints (#1115) (AngusG) +- Allow users to impl either permute or permute_mut (#1175) (AngusG) + +### Authors +- AngusG +- Himess +- Thomas Coratger + diff --git a/keccak/Cargo.toml b/keccak/Cargo.toml index 9b3d825e5..5f0c2257d 100644 --- a/keccak/Cargo.toml +++ b/keccak/Cargo.toml @@ -17,7 +17,7 @@ p3-util.workspace = true tiny-keccak = { workspace = true, features = ["keccak"] } [dev-dependencies] -p3-mersenne-31.workspace = true +p3-mersenne-31 = { path = "../mersenne-31" } criterion.workspace = true diff --git a/keccak/src/lib.rs b/keccak/src/lib.rs index 9b3949d6d..cc605c12b 100644 --- a/keccak/src/lib.rs +++ b/keccak/src/lib.rs @@ -85,10 +85,6 @@ impl Permutation<[u8; 200]> for KeccakF { u64_limb.to_le_bytes()[i % 8] }) } - - fn permute_mut(&self, input: &mut [u8; 200]) { - *input = self.permute(*input); - } } impl CryptographicPermutation<[u8; 200]> for KeccakF {} diff --git a/koala-bear/CHANGELOG.md b/koala-bear/CHANGELOG.md new file mode 100644 index 000000000..c4d985b73 --- /dev/null +++ b/koala-bear/CHANGELOG.md @@ -0,0 +1,42 @@ +# Changelog + +All notable changes to this project will be documented in this file. + +The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/), +and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0.html). + +## [Unreleased] +## [0.4.2] - 2026-01-05 +### Authors + +## [0.4.1] - 2025-12-18 +### Authors + +## [0.4.0] - 2025-12-12 +### Merged PRs +- Chore: add descriptions to all sub-crate manifests (#906) (Himess) +- GCD based inversion for 31 bit fields (#921) (AngusG) +- Adding Degree 8 extensions for KoalaBear and BabyBear. 
(#954) (AngusG) +- Fast Octic inverse (#955) (AngusG) +- Packing Trick for Field Extensions (#958) (AngusG) +- Refactor to packed add methods (#972) (AngusG) +- Remove Nightly Features (#932) (AngusG) +- Move div_2_exp_u64 to ring (#970) (AngusG) +- Generic Poseidon2 Simplifications (#987) (AngusG) +- Koalabear: add default poseidon constants (#1008) (Thomas Coratger) +- Poseidon2: add Neon implementation for Monty31 (#1023) (Thomas Coratger) +- Fix: remove unused alloc::format imports (#1066) (Skylar Ray) +- Refactor: remove redundant clones in crypto modules (#1080) (Skylar Ray) +- Clippy: small step (#1102) (Thomas Coratger) +- Clippy: add semicolon_if_nothing_returned (#1107) (Thomas Coratger) +- Refactor: deduplicate field JSON serialization tests (#1162) (andrewshab) +- Implement uniform sampling of bits from field elements (#1050) (Sebastian) + +### Authors +- AngusG +- Himess +- Sebastian +- Skylar Ray +- Thomas Coratger +- andrewshab + diff --git a/koala-bear/Cargo.toml b/koala-bear/Cargo.toml index 0b2908e06..d0d5fc6fa 100644 --- a/koala-bear/Cargo.toml +++ b/koala-bear/Cargo.toml @@ -10,6 +10,7 @@ keywords.workspace = true categories.workspace = true [dependencies] +p3-challenger.workspace = true p3-field.workspace = true p3-monty-31.workspace = true p3-poseidon2.workspace = true @@ -18,9 +19,9 @@ p3-symmetric.workspace = true rand.workspace = true [dev-dependencies] -p3-dft.workspace = true -p3-field-testing.workspace = true -p3-util.workspace = true +p3-dft = { path = "../dft" } +p3-field-testing = { path = "../field-testing" } +p3-util = { path = "../util" } criterion.workspace = true num-bigint.workspace = true diff --git a/koala-bear/src/koala_bear.rs b/koala-bear/src/koala_bear.rs index 3b02d7a5b..97e77116b 100644 --- a/koala-bear/src/koala_bear.rs +++ b/koala-bear/src/koala_bear.rs @@ -1,3 +1,4 @@ +use p3_challenger::UniformSamplingField; use p3_field::exponentiation::exp_1420470955; use p3_field::{Algebra, PrimeCharacteristicRing}; use p3_monty_31::{ @@ -23,6 +24,28 @@ impl MontyParameters for KoalaBearParameters { const MONTY_MU: u32 = 0x81000001; } +impl UniformSamplingField for KoalaBearParameters { + const MAX_SINGLE_SAMPLE_BITS: usize = 24; + // NOTE: We only include `0` to not have to deal with one-off indexing. `k` must be > 0. + // Also, we don't care about k > 30 for KoalaBear. + const SAMPLING_BITS_M: [u64; 64] = { + let prime: u64 = Self::PRIME as u64; + let mut a = [0u64; 64]; + let mut k = 0; + while k < 64 { + if k == 0 { + a[k] = prime; // This value is irrelevant in practice. `bits = 0` returns 0 always. 
+ } else { + // Create a mask to zero out the last k bits + let mask = !((1u64 << k) - 1); + a[k] = prime & mask; + } + k += 1; + } + a + }; +} + impl PackedMontyParameters for KoalaBearParameters {} impl BarrettParameters for KoalaBearParameters {} @@ -109,8 +132,8 @@ mod tests { use p3_field::extension::BinomialExtensionField; use p3_field::{InjectiveMonomial, PermutationMonomial, PrimeField64, TwoAdicField}; use p3_field_testing::{ - test_field, test_field_dft, test_prime_field, test_prime_field_32, test_prime_field_64, - test_two_adic_field, + test_field, test_field_dft, test_field_json_serialization, test_prime_field, + test_prime_field_32, test_prime_field_64, test_two_adic_field, }; use super::*; @@ -147,36 +170,7 @@ mod tests { assert_eq!(m2.injective_exp_n().injective_exp_root_n(), m2); assert_eq!(f_2.injective_exp_n().injective_exp_root_n(), f_2); - let f_serialized = serde_json::to_string(&f).unwrap(); - let f_deserialized: F = serde_json::from_str(&f_serialized).unwrap(); - assert_eq!(f, f_deserialized); - - let f_1_serialized = serde_json::to_string(&f_1).unwrap(); - let f_1_deserialized: F = serde_json::from_str(&f_1_serialized).unwrap(); - let f_1_serialized_again = serde_json::to_string(&f_1_deserialized).unwrap(); - let f_1_deserialized_again: F = serde_json::from_str(&f_1_serialized_again).unwrap(); - assert_eq!(f_1, f_1_deserialized); - assert_eq!(f_1, f_1_deserialized_again); - - let f_2_serialized = serde_json::to_string(&f_2).unwrap(); - let f_2_deserialized: F = serde_json::from_str(&f_2_serialized).unwrap(); - assert_eq!(f_2, f_2_deserialized); - - let f_p_minus_1_serialized = serde_json::to_string(&f_p_minus_1).unwrap(); - let f_p_minus_1_deserialized: F = serde_json::from_str(&f_p_minus_1_serialized).unwrap(); - assert_eq!(f_p_minus_1, f_p_minus_1_deserialized); - - let f_p_minus_2_serialized = serde_json::to_string(&f_p_minus_2).unwrap(); - let f_p_minus_2_deserialized: F = serde_json::from_str(&f_p_minus_2_serialized).unwrap(); - assert_eq!(f_p_minus_2, f_p_minus_2_deserialized); - - let m1_serialized = serde_json::to_string(&m1).unwrap(); - let m1_deserialized: F = serde_json::from_str(&m1_serialized).unwrap(); - assert_eq!(m1, m1_deserialized); - - let m2_serialized = serde_json::to_string(&m2).unwrap(); - let m2_deserialized: F = serde_json::from_str(&m2_serialized).unwrap(); - assert_eq!(m2, m2_deserialized); + test_field_json_serialization(&[f, f_1, f_2, f_p_minus_1, f_p_minus_2, m1, m2]); } // MontyField31's have no redundant representations. diff --git a/lookup/CHANGELOG.md b/lookup/CHANGELOG.md new file mode 100644 index 000000000..dc5360961 --- /dev/null +++ b/lookup/CHANGELOG.md @@ -0,0 +1,36 @@ +# Changelog + +All notable changes to this project will be documented in this file. + +The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/), +and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0.html). 
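The `SAMPLING_BITS_M` tables above store `p` with its low `k` bits cleared, i.e. `M[k] = 2^k * floor(p / 2^k)`, the largest multiple of `2^k` not exceeding `p`. A natural reading of how such a table supports uniform bit sampling (an assumption on our part; the authoritative contract is the `UniformSamplingField` trait in p3-challenger) is rejection sampling: accept a uniform draw `x` in `[0, p)` iff `x < M[k]`, in which case the low `k` bits of `x` are exactly uniform. A sketch:

```rust
/// Hypothetical rejection sampler: returns `k` uniform bits from the first
/// accepted draw, or None if every draw is rejected.
fn sample_k_bits(p: u64, k: u32, draws: impl IntoIterator<Item = u64>) -> Option<u64> {
    let m = p & !((1u64 << k) - 1); // same expression as the table entries
    draws
        .into_iter()
        .find(|&x| x < m)
        .map(|x| x & ((1u64 << k) - 1))
}

fn main() {
    let p = (1u64 << 31) - (1u64 << 24) + 1; // KoalaBear prime 2^31 - 2^24 + 1
    // Toy deterministic "draws" standing in for uniform field elements:
    let draws = (0..10).map(|i| (i * 0x1234_5677) % p);
    assert!(sample_k_bits(p, 8, draws).is_some());
}
```

Since an accepted `x` is uniform on `[0, M[k])` and `M[k]` is a multiple of `2^k`, every k-bit pattern occurs equally often; the rejection probability is `(p - M[k]) / p`, which is less than `2^k / p`.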
+ +## [Unreleased] +## [0.4.2] - 2026-01-05 +### Merged PRs +- Small changes for recursive lookups (#1229) (Linda Guiga) + +### Authors +- Linda Guiga + +## [0.4.1] - 2025-12-18 +### Authors + +## [0.4.0] - 2025-12-12 +### Merged PRs +- Add modular lookups (local and global) with logup implementation (#1090) (Linda Guiga) +- Clippy: small step (#1102) (Thomas Coratger) +- Clippy: add nursery (#1103) (Thomas Coratger) +- Clippy: add `needless_pass_by_value` (#1112) (Thomas Coratger) +- Update lookup traits and add folders with lookups (#1160) (Linda Guiga) +- ExtensionBuilder for SymbolicAirBuilder (#1161) (Linda Guiga) +- Core: add error messages to error enums via thiserror (#1168) (Thomas Coratger) +- Doc: add intra-doc links (#1174) (Robin Salen) +- Integrate lookups to prover and verifier (#1165) (Linda Guiga) +- Core: small touchups (#1186) (Thomas Coratger) + +### Authors +- Linda Guiga +- Robin Salen +- Thomas Coratger + diff --git a/lookup/Cargo.toml b/lookup/Cargo.toml index b00c78a1c..2d8df7bdd 100644 --- a/lookup/Cargo.toml +++ b/lookup/Cargo.toml @@ -15,10 +15,13 @@ p3-field = { workspace = true } p3-matrix = { workspace = true } p3-uni-stark = { workspace = true } rand = { workspace = true } +serde = { workspace = true } +thiserror = { workspace = true } +tracing = { workspace = true } [dev-dependencies] -p3-baby-bear = { workspace = true } -p3-goldilocks = { workspace = true } +p3-baby-bear = { path = "../baby-bear" } +p3-goldilocks = { path = "../goldilocks" } [features] default = [] diff --git a/lookup/src/folder.rs b/lookup/src/folder.rs new file mode 100644 index 000000000..da74b606d --- /dev/null +++ b/lookup/src/folder.rs @@ -0,0 +1,217 @@ +use p3_air::{ + AirBuilder, AirBuilderWithPublicValues, ExtensionBuilder, PeriodicAirBuilder, + PermutationAirBuilder, +}; +use p3_matrix::dense::RowMajorMatrixView; +use p3_matrix::stack::ViewPair; +use p3_uni_stark::{ + PackedChallenge, PackedVal, ProverConstraintFolder, StarkGenericConfig, Val, + VerifierConstraintFolder, +}; + + +pub struct ProverConstraintFolderWithLookups<'a, SC: StarkGenericConfig> { + pub inner: ProverConstraintFolder<'a, SC>, + pub permutation: RowMajorMatrixView<'a, PackedChallenge>, + pub permutation_challenges: &'a [PackedChallenge], +} + +impl<'a, SC: StarkGenericConfig> AirBuilder for ProverConstraintFolderWithLookups<'a, SC> { + type F = Val; + type Expr = PackedVal; + type Var = PackedVal; + type M = RowMajorMatrixView<'a, PackedVal>; + + fn main(&self) -> Self::M { + self.inner.main + } + + fn preprocessed(&self) -> Option { + self.inner.preprocessed + } + + #[inline] + fn is_first_row(&self) -> Self::Expr { + self.inner.is_first_row + } + + #[inline] + fn is_last_row(&self) -> Self::Expr { + self.inner.is_last_row + } + + /// Returns an expression indicating rows where transition constraints should be checked. + /// + /// # Panics + /// This function panics if `size` is not `2`. 
+ #[inline] + fn is_transition_window(&self, size: usize) -> Self::Expr { + if size == 2 { + self.inner.is_transition + } else { + panic!("uni-stark only supports a window size of 2") + } + } + + #[inline] + fn assert_zero>(&mut self, x: I) { + self.inner.assert_zero(x); + } + + #[inline] + fn assert_zeros>(&mut self, array: [I; N]) { + self.inner.assert_zeros(array); + } +} + +impl AirBuilderWithPublicValues + for ProverConstraintFolderWithLookups<'_, SC> +{ + type PublicVar = Self::F; + + #[inline] + fn public_values(&self) -> &[Self::F] { + self.inner.public_values + } +} + +impl ExtensionBuilder for ProverConstraintFolderWithLookups<'_, SC> { + type EF = SC::Challenge; + type ExprEF = PackedChallenge; + type VarEF = PackedChallenge; + + fn assert_zero_ext(&mut self, x: I) + where + I: Into, + { + let alpha_power = self.inner.alpha_powers[self.inner.constraint_index]; + self.inner.accumulator += >::from(alpha_power) * x.into(); + self.inner.constraint_index += 1; + } +} + +impl<'a, SC: StarkGenericConfig> PermutationAirBuilder + for ProverConstraintFolderWithLookups<'a, SC> +{ + type MP = RowMajorMatrixView<'a, PackedChallenge>; + + type RandomVar = PackedChallenge; + fn permutation(&self) -> RowMajorMatrixView<'a, PackedChallenge> { + self.permutation + } + + fn permutation_randomness(&self) -> &[PackedChallenge] { + self.permutation_challenges + } +} + +impl PeriodicAirBuilder for ProverConstraintFolderWithLookups<'_, SC> { + type PeriodicVar = PackedVal; + + fn periodic_values(&self) -> &[Self::PeriodicVar] { + self.inner.periodic_values() + } +} + +pub struct VerifierConstraintFolderWithLookups<'a, SC: StarkGenericConfig> { + pub inner: VerifierConstraintFolder<'a, SC>, + pub permutation: ViewPair<'a, SC::Challenge>, + pub permutation_challenges: &'a [SC::Challenge], +} + +impl<'a, SC: StarkGenericConfig> AirBuilder for VerifierConstraintFolderWithLookups<'a, SC> { + type F = Val; + type Expr = SC::Challenge; + type Var = SC::Challenge; + type M = ViewPair<'a, SC::Challenge>; + + fn main(&self) -> Self::M { + self.inner.main + } + + fn preprocessed(&self) -> Option { + self.inner.preprocessed + } + + #[inline] + fn is_first_row(&self) -> Self::Expr { + self.inner.is_first_row + } + + #[inline] + fn is_last_row(&self) -> Self::Expr { + self.inner.is_last_row + } + + /// Returns an expression indicating rows where transition constraints should be checked. + /// + /// # Panics + /// This function panics if `size` is not `2`. 
+ #[inline] + fn is_transition_window(&self, size: usize) -> Self::Expr { + if size == 2 { + self.inner.is_transition + } else { + panic!("uni-stark only supports a window size of 2") + } + } + + #[inline] + fn assert_zero>(&mut self, x: I) { + self.inner.assert_zero(x); + } + + #[inline] + fn assert_zeros>(&mut self, array: [I; N]) { + self.inner.assert_zeros(array); + } +} + +impl AirBuilderWithPublicValues + for VerifierConstraintFolderWithLookups<'_, SC> +{ + type PublicVar = Self::F; + + #[inline] + fn public_values(&self) -> &[Self::F] { + self.inner.public_values + } +} + +impl ExtensionBuilder for VerifierConstraintFolderWithLookups<'_, SC> { + type EF = SC::Challenge; + type ExprEF = SC::Challenge; + type VarEF = SC::Challenge; + + fn assert_zero_ext(&mut self, x: I) + where + I: Into, + { + self.inner.accumulator *= self.inner.alpha; + self.inner.accumulator += x.into(); + } +} + +impl<'a, SC: StarkGenericConfig> PermutationAirBuilder + for VerifierConstraintFolderWithLookups<'a, SC> +{ + type MP = ViewPair<'a, SC::Challenge>; + + type RandomVar = SC::Challenge; + + fn permutation(&self) -> ViewPair<'a, SC::Challenge> { + self.permutation + } + + fn permutation_randomness(&self) -> &[SC::Challenge] { + self.permutation_challenges + } +} + +impl PeriodicAirBuilder for VerifierConstraintFolderWithLookups<'_, SC> { + type PeriodicVar = SC::Challenge; + + fn periodic_values(&self) -> &[Self::PeriodicVar] { + self.inner.periodic_values() + } +} diff --git a/lookup/src/lib.rs b/lookup/src/lib.rs index bf35b63f0..602604667 100644 --- a/lookup/src/lib.rs +++ b/lookup/src/lib.rs @@ -4,6 +4,7 @@ extern crate alloc; +pub mod folder; pub mod logup; pub mod lookup_traits; #[cfg(test)] diff --git a/lookup/src/logup.rs b/lookup/src/logup.rs index 7df2a6417..f9fe6a717 100644 --- a/lookup/src/logup.rs +++ b/lookup/src/logup.rs @@ -20,11 +20,20 @@ use alloc::vec; use alloc::vec::Vec; -use p3_air::{AirBuilderWithPublicValues, ExtensionBuilder, PairBuilder, PermutationAirBuilder}; +use p3_air::lookup::{LookupError, LookupEvaluator}; +use p3_air::{ + AirBuilderWithPublicValues, ExtensionBuilder, PeriodicAirBuilder, PermutationAirBuilder, +}; use p3_field::{Field, PrimeCharacteristicRing}; use p3_matrix::Matrix; +use p3_matrix::dense::{RowMajorMatrix, RowMajorMatrixView}; +use p3_matrix::stack::VerticalPair; +use p3_uni_stark::{StarkGenericConfig, Val}; +use tracing::instrument; -use crate::lookup_traits::{Kind, Lookup, LookupError, LookupGadget, symbolic_to_expr}; +use crate::lookup_traits::{ + Kind, Lookup, LookupData, LookupGadget, LookupTraceBuilder, symbolic_to_expr, +}; /// Core LogUp gadget implementing lookup arguments via logarithmic derivatives. /// @@ -59,6 +68,32 @@ impl LogUpGadget { Self {} } + /// Computes the combined elements for each tuple using the challenge `beta`: + /// `combined_elements[i] = ∑elements[i][n-j] * β^j` + fn combine_elements( + &self, + elements: &[Vec], + alpha: &AB::ExprEF, + beta: &AB::ExprEF, + ) -> Vec + where + AB: PermutationAirBuilder, + E: Into + Clone, + { + elements + .iter() + .map(|elts| { + // Combine the elements in the tuple using beta. 
+ let combined_elt = elts.iter().fold(AB::ExprEF::ZERO, |acc, elt| { + elt.clone().into() + acc * beta.clone() + }); + + // Compute (α - combined_elt) + alpha.clone() - combined_elt + }) + .collect::>() + } + /// Computes the numerator and denominator of the fraction: /// `∑(m_i / (α - combined_elements[i]))`, where /// `combined_elements[i] = ∑elements[i][n-j] * β^j @@ -81,18 +116,7 @@ impl LogUpGadget { let n = elements.len(); // Precompute all (α - ∑e_{i, j} β^j) terms - let terms = elements - .iter() - .map(|elts| { - // Combine the elements in the tuple using beta. - let combined_elt = elts.iter().fold(AB::ExprEF::ZERO, |acc, elt| { - elt.clone().into() + acc * beta.clone() - }); - - // Compute (α - combined_elt) - alpha.clone() - combined_elt - }) - .collect::>(); + let terms = self.combine_elements::(elements, alpha, beta); // Build prefix products: pref[i] = ∏_{j=0}^{i-1}(α - e_j) let mut pref = Vec::with_capacity(n + 1); @@ -133,10 +157,10 @@ impl LogUpGadget { fn eval_update( &self, builder: &mut AB, - context: Lookup, + context: &Lookup, opt_expected_cumulated: Option, ) where - AB: PermutationAirBuilder + PairBuilder + AirBuilderWithPublicValues, + AB: PeriodicAirBuilder + PermutationAirBuilder + AirBuilderWithPublicValues, { let Lookup { kind, @@ -162,14 +186,14 @@ impl LogUpGadget { .map(|exprs| { exprs .iter() - .map(|expr| symbolic_to_expr(builder, expr)) + .map(|expr| symbolic_to_expr(builder, expr).into()) .collect::>() }) .collect::>(); let multiplicities = multiplicities_exprs .iter() - .map(|expr| symbolic_to_expr(builder, expr)) + .map(|expr| symbolic_to_expr(builder, expr).into()) .collect::>(); // Access the permutation (aux) table. It carries the running sum column `s`. @@ -183,9 +207,9 @@ impl LogUpGadget { ); // Challenge for the running sum. - let alpha = permutation_challenges[2 * column]; + let alpha = permutation_challenges[self.num_challenges() * column]; // Challenge for combining the lookup tuples. - let beta = permutation_challenges[2 * column + 1]; + let beta = permutation_challenges[self.num_challenges() * column + 1]; let s = permutation.row_slice(0).unwrap(); assert!(s.len() > column, "Permutation trace has insufficient width"); @@ -242,7 +266,7 @@ impl LogUpGadget { } } -impl LookupGadget for LogUpGadget { +impl LookupEvaluator for LogUpGadget { fn num_aux_cols(&self) -> usize { 1 } @@ -261,9 +285,9 @@ impl LookupGadget for LogUpGadget { /// `combined_elements[i] = ∑elements[i][n-j] * β^j`. /// /// This is implemented using a running sum column that should sum to zero. 
- fn eval_local_lookup(&self, builder: &mut AB, context: Lookup) + fn eval_local_lookup(&self, builder: &mut AB, context: &Lookup) where - AB: PermutationAirBuilder + PairBuilder + AirBuilderWithPublicValues, +AB: PeriodicAirBuilder + PermutationAirBuilder + AirBuilderWithPublicValues, { if let Kind::Global(_) = context.kind { panic!("Global lookups are not supported in local evaluation") @@ -288,14 +312,16 @@ impl LookupGadget for LogUpGadget { fn eval_global_update( &self, builder: &mut AB, - context: Lookup, + context: &Lookup, expected_cumulated: AB::ExprEF, ) where - AB: PermutationAirBuilder + PairBuilder + AirBuilderWithPublicValues, + AB: PeriodicAirBuilder + PermutationAirBuilder + AirBuilderWithPublicValues, { self.eval_update(builder, context, Some(expected_cumulated)); } +} +impl LookupGadget for LogUpGadget { fn verify_global_final_value( &self, all_expected_cumulative: &[EF], @@ -303,7 +329,9 @@ impl LookupGadget for LogUpGadget { let total = all_expected_cumulative.iter().cloned().sum::(); if !total.is_zero() { - return Err(LookupError::GlobalCumulativeMismatch); + // We set the name associated to the lookup to None because we don't have access to the actual name here. + // The actual name will be set in the verifier directly. + return Err(LookupError::GlobalCumulativeMismatch(None)); } Ok(()) @@ -352,4 +380,190 @@ impl LookupGadget for LogUpGadget { deg_denom_constr.max(deg_num) } + + #[instrument(name = "generate lookup permutation", skip_all, level = "debug")] + fn generate_permutation( + &self, + main: &RowMajorMatrix>, + preprocessed: &Option>>, + public_values: &[Val], + lookups: &[Lookup>], + lookup_data: &mut [LookupData], + permutation_challenges: &[SC::Challenge], + ) -> RowMajorMatrix { + let height = main.height(); + let width = self.num_aux_cols() * lookups.len(); + + // Validate challenge count matches number of lookups. + debug_assert_eq!( + permutation_challenges.len(), + lookups.len() * self.num_challenges(), + "perm challenge count must be per-lookup" + ); + + // Enforce uniqueness of auxiliary column indices across lookups. + #[cfg(debug_assertions)] + { + use alloc::collections::btree_set::BTreeSet; + + let mut seen = BTreeSet::new(); + for ctx in lookups { + let a = ctx.columns[0]; + if !seen.insert(a) { + panic!("duplicate aux column index {a} across lookups"); + } + } + } + // 1. PRE-COMPUTE DENOMINATORS + // We flatten all denominators from all rows/lookups into one giant vector. 
+ // Order: Row -> Lookup -> Element Tuple + let total_denominators: usize = + height * lookups.iter().map(|l| l.element_exprs.len()).sum::<usize>(); + let mut all_denominators = Vec::with_capacity(total_denominators); + + for i in 0..height { + let local_main_row = main.row_slice(i).unwrap(); + let next_main_row = main.row_slice((i + 1) % height).unwrap(); + let main_rows = VerticalPair::new( + RowMajorMatrixView::new_row(&local_main_row), + RowMajorMatrixView::new_row(&next_main_row), + ); + let preprocessed_rows_data = preprocessed.as_ref().map(|prep| { + ( + prep.row_slice(i).unwrap(), + prep.row_slice((i + 1) % height).unwrap(), + ) + }); + let preprocessed_rows = preprocessed_rows_data.as_ref().map( + |(local_preprocessed_row, next_preprocessed_row)| { + VerticalPair::new( + RowMajorMatrixView::new_row(local_preprocessed_row), + RowMajorMatrixView::new_row(next_preprocessed_row), + ) + }, + ); + + let row_builder: LookupTraceBuilder<'_, SC> = LookupTraceBuilder::new( + main_rows, + preprocessed_rows, + public_values, + permutation_challenges, + height, + i, + ); + + for context in lookups { + let aux_idx = context.columns[0]; + let alpha = &permutation_challenges[self.num_challenges() * aux_idx]; + let beta = &permutation_challenges[self.num_challenges() * aux_idx + 1]; + + // Reconstruct elements + let elements = context + .element_exprs + .iter() + .map(|elts| { + elts.iter() + .map(|e| symbolic_to_expr(&row_builder, e)) + .collect::<Vec<_>>() + }) + .collect::<Vec<_>>(); + + // Compute combined element: (alpha - sum(elt * beta^j)) + let denoms = self.combine_elements::<LookupTraceBuilder<'_, SC>, Val<SC>>( + &elements, alpha, beta, + ); + all_denominators.extend(denoms); + } + } + + // 2. BATCH INVERSION + // This turns O(N) inversions into O(1) inversion + O(N) multiplications. + // Recomputing the multiplicities during trace building is cheaper than recomputing the inversions or storing them ahead of time (they could amount to a large volume of data). + let all_inverses = p3_field::batch_multiplicative_inverse(&all_denominators); + + // 3.
BUILD TRACE + let mut aux_trace = vec![SC::Challenge::ZERO; height * width]; + let mut inv_cursor = 0; + let mut permutation_counter = 0; + + for i in 0..height { + let local_main_row = main.row_slice(i).unwrap(); + let next_main_row = main.row_slice((i + 1) % height).unwrap(); + let main_rows = VerticalPair::new( + RowMajorMatrixView::new_row(&local_main_row), + RowMajorMatrixView::new_row(&next_main_row), + ); + + let preprocessed_rows_data = preprocessed.as_ref().map(|prep| { + ( + prep.row_slice(i).unwrap(), + prep.row_slice((i + 1) % height).unwrap(), + ) + }); + let preprocessed_rows = preprocessed_rows_data.as_ref().map( + |(local_preprocessed_row, next_preprocessed_row)| { + VerticalPair::new( + RowMajorMatrixView::new_row(local_preprocessed_row), + RowMajorMatrixView::new_row(next_preprocessed_row), + ) + }, + ); + + let row_builder: LookupTraceBuilder<'_, SC> = LookupTraceBuilder::new( + main_rows, + preprocessed_rows, + public_values, + permutation_challenges, + height, + i, + ); + + lookups.iter().for_each(|context| { + let aux_idx = context.columns[0]; + + // Re-calculate multiplicities only + let multiplicities = context + .multiplicities_exprs + .iter() + .map(|e| symbolic_to_expr(&row_builder, e)) + .collect::<Vec<Val<SC>>>(); + + // Consume inverses for this lookup to compute `sum(multiplicity / combined_elt)` + let sum: SC::Challenge = multiplicities + .iter() + .map(|m| { + let inv = all_inverses[inv_cursor]; + inv_cursor += 1; + inv * SC::Challenge::from(*m) + }) + .sum(); + + // Update running sum + if i < height - 1 { + aux_trace[(i + 1) * width + aux_idx] = aux_trace[i * width + aux_idx] + sum; + } + + // Update the expected cumulative for global lookups, at the last row. + if i == height - 1 { + match context.kind { + Kind::Global(_) => { + lookup_data[permutation_counter].expected_cumulated = + aux_trace[i * width + aux_idx] + sum; + permutation_counter += 1; + } + Kind::Local => {} + } + } + }); + + // Check that we have updated all `lookup_data` entries + if i == height - 1 { + assert_eq!(permutation_counter, lookup_data.len()); + } + } + + // Check that we have consumed all inverses, meaning that `elements` and `multiplicities` lengths matched. + debug_assert_eq!(inv_cursor, all_inverses.len()); + RowMajorMatrix::new(aux_trace, width) + } } diff --git a/lookup/src/lookup_traits.rs b/lookup/src/lookup_traits.rs index 589a20d70..6376684b7 100644 --- a/lookup/src/lookup_traits.rs +++ b/lookup/src/lookup_traits.rs @@ -1,57 +1,48 @@ -use alloc::string::String; use alloc::vec::Vec; -use core::ops::Neg; -use p3_air::{Air, AirBuilderWithPublicValues, PairBuilder, PermutationAirBuilder}; -use p3_field::Field; +use p3_air::lookup::LookupEvaluator; +/// Public re-exports of lookup types. +pub use p3_air::lookup::{Direction, Kind, Lookup, LookupData, LookupError, LookupInput}; +use p3_air::{ + AirBuilder, AirBuilderWithPublicValues, ExtensionBuilder, PeriodicAirBuilder, + PermutationAirBuilder, SymbolicExpression, +}; +use p3_field::{Field, PrimeCharacteristicRing}; use p3_matrix::Matrix; -use p3_uni_stark::{Entry, SymbolicExpression}; - -/// Defines errors that can occur during lookup verification. -#[derive(Debug)] -pub enum LookupError { - /// Error indicating that the global cumulative sum is incorrect. - GlobalCumulativeMismatch, +use p3_matrix::dense::{RowMajorMatrix, RowMajorMatrixView}; +use p3_matrix::stack::ViewPair; +use p3_uni_stark::{Entry, StarkGenericConfig, Val}; +use tracing::warn; + +/// Converts `LookupData<EF>` to `LookupData<SymbolicExpression<EF>>`.
+pub fn lookup_data_to_expr( + lookup_data: &[LookupData], +) -> Vec>> { + lookup_data + .iter() + .map(|data| { + let expected = SymbolicExpression::Constant(data.expected_cumulated.clone()); + LookupData { + name: data.name.clone(), + aux_idx: data.aux_idx, + expected_cumulated: expected, + } + }) + .collect() } /// A trait for lookup argument. -pub trait LookupGadget { - /// Returns the number of auxiliary columns needed by this lookup protocol. - /// - /// For example: - /// - LogUp needs 1 column (running sum) - fn num_aux_cols(&self) -> usize; - - /// Returns the number of challenges for each lookup argument. - /// - /// For example, for LogUp, this is 2: - /// - one challenge for combining the lookup tuples, - /// - one challenge for the running sum. - fn num_challenges(&self) -> usize; - - /// Evaluates a local lookup argument based on the provided context. - /// - /// For example, in LogUp: - /// - this checks that the running sum is updated correctly. - /// - it checks that the final value of the running sum is 0. - fn eval_local_lookup(&self, builder: &mut AB, context: Lookup) - where - AB: PermutationAirBuilder + PairBuilder + AirBuilderWithPublicValues; - - /// Evaluates a global lookup update based on the provided context, and the expected cumulated value. - /// This evaluation is carried out at the AIR level. We still need to check that the permutation argument holds - /// over all AIRs involved in the interaction. - /// - /// For example, in LogUp: - /// - this checks that the running sum is updated correctly. - /// - it checks that the local final value of the running sum is equal to the value provided by the prover. - fn eval_global_update( +pub trait LookupGadget: LookupEvaluator { + /// Generates the permutation matrix for the lookup argument. + fn generate_permutation( &self, - builder: &mut AB, - context: Lookup, - expected_cumulated: AB::ExprEF, - ) where - AB: PermutationAirBuilder + PairBuilder + AirBuilderWithPublicValues; + main: &RowMajorMatrix>, + preprocessed: &Option>>, + public_values: &[Val], + lookups: &[Lookup>], + lookup_data: &mut [LookupData], + permutation_challenges: &[SC::Challenge], + ) -> RowMajorMatrix; /// Evaluates the final cumulated value over all AIRs involved in the interaction, /// and checks that it is equal to the expected final value. @@ -68,174 +59,180 @@ pub trait LookupGadget { fn constraint_degree(&self, context: Lookup) -> usize; } -/// Specifies whether a lookup is local to an AIR or part of a global interaction. -#[derive(Debug, Clone, PartialEq, Eq)] -pub enum Kind { - /// A lookup where all entries are contained within a single AIR. - Local, - /// A lookup that spans multiple AIRs, identified by a unique interaction name. - /// - /// The interaction name is used to identify all elements that are part of the same interaction. - Global(String), +/// A builder to generate the lookup traces, given the main trace, public values and permutation challenges. +pub struct LookupTraceBuilder<'a, SC: StarkGenericConfig> { + main: ViewPair<'a, Val>, + preprocessed: Option>>, + public_values: &'a [Val], + permutation_challenges: &'a [SC::Challenge], + height: usize, + row: usize, } -/// Indicates the direction of data flow in a global lookup. -#[derive(Clone, Copy)] -pub enum Direction { - /// Indicates that elements are being sent (contributed) to the lookup. - Send, - /// Indicates that elements are being received (removed) from the lookup. 
-    Receive,
+impl<'a, SC: StarkGenericConfig> LookupTraceBuilder<'a, SC> {
+    pub const fn new(
+        main: ViewPair<'a, Val<SC>>,
+        preprocessed: Option<ViewPair<'a, Val<SC>>>,
+        public_values: &'a [Val<SC>],
+        permutation_challenges: &'a [SC::Challenge],
+        height: usize,
+        row: usize,
+    ) -> Self {
+        Self {
+            main,
+            preprocessed,
+            public_values,
+            permutation_challenges,
+            height,
+            row,
+        }
+    }
 }
 
-impl Direction {
-    /// Helper method to compute the signed multiplicity based on the direction.
-    pub fn multiplicity<T: Neg<Output = T>>(&self, mult: T) -> T {
-        match self {
-            Self::Send => -mult,
-            Self::Receive => mult,
+impl<'a, SC: StarkGenericConfig> AirBuilder for LookupTraceBuilder<'a, SC> {
+    type F = Val<SC>;
+    type Expr = Val<SC>;
+    type Var = Val<SC>;
+    type M = ViewPair<'a, Val<SC>>;
+
+    #[inline]
+    fn main(&self) -> Self::M {
+        self.main
+    }
+
+    fn preprocessed(&self) -> Option<Self::M> {
+        self.preprocessed
+    }
+
+    #[inline]
+    fn is_first_row(&self) -> Self::Expr {
+        Self::F::from_bool(self.row == 0)
+    }
+
+    #[inline]
+    fn is_last_row(&self) -> Self::Expr {
+        Self::F::from_bool(self.row + 1 == self.height)
+    }
+
+    #[inline]
+    fn is_transition_window(&self, size: usize) -> Self::Expr {
+        if size == 2 {
+            Self::F::from_bool(self.row + 1 < self.height)
+        } else {
+            panic!("uni-stark only supports a window size of 2")
+        }
+    }
+
+    #[inline]
+    fn assert_zero<I: Into<Self::Expr>>(&mut self, x: I) {
+        assert!(x.into() == Self::F::ZERO);
+    }
+
+    #[inline]
+    fn assert_zeros<const N: usize, I: Into<Self::Expr>>(&mut self, array: [I; N]) {
+        for item in array {
+            assert!(item.into() == Self::F::ZERO);
         }
     }
 }
 
-/// A type alias for a lookup input tuple. It contains:
-/// - a vector of symbolic expressions representing the elements involved in the lookup,
-/// - a symbolic expression representing the multiplicity of the lookup,
-/// - a direction indicating whether the elements are being sent or received.
-pub type LookupInput<F> = (Vec<SymbolicExpression<F>>, SymbolicExpression<F>, Direction);
-
-/// A structure that holds the lookup data necessary to generate a `LookupContext`. It is shared between the prover and the verifier.
-#[derive(Clone, Debug)]
-pub struct Lookup<F: Field> {
-    /// Type of lookup: local or global
-    pub kind: Kind,
-    /// Elements being read (consumed from the table). Each `Vec<SymbolicExpression<F>>` actually represents a tuple of elements that are bundled together to make one lookup.
-    pub element_exprs: Vec<Vec<SymbolicExpression<F>>>,
-    /// Multiplicities for the elements.
-    pub multiplicities_exprs: Vec<SymbolicExpression<F>>,
-    /// The column index in the permutation trace for this lookup's running sum
-    pub columns: Vec<usize>,
+impl<SC: StarkGenericConfig> AirBuilderWithPublicValues for LookupTraceBuilder<'_, SC> {
+    type PublicVar = Val<SC>;
+
+    #[inline]
+    fn public_values(&self) -> &[Self::F] {
+        self.public_values
+    }
 }
 
-impl<F: Field> Lookup<F> {
-    /// Creates a new lookup with the specified column.
-    ///
-    /// # Arguments
-    /// * `elements` - Elements from either the main execution trace or a lookup table.
-    /// * `multiplicities` - How many times each `element` should appear
-    /// * `column` - The column index in the permutation trace for this lookup
-    pub const fn new(
-        kind: Kind,
-        element_exprs: Vec<Vec<SymbolicExpression<F>>>,
-        multiplicities_exprs: Vec<SymbolicExpression<F>>,
-        columns: Vec<usize>,
-    ) -> Self {
-        Self {
-            kind,
-            element_exprs,
-            multiplicities_exprs,
-            columns,
-        }
+impl<SC: StarkGenericConfig> ExtensionBuilder for LookupTraceBuilder<'_, SC> {
+    type EF = SC::Challenge;
+    type ExprEF = SC::Challenge;
+    type VarEF = SC::Challenge;
+
+    fn assert_zero_ext<I: Into<Self::ExprEF>>(&mut self, x: I) {
+        assert!(x.into() == SC::Challenge::ZERO);
    }
 }
 
-/// A trait for an AIR that handles lookup arguments.
-pub trait AirLookupHandler<AB>: Air<AB>
-where
-    AB: PermutationAirBuilder + PairBuilder + AirBuilderWithPublicValues,
-    AB::Var: Copy + Into<AB::Expr>,
-    AB::ExprEF: From<AB::Expr> + From<AB::EF>,
-{
-    /// Register a lookup to be used in this AIR.
-    /// This method can be used before proving or verifying, as the resulting data is shared between the prover and the verifier.
-    fn register_lookup(
-        &mut self,
-        kind: Kind,
-        lookup_inputs: &[LookupInput<AB::F>],
-    ) -> Lookup<AB::F> {
-        let (element_exprs, multiplicities_exprs) = lookup_inputs
-            .iter()
-            .map(|(elems, mult, dir)| {
-                let multiplicity = dir.multiplicity(mult.clone());
-                (elems.clone(), multiplicity)
-            })
-            .unzip();
-
-        Lookup {
-            kind,
-            element_exprs,
-            multiplicities_exprs,
-            columns: self.add_lookup_columns(),
-        }
+impl<'a, SC: StarkGenericConfig> PermutationAirBuilder for LookupTraceBuilder<'a, SC> {
+    type MP = RowMajorMatrixView<'a, SC::Challenge>;
+
+    type RandomVar = SC::Challenge;
+    fn permutation(&self) -> RowMajorMatrixView<'a, SC::Challenge> {
+        panic!("we should not be accessing the permutation matrix while building it");
     }
 
-    /// Updates the number of auxiliary columns to account for a new lookup column, and returns its index (or indices).
-    fn add_lookup_columns(&mut self) -> Vec<usize>;
+    fn permutation_randomness(&self) -> &[SC::Challenge] {
+        self.permutation_challenges
+    }
+}
+
+impl<'a, SC: StarkGenericConfig> PeriodicAirBuilder for LookupTraceBuilder<'a, SC> {
+    type PeriodicVar = Val<SC>;
 
-    /// Register all lookups for the current AIR and return them.
-    fn get_lookups(&mut self) -> Vec<Lookup<AB::F>>;
+    fn periodic_values(&self) -> &[Self::PeriodicVar] {
+        &[]
+    }
 }
 
-/// Takes a symbolic expression and converts it into an expression in the context of the provided AirBuilder.
-pub fn symbolic_to_expr<AB: PermutationAirBuilder + PairBuilder + AirBuilderWithPublicValues>(
-    builder: &mut AB,
-    symbolic: &SymbolicExpression<AB::F>,
-) -> AB::ExprEF {
-    let turn_into_expr = |values: &[AB::Var]| {
-        values
-            .iter()
-            .map(|v| AB::Expr::from(v.clone()))
-            .collect::<Vec<_>>()
-    };
-    let main = builder.main();
-    let local_values = &turn_into_expr(&main.row_slice(0).unwrap());
-    let next_values = &turn_into_expr(&main.row_slice(1).unwrap());
-
-    let public_values = builder
-        .public_values()
-        .iter()
-        .map(|v| AB::ExprEF::from((*v).into()))
-        .collect::<Vec<_>>();
-
-    match symbolic {
-        SymbolicExpression::Constant(c) => AB::ExprEF::from(AB::EF::from(*c)),
-        SymbolicExpression::Variable(v) => {
-            let get_val = |offset: usize,
-                           index: usize,
-                           local_vals: &[AB::Expr],
-                           next_vals: &[AB::Expr]| match offset {
-                0 => AB::ExprEF::from(local_vals[index].clone()),
-                1 => AB::ExprEF::from(next_vals[index].clone()),
+/// Evaluates a symbolic expression in the context of an AIR builder.
+///
+/// Converts `SymbolicExpression<AB::F>` to the builder's expression type `AB::Expr`.
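+///
+/// The conversion is a direct recursion: `Variable`s are resolved against the builder's
+/// main, preprocessed, public, or periodic values; row selectors map to the builder's own
+/// selectors; and `Add`/`Sub`/`Neg`/`Mul` recurse on their operands. As a sketch, an
+/// expression of the form `x * y + c` evaluates to
+/// `symbolic_to_expr(builder, &x) * symbolic_to_expr(builder, &y) + AB::Expr::from(c)`.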
+pub fn symbolic_to_expr<AB>(builder: &AB, expr: &SymbolicExpression<AB::F>) -> AB::Expr
+where
+    AB: AirBuilderWithPublicValues + PermutationAirBuilder + PeriodicAirBuilder,
+{
+    match expr {
+        SymbolicExpression::Variable(v) => match v.entry {
+            Entry::Main { offset } => match offset {
+                0 => builder.main().row_slice(0).unwrap()[v.index].clone().into(),
+                1 => builder.main().row_slice(1).unwrap()[v.index].clone().into(),
                 _ => panic!("Cannot have expressions involving more than two rows."),
-            };
-
-            match v.entry {
-                Entry::Main { offset } => get_val(offset, v.index, local_values, next_values),
-                Entry::Public => public_values[v.index].clone(),
-                _ => unimplemented!(),
-            }
+            },
+            Entry::Public => builder.public_values()[v.index].into(),
+            Entry::Preprocessed { offset } => match offset {
+                0 => builder
+                    .preprocessed()
+                    .expect("Missing preprocessed columns")
+                    .row_slice(0)
+                    .unwrap()[v.index]
+                    .clone()
+                    .into(),
+                1 => builder
+                    .preprocessed()
+                    .expect("Missing preprocessed columns")
+                    .row_slice(1)
+                    .unwrap()[v.index]
+                    .clone()
+                    .into(),
+                _ => panic!("Cannot have expressions involving more than two rows."),
+            },
+            Entry::Periodic => builder.periodic_values()[v.index].into(),
+            _ => unimplemented!("Entry type {:?} not supported in interactions", v.entry),
+        },
+        SymbolicExpression::IsFirstRow => {
+            warn!("IsFirstRow is not normalized");
+            builder.is_first_row()
         }
-        SymbolicExpression::Add { x, y, .. } => {
-            let x_expr = symbolic_to_expr(builder, x);
-            let y_expr = symbolic_to_expr(builder, y);
-            x_expr + y_expr
+        SymbolicExpression::IsLastRow => {
+            warn!("IsLastRow is not normalized");
+            builder.is_last_row()
        }
-        SymbolicExpression::Mul { x, y, .. } => {
-            let x_expr = symbolic_to_expr(builder, x);
-            let y_expr = symbolic_to_expr(builder, y);
-            x_expr * y_expr
+        SymbolicExpression::IsTransition => {
+            warn!("IsTransition is not normalized");
+            builder.is_transition_window(2)
+        }
+        SymbolicExpression::Constant(c) => AB::Expr::from(*c),
+        SymbolicExpression::Add { x, y, .. } => {
+            symbolic_to_expr(builder, x) + symbolic_to_expr(builder, y)
        }
         SymbolicExpression::Sub { x, y, .. } => {
-            let x_expr = symbolic_to_expr(builder, x);
-            let y_expr = symbolic_to_expr(builder, y);
-            x_expr - y_expr
+            symbolic_to_expr(builder, x) - symbolic_to_expr(builder, y)
         }
-        SymbolicExpression::Neg { x, .. } => {
-            let x_expr = symbolic_to_expr(builder, x);
-            -x_expr
+        SymbolicExpression::Neg { x, .. } => -symbolic_to_expr(builder, x),
+        SymbolicExpression::Mul { x, y, ..
} => { + symbolic_to_expr(builder, x) * symbolic_to_expr(builder, y) } - SymbolicExpression::IsFirstRow => builder.is_first_row().into(), - SymbolicExpression::IsLastRow => builder.is_last_row().into(), - SymbolicExpression::IsTransition => builder.is_transition().into(), } } diff --git a/lookup/src/tests.rs b/lookup/src/tests.rs index d87fcd524..09f62a788 100644 --- a/lookup/src/tests.rs +++ b/lookup/src/tests.rs @@ -1,10 +1,11 @@ -use alloc::rc::Rc; use alloc::string::ToString; +use alloc::sync::Arc; use alloc::vec; use alloc::vec::Vec; +use p3_air::lookup::LookupEvaluator; use p3_air::{ - Air, AirBuilder, AirBuilderWithPublicValues, BaseAir, ExtensionBuilder, PairBuilder, + Air, AirBuilder, AirBuilderWithPublicValues, BaseAir, ExtensionBuilder, PeriodicAirBuilder, PermutationAirBuilder, }; use p3_baby_bear::BabyBear; @@ -17,9 +18,7 @@ use rand::rngs::SmallRng; use rand::{Rng, SeedableRng}; use crate::logup::LogUpGadget; -use crate::lookup_traits::{ - AirLookupHandler, Direction, Kind, Lookup, LookupGadget, symbolic_to_expr, -}; +use crate::lookup_traits::{Direction, Kind, Lookup, LookupGadget, symbolic_to_expr}; /// Base field type for the test type F = BabyBear; @@ -27,8 +26,8 @@ type F = BabyBear; type EF = BinomialExtensionField; fn create_symbolic_with_degree(degree: usize) -> SymbolicExpression { - let x = Rc::new(SymbolicExpression::Constant(F::ONE)); - let y = Rc::new(SymbolicExpression::Constant(F::TWO)); + let x = Arc::new(SymbolicExpression::Constant(F::ONE)); + let y = Arc::new(SymbolicExpression::Constant(F::TWO)); SymbolicExpression::Mul { x, y, @@ -232,12 +231,6 @@ impl PermutationAirBuilder for MockAirBuilder { } } -impl PairBuilder for MockAirBuilder { - fn preprocessed(&self) -> Self::M { - RowMajorMatrix::new(vec![], 0) - } -} - impl AirBuilderWithPublicValues for MockAirBuilder { type PublicVar = Self::F; @@ -245,6 +238,14 @@ impl AirBuilderWithPublicValues for MockAirBuilder { &[] } } + +impl PeriodicAirBuilder for MockAirBuilder { + type PeriodicVar = F; + + fn periodic_values(&self) -> &[Self::PeriodicVar] { + &[] + } +} /// An AIR designed to perform range checks using the `LogUpGadget`. /// /// This AIR demonstrates how to use LogUp for range checking. It supports multiple @@ -277,29 +278,7 @@ impl RangeCheckAir { impl Air for RangeCheckAir where - AB: PermutationAirBuilder - + PairBuilder - + AirBuilderWithPublicValues, - AB::Var: Copy + Into, - AB::ExprEF: From + From, - F: Copy + Into, -{ - fn eval(&self, _builder: &mut AB) { - // There are no constraints, only lookups for the range checks. - } -} - -impl BaseAir for RangeCheckAir { - fn width(&self) -> usize { - 3 * self.num_lookups // [read, provide, mult] per lookup - } -} - -impl AirLookupHandler for RangeCheckAir -where - AB: PermutationAirBuilder - + PairBuilder - + AirBuilderWithPublicValues, + AB: PermutationAirBuilder + AirBuilderWithPublicValues, AB::Var: Copy + Into, AB::ExprEF: From + From, F: Copy + Into, @@ -313,7 +292,7 @@ where fn get_lookups(&mut self) -> Vec> { let symbolic_air_builder = - SymbolicAirBuilder::::new(0, >::width(self), 0); + SymbolicAirBuilder::::new(0, BaseAir::::width(self), 0, 0, 0); let symbolic_main = symbolic_air_builder.main(); let symbolic_main_local = symbolic_main.row_slice(0).unwrap(); @@ -341,10 +320,20 @@ where ]; // Register the local lookup. 
- >::register_lookup(self, Kind::Local, &lookup_inputs) + Air::::register_lookup(self, Kind::Local, &lookup_inputs) }) .collect::>() } + + fn eval(&self, _builder: &mut AB) { + // There are no constraints, only lookups for the range checks. + } +} + +impl BaseAir for RangeCheckAir { + fn width(&self) -> usize { + 3 * self.num_lookups // [read, provide, mult] per lookup + } } /// Computes the contribution to the LogUp running sum for a single row. @@ -538,7 +527,7 @@ fn test_symbolic_to_expr() { use p3_field::PrimeCharacteristicRing; use p3_uni_stark::SymbolicAirBuilder; - let mut builder = SymbolicAirBuilder::::new(0, 2, 0); + let mut builder = SymbolicAirBuilder::::new(0, 2, 0, 0, 0); let main = builder.main(); @@ -551,7 +540,7 @@ fn test_symbolic_to_expr() { builder.when_transition().assert_zero(sub - local[0]); builder.when_last_row().assert_zero(mul - local[0]); - let constraints = builder.constraints(); + let constraints = builder.base_constraints(); let mut main_flat = Vec::new(); main_flat.extend([F::new(10), F::new(10)]); @@ -596,14 +585,14 @@ fn test_symbolic_to_expr() { let last_expected_val = is_last_row * (mul - EF::from(local[0])); // Evaluate the constraints at row `i`. - let first_eval = symbolic_to_expr(&mut builder, &constraints[0].clone()); - let transition_eval = symbolic_to_expr(&mut builder, &constraints[1].clone()); - let last_eval = symbolic_to_expr(&mut builder, &constraints[2].clone()); + let first_eval = symbolic_to_expr(&builder, &constraints[0]); + let transition_eval = symbolic_to_expr(&builder, &constraints[1]); + let last_eval = symbolic_to_expr(&builder, &constraints[2]); // Assert that the evaluated constraints are correct. - assert_eq!(first_eval, first_expected_val); - assert_eq!(transition_eval, transition_expected_val); - assert_eq!(last_eval, last_expected_val); + assert_eq!(first_expected_val, first_eval.into()); + assert_eq!(transition_expected_val, transition_eval.into()); + assert_eq!(last_expected_val, last_eval.into()); } } @@ -627,7 +616,8 @@ fn test_range_check_end_to_end_valid() { .row(aux_trace.height() - 1) .unwrap() .into_iter() - .collect::>()[0]; + .next() + .unwrap(); let last_row_data = main_trace .row(main_trace.height() - 1) .unwrap() @@ -651,7 +641,7 @@ fn test_range_check_end_to_end_valid() { let mut builder = MockAirBuilder::new(main_trace, aux_trace, challenges.to_vec()); let lookup_gadget = LogUpGadget::new(); - let lookups = >::get_lookups(&mut air); + let lookups = >::get_lookups(&mut air); // Check that the lookup was created correctly. assert_eq!(lookups.len(), 1, "Should have one lookup defined"); @@ -672,7 +662,7 @@ fn test_range_check_end_to_end_valid() { for i in 0..builder.height { builder.for_row(i); lookups.iter().for_each(|lookup| { - lookup_gadget.eval_local_lookup(&mut builder, lookup.clone()); + lookup_gadget.eval_local_lookup(&mut builder, lookup); }); } } @@ -730,7 +720,7 @@ fn test_range_check_end_to_end_invalid() { let mut builder = MockAirBuilder::new(main_trace, aux_trace, vec![alpha, beta]); let lookup_gadget = LogUpGadget::new(); - let lookups = >::get_lookups(&mut air); + let lookups = >::get_lookups(&mut air); // Evaluate constraints. // @@ -738,7 +728,7 @@ fn test_range_check_end_to_end_invalid() { for i in 0..builder.height { builder.for_row(i); lookups.iter().for_each(|lookup| { - lookup_gadget.eval_local_lookup(&mut builder, lookup.clone()); + lookup_gadget.eval_local_lookup(&mut builder, lookup); }); } } @@ -797,13 +787,13 @@ fn test_inconsistent_witness_fails_transition() { // Register the lookups. 
let lookup_gadget = LogUpGadget::new(); - let lookups = >::get_lookups(&mut air); + let lookups = >::get_lookups(&mut air); // Evaluate the constraints. for i in 0..builder.height { builder.for_row(i); lookups.iter().for_each(|lookup| { - lookup_gadget.eval_local_lookup(&mut builder, lookup.clone()); + lookup_gadget.eval_local_lookup(&mut builder, lookup); }); } } @@ -843,7 +833,8 @@ fn test_zero_multiplicity_is_not_counted() { .row(main_trace.height() - 1) .unwrap() .into_iter() - .collect::>()[0]; + .next() + .unwrap(); assert_ne!(final_s, EF::ZERO); // Evaluate constraints @@ -852,7 +843,7 @@ fn test_zero_multiplicity_is_not_counted() { // Register the lookups. let lookup_gadget = LogUpGadget::new(); - let lookups = >::get_lookups(&mut air); + let lookups = >::get_lookups(&mut air); // The initial boundary constraint will fail on row 0 since s[0] is incorrect. // @@ -860,7 +851,7 @@ fn test_zero_multiplicity_is_not_counted() { for i in 0..builder.height { builder.for_row(i); lookups.iter().for_each(|lookup| { - lookup_gadget.eval_local_lookup(&mut builder, lookup.clone()); + lookup_gadget.eval_local_lookup(&mut builder, lookup); }); } } @@ -876,13 +867,13 @@ fn test_empty_lookup_is_valid() { let mut builder = MockAirBuilder::new(main_trace, aux_trace, vec![alpha]); let lookup_gadget = LogUpGadget::new(); - let lookups = >::get_lookups(&mut air); + let lookups = >::get_lookups(&mut air); // This should not panic, as there are no rows to evaluate. for i in 0..builder.height { builder.for_row(i); lookups.iter().for_each(|lookup| { - lookup_gadget.eval_local_lookup(&mut builder, lookup.clone()); + lookup_gadget.eval_local_lookup(&mut builder, lookup); }); } @@ -939,7 +930,8 @@ fn test_nontrivial_permutation() { .row(aux_trace.height() - 1) .unwrap() .into_iter() - .collect::>()[0]; + .next() + .unwrap(); let last_row_data = main_trace .row(main_trace.height() - 1) .unwrap() @@ -964,13 +956,13 @@ fn test_nontrivial_permutation() { // Register the lookups. let lookup_gadget = LogUpGadget::new(); - let lookups = >::get_lookups(&mut air); + let lookups = >::get_lookups(&mut air); // Evaluate constraints for every row for i in 0..builder.height { builder.for_row(i); lookups.iter().for_each(|lookup| { - lookup_gadget.eval_local_lookup(&mut builder, lookup.clone()); + lookup_gadget.eval_local_lookup(&mut builder, lookup); }); } } @@ -1079,7 +1071,7 @@ fn test_multiple_lookups_different_columns() { // Register lookups. let lookup_gadget = LogUpGadget::new(); - let lookups = >::get_lookups(&mut air); + let lookups = >::get_lookups(&mut air); // Check that the lookup was created correctly. 
assert_eq!(lookups.len(), 2, "Should have two lookups defined"); @@ -1100,7 +1092,7 @@ fn test_multiple_lookups_different_columns() { for i in 0..builder.height { builder.for_row(i); lookups.iter().for_each(|lookup| { - lookup_gadget.eval_local_lookup(&mut builder, lookup.clone()); + lookup_gadget.eval_local_lookup(&mut builder, lookup); }); } } @@ -1139,23 +1131,7 @@ impl BaseAir for AddAir { impl Air for AddAir where - AB: PermutationAirBuilder - + PairBuilder - + AirBuilderWithPublicValues, - AB::Var: Copy + Into, - AB::ExprEF: From + From, - F: Copy + Into, -{ - fn eval(&self, _builder: &mut AB) { - // No constraints, only lookups - } -} - -impl AirLookupHandler for AddAir -where - AB: PermutationAirBuilder - + PairBuilder - + AirBuilderWithPublicValues, + AB: PermutationAirBuilder + AirBuilderWithPublicValues, AB::Var: Copy + Into, AB::ExprEF: From + From, F: Copy + Into, @@ -1169,7 +1145,7 @@ where fn get_lookups(&mut self) -> Vec> { let symbolic_air_builder = - SymbolicAirBuilder::::new(0, >::width(self), 0); + SymbolicAirBuilder::::new(0, BaseAir::::width(self), 0, 0, 0); let symbolic_main = symbolic_air_builder.main(); let symbolic_main_local = symbolic_main.row_slice(0).unwrap(); @@ -1196,24 +1172,24 @@ where (b_elements.clone(), b_multiplicities, Direction::Send), ]; - let local_lookup = - >::register_lookup(self, Kind::Local, &lookup_inputs); + let local_lookup = Air::::register_lookup(self, Kind::Local, &lookup_inputs); // also need is_send let (is_global, direction) = self.with_global; if is_global { let lookup_inputs = vec![(b_elements, SymbolicExpression::Constant(F::ONE), direction)]; - let global_lookup = >::register_lookup( - self, - Kind::Global("LUT".to_string()), - &lookup_inputs, - ); + let global_lookup = + Air::::register_lookup(self, Kind::Global("LUT".to_string()), &lookup_inputs); // Return the local and global lookups. return vec![local_lookup, global_lookup]; } // Return the local lookup. vec![local_lookup] } + + fn eval(&self, _builder: &mut AB) { + // No constraints, only lookups + } } #[test] @@ -1237,7 +1213,8 @@ fn test_tuple_lookup() { .row(aux_trace.height() - 1) .unwrap() .into_iter() - .collect::>()[0]; + .next() + .unwrap(); let last_row_data = main_trace .row(main_trace.height() - 1) .unwrap() @@ -1261,13 +1238,13 @@ fn test_tuple_lookup() { // Register the lookups. let lookup_gadget = LogUpGadget::new(); - let lookups = >::get_lookups(&mut air); + let lookups = >::get_lookups(&mut air); // Evaluate the constraints for every row. for i in 0..builder.height { builder.for_row(i); lookups.iter().for_each(|lookup| { - lookup_gadget.eval_local_lookup(&mut builder, lookup.clone()); + lookup_gadget.eval_local_lookup(&mut builder, lookup); }); } } @@ -1398,8 +1375,8 @@ fn test_global_lookup() { // Register the lookups. 
let lookup_gadget = LogUpGadget::new(); - let lookups1 = >::get_lookups(&mut air1); - let lookups2 = >::get_lookups(&mut air2); + let lookups1 = >::get_lookups(&mut air1); + let lookups2 = >::get_lookups(&mut air2); assert_eq!( builder1.height, builder2.height, @@ -1411,14 +1388,10 @@ fn test_global_lookup() { builder1.for_row(i); lookups1.iter().for_each(|lookup| { match &lookup.kind { - Kind::Local => lookup_gadget.eval_local_lookup(&mut builder1, lookup.clone()), + Kind::Local => lookup_gadget.eval_local_lookup(&mut builder1, lookup), Kind::Global(name) => { assert_eq!(*name, "LUT".to_string(), "Global lookup name should match"); - lookup_gadget.eval_global_update( - &mut builder1, - lookup.clone(), - s_global_final1, - ); + lookup_gadget.eval_global_update(&mut builder1, lookup, s_global_final1); } }; }); @@ -1426,14 +1399,10 @@ fn test_global_lookup() { builder2.for_row(i); lookups2.iter().for_each(|lookup| { match &lookup.kind { - Kind::Local => lookup_gadget.eval_local_lookup(&mut builder2, lookup.clone()), + Kind::Local => lookup_gadget.eval_local_lookup(&mut builder2, lookup), Kind::Global(name) => { assert_eq!(*name, "LUT".to_string(), "Global lookup name should match"); - lookup_gadget.eval_global_update( - &mut builder2, - lookup.clone(), - s_global_final2, - ); + lookup_gadget.eval_global_update(&mut builder2, lookup, s_global_final2); } }; }); diff --git a/matrix/CHANGELOG.md b/matrix/CHANGELOG.md new file mode 100644 index 000000000..5c35d1240 --- /dev/null +++ b/matrix/CHANGELOG.md @@ -0,0 +1,35 @@ +# Changelog + +All notable changes to this project will be documented in this file. + +The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/), +and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0.html). + +## [Unreleased] +## [0.4.2] - 2026-01-05 +### Merged PRs +- Refactor(field): Add packed field extraction helpers and FieldArray utilities (#1211) (Adrian Hamelink) + +### Authors +- Adrian Hamelink + +## [0.4.1] - 2025-12-18 +### Authors + +## [0.4.0] - 2025-12-12 +### Merged PRs +- Clippy wants us to put things inside of fmt now instead of just extra arguments... (#916) (AngusG) +- From_biguint method for Bn254 (#914) (AngusG) +- More Clippy Complaints (#931) (AngusG) +- Chore: various small changes (#944) (Thomas Coratger) +- Doc: add better doc in air and fix TODO (#1061) (Thomas Coratger) +- Eq poly: implement batched eval_eq (#1051) (Thomas Coratger) +- Clippy: small step (#1102) (Thomas Coratger) +- Clippy: add nursery (#1103) (Thomas Coratger) +- Matrix: make `HorizontallyTruncated` more generic (#1170) (Thomas Coratger) +- Matrix: add `pad_to_power_of_two_height` (#1185) (Thomas Coratger) + +### Authors +- AngusG +- Thomas Coratger + diff --git a/matrix/Cargo.toml b/matrix/Cargo.toml index c8d821de3..5e97bc807 100644 --- a/matrix/Cargo.toml +++ b/matrix/Cargo.toml @@ -21,8 +21,8 @@ tracing.workspace = true transpose.workspace = true [dev-dependencies] -p3-baby-bear.workspace = true -p3-mersenne-31.workspace = true +p3-baby-bear = { path = "../baby-bear" } +p3-mersenne-31 = { path = "../mersenne-31" } criterion.workspace = true diff --git a/matrix/src/dense.rs b/matrix/src/dense.rs index 6f94ccc31..e1034a72f 100644 --- a/matrix/src/dense.rs +++ b/matrix/src/dense.rs @@ -550,6 +550,24 @@ impl DenseMatrix { assert!(new_height >= self.height()); self.values.resize(self.width * new_height, fill); } + + /// Pad the matrix height to the next power of two by appending rows filled with `fill`. 
+ /// + /// This is commonly used in proof systems where trace matrices must have power-of-two heights. + /// + /// # Behavior + /// + /// - If the matrix is empty (height = 0), it is padded to have exactly one row of `fill` values. + /// - If the height is already a power of two, the matrix is unchanged. + /// - Otherwise, the matrix is padded to the next power of two height. + pub fn pad_to_power_of_two_height(&mut self, fill: T) { + // Compute the target height as the next power of two. + let target_height = self.height().next_power_of_two(); + + // If target_height == height, resize will have no effect. + // Otherwise we pad the matrix to a power of two height by filling with the supplied value. + self.values.resize(self.width * target_height, fill); + } } impl> DenseMatrix { @@ -916,6 +934,65 @@ mod tests { assert_eq!(matrix.values, vec![1, 2, 3, 4, 5, 6, 9, 9, 9, 9, 9, 9]); } + #[test] + fn test_pad_to_power_of_two_height() { + // Test 1: Non-power-of-two height (3 rows -> 4 rows) with fill value 0. + // + // - Original matrix has 3 rows, which is not a power of two. + // - After padding, it should have 4 rows (next power of two). + let mut matrix = RowMajorMatrix::new(vec![1, 2, 3, 4, 5, 6], 2); + assert_eq!(matrix.height(), 3); + matrix.pad_to_power_of_two_height(0); + assert_eq!(matrix.height(), 4); + // Original values preserved, new row filled with 0. + assert_eq!(matrix.values, vec![1, 2, 3, 4, 5, 6, 0, 0]); + + // Test 2: Already power-of-two height (4 rows -> 4 rows, unchanged). + // + // Matrix height is already a power of two, so no padding occurs. + // Fill value is ignored when no padding is needed. + let mut matrix = RowMajorMatrix::new(vec![1, 2, 3, 4, 5, 6, 7, 8], 2); + assert_eq!(matrix.height(), 4); + matrix.pad_to_power_of_two_height(99); + assert_eq!(matrix.height(), 4); + // Values unchanged (fill value not used). + assert_eq!(matrix.values, vec![1, 2, 3, 4, 5, 6, 7, 8]); + + // Test 3: Single row matrix (1 row -> 1 row, unchanged). + // + // Height of 1 is a power of two (2^0 = 1). + let mut matrix = RowMajorMatrix::new(vec![1, 2, 3], 3); + assert_eq!(matrix.height(), 1); + matrix.pad_to_power_of_two_height(42); + assert_eq!(matrix.height(), 1); + assert_eq!(matrix.values, vec![1, 2, 3]); + + // Test 4: 5 rows -> 8 rows with custom fill value (-1). + // + // Demonstrates padding across a larger gap with a non-zero fill value. + let mut matrix = RowMajorMatrix::new(vec![1; 10], 2); + assert_eq!(matrix.height(), 5); + matrix.pad_to_power_of_two_height(-1); + assert_eq!(matrix.height(), 8); + // Original 10 values plus 6 fill values (3 new rows * 2 width). + assert_eq!(matrix.values.len(), 16); + assert!(matrix.values[..10].iter().all(|&v| v == 1)); + assert!(matrix.values[10..].iter().all(|&v| v == -1)); + } + + #[test] + fn test_pad_to_power_of_two_height_empty_matrix() { + // Empty matrix (0 rows) should be padded to 1 row of fill values. + // This ensures the matrix is valid for downstream operations. + let mut matrix: RowMajorMatrix = RowMajorMatrix::new(vec![], 3); + assert_eq!(matrix.height(), 0); + assert_eq!(matrix.width, 3); + matrix.pad_to_power_of_two_height(7); + // After padding: 1 row with 3 columns, all filled with 7. 
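+        // Note that the width is unchanged: padding only ever appends whole rows of `fill`.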
+        assert_eq!(matrix.height(), 1);
+        assert_eq!(matrix.values, vec![7, 7, 7]);
+    }
+
     #[test]
     fn test_transpose_into() {
         let matrix = RowMajorMatrix::new(vec![1, 2, 3, 4, 5, 6], 3);
diff --git a/matrix/src/horizontally_truncated.rs b/matrix/src/horizontally_truncated.rs
index bfce45380..be3af0360 100644
--- a/matrix/src/horizontally_truncated.rs
+++ b/matrix/src/horizontally_truncated.rs
@@ -1,15 +1,18 @@
 use core::marker::PhantomData;
+use core::ops::Range;
 
 use crate::Matrix;
 
-/// A matrix wrapper that limits the number of columns visible from an inner matrix.
+/// A matrix wrapper that exposes a contiguous range of columns from an inner matrix.
 ///
-/// This struct wraps another matrix and restricts access to only the first `truncated_width` columns.
+/// This struct:
+/// - wraps another matrix,
+/// - restricts access to only the columns within the specified `column_range`.
 pub struct HorizontallyTruncated<T, Inner> {
     /// The underlying full matrix being wrapped.
     inner: Inner,
-    /// The number of columns to expose from the inner matrix.
-    truncated_width: usize,
+    /// The range of columns to expose from the inner matrix.
+    column_range: Range<usize>,
     /// Marker for the element type `T`, not used at runtime.
     _phantom: PhantomData<T>,
 }
@@ -22,13 +25,26 @@ where
     ///
     /// # Arguments
     /// - `inner`: The full inner matrix to be wrapped.
-    /// - `truncated_width`: The number of columns to expose (must be ≤ `inner.width()`).
+    /// - `truncated_width`: The number of columns to expose from the start (must be ≤ `inner.width()`).
+    ///
+    /// This is equivalent to `new_with_range(inner, 0..truncated_width)`.
     ///
     /// Returns `None` if `truncated_width` is greater than the width of the inner matrix.
     pub fn new(inner: Inner, truncated_width: usize) -> Option<Self> {
-        (truncated_width <= inner.width()).then(|| Self {
+        Self::new_with_range(inner, 0..truncated_width)
+    }
+
+    /// Construct a new view exposing a specific column range of a matrix.
+    ///
+    /// # Arguments
+    /// - `inner`: The full inner matrix to be wrapped.
+    /// - `column_range`: The range of columns to expose (must satisfy `column_range.end <= inner.width()`).
+    ///
+    /// Returns `None` if the column range extends beyond the width of the inner matrix.
+    pub fn new_with_range(inner: Inner, column_range: Range<usize>) -> Option<Self> {
+        (column_range.end <= inner.width()).then(|| Self {
             inner,
-            truncated_width,
+            column_range,
             _phantom: PhantomData,
         })
     }
@@ -42,7 +58,7 @@ where
     /// Returns the number of columns exposed by the truncated matrix.
     #[inline(always)]
     fn width(&self) -> usize {
-        self.truncated_width
+        self.column_range.len()
     }
 
     /// Returns the number of rows in the matrix (same as the inner matrix).
@@ -54,8 +70,10 @@ where
     #[inline(always)]
     unsafe fn get_unchecked(&self, r: usize, c: usize) -> T {
         unsafe {
-            // Safety: The caller must ensure that `c < truncated_width` and `r < self.height()`.
-            self.inner.get_unchecked(r, c)
+            // Safety: The caller must ensure that `c < self.width()` and `r < self.height()`.
+            //
+            // We translate the column index by adding `column_range.start`.
+            self.inner.get_unchecked(r, self.column_range.start + c)
         }
     }
 
@@ -65,7 +83,8 @@ where
     ) -> impl IntoIterator<Item = T, IntoIter: Iterator<Item = T> + Send + Sync> {
         unsafe {
             // Safety: The caller must ensure that `r < self.height()`.
- self.inner.row_subseq_unchecked(r, 0, self.truncated_width) + self.inner + .row_subseq_unchecked(r, self.column_range.start, self.column_range.end) } } @@ -77,7 +96,13 @@ where ) -> impl IntoIterator + Send + Sync> { unsafe { // Safety: The caller must ensure that r < self.height() and start <= end <= self.width(). - self.inner.row_subseq_unchecked(r, start, end) + // + // We translate the column indices by adding `column_range.start`. + self.inner.row_subseq_unchecked( + r, + self.column_range.start + start, + self.column_range.start + end, + ) } } @@ -89,7 +114,13 @@ where ) -> impl core::ops::Deref { unsafe { // Safety: The caller must ensure that `r < self.height()` and `start <= end <= self.width()`. - self.inner.row_subslice_unchecked(r, start, end) + // + // We translate the column indices by adding `column_range.start`. + self.inner.row_subslice_unchecked( + r, + self.column_range.start + start, + self.column_range.start + end, + ) } } } @@ -233,4 +264,143 @@ mod tests { // Attempt to truncate beyond inner width (invalid). assert!(HorizontallyTruncated::new(inner, 5).is_none()); } + + #[test] + fn test_column_range_middle() { + // Create a 3x5 matrix: + // [ 1 2 3 4 5] + // [ 6 7 8 9 10] + // [11 12 13 14 15] + let inner = RowMajorMatrix::new(vec![1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15], 5); + + // Select columns 1..4 (columns 1, 2, 3). + let view = HorizontallyTruncated::new_with_range(inner, 1..4).unwrap(); + + // Width should be 3 (columns 1, 2, 3). + assert_eq!(view.width(), 3); + + // Height remains unchanged. + assert_eq!(view.height(), 3); + + // Check individual elements (column indices are relative to the view). + assert_eq!(view.get(0, 0), Some(2)); // row 0, col 0 -> inner col 1 + assert_eq!(view.get(0, 1), Some(3)); // row 0, col 1 -> inner col 2 + assert_eq!(view.get(0, 2), Some(4)); // row 0, col 2 -> inner col 3 + assert_eq!(view.get(1, 0), Some(7)); // row 1, col 0 -> inner col 1 + assert_eq!(view.get(2, 2), Some(14)); // row 2, col 2 -> inner col 3 + + unsafe { + assert_eq!(view.get_unchecked(1, 1), 8); // row 1, col 1 -> inner col 2 + assert_eq!(view.get_unchecked(2, 0), 12); // row 2, col 0 -> inner col 1 + } + + // Row 0: should return [2, 3, 4] + let row0: Vec<_> = view.row(0).unwrap().into_iter().collect(); + assert_eq!(row0, vec![2, 3, 4]); + + // Row 1: should return [7, 8, 9] + let row1: Vec<_> = view.row(1).unwrap().into_iter().collect(); + assert_eq!(row1, vec![7, 8, 9]); + + unsafe { + // Row 2: should return [12, 13, 14] + let row2: Vec<_> = view.row_unchecked(2).into_iter().collect(); + assert_eq!(row2, vec![12, 13, 14]); + + // Subsequence of row 1, cols 1..3 (view indices) -> [8, 9] + let row1_subseq: Vec<_> = view.row_subseq_unchecked(1, 1, 3).into_iter().collect(); + assert_eq!(row1_subseq, vec![8, 9]); + } + + // Out of bounds checks. + assert!(view.get(0, 3).is_none()); // Width out of bounds + assert!(view.get(3, 0).is_none()); // Height out of bounds + + // Convert the view to a RowMajorMatrix and check contents. + let as_matrix = view.to_row_major_matrix(); + + // The expected matrix after selecting columns 1..4: + // [2 3 4] + // [7 8 9] + // [12 13 14] + let expected = RowMajorMatrix::new(vec![2, 3, 4, 7, 8, 9, 12, 13, 14], 3); + + assert_eq!(as_matrix, expected); + } + + #[test] + fn test_column_range_end() { + // Create a 2x4 matrix: + // [1 2 3 4] + // [5 6 7 8] + let inner = RowMajorMatrix::new(vec![1, 2, 3, 4, 5, 6, 7, 8], 4); + + // Select columns 2..4 (columns 2, 3). 
+ let view = HorizontallyTruncated::new_with_range(inner, 2..4).unwrap(); + + assert_eq!(view.width(), 2); + assert_eq!(view.height(), 2); + + // Row 0: should return [3, 4] + let row0: Vec<_> = view.row(0).unwrap().into_iter().collect(); + assert_eq!(row0, vec![3, 4]); + + // Row 1: should return [7, 8] + let row1: Vec<_> = view.row(1).unwrap().into_iter().collect(); + assert_eq!(row1, vec![7, 8]); + + assert_eq!(view.get(0, 0), Some(3)); + assert_eq!(view.get(1, 1), Some(8)); + } + + #[test] + fn test_column_range_single_column() { + // Create a 3x4 matrix: + // [1 2 3 4] + // [5 6 7 8] + // [9 10 11 12] + let inner = RowMajorMatrix::new(vec![1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12], 4); + + // Select only column 2. + let view = HorizontallyTruncated::new_with_range(inner, 2..3).unwrap(); + + assert_eq!(view.width(), 1); + assert_eq!(view.height(), 3); + + assert_eq!(view.get(0, 0), Some(3)); + assert_eq!(view.get(1, 0), Some(7)); + assert_eq!(view.get(2, 0), Some(11)); + + // Row 0: should return [3] + let row0: Vec<_> = view.row(0).unwrap().into_iter().collect(); + assert_eq!(row0, vec![3]); + } + + #[test] + fn test_column_range_empty() { + // Create a 2x3 matrix: + // [1 2 3] + // [4 5 6] + let inner = RowMajorMatrix::new(vec![1, 2, 3, 4, 5, 6], 3); + + // Select empty range (2..2). + let view = HorizontallyTruncated::new_with_range(inner, 2..2).unwrap(); + + assert_eq!(view.width(), 0); + assert_eq!(view.height(), 2); + + // Row should be empty. + assert!(view.row(0).unwrap().into_iter().next().is_none()); + } + + #[test] + fn test_invalid_column_range() { + // Create a 2x3 matrix: + // [1 2 3] + // [4 5 6] + let inner = RowMajorMatrix::new(vec![1, 2, 3, 4, 5, 6], 3); + + // Attempt to select columns 1..5 (extends beyond width). + assert!(HorizontallyTruncated::new_with_range(inner, 1..5).is_none()); + } } diff --git a/matrix/src/lib.rs b/matrix/src/lib.rs index 8f13d29e8..1b9d93d96 100644 --- a/matrix/src/lib.rs +++ b/matrix/src/lib.rs @@ -10,7 +10,7 @@ use core::ops::Deref; use itertools::{Itertools, izip}; use p3_field::{ - BasedVectorSpace, ExtensionField, Field, PackedValue, PrimeCharacteristicRing, dot_product, + ExtensionField, Field, PackedFieldExtension, PackedValue, PrimeCharacteristicRing, dot_product, }; use p3_maybe_rayon::prelude::*; use strided::{VerticallyStridedMatrixView, VerticallyStridedRowIndexMap}; @@ -435,9 +435,7 @@ pub trait Matrix: Send + Sync { .par_fold_reduce( || EF::ExtensionPacking::zero_vec(packed_width), |mut acc, (row, &scale)| { - let scale = EF::ExtensionPacking::from_basis_coefficients_fn(|i| { - T::Packing::from(scale.as_basis_coefficients_slice()[i]) - }); + let scale = EF::ExtensionPacking::from(scale); izip!(&mut acc, row).for_each(|(l, r)| *l += scale * r); acc }, @@ -447,15 +445,7 @@ pub trait Matrix: Send + Sync { }, ); - packed_result - .into_iter() - .flat_map(|p| { - (0..T::Packing::WIDTH).map(move |i| { - EF::from_basis_coefficients_fn(|j| { - p.as_basis_coefficients_slice()[j].as_slice()[i] - }) - }) - }) + EF::ExtensionPacking::to_ext_iter(packed_result) .take(self.width()) .collect() } @@ -487,14 +477,7 @@ pub trait Matrix: Send + Sync { .map(move |row_packed| { let packed_sum_of_packed: EF::ExtensionPacking = dot_product(vec.iter().copied(), row_packed); - let sum_of_packed: EF = EF::from_basis_coefficients_fn(|i| { - packed_sum_of_packed.as_basis_coefficients_slice()[i] - .as_slice() - .iter() - .copied() - .sum() - }); - sum_of_packed + EF::ExtensionPacking::to_ext_iter([packed_sum_of_packed]).sum() }) } } diff --git 
a/maybe-rayon/CHANGELOG.md b/maybe-rayon/CHANGELOG.md new file mode 100644 index 000000000..7447ab7b5 --- /dev/null +++ b/maybe-rayon/CHANGELOG.md @@ -0,0 +1,30 @@ +# Changelog + +All notable changes to this project will be documented in this file. + +The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/), +and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0.html). + +## [Unreleased] +## [0.4.2] - 2026-01-05 +### Merged PRs +- SIMD optimization for proof-of-work grinding in DuplexChallenger (#1208) (Utsav Sharma) + +### Authors +- Utsav Sharma + +## [0.4.1] - 2025-12-18 +### Authors + +## [0.4.0] - 2025-12-12 +### Merged PRs +- Chore(maybe-rayon): replace deprecated repeatn with repeat_n (#1064) (Skylar Ray) +- Update lib.rs (#1091) (AJoX) +- Clippy: small step (#1102) (Thomas Coratger) +- Challenger: add `observe_base_as_algebra_element ` to `FieldChallenger` trait (#1152) (Thomas Coratger) + +### Authors +- AJoX +- Skylar Ray +- Thomas Coratger + diff --git a/maybe-rayon/src/serial.rs b/maybe-rayon/src/serial.rs index dc8c0c222..1b501d6b8 100644 --- a/maybe-rayon/src/serial.rs +++ b/maybe-rayon/src/serial.rs @@ -1,7 +1,6 @@ use core::iter::{FlatMap, IntoIterator, Iterator}; use core::marker::{Send, Sized, Sync}; use core::ops::{Fn, FnOnce}; -use core::option::Option; use core::slice::{ Chunks, ChunksExact, ChunksExactMut, ChunksMut, RChunks, RChunksExact, RChunksExactMut, RChunksMut, Split, SplitMut, Windows, @@ -144,6 +143,10 @@ pub trait ParIterExt: Iterator { where P: Fn(&Self::Item) -> bool + Sync + Send; + fn find_map_any(self, predicate: P) -> Option + where + P: Fn(Self::Item) -> Option + Sync + Send; + fn flat_map_iter(self, map_op: F) -> FlatMap where Self: Sized, @@ -159,6 +162,13 @@ impl ParIterExt for T { self.find(predicate) } + fn find_map_any(mut self, predicate: P) -> Option + where + P: Fn(Self::Item) -> Option + Sync + Send, + { + self.find_map(predicate) + } + fn flat_map_iter(self, map_op: F) -> FlatMap where Self: Sized, diff --git a/mds/CHANGELOG.md b/mds/CHANGELOG.md new file mode 100644 index 000000000..db2b8b60f --- /dev/null +++ b/mds/CHANGELOG.md @@ -0,0 +1,32 @@ +# Changelog + +All notable changes to this project will be documented in this file. + +The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/), +and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0.html). 
+ +## [Unreleased] +## [0.4.2] - 2026-01-05 +### Authors + +## [0.4.1] - 2025-12-18 +### Authors + +## [0.4.0] - 2025-12-12 +### Merged PRs +- Field.rs: `Powers::packed_collect_n` (#888) (Adrian Hamelink) +- Chore: add descriptions to all sub-crate manifests (#906) (Himess) +- Chore: use `collect_n` with powers when possible (#963) (Thomas Coratger) +- Clippy: small step (#1102) (Thomas Coratger) +- Refactor(mds): Eliminate code duplication in bowers_g_layer functions (#1098) (andrewshab) +- Clippy: add semicolon_if_nothing_returned (#1107) (Thomas Coratger) +- Clippy: add `needless_pass_by_value` (#1112) (Thomas Coratger) +- Allow users to impl either permute or permute_mut (#1175) (AngusG) + +### Authors +- Adrian Hamelink +- AngusG +- Himess +- Thomas Coratger +- andrewshab + diff --git a/mds/Cargo.toml b/mds/Cargo.toml index 77df76692..c5daff05c 100644 --- a/mds/Cargo.toml +++ b/mds/Cargo.toml @@ -18,9 +18,9 @@ p3-util.workspace = true rand.workspace = true [dev-dependencies] -p3-baby-bear.workspace = true -p3-goldilocks.workspace = true -p3-mersenne-31.workspace = true +p3-baby-bear = { path = "../baby-bear" } +p3-goldilocks = { path = "../goldilocks" } +p3-mersenne-31 = { path = "../mersenne-31" } criterion.workspace = true diff --git a/mds/src/coset_mds.rs b/mds/src/coset_mds.rs index 52ce99502..ef71aedbe 100644 --- a/mds/src/coset_mds.rs +++ b/mds/src/coset_mds.rs @@ -46,11 +46,6 @@ where } impl, const N: usize> Permutation<[A; N]> for CosetMds { - fn permute(&self, mut input: [A; N]) -> [A; N] { - self.permute_mut(&mut input); - input - } - fn permute_mut(&self, values: &mut [A; N]) { // Inverse DFT, except we skip bit reversal and rescaling by 1/N. bowers_g_t(values, &self.ifft_twiddles); diff --git a/mds/src/integrated_coset_mds.rs b/mds/src/integrated_coset_mds.rs index 0de05654b..c4496a153 100644 --- a/mds/src/integrated_coset_mds.rs +++ b/mds/src/integrated_coset_mds.rs @@ -49,11 +49,6 @@ impl Default for IntegratedCosetMds { } impl, const N: usize> Permutation<[A; N]> for IntegratedCosetMds { - fn permute(&self, mut input: [A; N]) -> [A; N] { - self.permute_mut(&mut input); - input - } - fn permute_mut(&self, values: &mut [A; N]) { let log_n = log2_strict_usize(N); diff --git a/merkle-tree/CHANGELOG.md b/merkle-tree/CHANGELOG.md new file mode 100644 index 000000000..9eeb9717a --- /dev/null +++ b/merkle-tree/CHANGELOG.md @@ -0,0 +1,37 @@ +# Changelog + +All notable changes to this project will be documented in this file. + +The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/), +and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0.html). + +## [Unreleased] +## [0.4.2] - 2026-01-05 +### Merged PRs +- Refactor(field): Add packed field extraction helpers and FieldArray utilities (#1211) (Adrian Hamelink) + +### Authors +- Adrian Hamelink + +## [0.4.1] - 2025-12-18 +### Authors + +## [0.4.0] - 2025-12-12 +### Merged PRs +- Merkle tree: add documentation for MerkleTreeMmcs and errors (#908) (Thomas Coratger) +- Clippy wants us to put things inside of fmt now instead of just extra arguments... 
(#916) (AngusG) +- Merkle tree: full documentation for first_digest_layer (#924) (Thomas Coratger) +- Merkle tree: very small doc touchup (#928) (Thomas Coratger) +- Merkle tree: add const assert (#1040) (Thomas Coratger) +- Doc: add better doc in air and fix TODO (#1061) (Thomas Coratger) +- Clippy: small step (#1102) (Thomas Coratger) +- Clippy: add semicolon_if_nothing_returned (#1107) (Thomas Coratger) +- Clippy: add `needless_pass_by_value` (#1112) (Thomas Coratger) +- Add input size checks in MMCS (#1119) (Sai) +- Core: add error messages to error enums via thiserror (#1168) (Thomas Coratger) + +### Authors +- AngusG +- Sai +- Thomas Coratger + diff --git a/merkle-tree/Cargo.toml b/merkle-tree/Cargo.toml index 987ade501..4fd11c8fc 100644 --- a/merkle-tree/Cargo.toml +++ b/merkle-tree/Cargo.toml @@ -20,14 +20,15 @@ p3-util.workspace = true itertools.workspace = true rand.workspace = true serde = { workspace = true, features = ["alloc"] } +thiserror.workspace = true tracing.workspace = true [dev-dependencies] -p3-baby-bear.workspace = true -p3-blake3.workspace = true -p3-keccak.workspace = true -p3-mds.workspace = true -p3-rescue.workspace = true +p3-baby-bear = { path = "../baby-bear" } +p3-blake3 = { path = "../blake3" } +p3-keccak = { path = "../keccak" } +p3-mds = { path = "../mds" } +p3-rescue = { path = "../rescue" } criterion.workspace = true diff --git a/merkle-tree/src/merkle_tree.rs b/merkle-tree/src/merkle_tree.rs index 76237e2d9..2646c7d20 100644 --- a/merkle-tree/src/merkle_tree.rs +++ b/merkle-tree/src/merkle_tree.rs @@ -231,10 +231,7 @@ where ); // Unpack the resulting packed digest into individual scalar digests. - // Then, assign each to its slot in the current chunk. - for (dst, src) in digests_chunk.iter_mut().zip(unpack_array(packed_digest)) { - *dst = src; - } + PW::unpack_into(&packed_digest, digests_chunk); }); // Handle leftover rows that do not form a full SIMD batch (if any). @@ -302,9 +299,7 @@ where .flat_map(|m| m.vertically_packed_row(first_row)), ); packed_digest = c.compress([packed_digest, tallest_digest]); - for (dst, src) in digests_chunk.iter_mut().zip(unpack_array(packed_digest)) { - *dst = src; - } + PW::unpack_into(&packed_digest, digests_chunk); }); // If our packing width did not divide next_len, fall back to single-threaded scalar code @@ -371,9 +366,7 @@ where let left = array::from_fn(|j| P::from_fn(|k| prev_layer[2 * (first_row + k)][j])); let right = array::from_fn(|j| P::from_fn(|k| prev_layer[2 * (first_row + k) + 1][j])); let packed_digest = c.compress([left, right]); - for (dst, src) in digests_chunk.iter_mut().zip(unpack_array(packed_digest)) { - *dst = src; - } + P::unpack_into(&packed_digest, digests_chunk); }); // If our packing width did not divide next_len, fall back to single-threaded scalar code @@ -388,17 +381,6 @@ where next_digests } -/// Converts a packed array `[P; N]` into its underlying `P::WIDTH` scalar arrays. -/// -/// Interprets `[P; N]` as the matrix `[[P::Value; P::WIDTH]; N]`, performs a transpose to -/// get `[[P::Value; N] P::WIDTH]` and returns these `P::Value` arrays as an iterator. 
-#[inline] -fn unpack_array( - packed_digest: [P; N], -) -> impl Iterator { - (0..P::WIDTH).map(move |j| packed_digest.map(|p| p.as_slice()[j])) -} - #[cfg(test)] mod tests { use p3_symmetric::PseudoCompressionFunction; @@ -501,30 +483,4 @@ mod tests { // also validate the padding branch explicitly assert_eq!(result.len(), 4); } - - #[test] - fn test_unpack_array_basic() { - // Validate that `unpack_array` emits WIDTH (= 4) scalar arrays in the - // right order when the packed words are `[u8; 4]`. - - // Two packed “words”, each four lanes wide - let packed: [[u8; 4]; 2] = [ - [0, 1, 2, 3], // first word - [4, 5, 6, 7], // second word - ]; - - // After unpacking we expect four rows (the width), - // each row picking lane *j* from every packed word. - let rows: Vec<[u8; 2]> = unpack_array::<[u8; 4], 2>(packed).collect(); - - assert_eq!( - rows, - vec![ - [0, 4], // lane-0 of both packed words - [1, 5], // lane-1 - [2, 6], // lane-2 - [3, 7], // lane-3 - ] - ); - } } diff --git a/merkle-tree/src/mmcs.rs b/merkle-tree/src/mmcs.rs index 887a14009..a3a077761 100644 --- a/merkle-tree/src/mmcs.rs +++ b/merkle-tree/src/mmcs.rs @@ -30,6 +30,7 @@ use p3_matrix::{Dimensions, Matrix}; use p3_symmetric::{CryptographicHasher, Hash, PseudoCompressionFunction}; use p3_util::{log2_ceil_usize, log2_strict_usize}; use serde::{Deserialize, Serialize}; +use thiserror::Error; use crate::MerkleTree; use crate::MerkleTreeError::{ @@ -61,15 +62,18 @@ pub struct MerkleTreeMmcs { } /// Errors that may arise during Merkle tree commitment, opening, or verification. -#[derive(Debug)] +#[derive(Debug, Error)] pub enum MerkleTreeError { /// The number of openings provided does not match the expected number. + #[error("wrong batch size: number of openings does not match expected")] WrongBatchSize, /// A matrix has a different width than expected. + #[error("wrong width: matrix has a different width than expected")] WrongWidth, /// The number of proof nodes does not match the expected tree height. + #[error("wrong height: expected log_max_height {log_max_height}, got {num_siblings} siblings")] WrongHeight { /// Expected log2 of the maximum matrix height. log_max_height: usize, @@ -79,9 +83,11 @@ pub enum MerkleTreeError { }, /// Matrix heights are incompatible; they cannot share a common binary Merkle tree. + #[error("incompatible heights: matrices cannot share a common binary Merkle tree")] IncompatibleHeights, /// The queried row index exceeds the maximum height. + #[error("index out of bounds: index {index} exceeds max height {max_height}")] IndexOutOfBounds { /// Maximum admissible height. max_height: usize, @@ -90,9 +96,11 @@ pub enum MerkleTreeError { }, /// The computed Merkle root does not match the provided commitment. + #[error("root mismatch: computed Merkle root does not match commitment")] RootMismatch, /// Attempted to open an empty batch (no committed matrices). + #[error("empty batch: attempted to open an empty batch with no committed matrices")] EmptyBatch, } diff --git a/mersenne-31/CHANGELOG.md b/mersenne-31/CHANGELOG.md new file mode 100644 index 000000000..d000e9fe4 --- /dev/null +++ b/mersenne-31/CHANGELOG.md @@ -0,0 +1,60 @@ +# Changelog + +All notable changes to this project will be documented in this file. + +The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/), +and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0.html). 
+ +## [Unreleased] +## [0.4.2] - 2026-01-05 +### Merged PRs +- Refactor: add public const `new` and `new_array` for all fields (#1222) (Adrian Hamelink) + +### Authors +- Adrian Hamelink + +## [0.4.1] - 2025-12-18 +### Merged PRs +- mersenne 31: optimize Poseidon2 for aarch64 Neon (#1196) (Thomas Coratger) + +## [0.4.0] - 2025-12-12 +### Merged PRs +- Chore: add descriptions to all sub-crate manifests (#906) (Himess) +- GCD based inversion for 31 bit fields (#921) (AngusG) +- Fixing a pair of clippy complaints in AVX512 (#926) (AngusG) +- More Clippy Complaints (#931) (AngusG) +- Packing: small touchups (#937) (Thomas Coratger) +- Use `#[derive(...)]` for Debug and Default for packed fields. (#945) (AngusG) +- Adding Macros to remove boilerplate impls (#943) (AngusG) +- Combining Interleave Code (#950) (AngusG) +- Add a macro for implying PackedValue for PackedFields (#949) (AngusG) +- Chore: use `collect_n` with powers when possible (#963) (Thomas Coratger) +- Packing Trick for Field Extensions (#958) (AngusG) +- Refactor to packed add methods (#972) (AngusG) +- Remove Nightly Features (#932) (AngusG) +- Move halve to ring (#969) (AngusG) +- Packed Sub Refactor (#979) (AngusG) +- Move div_2_exp_u64 to ring (#970) (AngusG) +- Must Use (#996) (AngusG) +- Generic Poseidon2 Simplifications (#987) (AngusG) +- More Const Assert fixes (#1024) (AngusG) +- Perf: optimize ext_two_adic_generator with precomputed table (#1038) (Avory) +- Mersenne-31: Implement NEON-optimized halve for PackedMersenne31Neon (#1054) (VolodymyrBg) +- Clippy: small step (#1102) (Thomas Coratger) +- Clippy: add nursery (#1103) (Thomas Coratger) +- Clippy: add semicolon_if_nothing_returned (#1107) (Thomas Coratger) +- Clippy: add `needless_pass_by_value` (#1112) (Thomas Coratger) +- Fixing a few clippy lints (#1115) (AngusG) +- Fix: Add bounds check to circle_two_adic_generator to prevent underflow (#1130) (Fibonacci747) +- Allow users to impl either permute or permute_mut (#1175) (AngusG) +- Implement uniform sampling of bits from field elements (#1050) (Sebastian) + +### Authors +- AngusG +- Avory +- Fibonacci747 +- Himess +- Sebastian +- Thomas Coratger +- VolodymyrBg + diff --git a/mersenne-31/Cargo.toml b/mersenne-31/Cargo.toml index 9883161e9..c78edd6ed 100644 --- a/mersenne-31/Cargo.toml +++ b/mersenne-31/Cargo.toml @@ -10,6 +10,7 @@ keywords.workspace = true categories.workspace = true [dependencies] +p3-challenger.workspace = true p3-dft.workspace = true p3-field.workspace = true p3-matrix.workspace = true @@ -25,7 +26,7 @@ rand.workspace = true serde = { workspace = true, features = ["derive"] } [dev-dependencies] -p3-field-testing.workspace = true +p3-field-testing = { path = "../field-testing" } criterion.workspace = true rand_xoshiro.workspace = true diff --git a/mersenne-31/src/aarch64_neon/packing.rs b/mersenne-31/src/aarch64_neon/packing.rs index 8562c9ff7..d112b201d 100644 --- a/mersenne-31/src/aarch64_neon/packing.rs +++ b/mersenne-31/src/aarch64_neon/packing.rs @@ -35,7 +35,7 @@ impl PackedMersenne31Neon { #[inline] #[must_use] /// Get an arch-specific vector representing the packed values. - fn to_vector(self) -> uint32x4_t { + pub(crate) fn to_vector(self) -> uint32x4_t { unsafe { // Safety: `Mersenne31` is `repr(transparent)` so it can be transmuted to `u32`. It // follows that `[Mersenne31; WIDTH]` can be transmuted to `[u32; WIDTH]`, which can be @@ -52,7 +52,7 @@ impl PackedMersenne31Neon { /// SAFETY: The caller must ensure that each element of `vector` represents a valid /// `Mersenne31`. 
In particular, each element of vector must be in `0..=P` (i.e. it fits in 31
    /// bits).
-    unsafe fn from_vector(vector: uint32x4_t) -> Self {
+    pub(crate) unsafe fn from_vector(vector: uint32x4_t) -> Self {
         // Safety: It is up to the user to ensure that elements of `vector` represent valid
         // `Mersenne31` values. We must only reason about memory representations. `uint32x4_t` can
         // be transmuted to `[u32; WIDTH]` (since array elements are contiguous in memory), which
@@ -169,6 +169,32 @@ impl PrimeCharacteristicRing for PackedMersenne31Neon {
         }
     }
 
+    #[inline(always)]
+    fn exp_const_u64<const POWER: u64>(&self) -> Self {
+        // We provide specialised code for power 5 as this turns up regularly.
+        //
+        // The other powers could be specialised similarly but we ignore this for now.
+        match POWER {
+            0 => Self::ONE,
+            1 => *self,
+            2 => self.square(),
+            3 => self.cube(),
+            4 => self.square().square(),
+            5 => unsafe {
+                let val = self.to_vector();
+                Self::from_vector(exp5(val))
+            },
+            6 => self.square().cube(),
+            7 => {
+                let x2 = self.square();
+                let x3 = x2 * *self;
+                let x4 = x2.square();
+                x3 * x4
+            }
+            _ => self.exp_u64(POWER),
+        }
+    }
+
     #[inline(always)]
     fn zero_vec(len: usize) -> Vec<Self> {
         // SAFETY: this is a repr(transparent) wrapper around an array.
@@ -291,6 +317,33 @@ unsafe impl PackedField for PackedMersenne31Neon {
     type Scalar = Mersenne31;
 }
 
+/// Compute the permutation x -> x^5 on Mersenne-31 field elements.
+///
+/// # Safety
+/// `x` must be represented as a value in `{0, ..., P}`.
+/// If the input does not conform to this representation, the result is undefined.
+/// The output will be represented as a value in `{0, ..., P}`.
+///
+/// # TODO
+/// This could be further improved with a specialized function.
+#[inline(always)]
+pub(crate) fn exp5(x: uint32x4_t) -> uint32x4_t {
+    // For Mersenne31, x^5 = x * x^4 = x * (x^2)^2
+    //
+    // We compute:
+    //   x2 = x * x
+    //   x4 = x2 * x2
+    //   x5 = x4 * x
+    //
+    // throughput: ~4 cyc/vec
+    // latency: ~30 cyc (3 dependent multiplications)
+
+    // x is guaranteed to be in [0, P]
+    let x2 = mul(x, x);
+    let x4 = mul(x2, x2);
+    mul(x4, x)
+}
+
 impl_packed_field_pow_2!(
     PackedMersenne31Neon;
     [
diff --git a/mersenne-31/src/aarch64_neon/poseidon2.rs b/mersenne-31/src/aarch64_neon/poseidon2.rs
index d9fa07622..d422ab8b6 100644
--- a/mersenne-31/src/aarch64_neon/poseidon2.rs
+++ b/mersenne-31/src/aarch64_neon/poseidon2.rs
@@ -1,67 +1,316 @@
-//! Eventually this will hold a vectorized Neon implementation of Poseidon2 for PackedMersenne31Neon
-//! Currently this is essentially a placeholder to allow compilation on Neon devices.
-//!
-//! Converting the AVX2/AVX512 code across to Neon is on the TODO list.
+//! Vectorized NEON implementation of Poseidon2 for Mersenne31.
 
 use alloc::vec::Vec;
+use core::arch::aarch64::{self, uint32x4_t};
+use core::mem::transmute;
 
+use p3_field::PrimeCharacteristicRing;
 use p3_poseidon2::{
-    ExternalLayer, ExternalLayerConstants, ExternalLayerConstructor, GenericPoseidon2LinearLayers,
-    InternalLayer, InternalLayerConstructor, MDSMat4, add_rc_and_sbox_generic,
-    external_initial_permute_state, external_terminal_permute_state,
+    ExternalLayer, ExternalLayerConstants, ExternalLayerConstructor, InternalLayer,
+    InternalLayerConstructor, MDSMat4, external_initial_permute_state,
+    external_terminal_permute_state,
 };
 
-use crate::{GenericPoseidon2LinearLayersMersenne31, Mersenne31, PackedMersenne31Neon};
+use super::packing::exp5;
+use crate::{Mersenne31, PackedMersenne31Neon};
 
-/// The internal layers of the Poseidon2 permutation.
+/// The prime P = 2^31 - 1 as a packed NEON vector.
+const P: uint32x4_t = unsafe { transmute::<[u32; 4], _>([0x7fffffff; 4]) };
+
+/// The internal layers of the Poseidon2 permutation for Mersenne31.
+///
+/// Constants are stored in standard POSITIVE form `{0, ..., P}`.
 #[derive(Debug, Clone)]
 pub struct Poseidon2InternalLayerMersenne31 {
+    /// The scalar round constants for each internal round.
     pub(crate) internal_constants: Vec<Mersenne31>,
-}
-
-/// The external layers of the Poseidon2 permutation.
-#[derive(Clone)]
-pub struct Poseidon2ExternalLayerMersenne31<const WIDTH: usize> {
-    pub(crate) external_constants: ExternalLayerConstants<Mersenne31, WIDTH>,
+    /// The round constants packed into NEON vectors for vectorized computation.
+    packed_internal_constants: Vec<uint32x4_t>,
 }

 impl InternalLayerConstructor<Mersenne31> for Poseidon2InternalLayerMersenne31 {
     fn new_from_constants(internal_constants: Vec<Mersenne31>) -> Self {
-        Self { internal_constants }
+        let packed_internal_constants = internal_constants
+            .iter()
+            .map(|c| unsafe { aarch64::vdupq_n_u32(c.value) })
+            .collect();
+        Self {
+            internal_constants,
+            packed_internal_constants,
+        }
     }
 }

+/// The external layers of the Poseidon2 permutation for Mersenne31.
+///
+/// Constants are stored in standard POSITIVE form `{0, ..., P}`.
+#[derive(Clone)]
+pub struct Poseidon2ExternalLayerMersenne31<const WIDTH: usize> {
+    /// The scalar round constants for both initial and terminal external rounds.
+    pub(crate) external_constants: ExternalLayerConstants<Mersenne31, WIDTH>,
+    /// The initial external round constants packed into NEON vectors.
+    packed_initial_external_constants: Vec<[uint32x4_t; WIDTH]>,
+    /// The terminal external round constants packed into NEON vectors.
+    packed_terminal_external_constants: Vec<[uint32x4_t; WIDTH]>,
+}
+
 impl<const WIDTH: usize> ExternalLayerConstructor<Mersenne31, WIDTH>
     for Poseidon2ExternalLayerMersenne31<WIDTH>
 {
     fn new_from_constants(external_constants: ExternalLayerConstants<Mersenne31, WIDTH>) -> Self {
-        Self { external_constants }
+        let packed_initial_external_constants = external_constants
+            .get_initial_constants()
+            .iter()
+            .map(|arr| arr.map(|c| unsafe { aarch64::vdupq_n_u32(c.value) }))
+            .collect();
+        let packed_terminal_external_constants = external_constants
+            .get_terminal_constants()
+            .iter()
+            .map(|arr| arr.map(|c| unsafe { aarch64::vdupq_n_u32(c.value) }))
+            .collect();
+        Self {
+            external_constants,
+            packed_initial_external_constants,
+            packed_terminal_external_constants,
+        }
     }
 }

-impl<const WIDTH: usize> InternalLayer<PackedMersenne31Neon, WIDTH, 5>
-    for Poseidon2InternalLayerMersenne31
-where
-    GenericPoseidon2LinearLayersMersenne31: GenericPoseidon2LinearLayers<PackedMersenne31Neon, WIDTH>,
-{
+/// Compute the map `x -> 2^I * x` on Mersenne-31 field elements using cyclic rotation.
+///
+/// For Mersenne-31 (P = 2^31 - 1), we have `2^31 ≡ 1 (mod P)`, which means multiplication
+/// by `2^I` is equivalent to a cyclic rotation of the binary representation.
+///
+/// # Safety
+/// `val` must be represented as a value in `{0, ..., P}`.
+/// If the input does not conform to this representation, the result is undefined.
+///
+/// # Generic Parameters
+/// - `I`: The exponent (shift amount)
+/// - `I_PRIME`: Must satisfy `I + I_PRIME = 31`
+#[inline(always)]
+pub(crate) fn mul_2exp_i<const I: i32, const I_PRIME: i32>(
+    val: PackedMersenne31Neon,
+) -> PackedMersenne31Neon {
+    // We want this to compile to:
+    //      ushr  lo.4s, val.4s, #(31 - I)  // Shift high bits down
+    //      sli   lo.4s, val.4s, #I         // Shift low bits up and insert into lo
+    //      and   res.4s, lo.4s, P.4s       // Clear the dirty sign bit
+    // throughput: ~0.75 cyc/vec
+    // latency: 4 cyc

+    const {
+        assert!(I + I_PRIME == 31);
+    }
+
+    unsafe {
+        // Safety: If this code got compiled then NEON intrinsics are available.
+        let input = val.to_vector();
+
+        // 1. Shift high bits down to the bottom.
+        let lo = aarch64::vshrq_n_u32::<I_PRIME>(input);
+
+        // 2. Shift low bits up and INSERT them into the accumulator.
+        //
+        // `vsli` (Vector Shift Left and Insert) shifts `input` left by I bits,
+        // then inserts the result into `lo`, preserving the low I bits of `lo`.
+        //
+        // Result: bits[30:I] = input[30-I:0], bits[I-1:0] = lo[I-1:0]
+        let inserted = aarch64::vsliq_n_u32::<I>(lo, input);
+
+        // 3. Clear the sign bit (which might be dirty from the shift left).
+        let output = aarch64::vandq_u32(inserted, P);
+
+        PackedMersenne31Neon::from_vector(output)
+    }
+}
+
+/// We hard-code multiplication by the diagonal minus 1 of our internal matrix (1 + Diag(V)).
+///
+/// For Mersenne31 with WIDTH = 16, the diagonal minus 1 is:
+/// `[-2] + 1 << [0, 1, 2, 3, 4, 5, 6, 7, 8, 10, 12, 13, 14, 15, 16]`
+///
+/// i.e., the first entry is -2 and all other entries are powers of 2.
+/// Note: state[0] is handled by the calling code (multiplied by -2).
+#[inline(always)]
+fn diagonal_mul_16(state: &mut [PackedMersenne31Neon; 16]) {
+    // state[0] -> -2*state[0] is handled by the calling code.
+    // state[1] -> 1*state[1] = state[1], no-op.
+    state[2] = state[2] + state[2]; // *2 via addition (faster than shift)
+
+    // For the remaining entries, use fast cyclic rotation.
+    state[3] = mul_2exp_i::<2, 29>(state[3]);
+    state[4] = mul_2exp_i::<3, 28>(state[4]);
+    state[5] = mul_2exp_i::<4, 27>(state[5]);
+    state[6] = mul_2exp_i::<5, 26>(state[6]);
+    state[7] = mul_2exp_i::<6, 25>(state[7]);
+    state[8] = mul_2exp_i::<7, 24>(state[8]);
+    state[9] = mul_2exp_i::<8, 23>(state[9]);
+    state[10] = mul_2exp_i::<10, 21>(state[10]);
+    state[11] = mul_2exp_i::<12, 19>(state[11]);
+    state[12] = mul_2exp_i::<13, 18>(state[12]);
+    state[13] = mul_2exp_i::<14, 17>(state[13]);
+    state[14] = mul_2exp_i::<15, 16>(state[14]);
+    state[15] = mul_2exp_i::<16, 15>(state[15]);
+}
+
+/// We hard-code multiplication by the diagonal minus 1 of our internal matrix (1 + Diag(V)).
+///
+/// For Mersenne31 with WIDTH = 24, the diagonal minus 1 is:
+/// `[-2] + 1 << [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22]`
+///
+/// i.e., the first entry is -2 and all other entries are powers of 2.
+/// Note: state[0] is handled by the calling code (multiplied by -2).
+#[inline(always)]
+fn diagonal_mul_24(state: &mut [PackedMersenne31Neon; 24]) {
+    // state[0] -> -2*state[0] is handled by the calling code.
+    // state[1] -> 1*state[1] = state[1], no-op.
+    state[2] = state[2] + state[2]; // *2 via addition (faster than shift)
+
+    // For the remaining entries, use fast cyclic rotation.
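The rotation entries continuing below rely on the identity 2^31 ≡ 1 (mod P): multiplying by 2^i cyclically rotates the 31-bit representation. A scalar model of `mul_2exp_i` (a hypothetical standalone sketch, not part of the diff):

```rust
const P: u32 = (1 << 31) - 1;

/// x * 2^i mod P as a 31-bit left rotation, for x in {0, ..., P} and 0 < i < 31.
fn mul_2exp_i_scalar(x: u32, i: u32) -> u32 {
    let wide = (x as u64) << i; // at most 61 bits, no overflow
    // Fold the bits above position 30 back to the bottom (2^31 ≡ 1 mod P).
    ((wide & P as u64) + (wide >> 31)) as u32
}

fn main() {
    assert_eq!(mul_2exp_i_scalar(5, 4), 80);        // 5 * 16
    assert_eq!(mul_2exp_i_scalar(P - 1, 1), P - 2); // (-1) * 2 ≡ -2 (mod P)
}
```

The folded sum never exceeds P here, because the two halves are disjoint bit ranges of the rotated 31-bit value.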
+ state[3] = mul_2exp_i::<2, 29>(state[3]); + state[4] = mul_2exp_i::<3, 28>(state[4]); + state[5] = mul_2exp_i::<4, 27>(state[5]); + state[6] = mul_2exp_i::<5, 26>(state[6]); + state[7] = mul_2exp_i::<6, 25>(state[7]); + state[8] = mul_2exp_i::<7, 24>(state[8]); + state[9] = mul_2exp_i::<8, 23>(state[9]); + state[10] = mul_2exp_i::<9, 22>(state[10]); + state[11] = mul_2exp_i::<10, 21>(state[11]); + state[12] = mul_2exp_i::<11, 20>(state[12]); + state[13] = mul_2exp_i::<12, 19>(state[13]); + state[14] = mul_2exp_i::<13, 18>(state[14]); + state[15] = mul_2exp_i::<14, 17>(state[15]); + state[16] = mul_2exp_i::<15, 16>(state[16]); + state[17] = mul_2exp_i::<16, 15>(state[17]); + state[18] = mul_2exp_i::<17, 14>(state[18]); + state[19] = mul_2exp_i::<18, 13>(state[19]); + state[20] = mul_2exp_i::<19, 12>(state[20]); + state[21] = mul_2exp_i::<20, 11>(state[21]); + state[22] = mul_2exp_i::<21, 10>(state[22]); + state[23] = mul_2exp_i::<22, 9>(state[23]); +} + +/// Compute the fused AddRoundConstant and S-Box operation: `x -> (x + rc)^5`. +/// +/// # Optimization +/// 1. Adds `rc` (positive form). Result is in `[0, 2P]`. +/// 2. Performs "Min-Reduction": `min(sum, sum - P)`. +/// - If `sum < P`, `sum - P` wraps to a huge value, `min` selects `sum`. +/// - If `sum >= P`, `sum - P` is small, `min` selects `sum - P`. +/// - Cost: 2 instructions (`sub`, `min`). +/// 3. Calls `exp5`. Since input is now strictly `[0, P]`, `exp5` does not need +/// to handle signs or absolute values. +/// +/// # Safety +/// - `input` must contain elements in canonical form `{0, ..., P}`. +/// - `rc` must contain round constants in positive form `{0, ..., P}`. +#[inline(always)] +fn add_rc_and_sbox(input: &mut PackedMersenne31Neon, rc: uint32x4_t) { + unsafe { + // Safety: If this code got compiled then NEON intrinsics are available. + let input_vec = input.to_vector(); + + // 1. Add round constant. Result in [0, 2P]. + let sum = aarch64::vaddq_u32(input_vec, rc); + + // 2. Fast Reduction to [0, P]. + // If sum >= P, we want (sum - P). + // If sum < P, (sum - P) underflows to > P. + // Unsigned min selects the correct modular result. + let diff = aarch64::vsubq_u32(sum, P); + let reduced = aarch64::vminq_u32(sum, diff); + + // 3. Apply S-box (optimized for positive inputs). + let output = exp5(reduced); + + *input = PackedMersenne31Neon::from_vector(output); + } +} + +/// Compute a single Poseidon2 internal layer on a state of width 16. +/// +/// The internal layer consists of: +/// 1. Add round constant to state[0] and apply S-box: `s0 -> (s0 + rc)^5` +/// 2. Apply linear layer: `s -> (1 + Diag(V)) * s` +/// +/// The linear layer can be decomposed as: +/// - `sum = Σ s_i` +/// - `s_i -> sum + v_i * s_i` +/// +/// We optimize by computing the diagonal multiplication on `s[1..]` in parallel +/// with the S-box on `s[0]`, exploiting instruction-level parallelism. +#[inline(always)] +fn internal_16(state: &mut [PackedMersenne31Neon; 16], rc: uint32x4_t) { + // Apply AddRoundConstant and S-Box to state[0]. + add_rc_and_sbox(&mut state[0], rc); + + // Compute sum of state[1..] while S-box is executing. + // This can run in parallel with the S-box computation. + let sum_tail = PackedMersenne31Neon::sum_array::<15>(&state[1..]); + + // Total sum including state[0] after S-box. + let sum = sum_tail + state[0]; + + // Update state[0]: s0_new = sum + (-2) * s0 = sum - 2*s0 = (sum - s0) - s0 = sum_tail - s0 + state[0] = sum_tail - state[0]; + + // Apply diagonal multiplication to the rest of the state. 
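In scalar terms, the whole width-16 internal round being assembled here looks as follows. A hypothetical sketch, not part of the diff, using `u64` arithmetic and `% P` in place of the vector tricks; `v_0 = -2` is handled via `sum_tail - s0`, `v_1 = 1` by adding only `sum`, and the rest are the powers of two from `diagonal_mul_16`:

```rust
const P: u64 = (1 << 31) - 1;

fn pow5(x: u64) -> u64 {
    let x2 = x * x % P;
    let x4 = x2 * x2 % P;
    x4 * x % P
}

// log2 of the diagonal entries v_2..v_15 (v_0 = -2, v_1 = 1 are special-cased).
const LOG_V: [u32; 14] = [1, 2, 3, 4, 5, 6, 7, 8, 10, 12, 13, 14, 15, 16];

/// Scalar model of `internal_16`; inputs and `rc` in {0, ..., P-1}.
fn internal_16_scalar(state: &mut [u64; 16], rc: u64) {
    // 1. (s0 + rc)^5, with one conditional subtraction standing in for vminq.
    let sum0 = state[0] + rc;
    state[0] = pow5(if sum0 >= P { sum0 - P } else { sum0 });

    // 2. Linear layer: s_i -> sum + v_i * s_i with sum = Σ s_j.
    let sum_tail = state[1..].iter().sum::<u64>() % P;
    let sum = (sum_tail + state[0]) % P;
    state[0] = (sum_tail + P - state[0]) % P; // sum + (-2) s0 = sum_tail - s0
    state[1] = (state[1] + sum) % P;
    for (s, lv) in state[2..].iter_mut().zip(LOG_V) {
        *s = ((*s << lv) + sum) % P;
    }
}
```

The NEON code above computes exactly this dataflow, four lanes at a time.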
+    diagonal_mul_16(state);
+
+    // Add sum to all elements except state[0] (which was handled above).
+    state[1..].iter_mut().for_each(|x| *x += sum);
+}
+
+impl InternalLayer<PackedMersenne31Neon, 16, 5> for Poseidon2InternalLayerMersenne31 {
+    /// Perform the internal layers of the Poseidon2 permutation on the given state.
+    fn permute_state(&self, state: &mut [PackedMersenne31Neon; 16]) {
+        self.packed_internal_constants
+            .iter()
+            .for_each(|&rc| internal_16(state, rc));
+    }
+}
+
+/// Compute a single Poseidon2 internal layer on a state of width 24.
+#[inline(always)]
+fn internal_24(state: &mut [PackedMersenne31Neon; 24], rc: uint32x4_t) {
+    // Apply AddRoundConstant and S-Box to state[0].
+    add_rc_and_sbox(&mut state[0], rc);
+
+    // Compute sum of state[1..] while S-box is executing.
+    let sum_tail = PackedMersenne31Neon::sum_array::<23>(&state[1..]);
+
+    // Total sum including state[0] after S-box.
+    let sum = sum_tail + state[0];
+
+    // Update state[0]: s0_new = sum_tail - s0 (because v_0 = -2)
+    state[0] = sum_tail - state[0];
+
+    // Apply diagonal multiplication to the rest of the state.
+    diagonal_mul_24(state);
+
+    // Add sum to all elements except state[0].
+    state[1..].iter_mut().for_each(|x| *x += sum);
+}
+
+impl InternalLayer<PackedMersenne31Neon, 24, 5> for Poseidon2InternalLayerMersenne31 {
     /// Perform the internal layers of the Poseidon2 permutation on the given state.
-    fn permute_state(&self, state: &mut [PackedMersenne31Neon; WIDTH]) {
-        self.internal_constants.iter().for_each(|&rc| {
-            add_rc_and_sbox_generic(&mut state[0], rc);
-            GenericPoseidon2LinearLayersMersenne31::internal_linear_layer(state);
-        });
+    fn permute_state(&self, state: &mut [PackedMersenne31Neon; 24]) {
+        self.packed_internal_constants
+            .iter()
+            .for_each(|&rc| internal_24(state, rc));
     }
 }

-impl<const WIDTH: usize> ExternalLayer<PackedMersenne31Neon, WIDTH, 5>
+impl<const WIDTH: usize> ExternalLayer<PackedMersenne31Neon, WIDTH, 5>
     for Poseidon2ExternalLayerMersenne31<WIDTH> {
     /// Perform the initial external layers of the Poseidon2 permutation on the given state.
     fn permute_state_initial(&self, state: &mut [PackedMersenne31Neon; WIDTH]) {
         external_initial_permute_state(
             state,
-            self.external_constants.get_initial_constants(),
-            add_rc_and_sbox_generic,
+            &self.packed_initial_external_constants,
+            add_rc_and_sbox,
             &MDSMat4,
         );
     }
@@ -70,8 +319,8 @@ impl<const WIDTH: usize> ExternalLayer<PackedMersenne31Neon, WIDTH, 5>
     /// Perform the terminal external layers of the Poseidon2 permutation on the given state.
     fn permute_state_terminal(&self, state: &mut [PackedMersenne31Neon; WIDTH]) {
         external_terminal_permute_state(
             state,
-            self.external_constants.get_terminal_constants(),
-            add_rc_and_sbox_generic,
+            &self.packed_terminal_external_constants,
+            add_rc_and_sbox,
             &MDSMat4,
         );
     }
 }

     type Perm16 = Poseidon2Mersenne31<16>;
     type Perm24 = Poseidon2Mersenne31<24>;

-    /// Test that the output is the same as the scalar version on a random input.
+    /// Test that the output is the same as the scalar version on a random input of length 16.
     #[test]
     fn test_neon_poseidon2_width_16() {
         let mut rng = SmallRng::seed_from_u64(1);
@@ -111,7 +360,7 @@ mod tests {
         assert_eq!(neon_output, expected);
     }

-    /// Test that the output is the same as the scalar version on a random input.
+    /// Test that the output is the same as the scalar version on a random input of length 24.
     #[test]
     fn test_neon_poseidon2_width_24() {
         let mut rng = SmallRng::seed_from_u64(1);
@@ -131,4 +380,18 @@ mod tests {
         assert_eq!(neon_output, expected);
     }
+
+    /// Test mul_2exp_i with known values.
+    #[test]
+    fn test_mul_2exp_i() {
+        // Test multiplication by 2^4 = 16
+        let input = PackedMersenne31Neon::from(Mersenne31::new(5));
+        let output = mul_2exp_i::<4, 27>(input);
+        assert_eq!(output.0[0], Mersenne31::new(80)); // 5 * 16 = 80
+
+        // Test multiplication by 2^8 = 256
+        let input = PackedMersenne31Neon::from(Mersenne31::new(3));
+        let output = mul_2exp_i::<8, 23>(input);
+        assert_eq!(output.0[0], Mersenne31::new(768)); // 3 * 256 = 768
+    }
 }
diff --git a/mersenne-31/src/complex.rs b/mersenne-31/src/complex.rs
index b2f33cbfc..7195daeac 100644
--- a/mersenne-31/src/complex.rs
+++ b/mersenne-31/src/complex.rs
@@ -31,7 +31,10 @@ impl ComplexExtendable for Mersenne31 {
     // sage: assert(g.multiplicative_order() == 2^31)
     // sage: assert(g.norm() == 1)
     assert!(bits <= Self::CIRCLE_TWO_ADICITY);
-    let base = Complex::new_complex(Self::new(311_014_874), Self::new(1_584_694_829));
+    let base = Complex::new_complex(
+        Self::new_reduced(311_014_874),
+        Self::new_reduced(1_584_694_829),
+    );
     base.exp_power_of_2(Self::CIRCLE_TWO_ADICITY - bits)
   }
 }
diff --git a/mersenne-31/src/extension.rs b/mersenne-31/src/extension.rs
index a359f7ca4..1ffa287b6 100644
--- a/mersenne-31/src/extension.rs
+++ b/mersenne-31/src/extension.rs
@@ -62,7 +62,7 @@ impl HasTwoAdicComplexBinomialExtension<2> for Mersenne31 {
         if bits == 33 {
             [
                 Complex::ZERO,
-                Complex::new_complex(Self::new(1437746044), Self::new(946469285)),
+                Complex::new_complex(Self::new_reduced(1437746044), Self::new_reduced(946469285)),
             ]
         } else {
             [Complex::two_adic_generator(bits), Complex::ZERO]
diff --git a/mersenne-31/src/mds.rs b/mersenne-31/src/mds.rs
index aababa442..8a60005d8 100644
--- a/mersenne-31/src/mds.rs
+++ b/mersenne-31/src/mds.rs
@@ -154,10 +154,6 @@ impl Permutation<[Mersenne31; 8]> for MdsMatrixMersenne31 {
             SmallConvolveMersenne31::conv8,
         )
     }
-
-    fn permute_mut(&self, input: &mut [Mersenne31; 8]) {
-        *input = self.permute(*input);
-    }
 }
 impl MdsPermutation<Mersenne31, 8> for MdsMatrixMersenne31 {}
@@ -173,10 +169,6 @@ impl Permutation<[Mersenne31; 12]> for MdsMatrixMersenne31 {
             SmallConvolveMersenne31::conv12,
         )
     }
-
-    fn permute_mut(&self, input: &mut [Mersenne31; 12]) {
-        *input = self.permute(*input);
-    }
 }
 impl MdsPermutation<Mersenne31, 12> for MdsMatrixMersenne31 {}
@@ -193,10 +185,6 @@ impl Permutation<[Mersenne31; 16]> for MdsMatrixMersenne31 {
             SmallConvolveMersenne31::conv16,
         )
     }
-
-    fn permute_mut(&self, input: &mut [Mersenne31; 16]) {
-        *input = self.permute(*input);
-    }
 }
 impl MdsPermutation<Mersenne31, 16> for MdsMatrixMersenne31 {}
@@ -222,10 +210,6 @@ impl Permutation<[Mersenne31; 32]> for MdsMatrixMersenne31 {
             LargeConvolveMersenne31::conv32,
         )
     }
-
-    fn permute_mut(&self, input: &mut [Mersenne31; 32]) {
-        *input = self.permute(*input);
-    }
 }
 impl MdsPermutation<Mersenne31, 32> for MdsMatrixMersenne31 {}
@@ -259,10 +243,6 @@ impl Permutation<[Mersenne31; 64]> for MdsMatrixMersenne31 {
             LargeConvolveMersenne31::conv64,
         )
     }
-
-    fn permute_mut(&self, input: &mut [Mersenne31; 64]) {
-        *input = self.permute(*input);
-    }
 }
 impl MdsPermutation<Mersenne31, 64> for MdsMatrixMersenne31 {}
diff --git a/mersenne-31/src/mersenne_31.rs b/mersenne-31/src/mersenne_31.rs
index 7e5b65235..987553cc4 100644
--- a/mersenne-31/src/mersenne_31.rs
+++ b/mersenne-31/src/mersenne_31.rs
@@ -7,6 +7,7 @@ use core::ops::{Add, AddAssign, Div, DivAssign, Mul, MulAssign, Neg, Sub, SubAssign};
 use core::{array, fmt, iter};

 use num_bigint::BigUint;
+use p3_challenger::UniformSamplingField;
 use p3_field::exponentiation::exp_1717986917;
 use p3_field::integers::QuotientMap;
 use p3_field::op_assign_macros::{
@@ -36,20 +37,27 @@ pub struct Mersenne31 {
 }

 impl Mersenne31 {
-    /// Convert a u32 element into a Mersenne31 element.
+    /// Create a new field element from any `u32`.
+    ///
+    /// Any `u32` value is accepted and automatically reduced modulo P.
+    #[inline]
+    pub const fn new(value: u32) -> Self {
+        Self { value: value % P }
+    }
+
+    /// Create a field element from a value assumed to be < 2^31.
     ///
     /// # Safety
     /// The element must lie in the range: `[0, 2^31 - 1]`.
     #[inline]
-    pub(crate) const fn new(value: u32) -> Self {
+    pub(crate) const fn new_reduced(value: u32) -> Self {
         debug_assert!((value >> 31) == 0);
         Self { value }
     }

     /// Convert a u32 element into a Mersenne31 element.
     ///
-    /// # Panics
-    /// This will panic if the element does not lie in the range: `[0, 2^31 - 1]`.
+    /// Returns `None` if the element does not lie in the range: `[0, 2^31 - 1]`.
     #[inline]
     pub const fn new_checked(value: u32) -> Option<Self> {
         if (value >> 31) == 0 {
@@ -59,11 +67,9 @@ impl Mersenne31 {
         }
     }

-    /// Convert a constant `u32` array into a constant array of field elements.
-    /// This allows inputs to be `> 2^31`, and just reduces them `mod P`.
+    /// Convert a `[u32; N]` array to an array of field elements.
     ///
-    /// This means that this will be slower than `array.map(Mersenne31::new_checked)` but
-    /// has the advantage of being able to be used in `const` environments.
+    /// Const version of `input.map(Mersenne31::new)`.
     #[inline]
     pub const fn new_array<const N: usize>(input: [u32; N]) -> [Self; N] {
         let mut output = [Self::ZERO; N];
@@ -163,7 +169,7 @@ impl Distribution<Mersenne31> for StandardUniform {
             let next_u31 = rng.next_u32() >> 1;
             let is_canonical = next_u31 != Mersenne31::ORDER_U32;
             if is_canonical {
-                return Mersenne31::new(next_u31);
+                return Mersenne31::new_reduced(next_u31);
             }
         }
     }
@@ -181,7 +187,7 @@ impl<'a> Deserialize<'a> for Mersenne31 {
         let val = u32::deserialize(d)?;
         // Ensure that `val` satisfies our invariant, i.e. it is not necessarily canonical but must fit in 31 bits.
         if val <= P {
-            Ok(Self::new(val))
+            Ok(Self::new_reduced(val))
         } else {
             Err(D::Error::custom("Value is out of range"))
         }
@@ -209,12 +215,12 @@ impl PrimeCharacteristicRing for Mersenne31 {

     #[inline]
     fn from_bool(b: bool) -> Self {
-        Self::new(b as u32)
+        Self::new_reduced(b as u32)
     }

     #[inline]
     fn halve(&self) -> Self {
-        Self::new(halve_u32::<P>(self.value))
+        Self::new_reduced(halve_u32::<P>(self.value))
     }

     #[inline]
@@ -224,7 +230,7 @@ impl PrimeCharacteristicRing for Mersenne31 {
         let left = (self.value << exp) & ((1 << 31) - 1);
         let right = self.value >> (31 - exp);
         let rotated = left | right;
-        Self::new(rotated)
+        Self::new_reduced(rotated)
     }

     #[inline]
@@ -234,7 +240,7 @@ impl PrimeCharacteristicRing for Mersenne31 {
         let left = self.value >> exp;
         let right = (self.value << (31 - exp)) & ((1 << 31) - 1);
         let rotated = left | right;
-        Self::new(rotated)
+        Self::new_reduced(rotated)
     }

     #[inline]
@@ -358,7 +364,7 @@ impl QuotientMap<u32> for Mersenne31 {
         // To reduce `n` to 31 bits, we clear its MSB, then add it back in its reduced form.
         let msb = int & (1 << 31);
         let msb_reduced = msb >> 31;
-        Self::new(int ^ msb) + Self::new(msb_reduced)
+        Self::new_reduced(int ^ msb) + Self::new_reduced(msb_reduced)
     }

     /// Convert a given `u32` integer into an element of the `Mersenne31` field.
@@ -366,7 +372,7 @@ impl QuotientMap<u32> for Mersenne31 {
     /// Returns `None` if the input does not lie in the range `[0, 2^31 - 1]`.
     #[inline]
     fn from_canonical_checked(int: u32) -> Option<Self> {
-        (int < Self::ORDER_U32).then(|| Self::new(int))
+        (int < Self::ORDER_U32).then(|| Self::new_reduced(int))
     }

     /// Convert a given `u32` integer into an element of the `Mersenne31` field.
@@ -376,7 +382,7 @@ impl QuotientMap<u32> for Mersenne31 {
     #[inline(always)]
     unsafe fn from_canonical_unchecked(int: u32) -> Self {
         debug_assert!(int < Self::ORDER_U32);
-        Self::new(int)
+        Self::new_reduced(int)
     }
 }

@@ -385,9 +391,9 @@ impl QuotientMap<i32> for Mersenne31 {
     #[inline]
     fn from_int(int: i32) -> Self {
         if int >= 0 {
-            Self::new(int as u32)
+            Self::new_reduced(int as u32)
         } else if int > (-1 << 31) {
-            Self::new(Self::ORDER_U32.wrapping_add_signed(int))
+            Self::new_reduced(Self::ORDER_U32.wrapping_add_signed(int))
         } else {
             // The only other option is int = -(2^31) = -1 mod p.
             Self::NEG_ONE
@@ -402,8 +408,10 @@ impl QuotientMap<i32> for Mersenne31 {
         const TWO_EXP_30: i32 = 1 << 30;
         const NEG_TWO_EXP_30_PLUS_1: i32 = (-1 << 30) + 1;
         match int {
-            0..TWO_EXP_30 => Some(Self::new(int as u32)),
-            NEG_TWO_EXP_30_PLUS_1..0 => Some(Self::new(Self::ORDER_U32.wrapping_add_signed(int))),
+            0..TWO_EXP_30 => Some(Self::new_reduced(int as u32)),
+            NEG_TWO_EXP_30_PLUS_1..0 => {
+                Some(Self::new_reduced(Self::ORDER_U32.wrapping_add_signed(int)))
+            }
             _ => None,
         }
     }
@@ -415,9 +423,9 @@ impl QuotientMap<i32> for Mersenne31 {
     #[inline(always)]
     unsafe fn from_canonical_unchecked(int: i32) -> Self {
         if int >= 0 {
-            Self::new(int as u32)
+            Self::new_reduced(int as u32)
         } else {
-            Self::new(Self::ORDER_U32.wrapping_add_signed(int))
+            Self::new_reduced(Self::ORDER_U32.wrapping_add_signed(int))
         }
     }
 }
@@ -469,7 +477,7 @@ impl Add for Mersenne31 {
         // If self + rhs did not overflow, return it.
         // If self + rhs overflowed, sum_corr = self + rhs - (2**31 - 1).
-        Self::new(if over { sum_corr } else { sum_u32 })
+        Self::new_reduced(if over { sum_corr } else { sum_u32 })
     }
 }

@@ -484,7 +492,7 @@ impl Sub for Mersenne31 {
         // Otherwise we have added 2**32 = 2**31 + 1 mod 2**31 - 1.
         // Hence we need to remove the most significant bit and subtract 1.
         sub -= over as u32;
-        Self::new(sub & Self::ORDER_U32)
+        Self::new_reduced(sub & Self::ORDER_U32)
     }
 }

@@ -494,7 +502,7 @@ impl Neg for Mersenne31 {
     #[inline]
     fn neg(self) -> Self::Output {
         // Can't underflow, since self.value is 31-bits and thus can't exceed ORDER.
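That no-underflow argument is exactly the invariant `new_reduced` encodes. A short sketch of how the constructor split introduced above behaves (hypothetical usage; `new_reduced` itself is crate-internal, so only its contract is noted in a comment):

```rust
use p3_mersenne_31::Mersenne31;

fn main() {
    const P: u32 = (1 << 31) - 1;
    // `new` accepts any u32 and reduces it mod P.
    assert_eq!(Mersenne31::new(P + 7), Mersenne31::new(7));
    // `new_checked` refuses anything with the top bit set.
    assert_eq!(Mersenne31::new_checked(1 << 31), None);
    // `new_reduced(x)` (crate-internal) skips the `% P` entirely and only
    // debug-asserts that x fits in 31 bits, which is why the arithmetic
    // impls above can use it on values they have already bounded.
}
```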
- Self::new(Self::ORDER_U32 - self.value) + Self::new_reduced(Self::ORDER_U32 - self.value) } } @@ -533,7 +541,29 @@ pub(crate) fn from_u62(input: u64) -> Mersenne31 { debug_assert!(input < (1 << 62)); let input_lo = (input & ((1 << 31) - 1)) as u32; let input_high = (input >> 31) as u32; - Mersenne31::new(input_lo) + Mersenne31::new(input_high) + Mersenne31::new_reduced(input_lo) + Mersenne31::new_reduced(input_high) +} + +impl UniformSamplingField for Mersenne31 { + const MAX_SINGLE_SAMPLE_BITS: usize = 16; + // For Mersenne31 uniform sampling really only makes sense if we allow rejection sampling. + // Sampling 16 bits already has a chance of 3e-5 to require a resample! + const SAMPLING_BITS_M: [u64; 64] = { + let prime: u64 = P as u64; + let mut a = [0u64; 64]; + let mut k = 0; + while k < 64 { + if k == 0 { + a[k] = prime; // This value is irrelevant in practice. `bits = 0` returns 0 always. + } else { + // Create a mask to zero out the last k bits + let mask = !((1u64 << k) - 1); + a[k] = prime & mask; + } + k += 1; + } + a + }; } #[cfg(test)] diff --git a/monolith/CHANGELOG.md b/monolith/CHANGELOG.md new file mode 100644 index 000000000..9f9c6de61 --- /dev/null +++ b/monolith/CHANGELOG.md @@ -0,0 +1,32 @@ +# Changelog + +All notable changes to this project will be documented in this file. + +The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/), +and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0.html). + +## [Unreleased] +## [0.4.2] - 2026-01-05 +### Merged PRs +- Feat: use compile-time asserts for const generic parameters (#1232) (Himess) + +### Authors +- Himess + +## [0.4.1] - 2025-12-18 +### Authors + +## [0.4.0] - 2025-12-12 +### Merged PRs +- Clippy wants us to put things inside of fmt now instead of just extra arguments... (#916) (AngusG) +- Chore: add descriptions to all sub-crate manifests (#906) (Himess) +- Clippy: small step (#1102) (Thomas Coratger) +- Clippy: add semicolon_if_nothing_returned (#1107) (Thomas Coratger) +- Clippy: add `needless_pass_by_value` (#1112) (Thomas Coratger) +- Allow users to impl either permute or permute_mut (#1175) (AngusG) + +### Authors +- AngusG +- Himess +- Thomas Coratger + diff --git a/monolith/src/monolith.rs b/monolith/src/monolith.rs index 9ae4ff242..242574806 100644 --- a/monolith/src/monolith.rs +++ b/monolith/src/monolith.rs @@ -37,9 +37,11 @@ where pub const NUM_BARS: usize = 8; pub fn new(mds: Mds) -> Self { - assert!(WIDTH >= 8); - assert!(WIDTH <= 24); - assert_eq!(WIDTH % 4, 0); + const { + assert!(WIDTH >= 8); + assert!(WIDTH <= 24); + assert!(WIDTH.is_multiple_of(4)); + } let round_constants = Self::instantiate_round_constants(); let lookup1 = Self::instantiate_lookup1(); diff --git a/monolith/src/monolith_mds.rs b/monolith/src/monolith_mds.rs index 7f43b0551..ce6196b72 100644 --- a/monolith/src/monolith_mds.rs +++ b/monolith/src/monolith_mds.rs @@ -39,10 +39,6 @@ impl Permutation<[Mersenne31; WIDTH apply_cauchy_mds_matrix(&mut shake_finalized, input) } } - - fn permute_mut(&self, input: &mut [Mersenne31; WIDTH]) { - *input = self.permute(*input); - } } impl MdsPermutation diff --git a/monty-31/CHANGELOG.md b/monty-31/CHANGELOG.md new file mode 100644 index 000000000..3e892432b --- /dev/null +++ b/monty-31/CHANGELOG.md @@ -0,0 +1,69 @@ +# Changelog + +All notable changes to this project will be documented in this file. 
+ +The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/), +and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0.html). + +## [Unreleased] +## [0.4.2] - 2026-01-05 +### Merged PRs +- Refactor: add public const `new` and `new_array` for all fields (#1222) (Adrian Hamelink) + +### Authors +- Adrian Hamelink + +## [0.4.1] - 2025-12-18 +### Authors + +## [0.4.0] - 2025-12-12 +### Merged PRs +- Field.rs: `Powers::packed_collect_n` (#888) (Adrian Hamelink) +- Chore: add descriptions to all sub-crate manifests (#906) (Himess) +- From_biguint method for Bn254 (#914) (AngusG) +- GCD based inversion for 31 bit fields (#921) (AngusG) +- Fixing a pair of clippy complaints in AVX512 (#926) (AngusG) +- Monty31: small touchups for packing (#927) (Thomas Coratger) +- Packing: small touchups (#937) (Thomas Coratger) +- Use `#[derive(...)]` for Debug and Default for packed fields. (#945) (AngusG) +- Adding Macros to remove boilerplate impls (#943) (AngusG) +- Combining Interleave Code (#950) (AngusG) +- Add a macro for implying PackedValue for PackedFields (#949) (AngusG) +- Packing Trick for Field Extensions (#958) (AngusG) +- Chore: small touchups and poseidon external unit tests (#971) (Thomas Coratger) +- Refactor to packed add methods (#972) (AngusG) +- Speed up Extension Field Addition (#980) (AngusG) +- Remove Nightly Features (#932) (AngusG) +- Move halve to ring (#969) (AngusG) +- Packed Sub Refactor (#979) (AngusG) +- Move div_2_exp_u64 to ring (#970) (AngusG) +- Speed Up Extension Field Subtraction (#988) (AngusG) +- Must Use (#996) (AngusG) +- Speed Up Base-Extension Multiplication (#998) (AngusG) +- Generic Poseidon2 Simplifications (#987) (AngusG) +- Compile Time asserts (#1015) (AngusG) +- Monty31: add halve for aarch64 neon (#1020) (Thomas Coratger) +- More Const Assert fixes (#1024) (AngusG) +- Poseidon2: add Neon implementation for Monty31 (#1023) (Thomas Coratger) +- Monty31: add aarch64 neon custom `exp_5` and `exp_7` (#1033) (Thomas Coratger) +- Small Neon Refactor (#1037) (AngusG) +- Monty31: better Poseidon2 for aarch64 neon using `exp_small` (#1035) (Thomas Coratger) +- Monty 31: more efficient aarch64 neon `quartic_mul_packed` (#1060) (Thomas Coratger) +- Poseidon2 doc comment fixes (#1071) (AngusG) +- Monty-31: implement more efficient `dot_product_2` for neon (#1070) (Thomas Coratger) +- Clippy: small step (#1102) (Thomas Coratger) +- Clippy: add nursery (#1103) (Thomas Coratger) +- Feat: add thread safety to dft implementations (#999) (Jeremi Do Dinh) +- Clippy: add semicolon_if_nothing_returned (#1107) (Thomas Coratger) +- Fixing a few clippy lints (#1115) (AngusG) +- Monty31: const assert in dot product (#1154) (Thomas Coratger) +- Allow users to impl either permute or permute_mut (#1175) (AngusG) +- Core: small touchups (#1186) (Thomas Coratger) + +### Authors +- Adrian Hamelink +- AngusG +- Himess +- Jeremi Do Dinh +- Thomas Coratger + diff --git a/monty-31/src/data_traits.rs b/monty-31/src/data_traits.rs index 6612365f5..b534b2f84 100644 --- a/monty-31/src/data_traits.rs +++ b/monty-31/src/data_traits.rs @@ -8,7 +8,7 @@ use crate::MontyField31; /// MontyParameters contains the prime P along with constants needed to convert elements into and out of MONTY form. /// The MONTY constant is assumed to be a power of 2. pub trait MontyParameters: - Copy + Clone + Default + Debug + Eq + PartialEq + Sync + Send + Hash + 'static + Copy + Default + Debug + Eq + PartialEq + Sync + Send + Hash + 'static { // A 31-bit prime. 
const PRIME: u32; diff --git a/monty-31/src/mds.rs b/monty-31/src/mds.rs index 2c0975d78..bd0b7add3 100644 --- a/monty-31/src/mds.rs +++ b/monty-31/src/mds.rs @@ -274,10 +274,6 @@ impl Permutation<[MontyField31; 8]> , i64, i64, i64>>::conv8, ) } - - fn permute_mut(&self, input: &mut [MontyField31; 8]) { - *input = self.permute(*input); - } } impl MdsPermutation, 8> for MdsMatrixMontyField31 @@ -294,10 +290,6 @@ impl Permutation<[MontyField31; 12]> , i64, i64, i64>>::conv12, ) } - - fn permute_mut(&self, input: &mut [MontyField31; 12]) { - *input = self.permute(*input); - } } impl MdsPermutation, 12> for MdsMatrixMontyField31 @@ -314,10 +306,6 @@ impl Permutation<[MontyField31; 16]> , i64, i64, i64>>::conv16, ) } - - fn permute_mut(&self, input: &mut [MontyField31; 16]) { - *input = self.permute(*input); - } } impl MdsPermutation, 16> for MdsMatrixMontyField31 @@ -335,10 +323,6 @@ where , i64, i64, i64>>::conv24, ) } - - fn permute_mut(&self, input: &mut [MontyField31; 24]) { - *input = self.permute(*input); - } } impl MdsPermutation, 24> for MdsMatrixMontyField31 @@ -355,10 +339,6 @@ impl Permutation<[MontyField31; 32]> , i64, i64, i64>>::conv32, ) } - - fn permute_mut(&self, input: &mut [MontyField31; 32]) { - *input = self.permute(*input); - } } impl MdsPermutation, 32> for MdsMatrixMontyField31 @@ -375,10 +355,6 @@ impl Permutation<[MontyField31; 64]> , i64, i64, i64>>::conv64, ) } - - fn permute_mut(&self, input: &mut [MontyField31; 64]) { - *input = self.permute(*input); - } } impl MdsPermutation, 64> for MdsMatrixMontyField31 diff --git a/monty-31/src/monty_31.rs b/monty-31/src/monty_31.rs index 18cf37dfd..2a65c79b2 100644 --- a/monty-31/src/monty_31.rs +++ b/monty-31/src/monty_31.rs @@ -43,8 +43,9 @@ pub struct MontyField31 { } impl MontyField31 { - /// The standard way to create a new element. - /// Note that `new` converts the input into MONTY form so should be avoided in performance critical implementations. + /// Create a new field element from any `u32`. + /// + /// Any `u32` value is accepted and automatically converted to Montgomery form. #[inline(always)] pub const fn new(value: u32) -> Self { Self { @@ -70,8 +71,9 @@ impl MontyField31 { from_monty::(elem.value) } - /// Convert a constant u32 array into a constant array of field elements. - /// Constant version of array.map(MontyField31::new). + /// Convert a `[u32; N]` array to an array of field elements. + /// + /// Const version of `input.map(MontyField31::new)`. #[inline] pub const fn new_array(input: [u32; N]) -> [Self; N] { let mut output = [Self::new_monty(0); N]; @@ -246,11 +248,14 @@ impl PrimeCharacteristicRing for MontyField31 { #[inline] fn dot_product(lhs: &[Self; N], rhs: &[Self; N]) -> Self { - assert!(N as u64 <= (1 << 34)); - // This code relies on assumptions about the relative size of the - // prime and the monty parameter. If these are changes this needs to be checked. - debug_assert!(FP::MONTY_BITS == 32); - debug_assert!((FP::PRIME as u64) < (1 << 31)); + const { + assert!(N as u64 <= (1 << 34)); + + // This code relies on assumptions about the relative size of the + // prime and the monty parameter. If these are changes this needs to be checked. 
+ debug_assert!(FP::MONTY_BITS == 32); + debug_assert!((FP::PRIME as u64) < (1 << 31)); + } match N { 0 => Self::ZERO, 1 => lhs[0] * rhs[0], diff --git a/multilinear-util/CHANGELOG.md b/multilinear-util/CHANGELOG.md new file mode 100644 index 000000000..b2383c93b --- /dev/null +++ b/multilinear-util/CHANGELOG.md @@ -0,0 +1,30 @@ +# Changelog + +All notable changes to this project will be documented in this file. + +The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/), +and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0.html). + +## [Unreleased] +## [0.4.2] - 2026-01-05 +### Authors + +## [0.4.1] - 2025-12-18 +### Authors + +## [0.4.0] - 2025-12-12 +### Merged PRs +- Possible different take on PR #973 (#978) (AngusG) +- Multilinear utils: add multilinear point (#1011) (Thomas Coratger) +- Rm Multilinear Point (#1018) (AngusG) +- Fix(multilinear-util): use core::marker::PhantomData in no_std (#1063) (Skylar Ray) +- Eq poly: implement batched eval_eq (#1051) (Thomas Coratger) +- Multilinear utils: rm `eval_eq` (#1087) (Thomas Coratger) +- Clippy: small step (#1102) (Thomas Coratger) +- Clippy: add semicolon_if_nothing_returned (#1107) (Thomas Coratger) + +### Authors +- AngusG +- Skylar Ray +- Thomas Coratger + diff --git a/multilinear-util/Cargo.toml b/multilinear-util/Cargo.toml index 729ba532a..f25413129 100644 --- a/multilinear-util/Cargo.toml +++ b/multilinear-util/Cargo.toml @@ -19,7 +19,7 @@ rand.workspace = true [dev-dependencies] criterion.workspace = true -p3-baby-bear.workspace = true +p3-baby-bear = { path = "../baby-bear" } proptest.workspace = true diff --git a/poseidon/CHANGELOG.md b/poseidon/CHANGELOG.md new file mode 100644 index 000000000..68c6db671 --- /dev/null +++ b/poseidon/CHANGELOG.md @@ -0,0 +1,24 @@ +# Changelog + +All notable changes to this project will be documented in this file. + +The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/), +and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0.html). + +## [Unreleased] +## [0.4.2] - 2026-01-05 +### Authors + +## [0.4.1] - 2025-12-18 +### Authors + +## [0.4.0] - 2025-12-12 +### Merged PRs +- Chore: add descriptions to all sub-crate manifests (#906) (Himess) +- Clippy: small step (#1102) (Thomas Coratger) +- Clippy: add semicolon_if_nothing_returned (#1107) (Thomas Coratger) + +### Authors +- Himess +- Thomas Coratger + diff --git a/poseidon/Cargo.toml b/poseidon/Cargo.toml index 421f2385e..f5ae1cd95 100644 --- a/poseidon/Cargo.toml +++ b/poseidon/Cargo.toml @@ -17,9 +17,9 @@ p3-symmetric.workspace = true rand.workspace = true [dev-dependencies] -p3-baby-bear.workspace = true -p3-goldilocks.workspace = true -p3-mersenne-31.workspace = true +p3-baby-bear = { path = "../baby-bear" } +p3-goldilocks = { path = "../goldilocks" } +p3-mersenne-31 = { path = "../mersenne-31" } criterion.workspace = true diff --git a/poseidon2-air/CHANGELOG.md b/poseidon2-air/CHANGELOG.md new file mode 100644 index 000000000..cebdd89f8 --- /dev/null +++ b/poseidon2-air/CHANGELOG.md @@ -0,0 +1,37 @@ +# Changelog + +All notable changes to this project will be documented in this file. + +The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/), +and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0.html). 
+ +## [Unreleased] +## [0.4.2] - 2026-01-05 +### Merged PRs +- Small changes for recursive lookups (#1229) (Linda Guiga) + +### Authors +- Linda Guiga + +## [0.4.1] - 2025-12-18 +### Authors + +## [0.4.0] - 2025-12-12 +### Merged PRs +- Chore: add descriptions to all sub-crate manifests (#906) (Himess) +- More Clippy Complaints (#931) (AngusG) +- Replace `Copy` with `Clone` in `AirBuilder`'s `Var` (#930) (Linda Guiga) +- Weaken the trait bound of AirBuilder to allow `F` to be merely a Ring. (#977) (AngusG) +- Generic Poseidon2 Simplifications (#987) (AngusG) +- Clippy: small step (#1102) (Thomas Coratger) +- Refactor: Replace &Vec with &[T] in function parameters (#1111) (Merkel Tranjes) +- Make generate_trace_rows_for_perm public (#1159) (Alonso González) + +### Authors +- Alonso González +- AngusG +- Himess +- Linda Guiga +- Merkel Tranjes +- Thomas Coratger + diff --git a/poseidon2-air/Cargo.toml b/poseidon2-air/Cargo.toml index 4f8b1066b..418fdae51 100644 --- a/poseidon2-air/Cargo.toml +++ b/poseidon2-air/Cargo.toml @@ -19,24 +19,24 @@ p3-poseidon2.workspace = true rand.workspace = true tracing.workspace = true -[target.'cfg(target_family = "unix")'.dev-dependencies] -tikv-jemallocator = "0.6" - [dev-dependencies] -p3-baby-bear.workspace = true -p3-challenger.workspace = true -p3-commit.workspace = true -p3-dft.workspace = true -p3-fri.workspace = true -p3-keccak.workspace = true -p3-koala-bear.workspace = true -p3-merkle-tree.workspace = true -p3-symmetric.workspace = true -p3-uni-stark.workspace = true +p3-baby-bear = { path = "../baby-bear" } +p3-challenger = { path = "../challenger" } +p3-commit = { path = "../commit" } +p3-dft = { path = "../dft" } +p3-fri = { path = "../fri" } +p3-keccak = { path = "../keccak" } +p3-koala-bear = { path = "../koala-bear" } +p3-merkle-tree = { path = "../merkle-tree" } +p3-symmetric = { path = "../symmetric" } +p3-uni-stark = { path = "../uni-stark" } tracing-forest = { workspace = true, features = ["ansi", "smallvec"] } tracing-subscriber = { workspace = true, features = ["std", "env-filter"] } +[target.'cfg(target_family = "unix")'.dev-dependencies] +tikv-jemallocator = "0.6" + [features] parallel = ["p3-maybe-rayon/parallel"] diff --git a/poseidon2-air/src/air.rs b/poseidon2-air/src/air.rs index cfa9e5b70..1a6427aee 100644 --- a/poseidon2-air/src/air.rs +++ b/poseidon2-air/src/air.rs @@ -29,6 +29,33 @@ pub struct Poseidon2Air< _phantom: PhantomData, } +impl< + F: PrimeCharacteristicRing, + LinearLayers, + const WIDTH: usize, + const SBOX_DEGREE: u64, + const SBOX_REGISTERS: usize, + const HALF_FULL_ROUNDS: usize, + const PARTIAL_ROUNDS: usize, +> Clone + for Poseidon2Air< + F, + LinearLayers, + WIDTH, + SBOX_DEGREE, + SBOX_REGISTERS, + HALF_FULL_ROUNDS, + PARTIAL_ROUNDS, + > +{ + fn clone(&self) -> Self { + Self { + constants: self.constants.clone(), + _phantom: PhantomData, + } + } +} + impl< F: PrimeCharacteristicRing, LinearLayers, diff --git a/poseidon2-air/src/generation.rs b/poseidon2-air/src/generation.rs index ebef84390..7274ea76a 100644 --- a/poseidon2-air/src/generation.rs +++ b/poseidon2-air/src/generation.rs @@ -131,7 +131,7 @@ pub fn generate_trace_rows< } /// `rows` will normally consist of 24 rows, with an exception for the final row. 
-fn generate_trace_rows_for_perm< +pub fn generate_trace_rows_for_perm< F: PrimeField, LinearLayers: GenericPoseidon2LinearLayers, const WIDTH: usize, diff --git a/poseidon2/CHANGELOG.md b/poseidon2/CHANGELOG.md new file mode 100644 index 000000000..938a60506 --- /dev/null +++ b/poseidon2/CHANGELOG.md @@ -0,0 +1,32 @@ +# Changelog + +All notable changes to this project will be documented in this file. + +The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/), +and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0.html). + +## [Unreleased] +## [0.4.2] - 2026-01-05 +### Authors + +## [0.4.1] - 2025-12-18 +### Authors + +## [0.4.0] - 2025-12-12 +### Merged PRs +- Porting BN254 to our own code base (#913) (AngusG) +- Chore: add descriptions to all sub-crate manifests (#906) (Himess) +- Poseidon: make ExternalLayerConstants new const (#968) (Thomas Coratger) +- Chore: small touchups and poseidon external unit tests (#971) (Thomas Coratger) +- Remove Nightly Features (#932) (AngusG) +- Packed Sub Refactor (#979) (AngusG) +- Generic Poseidon2 Simplifications (#987) (AngusG) +- Clippy: small step (#1102) (Thomas Coratger) +- Clippy: add semicolon_if_nothing_returned (#1107) (Thomas Coratger) +- Clippy: add `needless_pass_by_value` (#1112) (Thomas Coratger) + +### Authors +- AngusG +- Himess +- Thomas Coratger + diff --git a/poseidon2/Cargo.toml b/poseidon2/Cargo.toml index f54c60d16..06519a89f 100644 --- a/poseidon2/Cargo.toml +++ b/poseidon2/Cargo.toml @@ -18,11 +18,11 @@ p3-util.workspace = true rand.workspace = true [dev-dependencies] -p3-baby-bear.workspace = true -p3-bn254.workspace = true -p3-goldilocks.workspace = true -p3-koala-bear.workspace = true -p3-mersenne-31.workspace = true +p3-baby-bear = { path = "../baby-bear" } +p3-bn254 = { path = "../bn254" } +p3-goldilocks = { path = "../goldilocks" } +p3-koala-bear = { path = "../koala-bear" } +p3-mersenne-31 = { path = "../mersenne-31" } criterion.workspace = true diff --git a/release-plz.toml b/release-plz.toml new file mode 100644 index 000000000..e35522379 --- /dev/null +++ b/release-plz.toml @@ -0,0 +1,141 @@ +[workspace] +changelog_config = "cliff.toml" +pr_labels = ["release"] +publish_timeout = "10m" +dependencies_update = true + +[[package]] +name = "p3-air" +version_group = "plonky3" + +[[package]] +name = "p3-baby-bear" +version_group = "plonky3" + +[[package]] +name = "p3-batch-stark" +version_group = "plonky3" + +[[package]] +name = "p3-blake3" +version_group = "plonky3" + +[[package]] +name = "p3-blake3-air" +version_group = "plonky3" + +[[package]] +name = "p3-bn254" +version_group = "plonky3" + +[[package]] +name = "p3-challenger" +version_group = "plonky3" + +[[package]] +name = "p3-circle" +version_group = "plonky3" + +[[package]] +name = "p3-commit" +version_group = "plonky3" + +[[package]] +name = "p3-dft" +version_group = "plonky3" + +[[package]] +name = "p3-field" +version_group = "plonky3" + +[[package]] +name = "p3-field-testing" +version_group = "plonky3" + +[[package]] +name = "p3-fri" +version_group = "plonky3" + +[[package]] +name = "p3-goldilocks" +version_group = "plonky3" + +[[package]] +name = "p3-interpolation" +version_group = "plonky3" + +[[package]] +name = "p3-keccak" +version_group = "plonky3" + +[[package]] +name = "p3-keccak-air" +version_group = "plonky3" + +[[package]] +name = "p3-koala-bear" +version_group = "plonky3" + +[[package]] +name = "p3-lookup" +version_group = "plonky3" + +[[package]] +name = "p3-matrix" +version_group = "plonky3" + 
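Every crate below is pinned to the same `version_group`, which release-plz (as documented upstream) uses to release grouped packages with one shared version number; this is what keeps the whole `p3-*` family at a single workspace version. A new workspace crate would get one more entry of the same shape (hypothetical crate name):

```toml
[[package]]
name = "p3-new-crate"        # hypothetical crate name
version_group = "plonky3"    # released in lockstep with the rest of the workspace
```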
+[[package]] +name = "p3-maybe-rayon" +version_group = "plonky3" + +[[package]] +name = "p3-mds" +version_group = "plonky3" + +[[package]] +name = "p3-merkle-tree" +version_group = "plonky3" + +[[package]] +name = "p3-mersenne-31" +version_group = "plonky3" + +[[package]] +name = "p3-monty-31" +version_group = "plonky3" + +[[package]] +name = "p3-multilinear-util" +version_group = "plonky3" + +[[package]] +name = "p3-poseidon" +version_group = "plonky3" + +[[package]] +name = "p3-poseidon2" +version_group = "plonky3" + +[[package]] +name = "p3-poseidon2-air" +version_group = "plonky3" + +[[package]] +name = "p3-rescue" +version_group = "plonky3" + +[[package]] +name = "p3-sha256" +version_group = "plonky3" + +[[package]] +name = "p3-symmetric" +version_group = "plonky3" + +[[package]] +name = "p3-uni-stark" +version_group = "plonky3" + +[[package]] +name = "p3-util" +version_group = "plonky3" diff --git a/rescue/CHANGELOG.md b/rescue/CHANGELOG.md new file mode 100644 index 000000000..d19489539 --- /dev/null +++ b/rescue/CHANGELOG.md @@ -0,0 +1,30 @@ +# Changelog + +All notable changes to this project will be documented in this file. + +The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/), +and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0.html). + +## [Unreleased] +## [0.4.2] - 2026-01-05 +### Merged PRs +- Refactor: add public const `new` and `new_array` for all fields (#1222) (Adrian Hamelink) + +### Authors +- Adrian Hamelink + +## [0.4.1] - 2025-12-18 +### Authors + +## [0.4.0] - 2025-12-12 +### Merged PRs +- Chore: add descriptions to all sub-crate manifests (#906) (Himess) +- Possible different take on PR #973 (#978) (AngusG) +- Clippy: small step (#1102) (Thomas Coratger) +- Clippy: add semicolon_if_nothing_returned (#1107) (Thomas Coratger) + +### Authors +- AngusG +- Himess +- Thomas Coratger + diff --git a/rescue/Cargo.toml b/rescue/Cargo.toml index dcb629234..cfabb2a1e 100644 --- a/rescue/Cargo.toml +++ b/rescue/Cargo.toml @@ -20,9 +20,9 @@ rand.workspace = true sha3.workspace = true [dev-dependencies] -p3-baby-bear.workspace = true -p3-goldilocks.workspace = true -p3-mersenne-31.workspace = true +p3-baby-bear = { path = "../baby-bear" } +p3-goldilocks = { path = "../goldilocks" } +p3-mersenne-31 = { path = "../mersenne-31" } criterion.workspace = true diff --git a/rescue/src/util.rs b/rescue/src/util.rs index 741d30f91..182ebb87d 100644 --- a/rescue/src/util.rs +++ b/rescue/src/util.rs @@ -25,16 +25,21 @@ pub(crate) fn shake256_hash(seed_bytes: &[u8], num_bytes: usize) -> Vec { /// series. As x increases, increase the precision P until the /// accuracy is sufficient. #[must_use] -fn pow2_no_std(x: f32, tol: f32) -> f32 { +const fn pow2_no_std(x: f32, tol: f32) -> f32 { let y = x * core::f32::consts::LN_2; let mut t = 1.0; // ith Taylor term = (x ln(2))^i/i! let mut two_pow_x = t; - for i in 1.. { + + // use manual `while` loop to enable `const` + let mut i = 1; + loop { t *= y / (i as f32); if t < tol { break; } two_pow_x += t; + + i += 1; } two_pow_x } @@ -53,7 +58,7 @@ fn pow2_no_std(x: f32, tol: f32) -> f32 { /// to multiple iterations (with a suitable analysis of the precision /// passed to pow2_no_std) before being used more widely. 
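The series driving `pow2_no_std` converges quickly for small non-negative arguments, and is easy to sanity-check numerically. A standalone sketch mirroring the loop above (hypothetical, not part of the diff):

```rust
/// 2^x = e^(x ln 2) = Σ (x ln 2)^i / i!, truncated once a term drops below `tol`.
fn pow2_taylor(x: f32, tol: f32) -> f32 {
    let y = x * core::f32::consts::LN_2;
    let (mut term, mut acc) = (1.0_f32, 1.0_f32);
    let mut i = 1;
    loop {
        term *= y / (i as f32);
        if term < tol {
            break;
        }
        acc += term;
        i += 1;
    }
    acc
}

fn main() {
    // 2^3 = 8; the series converges after roughly a dozen terms at this tolerance.
    assert!((pow2_taylor(3.0, 1e-4) - 8.0).abs() < 0.01);
}
```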
#[must_use] -fn log2_no_std(x: u64) -> f32 { +const fn log2_no_std(x: u64) -> f32 { const LOG2_E: f32 = core::f32::consts::LOG2_E; const POW2_TOL: f32 = 0.0001; // Initial estimate x0 = floor(log2(x)) @@ -78,7 +83,7 @@ fn log2_no_std(x: u64) -> f32 { /// + (log2(n) - log2(k) - log2(n-k) - log2(2π))/2 /// /// coming from Stirling's approximation for n!. -pub(crate) fn log2_binom(n: u64, k: u64) -> f32 { +pub(crate) const fn log2_binom(n: u64, k: u64) -> f32 { const LOG2_2PI: f32 = 2.6514961; let log2_n = log2_no_std(n); let log2_k = log2_no_std(k); diff --git a/sha256/CHANGELOG.md b/sha256/CHANGELOG.md new file mode 100644 index 000000000..3e5beb0f1 --- /dev/null +++ b/sha256/CHANGELOG.md @@ -0,0 +1,22 @@ +# Changelog + +All notable changes to this project will be documented in this file. + +The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/), +and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0.html). + +## [Unreleased] +## [0.4.2] - 2026-01-05 +### Authors + +## [0.4.1] - 2025-12-18 +### Authors + +## [0.4.0] - 2025-12-12 +### Merged PRs +- Sha256: fix clippy warning (#1081) (Thomas Coratger) +- Clippy: small step (#1102) (Thomas Coratger) + +### Authors +- Thomas Coratger + diff --git a/symmetric/CHANGELOG.md b/symmetric/CHANGELOG.md new file mode 100644 index 000000000..65ce51c1b --- /dev/null +++ b/symmetric/CHANGELOG.md @@ -0,0 +1,30 @@ +# Changelog + +All notable changes to this project will be documented in this file. + +The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/), +and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0.html). + +## [Unreleased] +## [0.4.2] - 2026-01-05 +### Merged PRs +- Feat: use compile-time asserts for const generic parameters (#1232) (Himess) + +### Authors +- Himess + +## [0.4.1] - 2025-12-18 +### Authors + +## [0.4.0] - 2025-12-12 +### Merged PRs +- Chore: add descriptions to all sub-crate manifests (#906) (Himess) +- Hasher: add more documentation for `CryptographicHasher` trait (#922) (Thomas Coratger) +- Clippy: small step (#1102) (Thomas Coratger) +- Allow users to impl either permute or permute_mut (#1175) (AngusG) + +### Authors +- AngusG +- Himess +- Thomas Coratger + diff --git a/symmetric/Cargo.toml b/symmetric/Cargo.toml index c87b1bc09..1e41cb84d 100644 --- a/symmetric/Cargo.toml +++ b/symmetric/Cargo.toml @@ -16,7 +16,7 @@ itertools.workspace = true serde = { workspace = true, features = ["alloc"] } [dev-dependencies] -p3-koala-bear.workspace = true +p3-koala-bear = { path = "../koala-bear" } [lints] workspace = true diff --git a/symmetric/src/compression.rs b/symmetric/src/compression.rs index cb2fc44e7..3bc841b28 100644 --- a/symmetric/src/compression.rs +++ b/symmetric/src/compression.rs @@ -33,7 +33,7 @@ where InnerP: CryptographicPermutation<[T; WIDTH]>, { fn compress(&self, input: [[T; CHUNK]; N]) -> [T; CHUNK] { - debug_assert!(CHUNK * N <= WIDTH); + const { assert!(CHUNK * N <= WIDTH) } let mut pre = [T::default(); WIDTH]; for i in 0..N { pre[i * CHUNK..(i + 1) * CHUNK].copy_from_slice(&input[i]); diff --git a/symmetric/src/permutation.rs b/symmetric/src/permutation.rs index f62f9b766..fbac8adc4 100644 --- a/symmetric/src/permutation.rs +++ b/symmetric/src/permutation.rs @@ -1,12 +1,19 @@ /// A permutation in the mathematical sense. pub trait Permutation: Clone + Sync { + // The methods permute, permute_mut are defined in a circular manner + // so you only need to implement one of them and will get the other + // for free. 
If you implement neither, the two default implementations
+    // recurse into each other until the stack overflows at runtime.
+    #[inline(always)]
     fn permute(&self, mut input: T) -> T {
         self.permute_mut(&mut input);
         input
     }

-    fn permute_mut(&self, input: &mut T);
+    fn permute_mut(&self, input: &mut T) {
+        *input = self.permute(input.clone());
+    }
 }

 /// A permutation thought to be cryptographically secure, in the sense that it is thought to be
diff --git a/uni-stark/CHANGELOG.md b/uni-stark/CHANGELOG.md
new file mode 100644
index 000000000..697e98f9e
--- /dev/null
+++ b/uni-stark/CHANGELOG.md
@@ -0,0 +1,75 @@
+# Changelog
+
+All notable changes to this project will be documented in this file.
+
+The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/),
+and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0.html).
+
+## [Unreleased]
+## [0.4.2] - 2026-01-05
+### Merged PRs
+- Refactor(field): Add packed field extraction helpers and FieldArray utilities (#1211) (Adrian Hamelink)
+- Enable ZK for preprocessing and in batch-stark (#1178) (Linda Guiga)
+- Avoid change of Pcs's `open` method signature (#1230) (Linda Guiga)
+
+### Authors
+- Adrian Hamelink
+- Linda Guiga
+
+## [0.4.1] - 2025-12-18
+### Authors
+
+## [0.4.0] - 2025-12-12
+### Merged PRs
+- Field.rs: `Powers::packed_collect_n` (#888) (Adrian Hamelink)
+- Uni stark: small touchups on the verifier (#910) (Thomas Coratger)
+- Clippy wants us to put things inside of fmt now instead of just extra arguments... (#916) (AngusG)
+- Chore: add descriptions to all sub-crate manifests (#906) (Himess)
+- Fixed "attempt to subtract with overflow" issue in uni-stark (#934) (Gabriel Barreto)
+- Replace `Copy` with `Clone` in `AirBuilder`'s `Var` (#930) (Linda Guiga)
+- Docs: Add comprehensive documentation to constraint folder implementation (#856) (Ragnar)
+- Shrink some test sizes (#524) (Daniel Lubarov)
+- Fixing error on main (#939) (AngusG)
+- Chore: various small changes (#944) (Thomas Coratger)
+- Remove Nightly Features (#932) (AngusG)
+- Small visibility changes for recursion (#1046) (Linda Guiga)
+- Refactor: remove redundant clones in crypto modules (#1086) (Skylar Ray)
+- Add modular lookups (local and global) with logup implementation (#1090) (Linda Guiga)
+- Add multi-STARK prover and verifier (#1088) (Sai)
+- Clippy: small step (#1102) (Thomas Coratger)
+- Clippy: add nursery (#1103) (Thomas Coratger)
+- Update symbolic_builder.rs (#1106) (AJoX)
+- Clippy: add semicolon_if_nothing_returned (#1107) (Thomas Coratger)
+- Clippy: add `needless_pass_by_value` (#1112) (Thomas Coratger)
+- Refactor: Replace &Vec with &[T] in function parameters (#1111) (Merkel Tranjes)
+- Add preprocessed/transparent columns to uni-stark (#1114) (o-k-d)
+- Add Preprocessed trace setup and VKs (#1150) (Sai)
+- Update lookup traits and add folders with lookups (#1160) (Linda Guiga)
+- ExtensionBuilder for SymbolicAirBuilder (#1161) (Linda Guiga)
+- Uni-stark: add unit tests for SymbolicExpression (#1169) (Thomas Coratger)
+- Uni stark: small touchups (#1163) (Thomas Coratger)
+- Clarify quotient degree vs quotient chunks naming (#1156) (Sai)
+- Core: add error messages to error enums via thiserror (#1168) (Thomas Coratger)
+- Feat: add `SubAirBuilder` module (#1172) (Robin Salen)
+- Doc: add intra-doc links (#1174) (Robin Salen)
+- Integrate lookups to prover and verifier (#1165) (Linda Guiga)
+- Core: small touchups (#1186) (Thomas Coratger)
+- Feat: add PoW phase for batching in FRI commit phase (#1164) (Zach Langley)
+
+### Authors
+- AJoX
+- Adrian Hamelink
+-
AngusG +- Daniel Lubarov +- Gabriel Barreto +- Himess +- Linda Guiga +- Merkel Tranjes +- Ragnar +- Robin Salen +- Sai +- Skylar Ray +- Thomas Coratger +- Zach Langley +- o-k-d + diff --git a/uni-stark/Cargo.toml b/uni-stark/Cargo.toml index 4afc0ee78..e8fb17ea3 100644 --- a/uni-stark/Cargo.toml +++ b/uni-stark/Cargo.toml @@ -21,20 +21,21 @@ p3-util.workspace = true itertools.workspace = true serde = { workspace = true, features = ["derive", "alloc"] } +thiserror.workspace = true tracing.workspace = true [dev-dependencies] -p3-baby-bear.workspace = true -p3-challenger.workspace = true -p3-circle.workspace = true -p3-commit = { workspace = true, features = ["test-utils"] } -p3-dft.workspace = true -p3-fri.workspace = true -p3-keccak.workspace = true -p3-matrix.workspace = true -p3-merkle-tree.workspace = true -p3-mersenne-31.workspace = true -p3-symmetric.workspace = true +p3-baby-bear = { path = "../baby-bear" } +p3-challenger = { path = "../challenger" } +p3-circle = { path = "../circle" } +p3-commit = { path = "../commit", features = ["test-utils"] } +p3-dft = { path = "../dft" } +p3-fri = { path = "../fri" } +p3-keccak = { path = "../keccak" } +p3-matrix = { path = "../matrix" } +p3-merkle-tree = { path = "../merkle-tree" } +p3-mersenne-31 = { path = "../mersenne-31" } +p3-symmetric = { path = "../symmetric" } postcard = { workspace = true, features = ["alloc"] } rand.workspace = true diff --git a/uni-stark/src/check_constraints.rs b/uni-stark/src/check_constraints.rs index b50a4895a..2ff9894c5 100644 --- a/uni-stark/src/check_constraints.rs +++ b/uni-stark/src/check_constraints.rs @@ -1,21 +1,22 @@ -use p3_air::{Air, AirBuilder, AirBuilderWithPublicValues, PairBuilder}; +use p3_air::{Air, AirBuilder, AirBuilderWithPublicValues}; use p3_field::Field; use p3_matrix::Matrix; use p3_matrix::dense::{RowMajorMatrix, RowMajorMatrixView}; use p3_matrix::stack::ViewPair; use tracing::instrument; -/// Runs constraint checks using a given AIR definition and trace matrix. +/// Runs constraint checks using a given [`Air`] implementation and trace matrix. /// /// Iterates over every row in `main`, providing both the current and next row -/// (with wraparound) to the AIR logic. Also injects public values into the builder -/// for first/last row assertions. +/// (with wraparound) to the [`Air`] logic. Also injects public values into the +/// [`DebugConstraintBuilder`] for first/last row assertions. /// /// # Arguments -/// - `air`: The AIR logic to run -/// - `main`: The trace matrix (rows of witness values) -/// - `public_values`: Public values provided to the builder -#[instrument(name = "check constraints", skip_all)] +/// - `air`: The [`Air`] logic to run. +/// - `main`: The [`RowMajorMatrix`] containing witness rows. +/// - `public_values`: Public values provided to the builder. +#[instrument(skip_all)] +#[allow(unused)] pub(crate) fn check_constraints(air: &A, main: &RowMajorMatrix, public_values: &[F]) where F: Field, @@ -27,9 +28,9 @@ where (0..height).for_each(|row_index| { let row_index_next = (row_index + 1) % height; - // row_index < height so we can used unchecked indexing. + // row_index < height so we can use unchecked indexing. let local = unsafe { main.row_slice_unchecked(row_index) }; - // row_index_next < height so we can used unchecked indexing. + // row_index_next < height so we can use unchecked indexing. 
+        // row_index_next < height so we can use unchecked indexing.
         let next = unsafe { main.row_slice_unchecked(row_index_next) };
         let main = ViewPair::new(
             RowMajorMatrixView::new_row(&*local),
@@ -66,7 +67,7 @@ where
 /// A builder that runs constraint assertions during testing.
 ///
 /// Used in conjunction with [`check_constraints`] to simulate
-/// an execution trace and verify that the AIR logic enforces all constraints.
+/// an execution trace and verify that the [`Air`] logic enforces all constraints.
 #[derive(Debug)]
 pub struct DebugConstraintBuilder<'a, F: Field> {
     /// The index of the row currently being evaluated.
@@ -98,6 +99,10 @@ where
         self.main
     }

+    fn preprocessed(&self) -> Option<Self::M> {
+        self.preprocessed
+    }
+
     fn is_first_row(&self) -> Self::Expr {
         self.is_first_row
     }
@@ -144,13 +149,6 @@ impl<F: Field> AirBuilderWithPublicValues for DebugConstraintBuilder<'_, F> {
     }
 }

-impl<'a, F: Field> PairBuilder for DebugConstraintBuilder<'a, F> {
-    fn preprocessed(&self) -> Self::M {
-        self.preprocessed
-            .expect("DebugConstraintBuilder requires preprocessed columns when used as PairBuilder")
-    }
-}
-
 #[cfg(test)]
 mod tests {
     use alloc::vec;
diff --git a/uni-stark/src/config.rs b/uni-stark/src/config.rs
index 3a675c4cb..d1ebb8c89 100644
--- a/uni-stark/src/config.rs
+++ b/uni-stark/src/config.rs
@@ -22,24 +22,24 @@ pub type PackedChallenge<SC> =
     <<SC as StarkGenericConfig>::Challenge as ExtensionField<Val<SC>>>::ExtensionPacking;

 pub trait StarkGenericConfig {
-    /// The PCS used to commit to trace polynomials.
+    /// The [`Pcs`] implementation used to commit to trace polynomials.
     type Pcs: Pcs<Self::Challenge, Self::Challenger>;

-    /// The field from which most random challenges are drawn.
+    /// The [`ExtensionField`] from which most random challenges are drawn.
     type Challenge: ExtensionField<Val<Self>>;

-    /// The challenger (Fiat-Shamir) implementation used.
+    /// The [`FieldChallenger`] (Fiat-Shamir) implementation used.
     type Challenger: FieldChallenger<Val<Self>>
         + CanObserve<<Self::Pcs as Pcs<Self::Challenge, Self::Challenger>>::Commitment>
         + CanSample<Self::Challenge>;

-    /// Get a reference to the PCS used by this proof configuration.
+    /// Get a reference to the [`Pcs`] used by this proof configuration.
     fn pcs(&self) -> &Self::Pcs;

-    /// Get an initialisation of the challenger used by this proof configuration.
+    /// Get an initialisation of the [`FieldChallenger`] used by this proof configuration.
     fn initialise_challenger(&self) -> Self::Challenger;

-    /// Returns 1 if the PCS is zero-knowledge, 0 otherwise.
+    /// Returns 1 if the [`Pcs`] is zero-knowledge, 0 otherwise.
     fn is_zk(&self) -> usize {
         Self::Pcs::ZK as usize
     }
@@ -47,9 +47,9 @@ pub trait StarkGenericConfig {

 #[derive(Debug)]
 pub struct StarkConfig<Pcs, Challenge, Challenger> {
-    /// The PCS used to commit polynomials and prove opening proofs.
+    /// The [`Pcs`] used to commit polynomials and produce opening proofs.
     pcs: Pcs,
-    /// An initialised instance of the challenger.
+    /// An initialised instance of the [`FieldChallenger`].
challenger: Challenger, _phantom: PhantomData, } diff --git a/uni-stark/src/folder.rs b/uni-stark/src/folder.rs index 2d4e1a0d6..8c8c18ea2 100644 --- a/uni-stark/src/folder.rs +++ b/uni-stark/src/folder.rs @@ -1,6 +1,6 @@ use alloc::vec::Vec; -use p3_air::{AirBuilder, AirBuilderWithPublicValues, PairBuilder}; +use p3_air::{AirBuilder, AirBuilderWithPublicValues, PeriodicAirBuilder}; use p3_field::{BasedVectorSpace, PackedField}; use p3_matrix::dense::RowMajorMatrixView; use p3_matrix::stack::ViewPair; @@ -14,11 +14,11 @@ use crate::{PackedChallenge, PackedVal, StarkGenericConfig, Val}; /// `C_0 + alpha C_1 + alpha^2 C_2 + ...` #[derive(Debug)] pub struct ProverConstraintFolder<'a, SC: StarkGenericConfig> { - /// The matrix containing rows on which the constraint polynomial is to be evaluated + /// The [`RowMajorMatrixView`] containing rows on which the constraint polynomial is evaluated. pub main: RowMajorMatrixView<'a, PackedVal>, - /// The preprocessed columns (if any) + /// The preprocessed columns (if any) as a [`RowMajorMatrixView`]. pub preprocessed: Option>>, - /// Public inputs to the AIR + /// Public inputs to the [AIR](`p3_air::Air`) implementation. pub public_values: &'a [Val], /// Evaluations of the Selector polynomial for the first row of the trace pub is_first_row: PackedVal, @@ -35,17 +35,19 @@ pub struct ProverConstraintFolder<'a, SC: StarkGenericConfig> { pub accumulator: PackedChallenge, /// Current constraint index being processed pub constraint_index: usize, + /// Evaluations of periodic columns at the current row (base field for prover) + pub periodic_values: Vec>, } /// Handles constraint verification for the verifier in a STARK system. /// -/// Similar to ProverConstraintFolder but operates on committed values rather than the full trace, +/// Similar to [`ProverConstraintFolder`] but operates on committed values rather than the full trace, /// using a more efficient accumulation method for verification. #[derive(Debug)] pub struct VerifierConstraintFolder<'a, SC: StarkGenericConfig> { - /// Pair of consecutive rows from the committed polynomial evaluations + /// Pair of consecutive rows from the committed polynomial evaluations as a [`ViewPair`]. pub main: ViewPair<'a, SC::Challenge>, - /// The preprocessed columns (if any) + /// The preprocessed columns (if any) as a [`ViewPair`]. 
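+    /// `None` when the [`Air`] defines no preprocessed columns.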
pub preprocessed: Option>, /// Public values that are inputs to the computation pub public_values: &'a [Val], @@ -59,6 +61,8 @@ pub struct VerifierConstraintFolder<'a, SC: StarkGenericConfig> { pub alpha: SC::Challenge, /// Running accumulator for all constraints pub accumulator: SC::Challenge, + /// Evaluations of periodic columns at the current row + pub periodic_values: Vec, } impl<'a, SC: StarkGenericConfig> AirBuilder for ProverConstraintFolder<'a, SC> { @@ -72,6 +76,10 @@ impl<'a, SC: StarkGenericConfig> AirBuilder for ProverConstraintFolder<'a, SC> { self.main } + fn preprocessed(&self) -> Option { + self.preprocessed + } + #[inline] fn is_first_row(&self) -> Self::Expr { self.is_first_row @@ -123,14 +131,15 @@ impl AirBuilderWithPublicValues for ProverConstraintFold } } -impl<'a, SC: StarkGenericConfig> PairBuilder for ProverConstraintFolder<'a, SC> { - #[inline] - fn preprocessed(&self) -> Self::M { - self.preprocessed - .expect("Air does not provide preprocessed columns, hence can not be consumed") +impl PeriodicAirBuilder for ProverConstraintFolder<'_, SC> { + type PeriodicVar = PackedVal; + + fn periodic_values(&self) -> &[Self::PeriodicVar] { + &self.periodic_values } } + impl<'a, SC: StarkGenericConfig> AirBuilder for VerifierConstraintFolder<'a, SC> { type F = Val; type Expr = SC::Challenge; @@ -141,6 +150,10 @@ impl<'a, SC: StarkGenericConfig> AirBuilder for VerifierConstraintFolder<'a, SC> self.main } + fn preprocessed(&self) -> Option { + self.preprocessed + } + fn is_first_row(&self) -> Self::Expr { self.is_first_row } @@ -175,9 +188,10 @@ impl AirBuilderWithPublicValues for VerifierConstraintFo } } -impl<'a, SC: StarkGenericConfig> PairBuilder for VerifierConstraintFolder<'a, SC> { - fn preprocessed(&self) -> Self::M { - self.preprocessed - .expect("Air does not provide preprocessed columns, hence can not be consumed") +impl PeriodicAirBuilder for VerifierConstraintFolder<'_, SC> { + type PeriodicVar = SC::Challenge; + + fn periodic_values(&self) -> &[Self::PeriodicVar] { + &self.periodic_values } } diff --git a/uni-stark/src/lib.rs b/uni-stark/src/lib.rs index d462eaf0b..7aca95c68 100644 --- a/uni-stark/src/lib.rs +++ b/uni-stark/src/lib.rs @@ -4,23 +4,24 @@ extern crate alloc; +mod check_constraints; mod config; mod folder; +mod preprocessed; mod proof; mod prover; +mod sub_builder; mod symbolic_builder; -mod symbolic_expression; -mod symbolic_variable; mod verifier; -mod check_constraints; - pub use check_constraints::*; pub use config::*; pub use folder::*; +// Public re-exports from p3-air. +pub use p3_air::symbolic::*; +pub use preprocessed::*; pub use proof::*; pub use prover::*; +pub use sub_builder::*; pub use symbolic_builder::*; -pub use symbolic_expression::*; -pub use symbolic_variable::*; pub use verifier::*; diff --git a/uni-stark/src/preprocessed.rs b/uni-stark/src/preprocessed.rs new file mode 100644 index 000000000..24b1e0a8e --- /dev/null +++ b/uni-stark/src/preprocessed.rs @@ -0,0 +1,94 @@ +use p3_air::Air; +use p3_commit::Pcs; +use p3_field::Field; +use p3_matrix::Matrix; +use tracing::debug_span; + +use crate::{ProverConstraintFolder, StarkGenericConfig, SymbolicAirBuilder, Val}; + +/// Prover-side reusable data for preprocessed columns. +/// +/// This allows committing to the preprocessed trace once per [`Air`]/degree and reusing +/// the commitment and [`Pcs`] prover data across many proofs. +pub struct PreprocessedProverData { + /// The width (number of columns) of the preprocessed trace. 
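+    /// Expected to match the width of the matrix returned by the [`Air`]'s
+    /// `preprocessed_trace()`.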
+ pub width: usize, + /// The log2 of the degree of the domain over which the preprocessed trace is committed. + /// + /// In the current uni-stark implementation this matches `degree_bits` in [`Proof`](crate::Proof), + /// i.e. the (extended) trace degree. + pub degree_bits: usize, + /// [`Pcs`] commitment to the preprocessed trace. + pub commitment: >::Commitment, + /// [`Pcs`] prover data for the preprocessed trace. + pub prover_data: >::ProverData, +} + +/// Verifier-side reusable data for preprocessed columns. +/// +/// This allows committing to the preprocessed trace once per [`Air`]/degree and reusing +/// the commitment across many verifications. +#[derive(Clone)] +pub struct PreprocessedVerifierKey { + /// The width (number of columns) of the preprocessed trace. + pub width: usize, + /// The log2 of the degree of the domain over which the preprocessed trace is committed. + /// + /// This should match `degree_bits` in [`Proof`](crate::Proof), i.e. the (extended) trace degree. + pub degree_bits: usize, + /// [`Pcs`] commitment to the preprocessed trace. + pub commitment: >::Commitment, +} + +/// Set up and commit the preprocessed trace for a given [`Air`] and degree. +/// +/// This can be called once per [`Air`]/degree configuration to obtain reusable +/// prover data for preprocessed columns. Returns `None` if the [`Air`] does not +/// define any preprocessed columns. +pub fn setup_preprocessed( + config: &SC, + air: &A, + degree_bits: usize, +) -> Option<(PreprocessedProverData, PreprocessedVerifierKey)> +where + SC: StarkGenericConfig, + Val: Field, + A: Air>> + for<'a> Air>, +{ + let pcs = config.pcs(); + let is_zk = config.is_zk(); + + let init_degree = 1 << degree_bits; + let degree = 1 << (degree_bits + is_zk); + + let preprocessed = air.preprocessed_trace()?; + + let width = preprocessed.width(); + if width == 0 { + return None; + } + + assert_eq!( + preprocessed.height(), + init_degree, + "preprocessed trace height must equal trace degree" + ); + + let trace_domain = pcs.natural_domain_for_degree(degree); + let (commitment, prover_data) = debug_span!("commit to preprocessed trace") + .in_scope(|| pcs.commit_preprocessing([(trace_domain, preprocessed)])); + + let degree_bits = degree_bits + is_zk; + let prover_data = PreprocessedProverData { + width, + degree_bits, + commitment: commitment.clone(), + prover_data, + }; + let vk = PreprocessedVerifierKey { + width, + degree_bits, + commitment, + }; + Some((prover_data, vk)) +} diff --git a/uni-stark/src/prover.rs b/uni-stark/src/prover.rs index b0ce759c0..b88dcef79 100644 --- a/uni-stark/src/prover.rs +++ b/uni-stark/src/prover.rs @@ -5,7 +5,7 @@ use itertools::Itertools; use p3_air::Air; use p3_challenger::{CanObserve, FieldChallenger}; use p3_commit::{Pcs, PolynomialSpace}; -use p3_field::{BasedVectorSpace, PackedValue, PrimeCharacteristicRing}; +use p3_field::{BasedVectorSpace, PackedFieldExtension, PackedValue, PrimeCharacteristicRing}; use p3_matrix::Matrix; use p3_matrix::dense::RowMajorMatrix; use p3_maybe_rayon::prelude::*; @@ -13,31 +13,14 @@ use p3_util::log2_strict_usize; use tracing::{debug_span, info_span, instrument}; use crate::{ - Commitments, Domain, OpenedValues, PackedChallenge, PackedVal, Proof, ProverConstraintFolder, - StarkGenericConfig, SymbolicAirBuilder, Val, get_log_quotient_degree, get_symbolic_constraints, + Commitments, Domain, OpenedValues, PackedChallenge, PackedVal, PreprocessedProverData, Proof, + ProverConstraintFolder, StarkGenericConfig, SymbolicAirBuilder, Val, + 
get_log_num_quotient_chunks, get_symbolic_constraints, }; -/// Commits the preprocessed trace if present. -/// Returns the commitment hash and prover data (available iff preprocessed is Some). -#[allow(clippy::type_complexity)] -fn commit_preprocessed_trace( - preprocessed: RowMajorMatrix>, - pcs: &SC::Pcs, - trace_domain: >::Domain, -) -> ( - >::Commitment, - >::ProverData, -) -where - SC: StarkGenericConfig, -{ - debug_span!("commit to preprocessed trace") - .in_scope(|| pcs.commit([(trace_domain, preprocessed)])) -} - #[instrument(skip_all)] #[allow(clippy::multiple_bound_locations, clippy::type_repetition_in_bounds)] // cfg not supported in where clauses? -pub fn prove< +pub fn prove_with_preprocessed< SC, #[cfg(debug_assertions)] A: for<'a> Air>>, #[cfg(not(debug_assertions))] A, @@ -46,6 +29,7 @@ pub fn prove< air: &A, trace: RowMajorMatrix>, public_values: &[Val], + preprocessed: Option<&PreprocessedProverData>, ) -> Proof where SC: StarkGenericConfig, @@ -59,9 +43,35 @@ where let log_degree = log2_strict_usize(degree); let log_ext_degree = log_degree + config.is_zk(); - // Get preprocessed trace and its width for symbolic constraint evaluation - let preprocessed_trace = air.preprocessed_trace(); - let preprocessed_width = preprocessed_trace.as_ref().map(|m| m.width).unwrap_or(0); + // Get preprocessed width for symbolic constraint evaluation. + // + // - If reusable preprocessed prover data is provided, trust its width and degree_bits + // (and enforce consistency). + // - Otherwise, if the AIR defines preprocessed columns, we treat it as an error: + // callers must use `setup_preprocessed` and pass the resulting data in. + let preprocessed_width = preprocessed.map_or_else( + || { + if let Some(preprocessed_trace) = air.preprocessed_trace() { + let width = preprocessed_trace.width(); + if width > 0 { + panic!( + "AIR defines preprocessed columns (width = {}), \ + but no PreprocessedProverData was provided. \ + Call `setup_preprocessed` and pass it to `prove_with_preprocessed`.", + width + ); + } + } + 0 + }, + |pp| { + assert_eq!( + pp.degree_bits, log_ext_degree, + "PreprocessedProverData degree_bits does not match trace degree_bits" + ); + pp.width + }, + ); // Compute the constraint polynomials as vectors of symbolic expressions. let symbolic_constraints = @@ -103,14 +113,14 @@ where // From the degree of the constraint polynomial, compute the number // of quotient polynomials we will split Q(x) into. This is chosen to // always be a power of 2. - let log_quotient_degree = get_log_quotient_degree::, A>( + let log_num_quotient_chunks = get_log_num_quotient_chunks::, A>( air, preprocessed_width, public_values.len(), config.is_zk(), ); - let quotient_degree = 1 << (log_quotient_degree + config.is_zk()); + let num_quotient_chunks = 1 << (log_num_quotient_chunks + config.is_zk()); // Initialize the PCS and the Challenger. let pcs = config.pcs(); @@ -137,16 +147,10 @@ where let (trace_commit, trace_data) = info_span!("commit to trace data").in_scope(|| pcs.commit([(ext_trace_domain, trace)])); - let (preprocessed_commit, preprocessed_data) = preprocessed_trace.map_or_else( - || (None, None), - |preprocessed| { - let (commit, data) = - commit_preprocessed_trace::(preprocessed, pcs, ext_trace_domain); - #[cfg(debug_assertions)] - assert_eq!(config.is_zk(), 0); // TODO: preprocessed columns not supported in zk mode - (Some(commit), Some(data)) - }, - ); + // Preprocessed commitment and prover data (if any). 
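+    // `Option::unzip` turns the optional `(commitment, prover_data)` pair into
+    // two independent `Option`s, so the two halves can be consumed separately below.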
+    let (preprocessed_commit, preprocessed_data_ref) = preprocessed
+        .map(|pp| (pp.commitment.clone(), &pp.prover_data))
+        .unzip();

     // Observe the instance.
     // degree < 2^255 so we can safely cast log_degree to a u8.
@@ -190,7 +194,7 @@ where
     // This domain must be contained in the domain over which `trace_data` is defined.
     // Explicitly it should be equal to `gK` for some subgroup `K` contained in `H'`.
     let quotient_domain =
-        ext_trace_domain.create_disjoint_domain(1 << (log_ext_degree + log_quotient_degree));
+        ext_trace_domain.create_disjoint_domain(1 << (log_ext_degree + log_num_quotient_chunks));

     // Return the subset of the extended trace `ET` corresponding to the rows giving evaluations
     // over the quotient domain.
@@ -198,9 +202,8 @@
     // This only works if the trace domain is `gH'` and the quotient domain is `gK` for some subgroup `K` contained in `H'`.
     // TODO: Make this explicit in `get_evaluations_on_domain` or otherwise fix this.
     let trace_on_quotient_domain = pcs.get_evaluations_on_domain(&trace_data, 0, quotient_domain);
-    let preprocessed_on_quotient_domain = preprocessed_data
-        .as_ref()
-        .map(|data| pcs.get_evaluations_on_domain(data, 0, quotient_domain));
+    let preprocessed_on_quotient_domain = preprocessed_data_ref
+        .map(|data| pcs.get_evaluations_on_domain_no_random(data, 0, quotient_domain));

     // Compute the quotient polynomial `Q(x)` by evaluating
     // `C(T_1(x), ..., T_w(x), T_1(hx), ..., T_w(hx), selectors(x)) / Z_H(x)`
@@ -245,20 +248,20 @@ where
     // quotient_data contains the entire tree.
     // - quotient_data.leaves is a pair of matrices containing the `q_i0(x)` and `q_i1(x)`.
     let (quotient_commit, quotient_data) = info_span!("commit to quotient poly chunks")
-        .in_scope(|| pcs.commit_quotient(quotient_domain, quotient_flat, quotient_degree));
+        .in_scope(|| pcs.commit_quotient(quotient_domain, quotient_flat, num_quotient_chunks));
     challenger.observe(quotient_commit.clone());

     // If zk is enabled, we generate random extension field values of the size of the randomized trace. If `n` is the degree of the initial trace,
     // then the randomized trace has degree `2n`. To randomize the FRI batch polynomial, we then need an extension field random polynomial of degree `2n - 1`.
-    // So we can generate a random polynomial of degree `2n`, and provide it to `open` as is.
+    // So we can generate a random polynomial of degree `2n`, and provide it to `open` as is.
     // Then the method will add `(R(X) - R(z)) / (X - z)` (which is of the desired degree `2n - 1`), to the batch of polynomials.
     // Since we need a random polynomial defined over the extension field, and the `commit` method is over the base field,
-    // we actually need to commit to `SC::CHallenge::D` base field random polynomials.
+    // we actually need to commit to `SC::Challenge::D` base field random polynomials.
     // This is similar to what is done for the quotient polynomials.
     // TODO: This approach is only statistically zk. To make it perfectly zk, `R` would have to truly be an extension field polynomial.
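+    // For example, with a quartic extension (`SC::Challenge::D = 4`), four random
+    // base field polynomials stand in for a single random extension field polynomial.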
let (opt_r_commit, opt_r_data) = if SC::Pcs::ZK { let (r_commit, r_data) = pcs - .get_opt_randomization_poly_commitment(ext_trace_domain) + .get_opt_randomization_poly_commitment(core::iter::once(ext_trace_domain)) .expect("ZK is enabled, so we should have randomization commitments"); (Some(r_commit), Some(r_data)) } else { @@ -297,10 +300,8 @@ where let (opened_values, opening_proof) = info_span!("open").in_scope(|| { let round0 = opt_r_data.as_ref().map(|r_data| (r_data, vec![vec![zeta]])); let round1 = (&trace_data, vec![vec![zeta, zeta_next]]); - let round2 = ("ient_data, vec![vec![zeta]; quotient_degree]); // open every chunk at zeta - let round3 = preprocessed_data - .as_ref() - .map(|data| (data, vec![vec![zeta, zeta_next]])); + let round2 = ("ient_data, vec![vec![zeta]; num_quotient_chunks]); // open every chunk at zeta + let round3 = preprocessed_data_ref.map(|data| (data, vec![vec![zeta, zeta_next]])); let rounds = round0 .into_iter() @@ -308,7 +309,7 @@ where .chain(round3) .collect(); - pcs.open(rounds, &mut challenger) + pcs.open_with_preprocessing(rounds, &mut challenger, preprocessed_data_ref.is_some()) }); let trace_idx = SC::Pcs::TRACE_IDX; let quotient_idx = SC::Pcs::QUOTIENT_IDX; @@ -347,7 +348,26 @@ where } } -#[instrument(name = "compute quotient polynomial", skip_all)] +#[instrument(skip_all)] +#[allow(clippy::multiple_bound_locations, clippy::type_repetition_in_bounds)] // cfg not supported in where clauses? +pub fn prove< + SC, + #[cfg(debug_assertions)] A: for<'a> Air>>, + #[cfg(not(debug_assertions))] A, +>( + config: &SC, + air: &A, + trace: RowMajorMatrix>, + public_values: &[Val], +) -> Proof +where + SC: StarkGenericConfig, + A: Air>> + for<'a> Air>, +{ + prove_with_preprocessed::(config, air, trace, public_values, None) +} + +#[instrument(skip_all)] // TODO: Group some arguments to remove the `allow`? #[allow(clippy::too_many_arguments)] pub fn quotient_values( @@ -430,6 +450,7 @@ where decomposed_alpha_powers: &decomposed_alpha_powers, accumulator, constraint_index: 0, + periodic_values: vec![], }; air.eval(&mut folder); @@ -437,11 +458,8 @@ where let quotient = folder.accumulator * inv_vanishing; // "Transpose" D packed base coefficients into WIDTH scalar extension coefficients. - (0..core::cmp::min(quotient_size, PackedVal::::WIDTH)).map(move |idx_in_packing| { - SC::Challenge::from_basis_coefficients_fn(|coeff_idx| { - quotient.as_basis_coefficients_slice()[coeff_idx].as_slice()[idx_in_packing] - }) - }) + (0..core::cmp::min(quotient_size, PackedVal::::WIDTH)) + .map(move |idx_in_packing| quotient.extract(idx_in_packing)) }) .collect() } diff --git a/uni-stark/src/sub_builder.rs b/uni-stark/src/sub_builder.rs new file mode 100644 index 000000000..25bbd6481 --- /dev/null +++ b/uni-stark/src/sub_builder.rs @@ -0,0 +1,75 @@ +//! Helpers for reusing an [`AirBuilder`] on a restricted set of trace columns. +//! +//! The uni-STARK builders often need to enforce constraints that refer to only a slice of the main +//! trace. [`HorizontallyTruncated`] offers a cheap view over a subset of columns, and +//! [`SubAirBuilder`] wires that view into any [`AirBuilder`] implementation so a sub-air can be +//! evaluated independently without cloning trace data. 
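+//!
+//! A minimal usage sketch follows; `EqAir` and `eval_embedded` are illustrative
+//! names only (not items in this crate), and the snippet is indicative rather
+//! than tested:
+//!
+//! ```rust,ignore
+//! use p3_air::{Air, AirBuilder, BaseAir};
+//! use p3_matrix::Matrix;
+//!
+//! /// Toy child AIR owning two columns that must be equal on every row.
+//! struct EqAir;
+//!
+//! impl<F> BaseAir<F> for EqAir {
+//!     fn width(&self) -> usize {
+//!         2
+//!     }
+//! }
+//!
+//! impl<AB: AirBuilder> Air<AB> for EqAir {
+//!     fn eval(&self, builder: &mut AB) {
+//!         let main = builder.main();
+//!         let row = main.row_slice(0).expect("empty trace");
+//!         builder.assert_eq(row[0], row[1]);
+//!     }
+//! }
+//!
+//! /// Inside a parent AIR's `eval`, expose only columns 3..5 to the child.
+//! fn eval_embedded<AB: AirBuilder>(builder: &mut AB) {
+//!     let mut sub = SubAirBuilder::<AB, EqAir, AB::F>::new(builder, 3..5);
+//!     EqAir.eval(&mut sub);
+//! }
+//! ```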
+
+// Code inspired by SP1 with additional modifications:
+// https://github.com/succinctlabs/sp1/blob/main/crates/stark/src/air/sub_builder.rs
+
+use core::ops::Range;
+
+use p3_air::{AirBuilder, BaseAir};
+use p3_matrix::horizontally_truncated::HorizontallyTruncated;
+
+/// Evaluates a sub-AIR against a restricted slice of the parent trace.
+///
+/// This is useful whenever a standalone component AIR is embedded in a larger system but only owns
+/// a few columns. `SubAirBuilder` reuses the parent builder for bookkeeping so witness generation
+/// and constraint enforcement stay in sync.
+pub struct SubAirBuilder<'a, AB: AirBuilder, SubAir: BaseAir<T>, T> {
+    /// Mutable reference to the parent builder.
+    inner: &'a mut AB,
+
+    /// Column range (in the parent trace) that the sub-AIR is allowed to see.
+    column_range: Range<usize>,
+
+    /// Marker for the sub-AIR and witness type.
+    _phantom: core::marker::PhantomData<(SubAir, T)>,
+}
+
+impl<'a, AB: AirBuilder, SubAir: BaseAir<T>, T> SubAirBuilder<'a, AB, SubAir, T> {
+    /// Create a new [`SubAirBuilder`] exposing only `column_range` to the sub-AIR.
+    ///
+    /// The range must lie entirely inside the parent trace width.
+    #[must_use]
+    pub const fn new(inner: &'a mut AB, column_range: Range<usize>) -> Self {
+        Self {
+            inner,
+            column_range,
+            _phantom: core::marker::PhantomData,
+        }
+    }
+}
+
+/// Implements `AirBuilder` for `SubAirBuilder`.
+impl<AB: AirBuilder, SubAir: BaseAir<F>, F> AirBuilder for SubAirBuilder<'_, AB, SubAir, F> {
+    type F = AB::F;
+    type Expr = AB::Expr;
+    type Var = AB::Var;
+    type M = HorizontallyTruncated<AB::Var, AB::M>;
+
+    fn main(&self) -> Self::M {
+        let matrix = self.inner.main();
+
+        HorizontallyTruncated::new_with_range(matrix, self.column_range.clone())
+            .expect("sub-air column range exceeds parent width")
+    }
+
+    fn is_first_row(&self) -> Self::Expr {
+        self.inner.is_first_row()
+    }
+
+    fn is_last_row(&self) -> Self::Expr {
+        self.inner.is_last_row()
+    }
+
+    fn is_transition_window(&self, size: usize) -> Self::Expr {
+        self.inner.is_transition_window(size)
+    }
+
+    fn assert_zero<I: Into<Self::Expr>>(&mut self, x: I) {
+        self.inner.assert_zero(x.into());
+    }
+}
diff --git a/uni-stark/src/symbolic_builder.rs b/uni-stark/src/symbolic_builder.rs
index 1c4cbdc56..2e559dd99 100644
--- a/uni-stark/src/symbolic_builder.rs
+++ b/uni-stark/src/symbolic_builder.rs
@@ -1,39 +1,62 @@
 use alloc::vec;
 use alloc::vec::Vec;

-use p3_air::{Air, AirBuilder, AirBuilderWithPublicValues, PairBuilder};
-use p3_field::Field;
+use p3_air::{
+    Air, AirBuilder, AirBuilderWithPublicValues, BaseAir, Entry, ExtensionBuilder,
+    PeriodicAirBuilder, PermutationAirBuilder, SymbolicExpression, SymbolicVariable,
+};
+use p3_field::{ExtensionField, Field};
 use p3_matrix::dense::RowMajorMatrix;
 use p3_util::log2_ceil_usize;
+
 use tracing::instrument;

-use crate::Entry;
-use crate::symbolic_expression::SymbolicExpression;
-use crate::symbolic_variable::SymbolicVariable;
+#[instrument(skip_all)]
+pub fn get_log_num_quotient_chunks<F, A>(
+    air: &A,
+    preprocessed_width: usize,
+    num_public_values: usize,
+    is_zk: usize,
+) -> usize
+where
+    F: Field,
+    A: BaseAir<F> + Air<SymbolicAirBuilder<F>>,
+{
+    get_log_quotient_degree_extension(air, preprocessed_width, num_public_values, 0, 0, is_zk)
+}

-#[instrument(name = "infer log of constraint degree", skip_all)]
-pub fn get_log_quotient_degree<F, A>(
+#[instrument(name = "infer log of base and extension constraint degree", skip_all)]
+pub fn get_log_quotient_degree_extension<F, EF, A>(
     air: &A,
     preprocessed_width: usize,
     num_public_values: usize,
+    permutation_width: usize,
+    num_permutation_challenges: usize,
     is_zk: usize,
 ) -> 
usize where F: Field, - A: Air>, + EF: ExtensionField, + A: BaseAir + Air>, { assert!(is_zk <= 1, "is_zk must be either 0 or 1"); // We pad to at least degree 2, since a quotient argument doesn't make sense with smaller degrees. - let constraint_degree = - (get_max_constraint_degree(air, preprocessed_width, num_public_values) + is_zk).max(2); - - // The quotient's actual degree is approximately (max_constraint_degree - 1) n, - // where subtracting 1 comes from division by the vanishing polynomial. - // But we pad it to a power of two so that we can efficiently decompose the quotient. + let constraint_degree = (get_max_constraint_degree_extension::( + air, + preprocessed_width, + num_public_values, + permutation_width, + num_permutation_challenges, + ) + is_zk) + .max(2); + + // We bound the degree of the quotient polynomial by constraint_degree - 1, + // then choose the number of quotient chunks as the smallest power of two + // >= (constraint_degree - 1). This function returns log2(#chunks). log2_ceil_usize(constraint_degree - 1) } -#[instrument(name = "infer constraint degree", skip_all, level = "debug")] +#[instrument(skip_all, level = "debug")] pub fn get_max_constraint_degree( air: &A, preprocessed_width: usize, @@ -41,16 +64,55 @@ pub fn get_max_constraint_degree( ) -> usize where F: Field, - A: Air>, + A: BaseAir + Air>, { - get_symbolic_constraints(air, preprocessed_width, num_public_values) + get_max_constraint_degree_extension(air, preprocessed_width, num_public_values, 0, 0) +} + +#[instrument( + name = "infer base and extension constraint degree", + skip_all, + level = "debug" +)] +pub fn get_max_constraint_degree_extension( + air: &A, + preprocessed_width: usize, + num_public_values: usize, + permutation_width: usize, + num_permutation_challenges: usize, +) -> usize +where + F: Field, + EF: ExtensionField, + A: BaseAir + Air>, +{ + let (base_constraints, extension_constraints) = get_all_symbolic_constraints( + air, + preprocessed_width, + num_public_values, + permutation_width, + num_permutation_challenges, + ); + + let base_degree = base_constraints + .iter() + .map(|c| c.degree_multiple()) + .max() + .unwrap_or(0); + + let extension_degree = extension_constraints .iter() .map(|c| c.degree_multiple()) .max() - .unwrap_or(0) + .unwrap_or(0); + base_degree.max(extension_degree) } -#[instrument(name = "evaluate constraints symbolically", skip_all, level = "debug")] +#[instrument( + name = "evaluate base constraints symbolically", + skip_all, + level = "debug" +)] pub fn get_symbolic_constraints( air: &A, preprocessed_width: usize, @@ -58,24 +120,120 @@ pub fn get_symbolic_constraints( ) -> Vec> where F: Field, - A: Air>, + A: BaseAir + Air>, +{ + let num_periodic = air.periodic_table().len(); + let mut builder = SymbolicAirBuilder::new_with_periodic( + preprocessed_width, + air.width(), + num_public_values, + 0, + 0, + num_periodic, + ); + air.eval(&mut builder); + builder.base_constraints() +} + +#[instrument( + name = "evaluate extension constraints symbolically", + skip_all, + level = "debug" +)] +pub fn get_symbolic_constraints_extension( + air: &A, + preprocessed_width: usize, + num_public_values: usize, + permutation_width: usize, + num_permutation_challenges: usize, +) -> Vec> +where + F: Field, + EF: ExtensionField, + A: BaseAir + Air>, +{ + let num_periodic = air.periodic_table().len(); + let mut builder = SymbolicAirBuilder::new_with_periodic( + preprocessed_width, + air.width(), + num_public_values, + permutation_width, + num_permutation_challenges, + num_periodic, + 
); + air.eval(&mut builder); + builder.extension_constraints() +} + +#[instrument( + name = "evaluate all constraints symbolically", + skip_all, + level = "debug" +)] +pub fn get_all_symbolic_constraints( + air: &A, + preprocessed_width: usize, + num_public_values: usize, + permutation_width: usize, + num_permutation_challenges: usize, +) -> (Vec>, Vec>) +where + F: Field, + EF: ExtensionField, + A: BaseAir + Air>, { - let mut builder = SymbolicAirBuilder::new(preprocessed_width, air.width(), num_public_values); + let num_periodic = air.periodic_table().len(); + let mut builder = SymbolicAirBuilder::new_with_periodic( + preprocessed_width, + air.width(), + num_public_values, + permutation_width, + num_permutation_challenges, + num_periodic, + ); air.eval(&mut builder); - builder.constraints() + (builder.base_constraints(), builder.extension_constraints()) } -/// An `AirBuilder` for evaluating constraints symbolically, and recording them for later use. +/// An [`AirBuilder`] for evaluating constraints symbolically, and recording them for later use. #[derive(Debug)] -pub struct SymbolicAirBuilder { +pub struct SymbolicAirBuilder = F> { preprocessed: RowMajorMatrix>, main: RowMajorMatrix>, public_values: Vec>, - constraints: Vec>, + periodic_values: Vec>, + base_constraints: Vec>, + permutation: RowMajorMatrix>, + permutation_challenges: Vec>, + extension_constraints: Vec>, } -impl SymbolicAirBuilder { - pub fn new(preprocessed_width: usize, width: usize, num_public_values: usize) -> Self { +impl> SymbolicAirBuilder { + pub fn new( + preprocessed_width: usize, + width: usize, + num_public_values: usize, + permutation_width: usize, + num_permutation_challenges: usize, + ) -> Self { + Self::new_with_periodic( + preprocessed_width, + width, + num_public_values, + permutation_width, + num_permutation_challenges, + 0, + ) + } + + pub fn new_with_periodic( + preprocessed_width: usize, + width: usize, + num_public_values: usize, + permutation_width: usize, + num_permutation_challenges: usize, + num_periodic_values: usize, + ) -> Self { let prep_values = [0, 1] .into_iter() .flat_map(|offset| { @@ -92,20 +250,42 @@ impl SymbolicAirBuilder { let public_values = (0..num_public_values) .map(move |index| SymbolicVariable::new(Entry::Public, index)) .collect(); + let periodic_values = (0..num_periodic_values) + .map(move |index| SymbolicVariable::new(Entry::Periodic, index)) + .collect(); + let perm_values = [0, 1] + .into_iter() + .flat_map(|offset| { + (0..permutation_width) + .map(move |index| SymbolicVariable::new(Entry::Permutation { offset }, index)) + }) + .collect(); + let permutation = RowMajorMatrix::new(perm_values, permutation_width); + let permutation_challenges = (0..num_permutation_challenges) + .map(|index| SymbolicVariable::new(Entry::Challenge, index)) + .collect(); Self { preprocessed: RowMajorMatrix::new(prep_values, preprocessed_width), main: RowMajorMatrix::new(main_values, width), public_values, - constraints: vec![], + periodic_values, + base_constraints: vec![], + permutation, + permutation_challenges, + extension_constraints: vec![], } } - pub fn constraints(self) -> Vec> { - self.constraints + pub fn extension_constraints(&self) -> Vec> { + self.extension_constraints.clone() + } + + pub fn base_constraints(&self) -> Vec> { + self.base_constraints.clone() } } -impl AirBuilder for SymbolicAirBuilder { +impl> AirBuilder for SymbolicAirBuilder { type F = F; type Expr = SymbolicExpression; type Var = SymbolicVariable; @@ -115,6 +295,10 @@ impl AirBuilder for SymbolicAirBuilder { 
self.main.clone() } + fn preprocessed(&self) -> Option { + Some(self.preprocessed.clone()) + } + fn is_first_row(&self) -> Self::Expr { SymbolicExpression::IsFirstRow } @@ -134,20 +318,58 @@ impl AirBuilder for SymbolicAirBuilder { } fn assert_zero>(&mut self, x: I) { - self.constraints.push(x.into()); + self.base_constraints.push(x.into()); } } -impl AirBuilderWithPublicValues for SymbolicAirBuilder { +impl> AirBuilderWithPublicValues for SymbolicAirBuilder { type PublicVar = SymbolicVariable; fn public_values(&self) -> &[Self::PublicVar] { &self.public_values } } -impl PairBuilder for SymbolicAirBuilder { - fn preprocessed(&self) -> Self::M { - self.preprocessed.clone() +impl> ExtensionBuilder for SymbolicAirBuilder +where + SymbolicExpression: From>, +{ + type EF = EF; + type ExprEF = SymbolicExpression; + type VarEF = SymbolicVariable; + + fn assert_zero_ext(&mut self, x: I) + where + I: Into, + { + self.extension_constraints.push(x.into()); + } +} + +impl> PermutationAirBuilder for SymbolicAirBuilder +where + SymbolicExpression: From>, +{ + type MP = RowMajorMatrix; + + type RandomVar = SymbolicVariable; + + fn permutation(&self) -> Self::MP { + self.permutation.clone() + } + + fn permutation_randomness(&self) -> &[Self::RandomVar] { + &self.permutation_challenges + } +} + +impl> PeriodicAirBuilder for SymbolicAirBuilder +where + SymbolicExpression: From>, +{ + type PeriodicVar = SymbolicVariable; + + fn periodic_values(&self) -> &[Self::PeriodicVar] { + &self.periodic_values } } @@ -179,27 +401,27 @@ mod tests { } #[test] - fn test_get_log_quotient_degree_no_constraints() { + fn test_get_log_num_quotient_chunks_no_constraints() { let air = MockAir { constraints: vec![], width: 4, }; - let log_degree = get_log_quotient_degree(&air, 3, 2, 0); + let log_degree = get_log_num_quotient_chunks(&air, 3, 2, 0); assert_eq!(log_degree, 0); } #[test] - fn test_get_log_quotient_degree_single_constraint() { + fn test_get_log_num_quotient_chunks_single_constraint() { let air = MockAir { constraints: vec![SymbolicVariable::new(Entry::Main { offset: 0 }, 0)], width: 4, }; - let log_degree = get_log_quotient_degree(&air, 3, 2, 0); + let log_degree = get_log_num_quotient_chunks(&air, 3, 2, 0); assert_eq!(log_degree, log2_ceil_usize(1)); } #[test] - fn test_get_log_quotient_degree_multiple_constraints() { + fn test_get_log_num_quotient_chunks_multiple_constraints() { let air = MockAir { constraints: vec![ SymbolicVariable::new(Entry::Main { offset: 0 }, 0), @@ -208,7 +430,7 @@ mod tests { ], width: 4, }; - let log_degree = get_log_quotient_degree(&air, 3, 2, 0); + let log_degree = get_log_num_quotient_chunks(&air, 3, 2, 0); assert_eq!(log_degree, log2_ceil_usize(1)); } @@ -266,7 +488,7 @@ mod tests { #[test] fn test_symbolic_air_builder_initialization() { - let builder = SymbolicAirBuilder::::new(2, 4, 3); + let builder = SymbolicAirBuilder::::new(2, 4, 3, 0, 0); let expected_main = [ SymbolicVariable::::new(Entry::Main { offset: 0 }, 0), @@ -295,7 +517,7 @@ mod tests { #[test] fn test_symbolic_air_builder_is_first_last_row() { - let builder = SymbolicAirBuilder::::new(2, 4, 3); + let builder = SymbolicAirBuilder::::new(2, 4, 3, 0, 0); assert!( matches!(builder.is_first_row(), SymbolicExpression::IsFirstRow), @@ -310,11 +532,11 @@ mod tests { #[test] fn test_symbolic_air_builder_assert_zero() { - let mut builder = SymbolicAirBuilder::::new(2, 4, 3); + let mut builder = SymbolicAirBuilder::::new(2, 4, 3, 0, 0); let expr = SymbolicExpression::Constant(BabyBear::new(5)); builder.assert_zero(expr); - let 
constraints = builder.constraints(); + let constraints = builder.base_constraints(); assert_eq!(constraints.len(), 1, "One constraint should be recorded"); assert!( diff --git a/uni-stark/src/symbolic_expression.rs b/uni-stark/src/symbolic_expression.rs deleted file mode 100644 index 743fc5b79..000000000 --- a/uni-stark/src/symbolic_expression.rs +++ /dev/null @@ -1,470 +0,0 @@ -use alloc::rc::Rc; -use core::fmt::Debug; -use core::iter::{Product, Sum}; -use core::ops::{Add, AddAssign, Mul, MulAssign, Neg, Sub, SubAssign}; - -use p3_field::{Algebra, Field, InjectiveMonomial, PrimeCharacteristicRing}; - -use crate::symbolic_variable::SymbolicVariable; - -/// An expression over `SymbolicVariable`s. -#[derive(Clone, Debug)] -pub enum SymbolicExpression { - Variable(SymbolicVariable), - IsFirstRow, - IsLastRow, - IsTransition, - Constant(F), - Add { - x: Rc, - y: Rc, - degree_multiple: usize, - }, - Sub { - x: Rc, - y: Rc, - degree_multiple: usize, - }, - Neg { - x: Rc, - degree_multiple: usize, - }, - Mul { - x: Rc, - y: Rc, - degree_multiple: usize, - }, -} - -impl SymbolicExpression { - /// Returns the multiple of `n` (the trace length) in this expression's degree. - pub const fn degree_multiple(&self) -> usize { - match self { - Self::Variable(v) => v.degree_multiple(), - Self::IsFirstRow | Self::IsLastRow => 1, - Self::IsTransition | Self::Constant(_) => 0, - Self::Add { - degree_multiple, .. - } - | Self::Sub { - degree_multiple, .. - } - | Self::Neg { - degree_multiple, .. - } - | Self::Mul { - degree_multiple, .. - } => *degree_multiple, - } - } -} - -impl Default for SymbolicExpression { - fn default() -> Self { - Self::Constant(F::ZERO) - } -} - -impl From for SymbolicExpression { - fn from(value: F) -> Self { - Self::Constant(value) - } -} - -impl PrimeCharacteristicRing for SymbolicExpression { - type PrimeSubfield = F::PrimeSubfield; - - const ZERO: Self = Self::Constant(F::ZERO); - const ONE: Self = Self::Constant(F::ONE); - const TWO: Self = Self::Constant(F::TWO); - const NEG_ONE: Self = Self::Constant(F::NEG_ONE); - - #[inline] - fn from_prime_subfield(f: Self::PrimeSubfield) -> Self { - F::from_prime_subfield(f).into() - } -} - -impl Algebra for SymbolicExpression {} - -impl Algebra> for SymbolicExpression {} - -// Note we cannot implement PermutationMonomial due to the degree_multiple part which makes -// operations non invertible. 
-impl, const N: u64> InjectiveMonomial for SymbolicExpression {} - -impl Add for SymbolicExpression -where - T: Into, -{ - type Output = Self; - - fn add(self, rhs: T) -> Self { - match (self, rhs.into()) { - (Self::Constant(lhs), Self::Constant(rhs)) => Self::Constant(lhs + rhs), - (lhs, rhs) => Self::Add { - degree_multiple: lhs.degree_multiple().max(rhs.degree_multiple()), - x: Rc::new(lhs), - y: Rc::new(rhs), - }, - } - } -} - -impl AddAssign for SymbolicExpression -where - T: Into, -{ - fn add_assign(&mut self, rhs: T) { - *self = self.clone() + rhs.into(); - } -} - -impl Sum for SymbolicExpression -where - T: Into, -{ - fn sum>(iter: I) -> Self { - iter.map(Into::into) - .reduce(|x, y| x + y) - .unwrap_or(Self::ZERO) - } -} - -impl> Sub for SymbolicExpression { - type Output = Self; - - fn sub(self, rhs: T) -> Self { - match (self, rhs.into()) { - (Self::Constant(lhs), Self::Constant(rhs)) => Self::Constant(lhs - rhs), - (lhs, rhs) => Self::Sub { - degree_multiple: lhs.degree_multiple().max(rhs.degree_multiple()), - x: Rc::new(lhs), - y: Rc::new(rhs), - }, - } - } -} - -impl SubAssign for SymbolicExpression -where - T: Into, -{ - fn sub_assign(&mut self, rhs: T) { - *self = self.clone() - rhs.into(); - } -} - -impl Neg for SymbolicExpression { - type Output = Self; - - fn neg(self) -> Self { - match self { - Self::Constant(c) => Self::Constant(-c), - expr => Self::Neg { - degree_multiple: expr.degree_multiple(), - x: Rc::new(expr), - }, - } - } -} - -impl> Mul for SymbolicExpression { - type Output = Self; - - fn mul(self, rhs: T) -> Self { - match (self, rhs.into()) { - (Self::Constant(lhs), Self::Constant(rhs)) => Self::Constant(lhs * rhs), - (lhs, rhs) => Self::Mul { - degree_multiple: lhs.degree_multiple() + rhs.degree_multiple(), - x: Rc::new(lhs), - y: Rc::new(rhs), - }, - } - } -} - -impl MulAssign for SymbolicExpression -where - T: Into, -{ - fn mul_assign(&mut self, rhs: T) { - *self = self.clone() * rhs.into(); - } -} - -impl> Product for SymbolicExpression { - fn product>(iter: I) -> Self { - iter.map(Into::into) - .reduce(|x, y| x * y) - .unwrap_or(Self::ONE) - } -} - -#[cfg(test)] -mod tests { - use alloc::vec; - - use p3_baby_bear::BabyBear; - - use super::*; - use crate::Entry; - - #[test] - fn test_symbolic_expression_degree_multiple() { - let constant_expr = SymbolicExpression::::Constant(BabyBear::new(5)); - assert_eq!( - constant_expr.degree_multiple(), - 0, - "Constant should have degree 0" - ); - - let variable_expr = - SymbolicExpression::Variable(SymbolicVariable::new(Entry::Main { offset: 0 }, 1)); - assert_eq!( - variable_expr.degree_multiple(), - 1, - "Main variable should have degree 1" - ); - - let preprocessed_var = SymbolicExpression::Variable(SymbolicVariable::new( - Entry::Preprocessed { offset: 0 }, - 2, - )); - assert_eq!( - preprocessed_var.degree_multiple(), - 1, - "Preprocessed variable should have degree 1" - ); - - let permutation_var = SymbolicExpression::Variable(SymbolicVariable::::new( - Entry::Permutation { offset: 0 }, - 3, - )); - assert_eq!( - permutation_var.degree_multiple(), - 1, - "Permutation variable should have degree 1" - ); - - let public_var = - SymbolicExpression::Variable(SymbolicVariable::::new(Entry::Public, 4)); - assert_eq!( - public_var.degree_multiple(), - 0, - "Public variable should have degree 0" - ); - - let challenge_var = - SymbolicExpression::Variable(SymbolicVariable::::new(Entry::Challenge, 5)); - assert_eq!( - challenge_var.degree_multiple(), - 0, - "Challenge variable should have degree 0" - ); - - let 
is_first_row = SymbolicExpression::::IsFirstRow; - assert_eq!( - is_first_row.degree_multiple(), - 1, - "IsFirstRow should have degree 1" - ); - - let is_last_row = SymbolicExpression::::IsLastRow; - assert_eq!( - is_last_row.degree_multiple(), - 1, - "IsLastRow should have degree 1" - ); - - let is_transition = SymbolicExpression::::IsTransition; - assert_eq!( - is_transition.degree_multiple(), - 0, - "IsTransition should have degree 0" - ); - - let add_expr = SymbolicExpression::::Add { - x: Rc::new(variable_expr.clone()), - y: Rc::new(preprocessed_var.clone()), - degree_multiple: 1, - }; - assert_eq!( - add_expr.degree_multiple(), - 1, - "Addition should take max degree of inputs" - ); - - let sub_expr = SymbolicExpression::::Sub { - x: Rc::new(variable_expr.clone()), - y: Rc::new(preprocessed_var.clone()), - degree_multiple: 1, - }; - assert_eq!( - sub_expr.degree_multiple(), - 1, - "Subtraction should take max degree of inputs" - ); - - let neg_expr = SymbolicExpression::::Neg { - x: Rc::new(variable_expr.clone()), - degree_multiple: 1, - }; - assert_eq!( - neg_expr.degree_multiple(), - 1, - "Negation should keep the degree" - ); - - let mul_expr = SymbolicExpression::::Mul { - x: Rc::new(variable_expr), - y: Rc::new(preprocessed_var), - degree_multiple: 2, - }; - assert_eq!( - mul_expr.degree_multiple(), - 2, - "Multiplication should sum degrees" - ); - } - - #[test] - fn test_addition_of_constants() { - let a = SymbolicExpression::Constant(BabyBear::new(3)); - let b = SymbolicExpression::Constant(BabyBear::new(4)); - let result = a + b; - match result { - SymbolicExpression::Constant(val) => assert_eq!(val, BabyBear::new(7)), - _ => panic!("Addition of constants did not simplify correctly"), - } - } - - #[test] - fn test_subtraction_of_constants() { - let a = SymbolicExpression::Constant(BabyBear::new(10)); - let b = SymbolicExpression::Constant(BabyBear::new(4)); - let result = a - b; - match result { - SymbolicExpression::Constant(val) => assert_eq!(val, BabyBear::new(6)), - _ => panic!("Subtraction of constants did not simplify correctly"), - } - } - - #[test] - fn test_negation() { - let a = SymbolicExpression::Constant(BabyBear::new(7)); - let result = -a; - match result { - SymbolicExpression::Constant(val) => { - assert_eq!(val, BabyBear::NEG_ONE * BabyBear::new(7)); - } - _ => panic!("Negation did not work correctly"), - } - } - - #[test] - fn test_multiplication_of_constants() { - let a = SymbolicExpression::Constant(BabyBear::new(3)); - let b = SymbolicExpression::Constant(BabyBear::new(5)); - let result = a * b; - match result { - SymbolicExpression::Constant(val) => assert_eq!(val, BabyBear::new(15)), - _ => panic!("Multiplication of constants did not simplify correctly"), - } - } - - #[test] - fn test_degree_multiple_for_addition() { - let a = SymbolicExpression::Variable::(SymbolicVariable::new( - Entry::Main { offset: 0 }, - 1, - )); - let b = SymbolicExpression::Variable::(SymbolicVariable::new( - Entry::Main { offset: 0 }, - 2, - )); - let result = a + b; - match result { - SymbolicExpression::Add { - degree_multiple, - x, - y, - } => { - assert_eq!(degree_multiple, 1); - assert!( - matches!(*x, SymbolicExpression::Variable(ref v) if v.index == 1 && matches!(v.entry, Entry::Main { offset: 0 })) - ); - assert!( - matches!(*y, SymbolicExpression::Variable(ref v) if v.index == 2 && matches!(v.entry, Entry::Main { offset: 0 })) - ); - } - _ => panic!("Addition did not create an Add expression"), - } - } - - #[test] - fn test_degree_multiple_for_multiplication() { - let 
a = SymbolicExpression::Variable::(SymbolicVariable::new( - Entry::Main { offset: 0 }, - 1, - )); - let b = SymbolicExpression::Variable::(SymbolicVariable::new( - Entry::Main { offset: 0 }, - 2, - )); - let result = a * b; - - match result { - SymbolicExpression::Mul { - degree_multiple, - x, - y, - } => { - assert_eq!(degree_multiple, 2, "Multiplication should sum degrees"); - - assert!( - matches!(*x, SymbolicExpression::Variable(ref v) - if v.index == 1 && matches!(v.entry, Entry::Main { offset: 0 }) - ), - "Left operand should match `a`" - ); - - assert!( - matches!(*y, SymbolicExpression::Variable(ref v) - if v.index == 2 && matches!(v.entry, Entry::Main { offset: 0 }) - ), - "Right operand should match `b`" - ); - } - _ => panic!("Multiplication did not create a `Mul` expression"), - } - } - - #[test] - fn test_sum_operator() { - let expressions = vec![ - SymbolicExpression::Constant(BabyBear::new(2)), - SymbolicExpression::Constant(BabyBear::new(3)), - SymbolicExpression::Constant(BabyBear::new(5)), - ]; - let result: SymbolicExpression = expressions.into_iter().sum(); - match result { - SymbolicExpression::Constant(val) => assert_eq!(val, BabyBear::new(10)), - _ => panic!("Sum did not produce correct result"), - } - } - - #[test] - fn test_product_operator() { - let expressions = vec![ - SymbolicExpression::Constant(BabyBear::new(2)), - SymbolicExpression::Constant(BabyBear::new(3)), - SymbolicExpression::Constant(BabyBear::new(4)), - ]; - let result: SymbolicExpression = expressions.into_iter().product(); - match result { - SymbolicExpression::Constant(val) => assert_eq!(val, BabyBear::new(24)), - _ => panic!("Product did not produce correct result"), - } - } -} diff --git a/uni-stark/src/verifier.rs b/uni-stark/src/verifier.rs index 2414e28f5..d319ac476 100644 --- a/uni-stark/src/verifier.rs +++ b/uni-stark/src/verifier.rs @@ -1,20 +1,25 @@ -//! See `prover.rs` for an overview of the protocol and a more detailed soundness analysis. +//! See [`crate::prover`] for an overview of the protocol and a more detailed soundness analysis. -use alloc::vec; use alloc::vec::Vec; +use alloc::{format, vec}; use itertools::Itertools; use p3_air::Air; +use p3_air::lookup::LookupError; use p3_challenger::{CanObserve, FieldChallenger}; use p3_commit::{Pcs, PolynomialSpace}; use p3_field::{BasedVectorSpace, Field, PrimeCharacteristicRing}; use p3_matrix::dense::RowMajorMatrixView; use p3_matrix::stack::VerticalPair; use p3_util::zip_eq::zip_eq; -use tracing::{debug_span, instrument}; +use thiserror::Error; +use tracing::instrument; -use crate::symbolic_builder::{SymbolicAirBuilder, get_log_quotient_degree}; -use crate::{Domain, PcsError, Proof, StarkGenericConfig, Val, VerifierConstraintFolder}; +use crate::symbolic_builder::{SymbolicAirBuilder, get_log_num_quotient_chunks}; +use crate::{ + Domain, PcsError, PreprocessedVerifierKey, Proof, StarkGenericConfig, Val, + VerifierConstraintFolder, +}; /// Recomposes the quotient polynomial from its chunks evaluated at a point. /// @@ -64,7 +69,7 @@ where /// Verifies that the folded constraints match the quotient polynomial at zeta. /// -/// This evaluates the AIR constraints at the out-of-domain point and checks +/// This evaluates the [`Air`] constraints at the out-of-domain point and checks /// that constraints(zeta) / Z_H(zeta) = quotient(zeta). 
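+/// Concretely, the constraints are folded with powers of `alpha` into a single
+/// value `folded = C_0(zeta) + alpha * C_1(zeta) + alpha^2 * C_2(zeta) + ...`,
+/// and the check accepts only if `folded` equals `quotient(zeta) * Z_H(zeta)`.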
#[allow(clippy::too_many_arguments)] pub fn verify_constraints( @@ -82,6 +87,7 @@ pub fn verify_constraints( where SC: StarkGenericConfig, A: for<'a> Air>, + PcsErr: core::fmt::Debug, { let sels = trace_domain.selectors_at_point(zeta); @@ -107,6 +113,7 @@ where is_transition: sels.is_transition, alpha, accumulator: SC::Challenge::ZERO, + periodic_values: vec![], }; air.eval(&mut folder); let folded_constraints = folder.accumulator; @@ -125,9 +132,7 @@ where fn process_preprocessed_trace( air: &A, opened_values: &crate::proof::OpenedValues, - pcs: &SC::Pcs, - trace_domain: >::Domain, - is_zk: usize, + preprocessed_vk: Option<&PreprocessedVerifierKey>, ) -> Result< ( usize, @@ -139,9 +144,15 @@ where SC: StarkGenericConfig, A: for<'a> Air>, { - // If verifier asked for preprocessed trace, then proof should have it - let preprocessed = air.preprocessed_trace(); - let preprocessed_width = preprocessed.as_ref().map(|m| m.width).unwrap_or(0); + // Determine expected preprocessed width. + // - If a verifier key is provided, trust its width. + // - Otherwise, derive width from the AIR's preprocessed trace (if any). + let preprocessed_width = preprocessed_vk + .map(|vk| vk.width) + .or_else(|| air.preprocessed_trace().as_ref().map(|m| m.width)) + .unwrap_or(0); + + // Check that the proof's opened preprocessed values match the expected width. let preprocessed_local_len = opened_values .preprocessed_local .as_ref() @@ -155,19 +166,23 @@ where return Err(VerificationError::InvalidProofShape); } - if preprocessed_width > 0 { - assert_eq!(is_zk, 0); // TODO: preprocessed columns not supported in zk mode - let height = preprocessed.as_ref().unwrap().values.len() / preprocessed_width; - assert_eq!( - height, - trace_domain.size(), - "Verifier's preprocessed trace height must be equal to trace domain size" - ); - let (preprocessed_commit, _) = debug_span!("process preprocessed trace") - .in_scope(|| pcs.commit([(trace_domain, preprocessed.unwrap())])); - Ok((preprocessed_width, Some(preprocessed_commit))) - } else { - Ok((preprocessed_width, None)) + // Validate consistency between width, verifier key, and zk settings. + match (preprocessed_width, preprocessed_vk) { + // Case: No preprocessed columns. + // + // Valid only if no verifier key is provided. + (0, None) => Ok((0, None)), + + // Case: Preprocessed columns exist. + // + // Valid only if VK exists, widths match, and we are NOT in zk mode. + (w, Some(vk)) if w == vk.width => Ok((w, Some(vk.commitment.clone()))), + + // Catch-all for invalid states, such as: + // - Width is 0 but VK is provided. + // - Width > 0 but VK is missing. + // - Width > 0 but VK width mismatches the expected width. 
+ _ => Err(VerificationError::InvalidProofShape), } } @@ -178,6 +193,21 @@ pub fn verify( proof: &Proof, public_values: &[Val], ) -> Result<(), VerificationError>> +where + SC: StarkGenericConfig, + A: Air>> + for<'a> Air>, +{ + verify_with_preprocessed(config, air, proof, public_values, None) +} + +#[instrument(skip_all)] +pub fn verify_with_preprocessed( + config: &SC, + air: &A, + proof: &Proof, + public_values: &[Val], + preprocessed_vk: Option<&PreprocessedVerifierKey>, +) -> Result<(), VerificationError>> where SC: StarkGenericConfig, A: Air>> + for<'a> Air>, @@ -194,21 +224,29 @@ where let trace_domain = pcs.natural_domain_for_degree(degree); // TODO: allow moving preprocessed commitment to preprocess time, if known in advance let (preprocessed_width, preprocessed_commit) = - process_preprocessed_trace::(air, opened_values, pcs, trace_domain, config.is_zk())?; + process_preprocessed_trace::(air, opened_values, preprocessed_vk)?; - let log_quotient_degree = get_log_quotient_degree::, A>( + // Ensure the preprocessed trace and main trace have the same height. + if let Some(vk) = preprocessed_vk + && preprocessed_width > 0 + && vk.degree_bits != *degree_bits + { + return Err(VerificationError::InvalidProofShape); + } + + let log_num_quotient_chunks = get_log_num_quotient_chunks::, A>( air, preprocessed_width, public_values.len(), config.is_zk(), ); - let quotient_degree = 1 << (log_quotient_degree + config.is_zk()); + let num_quotient_chunks = 1 << (log_num_quotient_chunks + config.is_zk()); let mut challenger = config.initialise_challenger(); let init_trace_domain = pcs.natural_domain_for_degree(degree >> (config.is_zk())); let quotient_domain = - trace_domain.create_disjoint_domain(1 << (degree_bits + log_quotient_degree)); - let quotient_chunks_domains = quotient_domain.split_domains(quotient_degree); + trace_domain.create_disjoint_domain(1 << (degree_bits + log_num_quotient_chunks)); + let quotient_chunks_domains = quotient_domain.split_domains(num_quotient_chunks); let randomized_quotient_chunks_domains = quotient_chunks_domains .iter() @@ -226,7 +264,7 @@ where let air_width = A::width(air); let valid_shape = opened_values.trace_local.len() == air_width && opened_values.trace_next.len() == air_width - && opened_values.quotient_chunks.len() == quotient_degree + && opened_values.quotient_chunks.len() == num_quotient_chunks && opened_values .quotient_chunks .iter() @@ -349,18 +387,29 @@ where Ok(()) } -#[derive(Debug)] -pub enum VerificationError { +#[derive(Debug, Error)] +pub enum VerificationError +where + PcsErr: core::fmt::Debug, +{ + #[error("invalid proof shape")] InvalidProofShape, /// An error occurred while verifying the claimed openings. + #[error("invalid opening argument: {0:?}")] InvalidOpeningArgument(PcsErr), /// Out-of-domain evaluation mismatch, i.e. `constraints(zeta)` did not match /// `quotient(zeta) Z_H(zeta)`. - OodEvaluationMismatch { - index: Option, - }, + #[error("out-of-domain evaluation mismatch{}", .index.map(|i| format!(" at index {}", i)).unwrap_or_default())] + OodEvaluationMismatch { index: Option }, /// The FRI batch randomization does not correspond to the ZK setting. + #[error("randomization error: FRI batch randomization does not match ZK setting")] RandomizationError, /// The domain does not support computing the next point algebraically. 
+ #[error( + "next point unavailable: domain does not support computing the next point algebraically" + )] NextPointUnavailable, + /// Lookup related error + #[error("lookup error: {0:?}")] + LookupError(LookupError), } diff --git a/uni-stark/tests/fib_air.rs b/uni-stark/tests/fib_air.rs index b2e4f5e4c..fece71686 100644 --- a/uni-stark/tests/fib_air.rs +++ b/uni-stark/tests/fib_air.rs @@ -3,15 +3,17 @@ use core::borrow::Borrow; use p3_air::{Air, AirBuilder, AirBuilderWithPublicValues, BaseAir}; use p3_baby_bear::{BabyBear, Poseidon2BabyBear}; use p3_challenger::{DuplexChallenger, HashChallenger, SerializingChallenger32}; +use p3_circle::CirclePcs; use p3_commit::ExtensionMmcs; use p3_dft::Radix2DitParallel; use p3_field::extension::BinomialExtensionField; use p3_field::{Field, PrimeCharacteristicRing, PrimeField64}; -use p3_fri::{HidingFriPcs, TwoAdicFriPcs, create_test_fri_params}; +use p3_fri::{FriParameters, HidingFriPcs, TwoAdicFriPcs, create_test_fri_params}; use p3_keccak::{Keccak256Hash, KeccakF}; use p3_matrix::Matrix; use p3_matrix::dense::RowMajorMatrix; use p3_merkle_tree::{MerkleTreeHidingMmcs, MerkleTreeMmcs}; +use p3_mersenne_31::Mersenne31; use p3_symmetric::{ CompressionFunctionFromHasher, PaddingFreeSponge, SerializingHasher, TruncatedPermutation, }; @@ -119,8 +121,10 @@ type Dft = Radix2DitParallel; type Pcs = TwoAdicFriPcs; type MyConfig = StarkConfig; -/// n-th Fibonacci number expected to be x -fn test_public_value_impl(n: usize, x: u64, log_final_poly_len: usize) { +const TWO_ADIC_FIXTURE: &str = "tests/fixtures/uni_stark_two_adic_v1.postcard"; +const CIRCLE_FIXTURE: &str = "tests/fixtures/uni_stark_circle_v1.postcard"; + +fn make_two_adic_config(log_final_poly_len: usize) -> MyConfig { let mut rng = SmallRng::seed_from_u64(1); let perm = Perm::new_from_rng_128(&mut rng); let hash = MyHash::new(perm.clone()); @@ -128,12 +132,93 @@ fn test_public_value_impl(n: usize, x: u64, log_final_poly_len: usize) { let val_mmcs = ValMmcs::new(hash, compress); let challenge_mmcs = ChallengeMmcs::new(val_mmcs.clone()); let dft = Dft::default(); - let trace = generate_trace_rows::(0, 1, n); - let fri_params = create_test_fri_params(challenge_mmcs, log_final_poly_len); + let fri_params = FriParameters { + log_blowup: 2, + log_final_poly_len, + num_queries: 2, + commit_proof_of_work_bits: 1, + query_proof_of_work_bits: 1, + mmcs: challenge_mmcs, + }; let pcs = Pcs::new(dft, val_mmcs, fri_params); let challenger = Challenger::new(perm); + MyConfig::new(pcs, challenger) +} - let config = MyConfig::new(pcs, challenger); +fn two_adic_compat_case() -> (MyConfig, FibonacciAir, Vec, RowMajorMatrix) { + let trace = generate_trace_rows::(0, 1, 1 << 3); + let config = make_two_adic_config(2); + let pis = vec![BabyBear::ZERO, BabyBear::ONE, BabyBear::from_u64(21)]; + (config, FibonacciAir {}, pis, trace) +} + +type CircleVal = Mersenne31; +type CircleChallenge = BinomialExtensionField; +type CircleByteHash = Keccak256Hash; +type CircleFieldHash = SerializingHasher; +type CircleCompress = CompressionFunctionFromHasher; +type CircleValMmcs = MerkleTreeMmcs; +type CircleChallengeMmcs = ExtensionMmcs; +type CircleChallenger = SerializingChallenger32>; +type CirclePcsType = CirclePcs; +type CircleConfig = StarkConfig; + +fn make_circle_config() -> CircleConfig { + let byte_hash = CircleByteHash {}; + let field_hash = CircleFieldHash::new(byte_hash); + let compress = CircleCompress::new(byte_hash); + let val_mmcs = CircleValMmcs::new(field_hash, compress); + let challenge_mmcs = 
CircleChallengeMmcs::new(val_mmcs.clone()); + let fri_params = FriParameters { + log_blowup: 1, + log_final_poly_len: 0, + num_queries: 40, + commit_proof_of_work_bits: 0, + query_proof_of_work_bits: 8, + mmcs: challenge_mmcs, + }; + let pcs = CirclePcsType { + mmcs: val_mmcs, + fri_params, + _phantom: core::marker::PhantomData, + }; + let challenger = CircleChallenger::from_hasher(vec![], byte_hash); + CircleConfig::new(pcs, challenger) +} + +fn circle_compat_case() -> ( + CircleConfig, + FibonacciAir, + Vec, + RowMajorMatrix, +) { + let config = make_circle_config(); + let trace = generate_trace_rows::(0, 1, 1 << 3); + let pis = vec![ + CircleVal::from_u64(0), + CircleVal::from_u64(1), + CircleVal::from_u64(21), + ]; + (config, FibonacciAir {}, pis, trace) +} + +fn write_fixture(path: &str, bytes: &[u8]) -> std::io::Result<()> { + let full_path = std::path::Path::new(env!("CARGO_MANIFEST_DIR")).join(path); + if let Some(parent) = full_path.parent() { + std::fs::create_dir_all(parent)?; + } + std::fs::write(full_path, bytes) +} + +fn read_fixture(path: &str) -> std::io::Result> { + let full_path = std::path::Path::new(env!("CARGO_MANIFEST_DIR")).join(path); + std::fs::read(full_path) +} + +/// n-th Fibonacci number expected to be x +fn test_public_value_impl(n: usize, x: u64, log_final_poly_len: usize) { + let trace = generate_trace_rows::(0, 1, n); + let config = make_two_adic_config(log_final_poly_len); let pis = vec![BabyBear::ZERO, BabyBear::ONE, BabyBear::from_u64(x)]; let proof = prove(&config, &FibonacciAir {}, trace, &pis); @@ -199,6 +284,48 @@ fn test_public_value() { test_public_value_impl(1 << 3, 21, 2); } +#[test] +fn verify_two_adic_compat_fixture() -> Result<(), Box> { + let (config, air, pis, _) = two_adic_compat_case(); + let proof_bytes = read_fixture(TWO_ADIC_FIXTURE) + .expect("Missing fixture. Run: cargo test -p p3-uni-stark --test fib_air -- --ignored"); + let proof: p3_uni_stark::Proof = postcard::from_bytes(&proof_bytes)?; + verify(&config, &air, &proof, &pis)?; + Ok(()) +} + +#[test] +fn verify_circle_compat_fixture() -> Result<(), Box> { + let (config, air, pis, _) = circle_compat_case(); + let proof_bytes = read_fixture(CIRCLE_FIXTURE) + .expect("Missing fixture. 
diff --git a/uni-stark/tests/fixtures/uni_stark_circle_v1.postcard b/uni-stark/tests/fixtures/uni_stark_circle_v1.postcard
new file mode 100644
index 000000000..0cd3dd5d6
Binary files /dev/null and b/uni-stark/tests/fixtures/uni_stark_circle_v1.postcard differ
diff --git a/uni-stark/tests/fixtures/uni_stark_two_adic_v1.postcard b/uni-stark/tests/fixtures/uni_stark_two_adic_v1.postcard
new file mode 100644
index 000000000..ad4738bdd
Binary files /dev/null and b/uni-stark/tests/fixtures/uni_stark_two_adic_v1.postcard differ
diff --git a/uni-stark/tests/mul_air.rs b/uni-stark/tests/mul_air.rs
index e63aa5057..57edd345e 100644
--- a/uni-stark/tests/mul_air.rs
+++ b/uni-stark/tests/mul_air.rs
@@ -215,7 +215,8 @@ fn do_test_bb_twoadic(log_blowup: usize, degree: u64, log_n: usize) -> Result<(
         log_blowup,
         log_final_poly_len: 3,
         num_queries: 40,
-        proof_of_work_bits: 8,
+        commit_proof_of_work_bits: 0,
+        query_proof_of_work_bits: 8,
         mmcs: challenge_mmcs,
     };
     type Pcs = TwoAdicFriPcs<Val, Dft, ValMmcs, ChallengeMmcs>;
@@ -326,7 +327,8 @@ fn do_test_m31_circle(log_blowup: usize, degree: u64, log_n: usize) -> Result<(
         log_blowup,
         log_final_poly_len: 0,
         num_queries: 40,
-        proof_of_work_bits: 8,
+        commit_proof_of_work_bits: 0,
+        query_proof_of_work_bits: 8,
         mmcs: challenge_mmcs,
     };
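Both hunks migrate the renamed grinding knobs: the single `proof_of_work_bits: 8` is mapped onto the query-phase knob, with the new commit-phase knob left at zero, so these tests keep their previous behavior. A hedged migration sketch for downstream configs (the phase each knob gates is my reading of the names; the values are the tests', not a security recommendation):

```rust
// Behavior-preserving migration of an old `proof_of_work_bits: N` config:
// put all N bits on the query-phase grind and none on the commit phase.
let fri_params = FriParameters {
    log_blowup: 1,
    log_final_poly_len: 0,
    num_queries: 40,
    commit_proof_of_work_bits: 0, // new knob: grinding during the commit phase
    query_proof_of_work_bits: 8,  // the old `proof_of_work_bits`
    mmcs: challenge_mmcs,
};
```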
diff --git a/uni-stark/tests/mul_fib_pair.rs b/uni-stark/tests/mul_fib_pair.rs
index 31f8c80ee..b4ef454be 100644
--- a/uni-stark/tests/mul_fib_pair.rs
+++ b/uni-stark/tests/mul_fib_pair.rs
@@ -1,18 +1,20 @@
 use core::borrow::Borrow;
 
-use p3_air::{Air, AirBuilder, AirBuilderWithPublicValues, BaseAir, PairBuilder};
+use p3_air::{Air, AirBuilder, AirBuilderWithPublicValues, BaseAir};
 use p3_baby_bear::{BabyBear, Poseidon2BabyBear};
 use p3_challenger::DuplexChallenger;
 use p3_commit::ExtensionMmcs;
 use p3_dft::Radix2DitParallel;
 use p3_field::extension::BinomialExtensionField;
 use p3_field::{Field, PrimeField64};
-use p3_fri::{TwoAdicFriPcs, create_test_fri_params};
+use p3_fri::{HidingFriPcs, TwoAdicFriPcs, create_test_fri_params};
 use p3_matrix::Matrix;
 use p3_matrix::dense::RowMajorMatrix;
-use p3_merkle_tree::MerkleTreeMmcs;
+use p3_merkle_tree::{MerkleTreeHidingMmcs, MerkleTreeMmcs};
 use p3_symmetric::{PaddingFreeSponge, TruncatedPermutation};
-use p3_uni_stark::{StarkConfig, prove, verify};
+use p3_uni_stark::{
+    StarkConfig, prove_with_preprocessed, setup_preprocessed, verify_with_preprocessed,
+};
 use rand::SeedableRng;
 use rand::rngs::SmallRng;
@@ -50,13 +52,13 @@ impl BaseAir for MulFibPAir {
     }
 }
 
-impl Air for MulFibPAir
+impl Air for MulFibPAir
 where
     AB::F: PrimeField64,
 {
     fn eval(&self, builder: &mut AB) {
         let main = builder.main();
-        let preprocessed = builder.preprocessed();
+        let preprocessed = builder.preprocessed().expect("Preprocessed is empty?");
 
         let local_slice = main.row_slice(0).expect("Matrix is empty?");
         let next_slice = main.row_slice(1).expect("Matrix only has 1 row?");
@@ -172,12 +174,24 @@ type MyHash = PaddingFreeSponge<Perm, 16, 8, 8>;
 type MyCompress = TruncatedPermutation<Perm, 2, 8, 16>;
 type ValMmcs =
     MerkleTreeMmcs<<Val as Field>::Packing, <Val as Field>::Packing, MyHash, MyCompress, 8>;
+type HidingValMmcs = MerkleTreeHidingMmcs<
+    <Val as Field>::Packing,
+    <Val as Field>::Packing,
+    MyHash,
+    MyCompress,
+    SmallRng,
+    8,
+    4,
+>;
 type Challenge = BinomialExtensionField<Val, 4>;
 type ChallengeMmcs = ExtensionMmcs<Val, Challenge, ValMmcs>;
+type HidingChallengeMmcs = ExtensionMmcs<Val, Challenge, HidingValMmcs>;
 type Challenger = DuplexChallenger<Val, Perm, 16, 8>;
 type Dft = Radix2DitParallel<Val>;
 type Pcs = TwoAdicFriPcs<Val, Dft, ValMmcs, ChallengeMmcs>;
+type HidingPcs = HidingFriPcs<Val, Dft, HidingValMmcs, HidingChallengeMmcs, SmallRng>;
 type MyConfig = StarkConfig<Pcs, Challenge, Challenger>;
+type MyHidingConfig = StarkConfig<HidingPcs, Challenge, Challenger>;
 
 fn setup_test_config() -> MyConfig {
     let mut rng = SmallRng::seed_from_u64(1);
@@ -192,13 +206,51 @@ fn setup_test_config() -> MyConfig {
     MyConfig::new(pcs, challenger)
 }
 
+fn setup_zk_test_config() -> MyHidingConfig {
+    let mut rng = SmallRng::seed_from_u64(1);
+    let perm = Perm::new_from_rng_128(&mut rng);
+    let hash = MyHash::new(perm.clone());
+    let compress = MyCompress::new(perm.clone());
+    let val_mmcs = HidingValMmcs::new(hash, compress, rng.clone());
+    let challenge_mmcs = HidingChallengeMmcs::new(val_mmcs.clone());
+    let fri_params = create_test_fri_params(challenge_mmcs, 2);
+    let pcs = HidingPcs::new(Dft::default(), val_mmcs, fri_params, 4, rng);
+    let challenger = Challenger::new(perm);
+    MyHidingConfig::new(pcs, challenger)
+}
+
 #[test]
 fn test_mul_fib_pair() {
     let num_rows = 1024;
     let config = setup_test_config();
     let trace = generate_trace_rows::<Val>(1, 1, num_rows);
-    let proof = prove(&config, &MulFibPAir::new(num_rows), trace, &[]);
-    verify(&config, &MulFibPAir::new(num_rows), &proof, &[]).expect("verification failed");
+
+    let air = MulFibPAir::new(num_rows);
+    let degree_bits = 10; // log2(1024)
+    let (preprocessed_prover_data, preprocessed_vk) =
+        setup_preprocessed::(&config, &air, degree_bits).unwrap();
+
+    let proof = prove_with_preprocessed(&config, &air, trace, &[], Some(&preprocessed_prover_data));
+
+    verify_with_preprocessed(&config, &air, &proof, &[], Some(&preprocessed_vk))
+        .expect("verification failed");
+}
+
+#[test]
+fn test_mul_fib_pair_zk() {
+    let num_rows = 1024;
+    let config = setup_zk_test_config();
+    let trace = generate_trace_rows::<Val>(1, 1, num_rows);
+
+    let air = MulFibPAir::new(num_rows);
+    let degree_bits = 10; // log2(1024)
+    let (preprocessed_prover_data, preprocessed_vk) =
+        setup_preprocessed::(&config, &air, degree_bits).unwrap();
+
+    let proof = prove_with_preprocessed(&config, &air, trace, &[], Some(&preprocessed_prover_data));
+
+    verify_with_preprocessed(&config, &air, &proof, &[], Some(&preprocessed_vk))
+        .expect("verification failed");
+}
 
 #[test]
@@ -206,13 +258,26 @@ fn test_tampered_preprocessed_fails() {
     let num_rows = 1024;
     let config = setup_test_config();
     let trace = generate_trace_rows::<Val>(1, 1, num_rows);
-    let proof = prove(&config, &MulFibPAir::new(num_rows), trace, &[]);
+    let air = MulFibPAir::new(num_rows);
+    let degree_bits = 10; // log2(1024)
+
+    // Prover uses the correct AIR for preprocessed setup.
+    let (preprocessed_prover_data, _) =
+        setup_preprocessed::(&config, &air, degree_bits).unwrap();
+    let proof = prove_with_preprocessed(&config, &air, trace, &[], Some(&preprocessed_prover_data));
+
+    // Verifier uses a *tampered* AIR to derive the preprocessed commitment, which should
+    // not match the one used in the proof.
+    let tampered_air = MulFibPAir::with_tampered_preprocessed(num_rows, 3);
+    let (_, tampered_preprocessed_vk) =
+        setup_preprocessed::(&config, &tampered_air, degree_bits).unwrap();
 
-    let result = verify(
+    let result = verify_with_preprocessed(
         &config,
-        &MulFibPAir::with_tampered_preprocessed(num_rows, 3),
+        &tampered_air,
         &proof,
         &[],
+        Some(&tampered_preprocessed_vk),
     );
 
     assert!(
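The new flow threads one extra setup artifact through proving and verification, and all three tests hardcode `degree_bits = 10`. A small sketch of deriving it from the trace instead, using `p3_util` (hypothetical; the tests do not do this):

```rust
use p3_matrix::Matrix;
use p3_util::log2_strict_usize;

fn degree_bits_of(trace: &RowMajorMatrix<BabyBear>) -> usize {
    // Panics unless the height is a power of two, making the implicit
    // assumption behind the hardcoded `10` explicit.
    log2_strict_usize(trace.height())
}
```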
diff --git a/uni-stark/tests/rc_sub_builder.rs b/uni-stark/tests/rc_sub_builder.rs
new file mode 100644
index 000000000..ab36f2b0a
--- /dev/null
+++ b/uni-stark/tests/rc_sub_builder.rs
@@ -0,0 +1,167 @@
+//! Minimal range-check example that reuses a bit-decomposition gadget via [`SubAirBuilder`].
+//!
+//! Column layout:
+//! - `c[0]`: running sum owned by the parent AIR.
+//! - `c[1]`: value that must stay in `[0, 2^NUM_RANGE_BITS)`.
+//! - `c[2..]`: boolean limbs proving the decomposition of `c[1]`.
+//!
+//! The sub-AIR enforces the decomposition + booleanity over columns `1..`, while the parent AIR
+//! never touches the bit columns and only reasons about the accumulated sum.
+
+use core::marker::PhantomData;
+
+use p3_air::{Air, AirBuilder, BaseAir};
+use p3_baby_bear::{BabyBear, Poseidon2BabyBear};
+use p3_challenger::DuplexChallenger;
+use p3_commit::testing::TrivialPcs;
+use p3_dft::Radix2DitParallel;
+use p3_field::PrimeCharacteristicRing;
+use p3_field::extension::BinomialExtensionField;
+use p3_matrix::Matrix;
+use p3_matrix::dense::RowMajorMatrix;
+use p3_uni_stark::{StarkConfig, SubAirBuilder, SymbolicAirBuilder, prove, verify};
+use rand::SeedableRng;
+use rand::rngs::SmallRng;
+
+const NUM_RANGE_BITS: usize = 4;
+const TRACE_WIDTH: usize = 2 + NUM_RANGE_BITS;
+
+/// Range-check gadget: proves a value equals the sum of weighted boolean limbs.
+#[derive(Copy, Clone)]
+struct RangeDecompAir;
+
+impl BaseAir<BabyBear> for RangeDecompAir {
+    fn width(&self) -> usize {
+        1 + NUM_RANGE_BITS
+    }
+}
+
+impl<AB> Air<AB> for RangeDecompAir
+where
+    AB: AirBuilder<F = BabyBear>,
+{
+    fn eval(&self, builder: &mut AB) {
+        let main = builder.main();
+        let local = main.row_slice(0).expect("matrix should have a local row");
+
+        let value = local[0].clone();
+        let bits = &local[1..];
+
+        let mut recomposed = AB::Expr::ZERO;
+        for (i, bit) in bits.iter().enumerate() {
+            let weight = BabyBear::from_u32(1 << i);
+            recomposed += bit.clone() * weight;
+            builder.assert_zero(bit.clone() * (bit.clone() - AB::Expr::ONE));
+        }
+
+        builder.assert_zero(value - recomposed);
+    }
+}
+
+/// Parent AIR that reuses the range gadget but only reasons about the running sum.
+#[derive(Copy, Clone)]
+struct RangeCheckAir;
+
+impl BaseAir<BabyBear> for RangeCheckAir {
+    fn width(&self) -> usize {
+        TRACE_WIDTH
+    }
+}
+
+impl<AB> Air<AB> for RangeCheckAir
+where
+    AB: AirBuilder<F = BabyBear>,
+{
+    fn eval(&self, builder: &mut AB) {
+        // Declare the sub-AIR and evaluate it via `SubAirBuilder`
+        let sub_air = RangeDecompAir;
+        {
+            let mut sub_builder =
+                SubAirBuilder::<AB, RangeDecompAir>::new(builder, 1..TRACE_WIDTH);
+            sub_air.eval(&mut sub_builder);
+        }
+
+        // Evaluate the parent AIR
+        let main = builder.main();
+        let local = main.row_slice(0).expect("matrix should have a local row");
+        let next = main.row_slice(1).expect("matrix only has 1 row?");
+
+        let accumulator = local[0].clone();
+        let range_value = local[1].clone();
+        let next_accumulator = next[0].clone();
+
+        builder.when_first_row().assert_zero(accumulator.clone());
+        builder
+            .when_transition()
+            .assert_eq(next_accumulator, accumulator + range_value);
+    }
+}
+
+impl RangeCheckAir {
+    fn generate_trace(&self, rows: usize) -> RowMajorMatrix<BabyBear> {
+        assert!(
+            rows.is_power_of_two(),
+            "trace height must be a power of two"
+        );
+        let mut values = BabyBear::zero_vec(rows * TRACE_WIDTH);
+        let mut accumulator = BabyBear::ZERO;
+        for row in 0..rows {
+            let base = row * TRACE_WIDTH;
+            let raw_value = (row * 7) % (1 << NUM_RANGE_BITS);
+            values[base] = accumulator;
+            values[base + 1] = BabyBear::from_u32(raw_value as u32);
+            let mut tmp = raw_value;
+            for bit in 0..NUM_RANGE_BITS {
+                values[base + 2 + bit] = BabyBear::from_u32((tmp & 1) as u32);
+                tmp >>= 1;
+            }
+            accumulator += BabyBear::from_u32(raw_value as u32);
+        }
+        RowMajorMatrix::new(values, TRACE_WIDTH)
+    }
+}
+
+// Ensures the range-check gadget stays scoped to its columns and the whole AIR proves.
+#[test]
+fn range_checked_sub_builder() {
+    let air = RangeCheckAir;
+    let mut builder = SymbolicAirBuilder::<BabyBear>::new(0, TRACE_WIDTH, 0, 0, 0);
+    air.eval(&mut builder);
+
+    let constraints = builder.base_constraints();
+    assert!(
+        !constraints.is_empty(),
+        "Range-check AIR should emit constraints"
+    );
+
+    prove_bb_trivial_deg4(&air, 3);
+}
+
+/// Tests the whole AIR on a trivial trace.
+fn prove_bb_trivial_deg4(air: &RangeCheckAir, log_n: usize) {
+    type Val = BabyBear;
+    type Challenge = BinomialExtensionField<Val, 4>;
+    type Perm = Poseidon2BabyBear<16>;
+    type Dft = Radix2DitParallel<Val>;
+    type Challenger = DuplexChallenger<Val, Perm, 16, 8>;
+    type Pcs = TrivialPcs<Val, Dft>;
+    type Config = StarkConfig<Pcs, Challenge, Challenger>;
+
+    let rows = 1 << log_n;
+    let trace = air.generate_trace(rows);
+
+    let mut rng = SmallRng::seed_from_u64(1);
+    let perm = Perm::new_from_rng_128(&mut rng);
+    let dft = Dft::default();
+
+    let pcs = Pcs {
+        dft,
+        log_n,
+        _phantom: PhantomData,
+    };
+    let challenger = Challenger::new(perm);
+    let config = Config::new(pcs, challenger);
+
+    let proof = prove(&config, air, trace, &[]);
+    verify(&config, air, &proof, &[]).expect("verification failed");
+}
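The load-bearing detail is the column window passed to `SubAirBuilder::new`: inside the gadget, columns `1..TRACE_WIDTH` of the parent trace appear as its own `0..width()`. A hedged sketch of reusing the same gadget over two disjoint windows, for a hypothetical layout `[acc, v0, v0 bits.., v1, v1 bits..]` that is not part of this test (the turbofish follows the reconstruction above):

```rust
// Each window must be exactly the gadget's width: 1 + NUM_RANGE_BITS columns.
let w = 1 + NUM_RANGE_BITS;
{
    let mut sub = SubAirBuilder::<AB, RangeDecompAir>::new(builder, 1..1 + w);
    RangeDecompAir.eval(&mut sub);
}
{
    let mut sub = SubAirBuilder::<AB, RangeDecompAir>::new(builder, 1 + w..1 + 2 * w);
    RangeDecompAir.eval(&mut sub);
}
```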
diff --git a/util/CHANGELOG.md b/util/CHANGELOG.md
new file mode 100644
index 000000000..8e780590e
--- /dev/null
+++ b/util/CHANGELOG.md
@@ -0,0 +1,40 @@
+# Changelog
+
+All notable changes to this project will be documented in this file.
+
+The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/),
+and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0.html).
+
+## [Unreleased]
+
+## [0.4.2] - 2026-01-05
+### Merged PRs
+- Refactor: add public const `new` and `new_array` for all fields (#1222) (Adrian Hamelink)
+
+### Authors
+- Adrian Hamelink
+
+## [0.4.1] - 2025-12-18
+### Authors
+
+## [0.4.0] - 2025-12-12
+### Merged PRs
+- Clippy wants us to put things inside of fmt now instead of just extra arguments... (#916) (AngusG)
+- Chore: add descriptions to all sub-crate manifests (#906) (Himess)
+- GCD based inversion for 31 bit fields (#921) (AngusG)
+- Fast GCD Inverse for Goldilocks (#925) (AngusG)
+- More Clippy Complaints (#931) (AngusG)
+- Chore: remove useless bench_reverse_bits benchmark (#933) (Galoretka)
+- Packed Goldilocks Small Refactor (#946) (AngusG)
+- Make Assume unsafe and add a doc comment (#1005) (AngusG)
+- Compile Time asserts (#1015) (AngusG)
+- Clippy: small step (#1102) (Thomas Coratger)
+- Clippy: add nursery (#1103) (Thomas Coratger)
+- Clippy: add semicolon_if_nothing_returned (#1107) (Thomas Coratger)
+- Clippy: add match_bool (#1126) (Thomas Coratger)
+
+### Authors
+- AngusG
+- Galoretka
+- Himess
+- Thomas Coratger
+
diff --git a/util/src/lib.rs b/util/src/lib.rs
index 1e704d2fe..e4489c769 100644
--- a/util/src/lib.rs
+++ b/util/src/lib.rs
@@ -26,8 +26,8 @@ pub const fn log2_ceil_usize(n: usize) -> usize {
 }
 
 #[must_use]
-pub fn log2_ceil_u64(n: u64) -> u64 {
-    (u64::BITS - n.saturating_sub(1).leading_zeros()).into()
+pub const fn log2_ceil_u64(n: u64) -> u64 {
+    (u64::BITS - n.saturating_sub(1).leading_zeros()) as u64
 }
 
 /// Computes `log_2(n)`
@@ -36,9 +36,9 @@ pub fn log2_ceil_u64(n: u64) -> u64 {
 ///
 /// # Panics
 /// Panics if `n` is not a power of two.
 #[must_use]
 #[inline]
-pub fn log2_strict_usize(n: usize) -> usize {
+pub const fn log2_strict_usize(n: usize) -> usize {
     let res = n.trailing_zeros();
-    assert_eq!(n.wrapping_shr(res), 1, "Not a power of two: {n}");
+    assert!(n.wrapping_shr(res) == 1, "Not a power of two");
     // Tell the optimizer about the semantics of `log2_strict`. i.e. it can replace `n` with
     // `1 << res` and vice versa.
     unsafe {
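With both helpers now `const`, sizing logic can move to compile time; a minimal illustration (hypothetical call sites, not from this diff):

```rust
// Both evaluate at build time; the strict variant's power-of-two
// check becomes a compile-time failure instead of a runtime panic.
const LOG_N: usize = log2_strict_usize(1 << 10); // 10
const BITS: u64 = log2_ceil_u64(1000); // 10, since 2^9 < 1000 <= 2^10
```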
@@ -200,13 +200,17 @@ fn reverse_slice_index_bits_small<F>(vals: &mut [F], lb_n: usize) {
 }
 
 #[cfg(target_arch = "aarch64")]
-fn reverse_slice_index_bits_small<F>(vals: &mut [F], lb_n: usize) {
+const fn reverse_slice_index_bits_small<F>(vals: &mut [F], lb_n: usize) {
     // Aarch64 can reverse bits in one instruction, so the trivial version works best.
-    for src in 0..vals.len() {
+    // use manual `while` loop to enable `const`
+    let mut src = 0;
+    while src < vals.len() {
         let dst = src.reverse_bits().wrapping_shr(usize::BITS - lb_n as u32);
         if src < dst {
             vals.swap(src, dst);
         }
+
+        src += 1;
     }
 }
 
@@ -241,7 +245,7 @@ unsafe fn reverse_slice_index_bits_chunks(
 ///
 /// Callers must ensure that `p` is true. If this is not the case, the behavior is undefined.
 #[inline(always)]
-pub unsafe fn assume(p: bool) {
+pub const unsafe fn assume(p: bool) {
     debug_assert!(p);
     if !p {
         unsafe {
@@ -646,18 +650,20 @@ pub const fn relatively_prime_u64(mut u: u64, mut v: u64) -> bool {
 /// the corresponding big-ints and the top `NUM_ROUNDS + 2` should match the top bits including
 /// zeroes if the original numbers have different sizes.
 #[inline]
-pub fn gcd_inner<const NUM_ROUNDS: usize>(a: &mut u64, b: &mut u64) -> (i64, i64, i64, i64) {
+pub const fn gcd_inner<const NUM_ROUNDS: usize>(a: &mut u64, b: &mut u64) -> (i64, i64, i64, i64) {
     // Initialise update factors.
     // At the start of round 0: -1 < f0, g0, f1, g1 <= 1
     let (mut f0, mut g0, mut f1, mut g1) = (1, 0, 0, 1);
 
     // If at the start of a round: -2^i < f0, g0, f1, g1 <= 2^i
     // Then, at the end of the round: -2^{i + 1} < f0, g0, f1, g1 <= 2^{i + 1}
-    for _ in 0..NUM_ROUNDS {
+    // use manual `while` loop to enable `const`
+    let mut round = 0;
+    while round < NUM_ROUNDS {
         if *a & 1 == 0 {
             *a >>= 1;
         } else {
-            if a < b {
+            if *a < *b {
                 core::mem::swap(a, b);
                 (f0, f1) = (f1, f0);
                 (g0, g1) = (g1, g0);
@@ -669,6 +675,8 @@ pub fn gcd_inner<const NUM_ROUNDS: usize>(a: &mut u64, b: &mut u64) -> (i64, i64
         }
         f1 <<= 1;
         g1 <<= 1;
+
+        round += 1;
     }
 
     // -2^NUM_ROUNDS < f0, g0, f1, g1 <= 2^NUM_ROUNDS
@@ -691,7 +699,7 @@
 /// It is up to the user to ensure that `b` is an odd prime with at most `FIELD_BITS` bits and
 /// `a < b`. If either of these assumptions break, the output is undefined.
 #[inline]
-pub fn gcd_inversion_prime_field_32<const FIELD_BITS: usize>(mut a: u32, mut b: u32) -> i64 {
+pub const fn gcd_inversion_prime_field_32<const FIELD_BITS: usize>(mut a: u32, mut b: u32) -> i64 {
     const {
         assert!(FIELD_BITS <= 32);
     }
@@ -705,7 +713,9 @@ pub fn gcd_inversion_prime_field_32<const FIELD_BITS: usize>(mut a: u32, mut b: u3
     // `b = v * a0 mod P`
     // `len(a) + len(b) <= 2 * len(P) <= 2 * FIELD_BITS`
 
-    for _ in 0..(2 * FIELD_BITS - 2) {
+    // use manual `while` loop to enable `const`
+    let mut i = 0;
+    while i < 2 * FIELD_BITS - 2 {
         // Assume at the start of the loop i:
         // (1) `|u|, |v| <= 2^{i}`
         // (2) `2^i * a = u * a0 mod P`
@@ -741,6 +751,8 @@ pub fn gcd_inversion_prime_field_32<const FIELD_BITS: usize>(mut a: u32, mut b: u3
         // (4) `gcd(a, b) = 1`
         // (5) `b` is odd.
        	// (6) `len(a) + len(b) <= max(n - i - 1, 1)`
+
+        i += 1;
    }
 
     // After the loops, we see that:
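The same const-ification applies to the GCD helpers. A sketch of a compile-time use, taking `relatively_prime_u64` from the hunk context above (the call site is hypothetical; the modulus is Mersenne31's `2^31 - 1`):

```rust
const P: u64 = (1 << 31) - 1;
// Const-evaluated co-primality check, usable as a static assert.
const _: () = assert!(relatively_prime_u64(12_345, P));
```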
diff --git a/versioning_tools/README.md b/versioning_tools/README.md
deleted file mode 100644
index 709ae9f97..000000000
--- a/versioning_tools/README.md
+++ /dev/null
@@ -1,19 +0,0 @@
-# Versioning Tools
-
-Because plonky3 has many, many sub-crates, it was decided that it was worth putting in effort to automate the release process. The following three scripts when used together make this process significantly more streamlined and much less error-prone.
-
-## But first, why not use existing release tools?
-
-While the tooling available for automating Rust releases is fairly mature at this point, almost all of the tools assume that you are not releasing all of a project's sub-crates in lockstep. Specifically, "lockstep" here means that when a release occurs all individual subcrates will get a release for an identical semver version that matches the project's workspace version. For example, if we perform a `minor` bump and we're currently on `0.2.0`, all crates will be bumped to `0.3.0`.
-
-## Intended Workflow
-
-When it's decided that it's time to do a release, the following should occur:
-
-- Create a PR that updates `CHANGELOG.md` and bumps all crate versions.
-  - On a separate branch, run `lockstep_version_bump.sh` and follow the prompts. This will create a tagged commit and push the branch to remote. This script will create and publish a commit that bumps all crate versions based on how the latest changes affected Semver.
-  - Once this is complete, also run `changelog_gen.sh`. This will search for all PR commits and prepend them to `CHANGELOG.md`. Also don't forget to commit this and push (will automate later).
-- Once this PR is merged, we need to next publish this commit to `crates.io`.
-  - From `main` on this version bump commit, run `lockstep_publish.sh`. This will do as much validation as possible before finally prompting you to publish all of the sub-crates.
-
-And that's it! No more work is needed on your end.
diff --git a/versioning_tools/changelog_gen.sh b/versioning_tools/changelog_gen.sh
deleted file mode 100755
index d1a9bce29..000000000
--- a/versioning_tools/changelog_gen.sh
+++ /dev/null
@@ -1,22 +0,0 @@
-#!/usr/bin/env bash
-
-# Convenience script for prepending to the changelog after a version bump.
-# Also has an additional check to prevent double appending a changelog for a version to `CHANGELOG.md`.
-#
-# Intended to be used in the same PR that bumps the versions.
-
-curr_dir=$(realpath "$(dirname "$0")")
-changelog_path="${curr_dir}/../CHANGELOG.md"
-
-latest_local_release_tag=$(git tag -l | grep -E 'v[0-9]+\.[0-9]+\.[0-9]+' | sort -r | head -n 1)
-latest_local_release_tag_no_v=$(echo "$latest_local_release_tag" | sed -E 's/v(.*)/\1/')
-
-# Check if there already is an entry in the changelog for this version.
-if [ "$(grep -c "\[$latest_local_release_tag_no_v\]" < "$changelog_path")" -gt 0 ]; then
-    echo "Version ${latest_local_release_tag} already has a entry in the changelog. Manually remove it before prepending to it with this script."
-    exit 1
-fi
-
-git cliff -p "$changelog_path" --latest
-
-echo "Changelog successfully updated for ${latest_local_release_tag}."
diff --git a/versioning_tools/lockstep_publish.sh b/versioning_tools/lockstep_publish.sh
deleted file mode 100755
index 8906e9063..000000000
--- a/versioning_tools/lockstep_publish.sh
+++ /dev/null
@@ -1,58 +0,0 @@
-#!/usr/bin/env bash
-
-# Publish all subcrates to `crates.io` once we merged a version bump PR back to main.
-
-# Note that the intended workflow of this script is:
-# - Have a PR of a successful run of `lockstep_version_bump.sh` merged back into `main`.
-# - Run this script on the commit that got merged back into `main`.
-
-# 1 --> Prompt string
-# 2 --> Yes string
-get_yes_no_input_from_user_and_exit_on_no() {
-    echo "${1} (y/n)"
-
-    read -r input
-    if [ "$input" = "y" ]; then
-        echo "$2"
-    else
-        exit 0
-    fi
-}
-
-# If a version bump occurred on this commit (highest version tag is present on this commit), then this will report that nothing has changed since the last bump (because we just bumped on this commit).
-changed_res=$(cargo workspaces changed --error-on-empty)
-
-if [ ! $? ]; then
-    # The number of changes since the latest release tag should be zero if we are on the corresponding commit.
-    num_changed=$(echo "$changed_res" | wc -l)
-
-    if [ "$num_changed" -gt 0 ]; then
-        latest_local_release_tag=$(git tag -l | grep -E 'v[0-9]+\.[0-9]+\.[0-9]+' | sort -r | head -n 1)
-
-        echo "Changes detected since the latest version release tag (${latest_local_release_tag})!"
-        echo "Make sure to run \`lockstep_version_bump.sh\` before running this script."
-        exit 1
-    fi
-
-    get_yes_no_input_from_user_and_exit_on_no "Do you want to publish a release now?" "Publishing to crates.io..."
-else
-    echo "The latest version tag is not on this commit. Run \`lockstep_version_bump.sh\` to create a commit for publishing."
-    exit 1
-fi
-
-# User green-lighted publishing.
-# Because a failure during publishing could result in a desync between local and remote (and thus a big headache), we're going to do a dry run first in order to detect any errors during publishing.
-if ! cargo workspaces publish --dry-run --no-git-push --allow-branch main; then
-    echo "crates.io publishing dry run failed."
-    exit 1
-fi
-
-# Publishing dry run succeeded. Do a real publish now.
-
-get_yes_no_input_from_user_and_exit_on_no "Publishing dry run succeeded! Perform a real publish now? (Be careful!!! This is non-reversible!)" "Publishing to crates.io..."
-
-# Perform a real publish. Hopefully nothing with break at this point.
-if ! cargo workspaces publish --allow-branch main; then
-    echo "crates.io publishing failed. This is probably not good..."
-    exit 1
-fi
diff --git a/versioning_tools/lockstep_version_bump.sh b/versioning_tools/lockstep_version_bump.sh
deleted file mode 100755
index 7e4909421..000000000
--- a/versioning_tools/lockstep_version_bump.sh
+++ /dev/null
@@ -1,127 +0,0 @@
-#!/usr/bin/env bash
-
-# We are currently incrementing versions with lockstep, which means that all sub-crates will always be at the same version (eg. `0.x.0` is the crate version that should be used for all crates when we target this version).
-
-# When we increment the version, we are always going to be following Semver of whichever package had the most "significant" bump. So for example, if one package had a major bump but all of the rest only had a patch bump, then all packages would receive a major bump.
-
-# Check if a binary in installed and exit early if it's missing.
-# $1 --> Binary name
-function tool_installed_check () {
-    if ! command -v "$1" >/dev/null 2>&1; then
-        echo "\"${1}\" not found! Make sure it's installed before running this script."
-        exit 1
-    fi
-}
-
-# Tool install check.
-tool_installed_check "cargo"
-tool_installed_check "cargo-workspaces"
-tool_installed_check "cargo-semver-checks"
-
-# First we need to check if a version bump occurred that was never published. We don't want to accidentally bump a version and never publish it.
-all_local_subcrates_name_and_versions=$(cargo workspaces list -l)
-
-all_local_subcrate_versions=$(echo "$all_local_subcrates_name_and_versions" | sed -E 's/.* v([0-9]+.[0-9]+.[0-9]+).*/\1/')
-# First check that all subcrates are locksteped to the same version. If this isn't the case, then something is wrong and we should stop.
-# Use `awk` to check that all crate versions are the same string.
-if [ "$(echo "$all_local_subcrate_versions" | uniq | wc -l)" -gt 1 ]; then
-    echo "Something is wrong and all local subcrates are not on the same version!"
-    echo "$all_local_subcrates_name_and_versions"
-    echo "Aborting!"
-
-    exit 1
-fi
-
-# Now that we know that all local subcrates are locksteped to the same version, we need to also ensure that all published (remote) crates are on the same version.
-for crate_name in $(cargo workspaces list)
-do
-    echo "Checking published version for ${crate_name}..."
-
-    local_ver=$(echo "$all_local_subcrates_name_and_versions" | grep -E ".*$crate_name .*" | sed -E 's/.*([0-9]+\.[0-9]+\.[0-9]+).*/\1/')
-    published_ver=$(cargo search -q "$crate_name" | grep "$crate_name =" | sed -E 's/.* = "([0-9]+\.[0-9]+\.[0-9]+)".*/\1/')
-
-    # Handle the case where this is a new crate that is not yet published.
-    if [ "$published_ver" == "" ]; then
-        echo "${crate_name} not yet published to crates.io. Will publish an initial version for it."
-    elif [ ! "$local_ver" == "$published_ver" ]; then
-        echo "The crate \"${crate_name}\" has a different published version (${published_ver}) than the current local version (${local_ver})."
-        echo "This script relies that all sub-crates are bumped in lockstep, and if one crate does not match its remote, the script's core assumptions break down."
-        echo "You're going to have to manually sync all desynced sub-crates to get their local versions to match the published version."
-        echo "Aborting..."
-
-        exit 1
-    fi
-done
-
-# Now all local and published versions are currently synced. To get the most recent current version.
-latest_published_version=$(cargo search -q uni-stark | sed -E 's/.* = "([0-9]+.[0-9]+.[0-9]+)".*/\1/')
-major_version=$(echo "$latest_published_version" | sed -E "s/([0-9]+).*/\1/")
-
-echo "Checking for the most significant semver change across all crates. This may take some time..."
-semver_check_out=$(cargo workspaces exec --no-bail cargo semver-checks 2>&1 | grep "Summary")
-
-major_bumps_suggested=$(echo "$semver_check_out" | grep -ce "new major version")
-minor_bumps_suggested=$(echo "$semver_check_out" | grep -ce "new minor version")
-patch_bumps_suggested=$(echo "$semver_check_out" | grep -c "Summary no semver update required")
-
-# Check if we would normally perform a `Major` bump but won't because the current `Major` version `0`. (https://semver.org/#spec-item-4)
-major_bump_suppressed=0
-
-# Man... We sure love Bash here... Look at this beautiful line below.
-[ "$major_version" -eq 0 ] && [ "$major_bumps_suggested" -gt 0 ] && major_bump_suppressed=1
-
-# Note: Because the rules for Semver are a bit different when major is `0`, we're going to override suggesting a `Major` change if the major version is `0`.
-if [ "$major_bumps_suggested" -gt 0 ] && [ "$major_version" -gt 0 ]; then
-    patch_bump_type="major"
-
-# We want to downgrade a `Major` bump into a `minor` bump if the current `Major` version is `0`.
-elif [ "$minor_bumps_suggested" -gt 0 ] || [ "$major_bump_suppressed" -eq 1 ]; then
-    patch_bump_type="minor"
-elif [ "$patch_bumps_suggested" -gt 0 ]; then
-    patch_bump_type="patch"
-else
-    patch_bump_type="none"
-fi
-
-if [ $patch_bump_type == "none" ]; then
-    echo "No crates need to be bumped."
-    exit 0
-fi
-
-# We can perform a bump.
-echo "${patch_bump_type} bump suggested. (Major: ${major_bumps_suggested}, Minor: ${minor_bumps_suggested}, Patch: ${patch_bumps_suggested})"
-
-if [ "$major_bump_suppressed" ]; then
-    echo "Note: Even though there are breaking changes since the last release, because the current major version is \"0\", we are going to suggest a minor bump instead. (See https://semver.org/#spec-item-4)"
-fi
-
-echo "Proceed with a ${patch_bump_type} lockstep bump? (y/n)"
-read -r input
-
-if [ ! "$input" == "y" ]; then
-    echo "Overriding semver suggested patch type. What kind of bump should be done instead? (\"major\" | \"minor\" | \"patch\")"
-    read -r patch_bump_type
-
-    case $patch_bump_type in
-        major | minor | patch)
-            # Valid input. Do nothing.
-            ;;
-
-        *)
-            echo "${patch_bump_type} not valid input! Exiting!"
-            exit 1
-            ;;
-    esac
-fi
-
-current_branch=$(git branch --show-current)
-
-# If the branch doesn't exist on remote, create it.
-if [ "$(git ls-remote --heads origin refs/heads/"$current_branch" | wc -l)" -eq 0 ]; then
-    echo "Current branch does not exist on remote. Pushing to remote..."
-    git push --set-upstream origin "$current_branch"
-fi
-
-# Now we have a valid bump type. Apply it.
-echo "Performing a ${patch_bump_type} bump..."
-cargo workspaces version -y --allow-branch "$current_branch" --no-individual-tags "${patch_bump_type}"