diff --git a/.github/workflows/curve25519-dalek.yml b/.github/workflows/curve25519-dalek.yml index 3f688b4d9..6e16c0174 100644 --- a/.github/workflows/curve25519-dalek.yml +++ b/.github/workflows/curve25519-dalek.yml @@ -43,30 +43,6 @@ jobs: RUSTFLAGS: '--cfg curve25519_dalek_backend="fiat"' run: cargo test --target ${{ matrix.target }} - # Default no_std test only tests using serial across all crates - build-nostd-fiat: - name: Build fiat on no_std target (thumbv7em-none-eabi) - runs-on: ubuntu-latest - strategy: - matrix: - include: - - crate: curve25519-dalek - steps: - - uses: actions/checkout@v3 - - uses: dtolnay/rust-toolchain@master - with: - toolchain: stable - targets: thumbv7em-none-eabi - - uses: taiki-e/install-action@cargo-hack - # No default features build - - name: no_std fiat / no feat ${{ matrix.crate }} - env: - RUSTFLAGS: '--cfg curve25519_dalek_backend="fiat"' - run: cargo build -p ${{ matrix.crate }} --target thumbv7em-none-eabi --release --no-default-features - - name: no_std fiat / cargo hack ${{ matrix.crate }} - env: - RUSTFLAGS: '--cfg curve25519_dalek_backend="fiat"' - run: cargo hack build -p ${{ matrix.crate }} --target thumbv7em-none-eabi --release --each-feature --exclude-features default,std,getrandom test-serial: name: Test serial backend @@ -143,3 +119,23 @@ jobs: - run: cargo build --no-default-features --features serde # Also make sure the AVX2 build works - run: cargo build --target x86_64-unknown-linux-gnu + + verus: + name: Run `cargo verify verus` + runs-on: ubuntu-latest + steps: + - name: Checkout + uses: actions/checkout@v3 + - name: Install Rust toolchain + uses: dtolnay/rust-toolchain@1.88.0 + - name: Install Verus + run: | + wget https://github.com/verus-lang/verus/releases/download/release%2F0.2025.08.01.33c6cec/verus-0.2025.08.01.33c6cec-x86-linux.zip + unzip verus-0.2025.08.01.33c6cec-x86-linux.zip + mv verus-x86-linux ~/.cargo/bin + cd ~/.cargo/bin + ln -s verus-x86-linux/cargo-verus + - name: Run test + run: | + cargo-verus verify -- --smt-option smt.random_seed=0 + diff --git a/.github/workflows/workspace.yml b/.github/workflows/workspace.yml index dc5ac8d91..a9775f81e 100644 --- a/.github/workflows/workspace.yml +++ b/.github/workflows/workspace.yml @@ -57,29 +57,6 @@ jobs: - name: Build default (host native) bench run: cargo build --benches - # Test no_std with serial (default) - build-nostd-serial: - name: Build serial on no_std target (thumbv7em-none-eabi) - runs-on: ubuntu-latest - strategy: - matrix: - include: - - crate: curve25519-dalek - - crate: ed25519-dalek - - crate: x25519-dalek - steps: - - uses: actions/checkout@v3 - - uses: dtolnay/rust-toolchain@master - with: - toolchain: stable - targets: thumbv7em-none-eabi - - uses: taiki-e/install-action@cargo-hack - # No default features build - - name: no_std / no feat ${{ matrix.crate }} - run: cargo build -p ${{ matrix.crate }} --target thumbv7em-none-eabi --release --no-default-features - - name: no_std / cargo hack ${{ matrix.crate }} - run: cargo hack build -p ${{ matrix.crate }} --target thumbv7em-none-eabi --release --each-feature --exclude-features default,std,os_rng - clippy: name: Check that clippy is happy runs-on: ubuntu-latest diff --git a/.gitignore b/.gitignore index 6eea85527..4ac079ca1 100644 --- a/.gitignore +++ b/.gitignore @@ -11,3 +11,5 @@ build*.txt *.bak *.s +.aider* +.verus-solver-log diff --git a/curve25519-dalek/Cargo.toml b/curve25519-dalek/Cargo.toml index 46e6ca584..4049ba4b2 100644 --- a/curve25519-dalek/Cargo.toml +++ b/curve25519-dalek/Cargo.toml @@ 
-53,6 +53,9 @@ harness = false required-features = ["alloc", "rand_core"] [dependencies] +vstd = { git = "https://github.com/verus-lang/verus", rev = "33c6cec7bd19818e51d2b61f629d5d2484778bed"} +verus_builtin = { git = "https://github.com/verus-lang/verus", rev = "33c6cec7bd19818e51d2b61f629d5d2484778bed"} +verus_builtin_macros = { git = "https://github.com/verus-lang/verus", rev = "33c6cec7bd19818e51d2b61f629d5d2484778bed"} cfg-if = "1" ff = { version = "=0.14.0-pre.0", default-features = false, optional = true } group = { version = "=0.14.0-pre.0", default-features = false, optional = true } @@ -95,3 +98,6 @@ check-cfg = [ 'cfg(curve25519_dalek_bits, values("32", "64"))', 'cfg(nightly)', ] + +[package.metadata.verus] +verify = true diff --git a/curve25519-dalek/src/backend/serial/fiat_u64/mod.rs b/curve25519-dalek/src/backend/serial/fiat_u64/mod.rs index 8c8306249..77e0888c5 100644 --- a/curve25519-dalek/src/backend/serial/fiat_u64/mod.rs +++ b/curve25519-dalek/src/backend/serial/fiat_u64/mod.rs @@ -26,3 +26,12 @@ pub mod field; #[path = "../u64/constants.rs"] pub mod constants; + +#[path = "../u64/subtle_assumes.rs"] +pub mod subtle_assumes; + +#[path = "../u64/scalar_lemmas.rs"] +pub mod scalar_lemmas; + +#[path = "../u64/scalar_specs.rs"] +pub mod scalar_specs; diff --git a/curve25519-dalek/src/backend/serial/u32/constants.rs b/curve25519-dalek/src/backend/serial/u32/constants.rs index 191bb13d7..689b3502c 100644 --- a/curve25519-dalek/src/backend/serial/u32/constants.rs +++ b/curve25519-dalek/src/backend/serial/u32/constants.rs @@ -93,25 +93,31 @@ pub(crate) const MONTGOMERY_A_NEG: FieldElement2625 = FieldElement2625::from_lim /// `L` is the order of base point, i.e. 2^252 + /// 27742317777372353535851937790883648493 -pub(crate) const L: Scalar29 = Scalar29([ - 0x1cf5d3ed, 0x009318d2, 0x1de73596, 0x1df3bd45, 0x0000014d, 0x00000000, 0x00000000, 0x00000000, - 0x00100000, -]); +pub(crate) const L: Scalar29 = Scalar29 { + limbs: [ + 0x1cf5d3ed, 0x009318d2, 0x1de73596, 0x1df3bd45, 0x0000014d, 0x00000000, 0x00000000, + 0x00000000, 0x00100000, + ], +}; /// `L` * `LFACTOR` = -1 (mod 2^29) pub(crate) const LFACTOR: u32 = 0x12547e1b; /// `R` = R % L where R = 2^261 -pub(crate) const R: Scalar29 = Scalar29([ - 0x114df9ed, 0x1a617303, 0x0f7c098c, 0x16793167, 0x1ffd656e, 0x1fffffff, 0x1fffffff, 0x1fffffff, - 0x000fffff, -]); +pub(crate) const R: Scalar29 = Scalar29 { + limbs: [ + 0x114df9ed, 0x1a617303, 0x0f7c098c, 0x16793167, 0x1ffd656e, 0x1fffffff, 0x1fffffff, + 0x1fffffff, 0x000fffff, + ], +}; /// `RR` = (R^2) % L where R = 2^261 -pub(crate) const RR: Scalar29 = Scalar29([ - 0x0b5f9d12, 0x1e141b17, 0x158d7f3d, 0x143f3757, 0x1972d781, 0x042feb7c, 0x1ceec73d, 0x1e184d1e, - 0x0005046d, -]); +pub(crate) const RR: Scalar29 = Scalar29 { + limbs: [ + 0x0b5f9d12, 0x1e141b17, 0x158d7f3d, 0x143f3757, 0x1972d781, 0x042feb7c, 0x1ceec73d, + 0x1e184d1e, 0x0005046d, + ], +}; /// The Ed25519 basepoint, as an `EdwardsPoint`. 
/// diff --git a/curve25519-dalek/src/backend/serial/u32/scalar.rs b/curve25519-dalek/src/backend/serial/u32/scalar.rs index cc21139bc..4706edcfd 100644 --- a/curve25519-dalek/src/backend/serial/u32/scalar.rs +++ b/curve25519-dalek/src/backend/serial/u32/scalar.rs @@ -22,31 +22,33 @@ use crate::constants; /// The `Scalar29` struct represents an element in \\(\mathbb{Z} / \ell\mathbb{Z}\\) as 9 29-bit /// limbs #[derive(Copy, Clone)] -pub struct Scalar29(pub [u32; 9]); +pub struct Scalar29 { + pub limbs: [u32; 9], +} impl Debug for Scalar29 { fn fmt(&self, f: &mut core::fmt::Formatter<'_>) -> core::fmt::Result { - write!(f, "Scalar29: {:?}", &self.0[..]) + write!(f, "Scalar29: {:?}", &self.limbs[..]) } } #[cfg(feature = "zeroize")] impl Zeroize for Scalar29 { fn zeroize(&mut self) { - self.0.zeroize(); + self.limbs.zeroize(); } } impl Index for Scalar29 { type Output = u32; fn index(&self, _index: usize) -> &u32 { - &(self.0[_index]) + &(self.limbs[_index]) } } impl IndexMut for Scalar29 { fn index_mut(&mut self, _index: usize) -> &mut u32 { - &mut (self.0[_index]) + &mut (self.limbs[_index]) } } @@ -58,7 +60,9 @@ fn m(x: u32, y: u32) -> u64 { impl Scalar29 { /// The scalar \\( 0 \\). - pub const ZERO: Scalar29 = Scalar29([0, 0, 0, 0, 0, 0, 0, 0, 0]); + pub const ZERO: Scalar29 = Scalar29 { + limbs: [0, 0, 0, 0, 0, 0, 0, 0, 0], + }; /// Unpack a 32 byte / 256 bit scalar into 9 29-bit limbs. #[rustfmt::skip] // keep alignment of s[*] calculations @@ -132,38 +136,38 @@ impl Scalar29 { pub fn to_bytes(self) -> [u8; 32] { let mut s = [0u8; 32]; - s[ 0] = (self.0[0] >> 0) as u8; - s[ 1] = (self.0[0] >> 8) as u8; - s[ 2] = (self.0[0] >> 16) as u8; - s[ 3] = ((self.0[0] >> 24) | (self.0[1] << 5)) as u8; - s[ 4] = (self.0[1] >> 3) as u8; - s[ 5] = (self.0[1] >> 11) as u8; - s[ 6] = (self.0[1] >> 19) as u8; - s[ 7] = ((self.0[1] >> 27) | (self.0[2] << 2)) as u8; - s[ 8] = (self.0[2] >> 6) as u8; - s[ 9] = (self.0[2] >> 14) as u8; - s[10] = ((self.0[2] >> 22) | (self.0[3] << 7)) as u8; - s[11] = (self.0[3] >> 1) as u8; - s[12] = (self.0[3] >> 9) as u8; - s[13] = (self.0[3] >> 17) as u8; - s[14] = ((self.0[3] >> 25) | (self.0[4] << 4)) as u8; - s[15] = (self.0[4] >> 4) as u8; - s[16] = (self.0[4] >> 12) as u8; - s[17] = (self.0[4] >> 20) as u8; - s[18] = ((self.0[4] >> 28) | (self.0[5] << 1)) as u8; - s[19] = (self.0[5] >> 7) as u8; - s[20] = (self.0[5] >> 15) as u8; - s[21] = ((self.0[5] >> 23) | (self.0[6] << 6)) as u8; - s[22] = (self.0[6] >> 2) as u8; - s[23] = (self.0[6] >> 10) as u8; - s[24] = (self.0[6] >> 18) as u8; - s[25] = ((self.0[6] >> 26) | (self.0[7] << 3)) as u8; - s[26] = (self.0[7] >> 5) as u8; - s[27] = (self.0[7] >> 13) as u8; - s[28] = (self.0[7] >> 21) as u8; - s[29] = (self.0[8] >> 0) as u8; - s[30] = (self.0[8] >> 8) as u8; - s[31] = (self.0[8] >> 16) as u8; + s[ 0] = (self.limbs[0] >> 0) as u8; + s[ 1] = (self.limbs[0] >> 8) as u8; + s[ 2] = (self.limbs[0] >> 16) as u8; + s[ 3] = ((self.limbs[0] >> 24) | (self.limbs[1] << 5)) as u8; + s[ 4] = (self.limbs[1] >> 3) as u8; + s[ 5] = (self.limbs[1] >> 11) as u8; + s[ 6] = (self.limbs[1] >> 19) as u8; + s[ 7] = ((self.limbs[1] >> 27) | (self.limbs[2] << 2)) as u8; + s[ 8] = (self.limbs[2] >> 6) as u8; + s[ 9] = (self.limbs[2] >> 14) as u8; + s[10] = ((self.limbs[2] >> 22) | (self.limbs[3] << 7)) as u8; + s[11] = (self.limbs[3] >> 1) as u8; + s[12] = (self.limbs[3] >> 9) as u8; + s[13] = (self.limbs[3] >> 17) as u8; + s[14] = ((self.limbs[3] >> 25) | (self.limbs[4] << 4)) as u8; + s[15] = (self.limbs[4] >> 4) as u8; + s[16] = 
(self.limbs[4] >> 12) as u8; + s[17] = (self.limbs[4] >> 20) as u8; + s[18] = ((self.limbs[4] >> 28) | (self.limbs[5] << 1)) as u8; + s[19] = (self.limbs[5] >> 7) as u8; + s[20] = (self.limbs[5] >> 15) as u8; + s[21] = ((self.limbs[5] >> 23) | (self.limbs[6] << 6)) as u8; + s[22] = (self.limbs[6] >> 2) as u8; + s[23] = (self.limbs[6] >> 10) as u8; + s[24] = (self.limbs[6] >> 18) as u8; + s[25] = ((self.limbs[6] >> 26) | (self.limbs[7] << 3)) as u8; + s[26] = (self.limbs[7] >> 5) as u8; + s[27] = (self.limbs[7] >> 13) as u8; + s[28] = (self.limbs[7] >> 21) as u8; + s[29] = (self.limbs[8] >> 0) as u8; + s[30] = (self.limbs[8] >> 8) as u8; + s[31] = (self.limbs[8] >> 16) as u8; s } @@ -347,7 +351,7 @@ impl Scalar29 { let r8 = carry as u32; // result may be >= l, so attempt to subtract l - Scalar29::sub(&Scalar29([r0,r1,r2,r3,r4,r5,r6,r7,r8]), l) + Scalar29::sub(&Scalar29 { limbs: [r0,r1,r2,r3,r4,r5,r6,r7,r8] }, l) } /// Compute `a * b` (mod l). @@ -404,65 +408,85 @@ mod test { /// x = 2^253-1 = 14474011154664524427946373126085988481658748083205070504932198000989141204991 /// x = 7237005577332262213973186563042994240801631723825162898930247062703686954002 mod l /// x = 5147078182513738803124273553712992179887200054963030844803268920753008712037*R mod l in Montgomery form - pub static X: Scalar29 = Scalar29([ - 0x1fffffff, 0x1fffffff, 0x1fffffff, 0x1fffffff, 0x1fffffff, 0x1fffffff, 0x1fffffff, - 0x1fffffff, 0x001fffff, - ]); + pub static X: Scalar29 = Scalar29 { + limbs: [ + 0x1fffffff, 0x1fffffff, 0x1fffffff, 0x1fffffff, 0x1fffffff, 0x1fffffff, 0x1fffffff, + 0x1fffffff, 0x001fffff, + ], + }; /// x^2 = 3078544782642840487852506753550082162405942681916160040940637093560259278169 mod l - pub static XX: Scalar29 = Scalar29([ - 0x00217559, 0x000b3401, 0x103ff43b, 0x1462a62c, 0x1d6f9f38, 0x18e7a42f, 0x09a3dcee, - 0x008dbe18, 0x0006ce65, - ]); + pub static XX: Scalar29 = Scalar29 { + limbs: [ + 0x00217559, 0x000b3401, 0x103ff43b, 0x1462a62c, 0x1d6f9f38, 0x18e7a42f, 0x09a3dcee, + 0x008dbe18, 0x0006ce65, + ], + }; /// x^2 = 2912514428060642753613814151688322857484807845836623976981729207238463947987*R mod l in Montgomery form - pub static XX_MONT: Scalar29 = Scalar29([ - 0x152b4d2e, 0x0571d53b, 0x1da6d964, 0x188663b6, 0x1d1b5f92, 0x19d50e3f, 0x12306c29, - 0x0c6f26fe, 0x00030edb, - ]); + pub static XX_MONT: Scalar29 = Scalar29 { + limbs: [ + 0x152b4d2e, 0x0571d53b, 0x1da6d964, 0x188663b6, 0x1d1b5f92, 0x19d50e3f, 0x12306c29, + 0x0c6f26fe, 0x00030edb, + ], + }; /// y = 6145104759870991071742105800796537629880401874866217824609283457819451087098 - pub static Y: Scalar29 = Scalar29([ - 0x1e1458fa, 0x165ba838, 0x1d787b36, 0x0e577f3a, 0x1d2baf06, 0x1d689a19, 0x1fff3047, - 0x117704ab, 0x000d9601, - ]); + pub static Y: Scalar29 = Scalar29 { + limbs: [ + 0x1e1458fa, 0x165ba838, 0x1d787b36, 0x0e577f3a, 0x1d2baf06, 0x1d689a19, 0x1fff3047, + 0x117704ab, 0x000d9601, + ], + }; /// x*y = 36752150652102274958925982391442301741 - pub static XY: Scalar29 = Scalar29([ - 0x0ba7632d, 0x017736bb, 0x15c76138, 0x0c69daa1, 0x000001ba, 0x00000000, 0x00000000, - 0x00000000, 0x00000000, - ]); + pub static XY: Scalar29 = Scalar29 { + limbs: [ + 0x0ba7632d, 0x017736bb, 0x15c76138, 0x0c69daa1, 0x000001ba, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, + ], + }; /// x*y = 3783114862749659543382438697751927473898937741870308063443170013240655651591*R mod l in Montgomery form - pub static XY_MONT: Scalar29 = Scalar29([ - 0x077b51e1, 0x1c64e119, 0x02a19ef5, 0x18d2129e, 0x00de0430, 0x045a7bc8, 0x04cfc7c9, - 0x1c002681, 0x000bdc1c, - 
]); + pub static XY_MONT: Scalar29 = Scalar29 { + limbs: [ + 0x077b51e1, 0x1c64e119, 0x02a19ef5, 0x18d2129e, 0x00de0430, 0x045a7bc8, 0x04cfc7c9, + 0x1c002681, 0x000bdc1c, + ], + }; /// a = 2351415481556538453565687241199399922945659411799870114962672658845158063753 - pub static A: Scalar29 = Scalar29([ - 0x07b3be89, 0x02291b60, 0x14a99f03, 0x07dc3787, 0x0a782aae, 0x16262525, 0x0cfdb93f, - 0x13f5718d, 0x000532da, - ]); + pub static A: Scalar29 = Scalar29 { + limbs: [ + 0x07b3be89, 0x02291b60, 0x14a99f03, 0x07dc3787, 0x0a782aae, 0x16262525, 0x0cfdb93f, + 0x13f5718d, 0x000532da, + ], + }; /// b = 4885590095775723760407499321843594317911456947580037491039278279440296187236 - pub static B: Scalar29 = Scalar29([ - 0x15421564, 0x1e69fd72, 0x093d9692, 0x161785be, 0x1587d69f, 0x09d9dada, 0x130246c0, - 0x0c0a8e72, 0x000acd25, - ]); + pub static B: Scalar29 = Scalar29 { + limbs: [ + 0x15421564, 0x1e69fd72, 0x093d9692, 0x161785be, 0x1587d69f, 0x09d9dada, 0x130246c0, + 0x0c0a8e72, 0x000acd25, + ], + }; /// a+b = 0 /// a-b = 4702830963113076907131374482398799845891318823599740229925345317690316127506 - pub static AB: Scalar29 = Scalar29([ - 0x0f677d12, 0x045236c0, 0x09533e06, 0x0fb86f0f, 0x14f0555c, 0x0c4c4a4a, 0x19fb727f, - 0x07eae31a, 0x000a65b5, - ]); + pub static AB: Scalar29 = Scalar29 { + limbs: [ + 0x0f677d12, 0x045236c0, 0x09533e06, 0x0fb86f0f, 0x14f0555c, 0x0c4c4a4a, 0x19fb727f, + 0x07eae31a, 0x000a65b5, + ], + }; // c = (2^512 - 1) % l = 1627715501170711445284395025044413883736156588369414752970002579683115011840 - pub static C: Scalar29 = Scalar29([ - 0x049c0f00, 0x00308f1a, 0x0164d1e9, 0x1c374ed1, 0x1be65d00, 0x19e90bfa, 0x08f73bb1, - 0x036f8613, 0x00039941, - ]); + pub static C: Scalar29 = Scalar29 { + limbs: [ + 0x049c0f00, 0x00308f1a, 0x0164d1e9, 0x1c374ed1, 0x1be65d00, 0x19e90bfa, 0x08f73bb1, + 0x036f8613, 0x00039941, + ], + }; #[test] fn mul_max() { diff --git a/curve25519-dalek/src/backend/serial/u64/constants.rs b/curve25519-dalek/src/backend/serial/u64/constants.rs index 22e0ccca7..a4a998bfb 100644 --- a/curve25519-dalek/src/backend/serial/u64/constants.rs +++ b/curve25519-dalek/src/backend/serial/u64/constants.rs @@ -22,6 +22,8 @@ use crate::{ window::{LookupTable, NafLookupTable8}, }; +use vstd::prelude::*; + /// The value of minus one, equal to `-&FieldElement::ONE` pub(crate) const MINUS_ONE: FieldElement51 = FieldElement51::from_limbs([ 2251799813685228, @@ -123,35 +125,43 @@ pub(crate) const MONTGOMERY_A_NEG: FieldElement51 = FieldElement51::from_limbs([ 2251799813685247, ]); +verus! { /// `L` is the order of base point, i.e. 
2^252 + 27742317777372353535851937790883648493 -pub(crate) const L: Scalar52 = Scalar52([ - 0x0002631a5cf5d3ed, - 0x000dea2f79cd6581, - 0x000000000014def9, - 0x0000000000000000, - 0x0000100000000000, -]); +pub(crate) const L: Scalar52 = Scalar52 { + limbs: [ + 0x0002631a5cf5d3ed, + 0x000dea2f79cd6581, + 0x000000000014def9, + 0x0000000000000000, + 0x0000100000000000, + ], +}; /// `L` * `LFACTOR` = -1 (mod 2^52) pub(crate) const LFACTOR: u64 = 0x51da312547e1b; /// `R` = R % L where R = 2^260 -pub(crate) const R: Scalar52 = Scalar52([ - 0x000f48bd6721e6ed, - 0x0003bab5ac67e45a, - 0x000fffffeb35e51b, - 0x000fffffffffffff, - 0x00000fffffffffff, -]); +pub(crate) const R: Scalar52 = Scalar52 { + limbs: [ + 0x000f48bd6721e6ed, + 0x0003bab5ac67e45a, + 0x000fffffeb35e51b, + 0x000fffffffffffff, + 0x00000fffffffffff, + ], +}; /// `RR` = (R^2) % L where R = 2^260 -pub(crate) const RR: Scalar52 = Scalar52([ - 0x0009d265e952d13b, - 0x000d63c715bea69f, - 0x0005be65cb687604, - 0x0003dceec73d217f, - 0x000009411b7c309a, -]); +pub(crate) const RR: Scalar52 = Scalar52 { + limbs: [ + 0x0009d265e952d13b, + 0x000d63c715bea69f, + 0x0005be65cb687604, + 0x0003dceec73d217f, + 0x000009411b7c309a, + ], +}; +} /// The Ed25519 basepoint, as an `EdwardsPoint`. /// diff --git a/curve25519-dalek/src/backend/serial/u64/mod.rs b/curve25519-dalek/src/backend/serial/u64/mod.rs index aa29eb6ce..cea246c07 100644 --- a/curve25519-dalek/src/backend/serial/u64/mod.rs +++ b/curve25519-dalek/src/backend/serial/u64/mod.rs @@ -25,3 +25,9 @@ pub mod field; pub mod scalar; pub mod constants; + +pub mod subtle_assumes; + +pub mod scalar_lemmas; + +pub mod scalar_specs; diff --git a/curve25519-dalek/src/backend/serial/u64/scalar.rs b/curve25519-dalek/src/backend/serial/u64/scalar.rs index 5bcbb72c3..661bbcff9 100644 --- a/curve25519-dalek/src/backend/serial/u64/scalar.rs +++ b/curve25519-dalek/src/backend/serial/u64/scalar.rs @@ -13,56 +13,87 @@ use core::fmt::Debug; use core::ops::{Index, IndexMut}; -use subtle::{Choice, ConditionallySelectable}; +use subtle::Choice; #[cfg(feature = "zeroize")] use zeroize::Zeroize; use crate::constants; +#[allow(unused_imports)] +use super::scalar_lemmas::*; +#[allow(unused_imports)] +use super::scalar_specs::*; +use super::subtle_assumes::*; +#[allow(unused_imports)] +use vstd::arithmetic::div_mod::*; +#[allow(unused_imports)] +use vstd::arithmetic::power2::*; +use vstd::prelude::*; + +verus! { /// The `Scalar52` struct represents an element in /// \\(\mathbb Z / \ell \mathbb Z\\) as 5 \\(52\\)-bit limbs. #[derive(Copy, Clone)] -pub struct Scalar52(pub [u64; 5]); +pub struct Scalar52 { + pub limbs: [u64; 5], +} + +} // verus! impl Debug for Scalar52 { fn fmt(&self, f: &mut core::fmt::Formatter<'_>) -> core::fmt::Result { - write!(f, "Scalar52: {:?}", &self.0[..]) + write!(f, "Scalar52: {:?}", self.limbs) } } #[cfg(feature = "zeroize")] impl Zeroize for Scalar52 { fn zeroize(&mut self) { - self.0.zeroize(); + self.limbs.zeroize(); } } +verus! { impl Index for Scalar52 { type Output = u64; + // TODO Verify this + #[verifier::external_body] fn index(&self, _index: usize) -> &u64 { - &(self.0[_index]) + &(self.limbs[_index]) } } +} // verus! impl IndexMut for Scalar52 { fn index_mut(&mut self, _index: usize) -> &mut u64 { - &mut (self.0[_index]) + &mut (self.limbs[_index]) } } +verus! 
{ + /// u64 * u64 = u128 multiply helper #[inline(always)] -fn m(x: u64, y: u64) -> u128 { +fn m(x: u64, y: u64) -> (z: u128) +requires + x < (1u64 << 52), + y < (1u64 << 52), +ensures + z < (1u128 << 104), + z == x * y +{ + proof {lemma_52_52(x, y);} (x as u128) * (y as u128) } impl Scalar52 { /// The scalar \\( 0 \\). - pub const ZERO: Scalar52 = Scalar52([0, 0, 0, 0, 0]); + pub const ZERO: Scalar52 = Scalar52 { limbs: [0, 0, 0, 0, 0] }; /// Unpack a 32 byte / 256 bit scalar into 5 52-bit limbs. #[rustfmt::skip] // keep alignment of s[*] calculations + #[verifier::external_body] // TODO Verify this function pub fn from_bytes(bytes: &[u8; 32]) -> Scalar52 { let mut words = [0u64; 4]; for i in 0..4 { @@ -73,20 +104,21 @@ impl Scalar52 { let mask = (1u64 << 52) - 1; let top_mask = (1u64 << 48) - 1; - let mut s = Scalar52::ZERO; - - s[0] = words[0] & mask; - s[1] = ((words[0] >> 52) | (words[1] << 12)) & mask; - s[2] = ((words[1] >> 40) | (words[2] << 24)) & mask; - s[3] = ((words[2] >> 28) | (words[3] << 36)) & mask; - s[4] = (words[3] >> 16) & top_mask; + let mut s = Scalar52 { limbs: [0u64, 0u64, 0u64, 0u64, 0u64] }; + s.limbs[0] = words[0] & mask; + s.limbs[1] = ((words[0] >> 52) | (words[1] << 12)) & mask; + s.limbs[2] = ((words[1] >> 40) | (words[2] << 24)) & mask; + s.limbs[3] = ((words[2] >> 28) | (words[3] << 36)) & mask; + s.limbs[4] = (words[3] >> 16) & top_mask; s } /// Reduce a 64 byte / 512 bit scalar mod l #[rustfmt::skip] // keep alignment of lo[*] and hi[*] calculations - pub fn from_bytes_wide(bytes: &[u8; 64]) -> Scalar52 { + #[verifier::external_body] // TODO Verify this function + pub fn from_bytes_wide(bytes: &[u8; 64]) -> (s: Scalar52) + { let mut words = [0u64; 8]; for i in 0..8 { for j in 0..8 { @@ -95,8 +127,8 @@ impl Scalar52 { } let mask = (1u64 << 52) - 1; - let mut lo = Scalar52::ZERO; - let mut hi = Scalar52::ZERO; + let mut lo = Scalar52 { limbs: [0u64, 0u64, 0u64, 0u64, 0u64] }; + let mut hi = Scalar52 { limbs: [0u64, 0u64, 0u64, 0u64, 0u64] }; lo[0] = words[0] & mask; lo[1] = ((words[0] >> 52) | (words[ 1] << 12)) & mask; @@ -118,169 +150,324 @@ impl Scalar52 { /// Pack the limbs of this `Scalar52` into 32 bytes #[rustfmt::skip] // keep alignment of s[*] calculations #[allow(clippy::identity_op)] - pub fn to_bytes(self) -> [u8; 32] { + #[allow(clippy::wrong_self_convention)] + #[verifier::external_body] // TODO Verify this function + pub fn to_bytes(self) -> [u8; 32] + { let mut s = [0u8; 32]; - s[ 0] = (self.0[ 0] >> 0) as u8; - s[ 1] = (self.0[ 0] >> 8) as u8; - s[ 2] = (self.0[ 0] >> 16) as u8; - s[ 3] = (self.0[ 0] >> 24) as u8; - s[ 4] = (self.0[ 0] >> 32) as u8; - s[ 5] = (self.0[ 0] >> 40) as u8; - s[ 6] = ((self.0[ 0] >> 48) | (self.0[ 1] << 4)) as u8; - s[ 7] = (self.0[ 1] >> 4) as u8; - s[ 8] = (self.0[ 1] >> 12) as u8; - s[ 9] = (self.0[ 1] >> 20) as u8; - s[10] = (self.0[ 1] >> 28) as u8; - s[11] = (self.0[ 1] >> 36) as u8; - s[12] = (self.0[ 1] >> 44) as u8; - s[13] = (self.0[ 2] >> 0) as u8; - s[14] = (self.0[ 2] >> 8) as u8; - s[15] = (self.0[ 2] >> 16) as u8; - s[16] = (self.0[ 2] >> 24) as u8; - s[17] = (self.0[ 2] >> 32) as u8; - s[18] = (self.0[ 2] >> 40) as u8; - s[19] = ((self.0[ 2] >> 48) | (self.0[ 3] << 4)) as u8; - s[20] = (self.0[ 3] >> 4) as u8; - s[21] = (self.0[ 3] >> 12) as u8; - s[22] = (self.0[ 3] >> 20) as u8; - s[23] = (self.0[ 3] >> 28) as u8; - s[24] = (self.0[ 3] >> 36) as u8; - s[25] = (self.0[ 3] >> 44) as u8; - s[26] = (self.0[ 4] >> 0) as u8; - s[27] = (self.0[ 4] >> 8) as u8; - s[28] = (self.0[ 4] >> 16) as u8; - 
s[29] = (self.0[ 4] >> 24) as u8; - s[30] = (self.0[ 4] >> 32) as u8; - s[31] = (self.0[ 4] >> 40) as u8; + s[ 0] = (self.limbs[ 0] >> 0) as u8; + s[ 1] = (self.limbs[ 0] >> 8) as u8; + s[ 2] = (self.limbs[ 0] >> 16) as u8; + s[ 3] = (self.limbs[ 0] >> 24) as u8; + s[ 4] = (self.limbs[ 0] >> 32) as u8; + s[ 5] = (self.limbs[ 0] >> 40) as u8; + s[ 6] = ((self.limbs[ 0] >> 48) | (self.limbs[ 1] << 4)) as u8; + s[ 7] = (self.limbs[ 1] >> 4) as u8; + s[ 8] = (self.limbs[ 1] >> 12) as u8; + s[ 9] = (self.limbs[ 1] >> 20) as u8; + s[10] = (self.limbs[ 1] >> 28) as u8; + s[11] = (self.limbs[ 1] >> 36) as u8; + s[12] = (self.limbs[ 1] >> 44) as u8; + s[13] = (self.limbs[ 2] >> 0) as u8; + s[14] = (self.limbs[ 2] >> 8) as u8; + s[15] = (self.limbs[ 2] >> 16) as u8; + s[16] = (self.limbs[ 2] >> 24) as u8; + s[17] = (self.limbs[ 2] >> 32) as u8; + s[18] = (self.limbs[ 2] >> 40) as u8; + s[19] = ((self.limbs[ 2] >> 48) | (self.limbs[ 3] << 4)) as u8; + s[20] = (self.limbs[ 3] >> 4) as u8; + s[21] = (self.limbs[ 3] >> 12) as u8; + s[22] = (self.limbs[ 3] >> 20) as u8; + s[23] = (self.limbs[ 3] >> 28) as u8; + s[24] = (self.limbs[ 3] >> 36) as u8; + s[25] = (self.limbs[ 3] >> 44) as u8; + s[26] = (self.limbs[ 4] >> 0) as u8; + s[27] = (self.limbs[ 4] >> 8) as u8; + s[28] = (self.limbs[ 4] >> 16) as u8; + s[29] = (self.limbs[ 4] >> 24) as u8; + s[30] = (self.limbs[ 4] >> 32) as u8; + s[31] = (self.limbs[ 4] >> 40) as u8; s } /// Compute `a + b` (mod l) - pub fn add(a: &Scalar52, b: &Scalar52) -> Scalar52 { - let mut sum = Scalar52::ZERO; + pub fn add(a: &Scalar52, b: &Scalar52) -> (s: Scalar52) + requires + limbs_bounded(a), + limbs_bounded(b), + to_nat(&a.limbs) < group_order(), + to_nat(&b.limbs) < group_order(), + ensures + to_nat(&s.limbs) == (to_nat(&a.limbs) + to_nat(&b.limbs)) % group_order(), + { + let mut sum = Scalar52 { limbs: [0u64, 0u64, 0u64, 0u64, 0u64] }; + proof { assert(1u64 << 52 > 0) by (bit_vector); } let mask = (1u64 << 52) - 1; // a + b let mut carry: u64 = 0; - for i in 0..5 { - carry = a[i] + b[i] + (carry >> 52); - sum[i] = carry & mask; + proof { + // Base case: empty subrange has value 0 + assert(seq_u64_to_nat(a.limbs@.subrange(0, 0 as int)) == 0); + assert(seq_u64_to_nat(b.limbs@.subrange(0, 0 as int)) == 0); + assert(seq_u64_to_nat(sum.limbs@.subrange(0, 0 as int)) == 0); + assert((carry >> 52) == 0) by (bit_vector) requires carry == 0; + lemma2_to64(); + assert(pow2(0) == 1); + } + for i in 0..5 + invariant + forall|j: int| 0 <= j < i ==> sum.limbs[j] < 1u64 << 52, + limbs_bounded(a), + limbs_bounded(b), + mask == (1u64 << 52) - 1, + i == 0 ==> carry == 0, + i >= 1 ==> (carry >> 52) < 2, + seq_u64_to_nat(a.limbs@.subrange(0, i as int)) + seq_u64_to_nat(b.limbs@.subrange(0, i as int)) == + seq_u64_to_nat(sum.limbs@.subrange(0, i as int)) + (carry >> 52) * pow2((52 * (i) as nat)) + { + proof {lemma_add_loop_bounds(i as int, carry, a.limbs[i as int], b.limbs[i as int]);} + let ghost old_carry = carry; + carry = a.limbs[i] + b.limbs[i] + (carry >> 52); + let ghost sum_loop_start = sum; + sum.limbs[i] = carry & mask; + assert(sum_loop_start.limbs@.subrange(0, i as int) == sum.limbs@.subrange(0, i as int)); + proof { + lemma_add_loop_invariant(sum, carry, i, a, b, old_carry, mask, sum_loop_start); + } + proof {lemma_add_carry_and_sum_bounds(carry, mask);} } + assert(seq_u64_to_nat(a.limbs@.subrange(0, 5 as int)) + seq_u64_to_nat(b.limbs@.subrange(0, 5 as int)) == + seq_u64_to_nat(sum.limbs@.subrange(0, 5 as int)) + (carry >> 52) * pow2((52 * (5) as nat))); + + proof 
{lemma_add_sum_simplify(a, b, &sum, carry);} + // subtract l if the sum is >= l + proof { lemma_l_value_properties(&constants::L, &sum); } + assert(group_order() > to_nat(&sum.limbs) - group_order() >= -group_order()); + proof{lemma_l_equals_group_order();} + proof{lemma_mod_sub_multiples_vanish(to_nat(&sum.limbs) as int, group_order() as int);} Scalar52::sub(&sum, &constants::L) + } /// Compute `a - b` (mod l) - pub fn sub(a: &Scalar52, b: &Scalar52) -> Scalar52 { - let mut difference = Scalar52::ZERO; + pub fn sub(a: &Scalar52, b: &Scalar52) -> (s: Scalar52) + requires + limbs_bounded(a), + limbs_bounded(b), + // Without the following condition, all we can prove is something like: + // to_nat(&a.limbs) >= to_nat(&b.limbs) ==> to_nat(&s.limbs) == to_nat(&a.limbs) - to_nat(&b.limbs), + // to_nat(&a.limbs) < to_nat(&b.limbs) ==> to_nat(&s.limbs) == (to_nat(&a.limbs) - to_nat(&b.limbs) + pow2(260) + group_order()) % (pow2(260) as int), + // In the 2nd case, `sub` doesn't always do subtraction mod group_order + -group_order() <= to_nat(&a.limbs) - to_nat(&b.limbs) < group_order(), + ensures + to_nat(&s.limbs) == (to_nat(&a.limbs) - to_nat(&b.limbs)) % (group_order() as int), + limbs_bounded(&s), + { + let mut difference = Scalar52 { limbs: [0u64, 0u64, 0u64, 0u64, 0u64] }; + proof { assert(1u64 << 52 > 0) by (bit_vector);} let mask = (1u64 << 52) - 1; // a - b let mut borrow: u64 = 0; - for i in 0..5 { - borrow = a[i].wrapping_sub(b[i] + (borrow >> 63)); - difference[i] = borrow & mask; + assert(seq_u64_to_nat(a.limbs@.subrange(0, 0 as int)) - seq_u64_to_nat(b.limbs@.subrange(0, 0 as int )) == + seq_u64_to_nat(difference.limbs@.subrange(0, 0 as int ))); + assert( (borrow >> 63) == 0 ) by (bit_vector) + requires borrow == 0; + assert(seq_u64_to_nat(a.limbs@.subrange(0, 0 as int)) - seq_u64_to_nat(b.limbs@.subrange(0, 0 as int )) == + seq_u64_to_nat(difference.limbs@.subrange(0, 0 as int )) - (borrow >> 63) * pow2((52 * (0) as nat))); + for i in 0..5 + invariant + limbs_bounded(b), + limbs_bounded(a), + forall|j: int| 0 <= j < i ==> difference.limbs[j] < (1u64 << 52), + mask == (1u64 << 52) - 1, + seq_u64_to_nat(a.limbs@.subrange(0, i as int)) - seq_u64_to_nat(b.limbs@.subrange(0, i as int )) == + seq_u64_to_nat(difference.limbs@.subrange(0, i as int )) - (borrow >> 63) * pow2((52 * (i) as nat)) + { + proof { assert ((borrow >> 63) < 2) by (bit_vector); } + let ghost old_borrow = borrow; + borrow = a.limbs[i].wrapping_sub(b.limbs[i] + (borrow >> 63)); + let ghost difference_loop1_start = difference; + difference.limbs[i] = borrow & mask; + assert(difference_loop1_start.limbs@.subrange(0, i as int) == difference.limbs@.subrange(0, i as int)); + assert( + seq_u64_to_nat(a.limbs@.subrange(0, i as int)) - seq_u64_to_nat(b.limbs@.subrange(0, i as int )) == + seq_u64_to_nat(difference_loop1_start.limbs@.subrange(0, i as int )) - (old_borrow >> 63) * pow2((52 * (i) as nat))); + proof{ + lemma_sub_loop1_invariant(difference, borrow, i, a, b, old_borrow, mask, difference_loop1_start); + } + proof { lemma_borrow_and_mask_bounded(borrow, mask); } } + assert(seq_u64_to_nat(a.limbs@.subrange(0, 5 as int)) - seq_u64_to_nat(b.limbs@.subrange(0, 5 as int )) == + seq_u64_to_nat(difference.limbs@.subrange(0, 5 as int )) - (borrow >> 63) * pow2((52 * (5) as nat)) ); // conditionally add l if the difference is negative + assert(borrow >> 63 == 1 || borrow >> 63 == 0) by (bit_vector); let mut carry: u64 = 0; - for i in 0..5 { + let ghost difference_after_loop1 = difference; + 
assert(seq_u64_to_nat(difference_after_loop1.limbs@.subrange(0, 0 as int)) == 0); + assert(seq_u64_to_nat(constants::L.limbs@.subrange(0, 0 as int)) == 0); + assert(seq_u64_to_nat(difference.limbs@.subrange(0, 0 as int)) == 0); + assert(carry >> 52 == 0) by (bit_vector) + requires carry == 0; + for i in 0..5 + invariant + forall|j: int| 0 <= j < 5 ==> difference.limbs[j] < (1u64 << 52), // from first loop + forall|j: int| i <= j < 5 ==> difference.limbs[j] == difference_after_loop1.limbs[j], + mask == (1u64 << 52) - 1, + i == 0 ==> carry == 0, + i >= 1 ==> (carry >> 52) < 2, + (i >=1 && borrow >> 63 == 0) ==> carry == difference.limbs[i-1], + borrow >> 63 == 0 ==> difference_after_loop1 == difference, + borrow >> 63 == 1 ==> + seq_u64_to_nat(difference_after_loop1.limbs@.subrange(0, i as int)) + seq_u64_to_nat(constants::L.limbs@.subrange(0, i as int)) == + seq_u64_to_nat(difference.limbs@.subrange(0, i as int)) + (carry >> 52) * pow2(52 * i as nat) + + { + let ghost old_carry = carry; let underflow = Choice::from((borrow >> 63) as u8); - let addend = u64::conditional_select(&0, &constants::L[i], underflow); - carry = (carry >> 52) + difference[i] + addend; - difference[i] = carry & mask; + let addend = select(&0, &constants::L.limbs[i], underflow); + if borrow >> 63 == 0 { + assert(addend == 0); + } + if borrow >> 63 == 1 { + assert(addend == constants::L.limbs[i as int]); + } + proof {lemma_scalar_subtract_no_overflow(carry, difference.limbs[i as int], addend, i as u32, &constants::L);} + carry = (carry >> 52) + difference.limbs[i] + addend; + let ghost difference_loop2_start = difference; + difference.limbs[i] = carry & mask; + proof { + lemma_carry_bounded_after_mask(carry, mask); + assert(difference_loop2_start.limbs@.subrange(0, i as int) == difference.limbs@.subrange(0, i as int)); + lemma_sub_loop2_invariant(difference, i, a, b, mask, difference_after_loop1, difference_loop2_start, carry, old_carry, addend, borrow); + } } - + proof { lemma_sub_correct_after_loops(difference, carry, a, b, difference_after_loop1, borrow);} difference } /// Compute `a * b` #[inline(always)] #[rustfmt::skip] // keep alignment of z[*] calculations - pub (crate) fn mul_internal(a: &Scalar52, b: &Scalar52) -> [u128; 9] { + pub (crate) fn mul_internal(a: &Scalar52, b: &Scalar52) -> (z: [u128; 9]) + requires + limbs_bounded(a), + limbs_bounded(b), + ensures + slice128_to_nat(&z) == to_nat(&a.limbs) * to_nat(&b.limbs), + { + proof {lemma_mul_internal_no_overflow()} + let mut z = [0u128; 9]; - z[0] = m(a[0], b[0]); - z[1] = m(a[0], b[1]) + m(a[1], b[0]); - z[2] = m(a[0], b[2]) + m(a[1], b[1]) + m(a[2], b[0]); - z[3] = m(a[0], b[3]) + m(a[1], b[2]) + m(a[2], b[1]) + m(a[3], b[0]); - z[4] = m(a[0], b[4]) + m(a[1], b[3]) + m(a[2], b[2]) + m(a[3], b[1]) + m(a[4], b[0]); - z[5] = m(a[1], b[4]) + m(a[2], b[3]) + m(a[3], b[2]) + m(a[4], b[1]); - z[6] = m(a[2], b[4]) + m(a[3], b[3]) + m(a[4], b[2]); - z[7] = m(a[3], b[4]) + m(a[4], b[3]); - z[8] = m(a[4], b[4]); + z[0] = m(a.limbs[0], b.limbs[0]); + z[1] = m(a.limbs[0], b.limbs[1]) + m(a.limbs[1], b.limbs[0]); + z[2] = m(a.limbs[0], b.limbs[2]) + m(a.limbs[1], b.limbs[1]) + m(a.limbs[2], b.limbs[0]); + z[3] = m(a.limbs[0], b.limbs[3]) + m(a.limbs[1], b.limbs[2]) + m(a.limbs[2], b.limbs[1]) + m(a.limbs[3], b.limbs[0]); + z[4] = m(a.limbs[0], b.limbs[4]) + m(a.limbs[1], b.limbs[3]) + m(a.limbs[2], b.limbs[2]) + m(a.limbs[3], b.limbs[1]) + m(a.limbs[4], b.limbs[0]); + z[5] = m(a.limbs[1], b.limbs[4]) + m(a.limbs[2], b.limbs[3]) + m(a.limbs[3], b.limbs[2]) + 
m(a.limbs[4], b.limbs[1]); + z[6] = m(a.limbs[2], b.limbs[4]) + m(a.limbs[3], b.limbs[3]) + m(a.limbs[4], b.limbs[2]); + z[7] = m(a.limbs[3], b.limbs[4]) + m(a.limbs[4], b.limbs[3]); + z[8] = m(a.limbs[4], b.limbs[4]); + + proof {lemma_mul_internal_correct(&a.limbs, &b.limbs, &z);} z } + + // TODO Make this function more like the original? /// Compute `a^2` #[inline(always)] - #[rustfmt::skip] // keep alignment of return calculations - fn square_internal(a: &Scalar52) -> [u128; 9] { - let aa = [ - a[0] * 2, - a[1] * 2, - a[2] * 2, - a[3] * 2, - ]; - - [ - m( a[0], a[0]), - m(aa[0], a[1]), - m(aa[0], a[2]) + m( a[1], a[1]), - m(aa[0], a[3]) + m(aa[1], a[2]), - m(aa[0], a[4]) + m(aa[1], a[3]) + m( a[2], a[2]), - m(aa[1], a[4]) + m(aa[2], a[3]), - m(aa[2], a[4]) + m( a[3], a[3]), - m(aa[3], a[4]), - m(a[4], a[4]) - ] + #[rustfmt::skip] // keep alignment of calculations + pub (crate) fn square_internal(a: &Scalar52) -> (z: [u128; 9]) + requires + limbs_bounded(a), + ensures + slice128_to_nat(&z) == to_nat(&a.limbs) * to_nat(&a.limbs), + { + proof {lemma_square_internal_no_overflow()} + + let mut z = [0u128; 9]; + z[0] = m(a.limbs[0], a.limbs[0]); + z[1] = m(a.limbs[0], a.limbs[1]) * 2; + z[2] = m(a.limbs[0], a.limbs[2]) * 2 + m(a.limbs[1], a.limbs[1]); + z[3] = m(a.limbs[0], a.limbs[3]) * 2 + m(a.limbs[1], a.limbs[2]) * 2; + z[4] = m(a.limbs[0], a.limbs[4]) * 2 + m(a.limbs[1], a.limbs[3]) * 2 + m(a.limbs[2], a.limbs[2]); + z[5] = m(a.limbs[1], a.limbs[4]) * 2 + m(a.limbs[2], a.limbs[3]) * 2; + z[6] = m(a.limbs[2], a.limbs[4]) * 2 + m(a.limbs[3], a.limbs[3]); + z[7] = m(a.limbs[3], a.limbs[4]) * 2; + z[8] = m(a.limbs[4], a.limbs[4]); + + proof {lemma_square_internal_correct(&a.limbs, &z);} + + z } /// Compute `limbs/R` (mod l), where R is the Montgomery modulus 2^260 #[inline(always)] #[rustfmt::skip] // keep alignment of n* and r* calculations - pub (crate) fn montgomery_reduce(limbs: &[u128; 9]) -> Scalar52 { + #[verifier::external_body] // TODO Verify this function + pub (crate) fn montgomery_reduce(limbs: &[u128; 9]) -> (result: Scalar52) + ensures + (to_nat(&result.limbs) * montgomery_radix()) % group_order() == slice128_to_nat(limbs) % group_order(), + limbs_bounded(&result), + { - #[inline(always)] - fn part1(sum: u128) -> (u128, u64) { - let p = (sum as u64).wrapping_mul(constants::LFACTOR) & ((1u64 << 52) - 1); - ((sum + m(p, constants::L[0])) >> 52, p) - } - - #[inline(always)] - fn part2(sum: u128) -> (u128, u64) { - let w = (sum as u64) & ((1u64 << 52) - 1); - (sum >> 52, w) - } // note: l[3] is zero, so its multiples can be skipped let l = &constants::L; // the first half computes the Montgomery adjustment factor n, and begins adding n*l to make limbs divisible by R - let (carry, n0) = part1( limbs[0]); - let (carry, n1) = part1(carry + limbs[1] + m(n0, l[1])); - let (carry, n2) = part1(carry + limbs[2] + m(n0, l[2]) + m(n1, l[1])); - let (carry, n3) = part1(carry + limbs[3] + m(n1, l[2]) + m(n2, l[1])); - let (carry, n4) = part1(carry + limbs[4] + m(n0, l[4]) + m(n2, l[2]) + m(n3, l[1])); + let (carry, n0) = Self::part1(limbs[0]); + let (carry, n1) = Self::part1(carry + limbs[1] + m(n0, l.limbs[1])); + let (carry, n2) = Self::part1(carry + limbs[2] + m(n0, l.limbs[2]) + m(n1, l.limbs[1])); + let (carry, n3) = Self::part1(carry + limbs[3] + m(n1, l.limbs[2]) + m(n2, l.limbs[1])); + let (carry, n4) = Self::part1(carry + limbs[4] + m(n0, l.limbs[4]) + m(n2, l.limbs[2]) + m(n3, l.limbs[1])); // limbs is divisible by R now, so we can divide by R by simply storing the upper half as the 
result - let (carry, r0) = part2(carry + limbs[5] + m(n1, l[4]) + m(n3, l[2]) + m(n4, l[1])); - let (carry, r1) = part2(carry + limbs[6] + m(n2,l[4]) + m(n4, l[2])); - let (carry, r2) = part2(carry + limbs[7] + m(n3, l[4]) ); - let (carry, r3) = part2(carry + limbs[8] + m(n4, l[4])); - let r4 = carry as u64; + let (carry, r0) = Self::part2(carry + limbs[5] + m(n1, l.limbs[4]) + m(n3, l.limbs[2]) + m(n4, l.limbs[1])); + let (carry, r1) = Self::part2(carry + limbs[6] + m(n2, l.limbs[4]) + m(n4, l.limbs[2])); + let (carry, r2) = Self::part2(carry + limbs[7] + m(n3, l.limbs[4])); + let (carry, r3) = Self::part2(carry + limbs[8] + m(n4, l.limbs[4])); + let r4 = carry as u64; // result may be >= l, so attempt to subtract l - Scalar52::sub(&Scalar52([r0, r1, r2, r3, r4]), l) + Scalar52::sub(&Scalar52 { limbs: [r0, r1, r2, r3, r4] }, l) + } + + + /// Helper function for Montgomery reduction + #[inline(always)] + #[verifier::external_body] // TODO Verify this function + fn part1(sum: u128) -> (res: (u128, u64)) + { + let p = (sum as u64).wrapping_mul(constants::LFACTOR) & ((1u64 << 52) - 1); + let carry = (sum + m(p, constants::L.limbs[0])) >> 52; + (carry, p) + } + + /// Helper function for Montgomery reduction + #[inline(always)] + #[verifier::external_body] // TODO Verify this function + fn part2(sum: u128) -> (res: (u128, u64)) + { + let w = (sum as u64) & ((1u64 << 52) - 1); + let carry = sum >> 52; + (carry, w) } /// Compute `a * b` (mod l) #[inline(never)] - pub fn mul(a: &Scalar52, b: &Scalar52) -> Scalar52 { + #[verifier::external_body] // TODO Verify this function + pub fn mul(a: &Scalar52, b: &Scalar52) -> Scalar52 + { let ab = Scalar52::montgomery_reduce(&Scalar52::mul_internal(a, b)); Scalar52::montgomery_reduce(&Scalar52::mul_internal(&ab, &constants::RR)) } @@ -288,41 +475,76 @@ impl Scalar52 { /// Compute `a^2` (mod l) #[inline(never)] #[allow(dead_code)] // XXX we don't expose square() via the Scalar API - pub fn square(&self) -> Scalar52 { + #[verifier::external_body] // TODO Verify this function + pub fn square(&self) -> Scalar52 + { let aa = Scalar52::montgomery_reduce(&Scalar52::square_internal(self)); Scalar52::montgomery_reduce(&Scalar52::mul_internal(&aa, &constants::RR)) } /// Compute `(a * b) / R` (mod l), where R is the Montgomery modulus 2^260 #[inline(never)] - pub fn montgomery_mul(a: &Scalar52, b: &Scalar52) -> Scalar52 { + pub fn montgomery_mul(a: &Scalar52, b: &Scalar52) -> (result: Scalar52) + requires + limbs_bounded(a), + limbs_bounded(b), + ensures + limbs_bounded(&result), + (to_nat(&result.limbs) * montgomery_radix()) % group_order() == (to_nat(&a.limbs) * to_nat(&b.limbs)) % group_order(), + { Scalar52::montgomery_reduce(&Scalar52::mul_internal(a, b)) } /// Compute `(a^2) / R` (mod l) in Montgomery form, where R is the Montgomery modulus 2^260 #[inline(never)] - pub fn montgomery_square(&self) -> Scalar52 { + pub fn montgomery_square(&self) -> (result: Scalar52) + requires + limbs_bounded(self), + ensures + limbs_bounded(&result), + (to_nat(&result.limbs) * montgomery_radix()) % group_order() == (to_nat(&self.limbs) * to_nat(&self.limbs)) % group_order(), + { Scalar52::montgomery_reduce(&Scalar52::square_internal(self)) } /// Puts a Scalar52 in to Montgomery form, i.e. computes `a*R (mod l)` #[inline(never)] - pub fn as_montgomery(&self) -> Scalar52 { + #[verifier::external_body] // TODO Verify this function + pub fn as_montgomery(&self) -> Scalar52 + { Scalar52::montgomery_mul(self, &constants::RR) } /// Takes a Scalar52 out of Montgomery form, i.e. 
computes `a/R (mod l)` #[allow(clippy::wrong_self_convention)] #[inline(never)] - pub fn from_montgomery(&self) -> Scalar52 { + pub fn from_montgomery(&self) -> (result: Scalar52) + requires + limbs_bounded(self), + ensures + limbs_bounded(&result), + (to_nat(&result.limbs) * montgomery_radix()) % group_order() == to_nat(&self.limbs) % group_order(), + { let mut limbs = [0u128; 9]; - for i in 0..5 { - limbs[i] = self[i] as u128; + #[allow(clippy::needless_range_loop)] + for i in 0..5 + invariant + forall|j: int| #![auto] 0 <= j < i ==> limbs[j] == self.limbs[j] as u128, + forall|j: int| #![auto] i <= j < 9 ==> limbs[j] == 0, + { + limbs[i] = self.limbs[i] as u128; + } + let result = Scalar52::montgomery_reduce(&limbs); + proof { + lemma_from_montgomery_limbs_conversion(&limbs, &self.limbs); } - Scalar52::montgomery_reduce(&limbs) + result } } + +} // verus! + #[cfg(test)] mod test { use super::*; @@ -333,95 +555,115 @@ mod test { /// x = 14474011154664524427946373126085988481658748083205070504932198000989141204991 /// x = 7237005577332262213973186563042994240801631723825162898930247062703686954002 mod l /// x = 3057150787695215392275360544382990118917283750546154083604586903220563173085*R mod l in Montgomery form - pub static X: Scalar52 = Scalar52([ - 0x000fffffffffffff, - 0x000fffffffffffff, - 0x000fffffffffffff, - 0x000fffffffffffff, - 0x00001fffffffffff, - ]); + pub static X: Scalar52 = Scalar52 { + limbs: [ + 0x000fffffffffffff, + 0x000fffffffffffff, + 0x000fffffffffffff, + 0x000fffffffffffff, + 0x00001fffffffffff, + ], + }; /// x^2 = 3078544782642840487852506753550082162405942681916160040940637093560259278169 mod l - pub static XX: Scalar52 = Scalar52([ - 0x0001668020217559, - 0x000531640ffd0ec0, - 0x00085fd6f9f38a31, - 0x000c268f73bb1cf4, - 0x000006ce65046df0, - ]); + pub static XX: Scalar52 = Scalar52 { + limbs: [ + 0x0001668020217559, + 0x000531640ffd0ec0, + 0x00085fd6f9f38a31, + 0x000c268f73bb1cf4, + 0x000006ce65046df0, + ], + }; /// x^2 = 4413052134910308800482070043710297189082115023966588301924965890668401540959*R mod l in Montgomery form - pub static XX_MONT: Scalar52 = Scalar52([ - 0x000c754eea569a5c, - 0x00063b6ed36cb215, - 0x0008ffa36bf25886, - 0x000e9183614e7543, - 0x0000061db6c6f26f, - ]); + pub static XX_MONT: Scalar52 = Scalar52 { + limbs: [ + 0x000c754eea569a5c, + 0x00063b6ed36cb215, + 0x0008ffa36bf25886, + 0x000e9183614e7543, + 0x0000061db6c6f26f, + ], + }; /// y = 6145104759870991071742105800796537629880401874866217824609283457819451087098 - pub static Y: Scalar52 = Scalar52([ - 0x000b75071e1458fa, - 0x000bf9d75e1ecdac, - 0x000433d2baf0672b, - 0x0005fffcc11fad13, - 0x00000d96018bb825, - ]); + pub static Y: Scalar52 = Scalar52 { + limbs: [ + 0x000b75071e1458fa, + 0x000bf9d75e1ecdac, + 0x000433d2baf0672b, + 0x0005fffcc11fad13, + 0x00000d96018bb825, + ], + }; /// x*y = 36752150652102274958925982391442301741 mod l - pub static XY: Scalar52 = Scalar52([ - 0x000ee6d76ba7632d, - 0x000ed50d71d84e02, - 0x00000000001ba634, - 0x0000000000000000, - 0x0000000000000000, - ]); + pub static XY: Scalar52 = Scalar52 { + limbs: [ + 0x000ee6d76ba7632d, + 0x000ed50d71d84e02, + 0x00000000001ba634, + 0x0000000000000000, + 0x0000000000000000, + ], + }; /// x*y = 658448296334113745583381664921721413881518248721417041768778176391714104386*R mod l in Montgomery form - pub static XY_MONT: Scalar52 = Scalar52([ - 0x0006d52bf200cfd5, - 0x00033fb1d7021570, - 0x000f201bc07139d8, - 0x0001267e3e49169e, - 0x000007b839c00268, - ]); + pub static XY_MONT: Scalar52 = Scalar52 { + limbs: [ + 
0x0006d52bf200cfd5, + 0x00033fb1d7021570, + 0x000f201bc07139d8, + 0x0001267e3e49169e, + 0x000007b839c00268, + ], + }; /// a = 2351415481556538453565687241199399922945659411799870114962672658845158063753 - pub static A: Scalar52 = Scalar52([ - 0x0005236c07b3be89, - 0x0001bc3d2a67c0c4, - 0x000a4aa782aae3ee, - 0x0006b3f6e4fec4c4, - 0x00000532da9fab8c, - ]); + pub static A: Scalar52 = Scalar52 { + limbs: [ + 0x0005236c07b3be89, + 0x0001bc3d2a67c0c4, + 0x000a4aa782aae3ee, + 0x0006b3f6e4fec4c4, + 0x00000532da9fab8c, + ], + }; /// b = 4885590095775723760407499321843594317911456947580037491039278279440296187236 - pub static B: Scalar52 = Scalar52([ - 0x000d3fae55421564, - 0x000c2df24f65a4bc, - 0x0005b5587d69fb0b, - 0x00094c091b013b3b, - 0x00000acd25605473, - ]); + pub static B: Scalar52 = Scalar52 { + limbs: [ + 0x000d3fae55421564, + 0x000c2df24f65a4bc, + 0x0005b5587d69fb0b, + 0x00094c091b013b3b, + 0x00000acd25605473, + ], + }; /// a+b = 0 /// a-b = 4702830963113076907131374482398799845891318823599740229925345317690316127506 - pub static AB: Scalar52 = Scalar52([ - 0x000a46d80f677d12, - 0x0003787a54cf8188, - 0x0004954f0555c7dc, - 0x000d67edc9fd8989, - 0x00000a65b53f5718, - ]); + pub static AB: Scalar52 = Scalar52 { + limbs: [ + 0x000a46d80f677d12, + 0x0003787a54cf8188, + 0x0004954f0555c7dc, + 0x000d67edc9fd8989, + 0x00000a65b53f5718, + ], + }; // c = (2^512 - 1) % l = 1627715501170711445284395025044413883736156588369414752970002579683115011840 - pub static C: Scalar52 = Scalar52([ - 0x000611e3449c0f00, - 0x000a768859347a40, - 0x0007f5be65d00e1b, - 0x0009a3dceec73d21, - 0x00000399411b7c30, - ]); + pub static C: Scalar52 = Scalar52 { + limbs: [ + 0x000611e3449c0f00, + 0x000a768859347a40, + 0x0007f5be65d00e1b, + 0x0009a3dceec73d21, + 0x00000399411b7c30, + ], + }; #[test] fn mul_max() { diff --git a/curve25519-dalek/src/backend/serial/u64/scalar_lemmas.rs b/curve25519-dalek/src/backend/serial/u64/scalar_lemmas.rs new file mode 100644 index 000000000..5cc6bfbc5 --- /dev/null +++ b/curve25519-dalek/src/backend/serial/u64/scalar_lemmas.rs @@ -0,0 +1,1192 @@ +#[allow(unused_imports)] +use super::constants; +#[allow(unused_imports)] +use super::scalar::Scalar52; +#[allow(unused_imports)] +use super::scalar_specs::*; +#[allow(unused_imports)] +use vstd::arithmetic::div_mod::*; +#[allow(unused_imports)] +use vstd::arithmetic::mul::*; +#[allow(unused_imports)] +use vstd::arithmetic::power2::*; +#[allow(unused_imports)] +use vstd::bits::*; +#[allow(unused_imports)] +use vstd::calc; +use vstd::prelude::*; + +verus! { + +pub proof fn lemma_52_52(x: u64, y: u64) +requires + x < (1u64 << 52), + y < (1u64 << 52), +ensures (x as u128) * (y as u128) < (1u128 << 104) +{ + assert(1u128 << 52 == 1u64 << 52) by (bit_vector); + calc! 
{ + (<) + (x as u128) * (y as u128); (<=) { + if x > 0 { + lemma_mul_strict_inequality(y as int, (1u128 << 52) as int, x as int); + assert( x * y < x * (1u128 << 52) ); + } else { + assert((0 as u128) * (y as u128) == 0); + } + } + (x as u128) * (1u128 << 52); (<) { + lemma_mul_strict_inequality(x as int, (1u128 << 52) as int, (1u128 << 52) as int); + } + (1u128 << 52) * (1u128 << 52); + } + assert((1u128 << 52) * (1u128 << 52) == (1u128 << 104)) by (compute); +} + +pub proof fn lemma_square_internal_no_overflow() + ensures + (1u128 << 105) + (1u128 << 105) == (1u128 << 106), + (1u128 << 105) + (1u128 << 104) < (1u128 << 106), + (1u128 << 104) * 2 == (1u128 << 105), + (1u128 << 106) + (1u128 << 104) < (1u128 << 107), +{ + assert((1u128 << 105) + (1u128 << 105) == (1u128 << 106)) by (bit_vector); + assert((1u128 << 105) + (1u128 << 104) < (1u128 << 106)) by (bit_vector); + assert((1u128 << 104) * 2 == (1u128 << 105)) by (bit_vector); + assert((1u128 << 106) + (1u128 << 104) < (1u128 << 107)) by (bit_vector); +} + + +pub proof fn lemma_square_internal_correct(a: &[u64; 5], z: &[u128; 9]) + requires + forall|i: int| 0 <= i < 5 ==> a[i] < (1u64 << 52), + z[0] == (a[0] * a[0]) , + z[1] == (a[0] * a[1]) * 2, + z[2] == (a[0] * a[2]) * 2 + (a[1] * a[1]) , + z[3] == (a[0] * a[3]) * 2 + (a[1] * a[2]) * 2, + z[4] == (a[0] * a[4]) * 2 + (a[1] * a[3]) * 2 + (a[2] * a[2]) , + z[5] == (a[1] * a[4]) * 2 + (a[2] * a[3]) * 2, + z[6] == (a[2] * a[4]) * 2 + (a[3] * a[3]) , + z[7] == (a[3] * a[4]) * 2, + z[8] == (a[4] * a[4]) , + ensures + slice128_to_nat(z) == to_nat(a) * to_nat(a), + +{ + assert(five_limbs_to_nat_aux(*a) * five_limbs_to_nat_aux(*a) == nine_limbs_to_nat_aux(&z)) by { + broadcast use group_mul_is_commutative_and_distributive; + broadcast use lemma_mul_is_associative; + + lemma_pow2_adds(52, 52); + lemma_pow2_adds(52, 104); + lemma_pow2_adds(52, 156); + lemma_pow2_adds(52, 208); + lemma_pow2_adds(104, 104); + lemma_pow2_adds(104, 156); + lemma_pow2_adds(104, 208); + lemma_pow2_adds(156, 156); + lemma_pow2_adds(156, 208); + lemma_pow2_adds(208, 208); + }; + lemma_nine_limbs_equals_slice128_to_nat(&z); + lemma_five_limbs_equals_to_nat(&a); +} + +pub proof fn lemma_mul_internal_no_overflow() + ensures + (1u128 << 104) + (1u128 << 104) == (1u128 << 105), + 3u128 * (1u128 << 104) < (1u128 << 106), + 4u128 * (1u128 << 104) == (1u128 << 2) * (1u128 << 104), + (1u128 << 2) * (1u128 << 104) == (1u128 << 106), + 8u128 == (1u128 << 3), + (1u128 << 3) * (1u128 << 104) == (1u128 << 107), +{ + assert((1u128 << 104) + (1u128 << 104) == (1u128 << 105)) by (bit_vector); + assert(3u128 * (1u128 << 104) < (1u128 << 106)) by (bit_vector); + assert(4u128 * (1u128 << 104) == (1u128 << 2) * (1u128 << 104)) by (bit_vector); + assert((1u128 << 2) * (1u128 << 104) == (1u128 << 106)) by (bit_vector); + assert(8u128 == (1u128 << 3)) by (bit_vector); + assert((1u128 << 3) * (1u128 << 104) == (1u128 << 107)) by (bit_vector); +} + +pub proof fn lemma_mul_internal_correct(a: &[u64; 5], b: &[u64; 5], z: &[u128; 9]) + requires + forall|i: int| 0 <= i < 5 ==> a[i] < (1u64 << 52), + forall|i: int| 0 <= i < 5 ==> b[i] < (1u64 << 52), + z[0] == (a[0] * b[0]), + z[1] == (a[0] * b[1]) + (a[1] * b[0]), + z[2] == (a[0] * b[2]) + (a[1] * b[1]) + (a[2] * b[0]), + z[3] == (a[0] * b[3]) + (a[1] * b[2]) + (a[2] * b[1]) + (a[3] * b[0]), + z[4] == (a[0] * b[4]) + (a[1] * b[3]) + (a[2] * b[2]) + (a[3] * b[1]) + (a[4] * b[0]), + z[5] == (a[1] * b[4]) + (a[2] * b[3]) + (a[3] * b[2]) + (a[4] * b[1]), + z[6] == (a[2] * b[4]) + (a[3] * b[3]) + 
(a[4] * b[2]), + z[7] == (a[3] * b[4]) + (a[4] * b[3]), + z[8] == (a[4] * b[4]), + ensures + slice128_to_nat(z) == to_nat(a) * to_nat(b), +{ + assert(five_limbs_to_nat_aux(*a) * five_limbs_to_nat_aux(*b) == nine_limbs_to_nat_aux(&z)) by { + broadcast use group_mul_is_commutative_and_distributive; + broadcast use lemma_mul_is_associative; + + lemma_pow2_adds(52, 52); + lemma_pow2_adds(52, 104); + lemma_pow2_adds(52, 156); + lemma_pow2_adds(52, 208); + lemma_pow2_adds(104, 104); + lemma_pow2_adds(104, 156); + lemma_pow2_adds(104, 208); + lemma_pow2_adds(156, 156); + lemma_pow2_adds(156, 208); + lemma_pow2_adds(208, 208); + }; + lemma_nine_limbs_equals_slice128_to_nat(&z); + lemma_five_limbs_equals_to_nat(&a); + lemma_five_limbs_equals_to_nat(&b); +} + + +pub proof fn lemma_nine_limbs_equals_slice128_to_nat(limbs: &[u128; 9]) +ensures + nine_limbs_to_nat_aux(limbs) == slice128_to_nat(limbs) +{ + + let seq = limbs@.map(|i, x| x as nat); + + calc! { + (==) + slice128_to_nat(limbs); { + } + seq_to_nat(seq); { + reveal_with_fuel(seq_to_nat, 10); + } + (limbs[0] as nat) + + ((limbs[1] as nat) + + ((limbs[2] as nat) + + ((limbs[3] as nat) + + ((limbs[4] as nat) + + ((limbs[5] as nat) + + ((limbs[6] as nat) + + ((limbs[7] as nat) + + (limbs[8] as nat) * pow2(52) + ) * pow2(52) + ) * pow2(52) + ) * pow2(52) + ) * pow2(52) + ) * pow2(52) + ) * pow2(52) + ) * pow2(52); { + lemma_pow2_adds(52, 52); + lemma_pow2_adds(104, 52); + lemma_pow2_adds(156, 52); + lemma_pow2_adds(208, 52); + lemma_pow2_adds(260, 52); + lemma_pow2_adds(312, 52); + lemma_pow2_adds(364, 52); + broadcast use group_mul_is_distributive; + broadcast use lemma_mul_is_associative; + } + nine_limbs_to_nat_aux(limbs); + } +} + +pub proof fn lemma_five_limbs_equals_to_nat(limbs: &[u64; 5]) +ensures + five_limbs_to_nat_aux(*limbs) == to_nat(limbs) +{ + let seq = limbs@.map(|i, x| x as nat); + + calc! 
{ + (==) + to_nat(limbs); { + } + seq_to_nat(seq); { + reveal_with_fuel(seq_to_nat, 6); + } + (limbs[0] as nat) + + ((limbs[1] as nat) + + ((limbs[2] as nat) + + ((limbs[3] as nat) + + (limbs[4] as nat) * pow2(52) + ) * pow2(52) + ) * pow2(52) + ) * pow2(52); { + lemma_pow2_adds(52, 52); + lemma_pow2_adds(104, 52); + lemma_pow2_adds(156, 52); + broadcast use group_mul_is_distributive; + broadcast use lemma_mul_is_associative; + } + (limbs[0] as nat) + + pow2(52) * (limbs[1] as nat) + + pow2(104) * (limbs[2] as nat) + + pow2(156) * (limbs[3] as nat) + + pow2(208) * (limbs[4] as nat); { + } + five_limbs_to_nat_aux(*limbs); + } +} + + +pub proof fn lemma_scalar_subtract_no_overflow(carry: u64, difference_limb: u64, addend: u64, i: u32, l_value: &Scalar52) + requires + i < 5, + difference_limb < (1u64 << 52), + addend == 0 || addend == l_value.limbs[i as int], + i == 0 ==> carry == 0, + i >= 1 ==> (carry >> 52) < 2, + l_value.limbs[0] == 0x0002631a5cf5d3ed, + l_value.limbs[1] == 0x000dea2f79cd6581, + l_value.limbs[2] == 0x000000000014def9, + l_value.limbs[3] == 0x0000000000000000, + l_value.limbs[4] == 0x0000100000000000, + ensures + (carry >> 52) + difference_limb + addend < (1u64 << 53), +{ + if i == 0 { + assert(0x0002631a5cf5d3ed < (1u64 << 52)) by (bit_vector); + } else if i == 1 { + assert(0x000dea2f79cd6581 < (1u64 << 52)) by (bit_vector); + } else if i == 2 { + assert(0x000000000014def9 < (1u64 << 52)) by (bit_vector); + } else if i == 3 { + } else { + assert(0x0000100000000000 < (1u64 << 52)) by (bit_vector); + } + if i == 0 { + assert((0u64 >> 52) == 0) by (bit_vector); + } + assert(2 * (1u64 << 52) == (1u64 << 53)) by (bit_vector); +} + +pub proof fn lemma_borrow_and_mask_bounded(borrow: u64, mask: u64) + requires + mask == (1u64 << 52) - 1, + ensures + (borrow & mask) < (1u64 << 52), +{ + assert((borrow & mask) <= mask) by (bit_vector); +} + +pub proof fn lemma_carry_bounded_after_mask(carry: u64, mask: u64) + requires + mask == (1u64 << 52) - 1, + carry < (1u64 << 53), + ensures + (carry & mask) < (1u64 << 52), + (carry >> 52) <= 1, +{ + assert((carry & mask) <= mask) by (bit_vector); + assert((1u64 << 53) == 2 * (1u64 << 52)) by (bit_vector); + broadcast use lemma_u64_shr_is_div; + lemma_pow2_pos(52); + shift_is_pow2(52); + assert(carry >> 52 == carry / (1u64 << 52)); + lemma_fundamental_div_mod(carry as int, (1u64 << 52) as int); + let q = carry / (1u64 << 52); + let r = carry % (1u64 << 52); + lemma_mul_strict_inequality_converse(q as int, 2int, (1u64 << 52) as int); +} + +pub proof fn lemma_add_loop_bounds(i: int, carry: u64, a_limb: u64, b_limb: u64) + requires + 0 <= i < 5, + a_limb < (1u64 << 52), + b_limb < (1u64 << 52), + i == 0 ==> carry == 0, + i >= 1 ==> (carry >> 52) < 2, + ensures + (carry >> 52) + a_limb + b_limb < (1u64 << 53), +{ + if i == 0 { + assert((0u64 >> 52) == 0) by (bit_vector); + } + assert((1u64 << 52) + (1u64 << 52) == (1u64 << 53)) by (bit_vector); +} + +pub proof fn lemma_add_carry_and_sum_bounds(carry: u64, mask: u64) + requires + mask == (1u64 << 52) - 1, + carry < (1u64 << 53), + ensures + (carry & mask) < (1u64 << 52), + (carry >> 52) < 2, +{ + assert((carry & mask) <= mask) by (bit_vector); + assert((1u64 << 53) == 2 * (1u64 << 52)) by (bit_vector); + broadcast use lemma_u64_shr_is_div; + lemma_pow2_pos(52); + shift_is_pow2(52); + assert(carry >> 52 == carry / (1u64 << 52)); + lemma_fundamental_div_mod(carry as int, (1u64 << 52) as int); + let q = carry / (1u64 << 52); + let r = carry % (1u64 << 52); + lemma_mul_strict_inequality_converse(q as 
int, 2int, (1u64 << 52) as int);
+}
+
+pub proof fn lemma_l_value_properties(l_value: &Scalar52, sum: &Scalar52)
+    requires
+        l_value.limbs[0] == 0x0002631a5cf5d3ed,
+        l_value.limbs[1] == 0x000dea2f79cd6581,
+        l_value.limbs[2] == 0x000000000014def9,
+        l_value.limbs[3] == 0x0000000000000000,
+        l_value.limbs[4] == 0x0000100000000000,
+        forall|j: int| 0 <= j < 5 ==> sum.limbs[j] < (1u64 << 52),
+    ensures
+        forall|j: int| 0 <= j < 5 ==> l_value.limbs[j] < (1u64 << 52),
+{
+    assert(0x0002631a5cf5d3ed < (1u64 << 52)) by (bit_vector);
+    assert(0x000dea2f79cd6581 < (1u64 << 52)) by (bit_vector);
+}
+
+pub proof fn lemma_from_montgomery_limbs_conversion(
+    limbs: &[u128; 9],
+    self_limbs: &[u64; 5]
+)
+    requires
+        forall|j: int| #![auto] 0 <= j < 5 ==> limbs[j] == self_limbs[j] as u128,
+        forall|j: int| 5 <= j < 9 ==> limbs[j] == 0,
+    ensures
+        slice128_to_nat(limbs) == to_nat(self_limbs),
+{
+    lemma_nine_limbs_equals_slice128_to_nat(limbs);
+    lemma_five_limbs_equals_to_nat(self_limbs);
+    assert(limbs[0] == self_limbs[0] as u128);
+    assert(nine_limbs_to_nat_aux(limbs) == (self_limbs[0] as nat)
+        + (self_limbs[1] as nat) * pow2(52)
+        + (self_limbs[2] as nat) * pow2(104)
+        + (self_limbs[3] as nat) * pow2(156)
+        + (self_limbs[4] as nat) * pow2(208)
+        + 0 * pow2(260) + 0 * pow2(312) + 0 * pow2(364) + 0 * pow2(416));
+}
+
+pub proof fn lemma_rr_limbs_bounded()
+    ensures
+        0x000d63c715bea69fu64 < (1u64 << 52),
+{
+    // Verus can figure out that the other 4 limbs are bounded
+    assert(0x000d63c715bea69fu64 < (1u64 << 52)) by (bit_vector);
+}
+
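+// Informal orientation for the next lemma (a comment only, not used by the verifier):
+// with limbs read in radix 2^52, a three-limb sequence s = [s0, s1, s2] has
+//     seq_u64_to_nat(s) = s0 + s1 * pow2(52) + s2 * pow2(104),
+// so extending a prefix by one limb adds s[i] * pow2(52 * i). That is exactly the
+// identity stated by lemma_seq_u64_to_nat_subrange_extend below.
+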
{ + (==) + seq_u64_to_nat(seq.subrange(0, 0 + 1 as int)); { + assert(seq.subrange(0, 1) == seq![seq[0]]); + } + seq_u64_to_nat(seq![seq[0]]); { + let single_elem = seq![seq[0]]; + let nat_single = single_elem.map(|idx, x| x as nat); + assert(nat_single == seq![seq[0] as nat]); + assert(seq_u64_to_nat(single_elem) == seq_to_nat(nat_single)); + assert(nat_single.len() == 1); + assert(seq_to_nat(nat_single) == nat_single[0] + seq_to_nat(nat_single.subrange(1, 1)) * pow2(52)); + assert(nat_single.subrange(1, 1).len() == 0); + assert(seq_to_nat(nat_single.subrange(1, 1)) == 0); + assert(seq_to_nat(nat_single) == nat_single[0]); + assert(nat_single[0] == seq[0] as nat); + } + seq[0] as nat; { + assert(pow2(0) == 1) by {lemma2_to64();} + assert(52 * 0 == 0); + assert(pow2(52 * 0 as nat) == pow2(0)); + assert((seq[0] * pow2(0)) as nat == (seq[0] * 1) as nat); + assert((seq[0] * 1) as nat == seq[0] as nat); + } + (seq[0] * pow2(52 * 0 as nat)) as nat; { + lemma_empty_seq_as_nat(seq); + } + (seq_u64_to_nat(seq.subrange(0, 0)) + seq[0] * pow2(52 * 0 as nat)) as nat; + } + return; + } + else { + let limbs1 = seq.subrange(0, i + 1).map(|i, x| x as nat); + let limbs2 = seq.subrange(0, i).map(|i, x| x as nat); + calc! { + (==) + seq_u64_to_nat(seq.subrange(0, i + 1)); { + assert( seq_to_nat(limbs1) == limbs1[0] + seq_to_nat(limbs1.subrange(1, limbs1.len() as int)) * pow2(52)); + } + limbs1[0] + seq_to_nat(limbs1.subrange(1, limbs1.len() as int)) * pow2(52); { + assert(seq.subrange(1, i + 1).map(|i, x| x as nat) == limbs1.subrange(1, limbs1.len() as int)); + } + limbs1[0] + seq_u64_to_nat(seq.subrange(1, i + 1)) * pow2(52); { + let tail = seq.subrange(1, i + 1); + assert(i >= 1); + assert(0 <= i-1 < tail.len()); + lemma_seq_u64_to_nat_subrange_extend(tail, i-1); + assert(seq_u64_to_nat(tail.subrange(0, i)) == + seq_u64_to_nat(tail.subrange(0, i - 1)) + tail[i -1 ] * pow2(52 * (i-1) as nat)); + assert( tail.subrange(0, i) == seq.subrange(1, i + 1) ); + assert( tail.subrange(0, i - 1) == seq.subrange(1, i ) ); + assert(seq_u64_to_nat(seq.subrange(1, i + 1)) == + seq_u64_to_nat(seq.subrange(1, i )) + seq[i] * pow2(52 * (i-1) as nat)); + } + limbs1[0] + ((seq_u64_to_nat(seq.subrange(1, i )) + seq[i] * pow2(52 * (i-1) as nat)) * pow2(52)) as nat; { + broadcast use lemma_mul_is_distributive_add_other_way; + } + (limbs1[0] + seq_u64_to_nat(seq.subrange(1, i )) * pow2(52) + seq[i] * pow2(52 * (i-1) as nat) * pow2(52)) as nat; { + broadcast use lemma_mul_is_associative; + lemma_pow2_adds(52 * (i-1) as nat, 52); + } + (limbs1[0] + seq_u64_to_nat(seq.subrange(1, i )) * pow2(52) + seq[i] * pow2(52 * i as nat)) as nat; { + assert(seq.subrange(1, i ).map(|i, x| x as nat) == limbs2.subrange(1, limbs2.len() as int)); + } + (limbs2[0] + seq_to_nat(limbs2.subrange(1, limbs2.len() as int)) * pow2(52) + seq[i] * pow2(52 * i as nat)) as nat; { + assert( seq_to_nat(limbs2) == limbs2[0] + seq_to_nat(limbs2.subrange(1, limbs2.len() as int)) * pow2(52)); + } + (seq_to_nat(limbs2) + seq[i] * pow2(52 * i as nat)) as nat; { + } + (seq_u64_to_nat(seq.subrange(0, i)) + seq[i] * pow2(52 * i as nat)) as nat; + + } + } +} + +/// Verus times out when this assertion is inside +/// lemma_seq_u64_to_nat_subrange_extend +pub proof fn lemma_empty_seq_as_nat(a: Seq) + ensures seq_u64_to_nat(a.subrange(0, 0)) == 0 +{ + assert(seq_u64_to_nat(a.subrange(0, 0)) == 0); +} + + +/// Using lemma_mod_add_multiples_vanish in a big proof made the proof hang +pub proof fn lemma_mod_cancel(a: &Scalar52, b: &Scalar52) + ensures (group_order() + 
to_nat(&a.limbs) - to_nat(&b.limbs)) % (group_order() as int) == + (to_nat(&a.limbs) - to_nat(&b.limbs)) % (group_order() as int) +{ + lemma_mod_add_multiples_vanish((to_nat(&a.limbs) - to_nat(&b.limbs)) as int, group_order() as int); +} + + +/// The corollary of limbs_bounded(a) +pub proof fn lemma_bound_scalar(a: &Scalar52) + requires limbs_bounded(a) + ensures to_nat(&a.limbs) < pow2((52 * (5) as nat)) +{ + lemma_general_bound(a.limbs@); +} + +/// The general case of lemma_bound_scalar so we +/// can prove via straightforward induction. +pub proof fn lemma_general_bound(a: Seq) + requires forall|i: int| 0 <= i < a.len() ==> a[i] < (1u64 << 52) + ensures seq_u64_to_nat(a) < pow2((52 * a.len() as nat)) + decreases a.len() +{ + if a.len() == 0 { + assert(seq_u64_to_nat(a) == 0); + lemma2_to64(); // Gives us pow2(0) == 1 among other facts + assert(pow2(0) == 1); + } else { + // Inductive case + let tail = a.subrange(1, a.len() as int); + + // Apply induction hypothesis on tail + assert(forall|i: int| 0 <= i < tail.len() ==> tail[i] < (1u64 << 52)) by { + assert(forall|i: int| 0 <= i < tail.len() ==> tail[i] == a[i + 1]); + }; + + assert(tail.len() == a.len() - 1); + + // Apply induction hypothesis + lemma_general_bound(tail); + assert(seq_u64_to_nat(tail) < pow2((52 * tail.len() as nat))); + + // Now prove for the full sequence + assert(seq_u64_to_nat(a) == seq_to_nat(a.map(|i, x| x as nat))); + assert(a.map(|i, x| x as nat).len() == a.len()); + assert(a.map(|i, x| x as nat)[0] == a[0] as nat); + assert(a.map(|i, x| x as nat).subrange(1, a.len() as int) == a.subrange(1, a.len() as int).map(|i, x| x as nat)); + // Therefore: + assert(seq_u64_to_nat(a) == a[0] as nat + seq_u64_to_nat(a.subrange(1, a.len() as int)) * pow2(52)); + + assert(a.subrange(1, a.len() as int) == tail); + + // From precondition + assert(a[0] < (1u64 << 52)); + lemma2_to64_rest(); + assert(0x10000000000000 == 1u64 << 52) by (compute_only); + assert(0x10000000000000 == pow2(52)); + assert((1u64 << 52) == pow2(52)); + + // We have seq_u64_to_nat(a) == a[0] + seq_u64_to_nat(tail) * pow2(52) + // We know a[0] < pow2(52) and seq_u64_to_nat(tail) < pow2(52 * (a.len() - 1)) + + + assert(a[0] as nat <= pow2(52) - 1); + assert(seq_u64_to_nat(tail) <= pow2(52 * (a.len() - 1) as nat) - 1); + + assert(seq_u64_to_nat(a) <= (pow2(52) - 1) + (pow2(52 * (a.len() - 1) as nat) - 1) * pow2(52)) by { + lemma_mul_inequality((pow2(52 * (a.len() - 1) as nat) - 1) as int, pow2(52 * (a.len() - 1) as nat) as int, pow2(52) as int); + }; + + // Expand the right side + assert((pow2(52) - 1) + (pow2(52 * (a.len() - 1) as nat) - 1) * pow2(52) == + pow2(52) - 1 + pow2(52 * (a.len() - 1) as nat) * pow2(52) - pow2(52)) by { + broadcast use lemma_mul_is_distributive_sub; + }; + + assert(pow2(52) - 1 + pow2(52 * (a.len() - 1) as nat) * pow2(52) - pow2(52) == + pow2(52 * (a.len() - 1) as nat) * pow2(52) - 1); + + lemma_pow2_adds(52 * (a.len() - 1) as nat, 52); + assert(pow2(52 * (a.len() - 1) as nat) * pow2(52) == pow2(52 * (a.len() - 1) as nat + 52)); + assert(52 * (a.len() - 1) as nat + 52 == 52 * a.len() as nat); + + assert(seq_u64_to_nat(a) <= pow2(52 * a.len() as nat) - 1); + assert(seq_u64_to_nat(a) < pow2(52 * a.len() as nat)); + } +} + +/// Claude wrote this proof. Could the proof be shorter? +/// Moved this to a lemma to get under rlimit. 
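+/// Illustrative sanity check with hypothetical values (not used by the proof),
+/// showing the decomposition this lemma states at a concrete `a`:
+/// ```
+/// let mask = (1u64 << 52) - 1;
+/// let a = 0x0030_0000_0000_0007u64;
+/// assert_eq!(a >> 52, 3);
+/// assert_eq!(a & mask, 7);
+/// assert_eq!((a >> 52) * (1u64 << 52) + (a & mask), a);
+/// ```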
+pub proof fn lemma_decompose(a: u64, mask: u64) + requires mask == (1u64 << 52) - 1 + ensures a == (a >> 52) * pow2(52) + (a & mask) +{ + // First, establish that bit shifting is division by pow2(52) + broadcast use lemma_u64_shr_is_div; + lemma_pow2_pos(52); + shift_is_pow2(52); + lemma2_to64_rest(); // Establishes pow2(52) == 0x10000000000000 + assert((1u64 << 52) == 0x10000000000000) by (bit_vector); + assert(pow2(52) == 0x10000000000000); + assert((1u64 << 52) as nat == pow2(52)); + + assert(a >> 52 == a / (1u64 << 52)); + + // Apply fundamental division theorem: a = q * d + r + lemma_fundamental_div_mod(a as int, pow2(52) as int); + let q = a as nat / pow2(52); + let r = a as nat % pow2(52); + assert(a as nat == q * pow2(52) + r); + assert(0 <= r < pow2(52)); + + // Now prove that (a & mask) == r + // mask is all 1s in the lower 52 bits + assert(mask == (1u64 << 52) - 1); + + // Key insight: a & mask gives us the lower 52 bits, which is exactly a % pow2(52) + lemma_u64_low_bits_mask_is_mod(a, 52); + assert(a & mask == a % (1u64 << 52)); + assert((a % (1u64 << 52)) as nat == a as nat % pow2(52)); + assert((a & mask) as nat == r); + + // Now show that a >> 52 == q + assert((a >> 52) as nat == a as nat / pow2(52)); + assert((a >> 52) as nat == q); + + // Combine everything + assert(a as nat == (a >> 52) as nat * pow2(52) + (a & mask) as nat); +} + +/// The loop invariant says that subtraction is correct if we only subtract +/// the first i items of each array, plus there's a borrow term. +/// The first parts of the calc statement expand using the previous invariant. +/// Then we have cases depending if the wrapping_sub wrapped. +/// If it didn't wrap, we show that borrow must be small, and borrow >> 52 == 0. +/// If it did wrap, we show that borrow is so large that its bit-shifts are all +/// the maximum amount. +/// Either way, we then use the preconditions about what was mutated, +/// and shuffle around the powers of 52. +pub proof fn lemma_sub_loop1_invariant(difference: Scalar52, borrow: u64, i: usize, a: &Scalar52, b: &Scalar52, old_borrow: u64, mask: u64, difference_loop1_start: Scalar52) + requires + limbs_bounded(a), + limbs_bounded(b), + 0 <= i < 5, + forall|j: int| 0 <= j < i ==> difference.limbs[j] < (1u64 << 52), + mask == (1u64 << 52) - 1, + seq_u64_to_nat(a.limbs@.subrange(0, i as int)) - seq_u64_to_nat(b.limbs@.subrange(0, i as int )) == + seq_u64_to_nat(difference_loop1_start.limbs@.subrange(0, i as int )) - (old_borrow >> 63) * pow2((52 * (i) as nat)), + difference_loop1_start.limbs@.subrange(0, i as int) == difference.limbs@.subrange(0, i as int), + difference.limbs[i as int] == borrow & mask, + borrow == a.limbs[i as int].wrapping_sub((b.limbs[i as int] + (old_borrow >> 63)) as u64) + ensures + seq_u64_to_nat(difference.limbs@.subrange(0, i + 1)) - (borrow >> 63) * pow2((52 * (i + 1) as nat)) + == seq_u64_to_nat(a.limbs@.subrange(0, i + 1)) - seq_u64_to_nat(b.limbs@.subrange(0, i + 1)) +{ + calc! 
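+ // Reminder of the wrapping_sub convention used below: with limbs bounded by 2^52,
+ // the subtraction underflows exactly when the top bit of `borrow` is set, e.g.
+ // 5u64.wrapping_sub(7) == u64::MAX - 1 and (u64::MAX - 1) >> 63 == 1, whereas a
+ // non-wrapped result stays below 2^52 and has borrow >> 63 == 0.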
{ + (==) + seq_u64_to_nat(a.limbs@.subrange(0, i + 1)) - seq_u64_to_nat(b.limbs@.subrange(0, i + 1)); { + lemma_seq_u64_to_nat_subrange_extend(a.limbs@, i as int); + lemma_seq_u64_to_nat_subrange_extend(b.limbs@, i as int); + } + seq_u64_to_nat(a.limbs@.subrange(0, i as int)) + a.limbs[i as int] * pow2(52 * i as nat) - + (seq_u64_to_nat(b.limbs@.subrange(0, i as int)) + b.limbs[i as int] * pow2(52 * i as nat)); { + broadcast use lemma_mul_is_distributive_sub_other_way; + } + seq_u64_to_nat(a.limbs@.subrange(0, i as int)) - seq_u64_to_nat(b.limbs@.subrange(0, i as int)) + + (a.limbs[i as int] - b.limbs[i as int]) * pow2(52 * i as nat); { + assert(difference_loop1_start.limbs@.subrange(0, i as int) == difference.limbs@.subrange(0, i as int)); + // Use loop invariant + } + seq_u64_to_nat(difference.limbs@.subrange(0, i as int)) - (old_borrow >> 63) * pow2(52 * i as nat) + + (a.limbs[i as int] - b.limbs[i as int]) * pow2(52 * i as nat); { + broadcast use lemma_mul_is_distributive_sub_other_way; + } + seq_u64_to_nat(difference.limbs@.subrange(0, i as int)) + + (a.limbs[i as int] - b.limbs[i as int] - (old_borrow >> 63)) * pow2(52 * i as nat); { + assert(borrow == a.limbs[i as int].wrapping_sub((b.limbs[i as int] + (old_borrow >> 63)) as u64)); + assert(difference.limbs[i as int] == borrow & mask); + // Expand wrapping sub + if a.limbs[i as int] - ((b.limbs[i as int] + (old_borrow >> 63)) as u64) < 0 { + + assert(borrow >= 0x1_0000_0000_0000_0000 - (1u64<<52)) by { + assert(old_borrow >> 63 <= 1) by (bit_vector); + assert(b.limbs[i as int] <= (1u64 << 52) - 1); + assert(borrow == (a.limbs[i as int] - ((b.limbs[i as int] + (old_borrow >> 63)) as u64) + 0x1_0000_0000_0000_0000) as u64); + assert((b.limbs[i as int] + (old_borrow >> 63)) as u64 <= 1u64 << 52); + assert(borrow >= (a.limbs[i as int] - (1u64 << 52) + 0x1_0000_0000_0000_0000) as u64); + }; + calc! 
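+ // Underflow case: borrow == a.limbs[i] - (b.limbs[i] + (old_borrow >> 63)) + 2^64,
+ // so subtracting 2^64 recovers the true (negative) limb difference. The calc below
+ // rewrites the 2^64 * pow2(52 * i) term as (1u64 << 12) * pow2(52 * (i + 1)) and uses
+ // borrow >> 52 == (1u64 << 12) - 1, so the carry terms collapse to
+ // -(borrow >> 63) * pow2(52 * (i + 1)).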
{ + (==) + seq_u64_to_nat(difference.limbs@.subrange(0, i as int)) + + (a.limbs[i as int] - b.limbs[i as int] - (old_borrow >> 63)) * pow2(52 * i as nat); { + assert(borrow == (a.limbs[i as int] - ((b.limbs[i as int] + (old_borrow >> 63)) as u64) + 0x1_0000_0000_0000_0000) as u64); + assert(b.limbs[i as int] < 1u64 << 52); + assert(old_borrow >> 63 <= 1) by (bit_vector); + } + seq_u64_to_nat(difference.limbs@.subrange(0, i as int)) + (borrow - 0x1_0000_0000_0000_0000) * pow2(52 * i as nat); { + lemma_decompose(borrow, mask); + assert(borrow == (borrow >> 52) * pow2(52) + difference.limbs[i as int]); + } + seq_u64_to_nat(difference.limbs@.subrange(0, i as int)) + + ((borrow >> 52) * pow2(52) + difference.limbs[i as int] - 0x1_0000_0000_0000_0000) * pow2(52 * i as nat); { + assert(pow2(52) * pow2(52 * i as nat) == pow2(52 + 52 * i as nat)) by {broadcast use lemma_pow2_adds;}; + assert(52 + 52 * i as nat == 52 * (i+1) as nat); + broadcast use lemma_mul_is_distributive_add_other_way; + broadcast use lemma_mul_is_distributive_sub_other_way; + assert((borrow >> 52) as nat * pow2(52) * pow2(52 * i as nat) == (borrow >> 52) as nat * pow2(52 * (i+1) as nat)) by { + assert(pow2(52) * pow2(52 * i as nat) == pow2(52 * (i+1) as nat)); + lemma_mul_is_associative((borrow >> 52) as int, pow2(52) as int, pow2(52 * i as nat) as int); + }; + } + seq_u64_to_nat(difference.limbs@.subrange(0, i as int)) + + (borrow >> 52) * pow2(52 * (i+1) as nat) + difference.limbs[i as int] * pow2(52 * i as nat) - + 0x1_0000_0000_0000_0000 * pow2(52 * i as nat); { + lemma_seq_u64_to_nat_subrange_extend(difference.limbs@, i as int); + } + seq_u64_to_nat(difference.limbs@.subrange(0, i + 1)) + + (borrow >> 52) * pow2(52 * (i+1) as nat) - 0x1_0000_0000_0000_0000 * pow2(52 * i as nat); { + assert(borrow >> 52 == (1u64<<12) - 1) by (bit_vector) + requires borrow >= 0x1_0000_0000_0000_0000 - (1u64<<52); + assert( 0x1_0000_0000_0000_0000 * pow2(52 * i as nat) == (1u64 << 12) * pow2(52 * (i + 1) as nat) ) by + { + lemma2_to64(); + assert(0x1_0000_0000_0000_0000 == pow2(64)); + assert(1u64 << 12 == pow2(12)) by (compute); + lemma_pow2_adds(64, 52 * i as nat); + lemma_pow2_adds(12, 52 * (i + 1) as nat); + assert(64 + 52 * i as nat == 12 + 52 * (i + 1) as nat); + } + lemma_mul_is_distributive_sub_other_way(pow2(52 * (i+1) as nat) as int, (1u64<<12) - 1, (1u64 << 12) as int); + } + seq_u64_to_nat(difference.limbs@.subrange(0, i + 1)) + + (-1) * pow2(52 * (i+1) as nat) ; { + assert(borrow >> 63 == 1) by (bit_vector) + requires borrow >= 0x1_0000_0000_0000_0000 - (1u64<<52); + } + seq_u64_to_nat(difference.limbs@.subrange(0, i + 1)) - (borrow >> 63) * pow2((52 * (i + 1) as nat)); + } + } + else { + + calc! 
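+ // No-underflow case: borrow is the exact difference a.limbs[i] - (b.limbs[i] + (old_borrow >> 63))
+ // and is below 2^52, so borrow >> 52 == 0 and borrow >> 63 == 0, and the borrow term
+ // drops out of the extended invariant.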
{ + (==) + seq_u64_to_nat(difference.limbs@.subrange(0, i as int)) + + (a.limbs[i as int] - b.limbs[i as int] - (old_borrow >> 63)) * pow2(52 * i as nat); { + assert(borrow == (a.limbs[i as int] - ((b.limbs[i as int] + (old_borrow >> 63)) as u64)) as u64); + assert(b.limbs[i as int] < 1u64 << 52); + assert(old_borrow >> 63 <= 1) by (bit_vector); + } + seq_u64_to_nat(difference.limbs@.subrange(0, i as int)) + (borrow) * pow2(52 * i as nat); { + lemma_decompose(borrow, mask); + assert(borrow == (borrow >> 52) * pow2(52) + difference.limbs[i as int]); + } + seq_u64_to_nat(difference.limbs@.subrange(0, i as int)) + + ((borrow >> 52) * pow2(52) + difference.limbs[i as int]) * pow2(52 * i as nat); { + assert(pow2(52) * pow2(52 * i as nat) == pow2(52 + 52 * i as nat)) by {broadcast use lemma_pow2_adds;}; + assert(52 + 52 * i as nat == 52 * (i+1) as nat); + broadcast use lemma_mul_is_distributive_add_other_way; + assert((borrow >> 52) as nat * pow2(52) * pow2(52 * i as nat) == (borrow >> 52) as nat * pow2(52 * (i+1) as nat)) by { + assert(pow2(52) * pow2(52 * i as nat) == pow2(52 * (i+1) as nat)); + lemma_mul_is_associative((borrow >> 52) as int, pow2(52) as int, pow2(52 * i as nat) as int); + }; + } + seq_u64_to_nat(difference.limbs@.subrange(0, i as int)) + + (borrow >> 52) * pow2(52 * (i+1) as nat) + difference.limbs[i as int] * pow2(52 * i as nat); { + lemma_seq_u64_to_nat_subrange_extend(difference.limbs@, i as int); + assert (borrow < 1u64 << 52) by { + assert(borrow == (a.limbs[i as int] - ((b.limbs[i as int] + (old_borrow >> 63)) as u64)) as u64); + assert(a.limbs[i as int] < (1u64 << 52)); + assert((b.limbs[i as int] + (old_borrow >> 63)) as u64 >= 0); + } + assert(borrow >> 52 == 0) by (bit_vector) + requires borrow < 1u64 << 52; + assert(borrow >> 63 == 0) by (bit_vector) + requires borrow < 1u64 << 52; + } + seq_u64_to_nat(difference.limbs@.subrange(0, i + 1)) - (borrow >> 63) * pow2((52 * (i + 1) as nat)); + } + } + } + seq_u64_to_nat(difference.limbs@.subrange(0, i + 1)) - (borrow >> 63) * pow2((52 * (i + 1) as nat)); + } +} + +/// Just a proof by computation +pub(crate) proof fn lemma_l_equals_group_order() + ensures + to_nat(&constants::L.limbs) == group_order(), + seq_u64_to_nat(constants::L.limbs@.subrange(0, 5 as int)) == group_order() +{ + // First show that the subrange equals the full array + assert(constants::L.limbs@ == constants::L.limbs@.subrange(0, 5 as int)); + assert(seq_u64_to_nat(constants::L.limbs@) == seq_u64_to_nat(constants::L.limbs@.subrange(0, 5 as int))); + assert(to_nat(&constants::L.limbs) == seq_u64_to_nat(constants::L.limbs@)); + + assert(pow2(52) == 0x10000000000000) by {lemma2_to64_rest();}; + lemma_pow2_adds(52, 52); + assert(pow2(104) == 0x100000000000000000000000000); + lemma_pow2_adds(104, 104); + assert(pow2(208) == 0x10000000000000000000000000000000000000000000000000000); + lemma_pow252(); + lemma_five_limbs_equals_to_nat(&constants::L.limbs); + assert(five_limbs_to_nat_aux(constants::L.limbs) == group_order()) by (compute); +} + +pub proof fn lemma_pow252() + ensures pow2(252) == 0x1000000000000000000000000000000000000000000000000000000000000000 +{ + assert(pow2(63) == 0x8000000000000000) by {lemma2_to64_rest();} + lemma_pow2_adds(63, 63); + assert(pow2(126) == 0x40000000000000000000000000000000); + lemma_pow2_adds(126, 126); +} + +pub proof fn lemma_pow2_260_greater_than_2_group_order() + ensures pow2(260) > 2 * group_order() +{ + // The group order is approximately 2^252, so 2 * group_order ≈ 2^253 + // And 2^260 >> 2^253 + assert(pow2(260) == 
pow2(252) * pow2(8)) by { + lemma_pow2_adds(252, 8); + }; + assert(pow2(8) == 256) by { + lemma2_to64(); + }; + lemma_pow252(); + // Now Verus knows what the powers of 2 mean, so it can figure out the rest +} + +/// If borrow >> 63 == 0, we apply +/// (1) `-group_order() <= to_nat(&a.limbs) - to_nat(&b.limbs) < group_order()`, +/// and that's enough to show that to_nat(&difference.limbs) is between +/// 0 and group order. +/// If borrow >> 63 == 1, we apply (1) to show that carry >> 52 can't be 0. +/// This makes the excess terms in the borrow >> 63 == 1 precondition disappear +pub(crate) proof fn lemma_sub_correct_after_loops(difference: Scalar52, carry: u64, a: &Scalar52, b: &Scalar52, difference_after_loop1: Scalar52, borrow: u64) + requires + limbs_bounded(a), + limbs_bounded(b), + limbs_bounded(&difference), + limbs_bounded(&difference_after_loop1), + (carry >> 52) < 2, + -group_order() <= to_nat(&a.limbs) - to_nat(&b.limbs) < group_order(), + borrow >> 63 == 0 ==> difference_after_loop1 == difference, + borrow >> 63 == 1 ==> + seq_u64_to_nat(difference_after_loop1.limbs@.subrange(0, 5 as int)) + seq_u64_to_nat(constants::L.limbs@.subrange(0, 5 as int)) == + seq_u64_to_nat(difference.limbs@.subrange(0, 5 as int)) + (carry >> 52) * pow2(52 * 5 as nat), + seq_u64_to_nat(a.limbs@.subrange(0, 5 as int)) - seq_u64_to_nat(b.limbs@.subrange(0, 5 as int )) == + seq_u64_to_nat(difference_after_loop1.limbs@.subrange(0, 5 as int )) - (borrow >> 63) * pow2((52 * (5) as nat)), + ensures + to_nat(&difference.limbs) == (to_nat(&a.limbs) - to_nat(&b.limbs)) % (group_order() as int) +{ + assert(borrow >> 63 == 1 || borrow >> 63 == 0) by (bit_vector); + assert( seq_u64_to_nat(difference.limbs@.subrange(0, 5 as int)) == to_nat(&difference.limbs)) by { + assert( seq_u64_to_nat(difference.limbs@) == to_nat(&difference.limbs)); + assert( difference.limbs@ == difference.limbs@.subrange(0, 5 as int)); + } + assert( seq_u64_to_nat(b.limbs@.subrange(0, 5 as int)) == to_nat(&b.limbs)) by { + assert( seq_u64_to_nat(b.limbs@) == to_nat(&b.limbs)); + assert( b.limbs@ == b.limbs@.subrange(0, 5 as int)); + } + assert( seq_u64_to_nat(a.limbs@.subrange(0, 5 as int)) == to_nat(&a.limbs)) by { + assert( seq_u64_to_nat(a.limbs@) == to_nat(&a.limbs)); + assert( a.limbs@ == a.limbs@.subrange(0, 5 as int)); + } + if borrow >> 63 == 0 { + + assert( seq_u64_to_nat(a.limbs@.subrange(0, 5 as int)) - seq_u64_to_nat(b.limbs@.subrange(0, 5 as int )) == + seq_u64_to_nat(difference.limbs@.subrange(0, 5 as int )) - (borrow >> 63) * pow2((52 * (5) as nat)) ); + assert( seq_u64_to_nat(a.limbs@.subrange(0, 5 as int)) - seq_u64_to_nat(b.limbs@.subrange(0, 5 as int )) == + seq_u64_to_nat(difference.limbs@.subrange(0, 5 as int )) ); + assert( to_nat(&a.limbs) - to_nat(&b.limbs) == + to_nat(&difference.limbs) ); + assert(to_nat(&a.limbs) - to_nat(&b.limbs) >= 0); + assert(to_nat(&a.limbs) - to_nat(&b.limbs) < group_order()); + lemma_small_mod((to_nat(&a.limbs) - to_nat(&b.limbs)) as nat, group_order()); + assert(to_nat(&difference.limbs) == (to_nat(&a.limbs) - to_nat(&b.limbs)) % (group_order() as int)); + } + if borrow >> 63 == 1 { + assert( + seq_u64_to_nat(difference_after_loop1.limbs@.subrange(0, 5 as int)) + seq_u64_to_nat(constants::L.limbs@.subrange(0, 5 as int)) == + seq_u64_to_nat(difference.limbs@.subrange(0, 5 as int)) + (carry >> 52) * pow2(52 * 5 as nat) + ); + assert( + seq_u64_to_nat(difference_after_loop1.limbs@.subrange(0, 5 as int)) == + seq_u64_to_nat(difference.limbs@.subrange(0, 5 as int)) + (carry >> 52) * pow2(52 * 5 
as nat) + - seq_u64_to_nat(constants::L.limbs@.subrange(0, 5 as int)) + ); + assert(seq_u64_to_nat(a.limbs@.subrange(0, 5 as int)) - seq_u64_to_nat(b.limbs@.subrange(0, 5 as int )) == + seq_u64_to_nat(difference_after_loop1.limbs@.subrange(0, 5 as int )) - pow2((52 * (5) as nat)) ); + assert(seq_u64_to_nat(a.limbs@.subrange(0, 5 as int)) - seq_u64_to_nat(b.limbs@.subrange(0, 5 as int )) == + seq_u64_to_nat(difference.limbs@.subrange(0, 5 as int)) + (carry >> 52) * pow2(52 * 5 as nat) + - seq_u64_to_nat(constants::L.limbs@.subrange(0, 5 as int)) - pow2((52 * (5) as nat)) ); + assert(seq_u64_to_nat(constants::L.limbs@.subrange(0, 5 as int)) + + seq_u64_to_nat(a.limbs@.subrange(0, 5 as int)) - seq_u64_to_nat(b.limbs@.subrange(0, 5 as int )) == + seq_u64_to_nat(difference.limbs@.subrange(0, 5 as int)) + (carry >> 52) * pow2(52 * 5 as nat) + - pow2((52 * (5) as nat)) ); + if carry >> 52 == 0 { + // Get a contradiction because the sides in the above equation have different signs + assert(seq_u64_to_nat(constants::L.limbs@.subrange(0, 5 as int)) + + seq_u64_to_nat(a.limbs@.subrange(0, 5 as int)) - seq_u64_to_nat(b.limbs@.subrange(0, 5 as int )) >=0) by { + assert(seq_u64_to_nat(constants::L.limbs@.subrange(0, 5 as int)) >= group_order()) by { + lemma_l_equals_group_order(); + }; + assert(seq_u64_to_nat(a.limbs@.subrange(0, 5 as int)) == to_nat(&a.limbs)); + assert(seq_u64_to_nat(b.limbs@.subrange(0, 5 as int)) == to_nat(&b.limbs)); + }; + assert(seq_u64_to_nat(difference.limbs@.subrange(0, 5 as int)) < pow2((52 * (5) as nat))) by { + assert(seq_u64_to_nat(difference.limbs@.subrange(0, 5 as int)) == to_nat(&difference.limbs)); + lemma_bound_scalar(&difference); + }; + assert((carry >> 52) * pow2(52 * 5 as nat) == 0); + assert(false); + } + assert(carry >> 52 ==1); + assert(seq_u64_to_nat(constants::L.limbs@.subrange(0, 5 as int)) + + seq_u64_to_nat(a.limbs@.subrange(0, 5 as int)) - seq_u64_to_nat(b.limbs@.subrange(0, 5 as int )) == + seq_u64_to_nat(difference.limbs@.subrange(0, 5 as int)) ); + assert(seq_u64_to_nat(constants::L.limbs@.subrange(0, 5 as int)) + + to_nat(&a.limbs) - to_nat(&b.limbs) == + to_nat(&difference.limbs) ); + assert(to_nat(&constants::L.limbs) == group_order()) by { + lemma_l_equals_group_order(); + }; + assert(seq_u64_to_nat(constants::L.limbs@.subrange(0, 5 as int)) == group_order()) by { + lemma_l_equals_group_order(); + }; + assert(group_order() > 0); + calc! 
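+ // Here the true difference a - b is negative, so exactly one extra multiple of the
+ // group order was folded in. The calc below shows group_order() + a - b lies in
+ // [0, group_order()), applies lemma_small_mod, and then lemma_mod_cancel removes the
+ // added group_order() inside the modulus.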
{ + (==) + to_nat(&difference.limbs) as int; { + } + group_order() as int + to_nat(&a.limbs) - to_nat(&b.limbs); { + assert(group_order() as int + to_nat(&a.limbs) - to_nat(&b.limbs) < group_order()) by { + assert( seq_u64_to_nat(difference_after_loop1.limbs@.subrange(0, 5 as int)) == to_nat(&difference_after_loop1.limbs)) by { + assert( seq_u64_to_nat(difference_after_loop1.limbs@) == to_nat(&difference_after_loop1.limbs)); + assert( difference_after_loop1.limbs@ == difference_after_loop1.limbs@.subrange(0, 5 as int)); + } + assert(to_nat(&a.limbs) - to_nat(&b.limbs ) == + to_nat(&difference_after_loop1.limbs ) - pow2((52 * (5) as nat)) ); + lemma_bound_scalar(&difference_after_loop1); + }; + lemma_small_mod((group_order() as int + to_nat(&a.limbs) - to_nat(&b.limbs)) as nat, group_order()); + } + (group_order() as int + to_nat(&a.limbs) - to_nat(&b.limbs)) % (group_order() as int); { + lemma_mod_cancel(a, b); + } + (to_nat(&a.limbs) - to_nat(&b.limbs)) % (group_order() as int); + } + } +} + +/// Moving this out to get under rlimit +pub proof fn lemma_old_carry(old_carry: u64) + requires old_carry < 1u64 <<52, + ensures old_carry >> 52 == 0, +{ + assert(old_carry >> 52 == 0) by (bit_vector) + requires old_carry < 1u64 <<52; +} + +/// If borrow >> 63 == 0, we just prove that the loop step has no effect. +/// If borrow >> 63 == 1, we substitute in the loop's updates +/// like `difference.limbs[i as int] == carry & mask`. +/// In that case we're proving that subtraction is correct if we only +/// consider the first i items of each array, except there's also a +/// `(carry >> 52) * pow2(52 * (i+1) as nat)` term that doesn't go away. +pub(crate) proof fn lemma_sub_loop2_invariant(difference: Scalar52, i: usize, a: &Scalar52, b: &Scalar52, mask: u64, difference_after_loop1: Scalar52, difference_loop2_start: Scalar52, carry: u64, old_carry: u64, addend: u64, borrow: u64) + requires + 0 <= i < 5, + mask == (1u64 << 52) - 1, + forall|j: int| 0 <= j < 5 ==> difference_loop2_start.limbs[j] < (1u64 << 52), + forall|j: int| i <= j < 5 ==> difference_loop2_start.limbs[j] == difference_after_loop1.limbs[j], + forall|j: int| (0 <= j < 5 && j!=i) ==> difference_loop2_start.limbs[j] == difference.limbs[j], + mask == (1u64 << 52) - 1, + i == 0 ==> old_carry == 0, + i >= 1 ==> (old_carry >> 52) < 2, + (i >=1 && borrow >> 63 == 0) ==> old_carry == difference_loop2_start.limbs[i-1], + borrow >> 63 == 0 ==> difference_after_loop1 == difference_loop2_start, + borrow >> 63 == 1 ==> + seq_u64_to_nat(difference_after_loop1.limbs@.subrange(0, i as int)) + seq_u64_to_nat(constants::L.limbs@.subrange(0, i as int)) == + seq_u64_to_nat(difference_loop2_start.limbs@.subrange(0, i as int)) + (old_carry >> 52) * pow2(52 * i as nat), + difference.limbs[i as int] == carry & mask, + difference_loop2_start.limbs@.subrange(0, i as int) == difference.limbs@.subrange(0, i as int), + borrow >> 63 == 0 ==> addend == 0, + borrow >> 63 == 1 ==> addend == constants::L.limbs[i as int], + carry == (old_carry >> 52) + difference_loop2_start.limbs[i as int] + addend, + ensures + (i+1 >=1 && borrow >> 63 == 0) ==> carry == difference.limbs[i as int], + borrow >> 63 == 0 ==> difference_after_loop1 == difference, + borrow >> 63 == 1 ==> + seq_u64_to_nat(difference_after_loop1.limbs@.subrange(0, i+1 as int)) + seq_u64_to_nat(constants::L.limbs@.subrange(0, i+1 as int)) == + seq_u64_to_nat(difference.limbs@.subrange(0, i+1 as int)) + (carry >> 52) * pow2(52 * (i+1) as nat) +{ + if borrow >> 63 == 0 { + lemma_old_carry(old_carry); + 
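+ // No borrow: addend == 0 and old_carry < 2^52, so carry is just the limb already in
+ // difference_loop2_start; masking it is the identity and difference ends up equal to
+ // difference_after_loop1.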
assert(addend == 0); + assert(carry == difference_loop2_start.limbs[i as int]); + assert( carry & mask == carry ) by (bit_vector) + requires + carry < 1u64 <<52, + mask == (1u64 << 52) - 1; + assert(difference_after_loop1.limbs[i as int] == difference.limbs[i as int]); + assert(forall |j :int| 0<=j<5 ==> difference_after_loop1.limbs[j] == difference.limbs[j]); + assert(difference_after_loop1.limbs == difference.limbs); + } + if borrow >> 63 == 1 { + // When underflow, addend = L.limbs[i] + assert(addend == constants::L.limbs[i as int]); + // carry = (old_carry >> 52) + difference_after_loop1.limbs[i] + L.limbs[i] + // difference.limbs[i] = carry & mask + calc! { + (==) + seq_u64_to_nat(difference_after_loop1.limbs@.subrange(0, i+1)) + seq_u64_to_nat(constants::L.limbs@.subrange(0, i+1)); { + lemma_seq_u64_to_nat_subrange_extend(difference_after_loop1.limbs@, i as int); + lemma_seq_u64_to_nat_subrange_extend(constants::L.limbs@, i as int); + } + seq_u64_to_nat(difference_after_loop1.limbs@.subrange(0, i as int)) + difference_after_loop1.limbs[i as int] as nat * pow2(52 * i as nat) + + seq_u64_to_nat(constants::L.limbs@.subrange(0, i as int)) + constants::L.limbs[i as int] as nat * pow2(52 * i as nat); { + broadcast use lemma_mul_is_distributive_add_other_way; + } + seq_u64_to_nat(difference_after_loop1.limbs@.subrange(0, i as int)) + seq_u64_to_nat(constants::L.limbs@.subrange(0, i as int)) + + (difference_after_loop1.limbs[i as int] as nat + constants::L.limbs[i as int] as nat) * pow2(52 * i as nat); { + // Use invariant + } + seq_u64_to_nat(difference_loop2_start.limbs@.subrange(0, i as int)) + (old_carry >> 52) as nat * pow2(52 * i as nat) + + (difference_after_loop1.limbs[i as int] as nat + constants::L.limbs[i as int] as nat) * pow2(52 * i as nat); { + assert(difference_loop2_start.limbs@.subrange(0, i as int) == difference.limbs@.subrange(0, i as int) ); + } + seq_u64_to_nat(difference.limbs@.subrange(0, i as int)) + (old_carry >> 52) as nat * pow2(52 * i as nat) + + (difference_after_loop1.limbs[i as int] as nat + constants::L.limbs[i as int] as nat) * pow2(52 * i as nat); { + broadcast use lemma_mul_is_distributive_add_other_way; + } + seq_u64_to_nat(difference.limbs@.subrange(0, i as int)) + ((old_carry >> 52) as nat + + difference_after_loop1.limbs[i as int] as nat + constants::L.limbs[i as int] as nat) * pow2(52 * i as nat); { + assert(carry == (old_carry >> 52) + difference_after_loop1.limbs[i as int] + constants::L.limbs[i as int]); + } + seq_u64_to_nat(difference.limbs@.subrange(0, i as int)) + carry as nat * pow2(52 * i as nat); { + assert(carry == (carry >> 52) * (1u64<<52) + (carry & mask)) by (bit_vector) + requires mask == (1u64 << 52) - 1; + assert(carry == (carry >> 52) * pow2(52) + difference.limbs[i as int]) by { + lemma2_to64_rest(); + assert(0x10000000000000 == 1u64 << 52) by (compute_only);}; + assert(difference.limbs[i as int] == carry & mask); + } + seq_u64_to_nat(difference.limbs@.subrange(0, i as int)) + ((carry >> 52) as nat * pow2(52) + difference.limbs[i as int] as nat) * pow2(52 * i as nat); { + broadcast use lemma_mul_is_distributive_add_other_way; + } + seq_u64_to_nat(difference.limbs@.subrange(0, i as int)) + (carry >> 52) as nat * pow2(52) * pow2(52 * i as nat) + difference.limbs[i as int] as nat * pow2(52 * i as nat); { + assert(pow2(52) * pow2(52 * i as nat) == pow2(52 + 52 * i as nat)) by {broadcast use lemma_pow2_adds;}; + assert(52 + 52 * i as nat == 52 * (i+1) as nat); + assert((carry >> 52) as nat * pow2(52) * pow2(52 * i as nat) == (carry >> 52) 
as nat * pow2(52 * (i+1) as nat)) by { + assert(pow2(52) * pow2(52 * i as nat) == pow2(52 * (i+1) as nat)); + lemma_mul_is_associative((carry >> 52) as int, pow2(52) as int, pow2(52 * i as nat) as int); + }; + } + seq_u64_to_nat(difference.limbs@.subrange(0, i as int)) + (carry >> 52) as nat * pow2(52 * (i+1) as nat) + difference.limbs[i as int] as nat * pow2(52 * i as nat); { + lemma_seq_u64_to_nat_subrange_extend(difference.limbs@, i as int); + } + seq_u64_to_nat(difference.limbs@.subrange(0, i+1)) + (carry >> 52) as nat * pow2(52 * (i+1) as nat); + } + } +} + +/// Proves that the addition loop maintains its invariant: +/// a[0..i+1] + b[0..i+1] == sum[0..i+1] + (carry >> 52) * 2^(52*(i+1)) +/// See lemma_sub_loop1_invariant for more comments +pub proof fn lemma_add_loop_invariant(sum: Scalar52, carry: u64, i: usize, a: &Scalar52, b: &Scalar52, old_carry: u64, mask: u64, sum_loop_start: Scalar52) + requires + limbs_bounded(a), + limbs_bounded(b), + 0 <= i < 5, + forall|j: int| 0 <= j < i ==> sum.limbs[j] < (1u64 << 52), + mask == (1u64 << 52) - 1, + seq_u64_to_nat(a.limbs@.subrange(0, i as int)) + seq_u64_to_nat(b.limbs@.subrange(0, i as int)) == + seq_u64_to_nat(sum_loop_start.limbs@.subrange(0, i as int)) + (old_carry >> 52) * pow2((52 * (i) as nat)), + sum_loop_start.limbs@.subrange(0, i as int) == sum.limbs@.subrange(0, i as int), + sum.limbs[i as int] == carry & mask, + carry == a.limbs[i as int] + b.limbs[i as int] + (old_carry >> 52) + ensures + seq_u64_to_nat(sum.limbs@.subrange(0, i + 1)) + (carry >> 52) * pow2((52 * (i + 1) as nat)) + == seq_u64_to_nat(a.limbs@.subrange(0, i + 1)) + seq_u64_to_nat(b.limbs@.subrange(0, i + 1)) +{ + calc! { + (==) + seq_u64_to_nat(a.limbs@.subrange(0, i + 1)) + seq_u64_to_nat(b.limbs@.subrange(0, i + 1)); { + lemma_seq_u64_to_nat_subrange_extend(a.limbs@, i as int); + lemma_seq_u64_to_nat_subrange_extend(b.limbs@, i as int); + } + seq_u64_to_nat(a.limbs@.subrange(0, i as int)) + a.limbs[i as int] as nat * pow2(52 * i as nat) + + seq_u64_to_nat(b.limbs@.subrange(0, i as int)) + b.limbs[i as int] as nat * pow2(52 * i as nat); { + lemma_mul_is_distributive_add_other_way(pow2(52 * i as nat) as int, a.limbs[i as int] as int, b.limbs[i as int] as int); + } + seq_u64_to_nat(a.limbs@.subrange(0, i as int)) + seq_u64_to_nat(b.limbs@.subrange(0, i as int)) + + (a.limbs[i as int] as nat + b.limbs[i as int] as nat) * pow2(52 * i as nat); { + assert(sum_loop_start.limbs@.subrange(0, i as int) == sum.limbs@.subrange(0, i as int)); + // Use loop invariant + } + seq_u64_to_nat(sum.limbs@.subrange(0, i as int)) + (old_carry >> 52) as nat * pow2(52 * i as nat) + + (a.limbs[i as int] as nat + b.limbs[i as int] as nat) * pow2(52 * i as nat); { + lemma_mul_is_distributive_add_other_way(pow2(52 * i as nat) as int, (old_carry >> 52) as int, (a.limbs[i as int] as nat + b.limbs[i as int] as nat) as int); + } + seq_u64_to_nat(sum.limbs@.subrange(0, i as int)) + + ((old_carry >> 52) as nat + a.limbs[i as int] as nat + b.limbs[i as int] as nat) * pow2(52 * i as nat); { + assert(carry == a.limbs[i as int] + b.limbs[i as int] + (old_carry >> 52)); + assert(sum.limbs[i as int] == carry & mask); + // Decompose carry using the mask + lemma_decompose(carry, mask); + assert(carry == (carry >> 52) * pow2(52) + sum.limbs[i as int]); + } + seq_u64_to_nat(sum.limbs@.subrange(0, i as int)) + + ((carry >> 52) as nat * pow2(52) + sum.limbs[i as int] as nat) * pow2(52 * i as nat); { + assert(pow2(52) * pow2(52 * i as nat) == pow2(52 + 52 * i as nat)) by {lemma_pow2_adds(52, 52 * i as 
nat);}; + assert(52 + 52 * i as nat == 52 * (i+1) as nat); + lemma_mul_is_distributive_add_other_way(pow2(52 * i as nat) as int, (carry >> 52) as nat * pow2(52) as int, sum.limbs[i as int] as int); + assert((carry >> 52) as nat * pow2(52) * pow2(52 * i as nat) == (carry >> 52) as nat * pow2(52 * (i+1) as nat)) by { + assert(pow2(52) * pow2(52 * i as nat) == pow2(52 * (i+1) as nat)); + lemma_mul_is_associative((carry >> 52) as int, pow2(52) as int, pow2(52 * i as nat) as int); + }; + } + seq_u64_to_nat(sum.limbs@.subrange(0, i as int)) + + (carry >> 52) as nat * pow2(52 * (i+1) as nat) + sum.limbs[i as int] as nat * pow2(52 * i as nat); { + lemma_seq_u64_to_nat_subrange_extend(sum.limbs@, i as int); + } + seq_u64_to_nat(sum.limbs@.subrange(0, i + 1)) + (carry >> 52) as nat * pow2((52 * (i + 1) as nat)); + } +} + +/// Get rid of the subranges from the invariant statement. +/// Since a and b are less than group order, we can show that carry >> 52 +/// has to be 0, else the RHS is too large +pub proof fn lemma_add_sum_simplify(a: &Scalar52, b: &Scalar52, sum: &Scalar52, carry: u64) + requires + limbs_bounded(a), + limbs_bounded(b), + to_nat(&a.limbs) < group_order(), + to_nat(&b.limbs) < group_order(), + forall|j: int| 0 <= j < 5 ==> sum.limbs[j] < (1u64 << 52), + (carry >> 52) < 2, + seq_u64_to_nat(a.limbs@.subrange(0, 5 as int)) + seq_u64_to_nat(b.limbs@.subrange(0, 5 as int)) == + seq_u64_to_nat(sum.limbs@.subrange(0, 5 as int)) + (carry >> 52) as nat * pow2((52 * (5) as nat)) + ensures + to_nat(&a.limbs) + to_nat(&b.limbs) == to_nat(&sum.limbs) +{ + // First establish the relationship between the different representations + assert(seq_u64_to_nat(a.limbs@.subrange(0, 5 as int)) == to_nat(&a.limbs)) by { + assert(a.limbs@ == a.limbs@.subrange(0, 5 as int)); + assert(seq_u64_to_nat(a.limbs@) == to_nat(&a.limbs)); + } + assert(seq_u64_to_nat(b.limbs@.subrange(0, 5 as int)) == to_nat(&b.limbs)) by { + assert(b.limbs@ == b.limbs@.subrange(0, 5 as int)); + assert(seq_u64_to_nat(b.limbs@) == to_nat(&b.limbs)); + } + assert(seq_u64_to_nat(sum.limbs@.subrange(0, 5 as int)) == to_nat(&sum.limbs)) by { + assert(sum.limbs@ == sum.limbs@.subrange(0, 5 as int)); + assert(seq_u64_to_nat(sum.limbs@) == to_nat(&sum.limbs)); + } + + assert(to_nat(&a.limbs) + to_nat(&b.limbs) == to_nat(&sum.limbs) + (carry >> 52) * pow2((52 * (5) as nat))); + + // From the loop invariant, we have: a + b == sum + (carry >> 52) * 2^260 + assert(52 * 5 == 260) by (compute); + assert(pow2((52 * 5) as nat) == pow2(260)); + assert(to_nat(&a.limbs) + to_nat(&b.limbs) == to_nat(&sum.limbs) + (carry >> 52) as nat * pow2(260)); + + // Since a < group_order() and b < group_order(), we have a + b < 2 * group_order() + // This is just basic arithmetic: if x < A and y < A, then x + y < A + A = 2*A + assert(to_nat(&a.limbs) + to_nat(&b.limbs) < group_order() + group_order()); + assert(group_order() + group_order() == 2 * group_order()); + assert(to_nat(&a.limbs) + to_nat(&b.limbs) < 2 * group_order()); + + // Therefore: sum + (carry >> 52) * 2^260 < 2 * group_order() + assert(to_nat(&sum.limbs) + (carry >> 52) as nat * pow2(260) < 2 * group_order()); + + // Prove a contradiction if carry is nonzero + assert((carry >> 52) as nat * pow2(260) < 2 * group_order()); + if carry >> 52 == 1 { + lemma_pow2_260_greater_than_2_group_order(); + assert(1 as nat * pow2(260) < 2 * group_order()); + assert(false); + } + assert(carry >> 52 == 0); + + // Since carry >> 52 >= 0 and pow2(260) > 0, we have (carry >> 52) * pow2(260) >= 0 + // Therefore sum < sum 
+ (carry >> 52) * pow2(260) < 2 * group_order() + lemma_pow2_pos(260); + assert(pow2(260) > 0); + assert((carry >> 52) as nat * pow2(260) >= 0); + assert(to_nat(&sum.limbs) <= to_nat(&sum.limbs) + (carry >> 52) as nat * pow2(260)); + assert(to_nat(&sum.limbs) < 2 * group_order()); +} + + +// Specialization of lemma_u64_shl_is_mul for x = 1 +pub proof fn shift_is_pow2(k: nat) + requires + k < 64, + ensures + (1u64 << k) == pow2(k) +{ + pow2_le_max64(k); + lemma_u64_shl_is_mul(1u64, k as u64); +} + +pub proof fn pow2_le_max64(k: nat) + requires + k < 64, + ensures + pow2(k) <= u64::MAX + { + lemma2_to64(); + lemma2_to64_rest(); + } + +} // verus! diff --git a/curve25519-dalek/src/backend/serial/u64/scalar_specs.rs b/curve25519-dalek/src/backend/serial/u64/scalar_specs.rs new file mode 100644 index 000000000..c74526e7e --- /dev/null +++ b/curve25519-dalek/src/backend/serial/u64/scalar_specs.rs @@ -0,0 +1,69 @@ +#[allow(unused_imports)] +use super::scalar::Scalar52; +#[allow(unused_imports)] +use vstd::arithmetic::power2::*; +use vstd::prelude::*; + +verus! { +pub open spec fn seq_to_nat(limbs: Seq) -> nat +decreases limbs.len() +{ + if limbs.len() == 0 { + 0 + } else { + limbs[0] + seq_to_nat(limbs.subrange(1, limbs.len() as int)) * pow2(52) + } +} + +pub open spec fn slice128_to_nat(limbs: &[u128]) -> nat +{ + seq_to_nat(limbs@.map(|i, x| x as nat)) +} + +pub open spec fn seq_u64_to_nat(limbs: Seq) -> nat +{ + seq_to_nat(limbs.map(|i, x| x as nat)) +} + +pub open spec fn to_nat(limbs: &[u64]) -> nat +{ + seq_to_nat(limbs@.map(|i, x| x as nat)) +} + +pub open spec fn nine_limbs_to_nat_aux(limbs: &[u128; 9]) -> nat { + (limbs[0] as nat) + + (limbs[1] as nat) * pow2(52) + + (limbs[2] as nat) * pow2(104) + + (limbs[3] as nat) * pow2(156) + + (limbs[4] as nat) * pow2(208) + + (limbs[5] as nat) * pow2(260) + + (limbs[6] as nat) * pow2(312) + + (limbs[7] as nat) * pow2(364) + + (limbs[8] as nat) * pow2(416) +} + +pub open spec fn five_limbs_to_nat_aux(limbs: [u64; 5]) -> nat { + (limbs[0] as nat) + + pow2(52) * (limbs[1] as nat) + + pow2(104) * (limbs[2] as nat) + + pow2(156) * (limbs[3] as nat) + + pow2(208) * (limbs[4] as nat) +} + + +// Group order: the value of L as a natural number +pub open spec fn group_order() -> nat { + pow2(252) + 27742317777372353535851937790883648493nat +} + +// Montgomery radix R = 2^260 +pub open spec fn montgomery_radix() -> nat { + pow2(260) +} + +// Check that all limbs of a Scalar52 are properly bounded (< 2^52) +pub open spec fn limbs_bounded(s: &Scalar52) -> bool { + forall|i: int| 0 <= i < 5 ==> s.limbs[i] < (1u64 << 52) +} + +} // verus! diff --git a/curve25519-dalek/src/backend/serial/u64/subtle_assumes.rs b/curve25519-dalek/src/backend/serial/u64/subtle_assumes.rs new file mode 100644 index 000000000..b24c34644 --- /dev/null +++ b/curve25519-dalek/src/backend/serial/u64/subtle_assumes.rs @@ -0,0 +1,34 @@ +//! Tell Verus what Choice does +use subtle::{Choice, ConditionallySelectable}; + +use vstd::prelude::*; + +verus! 
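+// Illustrative use of the specification below (hypothetical caller code, not in this
+// crate): if reveal_choice(c) == RevealedChoice::Choice1, then select(&x, &y, c) is
+// specified to return y, so verified callers reason only about the ensures clause and
+// never need Verus to look inside subtle's Choice.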
{ + +#[allow(dead_code)] +pub enum RevealedChoice { + Choice0, + Choice1, +} + +#[verifier::external_type_specification] +#[verifier::external_body] +#[allow(dead_code)] +pub struct ExChoice(Choice); + +pub uninterp spec fn reveal_choice(c: Choice) -> RevealedChoice; + +pub assume_specification [Choice::from](u: u8) -> (c: Choice) + ensures u == 0 ==> reveal_choice(c) == RevealedChoice::Choice0, + u == 1 ==> reveal_choice(c) == RevealedChoice::Choice1; + +#[verifier::external_body] +/// See https://docs.rs/subtle/latest/subtle/trait.ConditionallySelectable.html#tymethod.conditional_select +pub fn select(a: &u64, b: &u64, c: Choice) -> (res: u64) + ensures reveal_choice(c) == RevealedChoice::Choice0 ==> res == a, + reveal_choice(c) == RevealedChoice::Choice1 ==> res == b +{ + u64::conditional_select(a, b, c) +} + +} // verus! diff --git a/curve25519-dalek/src/lib.rs b/curve25519-dalek/src/lib.rs index 24e0fa5b8..1261b01b6 100644 --- a/curve25519-dalek/src/lib.rs +++ b/curve25519-dalek/src/lib.rs @@ -23,13 +23,7 @@ // Linting: //------------------------------------------------------------------------ #![cfg_attr(allow_unused_unsafe, allow(unused_unsafe))] -#![warn( - clippy::unwrap_used, - missing_docs, - rust_2018_idioms, - unused_lifetimes, - unused_qualifications -)] +#![warn(clippy::unwrap_used, missing_docs, rust_2018_idioms, unused_lifetimes)] //------------------------------------------------------------------------ // External dependencies: diff --git a/curve25519-dalek/src/scalar.rs b/curve25519-dalek/src/scalar.rs index fc8939fb4..61c7a5df8 100644 --- a/curve25519-dalek/src/scalar.rs +++ b/curve25519-dalek/src/scalar.rs @@ -1730,7 +1730,7 @@ pub(crate) mod test { let bytes = unpacked.to_bytes(); let should_be_unpacked = UnpackedScalar::from_bytes(&bytes); - assert_eq!(should_be_unpacked.0, unpacked.0); + assert_eq!(should_be_unpacked.limbs, unpacked.limbs); } #[test] @@ -1762,8 +1762,8 @@ pub(crate) mod test { let montgomery_reduced = UnpackedScalar::montgomery_reduce(&interim); // The Montgomery reduced scalar should match the reduced one, as well as the expected - assert_eq!(montgomery_reduced.0, reduced.unpack().0); - assert_eq!(montgomery_reduced.0, expected.unpack().0) + assert_eq!(montgomery_reduced.limbs, reduced.unpack().limbs); + assert_eq!(montgomery_reduced.limbs, expected.unpack().limbs) } #[test]