diff --git a/CHANGELOG.md b/CHANGELOG.md
index 72bcbb83..1272c11f 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -11,12 +11,15 @@ and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0
 
 ### Added
 
+- Add `const fn` `const_is_zero`, `const_eq`, `bit*` ([#441])
+
 ### Changed
 
 - Updated Pyo3. This is a **non-semver breaking change** to address a vulnerability reported on Pyo3. ([#460])
+- Make `rotate*`, `*sh[lr]` functions `const` ([#441])
 
-### Fixed
-
+[#441]: https://github.com/recmo/uint/pull/441
+[#460]: https://github.com/recmo/uint/pull/460
 
 ## [1.14.0] - 2025-03-25
 
diff --git a/src/algorithms/div/reciprocal.rs b/src/algorithms/div/reciprocal.rs
index 8dca569a..426a4f25 100644
--- a/src/algorithms/div/reciprocal.rs
+++ b/src/algorithms/div/reciprocal.rs
@@ -134,7 +134,6 @@ pub fn reciprocal_2_mg10(d: u128) -> u64 {
     v
 }
 
-#[allow(clippy::missing_const_for_fn)] // False positive
 #[inline]
 #[must_use]
 fn mul_hi(a: Wrapping<u64>, b: Wrapping<u64>) -> Wrapping<u64> {
@@ -144,7 +143,6 @@ fn mul_hi(a: Wrapping<u64>, b: Wrapping<u64>) -> Wrapping<u64> {
     Wrapping((r >> 64) as u64)
 }
 
-#[allow(clippy::missing_const_for_fn)] // False positive
 #[inline]
 #[must_use]
 fn muladd_hi(a: Wrapping<u64>, b: Wrapping<u64>, c: Wrapping<u64>) -> Wrapping<u64> {
diff --git a/src/bits.rs b/src/bits.rs
index 6db0298c..35deaebb 100644
--- a/src/bits.rs
+++ b/src/bits.rs
@@ -127,13 +127,11 @@ impl<const BITS: usize, const LIMBS: usize> Uint<BITS, LIMBS> {
         if BITS == 0 {
             return Self::ZERO;
         }
-
         let mut i = 0;
         while i < LIMBS {
             self.limbs[i] = !self.limbs[i];
             i += 1;
         }
-
         self.masked()
     }
 
@@ -195,15 +193,13 @@ impl<const BITS: usize, const LIMBS: usize> Uint<BITS, LIMBS> {
     #[inline]
     #[must_use]
     pub const fn count_ones(&self) -> usize {
-        let mut total = 0;
-
+        let mut ones = 0;
         let mut i = 0;
         while i < LIMBS {
-            total += self.limbs[i].count_ones() as usize;
+            ones += self.limbs[i].count_ones() as usize;
             i += 1;
         }
-
-        total
+        ones
     }
 
     /// Returns the number of zeros in the binary representation of `self`.
@@ -278,7 +274,7 @@
     /// shift is larger than BITS (which is IMHO not very useful).
     #[inline(always)]
     #[must_use]
-    pub fn checked_shl(self, rhs: usize) -> Option<Self> {
+    pub const fn checked_shl(self, rhs: usize) -> Option<Self> {
         match self.overflowing_shl(rhs) {
             (value, false) => Some(value),
             _ => None,
         }
@@ -292,7 +288,7 @@
     /// [`Uint::MAX`] if the bits shifted out would be non-zero.
     #[inline(always)]
     #[must_use]
-    pub fn saturating_shl(self, rhs: usize) -> Self {
+    pub const fn saturating_shl(self, rhs: usize) -> Self {
         match self.overflowing_shl(rhs) {
             (value, false) => value,
             _ => Self::MAX,
         }
@@ -309,22 +305,23 @@
     /// the shift is larger than `BITS` (which is IMHO not very useful).
     #[inline]
     #[must_use]
-    pub fn overflowing_shl(self, rhs: usize) -> (Self, bool) {
+    pub const fn overflowing_shl(self, rhs: usize) -> (Self, bool) {
         let (limbs, bits) = (rhs / 64, rhs % 64);
         if limbs >= LIMBS {
-            return (Self::ZERO, self != Self::ZERO);
+            return (Self::ZERO, !self.const_is_zero());
         }
 
         let word_bits = 64;
         let mut r = Self::ZERO;
         let mut carry = 0;
-        for i in 0..Self::LIMBS - limbs {
+        let mut i = 0;
+        while i < Self::LIMBS - limbs {
             let x = self.limbs[i];
             r.limbs[i + limbs] = (x << bits) | carry;
             carry = (x >> (word_bits - bits - 1)) >> 1;
+            i += 1;
         }
-        r.apply_mask();
-        (r, carry != 0)
+        (r.masked(), carry != 0)
     }
 
     /// Left shift by `rhs` bits.
@@ -335,7 +332,7 @@
     /// by `BITS` (which is IMHO not very useful).
     #[inline(always)]
     #[must_use]
-    pub fn wrapping_shl(self, rhs: usize) -> Self {
+    pub const fn wrapping_shl(self, rhs: usize) -> Self {
         self.overflowing_shl(rhs).0
     }
 
@@ -352,7 +349,7 @@
     /// shift is larger than BITS (which is IMHO not very useful).
     #[inline(always)]
     #[must_use]
-    pub fn checked_shr(self, rhs: usize) -> Option<Self> {
+    pub const fn checked_shr(self, rhs: usize) -> Option<Self> {
         match self.overflowing_shr(rhs) {
             (value, false) => Some(value),
             _ => None,
         }
@@ -373,19 +370,21 @@
     /// the shift is larger than `BITS` (which is IMHO not very useful).
     #[inline]
     #[must_use]
-    pub fn overflowing_shr(self, rhs: usize) -> (Self, bool) {
+    pub const fn overflowing_shr(self, rhs: usize) -> (Self, bool) {
         let (limbs, bits) = (rhs / 64, rhs % 64);
         if limbs >= LIMBS {
-            return (Self::ZERO, self != Self::ZERO);
+            return (Self::ZERO, !self.const_is_zero());
         }
 
         let word_bits = 64;
         let mut r = Self::ZERO;
         let mut carry = 0;
-        for i in 0..LIMBS - limbs {
+        let mut i = 0;
+        while i < LIMBS - limbs {
             let x = self.limbs[LIMBS - 1 - i];
             r.limbs[LIMBS - 1 - i - limbs] = (x >> bits) | carry;
             carry = (x << (word_bits - bits - 1)) << 1;
+            i += 1;
         }
         (r, carry != 0)
     }
@@ -401,21 +400,22 @@
     /// by `BITS` (which is IMHO not very useful).
     #[inline(always)]
     #[must_use]
-    pub fn wrapping_shr(self, rhs: usize) -> Self {
+    pub const fn wrapping_shr(self, rhs: usize) -> Self {
         self.overflowing_shr(rhs).0
     }
 
     /// Arithmetic shift right by `rhs` bits.
     #[inline]
     #[must_use]
-    pub fn arithmetic_shr(self, rhs: usize) -> Self {
+    pub const fn arithmetic_shr(self, rhs: usize) -> Self {
         if BITS == 0 {
             return Self::ZERO;
         }
         let sign = self.bit(BITS - 1);
-        let mut r = self >> rhs;
+        let mut r = self.wrapping_shr(rhs);
         if sign {
-            r |= Self::MAX << BITS.saturating_sub(rhs);
+            // r |= Self::MAX << BITS.saturating_sub(rhs);
+            r = r.bitor(Self::MAX.wrapping_shl(BITS.saturating_sub(rhs)));
         }
         r
     }
@@ -424,20 +424,20 @@
     /// truncated bits to the end of the resulting integer.
     #[inline]
     #[must_use]
-    #[allow(clippy::missing_const_for_fn)] // False positive
-    pub fn rotate_left(self, rhs: usize) -> Self {
+    pub const fn rotate_left(self, rhs: usize) -> Self {
         if BITS == 0 {
             return Self::ZERO;
         }
         let rhs = rhs % BITS;
-        (self << rhs) | (self >> (BITS - rhs))
+        // (self << rhs) | (self >> (BITS - rhs))
+        self.wrapping_shl(rhs).bitor(self.wrapping_shr(BITS - rhs))
     }
 
     /// Shifts the bits to the right by a specified amount, `rhs`, wrapping the
     /// truncated bits to the beginning of the resulting integer.
     #[inline(always)]
     #[must_use]
-    pub fn rotate_right(self, rhs: usize) -> Self {
+    pub const fn rotate_right(self, rhs: usize) -> Self {
         if BITS == 0 {
             return Self::ZERO;
         }
@@ -451,7 +451,7 @@ impl<const BITS: usize, const LIMBS: usize> Not for Uint<BITS, LIMBS> {
 
     #[inline]
     fn not(self) -> Self::Output {
-        Self::not(self)
+        self.not()
     }
 }
 
@@ -465,7 +465,7 @@ impl<const BITS: usize, const LIMBS: usize> Not for &Uint<BITS, LIMBS> {
 }
 
 macro_rules! impl_bit_op {
-    ($trait:ident, $fn:ident, $trait_assign:ident, $fn_assign:ident) => {
+    ($op:tt, $assign_op:tt, $trait:ident, $fn:ident, $trait_assign:ident, $fn_assign:ident) => {
         impl<const BITS: usize, const LIMBS: usize> $trait_assign<Uint<BITS, LIMBS>>
             for Uint<BITS, LIMBS>
         {
@@ -529,15 +529,29 @@ macro_rules! impl_bit_op {
 
             #[inline(always)]
             fn $fn(self, rhs: &Uint<BITS, LIMBS>) -> Self::Output {
-                self.clone().$fn(rhs)
+                <Uint<BITS, LIMBS>>::$fn(*self, *rhs)
+            }
+        }
+
+        impl<const BITS: usize, const LIMBS: usize> Uint<BITS, LIMBS> {
+            #[doc = concat!("Returns the bitwise `", stringify!($op), "` of the two numbers.")]
+            #[inline(always)]
+            #[must_use]
+            pub const fn $fn(mut self, rhs: Uint<BITS, LIMBS>) -> Uint<BITS, LIMBS> {
+                let mut i = 0;
+                while i < LIMBS {
+                    self.limbs[i] $assign_op rhs.limbs[i];
+                    i += 1;
+                }
+                self
             }
         }
     };
 }
 
-impl_bit_op!(BitOr, bitor, BitOrAssign, bitor_assign);
-impl_bit_op!(BitAnd, bitand, BitAndAssign, bitand_assign);
-impl_bit_op!(BitXor, bitxor, BitXorAssign, bitxor_assign);
+impl_bit_op!(|, |=, BitOr, bitor, BitOrAssign, bitor_assign);
+impl_bit_op!(&, &=, BitAnd, bitand, BitAndAssign, bitand_assign);
+impl_bit_op!(^, ^=, BitXor, bitxor, BitXorAssign, bitxor_assign);
 
 impl<const BITS: usize, const LIMBS: usize> Shl<Self> for Uint<BITS, LIMBS> {
     type Output = Self;
diff --git a/src/bytes.rs b/src/bytes.rs
index 10e4790d..7640cb12 100644
--- a/src/bytes.rs
+++ b/src/bytes.rs
@@ -49,7 +49,7 @@ impl<const BITS: usize, const LIMBS: usize> Uint<BITS, LIMBS> {
     #[cfg(feature = "alloc")]
     #[must_use]
     #[inline]
-    #[allow(clippy::missing_const_for_fn)]
+    #[cfg_attr(target_endian = "little", allow(clippy::missing_const_for_fn))] // Not const in big-endian.
     pub fn as_le_bytes(&self) -> Cow<'_, [u8]> {
         // On little endian platforms this is a no-op.
         #[cfg(target_endian = "little")]
@@ -58,11 +58,11 @@
         // In others, reverse each limb and return a copy.
         #[cfg(target_endian = "big")]
         return Cow::Owned({
-            let mut cpy = *self;
-            for limb in &mut cpy.limbs {
+            let mut limbs = self.limbs;
+            for limb in &mut limbs {
                 *limb = limb.swap_bytes();
             }
-            unsafe { slice::from_raw_parts(cpy.limbs.as_ptr().cast(), Self::BYTES).to_vec() }
+            unsafe { slice::from_raw_parts(limbs.as_ptr().cast(), Self::BYTES).to_vec() }
         });
     }
 
diff --git a/src/cmp.rs b/src/cmp.rs
index 130ab43f..23c1f7aa 100644
--- a/src/cmp.rs
+++ b/src/cmp.rs
@@ -16,12 +16,41 @@ impl<const BITS: usize, const LIMBS: usize> Ord for Uint<BITS, LIMBS> {
 }
 
 impl<const BITS: usize, const LIMBS: usize> Uint<BITS, LIMBS> {
-    /// Returns true if the value is zero.
+    /// Returns `true` if the value is zero.
     #[inline]
     #[must_use]
     pub fn is_zero(&self) -> bool {
         *self == Self::ZERO
     }
+
+    /// Returns `true` if the value is zero.
+    ///
+    /// Note that this currently might perform worse than
+    /// [`is_zero`](Self::is_zero).
+    #[inline]
+    #[must_use]
+    pub const fn const_is_zero(&self) -> bool {
+        self.const_eq(&Self::ZERO)
+    }
+
+    /// Returns `true` if `self` equals `other`.
+    ///
+    /// Note that this currently might perform worse than the derived
+    /// `PartialEq` (`==` operator).
+    #[inline]
+    #[must_use]
+    pub const fn const_eq(&self, other: &Self) -> bool {
+        // TODO: Replace with `self == other` and deprecate once `PartialEq` is const.
+        let a = self.as_limbs();
+        let b = other.as_limbs();
+        let mut i = 0;
+        let mut r = true;
+        while i < LIMBS {
+            r &= a[i] == b[i];
+            i += 1;
+        }
+        r
+    }
 }
 
 #[cfg(test)]
diff --git a/src/support/num_traits.rs b/src/support/num_traits.rs
index 0ff582b6..c54b0955 100644
--- a/src/support/num_traits.rs
+++ b/src/support/num_traits.rs
@@ -42,7 +42,7 @@ impl<const BITS: usize, const LIMBS: usize> Zero for Uint<BITS, LIMBS> {
 
     #[inline(always)]
     fn is_zero(&self) -> bool {
-        self == &Self::ZERO
+        self.is_zero()
     }
 }
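
Taken together, these hunks make the shift, rotate, and bitwise helpers callable in `const` contexts. Below is a minimal usage sketch of what that enables, assuming the published `ruint` crate and its `U256` alias; the constant names are illustrative only, and the `const` qualifiers rely on the changes in this diff.

use ruint::aliases::U256;

// Rotating an all-ones value yields all-ones again; with the new `const fn`s
// this round-trip can be checked entirely at compile time via `const_eq`.
const ROTATED: U256 = U256::MAX.rotate_left(17);
const STILL_MAX: bool = ROTATED.const_eq(&U256::MAX);

// Shifting zero never discards non-zero bits, so `checked_shl` returns `Some`.
const SHIFTED: Option<U256> = U256::ZERO.checked_shl(200);

fn main() {
    assert!(STILL_MAX);
    assert!(!ROTATED.const_is_zero());
    assert!(SHIFTED.is_some());
}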