Commit 5515a97

Introduce a special case in IntRange::from_const.
The `if let Some(val) = value.try_eval_bits(...)` branch in `from_const()` is very hot for the `unicode_normalization` benchmark. This commit introduces a special-case alternative for scalars that avoids `try_eval_bits()` and all the functions it calls (`Const::eval()`, `ConstValue::try_to_bits()`, `ConstValue::try_to_scalar()`, and `Scalar::to_bits()`), instead extracting the result immediately. The type and value checking done by `Scalar::to_bits()` is replicated by moving it into a new function, `Scalar::check_raw()`, and using that new function in the special case.

PR #64673 introduced some special-case handling of scalar types in `Const::try_eval_bits()`. This handling is now moved out of that function into the new `IntRange::integral_size_and_signed_bias` function.

This commit reduces the instruction count for `unicode_normalization-check-clean` by about 10%.
1 parent a69e0e0
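
To make the shape of the optimization easier to see before reading the diffs, here is a minimal, self-contained sketch of the idea. It does not use rustc's real types: `ConstValue`, `check_raw`, `try_eval_bits_slow`, and `bits_of` below are simplified stand-ins for `ConstValue::Scalar`, `Scalar::check_raw`, `Const::try_eval_bits`, and the hot branch in `IntRange::from_const`. The point is just the control flow: when the constant is already a raw scalar, validate its bits and use them directly; only fall back to the general, slower evaluation path otherwise.

// Illustrative stand-ins only; not rustc's actual types or APIs.
#[derive(Clone, Copy)]
enum ConstValue {
    /// The bits are already available: raw `data` plus the scalar's size in bytes.
    Raw { data: u128, size: u8 },
    /// Stand-in for anything that still needs the general evaluation machinery.
    Unevaluated,
}

/// Validation in the spirit of a `check_raw`-style helper.
fn check_raw(data: u128, size: u8, target_size_bytes: u64) {
    assert_eq!(target_size_bytes, size as u64);
    assert_ne!(size, 0, "you should never look at the bits of a ZST");
    // The raw bits must fit losslessly in `size` bytes.
    let bits = 8 * u32::from(size);
    let mask = if bits >= 128 { u128::MAX } else { (1u128 << bits) - 1 };
    assert_eq!(data & mask, data);
}

/// Stand-in for the general path (`Const::eval`, `try_to_bits`, ... in the real code).
fn try_eval_bits_slow(value: ConstValue, target_size_bytes: u64) -> Option<u128> {
    match value {
        ConstValue::Raw { data, size } => {
            check_raw(data, size, target_size_bytes);
            Some(data)
        }
        ConstValue::Unevaluated => None, // would involve eval + layout queries
    }
}

fn bits_of(value: ConstValue, target_size_bytes: u64) -> Option<u128> {
    let val = if let ConstValue::Raw { data, size } = value {
        // Fast path: the bits are already at hand; validate them and use them.
        check_raw(data, size, target_size_bytes);
        data
    } else if let Some(val) = try_eval_bits_slow(value, target_size_bytes) {
        // General, slower path for everything else.
        val
    } else {
        return None;
    };
    Some(val)
}

fn main() {
    assert_eq!(bits_of(ConstValue::Raw { data: 0x41, size: 4 }, 4), Some(0x41));
    assert_eq!(bits_of(ConstValue::Unevaluated, 4), None);
}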

3 files changed, +38 −26 lines

src/librustc/mir/interpret/value.rs

+8-3
@@ -343,14 +343,19 @@ impl<'tcx, Tag> Scalar<Tag> {
         }
     }
 
+    #[inline(always)]
+    pub fn check_raw(data: u128, size: u8, target_size: Size) {
+        assert_eq!(target_size.bytes(), size as u64);
+        assert_ne!(size, 0, "you should never look at the bits of a ZST");
+        Scalar::check_data(data, size);
+    }
+
     /// Do not call this method! Use either `assert_bits` or `force_bits`.
     #[inline]
     pub fn to_bits(self, target_size: Size) -> InterpResult<'tcx, u128> {
         match self {
             Scalar::Raw { data, size } => {
-                assert_eq!(target_size.bytes(), size as u64);
-                assert_ne!(size, 0, "you should never look at the bits of a ZST");
-                Scalar::check_data(data, size);
+                Self::check_raw(data, size, target_size);
                 Ok(data)
             }
             Scalar::Ptr(_) => throw_unsup!(ReadPointerAsBytes),

src/librustc/ty/sty.rs

+2-16
@@ -13,7 +13,7 @@ use rustc_macros::HashStable;
 use crate::ty::subst::{InternalSubsts, Subst, SubstsRef, GenericArg, GenericArgKind};
 use crate::ty::{self, AdtDef, Discr, DefIdTree, TypeFlags, Ty, TyCtxt, TypeFoldable};
 use crate::ty::{List, TyS, ParamEnvAnd, ParamEnv};
-use crate::ty::layout::{Size, Integer, IntegerExt, VariantIdx};
+use crate::ty::layout::VariantIdx;
 use crate::util::captures::Captures;
 use crate::mir::interpret::{Scalar, GlobalId};
 
@@ -24,7 +24,6 @@ use std::marker::PhantomData;
 use std::ops::Range;
 use rustc_target::spec::abi;
 use syntax::ast::{self, Ident};
-use syntax::attr::{SignedInt, UnsignedInt};
 use syntax::symbol::{kw, InternedString};
 
 use self::InferTy::*;
@@ -2299,20 +2298,7 @@ impl<'tcx> Const<'tcx> {
         ty: Ty<'tcx>,
     ) -> Option<u128> {
         assert_eq!(self.ty, ty);
-        // This is purely an optimization -- layout_of is a pretty expensive operation,
-        // but if we can determine the size without calling it, we don't need all that complexity
-        // (hashing, caching, etc.). As such, try to skip it.
-        let size = match ty.kind {
-            ty::Bool => Size::from_bytes(1),
-            ty::Char => Size::from_bytes(4),
-            ty::Int(ity) => {
-                Integer::from_attr(&tcx, SignedInt(ity)).size()
-            }
-            ty::Uint(uty) => {
-                Integer::from_attr(&tcx, UnsignedInt(uty)).size()
-            }
-            _ => tcx.layout_of(param_env.with_reveal_all().and(ty)).ok()?.size,
-        };
+        let size = tcx.layout_of(param_env.with_reveal_all().and(ty)).ok()?.size;
         // if `ty` does not depend on generic parameters, use an empty param_env
         self.eval(tcx, param_env).val.try_to_bits(size)
     }

src/librustc_mir/hair/pattern/_match.rs

+28-7
@@ -842,21 +842,42 @@ impl<'tcx> IntRange<'tcx> {
         }
     }
 
+    #[inline]
+    fn integral_size_and_signed_bias(tcx: TyCtxt<'tcx>, ty: Ty<'_>) -> Option<(Size, u128)> {
+        match ty.kind {
+            ty::Char => Some((Size::from_bytes(4), 0)),
+            ty::Int(ity) => {
+                let size = Integer::from_attr(&tcx, SignedInt(ity)).size();
+                Some((size, 1u128 << (size.bits() as u128 - 1)))
+            }
+            ty::Uint(uty) => Some((Integer::from_attr(&tcx, UnsignedInt(uty)).size(), 0)),
+            _ => None,
+        }
+    }
+
     #[inline]
     fn from_const(
         tcx: TyCtxt<'tcx>,
         param_env: ty::ParamEnv<'tcx>,
         value: &Const<'tcx>,
     ) -> Option<IntRange<'tcx>> {
-        if Self::is_integral(value.ty) {
+        if let Some((target_size, bias)) = Self::integral_size_and_signed_bias(tcx, value.ty) {
             let ty = value.ty;
-            if let Some(val) = value.try_eval_bits(tcx, param_env, ty) {
-                let bias = IntRange::signed_bias(tcx, ty);
-                let val = val ^ bias;
-                Some(IntRange { range: val..=val, ty })
+            let val = if let ConstValue::Scalar(Scalar::Raw { data, size }) = value.val {
+                // For this specific pattern we can skip a lot of effort and go
+                // straight to the result, after doing a bit of checking. (We
+                // could remove this branch and just use the next branch, which
+                // is more general but much slower.)
+                Scalar::<()>::check_raw(data, size, target_size);
+                data
+            } else if let Some(val) = value.try_eval_bits(tcx, param_env, ty) {
+                // This is a more general form of the previous branch.
+                val
             } else {
-                None
-            }
+                return None
+            };
+            let val = val ^ bias;
+            Some(IntRange { range: val..=val, ty })
         } else {
             None
         }

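A short note on the bias that `integral_size_and_signed_bias` returns (`1 << (bits - 1)` for signed types, `0` otherwise): XOR-ing a signed value's raw bits with that bias flips the sign bit, which maps two's-complement order onto unsigned order, so the resulting `IntRange` can be compared with plain unsigned arithmetic. A standalone worked example follows; `bias_for_signed` is a hypothetical helper that just restates the arithmetic from the hunk above.

// Hypothetical helper mirroring the bias arithmetic above, not rustc code.
fn bias_for_signed(bits: u32) -> u128 {
    1u128 << (bits - 1)
}

fn main() {
    let bias = bias_for_signed(8); // 0x80: the sign bit of an i8

    // Reinterpret an i8 as its raw (two's-complement) bits, then XOR in the bias.
    let encode = |x: i8| (x as u8 as u128) ^ bias;

    // -128..=-1 and 0..=127 map to 0x00..=0x7F and 0x80..=0xFF respectively, so
    // unsigned comparisons on encoded values agree with signed order on the originals.
    assert_eq!(encode(i8::MIN), 0x00);
    assert_eq!(encode(-1), 0x7F);
    assert_eq!(encode(0), 0x80);
    assert_eq!(encode(i8::MAX), 0xFF);
    assert!(encode(-5) < encode(3));
}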