From 30703af6f34e96d3e51b665aeb56f44749d11265 Mon Sep 17 00:00:00 2001 From: WANG Rui Date: Wed, 26 Feb 2025 21:40:14 +0800 Subject: [PATCH 01/13] Enable `f16` for LoongArch --- library/std/build.rs | 1 - 1 file changed, 1 deletion(-) diff --git a/library/std/build.rs b/library/std/build.rs index 723d1eb02e07e..5e2ac839e1e3a 100644 --- a/library/std/build.rs +++ b/library/std/build.rs @@ -106,7 +106,6 @@ fn main() { // Infinite recursion ("csky", _) => false, ("hexagon", _) => false, - ("loongarch64", _) => false, ("powerpc" | "powerpc64", _) => false, ("sparc" | "sparc64", _) => false, ("wasm32" | "wasm64", _) => false, From 2a4204bf6c7965778685cc1af4d32a3767d8fbe9 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Esteban=20K=C3=BCber?= Date: Mon, 3 Mar 2025 20:04:20 +0000 Subject: [PATCH 02/13] Use default field values in `markdown::parse::Context` --- compiler/rustc_errors/src/lib.rs | 1 + compiler/rustc_errors/src/markdown/parse.rs | 20 +++++++------------- 2 files changed, 8 insertions(+), 13 deletions(-) diff --git a/compiler/rustc_errors/src/lib.rs b/compiler/rustc_errors/src/lib.rs index f2b133f56773b..afced1d803f6a 100644 --- a/compiler/rustc_errors/src/lib.rs +++ b/compiler/rustc_errors/src/lib.rs @@ -14,6 +14,7 @@ #![feature(associated_type_defaults)] #![feature(box_into_inner)] #![feature(box_patterns)] +#![feature(default_field_values)] #![feature(error_reporter)] #![feature(if_let_guard)] #![feature(let_chains)] diff --git a/compiler/rustc_errors/src/markdown/parse.rs b/compiler/rustc_errors/src/markdown/parse.rs index 7a991a2ace71b..f02387d833595 100644 --- a/compiler/rustc_errors/src/markdown/parse.rs +++ b/compiler/rustc_errors/src/markdown/parse.rs @@ -40,11 +40,13 @@ type ParseResult<'a> = Option>; /// Parsing context #[derive(Clone, Copy, Debug, PartialEq)] +// The default values are the most common setting for non top-level parsing: not top block, not at +// line start (yes leading whitespace, not escaped). struct Context { /// If true, we are at a the topmost level (not recursing a nested tt) - top_block: bool, + top_block: bool = false, /// Previous character - prev: Prev, + prev: Prev = Prev::Whitespace, } /// Character class preceding this one @@ -57,14 +59,6 @@ enum Prev { Any, } -impl Default for Context { - /// Most common setting for non top-level parsing: not top block, not at - /// line start (yes leading whitespace, not escaped) - fn default() -> Self { - Self { top_block: false, prev: Prev::Whitespace } - } -} - /// Flags to simple parser function #[derive(Clone, Copy, Debug, PartialEq)] enum ParseOpt { @@ -248,7 +242,7 @@ fn parse_heading(buf: &[u8]) -> ParseResult<'_> { } let (txt, rest) = parse_to_newline(&buf[1..]); - let ctx = Context { top_block: false, prev: Prev::Whitespace }; + let ctx = Context { .. }; let stream = parse_recursive(txt, ctx); Some((MdTree::Heading(level.try_into().unwrap(), stream), rest)) @@ -257,7 +251,7 @@ fn parse_heading(buf: &[u8]) -> ParseResult<'_> { /// Bulleted list fn parse_unordered_li(buf: &[u8]) -> Parsed<'_> { let (txt, rest) = get_indented_section(&buf[2..]); - let ctx = Context { top_block: false, prev: Prev::Whitespace }; + let ctx = Context { .. 
}; let stream = parse_recursive(trim_ascii_start(txt), ctx); (MdTree::UnorderedListItem(stream), rest) } @@ -266,7 +260,7 @@ fn parse_unordered_li(buf: &[u8]) -> Parsed<'_> { fn parse_ordered_li(buf: &[u8]) -> Parsed<'_> { let (num, pos) = ord_list_start(buf).unwrap(); // success tested in caller let (txt, rest) = get_indented_section(&buf[pos..]); - let ctx = Context { top_block: false, prev: Prev::Whitespace }; + let ctx = Context { .. }; let stream = parse_recursive(trim_ascii_start(txt), ctx); (MdTree::OrderedListItem(num, stream), rest) } From aae7a3c4c2fbd0db49fac5618590df541943f140 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Esteban=20K=C3=BCber?= Date: Mon, 3 Mar 2025 20:05:42 +0000 Subject: [PATCH 03/13] Use default field values for `config::NextSolverConfig` Use default field values to avoid manual `Default` impl. --- compiler/rustc_session/src/config.rs | 11 +++-------- compiler/rustc_session/src/lib.rs | 1 + 2 files changed, 4 insertions(+), 8 deletions(-) diff --git a/compiler/rustc_session/src/config.rs b/compiler/rustc_session/src/config.rs index 7586c5766b585..1f63847b6fe75 100644 --- a/compiler/rustc_session/src/config.rs +++ b/compiler/rustc_session/src/config.rs @@ -872,18 +872,13 @@ pub enum PrintKind { DeploymentTarget, } -#[derive(Debug, Copy, Clone, Hash, PartialEq, Eq)] +#[derive(Debug, Copy, Clone, Hash, PartialEq, Eq, Default)] pub struct NextSolverConfig { /// Whether the new trait solver should be enabled in coherence. - pub coherence: bool, + pub coherence: bool = true, /// Whether the new trait solver should be enabled everywhere. /// This is only `true` if `coherence` is also enabled. - pub globally: bool, -} -impl Default for NextSolverConfig { - fn default() -> Self { - NextSolverConfig { coherence: true, globally: false } - } + pub globally: bool = false, } #[derive(Clone)] diff --git a/compiler/rustc_session/src/lib.rs b/compiler/rustc_session/src/lib.rs index 112adde3740bc..d432e84fdb224 100644 --- a/compiler/rustc_session/src/lib.rs +++ b/compiler/rustc_session/src/lib.rs @@ -1,5 +1,6 @@ // tidy-alphabetical-start #![allow(internal_features)] +#![feature(default_field_values)] #![feature(iter_intersperse)] #![feature(let_chains)] #![feature(rustc_attrs)] From 0c4eaa5c985f69d3b6255a56799ee30d29e08d54 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Esteban=20K=C3=BCber?= Date: Mon, 3 Mar 2025 20:18:56 +0000 Subject: [PATCH 04/13] Use default field values for `ErrorOutputType` Remove manual `Default` impl from `config::ErrorOutputType`. --- compiler/rustc_session/src/config.rs | 45 +++++++++++++-------------- compiler/rustc_session/src/session.rs | 4 +-- src/librustdoc/core.rs | 2 +- src/librustdoc/doctest.rs | 2 +- 4 files changed, 25 insertions(+), 28 deletions(-) diff --git a/compiler/rustc_session/src/config.rs b/compiler/rustc_session/src/config.rs index 1f63847b6fe75..36adb113d053c 100644 --- a/compiler/rustc_session/src/config.rs +++ b/compiler/rustc_session/src/config.rs @@ -655,10 +655,14 @@ impl OutputType { } /// The type of diagnostics output to generate. -#[derive(Clone, Copy, Debug, PartialEq, Eq)] +#[derive(Clone, Copy, Debug, PartialEq, Eq, Default)] pub enum ErrorOutputType { /// Output meant for the consumption of humans. - HumanReadable(HumanReadableErrorType, ColorConfig), + #[default] + HumanReadable { + kind: HumanReadableErrorType = HumanReadableErrorType::Default, + color_config: ColorConfig = ColorConfig::Auto, + }, /// Output that's consumed by other tools such as `rustfix` or the `RLS`. 
Json { /// Render the JSON in a human readable way (with indents and newlines). @@ -670,12 +674,6 @@ pub enum ErrorOutputType { }, } -impl Default for ErrorOutputType { - fn default() -> Self { - Self::HumanReadable(HumanReadableErrorType::Default, ColorConfig::Auto) - } -} - #[derive(Clone, Hash, Debug)] pub enum ResolveDocLinks { /// Do not resolve doc links. @@ -1790,7 +1788,7 @@ pub fn parse_json(early_dcx: &EarlyDiagCtxt, matches: &getopts::Matches) -> Json pub fn parse_error_format( early_dcx: &mut EarlyDiagCtxt, matches: &getopts::Matches, - color: ColorConfig, + color_config: ColorConfig, json_color: ColorConfig, json_rendered: HumanReadableErrorType, ) -> ErrorOutputType { @@ -1800,27 +1798,26 @@ pub fn parse_error_format( // `opt_present` because the latter will panic. let error_format = if matches.opts_present(&["error-format".to_owned()]) { match matches.opt_str("error-format").as_deref() { - None | Some("human") => { - ErrorOutputType::HumanReadable(HumanReadableErrorType::Default, color) - } - Some("human-annotate-rs") => { - ErrorOutputType::HumanReadable(HumanReadableErrorType::AnnotateSnippet, color) - } + None | Some("human") => ErrorOutputType::HumanReadable { color_config, .. }, + Some("human-annotate-rs") => ErrorOutputType::HumanReadable { + kind: HumanReadableErrorType::AnnotateSnippet, + color_config, + }, Some("json") => { ErrorOutputType::Json { pretty: false, json_rendered, color_config: json_color } } Some("pretty-json") => { ErrorOutputType::Json { pretty: true, json_rendered, color_config: json_color } } - Some("short") => ErrorOutputType::HumanReadable(HumanReadableErrorType::Short, color), - Some("human-unicode") => { - ErrorOutputType::HumanReadable(HumanReadableErrorType::Unicode, color) + Some("short") => { + ErrorOutputType::HumanReadable { kind: HumanReadableErrorType::Short, color_config } } + Some("human-unicode") => ErrorOutputType::HumanReadable { + kind: HumanReadableErrorType::Unicode, + color_config, + }, Some(arg) => { - early_dcx.set_error_format(ErrorOutputType::HumanReadable( - HumanReadableErrorType::Default, - color, - )); + early_dcx.set_error_format(ErrorOutputType::HumanReadable { color_config, .. }); early_dcx.early_fatal(format!( "argument for `--error-format` must be `human`, `human-annotate-rs`, \ `human-unicode`, `json`, `pretty-json` or `short` (instead was `{arg}`)" @@ -1828,7 +1825,7 @@ pub fn parse_error_format( } } } else { - ErrorOutputType::HumanReadable(HumanReadableErrorType::Default, color) + ErrorOutputType::HumanReadable { color_config, .. } }; match error_format { @@ -1883,7 +1880,7 @@ fn check_error_format_stability( } let format = match format { ErrorOutputType::Json { pretty: true, .. } => "pretty-json", - ErrorOutputType::HumanReadable(format, _) => match format { + ErrorOutputType::HumanReadable { kind, .. 
} => match kind { HumanReadableErrorType::AnnotateSnippet => "human-annotate-rs", HumanReadableErrorType::Unicode => "human-unicode", _ => return, diff --git a/compiler/rustc_session/src/session.rs b/compiler/rustc_session/src/session.rs index ecdf76d22fb2e..aa1e9762f397b 100644 --- a/compiler/rustc_session/src/session.rs +++ b/compiler/rustc_session/src/session.rs @@ -913,7 +913,7 @@ fn default_emitter( let source_map = if sopts.unstable_opts.link_only { None } else { Some(source_map) }; match sopts.error_format { - config::ErrorOutputType::HumanReadable(kind, color_config) => { + config::ErrorOutputType::HumanReadable { kind, color_config } => { let short = kind.short(); if let HumanReadableErrorType::AnnotateSnippet = kind { @@ -1430,7 +1430,7 @@ fn mk_emitter(output: ErrorOutputType) -> Box { let fallback_bundle = fallback_fluent_bundle(vec![rustc_errors::DEFAULT_LOCALE_RESOURCE], false); let emitter: Box = match output { - config::ErrorOutputType::HumanReadable(kind, color_config) => { + config::ErrorOutputType::HumanReadable { kind, color_config } => { let short = kind.short(); Box::new( HumanEmitter::new(stderr_destination(color_config), fallback_bundle) diff --git a/src/librustdoc/core.rs b/src/librustdoc/core.rs index 757a2a6e0dd06..7221e2ce6b1d5 100644 --- a/src/librustdoc/core.rs +++ b/src/librustdoc/core.rs @@ -153,7 +153,7 @@ pub(crate) fn new_dcx( false, ); let emitter: Box = match error_format { - ErrorOutputType::HumanReadable(kind, color_config) => { + ErrorOutputType::HumanReadable { kind, color_config } => { let short = kind.short(); Box::new( HumanEmitter::new(stderr_destination(color_config), fallback_bundle) diff --git a/src/librustdoc/doctest.rs b/src/librustdoc/doctest.rs index 4a379b4235ff0..3d6e0330fffad 100644 --- a/src/librustdoc/doctest.rs +++ b/src/librustdoc/doctest.rs @@ -580,7 +580,7 @@ fn run_test( path_for_rustdoc.to_str().expect("target path must be valid unicode") } }); - if let ErrorOutputType::HumanReadable(kind, color_config) = rustdoc_options.error_format { + if let ErrorOutputType::HumanReadable { kind, color_config } = rustdoc_options.error_format { let short = kind.short(); let unicode = kind == HumanReadableErrorType::Unicode; From 8391c08b0329cf965ba9dae04d04dc1e46d1803e Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Jana=20D=C3=B6nszelmann?= Date: Wed, 5 Mar 2025 15:23:45 +0100 Subject: [PATCH 05/13] Revert #138019 after further discussion about adding this exception in hir-pretty --- compiler/rustc_hir_pretty/src/lib.rs | 74 ------------------------ tests/ui/unpretty/deprecated-attr.rs | 2 + tests/ui/unpretty/deprecated-attr.stdout | 17 ++++-- 3 files changed, 14 insertions(+), 79 deletions(-) diff --git a/compiler/rustc_hir_pretty/src/lib.rs b/compiler/rustc_hir_pretty/src/lib.rs index 1658c8dac6768..22edc18d71ca3 100644 --- a/compiler/rustc_hir_pretty/src/lib.rs +++ b/compiler/rustc_hir_pretty/src/lib.rs @@ -117,80 +117,6 @@ impl<'a> State<'a> { )); self.hardbreak() } - hir::Attribute::Parsed(AttributeKind::Deprecation { deprecation, .. }) => { - self.word("#[deprecated"); - - // There are three possible forms here: - // 1. a form with explicit components like - // `#[deprecated(since = "1.2.3", note = "some note", suggestion = "something")]` - // where each component may be present or absent. - // 2. `#[deprecated = "message"]` - // 3. `#[deprecated]` - // - // Let's figure out which we need. - // If there's a `since` or `suggestion` value, we're definitely in form 1. 
- if matches!( - deprecation.since, - rustc_attr_parsing::DeprecatedSince::RustcVersion(..) - | rustc_attr_parsing::DeprecatedSince::Future - | rustc_attr_parsing::DeprecatedSince::NonStandard(..) - ) || deprecation.suggestion.is_some() - { - self.word("("); - let mut use_comma = false; - - match &deprecation.since { - rustc_attr_parsing::DeprecatedSince::RustcVersion(rustc_version) => { - self.word("since = \""); - self.word(format!( - "{}.{}.{}", - rustc_version.major, rustc_version.minor, rustc_version.patch - )); - self.word("\""); - use_comma = true; - } - rustc_attr_parsing::DeprecatedSince::Future => { - self.word("since = \"future\""); - use_comma = true; - } - rustc_attr_parsing::DeprecatedSince::NonStandard(symbol) => { - self.word("since = \""); - self.word(symbol.to_ident_string()); - self.word("\""); - use_comma = true; - } - _ => {} - } - - if let Some(note) = &deprecation.note { - if use_comma { - self.word(", "); - } - self.word("note = \""); - self.word(note.to_ident_string()); - self.word("\""); - use_comma = true; - } - - if let Some(suggestion) = &deprecation.suggestion { - if use_comma { - self.word(", "); - } - self.word("suggestion = \""); - self.word(suggestion.to_ident_string()); - self.word("\""); - } - } else if let Some(note) = &deprecation.note { - // We're in form 2: `#[deprecated = "message"]`. - self.word(" = \""); - self.word(note.to_ident_string()); - self.word("\""); - } else { - // We're in form 3: `#[deprecated]`. Nothing to do here. - } - - self.word("]"); - } hir::Attribute::Parsed(pa) => { self.word("#[attr=\""); pa.print_attribute(self); diff --git a/tests/ui/unpretty/deprecated-attr.rs b/tests/ui/unpretty/deprecated-attr.rs index dda362a595e24..24a32d8a9acf9 100644 --- a/tests/ui/unpretty/deprecated-attr.rs +++ b/tests/ui/unpretty/deprecated-attr.rs @@ -1,6 +1,8 @@ //@ compile-flags: -Zunpretty=hir //@ check-pass +// FIXME(jdonszelmann): the pretty printing output for deprecated (and possibly more attrs) is +// slightly broken. #[deprecated] pub struct PlainDeprecated; diff --git a/tests/ui/unpretty/deprecated-attr.stdout b/tests/ui/unpretty/deprecated-attr.stdout index 60dbac1072b9f..675351351a0c6 100644 --- a/tests/ui/unpretty/deprecated-attr.stdout +++ b/tests/ui/unpretty/deprecated-attr.stdout @@ -5,17 +5,24 @@ extern crate std; //@ compile-flags: -Zunpretty=hir //@ check-pass -#[deprecated] +// FIXME(jdonszelmann): the pretty printing output for deprecated (and possibly more attrs) is +// slightly broken. 
+#[attr="Deprecation{deprecation: Deprecation{since: Unspecifiednote: +suggestion: }span: }")] struct PlainDeprecated; -#[deprecated = "here's why this is deprecated"] +#[attr="Deprecation{deprecation: Deprecation{since: Unspecifiednote: +here's why this is deprecatedsuggestion: }span: }")] struct DirectNote; -#[deprecated = "here's why this is deprecated"] +#[attr="Deprecation{deprecation: Deprecation{since: Unspecifiednote: +here's why this is deprecatedsuggestion: }span: }")] struct ExplicitNote; -#[deprecated(since = "1.2.3", note = "here's why this is deprecated"] +#[attr="Deprecation{deprecation: Deprecation{since: NonStandard(1.2.3)note: +here's why this is deprecatedsuggestion: }span: }")] struct SinceAndNote; -#[deprecated(since = "1.2.3", note = "here's why this is deprecated"] +#[attr="Deprecation{deprecation: Deprecation{since: NonStandard(1.2.3)note: +here's why this is deprecatedsuggestion: }span: }")] struct FlippedOrder; From c5b7a9c4b5b06cab36b0e3914854d6ee4a600d46 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Tomasz=20Mi=C4=85sko?= Date: Tue, 4 Mar 2025 20:30:42 +0100 Subject: [PATCH 06/13] Factor out edge breaking code --- .../src/add_call_guards.rs | 24 +++++++++---------- 1 file changed, 11 insertions(+), 13 deletions(-) diff --git a/compiler/rustc_mir_transform/src/add_call_guards.rs b/compiler/rustc_mir_transform/src/add_call_guards.rs index 55694cacd92e0..c64bb30fa2157 100644 --- a/compiler/rustc_mir_transform/src/add_call_guards.rs +++ b/compiler/rustc_mir_transform/src/add_call_guards.rs @@ -40,6 +40,16 @@ impl<'tcx> crate::MirPass<'tcx> for AddCallGuards { let mut new_blocks = Vec::new(); let cur_len = body.basic_blocks.len(); + let mut new_block = |source_info: SourceInfo, is_cleanup: bool, target: BasicBlock| { + let block = BasicBlockData { + statements: vec![], + is_cleanup, + terminator: Some(Terminator { source_info, kind: TerminatorKind::Goto { target } }), + }; + let idx = cur_len + new_blocks.len(); + new_blocks.push(block); + BasicBlock::new(idx) + }; for block in body.basic_blocks_mut() { match block.terminator { @@ -53,19 +63,7 @@ impl<'tcx> crate::MirPass<'tcx> for AddCallGuards { ) || self == &AllCallEdges) => { // It's a critical edge, break it - let call_guard = BasicBlockData { - statements: vec![], - is_cleanup: block.is_cleanup, - terminator: Some(Terminator { - source_info, - kind: TerminatorKind::Goto { target: *destination }, - }), - }; - - // Get the index it will be when inserted into the MIR - let idx = cur_len + new_blocks.len(); - new_blocks.push(call_guard); - *destination = BasicBlock::new(idx); + *destination = new_block(source_info, block.is_cleanup, *destination); } _ => {} } From ccfbfe22922946249088bc9d574f7dc22c8f6c90 Mon Sep 17 00:00:00 2001 From: Yotam Ofek Date: Thu, 6 Mar 2025 16:06:41 +0000 Subject: [PATCH 07/13] `x clippy src/librustdoc --fix` --- src/librustdoc/clean/mod.rs | 5 ++--- src/librustdoc/clean/types.rs | 2 +- src/librustdoc/clean/utils.rs | 4 ++-- src/librustdoc/display.rs | 2 +- src/librustdoc/doctest/extracted.rs | 4 ++-- src/librustdoc/html/format.rs | 10 ++++------ src/librustdoc/html/highlight.rs | 6 +++--- src/librustdoc/html/markdown.rs | 2 +- src/librustdoc/html/render/print_item.rs | 6 ++---- src/librustdoc/html/render/search_index/encode.rs | 4 ++-- src/librustdoc/passes/collect_intra_doc_links.rs | 2 +- src/librustdoc/passes/propagate_stability.rs | 6 +++--- 12 files changed, 24 insertions(+), 29 deletions(-) diff --git a/src/librustdoc/clean/mod.rs b/src/librustdoc/clean/mod.rs index 
bb12e4a706e7d..b45c43e6979f6 100644 --- a/src/librustdoc/clean/mod.rs +++ b/src/librustdoc/clean/mod.rs @@ -2539,7 +2539,7 @@ fn clean_generic_args<'tcx>( ) -> GenericArgs { // FIXME(return_type_notation): Fix RTN parens rendering if let Some((inputs, output)) = generic_args.paren_sugar_inputs_output() { - let inputs = inputs.iter().map(|x| clean_ty(x, cx)).collect::>().into(); + let inputs = inputs.iter().map(|x| clean_ty(x, cx)).collect::>(); let output = match output.kind { hir::TyKind::Tup(&[]) => None, _ => Some(Box::new(clean_ty(output, cx))), @@ -2560,8 +2560,7 @@ fn clean_generic_args<'tcx>( } hir::GenericArg::Infer(_inf) => GenericArg::Infer, }) - .collect::>() - .into(); + .collect::>(); let constraints = generic_args .constraints .iter() diff --git a/src/librustdoc/clean/types.rs b/src/librustdoc/clean/types.rs index 5906a720e0fd3..8d03fce423bc1 100644 --- a/src/librustdoc/clean/types.rs +++ b/src/librustdoc/clean/types.rs @@ -2419,7 +2419,7 @@ impl ConstantKind { ConstantKind::Local { body, .. } | ConstantKind::Anonymous { body } => { rendered_const(tcx, tcx.hir_body(body), tcx.hir_body_owner_def_id(body)) } - ConstantKind::Infer { .. } => "_".to_string(), + ConstantKind::Infer => "_".to_string(), } } diff --git a/src/librustdoc/clean/utils.rs b/src/librustdoc/clean/utils.rs index a284de5229a21..cf649bdb11f0e 100644 --- a/src/librustdoc/clean/utils.rs +++ b/src/librustdoc/clean/utils.rs @@ -223,7 +223,7 @@ fn clean_middle_generic_args_with_constraints<'tcx>( let args = clean_middle_generic_args(cx, args.map_bound(|args| &args[..]), has_self, did); - GenericArgs::AngleBracketed { args: args.into(), constraints } + GenericArgs::AngleBracketed { args: args, constraints } } pub(super) fn clean_middle_path<'tcx>( @@ -524,7 +524,7 @@ pub(crate) fn register_res(cx: &mut DocContext<'_>, res: Res) -> DefId { | AssocConst | Variant | Fn - | TyAlias { .. 
} + | TyAlias | Enum | Trait | Struct diff --git a/src/librustdoc/display.rs b/src/librustdoc/display.rs index ee8dde013ee93..aa0fad265208d 100644 --- a/src/librustdoc/display.rs +++ b/src/librustdoc/display.rs @@ -22,7 +22,7 @@ where let mut iter = self.into_iter(); let Some(first) = iter.next() else { return Ok(()) }; first.fmt(f)?; - while let Some(item) = iter.next() { + for item in iter { f.write_str(sep)?; item.fmt(f)?; } diff --git a/src/librustdoc/doctest/extracted.rs b/src/librustdoc/doctest/extracted.rs index 03c8814a4c960..ce362eabfc4c9 100644 --- a/src/librustdoc/doctest/extracted.rs +++ b/src/librustdoc/doctest/extracted.rs @@ -33,7 +33,7 @@ impl ExtractedDocTests { opts: &super::GlobalTestOptions, options: &RustdocOptions, ) { - let edition = scraped_test.edition(&options); + let edition = scraped_test.edition(options); let ScrapedDocTest { filename, line, langstr, text, name } = scraped_test; @@ -48,7 +48,7 @@ impl ExtractedDocTests { let (full_test_code, size) = doctest.generate_unique_doctest( &text, langstr.test_harness, - &opts, + opts, Some(&opts.crate_name), ); self.doctests.push(ExtractedDocTest { diff --git a/src/librustdoc/html/format.rs b/src/librustdoc/html/format.rs index ea740508c5833..6731837d71843 100644 --- a/src/librustdoc/html/format.rs +++ b/src/librustdoc/html/format.rs @@ -625,8 +625,7 @@ pub(crate) fn href_relative_parts<'fqp>( let dissimilar_part_count = relative_to_fqp.len() - i; let fqp_module = &fqp[i..fqp.len()]; return Box::new( - iter::repeat(sym::dotdot) - .take(dissimilar_part_count) + std::iter::repeat_n(sym::dotdot, dissimilar_part_count) .chain(fqp_module.iter().copied()), ); } @@ -639,7 +638,7 @@ pub(crate) fn href_relative_parts<'fqp>( Ordering::Greater => { // e.g. linking to std::sync from std::sync::atomic let dissimilar_part_count = relative_to_fqp.len() - fqp.len(); - Box::new(iter::repeat(sym::dotdot).take(dissimilar_part_count)) + Box::new(std::iter::repeat_n(sym::dotdot, dissimilar_part_count)) } Ordering::Equal => { // linking to the same module @@ -770,10 +769,9 @@ fn primitive_link_fragment( ExternalLocation::Local => { let cname_sym = ExternalCrate { crate_num: def_id.krate }.name(cx.tcx()); Some(if cx.current.first() == Some(&cname_sym) { - iter::repeat(sym::dotdot).take(cx.current.len() - 1).collect() + std::iter::repeat_n(sym::dotdot, cx.current.len() - 1).collect() } else { - iter::repeat(sym::dotdot) - .take(cx.current.len()) + std::iter::repeat_n(sym::dotdot, cx.current.len()) .chain(iter::once(cname_sym)) .collect() }) diff --git a/src/librustdoc/html/highlight.rs b/src/librustdoc/html/highlight.rs index ed4b97d36252c..8c17b84a42f40 100644 --- a/src/librustdoc/html/highlight.rs +++ b/src/librustdoc/html/highlight.rs @@ -100,7 +100,7 @@ fn write_header( } if let Some(extra) = extra_content { - out.push_str(&extra); + out.push_str(extra); } if class.is_empty() { write_str( @@ -233,7 +233,7 @@ impl TokenHandler<'_, '_, F> { #[inline] fn write_line_number(&mut self, line: u32, extra: &'static str) { - (self.write_line_number)(&mut self.out, line, extra); + (self.write_line_number)(self.out, line, extra); } } @@ -610,7 +610,7 @@ impl Decorations { let (mut starts, mut ends): (Vec<_>, Vec<_>) = info .0 .iter() - .flat_map(|(&kind, ranges)| ranges.into_iter().map(move |&(lo, hi)| ((lo, kind), hi))) + .flat_map(|(&kind, ranges)| ranges.iter().map(move |&(lo, hi)| ((lo, kind), hi))) .unzip(); // Sort the sequences in document order. 
diff --git a/src/librustdoc/html/markdown.rs b/src/librustdoc/html/markdown.rs index d9e49577d3929..8c49bf49a4e56 100644 --- a/src/librustdoc/html/markdown.rs +++ b/src/librustdoc/html/markdown.rs @@ -1792,7 +1792,7 @@ pub(crate) fn markdown_links<'md, R>( } } } else if !c.is_ascii_whitespace() { - while let Some((j, c)) = iter.next() { + for (j, c) in iter.by_ref() { if c.is_ascii_whitespace() { return MarkdownLinkRange::Destination(i + span.start..j + span.start); } diff --git a/src/librustdoc/html/render/print_item.rs b/src/librustdoc/html/render/print_item.rs index c599a84ee44e6..48bbba607b2da 100644 --- a/src/librustdoc/html/render/print_item.rs +++ b/src/librustdoc/html/render/print_item.rs @@ -1192,8 +1192,7 @@ fn item_trait(cx: &Context<'_>, it: &clean::Item, t: &clean::Trait) -> impl fmt: // to already be in the HTML, and will be ignored. // // [JSONP]: https://en.wikipedia.org/wiki/JSONP - let mut js_src_path: UrlPartsBuilder = std::iter::repeat("..") - .take(cx.current.len()) + let mut js_src_path: UrlPartsBuilder = std::iter::repeat_n("..", cx.current.len()) .chain(std::iter::once("trait.impl")) .collect(); if let Some(did) = it.item_id.as_def_id() @@ -1446,8 +1445,7 @@ fn item_type_alias(cx: &Context<'_>, it: &clean::Item, t: &clean::TypeAlias) -> && let get_local = { || cache.paths.get(&self_did).map(|(p, _)| p) } && let Some(self_fqp) = cache.exact_paths.get(&self_did).or_else(get_local) { - let mut js_src_path: UrlPartsBuilder = std::iter::repeat("..") - .take(cx.current.len()) + let mut js_src_path: UrlPartsBuilder = std::iter::repeat_n("..", cx.current.len()) .chain(std::iter::once("type.impl")) .collect(); js_src_path.extend(target_fqp[..target_fqp.len() - 1].iter().copied()); diff --git a/src/librustdoc/html/render/search_index/encode.rs b/src/librustdoc/html/render/search_index/encode.rs index 8816ea650593b..de2f54558ff81 100644 --- a/src/librustdoc/html/render/search_index/encode.rs +++ b/src/librustdoc/html/render/search_index/encode.rs @@ -182,9 +182,9 @@ pub(crate) fn write_bitmap_to_bytes( out.write_all(&[b])?; } if size < NO_OFFSET_THRESHOLD { - 4 + 4 * size + ((size + 7) / 8) + 4 + 4 * size + size.div_ceil(8) } else { - 4 + 8 * size + ((size + 7) / 8) + 4 + 8 * size + size.div_ceil(8) } } else { out.write_all(&u32::to_le_bytes(SERIAL_COOKIE_NO_RUNCONTAINER))?; diff --git a/src/librustdoc/passes/collect_intra_doc_links.rs b/src/librustdoc/passes/collect_intra_doc_links.rs index 440d6331457b0..de4ae6f59158a 100644 --- a/src/librustdoc/passes/collect_intra_doc_links.rs +++ b/src/librustdoc/passes/collect_intra_doc_links.rs @@ -2060,7 +2060,7 @@ fn resolution_failure( return; } Trait - | TyAlias { .. } + | TyAlias | ForeignTy | OpaqueTy | TraitAlias diff --git a/src/librustdoc/passes/propagate_stability.rs b/src/librustdoc/passes/propagate_stability.rs index 8cf39afd55c51..fdab2b087799a 100644 --- a/src/librustdoc/passes/propagate_stability.rs +++ b/src/librustdoc/passes/propagate_stability.rs @@ -39,15 +39,15 @@ impl DocFolder for StabilityPropagator<'_, '_> { let item_stability = self.cx.tcx.lookup_stability(def_id); let inline_stability = item.inline_stmt_id.and_then(|did| self.cx.tcx.lookup_stability(did)); - let is_glob_export = item.inline_stmt_id.and_then(|id| { + let is_glob_export = item.inline_stmt_id.map(|id| { let hir_id = self.cx.tcx.local_def_id_to_hir_id(id); - Some(matches!( + matches!( self.cx.tcx.hir_node(hir_id), rustc_hir::Node::Item(rustc_hir::Item { kind: rustc_hir::ItemKind::Use(_, rustc_hir::UseKind::Glob), .. 
}) - )) + ) }); let own_stability = if let Some(item_stab) = item_stability && let StabilityLevel::Stable { since: _, allowed_through_unstable_modules } = From 329b8a312dc5fbe8a310a30b8ce452c7609e202c Mon Sep 17 00:00:00 2001 From: Yotam Ofek Date: Sat, 1 Mar 2025 21:50:43 +0000 Subject: [PATCH 08/13] Implement `Ord` by-hand instead of `PartialOrd` for `Link` --- src/librustdoc/html/render/sidebar.rs | 18 ++++++++++++------ 1 file changed, 12 insertions(+), 6 deletions(-) diff --git a/src/librustdoc/html/render/sidebar.rs b/src/librustdoc/html/render/sidebar.rs index 64dbaf9083e70..3130815af0bd0 100644 --- a/src/librustdoc/html/render/sidebar.rs +++ b/src/librustdoc/html/render/sidebar.rs @@ -79,7 +79,7 @@ impl<'a> LinkBlock<'a> { } /// A link to an item. Content should not be escaped. -#[derive(Ord, PartialEq, Eq, Hash, Clone)] +#[derive(PartialEq, Eq, Hash, Clone)] pub(crate) struct Link<'a> { /// The content for the anchor tag and title attr name: Cow<'a, str>, @@ -91,13 +91,13 @@ pub(crate) struct Link<'a> { children: Vec>, } -impl PartialOrd for Link<'_> { - fn partial_cmp(&self, other: &Link<'_>) -> Option { +impl Ord for Link<'_> { + fn cmp(&self, other: &Self) -> Ordering { match compare_names(&self.name, &other.name) { - Ordering::Equal => (), - result => return Some(result), + Ordering::Equal => {} + result => return result, } - (&self.name_html, &self.href, &self.children).partial_cmp(&( + (&self.name_html, &self.href, &self.children).cmp(&( &other.name_html, &other.href, &other.children, @@ -105,6 +105,12 @@ impl PartialOrd for Link<'_> { } } +impl PartialOrd for Link<'_> { + fn partial_cmp(&self, other: &Self) -> Option { + Some(self.cmp(other)) + } +} + impl<'a> Link<'a> { pub fn new(href: impl Into>, name: impl Into>) -> Self { Self { href: href.into(), name: name.into(), children: vec![], name_html: None } From 5d259224bd69425b44b4c83adc310cbf72b15855 Mon Sep 17 00:00:00 2001 From: Yotam Ofek Date: Sat, 1 Mar 2025 19:12:21 +0000 Subject: [PATCH 09/13] Manual, post-`clippy --fix` cleanups --- src/librustdoc/clean/mod.rs | 4 ++-- src/librustdoc/clean/utils.rs | 4 ++-- src/librustdoc/html/format.rs | 10 +++++----- src/librustdoc/html/highlight.rs | 2 +- src/librustdoc/html/render/print_item.rs | 16 +++++++--------- src/librustdoc/html/render/search_index.rs | 5 +---- 6 files changed, 18 insertions(+), 23 deletions(-) diff --git a/src/librustdoc/clean/mod.rs b/src/librustdoc/clean/mod.rs index b45c43e6979f6..67bc5cefd58ac 100644 --- a/src/librustdoc/clean/mod.rs +++ b/src/librustdoc/clean/mod.rs @@ -2539,7 +2539,7 @@ fn clean_generic_args<'tcx>( ) -> GenericArgs { // FIXME(return_type_notation): Fix RTN parens rendering if let Some((inputs, output)) = generic_args.paren_sugar_inputs_output() { - let inputs = inputs.iter().map(|x| clean_ty(x, cx)).collect::>(); + let inputs = inputs.iter().map(|x| clean_ty(x, cx)).collect(); let output = match output.kind { hir::TyKind::Tup(&[]) => None, _ => Some(Box::new(clean_ty(output, cx))), @@ -2560,7 +2560,7 @@ fn clean_generic_args<'tcx>( } hir::GenericArg::Infer(_inf) => GenericArg::Infer, }) - .collect::>(); + .collect(); let constraints = generic_args .constraints .iter() diff --git a/src/librustdoc/clean/utils.rs b/src/librustdoc/clean/utils.rs index cf649bdb11f0e..f81db58950cbd 100644 --- a/src/librustdoc/clean/utils.rs +++ b/src/librustdoc/clean/utils.rs @@ -223,7 +223,7 @@ fn clean_middle_generic_args_with_constraints<'tcx>( let args = clean_middle_generic_args(cx, args.map_bound(|args| &args[..]), has_self, did); - 
GenericArgs::AngleBracketed { args: args, constraints } + GenericArgs::AngleBracketed { args, constraints } } pub(super) fn clean_middle_path<'tcx>( @@ -394,7 +394,7 @@ pub(crate) fn print_evaluated_const( fn format_integer_with_underscore_sep(num: &str) -> String { let num_chars: Vec<_> = num.chars().collect(); let mut num_start_index = if num_chars.first() == Some(&'-') { 1 } else { 0 }; - let chunk_size = match num[num_start_index..].as_bytes() { + let chunk_size = match &num.as_bytes()[num_start_index..] { [b'0', b'b' | b'x', ..] => { num_start_index += 2; 4 diff --git a/src/librustdoc/html/format.rs b/src/librustdoc/html/format.rs index 6731837d71843..8b8439a253527 100644 --- a/src/librustdoc/html/format.rs +++ b/src/librustdoc/html/format.rs @@ -623,9 +623,9 @@ pub(crate) fn href_relative_parts<'fqp>( // e.g. linking to std::iter from std::vec (`dissimilar_part_count` will be 1) if f != r { let dissimilar_part_count = relative_to_fqp.len() - i; - let fqp_module = &fqp[i..fqp.len()]; + let fqp_module = &fqp[i..]; return Box::new( - std::iter::repeat_n(sym::dotdot, dissimilar_part_count) + iter::repeat_n(sym::dotdot, dissimilar_part_count) .chain(fqp_module.iter().copied()), ); } @@ -638,7 +638,7 @@ pub(crate) fn href_relative_parts<'fqp>( Ordering::Greater => { // e.g. linking to std::sync from std::sync::atomic let dissimilar_part_count = relative_to_fqp.len() - fqp.len(); - Box::new(std::iter::repeat_n(sym::dotdot, dissimilar_part_count)) + Box::new(iter::repeat_n(sym::dotdot, dissimilar_part_count)) } Ordering::Equal => { // linking to the same module @@ -769,9 +769,9 @@ fn primitive_link_fragment( ExternalLocation::Local => { let cname_sym = ExternalCrate { crate_num: def_id.krate }.name(cx.tcx()); Some(if cx.current.first() == Some(&cname_sym) { - std::iter::repeat_n(sym::dotdot, cx.current.len() - 1).collect() + iter::repeat_n(sym::dotdot, cx.current.len() - 1).collect() } else { - std::iter::repeat_n(sym::dotdot, cx.current.len()) + iter::repeat_n(sym::dotdot, cx.current.len()) .chain(iter::once(cname_sym)) .collect() }) diff --git a/src/librustdoc/html/highlight.rs b/src/librustdoc/html/highlight.rs index 8c17b84a42f40..760a6d0c1a09e 100644 --- a/src/librustdoc/html/highlight.rs +++ b/src/librustdoc/html/highlight.rs @@ -131,7 +131,7 @@ fn write_header( /// * If the other `Class` is unclassified and only contains white characters (backline, /// whitespace, etc), it can be merged. /// * `Class::Ident` is considered the same as unclassified (because it doesn't have an associated -/// CSS class). +/// CSS class). fn can_merge(class1: Option, class2: Option, text: &str) -> bool { match (class1, class2) { (Some(c1), Some(c2)) => c1.is_equal_to(c2), diff --git a/src/librustdoc/html/render/print_item.rs b/src/librustdoc/html/render/print_item.rs index 48bbba607b2da..3c5c2ce19767d 100644 --- a/src/librustdoc/html/render/print_item.rs +++ b/src/librustdoc/html/render/print_item.rs @@ -1,6 +1,6 @@ use std::cmp::Ordering; -use std::fmt; -use std::fmt::{Display, Write as _}; +use std::fmt::{self, Display, Write as _}; +use std::iter; use rinja::Template; use rustc_abi::VariantIdx; @@ -1192,9 +1192,8 @@ fn item_trait(cx: &Context<'_>, it: &clean::Item, t: &clean::Trait) -> impl fmt: // to already be in the HTML, and will be ignored. 
// // [JSONP]: https://en.wikipedia.org/wiki/JSONP - let mut js_src_path: UrlPartsBuilder = std::iter::repeat_n("..", cx.current.len()) - .chain(std::iter::once("trait.impl")) - .collect(); + let mut js_src_path: UrlPartsBuilder = + iter::repeat_n("..", cx.current.len()).chain(iter::once("trait.impl")).collect(); if let Some(did) = it.item_id.as_def_id() && let get_extern = { || cx.shared.cache.external_paths.get(&did).map(|s| &s.0) } && let Some(fqp) = cx.shared.cache.exact_paths.get(&did).or_else(get_extern) @@ -1445,9 +1444,8 @@ fn item_type_alias(cx: &Context<'_>, it: &clean::Item, t: &clean::TypeAlias) -> && let get_local = { || cache.paths.get(&self_did).map(|(p, _)| p) } && let Some(self_fqp) = cache.exact_paths.get(&self_did).or_else(get_local) { - let mut js_src_path: UrlPartsBuilder = std::iter::repeat_n("..", cx.current.len()) - .chain(std::iter::once("type.impl")) - .collect(); + let mut js_src_path: UrlPartsBuilder = + iter::repeat_n("..", cx.current.len()).chain(iter::once("type.impl")).collect(); js_src_path.extend(target_fqp[..target_fqp.len() - 1].iter().copied()); js_src_path.push_fmt(format_args!("{target_type}.{}.js", target_fqp.last().unwrap())); let self_path = fmt::from_fn(|f| self_fqp.iter().joined("::", f)); @@ -1491,7 +1489,7 @@ fn item_union(cx: &Context<'_>, it: &clean::Item, s: &clean::Union) -> impl fmt: fn fields_iter( &self, - ) -> std::iter::Peekable> { + ) -> iter::Peekable> { self.s .fields .iter() diff --git a/src/librustdoc/html/render/search_index.rs b/src/librustdoc/html/render/search_index.rs index 95f617c98390a..b39701fae1d6a 100644 --- a/src/librustdoc/html/render/search_index.rs +++ b/src/librustdoc/html/render/search_index.rs @@ -842,10 +842,7 @@ pub(crate) fn get_function_type_for_search( } clean::ConstantItem(ref c) => make_nullary_fn(&c.type_), clean::StaticItem(ref s) => make_nullary_fn(&s.type_), - clean::StructFieldItem(ref t) => { - let Some(parent) = parent else { - return None; - }; + clean::StructFieldItem(ref t) if let Some(parent) = parent => { let mut rgen: FxIndexMap)> = Default::default(); let output = get_index_type(t, vec![], &mut rgen); From 5c1733e4f4d51d7887a117a06ac4c465d55c3b8d Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Tomasz=20Mi=C4=85sko?= Date: Wed, 5 Mar 2025 13:14:08 +0100 Subject: [PATCH 10/13] Break critical edges in inline asm before code generation An inline asm terminator defines outputs along its target edges -- a fallthrough target and labeled targets. Code generation implements this by inserting code directly into the target blocks. This approach works only if the target blocks don't have other predecessors. Establish required invariant by extending existing code that breaks critical edges before code generation. --- .../src/add_call_guards.rs | 26 +++++++++++++ tests/codegen/asm/critical.rs | 37 +++++++++++++++++++ 2 files changed, 63 insertions(+) create mode 100644 tests/codegen/asm/critical.rs diff --git a/compiler/rustc_mir_transform/src/add_call_guards.rs b/compiler/rustc_mir_transform/src/add_call_guards.rs index c64bb30fa2157..ce7d7a435c844 100644 --- a/compiler/rustc_mir_transform/src/add_call_guards.rs +++ b/compiler/rustc_mir_transform/src/add_call_guards.rs @@ -65,6 +65,32 @@ impl<'tcx> crate::MirPass<'tcx> for AddCallGuards { // It's a critical edge, break it *destination = new_block(source_info, block.is_cleanup, *destination); } + Some(Terminator { + kind: + TerminatorKind::InlineAsm { + asm_macro: InlineAsmMacro::Asm, + ref mut targets, + ref operands, + unwind, + .. 
+ }, + source_info, + }) if self == &CriticalCallEdges => { + let has_outputs = operands.iter().any(|op| { + matches!(op, InlineAsmOperand::InOut { .. } | InlineAsmOperand::Out { .. }) + }); + let has_labels = + operands.iter().any(|op| matches!(op, InlineAsmOperand::Label { .. })); + let invoke = + matches!(unwind, UnwindAction::Cleanup(_) | UnwindAction::Terminate(_)); + if has_outputs && (has_labels || invoke) { + for target in targets.iter_mut() { + if pred_count[*target] > 1 { + *target = new_block(source_info, block.is_cleanup, *target); + } + } + } + } _ => {} } } diff --git a/tests/codegen/asm/critical.rs b/tests/codegen/asm/critical.rs new file mode 100644 index 0000000000000..8c039900cab38 --- /dev/null +++ b/tests/codegen/asm/critical.rs @@ -0,0 +1,37 @@ +//@ only-x86_64 +//@ compile-flags: -C no-prepopulate-passes +#![feature(asm_goto)] +#![feature(asm_goto_with_outputs)] +#![crate_type = "lib"] +use std::arch::asm; + +// Regression test for #137867. Check that critical edges have been split before code generation, +// and so all stores to the asm output occur on disjoint paths without any of them jumping to +// another callbr label. +// +// CHECK-LABEL: @f( +// CHECK: [[OUT:%.*]] = callbr i32 asm +// CHECK-NEXT: to label %[[BB0:.*]] [label %[[BB1:.*]], label %[[BB2:.*]]], +// CHECK: [[BB1]]: +// CHECK-NEXT: store i32 [[OUT]], ptr %a +// CHECK-NEXT: br label %[[BBR:.*]] +// CHECK: [[BB2]]: +// CHECK-NEXT: store i32 [[OUT]], ptr %a +// CHECK-NEXT: br label %[[BBR]] +// CHECK: [[BB0]]: +// CHECK-NEXT: store i32 [[OUT]], ptr %a +// CHECK-NEXT: br label %[[BBR]] +// CHECK: [[BBR]]: +// CHECK-NEXT: [[RET:%.*]] = load i32, ptr %a +// CHECK-NEXT: ret i32 [[RET]] +#[unsafe(no_mangle)] +pub unsafe fn f(mut a: u32) -> u32 { + asm!( + "jmp {} + jmp {}", + label {}, + label {}, + inout("eax") a, + ); + a +} From 02d7fc167fea5013e2300b1c5a3ca75f7a048664 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Tomasz=20Mi=C4=85sko?= Date: Thu, 6 Mar 2025 00:00:00 +0000 Subject: [PATCH 11/13] Factor out check whether an unwind action generates invoke --- .../rustc_mir_transform/src/add_call_guards.rs | 17 ++++++++++------- 1 file changed, 10 insertions(+), 7 deletions(-) diff --git a/compiler/rustc_mir_transform/src/add_call_guards.rs b/compiler/rustc_mir_transform/src/add_call_guards.rs index ce7d7a435c844..bc335cee14797 100644 --- a/compiler/rustc_mir_transform/src/add_call_guards.rs +++ b/compiler/rustc_mir_transform/src/add_call_guards.rs @@ -57,10 +57,7 @@ impl<'tcx> crate::MirPass<'tcx> for AddCallGuards { kind: TerminatorKind::Call { target: Some(ref mut destination), unwind, .. }, source_info, }) if pred_count[*destination] > 1 - && (matches!( - unwind, - UnwindAction::Cleanup(_) | UnwindAction::Terminate(_) - ) || self == &AllCallEdges) => + && (generates_invoke(unwind) || self == &AllCallEdges) => { // It's a critical edge, break it *destination = new_block(source_info, block.is_cleanup, *destination); @@ -81,9 +78,7 @@ impl<'tcx> crate::MirPass<'tcx> for AddCallGuards { }); let has_labels = operands.iter().any(|op| matches!(op, InlineAsmOperand::Label { .. 
})); - let invoke = - matches!(unwind, UnwindAction::Cleanup(_) | UnwindAction::Terminate(_)); - if has_outputs && (has_labels || invoke) { + if has_outputs && (has_labels || generates_invoke(unwind)) { for target in targets.iter_mut() { if pred_count[*target] > 1 { *target = new_block(source_info, block.is_cleanup, *target); @@ -104,3 +99,11 @@ impl<'tcx> crate::MirPass<'tcx> for AddCallGuards { true } } + +/// Returns true if this unwind action is code generated as an invoke as opposed to a call. +fn generates_invoke(unwind: UnwindAction) -> bool { + match unwind { + UnwindAction::Continue | UnwindAction::Unreachable => false, + UnwindAction::Cleanup(_) | UnwindAction::Terminate(_) => true, + } +} From 988eb1997014987caad878699ee1e7c000214508 Mon Sep 17 00:00:00 2001 From: Thalia Archibald Date: Tue, 4 Mar 2025 20:28:38 -0800 Subject: [PATCH 12/13] library: Use size_of from the prelude instead of imported Use `std::mem::{size_of, size_of_val, align_of, align_of_val}` from the prelude instead of importing or qualifying them. These functions were added to all preludes in Rust 1.80. --- library/alloc/benches/slice.rs | 14 +++--- library/alloc/benches/vec.rs | 8 ++-- library/alloc/src/boxed/convert.rs | 28 ++++------- library/alloc/src/boxed/thin.rs | 22 ++++----- .../alloc/src/collections/btree/node/tests.rs | 8 ++-- library/alloc/src/raw_vec.rs | 4 +- library/alloc/src/raw_vec/tests.rs | 3 +- library/alloc/src/slice.rs | 4 +- library/alloc/src/string.rs | 6 +-- library/alloc/src/sync.rs | 4 +- library/alloc/src/vec/in_place_collect.rs | 24 +++++----- library/alloc/src/vec/mod.rs | 10 ++-- library/alloc/tests/arc.rs | 5 +- library/alloc/tests/rc.rs | 5 +- library/alloc/tests/slice.rs | 4 +- .../tests/sort/known_good_stable_sort.rs | 6 +-- library/alloc/tests/thin_box.rs | 3 +- library/alloc/tests/vec.rs | 6 +-- library/core/src/alloc/layout.rs | 4 +- library/core/src/char/convert.rs | 16 ++----- library/core/src/clone.rs | 4 +- library/core/src/hash/mod.rs | 4 +- library/core/src/hash/sip.rs | 6 +-- library/core/src/intrinsics/mod.rs | 8 ++-- library/core/src/iter/adapters/map_windows.rs | 4 +- library/core/src/marker.rs | 5 +- library/core/src/mem/maybe_uninit.rs | 4 +- library/core/src/mem/mod.rs | 48 ++++++++----------- library/core/src/mem/transmutability.rs | 6 +-- library/core/src/num/bignum.rs | 3 +- library/core/src/num/dec2flt/fpu.rs | 1 - library/core/src/num/int_macros.rs | 18 +++---- library/core/src/num/mod.rs | 6 +-- library/core/src/num/nonzero.rs | 5 +- library/core/src/num/uint_macros.rs | 20 ++++---- library/core/src/primitive_docs.rs | 28 +++++------ library/core/src/ptr/alignment.rs | 8 ++-- library/core/src/ptr/const_ptr.rs | 22 ++++----- library/core/src/ptr/metadata.rs | 2 +- library/core/src/ptr/mod.rs | 30 +++++------- library/core/src/ptr/mut_ptr.rs | 22 ++++----- library/core/src/ptr/non_null.rs | 18 ++++--- library/core/src/slice/cmp.rs | 6 +-- library/core/src/slice/memchr.rs | 5 +- library/core/src/slice/mod.rs | 26 +++++----- library/core/src/slice/raw.rs | 8 ++-- library/core/src/slice/rotate.rs | 6 +-- .../core/src/slice/sort/shared/smallsort.rs | 16 +++---- library/core/src/slice/sort/stable/mod.rs | 6 +-- .../core/src/slice/sort/stable/quicksort.rs | 4 +- .../core/src/slice/sort/unstable/quicksort.rs | 8 ++-- library/core/src/str/count.rs | 2 +- library/core/src/str/validations.rs | 3 +- library/core/src/sync/atomic.rs | 4 +- library/coretests/benches/ascii/is_ascii.rs | 6 +-- library/coretests/benches/iter.rs | 5 +- library/coretests/tests/alloc.rs | 1 - 
library/coretests/tests/atomic.rs | 2 - library/coretests/tests/hash/sip.rs | 4 +- library/coretests/tests/nonzero.rs | 1 - library/coretests/tests/ptr.rs | 18 +++---- library/coretests/tests/slice.rs | 4 +- library/panic_unwind/src/emcc.rs | 4 +- library/panic_unwind/src/seh.rs | 4 +- .../proc_macro/src/bridge/selfless_reify.rs | 2 +- library/std/src/fs/tests.rs | 2 +- library/std/src/io/error/tests.rs | 1 - library/std/src/os/fd/tests.rs | 1 - library/std/src/os/unix/io/tests.rs | 1 - library/std/src/os/unix/net/addr.rs | 2 +- library/std/src/os/unix/net/listener.rs | 2 +- library/std/src/os/unix/net/ucred.rs | 14 +++--- library/std/src/os/wasi/io/tests.rs | 1 - library/std/src/os/windows/io/tests.rs | 1 - library/std/src/os/windows/process.rs | 8 +--- library/std/src/os/xous/ffi.rs | 8 ++-- library/std/src/os/xous/services/log.rs | 4 +- library/std/src/sys/alloc/unix.rs | 2 +- library/std/src/sys/alloc/windows/tests.rs | 5 +- library/std/src/sys/io/is_terminal/windows.rs | 1 - library/std/src/sys/net/connection/socket.rs | 16 +++---- .../src/sys/net/connection/socket/hermit.rs | 2 +- .../src/sys/net/connection/socket/solid.rs | 2 +- .../std/src/sys/net/connection/socket/unix.rs | 2 +- .../src/sys/net/connection/socket/wasip2.rs | 2 +- .../src/sys/net/connection/socket/windows.rs | 10 ++-- .../std/src/sys/net/connection/xous/udp.rs | 2 +- library/std/src/sys/pal/itron/thread.rs | 2 +- .../src/sys/pal/sgx/abi/usercalls/alloc.rs | 42 ++++++++-------- library/std/src/sys/pal/uefi/args.rs | 1 - library/std/src/sys/pal/uefi/helpers.rs | 2 +- library/std/src/sys/pal/uefi/process.rs | 2 +- library/std/src/sys/pal/uefi/tests.rs | 2 +- library/std/src/sys/pal/unix/fs.rs | 4 +- library/std/src/sys/pal/unix/futex.rs | 2 +- .../sys/pal/unix/process/process_common.rs | 5 +- .../sys/pal/unix/process/process_fuchsia.rs | 4 +- .../src/sys/pal/unix/process/process_unix.rs | 8 ++-- .../std/src/sys/pal/unix/stack_overflow.rs | 2 +- library/std/src/sys/pal/unix/thread.rs | 6 +-- library/std/src/sys/pal/unix/weak.rs | 2 +- library/std/src/sys/pal/wasi/fd.rs | 8 ++-- library/std/src/sys/pal/wasi/fs.rs | 2 +- library/std/src/sys/pal/wasi/thread.rs | 7 ++- library/std/src/sys/pal/windows/api.rs | 4 +- library/std/src/sys/pal/windows/c.rs | 6 +-- library/std/src/sys/pal/windows/fs.rs | 27 +++++------ library/std/src/sys/pal/windows/futex.rs | 4 +- library/std/src/sys/pal/windows/pipe.rs | 2 +- library/std/src/sys/pal/windows/process.rs | 8 ++-- library/std/src/sys/pal/windows/stdio.rs | 2 +- library/std/src/sys/pal/xous/stdio.rs | 2 +- library/std/src/sys/pal/zkvm/mod.rs | 2 +- library/std/src/sys/personality/dwarf/eh.rs | 5 +- library/std/src/sys/personality/dwarf/mod.rs | 4 +- library/std/src/sys/thread_local/key/xous.rs | 11 ++--- library/std/src/thread/tests.rs | 4 +- library/unwind/src/unwinding.rs | 6 +-- 118 files changed, 392 insertions(+), 486 deletions(-) diff --git a/library/alloc/benches/slice.rs b/library/alloc/benches/slice.rs index c6b46e6a2a188..27b0e6fac0adb 100644 --- a/library/alloc/benches/slice.rs +++ b/library/alloc/benches/slice.rs @@ -1,4 +1,4 @@ -use std::{mem, ptr}; +use std::ptr; use rand::Rng; use rand::distr::{Alphanumeric, SampleString, StandardUniform}; @@ -234,7 +234,7 @@ macro_rules! sort { fn $name(b: &mut Bencher) { let v = $gen($len); b.iter(|| v.clone().$f()); - b.bytes = $len * mem::size_of_val(&$gen(1)[0]) as u64; + b.bytes = $len * size_of_val(&$gen(1)[0]) as u64; } }; } @@ -246,7 +246,7 @@ macro_rules! 
sort_strings { let v = $gen($len); let v = v.iter().map(|s| &**s).collect::>(); b.iter(|| v.clone().$f()); - b.bytes = $len * mem::size_of::<&str>() as u64; + b.bytes = $len * size_of::<&str>() as u64; } }; } @@ -268,7 +268,7 @@ macro_rules! sort_expensive { }); black_box(count); }); - b.bytes = $len * mem::size_of_val(&$gen(1)[0]) as u64; + b.bytes = $len * size_of_val(&$gen(1)[0]) as u64; } }; } @@ -279,7 +279,7 @@ macro_rules! sort_lexicographic { fn $name(b: &mut Bencher) { let v = $gen($len); b.iter(|| v.clone().$f(|x| x.to_string())); - b.bytes = $len * mem::size_of_val(&$gen(1)[0]) as u64; + b.bytes = $len * size_of_val(&$gen(1)[0]) as u64; } }; } @@ -322,7 +322,7 @@ macro_rules! reverse { fn $name(b: &mut Bencher) { // odd length and offset by 1 to be as unaligned as possible let n = 0xFFFFF; - let mut v: Vec<_> = (0..1 + (n / mem::size_of::<$ty>() as u64)).map($f).collect(); + let mut v: Vec<_> = (0..1 + (n / size_of::<$ty>() as u64)).map($f).collect(); b.iter(|| black_box(&mut v[1..]).reverse()); b.bytes = n; } @@ -346,7 +346,7 @@ macro_rules! rotate { ($name:ident, $gen:expr, $len:expr, $mid:expr) => { #[bench] fn $name(b: &mut Bencher) { - let size = mem::size_of_val(&$gen(1)[0]); + let size = size_of_val(&$gen(1)[0]); let mut v = $gen($len * 8 / size); b.iter(|| black_box(&mut v).rotate_left(($mid * 8 + size - 1) / size)); b.bytes = (v.len() * size) as u64; diff --git a/library/alloc/benches/vec.rs b/library/alloc/benches/vec.rs index a725ad6894b9c..1dab71fa1f4f4 100644 --- a/library/alloc/benches/vec.rs +++ b/library/alloc/benches/vec.rs @@ -669,7 +669,7 @@ fn random_sorted_fill(mut seed: u32, buf: &mut [u32]) { // This algorithm was used for Vecs prior to Rust 1.52. fn bench_dedup_slice_truncate(b: &mut Bencher, sz: usize) { let mut template = vec![0u32; sz]; - b.bytes = std::mem::size_of_val(template.as_slice()) as u64; + b.bytes = size_of_val(template.as_slice()) as u64; random_sorted_fill(0x43, &mut template); let mut vec = template.clone(); @@ -691,7 +691,7 @@ fn bench_dedup_slice_truncate(b: &mut Bencher, sz: usize) { // Measures performance of Vec::dedup on random data. 
fn bench_vec_dedup_random(b: &mut Bencher, sz: usize) { let mut template = vec![0u32; sz]; - b.bytes = std::mem::size_of_val(template.as_slice()) as u64; + b.bytes = size_of_val(template.as_slice()) as u64; random_sorted_fill(0x43, &mut template); let mut vec = template.clone(); @@ -708,7 +708,7 @@ fn bench_vec_dedup_random(b: &mut Bencher, sz: usize) { // Measures performance of Vec::dedup when there is no items removed fn bench_vec_dedup_none(b: &mut Bencher, sz: usize) { let mut template = vec![0u32; sz]; - b.bytes = std::mem::size_of_val(template.as_slice()) as u64; + b.bytes = size_of_val(template.as_slice()) as u64; template.chunks_exact_mut(2).for_each(|w| { w[0] = black_box(0); w[1] = black_box(5); @@ -729,7 +729,7 @@ fn bench_vec_dedup_none(b: &mut Bencher, sz: usize) { // Measures performance of Vec::dedup when there is all items removed fn bench_vec_dedup_all(b: &mut Bencher, sz: usize) { let mut template = vec![0u32; sz]; - b.bytes = std::mem::size_of_val(template.as_slice()) as u64; + b.bytes = size_of_val(template.as_slice()) as u64; template.iter_mut().for_each(|w| { *w = black_box(0); }); diff --git a/library/alloc/src/boxed/convert.rs b/library/alloc/src/boxed/convert.rs index 255cefb1e78fb..8062658020239 100644 --- a/library/alloc/src/boxed/convert.rs +++ b/library/alloc/src/boxed/convert.rs @@ -529,7 +529,6 @@ impl<'a, E: Error + 'a> From for Box { /// ``` /// use std::error::Error; /// use std::fmt; - /// use std::mem; /// /// #[derive(Debug)] /// struct AnError; @@ -543,9 +542,9 @@ impl<'a, E: Error + 'a> From for Box { /// impl Error for AnError {} /// /// let an_error = AnError; - /// assert!(0 == mem::size_of_val(&an_error)); + /// assert!(0 == size_of_val(&an_error)); /// let a_boxed_error = Box::::from(an_error); - /// assert!(mem::size_of::>() == mem::size_of_val(&a_boxed_error)) + /// assert!(size_of::>() == size_of_val(&a_boxed_error)) /// ``` fn from(err: E) -> Box { Box::new(err) @@ -563,7 +562,6 @@ impl<'a, E: Error + Send + Sync + 'a> From for Box From for Box::from(an_error); /// assert!( - /// mem::size_of::>() == mem::size_of_val(&a_boxed_error)) + /// size_of::>() == size_of_val(&a_boxed_error)) /// ``` fn from(err: E) -> Box { Box::new(err) @@ -600,12 +598,11 @@ impl<'a> From for Box { /// /// ``` /// use std::error::Error; - /// use std::mem; /// /// let a_string_error = "a string error".to_string(); /// let a_boxed_error = Box::::from(a_string_error); /// assert!( - /// mem::size_of::>() == mem::size_of_val(&a_boxed_error)) + /// size_of::>() == size_of_val(&a_boxed_error)) /// ``` #[inline] fn from(err: String) -> Box { @@ -644,11 +641,10 @@ impl<'a> From for Box { /// /// ``` /// use std::error::Error; - /// use std::mem; /// /// let a_string_error = "a string error".to_string(); /// let a_boxed_error = Box::::from(a_string_error); - /// assert!(mem::size_of::>() == mem::size_of_val(&a_boxed_error)) + /// assert!(size_of::>() == size_of_val(&a_boxed_error)) /// ``` fn from(str_err: String) -> Box { let err1: Box = From::from(str_err); @@ -668,12 +664,11 @@ impl<'a> From<&str> for Box { /// /// ``` /// use std::error::Error; - /// use std::mem; /// /// let a_str_error = "a str error"; /// let a_boxed_error = Box::::from(a_str_error); /// assert!( - /// mem::size_of::>() == mem::size_of_val(&a_boxed_error)) + /// size_of::>() == size_of_val(&a_boxed_error)) /// ``` #[inline] fn from(err: &str) -> Box { @@ -692,11 +687,10 @@ impl<'a> From<&str> for Box { /// /// ``` /// use std::error::Error; - /// use std::mem; /// /// let a_str_error = "a str 
error"; /// let a_boxed_error = Box::::from(a_str_error); - /// assert!(mem::size_of::>() == mem::size_of_val(&a_boxed_error)) + /// assert!(size_of::>() == size_of_val(&a_boxed_error)) /// ``` fn from(err: &str) -> Box { From::from(String::from(err)) @@ -712,13 +706,12 @@ impl<'a, 'b> From> for Box { /// /// ``` /// use std::error::Error; - /// use std::mem; /// use std::borrow::Cow; /// /// let a_cow_str_error = Cow::from("a str error"); /// let a_boxed_error = Box::::from(a_cow_str_error); /// assert!( - /// mem::size_of::>() == mem::size_of_val(&a_boxed_error)) + /// size_of::>() == size_of_val(&a_boxed_error)) /// ``` fn from(err: Cow<'b, str>) -> Box { From::from(String::from(err)) @@ -734,12 +727,11 @@ impl<'a, 'b> From> for Box { /// /// ``` /// use std::error::Error; - /// use std::mem; /// use std::borrow::Cow; /// /// let a_cow_str_error = Cow::from("a str error"); /// let a_boxed_error = Box::::from(a_cow_str_error); - /// assert!(mem::size_of::>() == mem::size_of_val(&a_boxed_error)) + /// assert!(size_of::>() == size_of_val(&a_boxed_error)) /// ``` fn from(err: Cow<'b, str>) -> Box { From::from(String::from(err)) diff --git a/library/alloc/src/boxed/thin.rs b/library/alloc/src/boxed/thin.rs index 78e5aec09b18d..21425b9846e42 100644 --- a/library/alloc/src/boxed/thin.rs +++ b/library/alloc/src/boxed/thin.rs @@ -9,9 +9,8 @@ use core::intrinsics::const_allocate; use core::marker::PhantomData; #[cfg(not(no_global_oom_handling))] use core::marker::Unsize; -use core::mem; #[cfg(not(no_global_oom_handling))] -use core::mem::SizedTypeProperties; +use core::mem::{self, SizedTypeProperties}; use core::ops::{Deref, DerefMut}; use core::ptr::{self, NonNull, Pointee}; @@ -30,7 +29,6 @@ use crate::alloc::{self, Layout, LayoutError}; /// let five = ThinBox::new(5); /// let thin_slice = ThinBox::<[i32]>::new_unsize([1, 2, 3, 4]); /// -/// use std::mem::{size_of, size_of_val}; /// let size_of_ptr = size_of::<*const ()>(); /// assert_eq!(size_of_ptr, size_of_val(&five)); /// assert_eq!(size_of_ptr, size_of_val(&thin_slice)); @@ -114,7 +112,7 @@ impl ThinBox { where T: Unsize, { - if mem::size_of::() == 0 { + if size_of::() == 0 { let ptr = WithOpaqueHeader::new_unsize_zst::(value); ThinBox { ptr, _marker: PhantomData } } else { @@ -283,9 +281,7 @@ impl WithHeader { let ptr = if layout.size() == 0 { // Some paranoia checking, mostly so that the ThinBox tests are // more able to catch issues. - debug_assert!( - value_offset == 0 && mem::size_of::() == 0 && mem::size_of::() == 0 - ); + debug_assert!(value_offset == 0 && size_of::() == 0 && size_of::() == 0); layout.dangling() } else { let ptr = alloc::alloc(layout); @@ -315,7 +311,7 @@ impl WithHeader { Dyn: Pointee + ?Sized, T: Unsize, { - assert!(mem::size_of::() == 0); + assert!(size_of::() == 0); const fn max(a: usize, b: usize) -> usize { if a > b { a } else { b } @@ -329,18 +325,16 @@ impl WithHeader { // FIXME: just call `WithHeader::alloc_layout` with size reset to 0. // Currently that's blocked on `Layout::extend` not being `const fn`. - let alloc_align = - max(mem::align_of::(), mem::align_of::<::Metadata>()); + let alloc_align = max(align_of::(), align_of::<::Metadata>()); - let alloc_size = - max(mem::align_of::(), mem::size_of::<::Metadata>()); + let alloc_size = max(align_of::(), size_of::<::Metadata>()); unsafe { // SAFETY: align is power of two because it is the maximum of two alignments. 
let alloc: *mut u8 = const_allocate(alloc_size, alloc_align); let metadata_offset = - alloc_size.checked_sub(mem::size_of::<::Metadata>()).unwrap(); + alloc_size.checked_sub(size_of::<::Metadata>()).unwrap(); // SAFETY: adding offset within the allocation. let metadata_ptr: *mut ::Metadata = alloc.add(metadata_offset).cast(); @@ -421,7 +415,7 @@ impl WithHeader { } const fn header_size() -> usize { - mem::size_of::() + size_of::() } fn alloc_layout(value_layout: Layout) -> Result<(Layout, usize), LayoutError> { diff --git a/library/alloc/src/collections/btree/node/tests.rs b/library/alloc/src/collections/btree/node/tests.rs index ecd009f11c71a..7d1a2ea480943 100644 --- a/library/alloc/src/collections/btree/node/tests.rs +++ b/library/alloc/src/collections/btree/node/tests.rs @@ -92,8 +92,8 @@ fn test_partial_eq() { #[cfg(target_arch = "x86_64")] #[cfg_attr(any(miri, randomized_layouts), ignore)] // We'd like to run Miri with layout randomization fn test_sizes() { - assert_eq!(core::mem::size_of::>(), 16); - assert_eq!(core::mem::size_of::>(), 16 + CAPACITY * 2 * 8); - assert_eq!(core::mem::size_of::>(), 16 + (CAPACITY + 1) * 8); - assert_eq!(core::mem::size_of::>(), 16 + (CAPACITY * 3 + 1) * 8); + assert_eq!(size_of::>(), 16); + assert_eq!(size_of::>(), 16 + CAPACITY * 2 * 8); + assert_eq!(size_of::>(), 16 + (CAPACITY + 1) * 8); + assert_eq!(size_of::>(), 16 + (CAPACITY * 3 + 1) * 8); } diff --git a/library/alloc/src/raw_vec.rs b/library/alloc/src/raw_vec.rs index b80d1fc788947..70f32fbaab427 100644 --- a/library/alloc/src/raw_vec.rs +++ b/library/alloc/src/raw_vec.rs @@ -480,7 +480,7 @@ impl RawVecInner { // Allocators currently return a `NonNull<[u8]>` whose length // matches the size requested. If that ever changes, the capacity - // here should change to `ptr.len() / mem::size_of::()`. + // here should change to `ptr.len() / size_of::()`. Ok(Self { ptr: Unique::from(ptr.cast()), cap: unsafe { Cap::new_unchecked(capacity) }, @@ -627,7 +627,7 @@ impl RawVecInner { unsafe fn set_ptr_and_cap(&mut self, ptr: NonNull<[u8]>, cap: usize) { // Allocators currently return a `NonNull<[u8]>` whose length matches // the size requested. If that ever changes, the capacity here should - // change to `ptr.len() / mem::size_of::()`. + // change to `ptr.len() / size_of::()`. self.ptr = Unique::from(ptr.cast()); self.cap = unsafe { Cap::new_unchecked(cap) }; } diff --git a/library/alloc/src/raw_vec/tests.rs b/library/alloc/src/raw_vec/tests.rs index d78ded104fb09..700fa922739d6 100644 --- a/library/alloc/src/raw_vec/tests.rs +++ b/library/alloc/src/raw_vec/tests.rs @@ -1,4 +1,3 @@ -use core::mem::size_of; use std::cell::Cell; use super::*; @@ -93,7 +92,7 @@ fn zst_sanity(v: &RawVec) { fn zst() { let cap_err = Err(crate::collections::TryReserveErrorKind::CapacityOverflow.into()); - assert_eq!(std::mem::size_of::(), 0); + assert_eq!(size_of::(), 0); // All these different ways of creating the RawVec produce the same thing. 
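(Aside, not part of the patch: every hunk above relies on the same pattern — `size_of`, `size_of_val`, `align_of` and `align_of_val` resolving as free functions from the prelude, so the `std::mem`/`core::mem` paths and imports can be dropped. A minimal sketch of that pattern, assuming a Rust 1.80+ toolchain where these functions are in the prelude:)

fn main() {
    // No `use std::mem;` needed: the free functions come from the prelude.
    assert_eq!(size_of::<u32>(), 4);

    // Arrays have no padding, so their size is len * size_of::<T>() and their
    // alignment is the element type's alignment.
    let xs = [0u32; 4];
    assert_eq!(size_of_val(&xs), xs.len() * size_of::<u32>());
    assert_eq!(align_of_val(&xs), align_of::<u32>());
}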
diff --git a/library/alloc/src/slice.rs b/library/alloc/src/slice.rs index dcd95ddf00ff5..8baf96850626d 100644 --- a/library/alloc/src/slice.rs +++ b/library/alloc/src/slice.rs @@ -16,7 +16,7 @@ use core::borrow::{Borrow, BorrowMut}; #[cfg(not(no_global_oom_handling))] use core::cmp::Ordering::{self, Less}; #[cfg(not(no_global_oom_handling))] -use core::mem::{self, MaybeUninit}; +use core::mem::MaybeUninit; #[cfg(not(no_global_oom_handling))] use core::ptr; #[unstable(feature = "array_chunks", issue = "74985")] @@ -446,7 +446,7 @@ impl [T] { // Avoids binary-size usage in cases where the alignment doesn't work out to make this // beneficial or on 32-bit platforms. let is_using_u32_as_idx_type_helpful = - const { mem::size_of::<(K, u32)>() < mem::size_of::<(K, usize)>() }; + const { size_of::<(K, u32)>() < size_of::<(K, usize)>() }; // It's possible to instantiate this for u8 and u16 but, doing so is very wasteful in terms // of compile-times and binary-size, the peak saved heap memory for u16 is (u8 + u16) -> 4 diff --git a/library/alloc/src/string.rs b/library/alloc/src/string.rs index f10ef1fca1377..679c8eb12b4e1 100644 --- a/library/alloc/src/string.rs +++ b/library/alloc/src/string.rs @@ -119,8 +119,6 @@ use crate::vec::{self, Vec}; /// the same `char`s: /// /// ``` -/// use std::mem; -/// /// // `s` is ASCII which represents each `char` as one byte /// let s = "hello"; /// assert_eq!(s.len(), 5); @@ -128,7 +126,7 @@ use crate::vec::{self, Vec}; /// // A `char` array with the same contents would be longer because /// // every `char` is four bytes /// let s = ['h', 'e', 'l', 'l', 'o']; -/// let size: usize = s.into_iter().map(|c| mem::size_of_val(&c)).sum(); +/// let size: usize = s.into_iter().map(|c| size_of_val(&c)).sum(); /// assert_eq!(size, 20); /// /// // However, for non-ASCII strings, the difference will be smaller @@ -137,7 +135,7 @@ use crate::vec::{self, Vec}; /// assert_eq!(s.len(), 20); /// /// let s = ['💖', '💖', '💖', '💖', '💖']; -/// let size: usize = s.into_iter().map(|c| mem::size_of_val(&c)).sum(); +/// let size: usize = s.into_iter().map(|c| size_of_val(&c)).sum(); /// assert_eq!(size, 20); /// ``` /// diff --git a/library/alloc/src/sync.rs b/library/alloc/src/sync.rs index dba1449347ac0..1956dda538816 100644 --- a/library/alloc/src/sync.rs +++ b/library/alloc/src/sync.rs @@ -2274,7 +2274,7 @@ impl Arc { #[inline] #[stable(feature = "arc_unique", since = "1.4.0")] pub fn make_mut(this: &mut Self) -> &mut T { - let size_of_val = mem::size_of_val::(&**this); + let size_of_val = size_of_val::(&**this); // Note that we hold both a strong reference and a weak reference. // Thus, releasing our strong reference only will not, by itself, cause @@ -3544,7 +3544,7 @@ impl Default for Arc<[T]> { /// This may or may not share an allocation with other Arcs. #[inline] fn default() -> Self { - if mem::align_of::() <= MAX_STATIC_INNER_SLICE_ALIGNMENT { + if align_of::() <= MAX_STATIC_INNER_SLICE_ALIGNMENT { // We take a reference to the whole struct instead of the ArcInner<[u8; 1]> inside it so // we don't shrink the range of bytes the ptr is allowed to access under Stacked Borrows. // (Miri complains on 32-bit targets with Arc<[Align16]> otherwise.) 
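(Aside, not part of the patch: the `String` documentation hunk above makes the "one byte per ASCII code unit vs. four bytes per `char`" point; the same comparison works with no `use` line at all, again assuming the Rust 1.80+ prelude functions:)

fn main() {
    // "hello" stores five one-byte ASCII code units...
    let s = "hello";
    assert_eq!(s.len(), 5);

    // ...while a `[char; 5]` stores five four-byte scalar values.
    let cs = ['h', 'e', 'l', 'l', 'o'];
    assert_eq!(size_of::<char>(), 4);
    assert_eq!(size_of_val(&cs), 5 * size_of::<char>());
}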
diff --git a/library/alloc/src/vec/in_place_collect.rs b/library/alloc/src/vec/in_place_collect.rs index dffd85f13aa50..b98a118048f2d 100644 --- a/library/alloc/src/vec/in_place_collect.rs +++ b/library/alloc/src/vec/in_place_collect.rs @@ -171,7 +171,7 @@ const fn in_place_collectible( ) -> bool { // Require matching alignments because an alignment-changing realloc is inefficient on many // system allocators and better implementations would require the unstable Allocator trait. - if const { SRC::IS_ZST || DEST::IS_ZST || mem::align_of::() != mem::align_of::() } { + if const { SRC::IS_ZST || DEST::IS_ZST || align_of::() != align_of::() } { return false; } @@ -181,7 +181,7 @@ const fn in_place_collectible( // e.g. // - 1 x [u8; 4] -> 4x u8, via flatten // - 4 x u8 -> 1x [u8; 4], via array_chunks - mem::size_of::() * step_merge.get() >= mem::size_of::() * step_expand.get() + size_of::() * step_merge.get() >= size_of::() * step_expand.get() } // Fall back to other from_iter impls if an overflow occurred in the step merge/expansion // tracking. @@ -190,7 +190,7 @@ const fn in_place_collectible( } const fn needs_realloc(src_cap: usize, dst_cap: usize) -> bool { - if const { mem::align_of::() != mem::align_of::() } { + if const { align_of::() != align_of::() } { // FIXME(const-hack): use unreachable! once that works in const panic!("in_place_collectible() prevents this"); } @@ -199,8 +199,8 @@ const fn needs_realloc(src_cap: usize, dst_cap: usize) -> bool { // the caller will have calculated a `dst_cap` that is an integer multiple of // `src_cap` without remainder. if const { - let src_sz = mem::size_of::(); - let dest_sz = mem::size_of::(); + let src_sz = size_of::(); + let dest_sz = size_of::(); dest_sz != 0 && src_sz % dest_sz == 0 } { return false; @@ -208,7 +208,7 @@ const fn needs_realloc(src_cap: usize, dst_cap: usize) -> bool { // type layouts don't guarantee a fit, so do a runtime check to see if // the allocations happen to match - src_cap > 0 && src_cap * mem::size_of::() != dst_cap * mem::size_of::() + src_cap > 0 && src_cap * size_of::() != dst_cap * size_of::() } /// This provides a shorthand for the source type since local type aliases aren't a thing. @@ -262,7 +262,7 @@ where inner.buf.cast::(), inner.end as *const T, // SAFETY: the multiplication can not overflow, since `inner.cap * size_of::()` is the size of the allocation. - inner.cap.unchecked_mul(mem::size_of::()) / mem::size_of::(), + inner.cap.unchecked_mul(size_of::()) / size_of::(), ) }; @@ -310,14 +310,14 @@ where debug_assert_ne!(dst_cap, 0); unsafe { // The old allocation exists, therefore it must have a valid layout. - let src_align = mem::align_of::(); - let src_size = mem::size_of::().unchecked_mul(src_cap); + let src_align = align_of::(); + let src_size = size_of::().unchecked_mul(src_cap); let old_layout = Layout::from_size_align_unchecked(src_size, src_align); // The allocation must be equal or smaller for in-place iteration to be possible // therefore the new layout must be ≤ the old one and therefore valid. 
- let dst_align = mem::align_of::(); - let dst_size = mem::size_of::().unchecked_mul(dst_cap); + let dst_align = align_of::(); + let dst_size = size_of::().unchecked_mul(dst_cap); let new_layout = Layout::from_size_align_unchecked(dst_size, dst_align); let result = alloc.shrink(dst_buf.cast(), old_layout, new_layout); @@ -325,7 +325,7 @@ where dst_buf = reallocated.cast::(); } } else { - debug_assert_eq!(src_cap * mem::size_of::(), dst_cap * mem::size_of::()); + debug_assert_eq!(src_cap * size_of::(), dst_cap * size_of::()); } mem::forget(dst_guard); diff --git a/library/alloc/src/vec/mod.rs b/library/alloc/src/vec/mod.rs index 701144cc3af22..49878f2b6fa5d 100644 --- a/library/alloc/src/vec/mod.rs +++ b/library/alloc/src/vec/mod.rs @@ -293,7 +293,7 @@ mod spec_extend; /// on an empty Vec, it will not allocate memory. Similarly, if you store zero-sized /// types inside a `Vec`, it will not allocate space for them. *Note that in this case /// the `Vec` might not report a [`capacity`] of 0*. `Vec` will allocate if and only -/// if [mem::size_of::\]\() * [capacity]\() > 0. In general, `Vec`'s allocation +/// if [size_of::\]\() * [capacity]\() > 0. In general, `Vec`'s allocation /// details are very subtle --- if you intend to allocate memory using a `Vec` /// and use it for something else (either to pass to unsafe code, or to build your /// own memory-backed collection), be sure to deallocate this memory by using @@ -393,7 +393,7 @@ mod spec_extend; /// [capacity]: Vec::capacity /// [`capacity`]: Vec::capacity /// [`Vec::capacity`]: Vec::capacity -/// [mem::size_of::\]: core::mem::size_of +/// [size_of::\]: size_of /// [len]: Vec::len /// [`len`]: Vec::len /// [`push`]: Vec::push @@ -1573,7 +1573,7 @@ impl Vec { pub const fn as_slice(&self) -> &[T] { // SAFETY: `slice::from_raw_parts` requires pointee is a contiguous, aligned buffer of size // `len` containing properly-initialized `T`s. Data must not be mutated for the returned - // lifetime. Further, `len * mem::size_of::` <= `ISIZE::MAX`, and allocation does not + // lifetime. Further, `len * size_of::` <= `isize::MAX`, and allocation does not // "wrap" through overflowing memory addresses. // // * Vec API guarantees that self.buf: @@ -1605,7 +1605,7 @@ impl Vec { pub const fn as_mut_slice(&mut self) -> &mut [T] { // SAFETY: `slice::from_raw_parts_mut` requires pointee is a contiguous, aligned buffer of // size `len` containing properly-initialized `T`s. Data must not be accessed through any - // other pointer for the returned lifetime. Further, `len * mem::size_of::` <= + // other pointer for the returned lifetime. Further, `len * size_of::` <= // `ISIZE::MAX` and allocation does not "wrap" through overflowing memory addresses. // // * Vec API guarantees that self.buf: @@ -2693,7 +2693,7 @@ impl Vec { let len = self.len; // SAFETY: The maximum capacity of `Vec` is `isize::MAX` bytes, so the maximum value can - // be returned is `usize::checked_div(mem::size_of::()).unwrap_or(usize::MAX)`, which + // be returned is `usize::checked_div(size_of::()).unwrap_or(usize::MAX)`, which // matches the definition of `T::MAX_SLICE_LEN`. 
unsafe { intrinsics::assume(len <= T::MAX_SLICE_LEN) }; diff --git a/library/alloc/tests/arc.rs b/library/alloc/tests/arc.rs index a259c0131ecdf..0baa50f439b37 100644 --- a/library/alloc/tests/arc.rs +++ b/library/alloc/tests/arc.rs @@ -1,7 +1,6 @@ use std::any::Any; use std::cell::{Cell, RefCell}; use std::iter::TrustedLen; -use std::mem; use std::sync::{Arc, Weak}; #[test] @@ -129,7 +128,7 @@ fn shared_from_iter_trustedlen_normal() { let vec = iter.clone().collect::>(); let rc = iter.collect::>(); assert_eq!(&*vec, &*rc); - assert_eq!(mem::size_of::>() * SHARED_ITER_MAX as usize, mem::size_of_val(&*rc)); + assert_eq!(size_of::>() * SHARED_ITER_MAX as usize, size_of_val(&*rc)); // Clone a bit and let these get dropped. { @@ -145,7 +144,7 @@ fn shared_from_iter_trustedlen_normal() { let vec = iter.clone().collect::>(); let rc = iter.collect::>(); assert_eq!(&*vec, &*rc); - assert_eq!(0, mem::size_of_val(&*rc)); + assert_eq!(0, size_of_val(&*rc)); { let _rc_2 = rc.clone(); let _rc_3 = rc.clone(); diff --git a/library/alloc/tests/rc.rs b/library/alloc/tests/rc.rs index 451765d724283..9d82a7621a216 100644 --- a/library/alloc/tests/rc.rs +++ b/library/alloc/tests/rc.rs @@ -1,7 +1,6 @@ use std::any::Any; use std::cell::{Cell, RefCell}; use std::iter::TrustedLen; -use std::mem; use std::rc::{Rc, Weak}; #[test] @@ -125,7 +124,7 @@ fn shared_from_iter_trustedlen_normal() { let vec = iter.clone().collect::>(); let rc = iter.collect::>(); assert_eq!(&*vec, &*rc); - assert_eq!(mem::size_of::>() * SHARED_ITER_MAX as usize, mem::size_of_val(&*rc)); + assert_eq!(size_of::>() * SHARED_ITER_MAX as usize, size_of_val(&*rc)); // Clone a bit and let these get dropped. { @@ -141,7 +140,7 @@ fn shared_from_iter_trustedlen_normal() { let vec = iter.clone().collect::>(); let rc = iter.collect::>(); assert_eq!(&*vec, &*rc); - assert_eq!(0, mem::size_of_val(&*rc)); + assert_eq!(0, size_of_val(&*rc)); { let _rc_2 = rc.clone(); let _rc_3 = rc.clone(); diff --git a/library/alloc/tests/slice.rs b/library/alloc/tests/slice.rs index f990a41b679fa..2516563187f2d 100644 --- a/library/alloc/tests/slice.rs +++ b/library/alloc/tests/slice.rs @@ -1,7 +1,7 @@ use std::cmp::Ordering::{Equal, Greater, Less}; use std::convert::identity; use std::rc::Rc; -use std::{fmt, mem, panic}; +use std::{fmt, panic}; fn square(n: usize) -> usize { n * n @@ -73,7 +73,7 @@ fn test_len_divzero() { let v0: &[Z] = &[]; let v1: &[Z] = &[[]]; let v2: &[Z] = &[[], []]; - assert_eq!(mem::size_of::(), 0); + assert_eq!(size_of::(), 0); assert_eq!(v0.len(), 0); assert_eq!(v1.len(), 1); assert_eq!(v2.len(), 2); diff --git a/library/alloc/tests/sort/known_good_stable_sort.rs b/library/alloc/tests/sort/known_good_stable_sort.rs index f8615435fc2a7..2df891462538d 100644 --- a/library/alloc/tests/sort/known_good_stable_sort.rs +++ b/library/alloc/tests/sort/known_good_stable_sort.rs @@ -5,7 +5,7 @@ // Based on https://github.com/voultapher/tiny-sort-rs. use alloc::alloc::{Layout, alloc, dealloc}; -use std::{mem, ptr}; +use std::ptr; /// Sort `v` preserving initial order of equal elements. /// @@ -26,7 +26,7 @@ pub fn sort(v: &mut [T]) { #[inline(always)] fn stable_sort bool>(v: &mut [T], mut is_less: F) { - if mem::size_of::() == 0 { + if size_of::() == 0 { return; } @@ -166,7 +166,7 @@ struct BufGuard { impl BufGuard { // SAFETY: The caller has to ensure that len is not 0 and that T is not a ZST. 
unsafe fn new(len: usize) -> Self { - debug_assert!(len > 0 && mem::size_of::() > 0); + debug_assert!(len > 0 && size_of::() > 0); // SAFETY: See function safety description. let layout = unsafe { unwrap_unchecked(Layout::array::(len).ok()) }; diff --git a/library/alloc/tests/thin_box.rs b/library/alloc/tests/thin_box.rs index e008b0cc35718..4c46b61412796 100644 --- a/library/alloc/tests/thin_box.rs +++ b/library/alloc/tests/thin_box.rs @@ -1,5 +1,4 @@ use core::fmt::Debug; -use core::mem::size_of; use std::boxed::ThinBox; #[test] @@ -52,7 +51,7 @@ fn verify_aligned(ptr: *const T) { ptr.is_aligned() && !ptr.is_null(), "misaligned ThinBox data; valid pointers to `{ty}` should be aligned to {align}: {ptr:p}", ty = core::any::type_name::(), - align = core::mem::align_of::(), + align = align_of::(), ); } diff --git a/library/alloc/tests/vec.rs b/library/alloc/tests/vec.rs index fe1db56414e0c..f430d979fa848 100644 --- a/library/alloc/tests/vec.rs +++ b/library/alloc/tests/vec.rs @@ -11,14 +11,14 @@ use std::borrow::Cow; use std::cell::Cell; use std::collections::TryReserveErrorKind::*; use std::fmt::Debug; +use std::hint; use std::iter::InPlaceIterable; -use std::mem::{size_of, swap}; +use std::mem::swap; use std::ops::Bound::*; use std::panic::{AssertUnwindSafe, catch_unwind}; use std::rc::Rc; use std::sync::atomic::{AtomicU32, Ordering}; use std::vec::{Drain, IntoIter}; -use std::{hint, mem}; struct DropCounter<'a> { count: &'a mut u32, @@ -1134,7 +1134,7 @@ fn test_into_iter_zst() { impl Drop for AlignedZstWithDrop { fn drop(&mut self) { let addr = self as *mut _ as usize; - assert!(hint::black_box(addr) % mem::align_of::() == 0); + assert!(hint::black_box(addr) % align_of::() == 0); } } diff --git a/library/core/src/alloc/layout.rs b/library/core/src/alloc/layout.rs index 17f4d68867e1e..1595a3af883d1 100644 --- a/library/core/src/alloc/layout.rs +++ b/library/core/src/alloc/layout.rs @@ -17,7 +17,7 @@ use crate::{assert_unsafe_precondition, fmt, mem}; // * https://github.com/rust-lang/rust/pull/72189 // * https://github.com/rust-lang/rust/pull/79827 const fn size_align() -> (usize, usize) { - (mem::size_of::(), mem::align_of::()) + (size_of::(), align_of::()) } /// Layout of a block of memory. 
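(Aside, not part of the patch: the private `size_align::<T>()` helper touched above is just the `(size, align)` pair that `Layout` exposes publicly; a short sketch of that equivalence using only stable API and the prelude functions:)

use std::alloc::Layout;

fn main() {
    // Layout::new::<T>() packages exactly (size_of::<T>(), align_of::<T>()).
    let l = Layout::new::<u64>();
    assert_eq!(l.size(), size_of::<u64>());
    assert_eq!(l.align(), align_of::<u64>());

    // Layout::for_value does the same for a concrete value, matching
    // size_of_val / align_of_val.
    let v = [1u16, 2, 3];
    let lv = Layout::for_value(&v);
    assert_eq!(lv.size(), size_of_val(&v));
    assert_eq!(lv.align(), align_of_val(&v));
}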
@@ -182,7 +182,7 @@ impl Layout { #[must_use] #[inline] pub const fn for_value(t: &T) -> Self { - let (size, align) = (mem::size_of_val(t), mem::align_of_val(t)); + let (size, align) = (size_of_val(t), align_of_val(t)); // SAFETY: see rationale in `new` for why this is using the unsafe variant unsafe { Layout::from_size_align_unchecked(size, align) } } diff --git a/library/core/src/char/convert.rs b/library/core/src/char/convert.rs index 73ab4f1e52ade..ac808038f8900 100644 --- a/library/core/src/char/convert.rs +++ b/library/core/src/char/convert.rs @@ -40,11 +40,9 @@ impl From for u32 { /// # Examples /// /// ``` - /// use std::mem; - /// /// let c = 'c'; /// let u = u32::from(c); - /// assert!(4 == mem::size_of_val(&u)) + /// assert!(4 == size_of_val(&u)) /// ``` #[inline] fn from(c: char) -> Self { @@ -59,11 +57,9 @@ impl From for u64 { /// # Examples /// /// ``` - /// use std::mem; - /// /// let c = '👤'; /// let u = u64::from(c); - /// assert!(8 == mem::size_of_val(&u)) + /// assert!(8 == size_of_val(&u)) /// ``` #[inline] fn from(c: char) -> Self { @@ -80,11 +76,9 @@ impl From for u128 { /// # Examples /// /// ``` - /// use std::mem; - /// /// let c = '⚙'; /// let u = u128::from(c); - /// assert!(16 == mem::size_of_val(&u)) + /// assert!(16 == size_of_val(&u)) /// ``` #[inline] fn from(c: char) -> Self { @@ -167,11 +161,9 @@ impl From for char { /// # Examples /// /// ``` - /// use std::mem; - /// /// let u = 32 as u8; /// let c = char::from(u); - /// assert!(4 == mem::size_of_val(&c)) + /// assert!(4 == size_of_val(&c)) /// ``` #[inline] fn from(i: u8) -> Self { diff --git a/library/core/src/clone.rs b/library/core/src/clone.rs index 00300328b64c1..9d64348289cf2 100644 --- a/library/core/src/clone.rs +++ b/library/core/src/clone.rs @@ -244,8 +244,8 @@ pub unsafe trait CloneToUninit { /// /// Behavior is undefined if any of the following conditions are violated: /// - /// * `dst` must be [valid] for writes for `std::mem::size_of_val(self)` bytes. - /// * `dst` must be properly aligned to `std::mem::align_of_val(self)`. + /// * `dst` must be [valid] for writes for `size_of_val(self)` bytes. + /// * `dst` must be properly aligned to `align_of_val(self)`. /// /// [valid]: crate::ptr#safety /// [pointer metadata]: crate::ptr::metadata() diff --git a/library/core/src/hash/mod.rs b/library/core/src/hash/mod.rs index 7a6630c82d0da..f7b874b26bb74 100644 --- a/library/core/src/hash/mod.rs +++ b/library/core/src/hash/mod.rs @@ -801,7 +801,7 @@ impl Eq for BuildHasherDefault {} mod impls { use super::*; - use crate::{mem, slice}; + use crate::slice; macro_rules! impl_write { ($(($ty:ident, $meth:ident),)*) => {$( @@ -814,7 +814,7 @@ mod impls { #[inline] fn hash_slice(data: &[$ty], state: &mut H) { - let newlen = mem::size_of_val(data); + let newlen = size_of_val(data); let ptr = data.as_ptr() as *const u8; // SAFETY: `ptr` is valid and aligned, as this macro is only used // for numeric primitives which have no padding. The new slice only diff --git a/library/core/src/hash/sip.rs b/library/core/src/hash/sip.rs index 6ea3241c59354..780e522c48ebf 100644 --- a/library/core/src/hash/sip.rs +++ b/library/core/src/hash/sip.rs @@ -3,7 +3,7 @@ #![allow(deprecated)] // the types in this module are deprecated use crate::marker::PhantomData; -use crate::{cmp, mem, ptr}; +use crate::{cmp, ptr}; /// An implementation of SipHash 1-3. /// @@ -99,12 +99,12 @@ macro_rules! compress { /// `$i..$i+size_of::<$int_ty>()`, so that must be in-bounds. macro_rules! 
load_int_le { ($buf:expr, $i:expr, $int_ty:ident) => {{ - debug_assert!($i + mem::size_of::<$int_ty>() <= $buf.len()); + debug_assert!($i + size_of::<$int_ty>() <= $buf.len()); let mut data = 0 as $int_ty; ptr::copy_nonoverlapping( $buf.as_ptr().add($i), &mut data as *mut _ as *mut u8, - mem::size_of::<$int_ty>(), + size_of::<$int_ty>(), ); data.to_le() }}; diff --git a/library/core/src/intrinsics/mod.rs b/library/core/src/intrinsics/mod.rs index 38a60338e74ed..b405e1d413ae4 100644 --- a/library/core/src/intrinsics/mod.rs +++ b/library/core/src/intrinsics/mod.rs @@ -3340,7 +3340,7 @@ pub unsafe fn vtable_align(_ptr: *const ()) -> usize; /// More specifically, this is the offset in bytes between successive /// items of the same type, including alignment padding. /// -/// The stabilized version of this intrinsic is [`core::mem::size_of`]. +/// The stabilized version of this intrinsic is [`size_of`]. #[rustc_nounwind] #[unstable(feature = "core_intrinsics", issue = "none")] #[rustc_intrinsic_const_stable_indirect] @@ -3354,7 +3354,7 @@ pub const fn size_of() -> usize; /// Therefore, implementations must not require the user to uphold /// any safety invariants. /// -/// The stabilized version of this intrinsic is [`core::mem::align_of`]. +/// The stabilized version of this intrinsic is [`align_of`]. #[rustc_nounwind] #[unstable(feature = "core_intrinsics", issue = "none")] #[rustc_intrinsic_const_stable_indirect] @@ -3386,7 +3386,7 @@ pub const fn variant_count() -> usize; /// The size of the referenced value in bytes. /// -/// The stabilized version of this intrinsic is [`crate::mem::size_of_val`]. +/// The stabilized version of this intrinsic is [`size_of_val`]. /// /// # Safety /// @@ -3399,7 +3399,7 @@ pub const unsafe fn size_of_val(_ptr: *const T) -> usize; /// The required alignment of the referenced value. /// -/// The stabilized version of this intrinsic is [`core::mem::align_of_val`]. +/// The stabilized version of this intrinsic is [`align_of_val`]. /// /// # Safety /// diff --git a/library/core/src/iter/adapters/map_windows.rs b/library/core/src/iter/adapters/map_windows.rs index cb13023c85c41..a9c07fee2a91e 100644 --- a/library/core/src/iter/adapters/map_windows.rs +++ b/library/core/src/iter/adapters/map_windows.rs @@ -1,5 +1,5 @@ use crate::iter::FusedIterator; -use crate::mem::{self, MaybeUninit}; +use crate::mem::MaybeUninit; use crate::{fmt, ptr}; /// An iterator over the mapped windows of another iterator. @@ -50,7 +50,7 @@ impl MapWindows { assert!(N != 0, "array in `Iterator::map_windows` must contain more than 0 elements"); // Only ZST arrays' length can be so large. - if mem::size_of::() == 0 { + if size_of::() == 0 { assert!( N.checked_mul(2).is_some(), "array size of `Iterator::map_windows` is too large" diff --git a/library/core/src/marker.rs b/library/core/src/marker.rs index b0571bf7247af..b6fd823f77eae 100644 --- a/library/core/src/marker.rs +++ b/library/core/src/marker.rs @@ -405,7 +405,7 @@ marker_impls! 
{ /// /// [`Vec`]: ../../std/vec/struct.Vec.html /// [`String`]: ../../std/string/struct.String.html -/// [`size_of::`]: crate::mem::size_of +/// [`size_of::`]: size_of /// [impls]: #implementors #[stable(feature = "rust1", since = "1.0.0")] #[lang = "copy"] @@ -731,7 +731,6 @@ impl !Sync for *mut T {} /// # } /// # fn convert_params(_: ParamType) -> usize { 42 } /// use std::marker::PhantomData; -/// use std::mem; /// /// struct ExternalResource { /// resource_handle: *mut (), @@ -740,7 +739,7 @@ impl !Sync for *mut T {} /// /// impl ExternalResource { /// fn new() -> Self { -/// let size_of_res = mem::size_of::(); +/// let size_of_res = size_of::(); /// Self { /// resource_handle: foreign_lib::new(size_of_res), /// resource_type: PhantomData, diff --git a/library/core/src/mem/maybe_uninit.rs b/library/core/src/mem/maybe_uninit.rs index 067371c1b58ab..ce84f105e5c50 100644 --- a/library/core/src/mem/maybe_uninit.rs +++ b/library/core/src/mem/maybe_uninit.rs @@ -203,7 +203,7 @@ use crate::{fmt, intrinsics, ptr, slice}; /// `MaybeUninit` is guaranteed to have the same size, alignment, and ABI as `T`: /// /// ```rust -/// use std::mem::{MaybeUninit, size_of, align_of}; +/// use std::mem::MaybeUninit; /// assert_eq!(size_of::>(), size_of::()); /// assert_eq!(align_of::>(), align_of::()); /// ``` @@ -215,7 +215,7 @@ use crate::{fmt, intrinsics, ptr, slice}; /// optimizations, potentially resulting in a larger size: /// /// ```rust -/// # use std::mem::{MaybeUninit, size_of}; +/// # use std::mem::MaybeUninit; /// assert_eq!(size_of::>(), 1); /// assert_eq!(size_of::>>(), 2); /// ``` diff --git a/library/core/src/mem/mod.rs b/library/core/src/mem/mod.rs index b9bb6d6a13f7f..caab7a6ddb52f 100644 --- a/library/core/src/mem/mod.rs +++ b/library/core/src/mem/mod.rs @@ -226,31 +226,27 @@ pub fn forget_unsized(t: T) { /// # Examples /// /// ``` -/// use std::mem; -/// /// // Some primitives -/// assert_eq!(4, mem::size_of::()); -/// assert_eq!(8, mem::size_of::()); -/// assert_eq!(0, mem::size_of::<()>()); +/// assert_eq!(4, size_of::()); +/// assert_eq!(8, size_of::()); +/// assert_eq!(0, size_of::<()>()); /// /// // Some arrays -/// assert_eq!(8, mem::size_of::<[i32; 2]>()); -/// assert_eq!(12, mem::size_of::<[i32; 3]>()); -/// assert_eq!(0, mem::size_of::<[i32; 0]>()); +/// assert_eq!(8, size_of::<[i32; 2]>()); +/// assert_eq!(12, size_of::<[i32; 3]>()); +/// assert_eq!(0, size_of::<[i32; 0]>()); /// /// /// // Pointer size equality -/// assert_eq!(mem::size_of::<&i32>(), mem::size_of::<*const i32>()); -/// assert_eq!(mem::size_of::<&i32>(), mem::size_of::>()); -/// assert_eq!(mem::size_of::<&i32>(), mem::size_of::>()); -/// assert_eq!(mem::size_of::>(), mem::size_of::>>()); +/// assert_eq!(size_of::<&i32>(), size_of::<*const i32>()); +/// assert_eq!(size_of::<&i32>(), size_of::>()); +/// assert_eq!(size_of::<&i32>(), size_of::>()); +/// assert_eq!(size_of::>(), size_of::>>()); /// ``` /// /// Using `#[repr(C)]`. /// /// ``` -/// use std::mem; -/// /// #[repr(C)] /// struct FieldStruct { /// first: u8, @@ -265,13 +261,13 @@ pub fn forget_unsized(t: T) { /// // The size of the third field is 1, so add 1 to the size. Size is 5. /// // Finally, the alignment of the struct is 2 (because the largest alignment amongst its /// // fields is 2), so add 1 to the size for padding. Size is 6. -/// assert_eq!(6, mem::size_of::()); +/// assert_eq!(6, size_of::()); /// /// #[repr(C)] /// struct TupleStruct(u8, u16, u8); /// /// // Tuple structs follow the same rules. 
-/// assert_eq!(6, mem::size_of::()); +/// assert_eq!(6, size_of::()); /// /// // Note that reordering the fields can lower the size. We can remove both padding bytes /// // by putting `third` before `second`. @@ -282,7 +278,7 @@ pub fn forget_unsized(t: T) { /// second: u16 /// } /// -/// assert_eq!(4, mem::size_of::()); +/// assert_eq!(4, size_of::()); /// /// // Union size is the size of the largest field. /// #[repr(C)] @@ -291,7 +287,7 @@ pub fn forget_unsized(t: T) { /// larger: u16 /// } /// -/// assert_eq!(2, mem::size_of::()); +/// assert_eq!(2, size_of::()); /// ``` /// /// [alignment]: align_of @@ -320,13 +316,11 @@ pub const fn size_of() -> usize { /// # Examples /// /// ``` -/// use std::mem; -/// -/// assert_eq!(4, mem::size_of_val(&5i32)); +/// assert_eq!(4, size_of_val(&5i32)); /// /// let x: [u8; 13] = [0; 13]; /// let y: &[u8] = &x; -/// assert_eq!(13, mem::size_of_val(y)); +/// assert_eq!(13, size_of_val(y)); /// ``` /// /// [`size_of::()`]: size_of @@ -381,7 +375,7 @@ pub const fn size_of_val(val: &T) -> usize { /// #![feature(layout_for_ptr)] /// use std::mem; /// -/// assert_eq!(4, mem::size_of_val(&5i32)); +/// assert_eq!(4, size_of_val(&5i32)); /// /// let x: [u8; 13] = [0; 13]; /// let y: &[u8] = &x; @@ -454,9 +448,7 @@ pub fn min_align_of_val(val: &T) -> usize { /// # Examples /// /// ``` -/// use std::mem; -/// -/// assert_eq!(4, mem::align_of::()); +/// assert_eq!(4, align_of::()); /// ``` #[inline(always)] #[must_use] @@ -477,9 +469,7 @@ pub const fn align_of() -> usize { /// # Examples /// /// ``` -/// use std::mem; -/// -/// assert_eq!(4, mem::align_of_val(&5i32)); +/// assert_eq!(4, align_of_val(&5i32)); /// ``` #[inline] #[must_use] diff --git a/library/core/src/mem/transmutability.rs b/library/core/src/mem/transmutability.rs index 7b920d7a777ca..782b826448a32 100644 --- a/library/core/src/mem/transmutability.rs +++ b/library/core/src/mem/transmutability.rs @@ -153,7 +153,7 @@ pub struct Assume { /// /// ```compile_fail,E0277 /// #![feature(transmutability)] - /// use core::mem::{align_of, TransmuteFrom}; + /// use core::mem::TransmuteFrom; /// /// assert_eq!(align_of::<[u8; 2]>(), 1); /// assert_eq!(align_of::(), 2); @@ -172,7 +172,7 @@ pub struct Assume { /// /// ```rust /// #![feature(pointer_is_aligned_to, transmutability)] - /// use core::mem::{align_of, Assume, TransmuteFrom}; + /// use core::mem::{Assume, TransmuteFrom}; /// /// let src: &[u8; 2] = &[0xFF, 0xFF]; /// @@ -337,7 +337,7 @@ impl Assume { /// transmutability, /// )] /// #![allow(incomplete_features)] - /// use core::mem::{align_of, Assume, TransmuteFrom}; + /// use core::mem::{Assume, TransmuteFrom}; /// /// /// Attempts to transmute `src` to `&Dst`. /// /// diff --git a/library/core/src/num/bignum.rs b/library/core/src/num/bignum.rs index 2a47c89e2aee2..40e6eaf075ed5 100644 --- a/library/core/src/num/bignum.rs +++ b/library/core/src/num/bignum.rs @@ -253,12 +253,11 @@ macro_rules! define_bignum { /// Multiplies itself by `5^e` and returns its own mutable reference. pub fn mul_pow5(&mut self, mut e: usize) -> &mut $name { - use crate::mem; use crate::num::bignum::SMALL_POW5; // There are exactly n trailing zeros on 2^n, and the only relevant digit sizes // are consecutive powers of two, so this is well suited index for the table. 
- let table_index = mem::size_of::<$ty>().trailing_zeros() as usize; + let table_index = size_of::<$ty>().trailing_zeros() as usize; let (small_power, small_e) = SMALL_POW5[table_index]; let small_power = small_power as $ty; diff --git a/library/core/src/num/dec2flt/fpu.rs b/library/core/src/num/dec2flt/fpu.rs index daeee1755b0b5..8aad087ec1bc4 100644 --- a/library/core/src/num/dec2flt/fpu.rs +++ b/library/core/src/num/dec2flt/fpu.rs @@ -22,7 +22,6 @@ pub(super) use fpu_precision::set_precision; #[cfg(all(target_arch = "x86", not(target_feature = "sse2")))] mod fpu_precision { use core::arch::asm; - use core::mem::size_of; /// A structure used to preserve the original value of the FPU control word, so that it can be /// restored when the structure is dropped. diff --git a/library/core/src/num/int_macros.rs b/library/core/src/num/int_macros.rs index 7d99aaa173143..a72ca4bcb059b 100644 --- a/library/core/src/num/int_macros.rs +++ b/library/core/src/num/int_macros.rs @@ -3627,7 +3627,7 @@ macro_rules! int_impl { #[must_use = "this returns the result of the operation, \ without modifying the original"] #[inline] - pub const fn to_be_bytes(self) -> [u8; mem::size_of::()] { + pub const fn to_be_bytes(self) -> [u8; size_of::()] { self.to_be().to_ne_bytes() } @@ -3647,7 +3647,7 @@ macro_rules! int_impl { #[must_use = "this returns the result of the operation, \ without modifying the original"] #[inline] - pub const fn to_le_bytes(self) -> [u8; mem::size_of::()] { + pub const fn to_le_bytes(self) -> [u8; size_of::()] { self.to_le().to_ne_bytes() } @@ -3683,7 +3683,7 @@ macro_rules! int_impl { #[must_use = "this returns the result of the operation, \ without modifying the original"] #[inline] - pub const fn to_ne_bytes(self) -> [u8; mem::size_of::()] { + pub const fn to_ne_bytes(self) -> [u8; size_of::()] { // SAFETY: integers are plain old datatypes so we can always transmute them to // arrays of bytes unsafe { mem::transmute(self) } @@ -3705,7 +3705,7 @@ macro_rules! int_impl { /// /// ``` #[doc = concat!("fn read_be_", stringify!($SelfT), "(input: &mut &[u8]) -> ", stringify!($SelfT), " {")] - #[doc = concat!(" let (int_bytes, rest) = input.split_at(std::mem::size_of::<", stringify!($SelfT), ">());")] + #[doc = concat!(" let (int_bytes, rest) = input.split_at(size_of::<", stringify!($SelfT), ">());")] /// *input = rest; #[doc = concat!(" ", stringify!($SelfT), "::from_be_bytes(int_bytes.try_into().unwrap())")] /// } @@ -3714,7 +3714,7 @@ macro_rules! int_impl { #[rustc_const_stable(feature = "const_int_conversion", since = "1.44.0")] #[must_use] #[inline] - pub const fn from_be_bytes(bytes: [u8; mem::size_of::()]) -> Self { + pub const fn from_be_bytes(bytes: [u8; size_of::()]) -> Self { Self::from_be(Self::from_ne_bytes(bytes)) } @@ -3734,7 +3734,7 @@ macro_rules! int_impl { /// /// ``` #[doc = concat!("fn read_le_", stringify!($SelfT), "(input: &mut &[u8]) -> ", stringify!($SelfT), " {")] - #[doc = concat!(" let (int_bytes, rest) = input.split_at(std::mem::size_of::<", stringify!($SelfT), ">());")] + #[doc = concat!(" let (int_bytes, rest) = input.split_at(size_of::<", stringify!($SelfT), ">());")] /// *input = rest; #[doc = concat!(" ", stringify!($SelfT), "::from_le_bytes(int_bytes.try_into().unwrap())")] /// } @@ -3743,7 +3743,7 @@ macro_rules! 
int_impl { #[rustc_const_stable(feature = "const_int_conversion", since = "1.44.0")] #[must_use] #[inline] - pub const fn from_le_bytes(bytes: [u8; mem::size_of::()]) -> Self { + pub const fn from_le_bytes(bytes: [u8; size_of::()]) -> Self { Self::from_le(Self::from_ne_bytes(bytes)) } @@ -3774,7 +3774,7 @@ macro_rules! int_impl { /// /// ``` #[doc = concat!("fn read_ne_", stringify!($SelfT), "(input: &mut &[u8]) -> ", stringify!($SelfT), " {")] - #[doc = concat!(" let (int_bytes, rest) = input.split_at(std::mem::size_of::<", stringify!($SelfT), ">());")] + #[doc = concat!(" let (int_bytes, rest) = input.split_at(size_of::<", stringify!($SelfT), ">());")] /// *input = rest; #[doc = concat!(" ", stringify!($SelfT), "::from_ne_bytes(int_bytes.try_into().unwrap())")] /// } @@ -3785,7 +3785,7 @@ macro_rules! int_impl { // SAFETY: const sound because integers are plain old datatypes so we can always // transmute to them #[inline] - pub const fn from_ne_bytes(bytes: [u8; mem::size_of::()]) -> Self { + pub const fn from_ne_bytes(bytes: [u8; size_of::()]) -> Self { // SAFETY: integers are plain old datatypes so we can always transmute to them unsafe { mem::transmute(bytes) } } diff --git a/library/core/src/num/mod.rs b/library/core/src/num/mod.rs index 80a38a6013dd0..151e128cd78a9 100644 --- a/library/core/src/num/mod.rs +++ b/library/core/src/num/mod.rs @@ -1241,7 +1241,7 @@ impl usize { /// Returns an `usize` where every byte is equal to `x`. #[inline] pub(crate) const fn repeat_u8(x: u8) -> usize { - usize::from_ne_bytes([x; mem::size_of::()]) + usize::from_ne_bytes([x; size_of::()]) } /// Returns an `usize` where every byte pair is equal to `x`. @@ -1249,7 +1249,7 @@ impl usize { pub(crate) const fn repeat_u16(x: u16) -> usize { let mut r = 0usize; let mut i = 0; - while i < mem::size_of::() { + while i < size_of::() { // Use `wrapping_shl` to make it work on targets with 16-bit `usize` r = r.wrapping_shl(16) | (x as usize); i += 2; @@ -1330,7 +1330,7 @@ pub enum FpCategory { #[inline(always)] #[unstable(issue = "none", feature = "std_internals")] pub const fn can_not_overflow(radix: u32, is_signed_ty: bool, digits: &[u8]) -> bool { - radix <= 16 && digits.len() <= mem::size_of::() * 2 - is_signed_ty as usize + radix <= 16 && digits.len() <= size_of::() * 2 - is_signed_ty as usize } #[cfg_attr(not(feature = "panic_immediate_abort"), inline(never))] diff --git a/library/core/src/num/nonzero.rs b/library/core/src/num/nonzero.rs index a967b72c4fa9b..6c9b366d90304 100644 --- a/library/core/src/num/nonzero.rs +++ b/library/core/src/num/nonzero.rs @@ -86,7 +86,7 @@ impl_zeroable_primitive!( /// For example, `Option>` is the same size as `u32`: /// /// ``` -/// use core::{mem::size_of, num::NonZero}; +/// use core::{num::NonZero}; /// /// assert_eq!(size_of::>>(), size_of::()); /// ``` @@ -102,7 +102,6 @@ impl_zeroable_primitive!( /// `Option>` are guaranteed to have the same size and alignment: /// /// ``` -/// # use std::mem::{size_of, align_of}; /// use std::num::NonZero; /// /// assert_eq!(size_of::>(), size_of::>>()); @@ -500,7 +499,6 @@ macro_rules! nonzero_integer { #[doc = concat!("For example, `Option<", stringify!($Ty), ">` is the same size as `", stringify!($Int), "`:")] /// /// ```rust - /// use std::mem::size_of; #[doc = concat!("assert_eq!(size_of::>(), size_of::<", stringify!($Int), ">());")] /// ``` /// @@ -516,7 +514,6 @@ macro_rules! 
nonzero_integer { /// are guaranteed to have the same size and alignment: /// /// ``` - /// # use std::mem::{size_of, align_of}; #[doc = concat!("use std::num::", stringify!($Ty), ";")] /// #[doc = concat!("assert_eq!(size_of::<", stringify!($Ty), ">(), size_of::>());")] diff --git a/library/core/src/num/uint_macros.rs b/library/core/src/num/uint_macros.rs index 405c71121caad..d8709d51cccb2 100644 --- a/library/core/src/num/uint_macros.rs +++ b/library/core/src/num/uint_macros.rs @@ -2586,7 +2586,7 @@ macro_rules! uint_impl { without modifying the original"] #[inline] pub const fn abs_diff(self, other: Self) -> Self { - if mem::size_of::() == 1 { + if size_of::() == 1 { // Trick LLVM into generating the psadbw instruction when SSE2 // is available and this function is autovectorized for u8's. (self as i32).wrapping_sub(other as i32).abs() as Self @@ -3465,7 +3465,7 @@ macro_rules! uint_impl { #[must_use = "this returns the result of the operation, \ without modifying the original"] #[inline] - pub const fn to_be_bytes(self) -> [u8; mem::size_of::()] { + pub const fn to_be_bytes(self) -> [u8; size_of::()] { self.to_be().to_ne_bytes() } @@ -3485,7 +3485,7 @@ macro_rules! uint_impl { #[must_use = "this returns the result of the operation, \ without modifying the original"] #[inline] - pub const fn to_le_bytes(self) -> [u8; mem::size_of::()] { + pub const fn to_le_bytes(self) -> [u8; size_of::()] { self.to_le().to_ne_bytes() } @@ -3521,7 +3521,7 @@ macro_rules! uint_impl { // SAFETY: const sound because integers are plain old datatypes so we can always // transmute them to arrays of bytes #[inline] - pub const fn to_ne_bytes(self) -> [u8; mem::size_of::()] { + pub const fn to_ne_bytes(self) -> [u8; size_of::()] { // SAFETY: integers are plain old datatypes so we can always transmute them to // arrays of bytes unsafe { mem::transmute(self) } @@ -3543,7 +3543,7 @@ macro_rules! uint_impl { /// /// ``` #[doc = concat!("fn read_be_", stringify!($SelfT), "(input: &mut &[u8]) -> ", stringify!($SelfT), " {")] - #[doc = concat!(" let (int_bytes, rest) = input.split_at(std::mem::size_of::<", stringify!($SelfT), ">());")] + #[doc = concat!(" let (int_bytes, rest) = input.split_at(size_of::<", stringify!($SelfT), ">());")] /// *input = rest; #[doc = concat!(" ", stringify!($SelfT), "::from_be_bytes(int_bytes.try_into().unwrap())")] /// } @@ -3552,7 +3552,7 @@ macro_rules! uint_impl { #[rustc_const_stable(feature = "const_int_conversion", since = "1.44.0")] #[must_use] #[inline] - pub const fn from_be_bytes(bytes: [u8; mem::size_of::()]) -> Self { + pub const fn from_be_bytes(bytes: [u8; size_of::()]) -> Self { Self::from_be(Self::from_ne_bytes(bytes)) } @@ -3572,7 +3572,7 @@ macro_rules! uint_impl { /// /// ``` #[doc = concat!("fn read_le_", stringify!($SelfT), "(input: &mut &[u8]) -> ", stringify!($SelfT), " {")] - #[doc = concat!(" let (int_bytes, rest) = input.split_at(std::mem::size_of::<", stringify!($SelfT), ">());")] + #[doc = concat!(" let (int_bytes, rest) = input.split_at(size_of::<", stringify!($SelfT), ">());")] /// *input = rest; #[doc = concat!(" ", stringify!($SelfT), "::from_le_bytes(int_bytes.try_into().unwrap())")] /// } @@ -3581,7 +3581,7 @@ macro_rules! uint_impl { #[rustc_const_stable(feature = "const_int_conversion", since = "1.44.0")] #[must_use] #[inline] - pub const fn from_le_bytes(bytes: [u8; mem::size_of::()]) -> Self { + pub const fn from_le_bytes(bytes: [u8; size_of::()]) -> Self { Self::from_le(Self::from_ne_bytes(bytes)) } @@ -3612,7 +3612,7 @@ macro_rules! 
uint_impl { /// /// ``` #[doc = concat!("fn read_ne_", stringify!($SelfT), "(input: &mut &[u8]) -> ", stringify!($SelfT), " {")] - #[doc = concat!(" let (int_bytes, rest) = input.split_at(std::mem::size_of::<", stringify!($SelfT), ">());")] + #[doc = concat!(" let (int_bytes, rest) = input.split_at(size_of::<", stringify!($SelfT), ">());")] /// *input = rest; #[doc = concat!(" ", stringify!($SelfT), "::from_ne_bytes(int_bytes.try_into().unwrap())")] /// } @@ -3623,7 +3623,7 @@ macro_rules! uint_impl { // SAFETY: const sound because integers are plain old datatypes so we can always // transmute to them #[inline] - pub const fn from_ne_bytes(bytes: [u8; mem::size_of::()]) -> Self { + pub const fn from_ne_bytes(bytes: [u8; size_of::()]) -> Self { // SAFETY: integers are plain old datatypes so we can always transmute to them unsafe { mem::transmute(bytes) } } diff --git a/library/core/src/primitive_docs.rs b/library/core/src/primitive_docs.rs index bbf5939fe1b05..89c856fe10746 100644 --- a/library/core/src/primitive_docs.rs +++ b/library/core/src/primitive_docs.rs @@ -398,12 +398,12 @@ mod prim_never {} /// let v = vec!['h', 'e', 'l', 'l', 'o']; /// /// // five elements times four bytes for each element -/// assert_eq!(20, v.len() * std::mem::size_of::()); +/// assert_eq!(20, v.len() * size_of::()); /// /// let s = String::from("hello"); /// /// // five elements times one byte per element -/// assert_eq!(5, s.len() * std::mem::size_of::()); +/// assert_eq!(5, s.len() * size_of::()); /// ``` /// /// [`String`]: ../std/string/struct.String.html @@ -443,8 +443,8 @@ mod prim_never {} /// let s = String::from("love: ❤️"); /// let v: Vec = s.chars().collect(); /// -/// assert_eq!(12, std::mem::size_of_val(&s[..])); -/// assert_eq!(32, std::mem::size_of_val(&v[..])); +/// assert_eq!(12, size_of_val(&s[..])); +/// assert_eq!(32, size_of_val(&v[..])); /// ``` #[stable(feature = "rust1", since = "1.0.0")] mod prim_char {} @@ -594,10 +594,8 @@ impl () {} /// #[allow(unused_extern_crates)] /// extern crate libc; /// -/// use std::mem; -/// /// unsafe { -/// let my_num: *mut i32 = libc::malloc(mem::size_of::()) as *mut i32; +/// let my_num: *mut i32 = libc::malloc(size_of::()) as *mut i32; /// if my_num.is_null() { /// panic!("failed to allocate memory"); /// } @@ -893,11 +891,11 @@ mod prim_array {} /// /// ``` /// # use std::rc::Rc; -/// let pointer_size = std::mem::size_of::<&u8>(); -/// assert_eq!(2 * pointer_size, std::mem::size_of::<&[u8]>()); -/// assert_eq!(2 * pointer_size, std::mem::size_of::<*const [u8]>()); -/// assert_eq!(2 * pointer_size, std::mem::size_of::>()); -/// assert_eq!(2 * pointer_size, std::mem::size_of::>()); +/// let pointer_size = size_of::<&u8>(); +/// assert_eq!(2 * pointer_size, size_of::<&[u8]>()); +/// assert_eq!(2 * pointer_size, size_of::<*const [u8]>()); +/// assert_eq!(2 * pointer_size, size_of::>()); +/// assert_eq!(2 * pointer_size, size_of::>()); /// ``` /// /// ## Trait Implementations @@ -1692,15 +1690,13 @@ mod prim_ref {} /// This zero-sized type *coerces* to a regular function pointer. 
For example: /// /// ```rust -/// use std::mem; -/// /// fn bar(x: i32) {} /// /// let not_bar_ptr = bar; // `not_bar_ptr` is zero-sized, uniquely identifying `bar` -/// assert_eq!(mem::size_of_val(¬_bar_ptr), 0); +/// assert_eq!(size_of_val(¬_bar_ptr), 0); /// /// let bar_ptr: fn(i32) = not_bar_ptr; // force coercion to function pointer -/// assert_eq!(mem::size_of_val(&bar_ptr), mem::size_of::()); +/// assert_eq!(size_of_val(&bar_ptr), size_of::()); /// /// let footgun = &bar; // this is a shared reference to the zero-sized type identifying `bar` /// ``` diff --git a/library/core/src/ptr/alignment.rs b/library/core/src/ptr/alignment.rs index 2da94e72566e9..19311e39b454e 100644 --- a/library/core/src/ptr/alignment.rs +++ b/library/core/src/ptr/alignment.rs @@ -13,8 +13,8 @@ use crate::{cmp, fmt, hash, mem, num}; pub struct Alignment(AlignmentEnum); // Alignment is `repr(usize)`, but via extra steps. -const _: () = assert!(mem::size_of::() == mem::size_of::()); -const _: () = assert!(mem::align_of::() == mem::align_of::()); +const _: () = assert!(size_of::() == size_of::()); +const _: () = assert!(align_of::() == align_of::()); fn _alignment_can_be_structurally_matched(a: Alignment) -> bool { matches!(a, Alignment::MIN) @@ -38,14 +38,14 @@ impl Alignment { /// Returns the alignment for a type. /// - /// This provides the same numerical value as [`mem::align_of`], + /// This provides the same numerical value as [`align_of`], /// but in an `Alignment` instead of a `usize`. #[unstable(feature = "ptr_alignment_type", issue = "102070")] #[inline] #[must_use] pub const fn of() -> Self { // This can't actually panic since type alignment is always a power of two. - const { Alignment::new(mem::align_of::()).unwrap() } + const { Alignment::new(align_of::()).unwrap() } } /// Creates an `Alignment` from a `usize`, or returns `None` if it's diff --git a/library/core/src/ptr/const_ptr.rs b/library/core/src/ptr/const_ptr.rs index 8db620596dde7..43306cfa674b9 100644 --- a/library/core/src/ptr/const_ptr.rs +++ b/library/core/src/ptr/const_ptr.rs @@ -1,7 +1,7 @@ use super::*; use crate::cmp::Ordering::{Equal, Greater, Less}; use crate::intrinsics::const_eval_select; -use crate::mem::SizedTypeProperties; +use crate::mem::{self, SizedTypeProperties}; use crate::slice::{self, SliceIndex}; impl *const T { @@ -595,9 +595,9 @@ impl *const T { } /// Calculates the distance between two pointers within the same allocation. The returned value is in - /// units of T: the distance in bytes divided by `mem::size_of::()`. + /// units of T: the distance in bytes divided by `size_of::()`. /// - /// This is equivalent to `(self as isize - origin as isize) / (mem::size_of::() as isize)`, + /// This is equivalent to `(self as isize - origin as isize) / (size_of::() as isize)`, /// except that it has a lot more opportunities for UB, in exchange for the compiler /// better understanding what you are doing. /// @@ -633,7 +633,7 @@ impl *const T { /// objects is not known at compile-time. However, the requirement also exists at /// runtime and may be exploited by optimizations. If you wish to compute the difference between /// pointers that are not guaranteed to be from the same allocation, use `(self as isize - - /// origin as isize) / mem::size_of::()`. + /// origin as isize) / size_of::()`. // FIXME: recommend `addr()` instead of `as usize` once that is stable. 
/// /// [`add`]: #method.add @@ -683,7 +683,7 @@ impl *const T { where T: Sized, { - let pointee_size = mem::size_of::(); + let pointee_size = size_of::(); assert!(0 < pointee_size && pointee_size <= isize::MAX as usize); // SAFETY: the caller must uphold the safety contract for `ptr_offset_from`. unsafe { intrinsics::ptr_offset_from(self, origin) } @@ -709,7 +709,7 @@ impl *const T { /// Calculates the distance between two pointers within the same allocation, *where it's known that /// `self` is equal to or greater than `origin`*. The returned value is in - /// units of T: the distance in bytes is divided by `mem::size_of::()`. + /// units of T: the distance in bytes is divided by `size_of::()`. /// /// This computes the same value that [`offset_from`](#method.offset_from) /// would compute, but with the added precondition that the offset is @@ -793,7 +793,7 @@ impl *const T { ) => runtime_ptr_ge(this, origin) ); - let pointee_size = mem::size_of::(); + let pointee_size = size_of::(); assert!(0 < pointee_size && pointee_size <= isize::MAX as usize); // SAFETY: the caller must uphold the safety contract for `ptr_offset_from_unsigned`. unsafe { intrinsics::ptr_offset_from_unsigned(self, origin) } @@ -1375,8 +1375,6 @@ impl *const T { /// Accessing adjacent `u8` as `u16` /// /// ``` - /// use std::mem::align_of; - /// /// # unsafe { /// let x = [5_u8, 6, 7, 8, 9]; /// let ptr = x.as_ptr(); @@ -1436,7 +1434,7 @@ impl *const T { where T: Sized, { - self.is_aligned_to(mem::align_of::()) + self.is_aligned_to(align_of::()) } /// Returns whether the pointer is aligned to `align`. @@ -1595,7 +1593,7 @@ impl *const [T] { /// When calling this method, you have to ensure that *either* the pointer is null *or* /// all of the following is true: /// - /// * The pointer must be [valid] for reads for `ptr.len() * mem::size_of::()` many bytes, + /// * The pointer must be [valid] for reads for `ptr.len() * size_of::()` many bytes, /// and it must be properly aligned. This means in particular: /// /// * The entire memory range of this slice must be contained within a single [allocated object]! @@ -1607,7 +1605,7 @@ impl *const [T] { /// them from other data. You can obtain a pointer that is usable as `data` /// for zero-length slices using [`NonNull::dangling()`]. /// - /// * The total size `ptr.len() * mem::size_of::()` of the slice must be no larger than `isize::MAX`. + /// * The total size `ptr.len() * size_of::()` of the slice must be no larger than `isize::MAX`. /// See the safety documentation of [`pointer::offset`]. /// /// * You must enforce Rust's aliasing rules, since the returned lifetime `'a` is diff --git a/library/core/src/ptr/metadata.rs b/library/core/src/ptr/metadata.rs index 9eee29d485f41..4870750638957 100644 --- a/library/core/src/ptr/metadata.rs +++ b/library/core/src/ptr/metadata.rs @@ -74,7 +74,7 @@ pub trait Pointee { /// #![feature(ptr_metadata)] /// /// fn this_never_panics() { -/// assert_eq!(std::mem::size_of::<&T>(), std::mem::size_of::()) +/// assert_eq!(size_of::<&T>(), size_of::()) /// } /// ``` #[unstable(feature = "ptr_metadata", issue = "81513")] diff --git a/library/core/src/ptr/mod.rs b/library/core/src/ptr/mod.rs index eb99be817a2ca..ea53da78d3bd2 100644 --- a/library/core/src/ptr/mod.rs +++ b/library/core/src/ptr/mod.rs @@ -48,7 +48,7 @@ //! //! Valid raw pointers as defined above are not necessarily properly aligned (where //! "proper" alignment is defined by the pointee type, i.e., `*const T` must be -//! aligned to `mem::align_of::()`). 
However, most functions require their +//! aligned to `align_of::()`). However, most functions require their //! arguments to be properly aligned, and will explicitly state //! this requirement in their documentation. Notable exceptions to this are //! [`read_unaligned`] and [`write_unaligned`]. @@ -297,7 +297,7 @@ //! //! // Our value, which must have enough alignment to have spare least-significant-bits. //! let my_precious_data: u32 = 17; -//! assert!(core::mem::align_of::() > 1); +//! assert!(align_of::() > 1); //! //! // Create a tagged pointer //! let ptr = &my_precious_data as *const u32; @@ -1098,12 +1098,12 @@ pub const unsafe fn swap_nonoverlapping(x: *mut T, y: *mut T, count: usize) { } else { macro_rules! attempt_swap_as_chunks { ($ChunkTy:ty) => { - if mem::align_of::() >= mem::align_of::<$ChunkTy>() - && mem::size_of::() % mem::size_of::<$ChunkTy>() == 0 + if align_of::() >= align_of::<$ChunkTy>() + && size_of::() % size_of::<$ChunkTy>() == 0 { let x: *mut $ChunkTy = x.cast(); let y: *mut $ChunkTy = y.cast(); - let count = count * (mem::size_of::() / mem::size_of::<$ChunkTy>()); + let count = count * (size_of::() / size_of::<$ChunkTy>()); // SAFETY: these are the same bytes that the caller promised were // ok, just typed as `MaybeUninit`s instead of as `T`s. // The `if` condition above ensures that we're not violating @@ -1117,9 +1117,9 @@ pub const unsafe fn swap_nonoverlapping(x: *mut T, y: *mut T, count: usize) { // Split up the slice into small power-of-two-sized chunks that LLVM is able // to vectorize (unless it's a special type with more-than-pointer alignment, // because we don't want to pessimize things like slices of SIMD vectors.) - if mem::align_of::() <= mem::size_of::() - && (!mem::size_of::().is_power_of_two() - || mem::size_of::() > mem::size_of::() * 2) + if align_of::() <= size_of::() + && (!size_of::().is_power_of_two() + || size_of::() > size_of::() * 2) { attempt_swap_as_chunks!(usize); attempt_swap_as_chunks!(u8); @@ -1443,10 +1443,8 @@ pub const unsafe fn read(src: *const T) -> T { /// Read a `usize` value from a byte buffer: /// /// ``` -/// use std::mem; -/// /// fn read_usize(x: &[u8]) -> usize { -/// assert!(x.len() >= mem::size_of::()); +/// assert!(x.len() >= size_of::()); /// /// let ptr = x.as_ptr() as *const usize; /// @@ -1467,7 +1465,7 @@ pub const unsafe fn read_unaligned(src: *const T) -> T { // Also, since we just wrote a valid value into `tmp`, it is guaranteed // to be properly initialized. unsafe { - copy_nonoverlapping(src as *const u8, tmp.as_mut_ptr() as *mut u8, mem::size_of::()); + copy_nonoverlapping(src as *const u8, tmp.as_mut_ptr() as *mut u8, size_of::()); tmp.assume_init() } } @@ -1647,10 +1645,8 @@ pub const unsafe fn write(dst: *mut T, src: T) { /// Write a `usize` value to a byte buffer: /// /// ``` -/// use std::mem; -/// /// fn write_usize(x: &mut [u8], val: usize) { -/// assert!(x.len() >= mem::size_of::()); +/// assert!(x.len() >= size_of::()); /// /// let ptr = x.as_mut_ptr() as *mut usize; /// @@ -1667,7 +1663,7 @@ pub const unsafe fn write_unaligned(dst: *mut T, src: T) { // `dst` cannot overlap `src` because the caller has mutable access // to `dst` while `src` is owned by this function. unsafe { - copy_nonoverlapping((&raw const src) as *const u8, dst as *mut u8, mem::size_of::()); + copy_nonoverlapping((&raw const src) as *const u8, dst as *mut u8, size_of::()); // We are calling the intrinsic directly to avoid function calls in the generated code. 
intrinsics::forget(src); } @@ -1911,7 +1907,7 @@ pub(crate) unsafe fn align_offset(p: *const T, a: usize) -> usize { inverse & m_minus_one } - let stride = mem::size_of::(); + let stride = size_of::(); let addr: usize = p.addr(); diff --git a/library/core/src/ptr/mut_ptr.rs b/library/core/src/ptr/mut_ptr.rs index 5a64f12ca99ff..26aaac476027f 100644 --- a/library/core/src/ptr/mut_ptr.rs +++ b/library/core/src/ptr/mut_ptr.rs @@ -1,7 +1,7 @@ use super::*; use crate::cmp::Ordering::{Equal, Greater, Less}; use crate::intrinsics::const_eval_select; -use crate::mem::SizedTypeProperties; +use crate::mem::{self, SizedTypeProperties}; use crate::slice::{self, SliceIndex}; impl *mut T { @@ -769,9 +769,9 @@ impl *mut T { } /// Calculates the distance between two pointers within the same allocation. The returned value is in - /// units of T: the distance in bytes divided by `mem::size_of::()`. + /// units of T: the distance in bytes divided by `size_of::()`. /// - /// This is equivalent to `(self as isize - origin as isize) / (mem::size_of::() as isize)`, + /// This is equivalent to `(self as isize - origin as isize) / (size_of::() as isize)`, /// except that it has a lot more opportunities for UB, in exchange for the compiler /// better understanding what you are doing. /// @@ -807,7 +807,7 @@ impl *mut T { /// objects is not known at compile-time. However, the requirement also exists at /// runtime and may be exploited by optimizations. If you wish to compute the difference between /// pointers that are not guaranteed to be from the same allocation, use `(self as isize - - /// origin as isize) / mem::size_of::()`. + /// origin as isize) / size_of::()`. // FIXME: recommend `addr()` instead of `as usize` once that is stable. /// /// [`add`]: #method.add @@ -881,7 +881,7 @@ impl *mut T { /// Calculates the distance between two pointers within the same allocation, *where it's known that /// `self` is equal to or greater than `origin`*. The returned value is in - /// units of T: the distance in bytes is divided by `mem::size_of::()`. + /// units of T: the distance in bytes is divided by `size_of::()`. /// /// This computes the same value that [`offset_from`](#method.offset_from) /// would compute, but with the added precondition that the offset is @@ -1623,8 +1623,6 @@ impl *mut T { /// Accessing adjacent `u8` as `u16` /// /// ``` - /// use std::mem::align_of; - /// /// # unsafe { /// let mut x = [5_u8, 6, 7, 8, 9]; /// let ptr = x.as_mut_ptr(); @@ -1689,7 +1687,7 @@ impl *mut T { where T: Sized, { - self.is_aligned_to(mem::align_of::()) + self.is_aligned_to(align_of::()) } /// Returns whether the pointer is aligned to `align`. @@ -1950,7 +1948,7 @@ impl *mut [T] { /// When calling this method, you have to ensure that *either* the pointer is null *or* /// all of the following is true: /// - /// * The pointer must be [valid] for reads for `ptr.len() * mem::size_of::()` many bytes, + /// * The pointer must be [valid] for reads for `ptr.len() * size_of::()` many bytes, /// and it must be properly aligned. This means in particular: /// /// * The entire memory range of this slice must be contained within a single [allocated object]! @@ -1962,7 +1960,7 @@ impl *mut [T] { /// them from other data. You can obtain a pointer that is usable as `data` /// for zero-length slices using [`NonNull::dangling()`]. /// - /// * The total size `ptr.len() * mem::size_of::()` of the slice must be no larger than `isize::MAX`. + /// * The total size `ptr.len() * size_of::()` of the slice must be no larger than `isize::MAX`. 
/// See the safety documentation of [`pointer::offset`]. /// /// * You must enforce Rust's aliasing rules, since the returned lifetime `'a` is @@ -2008,7 +2006,7 @@ impl *mut [T] { /// When calling this method, you have to ensure that *either* the pointer is null *or* /// all of the following is true: /// - /// * The pointer must be [valid] for reads and writes for `ptr.len() * mem::size_of::()` + /// * The pointer must be [valid] for reads and writes for `ptr.len() * size_of::()` /// many bytes, and it must be properly aligned. This means in particular: /// /// * The entire memory range of this slice must be contained within a single [allocated object]! @@ -2020,7 +2018,7 @@ impl *mut [T] { /// them from other data. You can obtain a pointer that is usable as `data` /// for zero-length slices using [`NonNull::dangling()`]. /// - /// * The total size `ptr.len() * mem::size_of::()` of the slice must be no larger than `isize::MAX`. + /// * The total size `ptr.len() * size_of::()` of the slice must be no larger than `isize::MAX`. /// See the safety documentation of [`pointer::offset`]. /// /// * You must enforce Rust's aliasing rules, since the returned lifetime `'a` is diff --git a/library/core/src/ptr/non_null.rs b/library/core/src/ptr/non_null.rs index 7abd3ddaa9efc..7c567d522c556 100644 --- a/library/core/src/ptr/non_null.rs +++ b/library/core/src/ptr/non_null.rs @@ -49,7 +49,6 @@ use crate::{fmt, hash, intrinsics, mem, ptr}; /// are guaranteed to have the same size and alignment: /// /// ``` -/// # use std::mem::{size_of, align_of}; /// use std::ptr::NonNull; /// /// assert_eq!(size_of::>(), size_of::>>()); @@ -724,9 +723,9 @@ impl NonNull { } /// Calculates the distance between two pointers within the same allocation. The returned value is in - /// units of T: the distance in bytes divided by `mem::size_of::()`. + /// units of T: the distance in bytes divided by `size_of::()`. /// - /// This is equivalent to `(self as isize - origin as isize) / (mem::size_of::() as isize)`, + /// This is equivalent to `(self as isize - origin as isize) / (size_of::() as isize)`, /// except that it has a lot more opportunities for UB, in exchange for the compiler /// better understanding what you are doing. /// @@ -762,7 +761,7 @@ impl NonNull { /// objects is not known at compile-time. However, the requirement also exists at /// runtime and may be exploited by optimizations. If you wish to compute the difference between /// pointers that are not guaranteed to be from the same allocation, use `(self as isize - - /// origin as isize) / mem::size_of::()`. + /// origin as isize) / size_of::()`. // FIXME: recommend `addr()` instead of `as usize` once that is stable. /// /// [`add`]: #method.add @@ -842,7 +841,7 @@ impl NonNull { /// Calculates the distance between two pointers within the same allocation, *where it's known that /// `self` is equal to or greater than `origin`*. The returned value is in - /// units of T: the distance in bytes is divided by `mem::size_of::()`. + /// units of T: the distance in bytes is divided by `size_of::()`. 
/// /// This computes the same value that [`offset_from`](#method.offset_from) /// would compute, but with the added precondition that the offset is @@ -1223,7 +1222,6 @@ impl NonNull { /// Accessing adjacent `u8` as `u16` /// /// ``` - /// use std::mem::align_of; /// use std::ptr::NonNull; /// /// # unsafe { @@ -1443,7 +1441,7 @@ impl NonNull<[T]> { /// /// When calling this method, you have to ensure that all of the following is true: /// - /// * The pointer must be [valid] for reads for `ptr.len() * mem::size_of::()` many bytes, + /// * The pointer must be [valid] for reads for `ptr.len() * size_of::()` many bytes, /// and it must be properly aligned. This means in particular: /// /// * The entire memory range of this slice must be contained within a single allocated object! @@ -1455,7 +1453,7 @@ impl NonNull<[T]> { /// them from other data. You can obtain a pointer that is usable as `data` /// for zero-length slices using [`NonNull::dangling()`]. /// - /// * The total size `ptr.len() * mem::size_of::()` of the slice must be no larger than `isize::MAX`. + /// * The total size `ptr.len() * size_of::()` of the slice must be no larger than `isize::MAX`. /// See the safety documentation of [`pointer::offset`]. /// /// * You must enforce Rust's aliasing rules, since the returned lifetime `'a` is @@ -1488,7 +1486,7 @@ impl NonNull<[T]> { /// /// When calling this method, you have to ensure that all of the following is true: /// - /// * The pointer must be [valid] for reads and writes for `ptr.len() * mem::size_of::()` + /// * The pointer must be [valid] for reads and writes for `ptr.len() * size_of::()` /// many bytes, and it must be properly aligned. This means in particular: /// /// * The entire memory range of this slice must be contained within a single allocated object! @@ -1500,7 +1498,7 @@ impl NonNull<[T]> { /// them from other data. You can obtain a pointer that is usable as `data` /// for zero-length slices using [`NonNull::dangling()`]. /// - /// * The total size `ptr.len() * mem::size_of::()` of the slice must be no larger than `isize::MAX`. + /// * The total size `ptr.len() * size_of::()` of the slice must be no larger than `isize::MAX`. /// See the safety documentation of [`pointer::offset`]. /// /// * You must enforce Rust's aliasing rules, since the returned lifetime `'a` is diff --git a/library/core/src/slice/cmp.rs b/library/core/src/slice/cmp.rs index 9cb00644e6442..804bdfcbb4fc7 100644 --- a/library/core/src/slice/cmp.rs +++ b/library/core/src/slice/cmp.rs @@ -1,10 +1,10 @@ //! Comparison traits for `[T]`. use super::{from_raw_parts, memchr}; +use crate::ascii; use crate::cmp::{self, BytewiseEq, Ordering}; use crate::intrinsics::compare_bytes; use crate::num::NonZero; -use crate::{ascii, mem}; #[stable(feature = "rust1", since = "1.0.0")] impl PartialEq<[U]> for [T] @@ -87,7 +87,7 @@ where // SAFETY: `self` and `other` are references and are thus guaranteed to be valid. // The two slices have been checked to have the same size above. unsafe { - let size = mem::size_of_val(self); + let size = size_of_val(self); compare_bytes(self.as_ptr() as *const u8, other.as_ptr() as *const u8, size) == 0 } } @@ -266,7 +266,7 @@ macro_rules! impl_slice_contains { fn slice_contains(&self, arr: &[$t]) -> bool { // Make our LANE_COUNT 4x the normal lane count (aiming for 128 bit vectors). // The compiler will nicely unroll it. 
- const LANE_COUNT: usize = 4 * (128 / (mem::size_of::<$t>() * 8)); + const LANE_COUNT: usize = 4 * (128 / (size_of::<$t>() * 8)); // SIMD let mut chunks = arr.chunks_exact(LANE_COUNT); for chunk in &mut chunks { diff --git a/library/core/src/slice/memchr.rs b/library/core/src/slice/memchr.rs index 98db7aaf53321..1e1053583a617 100644 --- a/library/core/src/slice/memchr.rs +++ b/library/core/src/slice/memchr.rs @@ -2,11 +2,10 @@ // Copyright 2015 Andrew Gallant, bluss and Nicolas Koch use crate::intrinsics::const_eval_select; -use crate::mem; const LO_USIZE: usize = usize::repeat_u8(0x01); const HI_USIZE: usize = usize::repeat_u8(0x80); -const USIZE_BYTES: usize = mem::size_of::(); +const USIZE_BYTES: usize = size_of::(); /// Returns `true` if `x` contains any zero byte. /// @@ -138,7 +137,7 @@ pub fn memrchr(x: u8, text: &[u8]) -> Option { // offset is always aligned, so just testing `>` is sufficient and avoids possible // overflow. let repeated_x = usize::repeat_u8(x); - let chunk_bytes = mem::size_of::(); + let chunk_bytes = size_of::(); while offset > min_aligned_offset { // SAFETY: offset starts at len - suffix.len(), as long as it is greater than diff --git a/library/core/src/slice/mod.rs b/library/core/src/slice/mod.rs index 7a2764206e8db..c79ae2c1915de 100644 --- a/library/core/src/slice/mod.rs +++ b/library/core/src/slice/mod.rs @@ -3894,9 +3894,9 @@ impl [T] { // Explicitly wrap the function call in a const block so it gets // constant-evaluated even in debug mode. - let gcd: usize = const { gcd(mem::size_of::(), mem::size_of::()) }; - let ts: usize = mem::size_of::() / gcd; - let us: usize = mem::size_of::() / gcd; + let gcd: usize = const { gcd(size_of::(), size_of::()) }; + let ts: usize = size_of::() / gcd; + let us: usize = size_of::() / gcd; // Armed with this knowledge, we can find how many `U`s we can fit! let us_len = self.len() / ts * us; @@ -3946,7 +3946,7 @@ impl [T] { // ptr.align_offset. let ptr = self.as_ptr(); // SAFETY: See the `align_to_mut` method for the detailed safety comment. - let offset = unsafe { crate::ptr::align_offset(ptr, mem::align_of::()) }; + let offset = unsafe { crate::ptr::align_offset(ptr, align_of::()) }; if offset > self.len() { (self, &[], &[]) } else { @@ -3956,7 +3956,7 @@ impl [T] { #[cfg(miri)] crate::intrinsics::miri_promise_symbolic_alignment( rest.as_ptr().cast(), - mem::align_of::(), + align_of::(), ); // SAFETY: now `rest` is definitely aligned, so `from_raw_parts` below is okay, // since the caller guarantees that we can transmute `T` to `U` safely. @@ -4017,7 +4017,7 @@ impl [T] { // valid pointer `ptr` (it comes from a reference to `self`) and with // a size that is a power of two (since it comes from the alignment for U), // satisfying its safety constraints. - let offset = unsafe { crate::ptr::align_offset(ptr, mem::align_of::()) }; + let offset = unsafe { crate::ptr::align_offset(ptr, align_of::()) }; if offset > self.len() { (self, &mut [], &mut []) } else { @@ -4029,7 +4029,7 @@ impl [T] { #[cfg(miri)] crate::intrinsics::miri_promise_symbolic_alignment( mut_ptr.cast() as *const (), - mem::align_of::(), + align_of::(), ); // We can't use `rest` again after this, that would invalidate its alias `mut_ptr`! // SAFETY: see comments for `align_to`. @@ -4100,7 +4100,7 @@ impl [T] { // These are expected to always match, as vector types are laid out like // arrays per , but we // might as well double-check since it'll optimize away anyhow. 
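For the `LANE_COUNT` expression in the `slice_contains` specialization above (four times the number of lanes a 128-bit vector holds), the concrete values work out as follows (standalone sketch):

```
fn main() {
    // 4 * (128 / (size_of::<T>() * 8)), evaluated for a few element types.
    assert_eq!(4 * (128 / (size_of::<u8>() * 8)), 64);
    assert_eq!(4 * (128 / (size_of::<u16>() * 8)), 32);
    assert_eq!(4 * (128 / (size_of::<u32>() * 8)), 16);
}
```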
- assert_eq!(mem::size_of::>(), mem::size_of::<[T; LANES]>()); + assert_eq!(size_of::>(), size_of::<[T; LANES]>()); // SAFETY: The simd types have the same layout as arrays, just with // potentially-higher alignment, so the de-facto transmutes are sound. @@ -4136,7 +4136,7 @@ impl [T] { // These are expected to always match, as vector types are laid out like // arrays per , but we // might as well double-check since it'll optimize away anyhow. - assert_eq!(mem::size_of::>(), mem::size_of::<[T; LANES]>()); + assert_eq!(size_of::>(), size_of::<[T; LANES]>()); // SAFETY: The simd types have the same layout as arrays, just with // potentially-higher alignment, so the de-facto transmutes are sound. @@ -4721,11 +4721,11 @@ impl [T] { let byte_offset = elem_start.wrapping_sub(self_start); - if byte_offset % mem::size_of::() != 0 { + if byte_offset % size_of::() != 0 { return None; } - let offset = byte_offset / mem::size_of::(); + let offset = byte_offset / size_of::(); if offset < self.len() { Some(offset) } else { None } } @@ -4775,11 +4775,11 @@ impl [T] { let byte_start = subslice_start.wrapping_sub(self_start); - if byte_start % core::mem::size_of::() != 0 { + if byte_start % size_of::() != 0 { return None; } - let start = byte_start / core::mem::size_of::(); + let start = byte_start / size_of::(); let end = start.wrapping_add(subslice.len()); if start <= self.len() && end <= self.len() { Some(start..end) } else { None } diff --git a/library/core/src/slice/raw.rs b/library/core/src/slice/raw.rs index e24b52cff82e1..3582c7e8b3f38 100644 --- a/library/core/src/slice/raw.rs +++ b/library/core/src/slice/raw.rs @@ -11,7 +11,7 @@ use crate::{array, ptr, ub_checks}; /// /// Behavior is undefined if any of the following conditions are violated: /// -/// * `data` must be non-null, [valid] for reads for `len * mem::size_of::()` many bytes, +/// * `data` must be non-null, [valid] for reads for `len * size_of::()` many bytes, /// and it must be properly aligned. This means in particular: /// /// * The entire memory range of this slice must be contained within a single allocated object! @@ -28,7 +28,7 @@ use crate::{array, ptr, ub_checks}; /// * The memory referenced by the returned slice must not be mutated for the duration /// of lifetime `'a`, except inside an `UnsafeCell`. /// -/// * The total size `len * mem::size_of::()` of the slice must be no larger than `isize::MAX`, +/// * The total size `len * size_of::()` of the slice must be no larger than `isize::MAX`, /// and adding that size to `data` must not "wrap around" the address space. /// See the safety documentation of [`pointer::offset`]. /// @@ -146,7 +146,7 @@ pub const unsafe fn from_raw_parts<'a, T>(data: *const T, len: usize) -> &'a [T] /// /// Behavior is undefined if any of the following conditions are violated: /// -/// * `data` must be non-null, [valid] for both reads and writes for `len * mem::size_of::()` many bytes, +/// * `data` must be non-null, [valid] for both reads and writes for `len * size_of::()` many bytes, /// and it must be properly aligned. This means in particular: /// /// * The entire memory range of this slice must be contained within a single allocated object! @@ -163,7 +163,7 @@ pub const unsafe fn from_raw_parts<'a, T>(data: *const T, len: usize) -> &'a [T] /// (not derived from the return value) for the duration of lifetime `'a`. /// Both read and write accesses are forbidden. 
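The divisibility check in `element_offset`/`subslice_range` above is the key step: a pointer to a real element sits at a byte offset that is an exact multiple of the element size. Recomputing it by hand (standalone sketch):

```
fn main() {
    let v = [10u32, 20, 30, 40];
    let elem = &v[2];
    let byte_offset = (elem as *const u32 as usize).wrapping_sub(v.as_ptr() as usize);
    // An in-bounds element pointer is a whole number of elements from the start...
    assert_eq!(byte_offset % size_of::<u32>(), 0);
    // ...so dividing by the element size recovers its index.
    assert_eq!(byte_offset / size_of::<u32>(), 2);
}
```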
/// -/// * The total size `len * mem::size_of::()` of the slice must be no larger than `isize::MAX`, +/// * The total size `len * size_of::()` of the slice must be no larger than `isize::MAX`, /// and adding that size to `data` must not "wrap around" the address space. /// See the safety documentation of [`pointer::offset`]. /// diff --git a/library/core/src/slice/rotate.rs b/library/core/src/slice/rotate.rs index 5d5ee4c7b6240..80178f297eaae 100644 --- a/library/core/src/slice/rotate.rs +++ b/library/core/src/slice/rotate.rs @@ -1,4 +1,4 @@ -use crate::mem::{self, MaybeUninit, SizedTypeProperties}; +use crate::mem::{MaybeUninit, SizedTypeProperties}; use crate::{cmp, ptr}; type BufType = [usize; 32]; @@ -21,12 +21,12 @@ pub(super) unsafe fn ptr_rotate(left: usize, mid: *mut T, right: usize) { } // `T` is not a zero-sized type, so it's okay to divide by its size. if !cfg!(feature = "optimize_for_size") - && cmp::min(left, right) <= mem::size_of::() / mem::size_of::() + && cmp::min(left, right) <= size_of::() / size_of::() { // SAFETY: guaranteed by the caller unsafe { ptr_rotate_memmove(left, mid, right) }; } else if !cfg!(feature = "optimize_for_size") - && ((left + right < 24) || (mem::size_of::() > mem::size_of::<[usize; 4]>())) + && ((left + right < 24) || (size_of::() > size_of::<[usize; 4]>())) { // SAFETY: guaranteed by the caller unsafe { ptr_rotate_gcd(left, mid, right) } diff --git a/library/core/src/slice/sort/shared/smallsort.rs b/library/core/src/slice/sort/shared/smallsort.rs index f6dcf42ba6037..95f196a40d01c 100644 --- a/library/core/src/slice/sort/shared/smallsort.rs +++ b/library/core/src/slice/sort/shared/smallsort.rs @@ -113,7 +113,7 @@ pub(crate) trait UnstableSmallSortFreezeTypeImpl: Sized + FreezeMarker { impl UnstableSmallSortFreezeTypeImpl for T { #[inline(always)] default fn small_sort_threshold() -> usize { - if (mem::size_of::() * SMALL_SORT_GENERAL_SCRATCH_LEN) <= MAX_STACK_ARRAY_SIZE { + if (size_of::() * SMALL_SORT_GENERAL_SCRATCH_LEN) <= MAX_STACK_ARRAY_SIZE { SMALL_SORT_GENERAL_THRESHOLD } else { SMALL_SORT_FALLBACK_THRESHOLD @@ -125,7 +125,7 @@ impl UnstableSmallSortFreezeTypeImpl for T { where F: FnMut(&T, &T) -> bool, { - if (mem::size_of::() * SMALL_SORT_GENERAL_SCRATCH_LEN) <= MAX_STACK_ARRAY_SIZE { + if (size_of::() * SMALL_SORT_GENERAL_SCRATCH_LEN) <= MAX_STACK_ARRAY_SIZE { small_sort_general(v, is_less); } else { small_sort_fallback(v, is_less); @@ -143,10 +143,10 @@ impl UnstableSmallSortFreezeTypeImpl for T { #[inline(always)] fn small_sort_threshold() -> usize { if has_efficient_in_place_swap::() - && (mem::size_of::() * SMALL_SORT_NETWORK_SCRATCH_LEN) <= MAX_STACK_ARRAY_SIZE + && (size_of::() * SMALL_SORT_NETWORK_SCRATCH_LEN) <= MAX_STACK_ARRAY_SIZE { SMALL_SORT_NETWORK_THRESHOLD - } else if (mem::size_of::() * SMALL_SORT_GENERAL_SCRATCH_LEN) <= MAX_STACK_ARRAY_SIZE { + } else if (size_of::() * SMALL_SORT_GENERAL_SCRATCH_LEN) <= MAX_STACK_ARRAY_SIZE { SMALL_SORT_GENERAL_THRESHOLD } else { SMALL_SORT_FALLBACK_THRESHOLD @@ -159,10 +159,10 @@ impl UnstableSmallSortFreezeTypeImpl for T { F: FnMut(&T, &T) -> bool, { if has_efficient_in_place_swap::() - && (mem::size_of::() * SMALL_SORT_NETWORK_SCRATCH_LEN) <= MAX_STACK_ARRAY_SIZE + && (size_of::() * SMALL_SORT_NETWORK_SCRATCH_LEN) <= MAX_STACK_ARRAY_SIZE { small_sort_network(v, is_less); - } else if (mem::size_of::() * SMALL_SORT_GENERAL_SCRATCH_LEN) <= MAX_STACK_ARRAY_SIZE { + } else if (size_of::() * SMALL_SORT_GENERAL_SCRATCH_LEN) <= MAX_STACK_ARRAY_SIZE { small_sort_general(v, is_less); } else { 
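For the `len * size_of::<T>()` bound in the safety lists above, a call that satisfies it looks like this (standalone sketch; the reasoning is exactly the documented requirements):

```
fn main() {
    let v = vec![1u16, 2, 3];
    // `v` owns memory valid for reads of `v.len() * size_of::<u16>()` bytes,
    // and that total is far below `isize::MAX`, so the conversion is sound.
    let s: &[u16] = unsafe { core::slice::from_raw_parts(v.as_ptr(), v.len()) };
    assert_eq!(s, &[1, 2, 3]);
}
```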
small_sort_fallback(v, is_less); @@ -238,7 +238,7 @@ fn small_sort_general_with_scratch bool>( unsafe { let scratch_base = scratch.as_mut_ptr() as *mut T; - let presorted_len = if const { mem::size_of::() <= 16 } && len >= 16 { + let presorted_len = if const { size_of::() <= 16 } && len >= 16 { // SAFETY: scratch_base is valid and has enough space. sort8_stable(v_base, scratch_base, scratch_base.add(len), is_less); sort8_stable( @@ -863,5 +863,5 @@ fn panic_on_ord_violation() -> ! { #[must_use] pub(crate) const fn has_efficient_in_place_swap() -> bool { // Heuristic that holds true on all tested 64-bit capable architectures. - mem::size_of::() <= 8 // mem::size_of::() + size_of::() <= 8 // size_of::() } diff --git a/library/core/src/slice/sort/stable/mod.rs b/library/core/src/slice/sort/stable/mod.rs index 3ff2e71fd05bc..090367cdabadd 100644 --- a/library/core/src/slice/sort/stable/mod.rs +++ b/library/core/src/slice/sort/stable/mod.rs @@ -3,7 +3,7 @@ #[cfg(not(any(feature = "optimize_for_size", target_pointer_width = "16")))] use crate::cmp; use crate::intrinsics; -use crate::mem::{self, MaybeUninit, SizedTypeProperties}; +use crate::mem::{MaybeUninit, SizedTypeProperties}; #[cfg(not(any(feature = "optimize_for_size", target_pointer_width = "16")))] use crate::slice::sort::shared::smallsort::{ SMALL_SORT_GENERAL_SCRATCH_LEN, StableSmallSortTypeImpl, insertion_sort_shift_left, @@ -107,7 +107,7 @@ fn driftsort_main bool, BufT: BufGuard>(v: &mut [T], i // If min_good_run_len is ever modified, this code must be updated to allocate // the correct scratch size for it. const MAX_FULL_ALLOC_BYTES: usize = 8_000_000; // 8MB - let max_full_alloc = MAX_FULL_ALLOC_BYTES / mem::size_of::(); + let max_full_alloc = MAX_FULL_ALLOC_BYTES / size_of::(); let len = v.len(); let alloc_len = cmp::max( cmp::max(len - len / 2, cmp::min(len, max_full_alloc)), @@ -155,7 +155,7 @@ impl AlignedStorage { } fn as_uninit_slice_mut(&mut self) -> &mut [MaybeUninit] { - let len = N / mem::size_of::(); + let len = N / size_of::(); // SAFETY: `_align` ensures we are correctly aligned. unsafe { core::slice::from_raw_parts_mut(self.storage.as_mut_ptr().cast(), len) } diff --git a/library/core/src/slice/sort/stable/quicksort.rs b/library/core/src/slice/sort/stable/quicksort.rs index 630c6ff907703..3c9688790c40b 100644 --- a/library/core/src/slice/sort/stable/quicksort.rs +++ b/library/core/src/slice/sort/stable/quicksort.rs @@ -1,6 +1,6 @@ //! This module contains a stable quicksort and partition implementation. -use crate::mem::{self, ManuallyDrop, MaybeUninit}; +use crate::mem::{ManuallyDrop, MaybeUninit}; use crate::slice::sort::shared::FreezeMarker; use crate::slice::sort::shared::pivot::choose_pivot; use crate::slice::sort::shared::smallsort::StableSmallSortTypeImpl; @@ -126,7 +126,7 @@ fn stable_partition bool>( // this gave significant performance boosts in benchmarks. Unrolling // through for _ in 0..UNROLL_LEN { .. } instead of manually improves // compile times but has a ~10-20% performance penalty on opt-level=s. - if const { mem::size_of::() <= 16 } { + if const { size_of::() <= 16 } { const UNROLL_LEN: usize = 4; let unroll_end = v_base.add(loop_end_pos.saturating_sub(UNROLL_LEN - 1)); while state.scan < unroll_end { diff --git a/library/core/src/slice/sort/unstable/quicksort.rs b/library/core/src/slice/sort/unstable/quicksort.rs index bb9f90fc881a0..68a1611871699 100644 --- a/library/core/src/slice/sort/unstable/quicksort.rs +++ b/library/core/src/slice/sort/unstable/quicksort.rs @@ -1,6 +1,8 @@ //! 
This module contains an unstable quicksort and two partition implementations. -use crate::mem::{self, ManuallyDrop}; +#[cfg(not(feature = "optimize_for_size"))] +use crate::mem; +use crate::mem::ManuallyDrop; #[cfg(not(feature = "optimize_for_size"))] use crate::slice::sort::shared::pivot::choose_pivot; #[cfg(not(feature = "optimize_for_size"))] @@ -137,7 +139,7 @@ where const fn inst_partition bool>() -> fn(&mut [T], &T, &mut F) -> usize { const MAX_BRANCHLESS_PARTITION_SIZE: usize = 96; - if mem::size_of::() <= MAX_BRANCHLESS_PARTITION_SIZE { + if size_of::() <= MAX_BRANCHLESS_PARTITION_SIZE { // Specialize for types that are relatively cheap to copy, where branchless optimizations // have large leverage e.g. `u64` and `String`. cfg_if! { @@ -304,7 +306,7 @@ where // Manual unrolling that works well on x86, Arm and with opt-level=s without murdering // compile-times. Leaving this to the compiler yields ok to bad results. - let unroll_len = const { if mem::size_of::() <= 16 { 2 } else { 1 } }; + let unroll_len = const { if size_of::() <= 16 { 2 } else { 1 } }; let unroll_end = v_base.add(len - (unroll_len - 1)); while state.right < unroll_end { diff --git a/library/core/src/str/count.rs b/library/core/src/str/count.rs index b5d7aaf05d4bd..452403b23dee1 100644 --- a/library/core/src/str/count.rs +++ b/library/core/src/str/count.rs @@ -20,7 +20,7 @@ use core::intrinsics::unlikely; -const USIZE_SIZE: usize = core::mem::size_of::(); +const USIZE_SIZE: usize = size_of::(); const UNROLL_INNER: usize = 4; #[inline] diff --git a/library/core/src/str/validations.rs b/library/core/src/str/validations.rs index 0f724dd961329..8174e4ff97dfc 100644 --- a/library/core/src/str/validations.rs +++ b/library/core/src/str/validations.rs @@ -2,7 +2,6 @@ use super::Utf8Error; use crate::intrinsics::const_eval_select; -use crate::mem; /// Returns the initial codepoint accumulator for the first byte. 
/// The first byte is special, only want bottom 5 bits for width 2, 4 bits @@ -128,7 +127,7 @@ pub(super) const fn run_utf8_validation(v: &[u8]) -> Result<(), Utf8Error> { let mut index = 0; let len = v.len(); - const USIZE_BYTES: usize = mem::size_of::(); + const USIZE_BYTES: usize = size_of::(); let ascii_block_size = 2 * USIZE_BYTES; let blocks_end = if len >= ascii_block_size { len - ascii_block_size + 1 } else { 0 }; diff --git a/library/core/src/sync/atomic.rs b/library/core/src/sync/atomic.rs index 73180bde54aa9..bac92ef94e71c 100644 --- a/library/core/src/sync/atomic.rs +++ b/library/core/src/sync/atomic.rs @@ -2033,7 +2033,7 @@ impl AtomicPtr { #[unstable(feature = "strict_provenance_atomic_ptr", issue = "99108")] #[cfg_attr(miri, track_caller)] // even without panics, this helps for Miri backtraces pub fn fetch_ptr_add(&self, val: usize, order: Ordering) -> *mut T { - self.fetch_byte_add(val.wrapping_mul(core::mem::size_of::()), order) + self.fetch_byte_add(val.wrapping_mul(size_of::()), order) } /// Offsets the pointer's address by subtracting `val` (in units of `T`), @@ -2078,7 +2078,7 @@ impl AtomicPtr { #[unstable(feature = "strict_provenance_atomic_ptr", issue = "99108")] #[cfg_attr(miri, track_caller)] // even without panics, this helps for Miri backtraces pub fn fetch_ptr_sub(&self, val: usize, order: Ordering) -> *mut T { - self.fetch_byte_sub(val.wrapping_mul(core::mem::size_of::()), order) + self.fetch_byte_sub(val.wrapping_mul(size_of::()), order) } /// Offsets the pointer's address by adding `val` *bytes*, returning the diff --git a/library/coretests/benches/ascii/is_ascii.rs b/library/coretests/benches/ascii/is_ascii.rs index ced7084fb0e48..a6c718409ee85 100644 --- a/library/coretests/benches/ascii/is_ascii.rs +++ b/library/coretests/benches/ascii/is_ascii.rs @@ -95,7 +95,7 @@ benches! { // These are separate since it's easier to debug errors if they don't go through // macro expansion first. 
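`fetch_ptr_add`/`fetch_ptr_sub` above convert an element count into bytes with `val * size_of::<T>()`; the same scaling applies to ordinary pointer arithmetic (standalone sketch):

```
fn main() {
    let xs = [0u64; 4];
    let p = xs.as_ptr();
    // Moving by n elements equals moving by n * size_of::<T>() bytes.
    let by_elems = p.wrapping_add(3);
    let by_bytes = p.wrapping_byte_add(3 * size_of::<u64>());
    assert_eq!(by_elems, by_bytes);
}
```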
fn is_ascii_align_to(bytes: &[u8]) -> bool { - if bytes.len() < core::mem::size_of::() { + if bytes.len() < size_of::() { return bytes.iter().all(|b| b.is_ascii()); } // SAFETY: transmuting a sequence of `u8` to `usize` is always fine @@ -106,7 +106,7 @@ fn is_ascii_align_to(bytes: &[u8]) -> bool { } fn is_ascii_align_to_unrolled(bytes: &[u8]) -> bool { - if bytes.len() < core::mem::size_of::() { + if bytes.len() < size_of::() { return bytes.iter().all(|b| b.is_ascii()); } // SAFETY: transmuting a sequence of `u8` to `[usize; 2]` is always fine @@ -118,6 +118,6 @@ fn is_ascii_align_to_unrolled(bytes: &[u8]) -> bool { #[inline] fn contains_nonascii(v: usize) -> bool { - const NONASCII_MASK: usize = usize::from_ne_bytes([0x80; core::mem::size_of::()]); + const NONASCII_MASK: usize = usize::from_ne_bytes([0x80; size_of::()]); (NONASCII_MASK & v) != 0 } diff --git a/library/coretests/benches/iter.rs b/library/coretests/benches/iter.rs index e14f26b729032..e49d152eb539f 100644 --- a/library/coretests/benches/iter.rs +++ b/library/coretests/benches/iter.rs @@ -1,6 +1,5 @@ use core::borrow::Borrow; use core::iter::*; -use core::mem; use core::num::Wrapping; use core::ops::Range; @@ -477,7 +476,7 @@ fn bench_next_chunk_copied(b: &mut Bencher) { let mut iter = black_box(&v).iter().copied(); let mut acc = Wrapping(0); // This uses a while-let loop to side-step the TRA specialization in ArrayChunks - while let Ok(chunk) = iter.next_chunk::<{ mem::size_of::() }>() { + while let Ok(chunk) = iter.next_chunk::<{ size_of::() }>() { let d = u64::from_ne_bytes(chunk); acc += Wrapping(d.rotate_left(7).wrapping_add(1)); } @@ -496,7 +495,7 @@ fn bench_next_chunk_trusted_random_access(b: &mut Bencher) { .iter() // this shows that we're not relying on the slice::Iter specialization in Copied .map(|b| *b.borrow()) - .array_chunks::<{ mem::size_of::() }>() + .array_chunks::<{ size_of::() }>() .map(|ary| { let d = u64::from_ne_bytes(ary); Wrapping(d.rotate_left(7).wrapping_add(1)) diff --git a/library/coretests/tests/alloc.rs b/library/coretests/tests/alloc.rs index b88f1821cd77c..72fdf82c1f8cf 100644 --- a/library/coretests/tests/alloc.rs +++ b/library/coretests/tests/alloc.rs @@ -1,5 +1,4 @@ use core::alloc::Layout; -use core::mem::size_of; use core::ptr::{self, NonNull}; #[test] diff --git a/library/coretests/tests/atomic.rs b/library/coretests/tests/atomic.rs index 0ffba538b2074..e0c0fe4790c04 100644 --- a/library/coretests/tests/atomic.rs +++ b/library/coretests/tests/atomic.rs @@ -250,8 +250,6 @@ fn atomic_access_bool() { #[test] fn atomic_alignment() { - use std::mem::{align_of, size_of}; - #[cfg(target_has_atomic = "8")] assert_eq!(align_of::(), size_of::()); #[cfg(target_has_atomic = "ptr")] diff --git a/library/coretests/tests/hash/sip.rs b/library/coretests/tests/hash/sip.rs index f79954f916b77..6add1a33cb931 100644 --- a/library/coretests/tests/hash/sip.rs +++ b/library/coretests/tests/hash/sip.rs @@ -1,7 +1,7 @@ #![allow(deprecated)] use core::hash::{Hash, Hasher, SipHasher, SipHasher13}; -use core::{mem, slice}; +use core::slice; // Hash just the bytes of the slice, without length prefix struct Bytes<'a>(&'a [u8]); @@ -314,7 +314,7 @@ fn test_write_short_works() { h1.write_u8(0x01u8); let mut h2 = SipHasher::new(); h2.write(unsafe { - slice::from_raw_parts(&test_usize as *const _ as *const u8, mem::size_of::()) + slice::from_raw_parts(&test_usize as *const _ as *const u8, size_of::()) }); h2.write(b"bytes"); h2.write(b"string"); diff --git a/library/coretests/tests/nonzero.rs 
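A quick check of the `NONASCII_MASK` trick used by `contains_nonascii` in the benchmark above (standalone sketch):

```
fn main() {
    const NONASCII_MASK: usize = usize::from_ne_bytes([0x80; size_of::<usize>()]);
    // A word full of ASCII bytes has no high bits set, so the mask misses it...
    assert_eq!(NONASCII_MASK & usize::from_ne_bytes([b'a'; size_of::<usize>()]), 0);
    // ...while any non-ASCII byte (high bit set) is caught.
    assert_ne!(NONASCII_MASK & usize::from_ne_bytes([0xC3; size_of::<usize>()]), 0);
}
```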
b/library/coretests/tests/nonzero.rs index bdc5701d9fd23..00232c9b7061a 100644 --- a/library/coretests/tests/nonzero.rs +++ b/library/coretests/tests/nonzero.rs @@ -1,6 +1,5 @@ use core::num::{IntErrorKind, NonZero}; use core::option::Option::None; -use std::mem::size_of; #[test] fn test_create_nonzero_instance() { diff --git a/library/coretests/tests/ptr.rs b/library/coretests/tests/ptr.rs index 0c9f9b338b0c1..370349c879e08 100644 --- a/library/coretests/tests/ptr.rs +++ b/library/coretests/tests/ptr.rs @@ -1,6 +1,6 @@ use core::cell::RefCell; use core::marker::Freeze; -use core::mem::{self, MaybeUninit}; +use core::mem::MaybeUninit; use core::num::NonZero; use core::ptr; use core::ptr::*; @@ -388,7 +388,7 @@ fn align_offset_various_strides() { let mut expected = usize::MAX; // Naive but definitely correct way to find the *first* aligned element of stride::. for el in 0..align { - if (numptr + el * ::std::mem::size_of::()) % align == 0 { + if (numptr + el * size_of::()) % align == 0 { expected = el; break; } @@ -398,7 +398,7 @@ fn align_offset_various_strides() { eprintln!( "aligning {:p} (with stride of {}) to {}, expected {}, got {}", ptr, - ::std::mem::size_of::(), + size_of::(), align, expected, got @@ -613,9 +613,9 @@ fn dyn_metadata() { let meta = metadata(trait_object); assert_eq!(meta.size_of(), 64); - assert_eq!(meta.size_of(), std::mem::size_of::()); + assert_eq!(meta.size_of(), size_of::()); assert_eq!(meta.align_of(), 32); - assert_eq!(meta.align_of(), std::mem::align_of::()); + assert_eq!(meta.align_of(), align_of::()); assert_eq!(meta.layout(), std::alloc::Layout::new::()); assert!(format!("{meta:?}").starts_with("DynMetadata(0x")); @@ -789,7 +789,7 @@ fn nonnull_tagged_pointer_with_provenance() { impl TaggedPointer { /// The ABI-required minimum alignment of the `P` type. - pub const ALIGNMENT: usize = core::mem::align_of::(); + pub const ALIGNMENT: usize = align_of::(); /// A mask for data-carrying bits of the address. pub const DATA_MASK: usize = !Self::ADDRESS_MASK; /// Number of available bits of storage in the address. @@ -873,7 +873,7 @@ fn test_const_copy_ptr() { ptr::copy( &ptr1 as *const _ as *const MaybeUninit, &mut ptr2 as *mut _ as *mut MaybeUninit, - mem::size_of::<&i32>(), + size_of::<&i32>(), ); } @@ -891,7 +891,7 @@ fn test_const_copy_ptr() { ptr::copy_nonoverlapping( &ptr1 as *const _ as *const MaybeUninit, &mut ptr2 as *mut _ as *mut MaybeUninit, - mem::size_of::<&i32>(), + size_of::<&i32>(), ); } @@ -936,7 +936,7 @@ fn test_const_swap_ptr() { let mut s2 = A(S { ptr: &666, f1: 0, f2: [0; 3] }); // Swap ptr1 and ptr2, as an array. - type T = [u8; mem::size_of::()]; + type T = [u8; size_of::()]; unsafe { ptr::swap(ptr::from_mut(&mut s1).cast::(), ptr::from_mut(&mut s2).cast::()); } diff --git a/library/coretests/tests/slice.rs b/library/coretests/tests/slice.rs index 1c5c8a9ebf258..4fd0721370896 100644 --- a/library/coretests/tests/slice.rs +++ b/library/coretests/tests/slice.rs @@ -2058,15 +2058,13 @@ fn test_align_to_non_trivial() { #[test] fn test_align_to_empty_mid() { - use core::mem; - // Make sure that we do not create empty unaligned slices for the mid part, even when the // overall slice is too short to contain an aligned address. 
let bytes = [1, 2, 3, 4, 5, 6, 7]; type Chunk = u32; for offset in 0..4 { let (_, mid, _) = unsafe { bytes[offset..offset + 1].align_to::() }; - assert_eq!(mid.as_ptr() as usize % mem::align_of::(), 0); + assert_eq!(mid.as_ptr() as usize % align_of::(), 0); } } diff --git a/library/panic_unwind/src/emcc.rs b/library/panic_unwind/src/emcc.rs index 1569c26c9de47..bad795a019c9a 100644 --- a/library/panic_unwind/src/emcc.rs +++ b/library/panic_unwind/src/emcc.rs @@ -9,7 +9,7 @@ use alloc::boxed::Box; use core::any::Any; use core::sync::atomic::{AtomicBool, Ordering}; -use core::{intrinsics, mem, ptr}; +use core::{intrinsics, ptr}; use unwind as uw; @@ -97,7 +97,7 @@ pub(crate) unsafe fn cleanup(ptr: *mut u8) -> Box { pub(crate) unsafe fn panic(data: Box) -> u32 { unsafe { - let exception = __cxa_allocate_exception(mem::size_of::()) as *mut Exception; + let exception = __cxa_allocate_exception(size_of::()) as *mut Exception; if exception.is_null() { return uw::_URC_FATAL_PHASE1_ERROR as u32; } diff --git a/library/panic_unwind/src/seh.rs b/library/panic_unwind/src/seh.rs index 3a95b940221c2..3794b56c0898f 100644 --- a/library/panic_unwind/src/seh.rs +++ b/library/panic_unwind/src/seh.rs @@ -49,7 +49,7 @@ use alloc::boxed::Box; use core::any::Any; use core::ffi::{c_int, c_uint, c_void}; -use core::mem::{self, ManuallyDrop}; +use core::mem::ManuallyDrop; // NOTE(nbdd0121): The `canary` field is part of stable ABI. #[repr(C)] @@ -225,7 +225,7 @@ static mut CATCHABLE_TYPE: _CatchableType = _CatchableType { properties: 0, pType: ptr_t::null(), thisDisplacement: _PMD { mdisp: 0, pdisp: -1, vdisp: 0 }, - sizeOrOffset: mem::size_of::() as c_int, + sizeOrOffset: size_of::() as c_int, copyFunction: ptr_t::null(), }; diff --git a/library/proc_macro/src/bridge/selfless_reify.rs b/library/proc_macro/src/bridge/selfless_reify.rs index 312a79152e23b..b06434a5ffee2 100644 --- a/library/proc_macro/src/bridge/selfless_reify.rs +++ b/library/proc_macro/src/bridge/selfless_reify.rs @@ -50,7 +50,7 @@ macro_rules! define_reify_functions { >(f: F) -> $(extern $abi)? fn($($arg_ty),*) -> $ret_ty { // FIXME(eddyb) describe the `F` type (e.g. via `type_name::`) once panic // formatting becomes possible in `const fn`. - assert!(mem::size_of::() == 0, "selfless_reify: closure must be zero-sized"); + assert!(size_of::() == 0, "selfless_reify: closure must be zero-sized"); $(extern $abi)? fn wrapper< $($($param,)*)? 
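The `size_of::<F>() == 0` assertion in `selfless_reify` above holds for any closure that captures nothing; a capture makes the closure grow (standalone sketch):

```
fn main() {
    let add_one = |x: i32| x + 1; // captures nothing, so it is a zero-sized type
    assert_eq!(size_of_val(&add_one), 0);

    let delta = 5;
    let add_delta = move |x: i32| x + delta; // captures `delta` by value
    assert!(size_of_val(&add_delta) > 0);
}
```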
diff --git a/library/std/src/fs/tests.rs b/library/std/src/fs/tests.rs index 38dcd816d267d..6dd18e4f4c837 100644 --- a/library/std/src/fs/tests.rs +++ b/library/std/src/fs/tests.rs @@ -1878,7 +1878,7 @@ fn windows_unix_socket_exists() { let bytes = socket_path.as_os_str().as_encoded_bytes(); let bytes = core::slice::from_raw_parts(bytes.as_ptr().cast::(), bytes.len()); addr.sun_path[..bytes.len()].copy_from_slice(bytes); - let len = mem::size_of_val(&addr) as i32; + let len = size_of_val(&addr) as i32; let result = c::bind(socket, (&raw const addr).cast::(), len); c::closesocket(socket); assert_eq!(result, 0); diff --git a/library/std/src/io/error/tests.rs b/library/std/src/io/error/tests.rs index edac6563478cd..3e4029768eb85 100644 --- a/library/std/src/io/error/tests.rs +++ b/library/std/src/io/error/tests.rs @@ -1,6 +1,5 @@ use super::{Custom, Error, ErrorData, ErrorKind, Repr, SimpleMessage, const_error}; use crate::assert_matches::assert_matches; -use crate::mem::size_of; use crate::sys::decode_error_kind; use crate::sys::os::error_string; use crate::{error, fmt}; diff --git a/library/std/src/os/fd/tests.rs b/library/std/src/os/fd/tests.rs index b39863644f116..7e9cf038e9a75 100644 --- a/library/std/src/os/fd/tests.rs +++ b/library/std/src/os/fd/tests.rs @@ -36,7 +36,6 @@ fn test_fd() { #[cfg(any(unix, target_os = "wasi"))] #[test] fn test_niche_optimizations() { - use crate::mem::size_of; #[cfg(unix)] use crate::os::unix::io::{BorrowedFd, FromRawFd, IntoRawFd, OwnedFd, RawFd}; #[cfg(target_os = "wasi")] diff --git a/library/std/src/os/unix/io/tests.rs b/library/std/src/os/unix/io/tests.rs index 84d2a7a1a91b4..fc147730578ac 100644 --- a/library/std/src/os/unix/io/tests.rs +++ b/library/std/src/os/unix/io/tests.rs @@ -1,4 +1,3 @@ -use crate::mem::size_of; use crate::os::unix::io::RawFd; #[test] diff --git a/library/std/src/os/unix/net/addr.rs b/library/std/src/os/unix/net/addr.rs index 56789f235fdab..cb1246db3109e 100644 --- a/library/std/src/os/unix/net/addr.rs +++ b/library/std/src/os/unix/net/addr.rs @@ -94,7 +94,7 @@ impl SocketAddr { { unsafe { let mut addr: libc::sockaddr_un = mem::zeroed(); - let mut len = mem::size_of::() as libc::socklen_t; + let mut len = size_of::() as libc::socklen_t; cvt(f((&raw mut addr) as *mut _, &mut len))?; SocketAddr::from_parts(addr, len) } diff --git a/library/std/src/os/unix/net/listener.rs b/library/std/src/os/unix/net/listener.rs index be236317d047d..27428c9eb2855 100644 --- a/library/std/src/os/unix/net/listener.rs +++ b/library/std/src/os/unix/net/listener.rs @@ -177,7 +177,7 @@ impl UnixListener { #[stable(feature = "unix_socket", since = "1.10.0")] pub fn accept(&self) -> io::Result<(UnixStream, SocketAddr)> { let mut storage: libc::sockaddr_un = unsafe { mem::zeroed() }; - let mut len = mem::size_of_val(&storage) as libc::socklen_t; + let mut len = size_of_val(&storage) as libc::socklen_t; let sock = self.0.accept((&raw mut storage) as *mut _, &mut len)?; let addr = SocketAddr::from_parts(storage, len)?; Ok((UnixStream(sock), addr)) diff --git a/library/std/src/os/unix/net/ucred.rs b/library/std/src/os/unix/net/ucred.rs index e1014a4f296dd..2dd7d409e48c2 100644 --- a/library/std/src/os/unix/net/ucred.rs +++ b/library/std/src/os/unix/net/ucred.rs @@ -41,15 +41,15 @@ mod impl_linux { use libc::{SO_PEERCRED, SOL_SOCKET, c_void, getsockopt, socklen_t, ucred}; use super::UCred; + use crate::io; use crate::os::unix::io::AsRawFd; use crate::os::unix::net::UnixStream; - use crate::{io, mem}; pub fn peer_cred(socket: &UnixStream) -> io::Result { - 
let ucred_size = mem::size_of::(); + let ucred_size = size_of::(); // Trivial sanity checks. - assert!(mem::size_of::() <= mem::size_of::()); + assert!(size_of::() <= size_of::()); assert!(ucred_size <= u32::MAX as usize); let mut ucred_size = ucred_size as socklen_t; @@ -64,7 +64,7 @@ mod impl_linux { &mut ucred_size, ); - if ret == 0 && ucred_size as usize == mem::size_of::() { + if ret == 0 && ucred_size as usize == size_of::() { Ok(UCred { uid: ucred.uid, gid: ucred.gid, pid: Some(ucred.pid) }) } else { Err(io::Error::last_os_error()) @@ -101,9 +101,9 @@ mod impl_apple { use libc::{LOCAL_PEERPID, SOL_LOCAL, c_void, getpeereid, getsockopt, pid_t, socklen_t}; use super::UCred; + use crate::io; use crate::os::unix::io::AsRawFd; use crate::os::unix::net::UnixStream; - use crate::{io, mem}; pub fn peer_cred(socket: &UnixStream) -> io::Result { let mut cred = UCred { uid: 1, gid: 1, pid: None }; @@ -115,7 +115,7 @@ mod impl_apple { } let mut pid: pid_t = 1; - let mut pid_size = mem::size_of::() as socklen_t; + let mut pid_size = size_of::() as socklen_t; let ret = getsockopt( socket.as_raw_fd(), @@ -125,7 +125,7 @@ mod impl_apple { &mut pid_size, ); - if ret == 0 && pid_size as usize == mem::size_of::() { + if ret == 0 && pid_size as usize == size_of::() { cred.pid = Some(pid); Ok(cred) } else { diff --git a/library/std/src/os/wasi/io/tests.rs b/library/std/src/os/wasi/io/tests.rs index 418274752b0ad..c5c6a19a6c885 100644 --- a/library/std/src/os/wasi/io/tests.rs +++ b/library/std/src/os/wasi/io/tests.rs @@ -1,4 +1,3 @@ -use crate::mem::size_of; use crate::os::wasi::io::RawFd; #[test] diff --git a/library/std/src/os/windows/io/tests.rs b/library/std/src/os/windows/io/tests.rs index 41734e52e8cce..029b6f5cd3d9e 100644 --- a/library/std/src/os/windows/io/tests.rs +++ b/library/std/src/os/windows/io/tests.rs @@ -1,6 +1,5 @@ #[test] fn test_niche_optimizations_socket() { - use crate::mem::size_of; use crate::os::windows::io::{ BorrowedSocket, FromRawSocket, IntoRawSocket, OwnedSocket, RawSocket, }; diff --git a/library/std/src/os/windows/process.rs b/library/std/src/os/windows/process.rs index 201274cf03aec..fa65a7c51bfa0 100644 --- a/library/std/src/os/windows/process.rs +++ b/library/std/src/os/windows/process.rs @@ -500,11 +500,7 @@ impl<'a> ProcThreadAttributeListBuilder<'a> { /// [1]: pub fn attribute(self, attribute: usize, value: &'a T) -> Self { unsafe { - self.raw_attribute( - attribute, - ptr::addr_of!(*value).cast::(), - crate::mem::size_of::(), - ) + self.raw_attribute(attribute, ptr::addr_of!(*value).cast::(), size_of::()) } } @@ -574,7 +570,7 @@ impl<'a> ProcThreadAttributeListBuilder<'a> { /// .raw_attribute( /// PROC_THREAD_ATTRIBUTE_PSEUDOCONSOLE, /// h_pc as *const c_void, - /// std::mem::size_of::(), + /// size_of::(), /// ) /// .finish()? 
/// }; diff --git a/library/std/src/os/xous/ffi.rs b/library/std/src/os/xous/ffi.rs index 1db314e9ddad7..9394f0a0496b2 100644 --- a/library/std/src/os/xous/ffi.rs +++ b/library/std/src/os/xous/ffi.rs @@ -368,7 +368,7 @@ pub(crate) unsafe fn map_memory( let mut a0 = Syscall::MapMemory as usize; let mut a1 = phys.map(|p| p.as_ptr() as usize).unwrap_or_default(); let mut a2 = virt.map(|p| p.as_ptr() as usize).unwrap_or_default(); - let a3 = count * core::mem::size_of::(); + let a3 = count * size_of::(); let a4 = flags.bits(); let a5 = 0; let a6 = 0; @@ -392,7 +392,7 @@ pub(crate) unsafe fn map_memory( if result == SyscallResult::MemoryRange as usize { let start = core::ptr::with_exposed_provenance_mut::(a1); - let len = a2 / core::mem::size_of::(); + let len = a2 / size_of::(); let end = unsafe { start.add(len) }; Ok(unsafe { core::slice::from_raw_parts_mut(start, len) }) } else if result == SyscallResult::Error as usize { @@ -409,7 +409,7 @@ pub(crate) unsafe fn map_memory( pub(crate) unsafe fn unmap_memory(range: *mut [T]) -> Result<(), Error> { let mut a0 = Syscall::UnmapMemory as usize; let mut a1 = range.as_mut_ptr() as usize; - let a2 = range.len() * core::mem::size_of::(); + let a2 = range.len() * size_of::(); let a3 = 0; let a4 = 0; let a5 = 0; @@ -455,7 +455,7 @@ pub(crate) unsafe fn update_memory_flags( ) -> Result<(), Error> { let mut a0 = Syscall::UpdateMemoryFlags as usize; let mut a1 = range.as_mut_ptr() as usize; - let a2 = range.len() * core::mem::size_of::(); + let a2 = range.len() * size_of::(); let a3 = new_flags.bits(); let a4 = 0; // Process ID is currently None let a5 = 0; diff --git a/library/std/src/os/xous/services/log.rs b/library/std/src/os/xous/services/log.rs index 1661011ca64b1..095d4f4a3e7a8 100644 --- a/library/std/src/os/xous/services/log.rs +++ b/library/std/src/os/xous/services/log.rs @@ -7,8 +7,8 @@ use crate::os::xous::ffi::Connection; /// `group_or_null([1,2,3,4,5,6,7,8], 1)` on a 32-bit system will return a /// `usize` with 5678 packed into it. fn group_or_null(data: &[u8], offset: usize) -> usize { - let start = offset * core::mem::size_of::(); - let mut out_array = [0u8; core::mem::size_of::()]; + let start = offset * size_of::(); + let mut out_array = [0u8; size_of::()]; if start < data.len() { for (dest, src) in out_array.iter_mut().zip(&data[start..]) { *dest = *src; diff --git a/library/std/src/sys/alloc/unix.rs b/library/std/src/sys/alloc/unix.rs index 1af9d76629014..a7ac4117ec902 100644 --- a/library/std/src/sys/alloc/unix.rs +++ b/library/std/src/sys/alloc/unix.rs @@ -81,7 +81,7 @@ cfg_if::cfg_if! { // while others require the alignment to be at least the pointer size (Illumos, macOS). // posix_memalign only has one, clear requirement: that the alignment be a multiple of // `sizeof(void*)`. Since these are all powers of 2, we can just use max. - let align = layout.align().max(crate::mem::size_of::()); + let align = layout.align().max(size_of::()); let ret = unsafe { libc::posix_memalign(&mut out, align, layout.size()) }; if ret != 0 { ptr::null_mut() } else { out as *mut u8 } } diff --git a/library/std/src/sys/alloc/windows/tests.rs b/library/std/src/sys/alloc/windows/tests.rs index 674a3e1d92d17..1d5614528b12a 100644 --- a/library/std/src/sys/alloc/windows/tests.rs +++ b/library/std/src/sys/alloc/windows/tests.rs @@ -1,9 +1,8 @@ use super::{Header, MIN_ALIGN}; -use crate::mem; #[test] fn alloc_header() { // Header must fit in the padding before an aligned pointer - assert!(mem::size_of::
<Header>() <= MIN_ALIGN); - assert!(mem::align_of::
<Header>() <= MIN_ALIGN); + assert!(size_of::
<Header>() <= MIN_ALIGN); + assert!(align_of::
() <= MIN_ALIGN); } diff --git a/library/std/src/sys/io/is_terminal/windows.rs b/library/std/src/sys/io/is_terminal/windows.rs index 3ec18fb47b9de..b0c718d71f9f3 100644 --- a/library/std/src/sys/io/is_terminal/windows.rs +++ b/library/std/src/sys/io/is_terminal/windows.rs @@ -1,5 +1,4 @@ use crate::ffi::c_void; -use crate::mem::size_of; use crate::os::windows::io::{AsHandle, AsRawHandle, BorrowedHandle}; use crate::sys::c; diff --git a/library/std/src/sys/net/connection/socket.rs b/library/std/src/sys/net/connection/socket.rs index ddd74b426158d..e154cf039cad1 100644 --- a/library/std/src/sys/net/connection/socket.rs +++ b/library/std/src/sys/net/connection/socket.rs @@ -154,11 +154,11 @@ fn socket_addr_to_c(addr: &SocketAddr) -> (SocketAddrCRepr, c::socklen_t) { match addr { SocketAddr::V4(a) => { let sockaddr = SocketAddrCRepr { v4: socket_addr_v4_to_c(a) }; - (sockaddr, mem::size_of::() as c::socklen_t) + (sockaddr, size_of::() as c::socklen_t) } SocketAddr::V6(a) => { let sockaddr = SocketAddrCRepr { v6: socket_addr_v6_to_c(a) }; - (sockaddr, mem::size_of::() as c::socklen_t) + (sockaddr, size_of::() as c::socklen_t) } } } @@ -169,13 +169,13 @@ unsafe fn socket_addr_from_c( ) -> io::Result { match (*storage).ss_family as c_int { c::AF_INET => { - assert!(len >= mem::size_of::()); + assert!(len >= size_of::()); Ok(SocketAddr::V4(socket_addr_v4_from_c(unsafe { *(storage as *const _ as *const c::sockaddr_in) }))) } c::AF_INET6 => { - assert!(len >= mem::size_of::()); + assert!(len >= size_of::()); Ok(SocketAddr::V6(socket_addr_v6_from_c(unsafe { *(storage as *const _ as *const c::sockaddr_in6) }))) @@ -200,7 +200,7 @@ pub fn setsockopt( level, option_name, (&raw const option_value) as *const _, - mem::size_of::() as c::socklen_t, + size_of::() as c::socklen_t, ))?; Ok(()) } @@ -209,7 +209,7 @@ pub fn setsockopt( pub fn getsockopt(sock: &Socket, level: c_int, option_name: c_int) -> io::Result { unsafe { let mut option_value: T = mem::zeroed(); - let mut option_len = mem::size_of::() as c::socklen_t; + let mut option_len = size_of::() as c::socklen_t; cvt(c::getsockopt( sock.as_raw(), level, @@ -227,7 +227,7 @@ where { unsafe { let mut storage: c::sockaddr_storage = mem::zeroed(); - let mut len = mem::size_of_val(&storage) as c::socklen_t; + let mut len = size_of_val(&storage) as c::socklen_t; cvt(f((&raw mut storage) as *mut _, &mut len))?; socket_addr_from_c(&storage, len as usize) } @@ -561,7 +561,7 @@ impl TcpListener { // so we don't need to zero it here. // reference: https://linux.die.net/man/2/accept4 let mut storage: mem::MaybeUninit = mem::MaybeUninit::uninit(); - let mut len = mem::size_of_val(&storage) as c::socklen_t; + let mut len = size_of_val(&storage) as c::socklen_t; let sock = self.inner.accept(storage.as_mut_ptr() as *mut _, &mut len)?; let addr = unsafe { socket_addr_from_c(storage.as_ptr(), len as usize)? 
}; Ok((TcpStream { inner: sock }, addr)) diff --git a/library/std/src/sys/net/connection/socket/hermit.rs b/library/std/src/sys/net/connection/socket/hermit.rs index e393342ced9da..f49821657d940 100644 --- a/library/std/src/sys/net/connection/socket/hermit.rs +++ b/library/std/src/sys/net/connection/socket/hermit.rs @@ -183,7 +183,7 @@ impl Socket { fn recv_from_with_flags(&self, buf: &mut [u8], flags: i32) -> io::Result<(usize, SocketAddr)> { let mut storage: netc::sockaddr_storage = unsafe { mem::zeroed() }; - let mut addrlen = mem::size_of_val(&storage) as netc::socklen_t; + let mut addrlen = size_of_val(&storage) as netc::socklen_t; let n = cvt(unsafe { netc::recvfrom( diff --git a/library/std/src/sys/net/connection/socket/solid.rs b/library/std/src/sys/net/connection/socket/solid.rs index 906bef267b6f0..94bb605c1007c 100644 --- a/library/std/src/sys/net/connection/socket/solid.rs +++ b/library/std/src/sys/net/connection/socket/solid.rs @@ -244,7 +244,7 @@ impl Socket { flags: c_int, ) -> io::Result<(usize, SocketAddr)> { let mut storage: netc::sockaddr_storage = unsafe { mem::zeroed() }; - let mut addrlen = mem::size_of_val(&storage) as netc::socklen_t; + let mut addrlen = size_of_val(&storage) as netc::socklen_t; let n = cvt(unsafe { netc::recvfrom( diff --git a/library/std/src/sys/net/connection/socket/unix.rs b/library/std/src/sys/net/connection/socket/unix.rs index 29fb47ddca3b9..e633cf772c528 100644 --- a/library/std/src/sys/net/connection/socket/unix.rs +++ b/library/std/src/sys/net/connection/socket/unix.rs @@ -326,7 +326,7 @@ impl Socket { // so we don't need to zero it here. // reference: https://linux.die.net/man/2/recvfrom let mut storage: mem::MaybeUninit = mem::MaybeUninit::uninit(); - let mut addrlen = mem::size_of_val(&storage) as libc::socklen_t; + let mut addrlen = size_of_val(&storage) as libc::socklen_t; let n = cvt(unsafe { libc::recvfrom( diff --git a/library/std/src/sys/net/connection/socket/wasip2.rs b/library/std/src/sys/net/connection/socket/wasip2.rs index c5034e73dd704..73c2583187207 100644 --- a/library/std/src/sys/net/connection/socket/wasip2.rs +++ b/library/std/src/sys/net/connection/socket/wasip2.rs @@ -211,7 +211,7 @@ impl Socket { flags: c_int, ) -> io::Result<(usize, SocketAddr)> { let mut storage: netc::sockaddr_storage = unsafe { mem::zeroed() }; - let mut addrlen = mem::size_of_val(&storage) as netc::socklen_t; + let mut addrlen = size_of_val(&storage) as netc::socklen_t; let n = cvt(unsafe { netc::recvfrom( diff --git a/library/std/src/sys/net/connection/socket/windows.rs b/library/std/src/sys/net/connection/socket/windows.rs index 428f142dabe20..ce975bb2289c2 100644 --- a/library/std/src/sys/net/connection/socket/windows.rs +++ b/library/std/src/sys/net/connection/socket/windows.rs @@ -381,7 +381,7 @@ impl Socket { flags: c_int, ) -> io::Result<(usize, SocketAddr)> { let mut storage = unsafe { mem::zeroed::() }; - let mut addrlen = mem::size_of_val(&storage) as netc::socklen_t; + let mut addrlen = size_of_val(&storage) as netc::socklen_t; let length = cmp::min(buf.len(), ::MAX as usize) as wrlen_t; // On unix when a socket is shut down all further reads return 0, so we @@ -514,13 +514,13 @@ impl Socket { // This is used by sys_common code to abstract over Windows and Unix. 
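The `size_of_val(&storage)` initialisations above rely only on the fact that, for a `Sized` local, `size_of_val` agrees with `size_of` of its type. Sketch with a stand-in struct (`Storage` here is illustrative, not the real `sockaddr_storage`):

```
fn main() {
    #[allow(dead_code)]
    #[repr(C)]
    struct Storage {
        family: u16,
        data: [u8; 126],
    }
    let storage = Storage { family: 0, data: [0; 126] };
    // For a Sized value the two forms are interchangeable.
    assert_eq!(size_of_val(&storage), size_of::<Storage>());
    let addrlen = size_of_val(&storage) as u32; // the socklen_t-style cast
    assert_eq!(addrlen as usize, size_of::<Storage>());
}
```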
pub fn as_raw(&self) -> c::SOCKET { - debug_assert_eq!(mem::size_of::(), mem::size_of::()); - debug_assert_eq!(mem::align_of::(), mem::align_of::()); + debug_assert_eq!(size_of::(), size_of::()); + debug_assert_eq!(align_of::(), align_of::()); self.as_inner().as_raw_socket() as c::SOCKET } pub unsafe fn from_raw(raw: c::SOCKET) -> Self { - debug_assert_eq!(mem::size_of::(), mem::size_of::()); - debug_assert_eq!(mem::align_of::(), mem::align_of::()); + debug_assert_eq!(size_of::(), size_of::()); + debug_assert_eq!(align_of::(), align_of::()); unsafe { Self::from_raw_socket(raw as RawSocket) } } } diff --git a/library/std/src/sys/net/connection/xous/udp.rs b/library/std/src/sys/net/connection/xous/udp.rs index f35970bc32152..c112c04ce94bc 100644 --- a/library/std/src/sys/net/connection/xous/udp.rs +++ b/library/std/src/sys/net/connection/xous/udp.rs @@ -244,7 +244,7 @@ impl UdpSocket { // let buf = unsafe { // xous::MemoryRange::new( // &mut tx_req as *mut SendData as usize, - // core::mem::size_of::(), + // size_of::(), // ) // .unwrap() // }; diff --git a/library/std/src/sys/pal/itron/thread.rs b/library/std/src/sys/pal/itron/thread.rs index 04095e1a7cf99..d1481f827e1e1 100644 --- a/library/std/src/sys/pal/itron/thread.rs +++ b/library/std/src/sys/pal/itron/thread.rs @@ -80,7 +80,7 @@ const LIFECYCLE_EXITED_OR_FINISHED_OR_JOIN_FINALIZE: usize = usize::MAX; // there's no single value for `JOINING` // 64KiB for 32-bit ISAs, 128KiB for 64-bit ISAs. -pub const DEFAULT_MIN_STACK_SIZE: usize = 0x4000 * crate::mem::size_of::(); +pub const DEFAULT_MIN_STACK_SIZE: usize = 0x4000 * size_of::(); impl Thread { /// # Safety diff --git a/library/std/src/sys/pal/sgx/abi/usercalls/alloc.rs b/library/std/src/sys/pal/sgx/abi/usercalls/alloc.rs index 5069ab82ccc90..301e3299c0572 100644 --- a/library/std/src/sys/pal/sgx/abi/usercalls/alloc.rs +++ b/library/std/src/sys/pal/sgx/abi/usercalls/alloc.rs @@ -63,7 +63,7 @@ unsafe impl UserSafeSized for [T; 2] {} /// A type that can be represented in memory as one or more `UserSafeSized`s. #[unstable(feature = "sgx_platform", issue = "56975")] pub unsafe trait UserSafe { - /// Equivalent to `mem::align_of::`. + /// Equivalent to `align_of::`. fn align_of() -> usize; /// Constructs a pointer to `Self` given a memory range in user space. 
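The "64KiB for 32-bit ISAs, 128KiB for 64-bit ISAs" comment on `DEFAULT_MIN_STACK_SIZE` above is just `0x4000` machine words scaled by the word size (standalone sketch):

```
fn main() {
    const WORDS: usize = 0x4000; // 16_384 words
    assert_eq!(WORDS * size_of::<u32>(), 64 * 1024);  // 4-byte words -> 64 KiB
    assert_eq!(WORDS * size_of::<u64>(), 128 * 1024); // 8-byte words -> 128 KiB
}
```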
@@ -120,7 +120,7 @@ pub unsafe trait UserSafe { let is_aligned = |p: *const u8| -> bool { p.is_aligned_to(Self::align_of()) }; assert!(is_aligned(ptr as *const u8)); - assert!(is_user_range(ptr as _, mem::size_of_val(unsafe { &*ptr }))); + assert!(is_user_range(ptr as _, size_of_val(unsafe { &*ptr }))); assert!(!ptr.is_null()); } } @@ -128,11 +128,11 @@ pub unsafe trait UserSafe { #[unstable(feature = "sgx_platform", issue = "56975")] unsafe impl UserSafe for T { fn align_of() -> usize { - mem::align_of::() + align_of::() } unsafe fn from_raw_sized_unchecked(ptr: *mut u8, size: usize) -> *mut Self { - assert_eq!(size, mem::size_of::()); + assert_eq!(size, size_of::()); ptr as _ } } @@ -140,7 +140,7 @@ unsafe impl UserSafe for T { #[unstable(feature = "sgx_platform", issue = "56975")] unsafe impl UserSafe for [T] { fn align_of() -> usize { - mem::align_of::() + align_of::() } /// # Safety @@ -155,7 +155,7 @@ unsafe impl UserSafe for [T] { /// /// * the element size is not a factor of the size unsafe fn from_raw_sized_unchecked(ptr: *mut u8, size: usize) -> *mut Self { - let elem_size = mem::size_of::(); + let elem_size = size_of::(); assert_eq!(size % elem_size, 0); let len = size / elem_size; // SAFETY: The caller must uphold the safety contract for `from_raw_sized_unchecked` @@ -239,7 +239,7 @@ where /// Copies `val` into freshly allocated space in user memory. pub fn new_from_enclave(val: &T) -> Self { unsafe { - let mut user = Self::new_uninit_bytes(mem::size_of_val(val)); + let mut user = Self::new_uninit_bytes(size_of_val(val)); user.copy_from_enclave(val); user } @@ -277,7 +277,7 @@ where { /// Allocates space for `T` in user memory. pub fn uninitialized() -> Self { - Self::new_uninit_bytes(mem::size_of::()) + Self::new_uninit_bytes(size_of::()) } } @@ -288,7 +288,7 @@ where { /// Allocates space for a `[T]` of `n` elements in user memory. pub fn uninitialized(n: usize) -> Self { - Self::new_uninit_bytes(n * mem::size_of::()) + Self::new_uninit_bytes(n * size_of::()) } /// Creates an owned `User<[T]>` from a raw thin pointer and a slice length. @@ -306,9 +306,7 @@ where /// * The pointed-to range does not fit in the address space /// * The pointed-to range is not in user memory pub unsafe fn from_raw_parts(ptr: *mut T, len: usize) -> Self { - User(unsafe { - NonNull::new_userref(<[T]>::from_raw_sized(ptr as _, len * mem::size_of::())) - }) + User(unsafe { NonNull::new_userref(<[T]>::from_raw_sized(ptr as _, len * size_of::())) }) } } @@ -326,7 +324,7 @@ where // `<*const u8>::align_offset` aren't _guaranteed_ to compute the largest // possible middle region, and as such can't be used. fn u64_align_to_guaranteed(ptr: *const u8, mut len: usize) -> (usize, usize, usize) { - const QWORD_SIZE: usize = mem::size_of::(); + const QWORD_SIZE: usize = size_of::(); let offset = ptr as usize % QWORD_SIZE; @@ -532,11 +530,11 @@ where /// the source. This can happen for dynamically-sized types such as slices. pub fn copy_from_enclave(&mut self, val: &T) { unsafe { - assert_eq!(mem::size_of_val(val), mem::size_of_val(&*self.0.get())); + assert_eq!(size_of_val(val), size_of_val(&*self.0.get())); copy_to_userspace( val as *const T as *const u8, self.0.get() as *mut T as *mut u8, - mem::size_of_val(val), + size_of_val(val), ); } } @@ -548,11 +546,11 @@ where /// the source. This can happen for dynamically-sized types such as slices. 
pub fn copy_to_enclave(&self, dest: &mut T) { unsafe { - assert_eq!(mem::size_of_val(dest), mem::size_of_val(&*self.0.get())); + assert_eq!(size_of_val(dest), size_of_val(&*self.0.get())); copy_from_userspace( self.0.get() as *const T as *const u8, dest as *mut T as *mut u8, - mem::size_of_val(dest), + size_of_val(dest), ); } } @@ -577,7 +575,7 @@ where pub fn to_enclave(&self) -> T { unsafe { let mut data = mem::MaybeUninit::uninit(); - copy_from_userspace(self.0.get() as _, data.as_mut_ptr() as _, mem::size_of::()); + copy_from_userspace(self.0.get() as _, data.as_mut_ptr() as _, size_of::()); data.assume_init() } } @@ -602,9 +600,7 @@ where /// * The pointed-to range is not in user memory pub unsafe fn from_raw_parts<'a>(ptr: *const T, len: usize) -> &'a Self { // SAFETY: The caller must uphold the safety contract for `from_raw_parts`. - unsafe { - &*(<[T]>::from_raw_sized(ptr as _, len * mem::size_of::()).as_ptr() as *const Self) - } + unsafe { &*(<[T]>::from_raw_sized(ptr as _, len * size_of::()).as_ptr() as *const Self) } } /// Creates a `&mut UserRef<[T]>` from a raw thin pointer and a slice length. @@ -624,7 +620,7 @@ where pub unsafe fn from_raw_parts_mut<'a>(ptr: *mut T, len: usize) -> &'a mut Self { // SAFETY: The caller must uphold the safety contract for `from_raw_parts_mut`. unsafe { - &mut *(<[T]>::from_raw_sized(ptr as _, len * mem::size_of::()).as_ptr() as *mut Self) + &mut *(<[T]>::from_raw_sized(ptr as _, len * size_of::()).as_ptr() as *mut Self) } } @@ -744,7 +740,7 @@ where fn drop(&mut self) { unsafe { let ptr = (*self.0.as_ptr()).0.get(); - super::free(ptr as _, mem::size_of_val(&mut *ptr), T::align_of()); + super::free(ptr as _, size_of_val(&mut *ptr), T::align_of()); } } } diff --git a/library/std/src/sys/pal/uefi/args.rs b/library/std/src/sys/pal/uefi/args.rs index bdf6f5a0c1c34..0c29caf2db676 100644 --- a/library/std/src/sys/pal/uefi/args.rs +++ b/library/std/src/sys/pal/uefi/args.rs @@ -4,7 +4,6 @@ use super::helpers; use crate::env::current_exe; use crate::ffi::OsString; use crate::iter::Iterator; -use crate::mem::size_of; use crate::{fmt, vec}; pub struct Args { diff --git a/library/std/src/sys/pal/uefi/helpers.rs b/library/std/src/sys/pal/uefi/helpers.rs index cb6aacd0063d4..ca28239ef14fa 100644 --- a/library/std/src/sys/pal/uefi/helpers.rs +++ b/library/std/src/sys/pal/uefi/helpers.rs @@ -15,7 +15,7 @@ use r_efi::protocols::{device_path, device_path_to_text, shell}; use crate::ffi::{OsStr, OsString}; use crate::io::{self, const_error}; use crate::marker::PhantomData; -use crate::mem::{MaybeUninit, size_of}; +use crate::mem::MaybeUninit; use crate::os::uefi::env::boot_services; use crate::os::uefi::ffi::{OsStrExt, OsStringExt}; use crate::os::uefi::{self}; diff --git a/library/std/src/sys/pal/uefi/process.rs b/library/std/src/sys/pal/uefi/process.rs index a47c8dbcaaafc..5b9e119edd1c6 100644 --- a/library/std/src/sys/pal/uefi/process.rs +++ b/library/std/src/sys/pal/uefi/process.rs @@ -490,7 +490,7 @@ mod uefi_command_internal { helpers::open_protocol(self.handle, loaded_image::PROTOCOL_GUID).unwrap(); let len = args.len(); - let args_size: u32 = (len * crate::mem::size_of::()).try_into().unwrap(); + let args_size: u32 = (len * size_of::()).try_into().unwrap(); let ptr = Box::into_raw(args).as_mut_ptr(); unsafe { diff --git a/library/std/src/sys/pal/uefi/tests.rs b/library/std/src/sys/pal/uefi/tests.rs index 5eb36da922b54..38658cc4e9ac4 100644 --- a/library/std/src/sys/pal/uefi/tests.rs +++ b/library/std/src/sys/pal/uefi/tests.rs @@ -16,7 +16,7 @@ fn align() { 
if *j <= 8 { assert_eq!(align_size(i, *j), i); } else { - assert!(align_size(i, *j) > i + std::mem::size_of::<*mut ()>()); + assert!(align_size(i, *j) > i + size_of::<*mut ()>()); } } } diff --git a/library/std/src/sys/pal/unix/fs.rs b/library/std/src/sys/pal/unix/fs.rs index 3df460e38b72e..20ba915af1364 100644 --- a/library/std/src/sys/pal/unix/fs.rs +++ b/library/std/src/sys/pal/unix/fs.rs @@ -1505,7 +1505,7 @@ impl File { self.as_raw_fd(), (&raw const attrlist).cast::().cast_mut(), buf.as_ptr().cast::().cast_mut(), - num_times * mem::size_of::(), + num_times * size_of::(), 0 ) })?; Ok(()) @@ -1660,7 +1660,7 @@ impl fmt::Debug for File { fn get_path(fd: c_int) -> Option { let info = Box::::new_zeroed(); let mut info = unsafe { info.assume_init() }; - info.kf_structsize = mem::size_of::() as libc::c_int; + info.kf_structsize = size_of::() as libc::c_int; let n = unsafe { libc::fcntl(fd, libc::F_KINFO, &mut *info) }; if n == -1 { return None; diff --git a/library/std/src/sys/pal/unix/futex.rs b/library/std/src/sys/pal/unix/futex.rs index d4551dd6a38bb..87ba13ca9321d 100644 --- a/library/std/src/sys/pal/unix/futex.rs +++ b/library/std/src/sys/pal/unix/futex.rs @@ -58,7 +58,7 @@ pub fn futex_wait(futex: &AtomicU32, expected: u32, timeout: Option) - _clockid: libc::CLOCK_MONOTONIC as u32, }); let umtx_timeout_ptr = umtx_timeout.as_ref().map_or(null(), |t| t as *const _); - let umtx_timeout_size = umtx_timeout.as_ref().map_or(0, |t| crate::mem::size_of_val(t)); + let umtx_timeout_size = umtx_timeout.as_ref().map_or(0, |t| size_of_val(t)); libc::_umtx_op( futex as *const AtomicU32 as *mut _, libc::UMTX_OP_WAIT_UINT_PRIVATE, diff --git a/library/std/src/sys/pal/unix/process/process_common.rs b/library/std/src/sys/pal/unix/process/process_common.rs index 342818ac91183..0ea9db211b311 100644 --- a/library/std/src/sys/pal/unix/process/process_common.rs +++ b/library/std/src/sys/pal/unix/process/process_common.rs @@ -43,10 +43,7 @@ cfg_if::cfg_if! 
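The `_umtx_op` timeout above passes a pointer and a byte length that are both derived from the same `Option`. A small self-contained sketch of that pattern; `Timeout` is a made-up stand-in type, not the real `_umtx_time` struct:

use core::ptr::null;

#[allow(dead_code)]
#[repr(C)]
struct Timeout {
    seconds: u64,
    nanos: u32,
}

fn main() {
    let timeout: Option<Timeout> = Some(Timeout { seconds: 1, nanos: 0 });

    // Null pointer and zero length when the timeout is absent; otherwise the
    // address of the value and its size in bytes.
    let ptr = timeout.as_ref().map_or(null(), |t| t as *const Timeout);
    let len = timeout.as_ref().map_or(0, |t| size_of_val(t));

    assert!(!ptr.is_null());
    assert_eq!(len, size_of::<Timeout>());
}
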
{ #[allow(dead_code)] pub unsafe fn sigaddset(set: *mut libc::sigset_t, signum: libc::c_int) -> libc::c_int { - use crate::{ - mem::{align_of, size_of}, - slice, - }; + use crate::slice; use libc::{c_ulong, sigset_t}; // The implementations from bionic (android libc) type pun `sigset_t` as an diff --git a/library/std/src/sys/pal/unix/process/process_fuchsia.rs b/library/std/src/sys/pal/unix/process/process_fuchsia.rs index 4ddc96356b996..05c9ace470e3e 100644 --- a/library/std/src/sys/pal/unix/process/process_fuchsia.rs +++ b/library/std/src/sys/pal/unix/process/process_fuchsia.rs @@ -179,7 +179,7 @@ impl Process { self.handle.raw(), ZX_INFO_PROCESS, (&raw mut proc_info) as *mut libc::c_void, - mem::size_of::(), + size_of::(), &mut actual, &mut avail, ))?; @@ -216,7 +216,7 @@ impl Process { self.handle.raw(), ZX_INFO_PROCESS, (&raw mut proc_info) as *mut libc::c_void, - mem::size_of::(), + size_of::(), &mut actual, &mut avail, ))?; diff --git a/library/std/src/sys/pal/unix/process/process_unix.rs b/library/std/src/sys/pal/unix/process/process_unix.rs index 1f3abd4cc1286..00d4d46dba598 100644 --- a/library/std/src/sys/pal/unix/process/process_unix.rs +++ b/library/std/src/sys/pal/unix/process/process_unix.rs @@ -799,7 +799,7 @@ impl Command { let fds: [c_int; 1] = [pidfd as RawFd]; - const SCM_MSG_LEN: usize = mem::size_of::<[c_int; 1]>(); + const SCM_MSG_LEN: usize = size_of::<[c_int; 1]>(); #[repr(C)] union Cmsg { @@ -818,7 +818,7 @@ impl Command { // only attach cmsg if we successfully acquired the pidfd if pidfd >= 0 { - msg.msg_controllen = mem::size_of_val(&cmsg.buf) as _; + msg.msg_controllen = size_of_val(&cmsg.buf) as _; msg.msg_control = (&raw mut cmsg.buf) as *mut _; let hdr = CMSG_FIRSTHDR((&raw mut msg) as *mut _); @@ -850,7 +850,7 @@ impl Command { use crate::sys::cvt_r; unsafe { - const SCM_MSG_LEN: usize = mem::size_of::<[c_int; 1]>(); + const SCM_MSG_LEN: usize = size_of::<[c_int; 1]>(); #[repr(C)] union Cmsg { @@ -865,7 +865,7 @@ impl Command { msg.msg_iov = (&raw mut iov) as *mut _; msg.msg_iovlen = 1; - msg.msg_controllen = mem::size_of::() as _; + msg.msg_controllen = size_of::() as _; msg.msg_control = (&raw mut cmsg) as *mut _; match cvt_r(|| libc::recvmsg(sock.as_raw(), &mut msg, libc::MSG_CMSG_CLOEXEC)) { diff --git a/library/std/src/sys/pal/unix/stack_overflow.rs b/library/std/src/sys/pal/unix/stack_overflow.rs index 43ece63457fe6..0ecccdc8812dd 100644 --- a/library/std/src/sys/pal/unix/stack_overflow.rs +++ b/library/std/src/sys/pal/unix/stack_overflow.rs @@ -426,7 +426,7 @@ mod imp { use crate::sys::weak::dlsym; dlsym!(fn sysctlbyname(*const libc::c_char, *mut libc::c_void, *mut libc::size_t, *const libc::c_void, libc::size_t) -> libc::c_int); let mut guard: usize = 0; - let mut size = mem::size_of_val(&guard); + let mut size = size_of_val(&guard); let oid = c"security.bsd.stack_guard_page"; match sysctlbyname.get() { Some(fcn) if unsafe { diff --git a/library/std/src/sys/pal/unix/thread.rs b/library/std/src/sys/pal/unix/thread.rs index 3dedc8d1257cb..11f6998cac118 100644 --- a/library/std/src/sys/pal/unix/thread.rs +++ b/library/std/src/sys/pal/unix/thread.rs @@ -372,7 +372,7 @@ pub fn available_parallelism() -> io::Result> { quota = cgroups::quota().max(1); let mut set: libc::cpu_set_t = unsafe { mem::zeroed() }; unsafe { - if libc::sched_getaffinity(0, mem::size_of::(), &mut set) == 0 { + if libc::sched_getaffinity(0, size_of::(), &mut set) == 0 { let count = libc::CPU_COUNT(&set) as usize; let count = count.min(quota); @@ -412,7 +412,7 @@ pub fn 
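`SCM_MSG_LEN` above works because `size_of` is a `const fn`, so it can fix buffer sizes at compile time. A tiny sketch of the same idea, with `MSG_LEN` as an illustrative name:

const MSG_LEN: usize = size_of::<[i32; 1]>();

fn main() {
    // A zero-initialized buffer whose length is fixed at compile time by the
    // size of the payload type.
    let buf = [0u8; MSG_LEN];
    assert_eq!(buf.len(), size_of::<i32>());
}
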
available_parallelism() -> io::Result> { libc::CPU_LEVEL_WHICH, libc::CPU_WHICH_PID, -1, - mem::size_of::(), + size_of::(), &mut set, ) == 0 { let count = libc::CPU_COUNT(&set) as usize; @@ -447,7 +447,7 @@ pub fn available_parallelism() -> io::Result> { } let mut cpus: libc::c_uint = 0; - let mut cpus_size = crate::mem::size_of_val(&cpus); + let mut cpus_size = size_of_val(&cpus); unsafe { cpus = libc::sysconf(libc::_SC_NPROCESSORS_ONLN) as libc::c_uint; diff --git a/library/std/src/sys/pal/unix/weak.rs b/library/std/src/sys/pal/unix/weak.rs index 5a37598f43827..7ec4787f1eab7 100644 --- a/library/std/src/sys/pal/unix/weak.rs +++ b/library/std/src/sys/pal/unix/weak.rs @@ -123,7 +123,7 @@ impl DlsymWeak { // Cold because it should only happen during first-time initialization. #[cold] unsafe fn initialize(&self) -> Option { - assert_eq!(mem::size_of::(), mem::size_of::<*mut libc::c_void>()); + assert_eq!(size_of::(), size_of::<*mut libc::c_void>()); let val = fetch(self.name); // This synchronizes with the acquire fence in `get`. diff --git a/library/std/src/sys/pal/wasi/fd.rs b/library/std/src/sys/pal/wasi/fd.rs index 19b60157e2e00..4b3dd1ce49ef6 100644 --- a/library/std/src/sys/pal/wasi/fd.rs +++ b/library/std/src/sys/pal/wasi/fd.rs @@ -14,8 +14,8 @@ pub struct WasiFd { } fn iovec<'a>(a: &'a mut [IoSliceMut<'_>]) -> &'a [wasi::Iovec] { - assert_eq!(mem::size_of::>(), mem::size_of::()); - assert_eq!(mem::align_of::>(), mem::align_of::()); + assert_eq!(size_of::>(), size_of::()); + assert_eq!(align_of::>(), align_of::()); // SAFETY: `IoSliceMut` and `IoVec` have exactly the same memory layout. // We decorate our `IoSliceMut` with `repr(transparent)` (see `io.rs`), and // `crate::io::IoSliceMut` is a `repr(transparent)` wrapper around our type, so this is @@ -24,8 +24,8 @@ fn iovec<'a>(a: &'a mut [IoSliceMut<'_>]) -> &'a [wasi::Iovec] { } fn ciovec<'a>(a: &'a [IoSlice<'_>]) -> &'a [wasi::Ciovec] { - assert_eq!(mem::size_of::>(), mem::size_of::()); - assert_eq!(mem::align_of::>(), mem::align_of::()); + assert_eq!(size_of::>(), size_of::()); + assert_eq!(align_of::>(), align_of::()); // SAFETY: `IoSlice` and `CIoVec` have exactly the same memory layout. // We decorate our `IoSlice` with `repr(transparent)` (see `io.rs`), and // `crate::io::IoSlice` is a `repr(transparent)` wrapper around our type, so this is diff --git a/library/std/src/sys/pal/wasi/fs.rs b/library/std/src/sys/pal/wasi/fs.rs index 39978346d7382..6d7d125fc4d4c 100644 --- a/library/std/src/sys/pal/wasi/fs.rs +++ b/library/std/src/sys/pal/wasi/fs.rs @@ -209,7 +209,7 @@ impl Iterator for ReadDir { } ReadDirState::ProcessEntry { buf, next_read_offset, offset } => { let contents = &buf[*offset..]; - const DIRENT_SIZE: usize = crate::mem::size_of::(); + const DIRENT_SIZE: usize = size_of::(); if contents.len() >= DIRENT_SIZE { let (dirent, data) = contents.split_at(DIRENT_SIZE); let dirent = diff --git a/library/std/src/sys/pal/wasi/thread.rs b/library/std/src/sys/pal/wasi/thread.rs index 0ae0236941061..c85b03d4a8918 100644 --- a/library/std/src/sys/pal/wasi/thread.rs +++ b/library/std/src/sys/pal/wasi/thread.rs @@ -13,16 +13,15 @@ cfg_if::cfg_if! { // Add a few symbols not in upstream `libc` just yet. 
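The `iovec`/`ciovec` helpers above assert that size and alignment match before reinterpreting a slice of a `repr(transparent)` wrapper as a slice of the wrapped type. A minimal sketch of that pattern with a made-up `Wrapper` type:

use core::slice;

#[allow(dead_code)]
#[repr(transparent)]
struct Wrapper(u32);

fn as_inner(a: &[Wrapper]) -> &[u32] {
    // The conversion below is only sound because the layouts really do match;
    // the asserts make that assumption explicit, as in `iovec` above.
    assert_eq!(size_of::<Wrapper>(), size_of::<u32>());
    assert_eq!(align_of::<Wrapper>(), align_of::<u32>());
    // SAFETY: `Wrapper` is `repr(transparent)` over `u32`, so a slice of one
    // can be viewed as a slice of the other.
    unsafe { slice::from_raw_parts(a.as_ptr().cast::<u32>(), a.len()) }
}

fn main() {
    let data = [Wrapper(1), Wrapper(2)];
    let inner = as_inner(&data);
    assert_eq!(inner.len(), 2);
    assert_eq!(inner[0], 1);
    assert_eq!(inner[1], 2);
}
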
mod libc { pub use crate::ffi; - pub use crate::mem; pub use libc::*; // defined in wasi-libc // https://github.com/WebAssembly/wasi-libc/blob/a6f871343313220b76009827ed0153586361c0d5/libc-top-half/musl/include/alltypes.h.in#L108 #[repr(C)] union pthread_attr_union { - __i: [ffi::c_int; if mem::size_of::() == 8 { 14 } else { 9 }], - __vi: [ffi::c_int; if mem::size_of::() == 8 { 14 } else { 9 }], - __s: [ffi::c_ulong; if mem::size_of::() == 8 { 7 } else { 9 }], + __i: [ffi::c_int; if size_of::() == 8 { 14 } else { 9 }], + __vi: [ffi::c_int; if size_of::() == 8 { 14 } else { 9 }], + __s: [ffi::c_ulong; if size_of::() == 8 { 7 } else { 9 }], } #[repr(C)] diff --git a/library/std/src/sys/pal/windows/api.rs b/library/std/src/sys/pal/windows/api.rs index ebe207fde935c..6b5f9aeace28a 100644 --- a/library/std/src/sys/pal/windows/api.rs +++ b/library/std/src/sys/pal/windows/api.rs @@ -137,7 +137,7 @@ pub const fn to_utf16(s: &str) -> [u16; UTF16_LEN] { /// use frequent `as` casts. This is risky because they are too powerful. /// For example, the following will compile today: /// -/// `std::mem::size_of:: as u32` +/// `size_of:: as u32` /// /// Note that `size_of` is never actually called, instead a function pointer is /// converted to a `u32`. Clippy would warn about this but, alas, it's not run @@ -147,7 +147,7 @@ const fn win32_size_of() -> u32 { // Uses a trait to workaround restriction on using generic types in inner items. trait Win32SizeOf: Sized { const WIN32_SIZE_OF: u32 = { - let size = core::mem::size_of::(); + let size = size_of::(); assert!(size <= u32::MAX as usize); size as u32 }; diff --git a/library/std/src/sys/pal/windows/c.rs b/library/std/src/sys/pal/windows/c.rs index 4fbdc839939c9..40b2bed73c0db 100644 --- a/library/std/src/sys/pal/windows/c.rs +++ b/library/std/src/sys/pal/windows/c.rs @@ -6,7 +6,7 @@ #![allow(clippy::style)] use core::ffi::{CStr, c_uint, c_ulong, c_ushort, c_void}; -use core::{mem, ptr}; +use core::ptr; mod windows_sys; pub use windows_sys::*; @@ -39,7 +39,7 @@ pub fn nt_success(status: NTSTATUS) -> bool { impl UNICODE_STRING { pub fn from_ref(slice: &[u16]) -> Self { - let len = mem::size_of_val(slice); + let len = size_of_val(slice); Self { Length: len as _, MaximumLength: len as _, Buffer: slice.as_ptr() as _ } } } @@ -47,7 +47,7 @@ impl UNICODE_STRING { impl Default for OBJECT_ATTRIBUTES { fn default() -> Self { Self { - Length: mem::size_of::() as _, + Length: size_of::() as _, RootDirectory: ptr::null_mut(), ObjectName: ptr::null_mut(), Attributes: 0, diff --git a/library/std/src/sys/pal/windows/fs.rs b/library/std/src/sys/pal/windows/fs.rs index 623a7d89ba5a0..17dc3e5c257d4 100644 --- a/library/std/src/sys/pal/windows/fs.rs +++ b/library/std/src/sys/pal/windows/fs.rs @@ -477,7 +477,7 @@ impl File { self.handle.as_raw_handle(), c::FileAttributeTagInfo, (&raw mut attr_tag).cast(), - mem::size_of::().try_into().unwrap(), + size_of::().try_into().unwrap(), ))?; if attr_tag.FileAttributes & c::FILE_ATTRIBUTE_REPARSE_POINT != 0 { reparse_tag = attr_tag.ReparseTag; @@ -504,7 +504,7 @@ impl File { pub fn file_attr(&self) -> io::Result { unsafe { let mut info: c::FILE_BASIC_INFO = mem::zeroed(); - let size = mem::size_of_val(&info); + let size = size_of_val(&info); cvt(c::GetFileInformationByHandleEx( self.handle.as_raw_handle(), c::FileBasicInfo, @@ -536,7 +536,7 @@ impl File { file_index: None, }; let mut info: c::FILE_STANDARD_INFO = mem::zeroed(); - let size = mem::size_of_val(&info); + let size = size_of_val(&info); cvt(c::GetFileInformationByHandleEx( 
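The comment above `win32_size_of` warns that an `as` cast of `size_of` compiles as a function-pointer-to-integer cast rather than a size computation. A sketch of the distinction; `checked_size_of` is an illustrative stand-in, not the real helper (which, as the diff shows, uses a trait to work around generics in inner items):

fn main() {
    // Footgun: this compiles, but it converts a function pointer to `u32`;
    // it is not the size of `u64`.
    let _not_a_size = size_of::<u64> as u32;

    // Checked alternative: call the function, then make sure the value
    // actually fits in `u32` before narrowing.
    assert_eq!(checked_size_of::<u64>(), 8);
}

const fn checked_size_of<T: Sized>() -> u32 {
    let size = size_of::<T>();
    assert!(size <= u32::MAX as usize);
    size as u32
}
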
self.handle.as_raw_handle(), c::FileStandardInfo, @@ -551,7 +551,7 @@ impl File { self.handle.as_raw_handle(), c::FileAttributeTagInfo, (&raw mut attr_tag).cast(), - mem::size_of::().try_into().unwrap(), + size_of::().try_into().unwrap(), ))?; if attr_tag.FileAttributes & c::FILE_ATTRIBUTE_REPARSE_POINT != 0 { attr.reparse_tag = attr_tag.ReparseTag; @@ -649,7 +649,7 @@ impl File { ptr::null_mut(), ) })?; - const _: () = assert!(core::mem::align_of::() <= 8); + const _: () = assert!(align_of::() <= 8); Ok((bytes, space.0.as_mut_ptr().cast::())) } } @@ -753,7 +753,7 @@ impl File { fn basic_info(&self) -> io::Result { unsafe { let mut info: c::FILE_BASIC_INFO = mem::zeroed(); - let size = mem::size_of_val(&info); + let size = size_of_val(&info); cvt(c::GetFileInformationByHandleEx( self.handle.as_raw_handle(), c::FileBasicInfo, @@ -886,7 +886,6 @@ impl<'a> DirBuffIter<'a> { impl<'a> Iterator for DirBuffIter<'a> { type Item = (Cow<'a, [u16]>, bool); fn next(&mut self) -> Option { - use crate::mem::size_of; let buffer = &self.buffer?[self.cursor..]; // Get the name and next entry from the buffer. @@ -1249,8 +1248,8 @@ pub fn rename(old: &Path, new: &Path) -> io::Result<()> { // Therefore we need to make sure to not allocate less than // size_of::() bytes, which would be the case with // 0 or 1 character paths + a null byte. - let struct_size = mem::size_of::() - .max(mem::offset_of!(c::FILE_RENAME_INFO, FileName) + new.len() * mem::size_of::()); + let struct_size = size_of::() + .max(mem::offset_of!(c::FILE_RENAME_INFO, FileName) + new.len() * size_of::()); let struct_size: u32 = struct_size.try_into().unwrap(); @@ -1282,7 +1281,7 @@ pub fn rename(old: &Path, new: &Path) -> io::Result<()> { handle.as_raw_handle(), c::FileAttributeTagInfo, file_attribute_tag_info.as_mut_ptr().cast(), - mem::size_of::().try_into().unwrap(), + size_of::().try_into().unwrap(), )) }; @@ -1321,11 +1320,9 @@ pub fn rename(old: &Path, new: &Path) -> io::Result<()> { } .unwrap_or_else(|| create_file(0, 0))?; - let layout = core::alloc::Layout::from_size_align( - struct_size as _, - mem::align_of::(), - ) - .unwrap(); + let layout = + core::alloc::Layout::from_size_align(struct_size as _, align_of::()) + .unwrap(); let file_rename_info = unsafe { alloc(layout) } as *mut c::FILE_RENAME_INFO; diff --git a/library/std/src/sys/pal/windows/futex.rs b/library/std/src/sys/pal/windows/futex.rs index 38afb8c043b3b..aebf638239ca9 100644 --- a/library/std/src/sys/pal/windows/futex.rs +++ b/library/std/src/sys/pal/windows/futex.rs @@ -1,10 +1,10 @@ use core::ffi::c_void; +use core::ptr; use core::sync::atomic::{ AtomicBool, AtomicI8, AtomicI16, AtomicI32, AtomicI64, AtomicIsize, AtomicPtr, AtomicU8, AtomicU16, AtomicU32, AtomicU64, AtomicUsize, }; use core::time::Duration; -use core::{mem, ptr}; use super::api::{self, WinError}; use crate::sys::{c, dur2timeout}; @@ -61,7 +61,7 @@ pub fn wait_on_address( ) -> bool { unsafe { let addr = ptr::from_ref(address).cast::(); - let size = mem::size_of::(); + let size = size_of::(); let compare_addr = (&raw const compare).cast::(); let timeout = timeout.map(dur2timeout).unwrap_or(c::INFINITE); c::WaitOnAddress(addr, compare_addr, size, timeout) == c::TRUE diff --git a/library/std/src/sys/pal/windows/pipe.rs b/library/std/src/sys/pal/windows/pipe.rs index a8f6617c9dc8f..8521cf4162f5c 100644 --- a/library/std/src/sys/pal/windows/pipe.rs +++ b/library/std/src/sys/pal/windows/pipe.rs @@ -151,7 +151,7 @@ pub fn anon_pipe(ours_readable: bool, their_handle_inheritable: bool) -> io::Res 
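The `rename` change above sizes an allocation for a header struct followed by an inline, variable-length filename, taking the larger of the struct size and `offset_of!` plus the name bytes. A sketch of that computation with a made-up `Header` type standing in for `FILE_RENAME_INFO`:

use core::alloc::Layout;
use core::mem::offset_of;

#[allow(dead_code)]
#[repr(C)]
struct Header {
    flags: u32,
    name_len: u32,
    name: [u16; 1], // declared with one element, actually variable length
}

fn layout_for(name_units: usize) -> Layout {
    // Never allocate less than the struct itself, but grow for longer names.
    let size = size_of::<Header>()
        .max(offset_of!(Header, name) + name_units * size_of::<u16>());
    Layout::from_size_align(size, align_of::<Header>()).unwrap()
}

fn main() {
    // A one-character name still needs the full struct size...
    assert_eq!(layout_for(1).size(), size_of::<Header>());
    // ...while longer names grow the allocation past it.
    assert!(layout_for(16).size() > size_of::<Header>());
}
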
opts.write(ours_readable); opts.read(!ours_readable); opts.share_mode(0); - let size = mem::size_of::(); + let size = size_of::(); let mut sa = c::SECURITY_ATTRIBUTES { nLength: size as u32, lpSecurityDescriptor: ptr::null_mut(), diff --git a/library/std/src/sys/pal/windows/process.rs b/library/std/src/sys/pal/windows/process.rs index 6eff471f38670..c57ff355d124d 100644 --- a/library/std/src/sys/pal/windows/process.rs +++ b/library/std/src/sys/pal/windows/process.rs @@ -24,7 +24,7 @@ use crate::sys::pipe::{self, AnonPipe}; use crate::sys::{cvt, path, stdio}; use crate::sys_common::IntoInner; use crate::sys_common::process::{CommandEnv, CommandEnvs}; -use crate::{cmp, env, fmt, mem, ptr}; +use crate::{cmp, env, fmt, ptr}; //////////////////////////////////////////////////////////////////////////////// // Command @@ -355,7 +355,7 @@ impl Command { let mut si_ex; if let Some(proc_thread_attribute_list) = proc_thread_attribute_list { - si.cb = mem::size_of::() as u32; + si.cb = size_of::() as u32; flags |= c::EXTENDED_STARTUPINFO_PRESENT; si_ex = c::STARTUPINFOEXW { @@ -367,7 +367,7 @@ impl Command { }; si_ptr = (&raw mut si_ex) as _; } else { - si.cb = mem::size_of::() as u32; + si.cb = size_of::() as u32; si_ptr = (&raw mut si) as _; } @@ -599,7 +599,7 @@ impl Stdio { // permissions as well as the ability to be inherited to child // processes (as this is about to be inherited). Stdio::Null => { - let size = mem::size_of::(); + let size = size_of::(); let mut sa = c::SECURITY_ATTRIBUTES { nLength: size as u32, lpSecurityDescriptor: ptr::null_mut(), diff --git a/library/std/src/sys/pal/windows/stdio.rs b/library/std/src/sys/pal/windows/stdio.rs index 1b245991aa797..58d3406e1382f 100644 --- a/library/std/src/sys/pal/windows/stdio.rs +++ b/library/std/src/sys/pal/windows/stdio.rs @@ -359,7 +359,7 @@ fn read_u16s(handle: c::HANDLE, buf: &mut [MaybeUninit]) -> io::Result() as u32, + nLength: size_of::() as u32, nInitialChars: 0, dwCtrlWakeupMask: CTRL_Z_MASK, dwControlKeyState: 0, diff --git a/library/std/src/sys/pal/xous/stdio.rs b/library/std/src/sys/pal/xous/stdio.rs index dfd47a1775ae2..717361452213b 100644 --- a/library/std/src/sys/pal/xous/stdio.rs +++ b/library/std/src/sys/pal/xous/stdio.rs @@ -87,7 +87,7 @@ pub struct PanicWriter { impl io::Write for PanicWriter { fn write(&mut self, s: &[u8]) -> core::result::Result { - for c in s.chunks(core::mem::size_of::() * 4) { + for c in s.chunks(size_of::() * 4) { // Text is grouped into 4x `usize` words. The id is 1100 plus // the number of characters in this message. // Ignore errors since we're already panicking. diff --git a/library/std/src/sys/pal/zkvm/mod.rs b/library/std/src/sys/pal/zkvm/mod.rs index 054c867f90d8e..8d8fe321f6615 100644 --- a/library/std/src/sys/pal/zkvm/mod.rs +++ b/library/std/src/sys/pal/zkvm/mod.rs @@ -8,7 +8,7 @@ //! will likely change over time. 
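Several of the Win32 structs touched above (`SECURITY_ATTRIBUTES`, the `STARTUPINFO` variants) expect the caller to record the struct's own size in a leading field before handing it to the OS. A stand-in sketch of that convention; `VersionedParams` is invented for illustration and is not a real Windows type:

#[allow(dead_code)]
#[repr(C)]
struct VersionedParams {
    cb: u32, // total size of this struct, filled in by the caller
    inherit: i32,
    reserved: *mut core::ffi::c_void,
}

fn main() {
    let params = VersionedParams {
        cb: size_of::<VersionedParams>() as u32,
        inherit: 1,
        reserved: core::ptr::null_mut(),
    };
    assert_eq!(params.cb as usize, size_of::<VersionedParams>());
}
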
#![forbid(unsafe_op_in_unsafe_fn)] -const WORD_SIZE: usize = core::mem::size_of::(); +const WORD_SIZE: usize = size_of::(); pub mod abi; #[path = "../zkvm/args.rs"] diff --git a/library/std/src/sys/personality/dwarf/eh.rs b/library/std/src/sys/personality/dwarf/eh.rs index 778d8686f023e..ef5112ad74f13 100644 --- a/library/std/src/sys/personality/dwarf/eh.rs +++ b/library/std/src/sys/personality/dwarf/eh.rs @@ -12,7 +12,7 @@ #![allow(non_upper_case_globals)] #![allow(unused)] -use core::{mem, ptr}; +use core::ptr; use super::DwarfReader; @@ -245,8 +245,7 @@ unsafe fn read_encoded_pointer( DW_EH_PE_datarel => (*context.get_data_start)(), // aligned means the value is aligned to the size of a pointer DW_EH_PE_aligned => { - reader.ptr = - reader.ptr.with_addr(round_up(reader.ptr.addr(), mem::size_of::<*const u8>())?); + reader.ptr = reader.ptr.with_addr(round_up(reader.ptr.addr(), size_of::<*const u8>())?); core::ptr::null() } _ => return Err(()), diff --git a/library/std/src/sys/personality/dwarf/mod.rs b/library/std/src/sys/personality/dwarf/mod.rs index 5c52d96c4cad4..2bc91951b49fd 100644 --- a/library/std/src/sys/personality/dwarf/mod.rs +++ b/library/std/src/sys/personality/dwarf/mod.rs @@ -12,8 +12,6 @@ mod tests; pub mod eh; -use core::mem; - pub struct DwarfReader { pub ptr: *const u8, } @@ -29,7 +27,7 @@ impl DwarfReader { pub unsafe fn read(&mut self) -> T { unsafe { let result = self.ptr.cast::().read_unaligned(); - self.ptr = self.ptr.byte_add(mem::size_of::()); + self.ptr = self.ptr.byte_add(size_of::()); result } } diff --git a/library/std/src/sys/thread_local/key/xous.rs b/library/std/src/sys/thread_local/key/xous.rs index 55ac5b20e1ab0..48dfe17ab3261 100644 --- a/library/std/src/sys/thread_local/key/xous.rs +++ b/library/std/src/sys/thread_local/key/xous.rs @@ -85,7 +85,7 @@ fn tls_table() -> &'static mut [*mut u8] { if !tp.is_null() { return unsafe { - core::slice::from_raw_parts_mut(tp, TLS_MEMORY_SIZE / core::mem::size_of::<*mut u8>()) + core::slice::from_raw_parts_mut(tp, TLS_MEMORY_SIZE / size_of::<*mut u8>()) }; } // If the TP register is `0`, then this thread hasn't initialized @@ -94,7 +94,7 @@ fn tls_table() -> &'static mut [*mut u8] { map_memory( None, None, - TLS_MEMORY_SIZE / core::mem::size_of::<*mut u8>(), + TLS_MEMORY_SIZE / size_of::<*mut u8>(), MemoryFlags::R | MemoryFlags::W, ) .expect("Unable to allocate memory for thread local storage") @@ -177,11 +177,8 @@ pub unsafe fn destroy_tls() { // Finally, free the TLS array unsafe { - unmap_memory(core::slice::from_raw_parts_mut( - tp, - TLS_MEMORY_SIZE / core::mem::size_of::(), - )) - .unwrap() + unmap_memory(core::slice::from_raw_parts_mut(tp, TLS_MEMORY_SIZE / size_of::())) + .unwrap() }; } diff --git a/library/std/src/thread/tests.rs b/library/std/src/thread/tests.rs index ff45e82bd9c71..06c347af1819f 100644 --- a/library/std/src/thread/tests.rs +++ b/library/std/src/thread/tests.rs @@ -1,12 +1,12 @@ use super::Builder; use crate::any::Any; use crate::panic::panic_any; +use crate::result; use crate::sync::atomic::{AtomicBool, Ordering}; use crate::sync::mpsc::{Sender, channel}; use crate::sync::{Arc, Barrier}; use crate::thread::{self, Scope, ThreadId}; use crate::time::{Duration, Instant}; -use crate::{mem, result}; // !!! These tests are dangerous. If something is buggy, they will hang, !!! // !!! instead of exiting cleanly. This might wedge the buildbots. !!! 
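The `DW_EH_PE_aligned` branch above rounds the read cursor up to pointer-size alignment. The actual `round_up` helper lives elsewhere in that module and is not shown in this hunk; a plausible sketch using the usual power-of-two trick, with an `Option` return for the overflow case:

fn round_up(addr: usize, align: usize) -> Option<usize> {
    debug_assert!(align.is_power_of_two());
    // Overflow-checked form of (addr + align - 1) & !(align - 1).
    Some(addr.checked_add(align - 1)? & !(align - 1))
}

fn main() {
    let ptr_align = size_of::<*const u8>();
    assert_eq!(round_up(0, ptr_align), Some(0));
    assert_eq!(round_up(1, ptr_align), Some(ptr_align));
    assert_eq!(round_up(ptr_align + 1, ptr_align), Some(2 * ptr_align));
    assert_eq!(round_up(usize::MAX, 8), None);
}
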
@@ -327,7 +327,7 @@ fn sleep_ms_smoke() { #[test] fn test_size_of_option_thread_id() { - assert_eq!(mem::size_of::>(), mem::size_of::()); + assert_eq!(size_of::>(), size_of::()); } #[test] diff --git a/library/unwind/src/unwinding.rs b/library/unwind/src/unwinding.rs index 1b94005ab6cd0..fa8a8c385839b 100644 --- a/library/unwind/src/unwinding.rs +++ b/library/unwind/src/unwinding.rs @@ -39,9 +39,9 @@ pub type _Unwind_Exception_Class = u64; pub type _Unwind_Word = *const u8; pub type _Unwind_Ptr = *const u8; -pub const unwinder_private_data_size: usize = core::mem::size_of::() - - core::mem::size_of::<_Unwind_Exception_Class>() - - core::mem::size_of::<_Unwind_Exception_Cleanup_Fn>(); +pub const unwinder_private_data_size: usize = size_of::() + - size_of::<_Unwind_Exception_Class>() + - size_of::<_Unwind_Exception_Cleanup_Fn>(); pub type _Unwind_Exception_Cleanup_Fn = Option; From 5dfa2f5fd0d2e1cdf650679d709ae71bbad7d87a Mon Sep 17 00:00:00 2001 From: Thalia Archibald Date: Tue, 4 Mar 2025 21:40:04 -0800 Subject: [PATCH 13/13] Use turbofish for size_of and align_of in docs --- library/core/src/ptr/const_ptr.rs | 4 ++-- library/core/src/ptr/mut_ptr.rs | 8 ++++---- library/core/src/ptr/non_null.rs | 8 ++++---- 3 files changed, 10 insertions(+), 10 deletions(-) diff --git a/library/core/src/ptr/const_ptr.rs b/library/core/src/ptr/const_ptr.rs index 43306cfa674b9..9a4f916803e81 100644 --- a/library/core/src/ptr/const_ptr.rs +++ b/library/core/src/ptr/const_ptr.rs @@ -1313,7 +1313,7 @@ impl *const T { unsafe { read_unaligned(self) } } - /// Copies `count * size_of` bytes from `self` to `dest`. The source + /// Copies `count * size_of::()` bytes from `self` to `dest`. The source /// and destination may overlap. /// /// NOTE: this has the *same* argument order as [`ptr::copy`]. @@ -1333,7 +1333,7 @@ impl *const T { unsafe { copy(self, dest, count) } } - /// Copies `count * size_of` bytes from `self` to `dest`. The source + /// Copies `count * size_of::()` bytes from `self` to `dest`. The source /// and destination may *not* overlap. /// /// NOTE: this has the *same* argument order as [`ptr::copy_nonoverlapping`]. diff --git a/library/core/src/ptr/mut_ptr.rs b/library/core/src/ptr/mut_ptr.rs index 26aaac476027f..b960a3d86bef0 100644 --- a/library/core/src/ptr/mut_ptr.rs +++ b/library/core/src/ptr/mut_ptr.rs @@ -1397,7 +1397,7 @@ impl *mut T { unsafe { read_unaligned(self) } } - /// Copies `count * size_of` bytes from `self` to `dest`. The source + /// Copies `count * size_of::()` bytes from `self` to `dest`. The source /// and destination may overlap. /// /// NOTE: this has the *same* argument order as [`ptr::copy`]. @@ -1417,7 +1417,7 @@ impl *mut T { unsafe { copy(self, dest, count) } } - /// Copies `count * size_of` bytes from `self` to `dest`. The source + /// Copies `count * size_of::()` bytes from `self` to `dest`. The source /// and destination may *not* overlap. /// /// NOTE: this has the *same* argument order as [`ptr::copy_nonoverlapping`]. @@ -1437,7 +1437,7 @@ impl *mut T { unsafe { copy_nonoverlapping(self, dest, count) } } - /// Copies `count * size_of` bytes from `src` to `self`. The source + /// Copies `count * size_of::()` bytes from `src` to `self`. The source /// and destination may overlap. /// /// NOTE: this has the *opposite* argument order of [`ptr::copy`]. @@ -1457,7 +1457,7 @@ impl *mut T { unsafe { copy(src, self, count) } } - /// Copies `count * size_of` bytes from `src` to `self`. The source + /// Copies `count * size_of::()` bytes from `src` to `self`. 
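`test_size_of_option_thread_id` above holds because `ThreadId` wraps a non-zero integer, so the niche optimization lets `Option` reuse the all-zeros bit pattern for `None` at no extra cost. The same effect can be observed directly with `NonZeroU64`:

use core::num::NonZeroU64;

fn main() {
    // The niche makes the `Option` free.
    assert_eq!(size_of::<Option<NonZeroU64>>(), size_of::<NonZeroU64>());
    // Without a niche, `Option` needs room for its discriminant.
    assert!(size_of::<Option<u64>>() > size_of::<u64>());
}
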
The source
     /// and destination may *not* overlap.
     ///
     /// NOTE: this has the *opposite* argument order of [`ptr::copy_nonoverlapping`].
diff --git a/library/core/src/ptr/non_null.rs b/library/core/src/ptr/non_null.rs
index 7c567d522c556..c769ba673c61e 100644
--- a/library/core/src/ptr/non_null.rs
+++ b/library/core/src/ptr/non_null.rs
@@ -988,7 +988,7 @@ impl NonNull {
         unsafe { ptr::read_unaligned(self.as_ptr()) }
     }

-    /// Copies `count * size_of` bytes from `self` to `dest`. The source
+    /// Copies `count * size_of::<T>()` bytes from `self` to `dest`. The source
     /// and destination may overlap.
     ///
     /// NOTE: this has the *same* argument order as [`ptr::copy`].
@@ -1008,7 +1008,7 @@ impl NonNull {
         unsafe { ptr::copy(self.as_ptr(), dest.as_ptr(), count) }
     }

-    /// Copies `count * size_of` bytes from `self` to `dest`. The source
+    /// Copies `count * size_of::<T>()` bytes from `self` to `dest`. The source
     /// and destination may *not* overlap.
     ///
     /// NOTE: this has the *same* argument order as [`ptr::copy_nonoverlapping`].
@@ -1028,7 +1028,7 @@ impl NonNull {
         unsafe { ptr::copy_nonoverlapping(self.as_ptr(), dest.as_ptr(), count) }
     }

-    /// Copies `count * size_of` bytes from `src` to `self`. The source
+    /// Copies `count * size_of::<T>()` bytes from `src` to `self`. The source
     /// and destination may overlap.
     ///
     /// NOTE: this has the *opposite* argument order of [`ptr::copy`].
@@ -1048,7 +1048,7 @@ impl NonNull {
         unsafe { ptr::copy(src.as_ptr(), self.as_ptr(), count) }
     }

-    /// Copies `count * size_of` bytes from `src` to `self`. The source
+    /// Copies `count * size_of::<T>()` bytes from `src` to `self`. The source
     /// and destination may *not* overlap.
     ///
     /// NOTE: this has the *opposite* argument order of [`ptr::copy_nonoverlapping`].
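The NOTE lines in these docs are about argument order: `p.copy_to(dest, count)` follows `ptr::copy(src, dst, count)`, while `copy_from` swaps source and destination. A short demonstration on plain arrays:

fn main() {
    let src = [1u32, 2, 3, 4];
    let mut dst = [0u32; 4];

    unsafe {
        // Method form: the source pointer is `self`, the destination is the argument.
        src.as_ptr().copy_to(dst.as_mut_ptr(), 4);
    }
    assert_eq!(dst, [1, 2, 3, 4]);

    let mut dst2 = [0u32; 4];
    unsafe {
        // Flipped form: the destination pointer is `self`, the source is the argument.
        dst2.as_mut_ptr().copy_from(src.as_ptr(), 4);
    }
    assert_eq!(dst2, [1, 2, 3, 4]);
}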