Skip to content

Commit 2a19be6

Browse files
committed
mm/slab: remove CONFIG_SLAB from all Kconfig and Makefile
Remove CONFIG_SLAB, CONFIG_DEBUG_SLAB, CONFIG_SLAB_DEPRECATED and everything in Kconfig files and mm/Makefile that depends on those. Since SLUB is the only remaining allocator, remove the allocator choice, make CONFIG_SLUB a "def_bool y" for now and remove all explicit dependencies on SLUB or SLAB as it's now always enabled. Make every option's verbose name and description refer to "the slab allocator" without refering to the specific implementation. Do not rename the CONFIG_ option names yet. Everything under #ifdef CONFIG_SLAB, and mm/slab.c is now dead code, all code under #ifdef CONFIG_SLUB is now always compiled. Reviewed-by: Kees Cook <[email protected]> Reviewed-by: Christoph Lameter <[email protected]> Acked-by: David Rientjes <[email protected]> Tested-by: David Rientjes <[email protected]> Reviewed-by: Hyeonggon Yoo <[email protected]> Tested-by: Hyeonggon Yoo <[email protected]> Signed-off-by: Vlastimil Babka <[email protected]>
1 parent 0445ee0 commit 2a19be6

File tree

10 files changed

+28
-84
lines changed

10 files changed

+28
-84
lines changed

arch/arm64/Kconfig

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -154,7 +154,7 @@ config ARM64
154154
select HAVE_MOVE_PUD
155155
select HAVE_PCI
156156
select HAVE_ACPI_APEI if (ACPI && EFI)
157-
select HAVE_ALIGNED_STRUCT_PAGE if SLUB
157+
select HAVE_ALIGNED_STRUCT_PAGE
158158
select HAVE_ARCH_AUDITSYSCALL
159159
select HAVE_ARCH_BITREVERSE
160160
select HAVE_ARCH_COMPILER_H

arch/s390/Kconfig

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -146,7 +146,7 @@ config S390
146146
select GENERIC_TIME_VSYSCALL
147147
select GENERIC_VDSO_TIME_NS
148148
select GENERIC_IOREMAP if PCI
149-
select HAVE_ALIGNED_STRUCT_PAGE if SLUB
149+
select HAVE_ALIGNED_STRUCT_PAGE
150150
select HAVE_ARCH_AUDITSYSCALL
151151
select HAVE_ARCH_JUMP_LABEL
152152
select HAVE_ARCH_JUMP_LABEL_RELATIVE

arch/x86/Kconfig

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -169,7 +169,7 @@ config X86
169169
select HAS_IOPORT
170170
select HAVE_ACPI_APEI if ACPI
171171
select HAVE_ACPI_APEI_NMI if ACPI
172-
select HAVE_ALIGNED_STRUCT_PAGE if SLUB
172+
select HAVE_ALIGNED_STRUCT_PAGE
173173
select HAVE_ARCH_AUDITSYSCALL
174174
select HAVE_ARCH_HUGE_VMAP if X86_64 || X86_PAE
175175
select HAVE_ARCH_HUGE_VMALLOC if X86_64

lib/Kconfig.debug

Lines changed: 0 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -1985,7 +1985,6 @@ config FAULT_INJECTION
19851985
config FAILSLAB
19861986
bool "Fault-injection capability for kmalloc"
19871987
depends on FAULT_INJECTION
1988-
depends on SLAB || SLUB
19891988
help
19901989
Provide fault-injection capability for kmalloc.
19911990

lib/Kconfig.kasan

Lines changed: 3 additions & 8 deletions
Original file line numberDiff line numberDiff line change
@@ -37,7 +37,7 @@ menuconfig KASAN
3737
(HAVE_ARCH_KASAN_SW_TAGS && CC_HAS_KASAN_SW_TAGS)) && \
3838
CC_HAS_WORKING_NOSANITIZE_ADDRESS) || \
3939
HAVE_ARCH_KASAN_HW_TAGS
40-
depends on (SLUB && SYSFS && !SLUB_TINY) || (SLAB && !DEBUG_SLAB)
40+
depends on SYSFS && !SLUB_TINY
4141
select STACKDEPOT_ALWAYS_INIT
4242
help
4343
Enables KASAN (Kernel Address Sanitizer) - a dynamic memory safety
@@ -78,7 +78,7 @@ config KASAN_GENERIC
7878
bool "Generic KASAN"
7979
depends on HAVE_ARCH_KASAN && CC_HAS_KASAN_GENERIC
8080
depends on CC_HAS_WORKING_NOSANITIZE_ADDRESS
81-
select SLUB_DEBUG if SLUB
81+
select SLUB_DEBUG
8282
select CONSTRUCTORS
8383
help
8484
Enables Generic KASAN.
@@ -89,13 +89,11 @@ config KASAN_GENERIC
8989
overhead of ~50% for dynamic allocations.
9090
The performance slowdown is ~x3.
9191

92-
(Incompatible with CONFIG_DEBUG_SLAB: the kernel does not boot.)
93-
9492
config KASAN_SW_TAGS
9593
bool "Software Tag-Based KASAN"
9694
depends on HAVE_ARCH_KASAN_SW_TAGS && CC_HAS_KASAN_SW_TAGS
9795
depends on CC_HAS_WORKING_NOSANITIZE_ADDRESS
98-
select SLUB_DEBUG if SLUB
96+
select SLUB_DEBUG
9997
select CONSTRUCTORS
10098
help
10199
Enables Software Tag-Based KASAN.
@@ -110,12 +108,9 @@ config KASAN_SW_TAGS
110108
May potentially introduce problems related to pointer casting and
111109
comparison, as it embeds a tag into the top byte of each pointer.
112110

113-
(Incompatible with CONFIG_DEBUG_SLAB: the kernel does not boot.)
114-
115111
config KASAN_HW_TAGS
116112
bool "Hardware Tag-Based KASAN"
117113
depends on HAVE_ARCH_KASAN_HW_TAGS
118-
depends on SLUB
119114
help
120115
Enables Hardware Tag-Based KASAN.
121116

lib/Kconfig.kfence

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -5,7 +5,7 @@ config HAVE_ARCH_KFENCE
55

66
menuconfig KFENCE
77
bool "KFENCE: low-overhead sampling-based memory safety error detector"
8-
depends on HAVE_ARCH_KFENCE && (SLAB || SLUB)
8+
depends on HAVE_ARCH_KFENCE
99
select STACKTRACE
1010
select IRQ_WORK
1111
help

lib/Kconfig.kmsan

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -11,7 +11,7 @@ config HAVE_KMSAN_COMPILER
1111
config KMSAN
1212
bool "KMSAN: detector of uninitialized values use"
1313
depends on HAVE_ARCH_KMSAN && HAVE_KMSAN_COMPILER
14-
depends on SLUB && DEBUG_KERNEL && !KASAN && !KCSAN
14+
depends on DEBUG_KERNEL && !KASAN && !KCSAN
1515
depends on !PREEMPT_RT
1616
select STACKDEPOT
1717
select STACKDEPOT_ALWAYS_INIT

mm/Kconfig

Lines changed: 15 additions & 53 deletions
Original file line numberDiff line numberDiff line change
@@ -226,52 +226,17 @@ config ZSMALLOC_CHAIN_SIZE
226226

227227
For more information, see zsmalloc documentation.
228228

229-
menu "SLAB allocator options"
230-
231-
choice
232-
prompt "Choose SLAB allocator"
233-
default SLUB
234-
help
235-
This option allows to select a slab allocator.
236-
237-
config SLAB_DEPRECATED
238-
bool "SLAB (DEPRECATED)"
239-
depends on !PREEMPT_RT
240-
help
241-
Deprecated and scheduled for removal in a few cycles. Replaced by
242-
SLUB.
243-
244-
If you cannot migrate to SLUB, please contact [email protected]
245-
and the people listed in the SLAB ALLOCATOR section of MAINTAINERS
246-
file, explaining why.
247-
248-
The regular slab allocator that is established and known to work
249-
well in all environments. It organizes cache hot objects in
250-
per cpu and per node queues.
229+
menu "Slab allocator options"
251230

252231
config SLUB
253-
bool "SLUB (Unqueued Allocator)"
254-
help
255-
SLUB is a slab allocator that minimizes cache line usage
256-
instead of managing queues of cached objects (SLAB approach).
257-
Per cpu caching is realized using slabs of objects instead
258-
of queues of objects. SLUB can use memory efficiently
259-
and has enhanced diagnostics. SLUB is the default choice for
260-
a slab allocator.
261-
262-
endchoice
263-
264-
config SLAB
265-
bool
266-
default y
267-
depends on SLAB_DEPRECATED
232+
def_bool y
268233

269234
config SLUB_TINY
270-
bool "Configure SLUB for minimal memory footprint"
271-
depends on SLUB && EXPERT
235+
bool "Configure for minimal memory footprint"
236+
depends on EXPERT
272237
select SLAB_MERGE_DEFAULT
273238
help
274-
Configures the SLUB allocator in a way to achieve minimal memory
239+
Configures the slab allocator in a way to achieve minimal memory
275240
footprint, sacrificing scalability, debugging and other features.
276241
This is intended only for the smallest system that had used the
277242
SLOB allocator and is not recommended for systems with more than
@@ -282,7 +247,6 @@ config SLUB_TINY
282247
config SLAB_MERGE_DEFAULT
283248
bool "Allow slab caches to be merged"
284249
default y
285-
depends on SLAB || SLUB
286250
help
287251
For reduced kernel memory fragmentation, slab caches can be
288252
merged when they share the same size and other characteristics.
@@ -296,29 +260,27 @@ config SLAB_MERGE_DEFAULT
296260

297261
config SLAB_FREELIST_RANDOM
298262
bool "Randomize slab freelist"
299-
depends on SLAB || (SLUB && !SLUB_TINY)
263+
depends on !SLUB_TINY
300264
help
301265
Randomizes the freelist order used on creating new pages. This
302266
security feature reduces the predictability of the kernel slab
303267
allocator against heap overflows.
304268

305269
config SLAB_FREELIST_HARDENED
306270
bool "Harden slab freelist metadata"
307-
depends on SLAB || (SLUB && !SLUB_TINY)
271+
depends on !SLUB_TINY
308272
help
309273
Many kernel heap attacks try to target slab cache metadata and
310274
other infrastructure. This options makes minor performance
311275
sacrifices to harden the kernel slab allocator against common
312-
freelist exploit methods. Some slab implementations have more
313-
sanity-checking than others. This option is most effective with
314-
CONFIG_SLUB.
276+
freelist exploit methods.
315277

316278
config SLUB_STATS
317279
default n
318-
bool "Enable SLUB performance statistics"
319-
depends on SLUB && SYSFS && !SLUB_TINY
280+
bool "Enable performance statistics"
281+
depends on SYSFS && !SLUB_TINY
320282
help
321-
SLUB statistics are useful to debug SLUBs allocation behavior in
283+
The statistics are useful to debug slab allocation behavior in
322284
order find ways to optimize the allocator. This should never be
323285
enabled for production use since keeping statistics slows down
324286
the allocator by a few percentage points. The slabinfo command
@@ -328,8 +290,8 @@ config SLUB_STATS
328290

329291
config SLUB_CPU_PARTIAL
330292
default y
331-
depends on SLUB && SMP && !SLUB_TINY
332-
bool "SLUB per cpu partial cache"
293+
depends on SMP && !SLUB_TINY
294+
bool "Enable per cpu partial caches"
333295
help
334296
Per cpu partial caches accelerate objects allocation and freeing
335297
that is local to a processor at the price of more indeterminism
@@ -339,7 +301,7 @@ config SLUB_CPU_PARTIAL
339301

340302
config RANDOM_KMALLOC_CACHES
341303
default n
342-
depends on SLUB && !SLUB_TINY
304+
depends on !SLUB_TINY
343305
bool "Randomize slab caches for normal kmalloc"
344306
help
345307
A hardening feature that creates multiple copies of slab caches for
@@ -354,7 +316,7 @@ config RANDOM_KMALLOC_CACHES
354316
limited degree of memory and CPU overhead that relates to hardware and
355317
system workload.
356318

357-
endmenu # SLAB allocator options
319+
endmenu # Slab allocator options
358320

359321
config SHUFFLE_PAGE_ALLOCATOR
360322
bool "Page allocator randomization"

mm/Kconfig.debug

Lines changed: 4 additions & 12 deletions
Original file line numberDiff line numberDiff line change
@@ -45,18 +45,10 @@ config DEBUG_PAGEALLOC_ENABLE_DEFAULT
4545
Enable debug page memory allocations by default? This value
4646
can be overridden by debug_pagealloc=off|on.
4747

48-
config DEBUG_SLAB
49-
bool "Debug slab memory allocations"
50-
depends on DEBUG_KERNEL && SLAB
51-
help
52-
Say Y here to have the kernel do limited verification on memory
53-
allocation as well as poisoning memory on free to catch use of freed
54-
memory. This can make kmalloc/kfree-intensive workloads much slower.
55-
5648
config SLUB_DEBUG
5749
default y
5850
bool "Enable SLUB debugging support" if EXPERT
59-
depends on SLUB && SYSFS && !SLUB_TINY
51+
depends on SYSFS && !SLUB_TINY
6052
select STACKDEPOT if STACKTRACE_SUPPORT
6153
help
6254
SLUB has extensive debug support features. Disabling these can
@@ -66,7 +58,7 @@ config SLUB_DEBUG
6658

6759
config SLUB_DEBUG_ON
6860
bool "SLUB debugging on by default"
69-
depends on SLUB && SLUB_DEBUG
61+
depends on SLUB_DEBUG
7062
select STACKDEPOT_ALWAYS_INIT if STACKTRACE_SUPPORT
7163
default n
7264
help
@@ -231,8 +223,8 @@ config DEBUG_KMEMLEAK
231223
allocations. See Documentation/dev-tools/kmemleak.rst for more
232224
details.
233225

234-
Enabling DEBUG_SLAB or SLUB_DEBUG may increase the chances
235-
of finding leaks due to the slab objects poisoning.
226+
Enabling SLUB_DEBUG may increase the chances of finding leaks
227+
due to the slab objects poisoning.
236228

237229
In order to access the kmemleak file, debugfs needs to be
238230
mounted (usually at /sys/kernel/debug).

mm/Makefile

Lines changed: 1 addition & 5 deletions
Original file line numberDiff line numberDiff line change
@@ -4,15 +4,13 @@
44
#
55

66
KASAN_SANITIZE_slab_common.o := n
7-
KASAN_SANITIZE_slab.o := n
87
KASAN_SANITIZE_slub.o := n
98
KCSAN_SANITIZE_kmemleak.o := n
109

1110
# These produce frequent data race reports: most of them are due to races on
1211
# the same word but accesses to different bits of that word. Re-enable KCSAN
1312
# for these when we have more consensus on what to do about them.
1413
KCSAN_SANITIZE_slab_common.o := n
15-
KCSAN_SANITIZE_slab.o := n
1614
KCSAN_SANITIZE_slub.o := n
1715
KCSAN_SANITIZE_page_alloc.o := n
1816
# But enable explicit instrumentation for memory barriers.
@@ -22,7 +20,6 @@ KCSAN_INSTRUMENT_BARRIERS := y
2220
# flaky coverage that is not a function of syscall inputs. E.g. slab is out of
2321
# free pages, or a task is migrated between nodes.
2422
KCOV_INSTRUMENT_slab_common.o := n
25-
KCOV_INSTRUMENT_slab.o := n
2623
KCOV_INSTRUMENT_slub.o := n
2724
KCOV_INSTRUMENT_page_alloc.o := n
2825
KCOV_INSTRUMENT_debug-pagealloc.o := n
@@ -66,6 +63,7 @@ obj-y += page-alloc.o
6663
obj-y += init-mm.o
6764
obj-y += memblock.o
6865
obj-y += $(memory-hotplug-y)
66+
obj-y += slub.o
6967

7068
ifdef CONFIG_MMU
7169
obj-$(CONFIG_ADVISE_SYSCALLS) += madvise.o
@@ -82,8 +80,6 @@ obj-$(CONFIG_SPARSEMEM_VMEMMAP) += sparse-vmemmap.o
8280
obj-$(CONFIG_MMU_NOTIFIER) += mmu_notifier.o
8381
obj-$(CONFIG_KSM) += ksm.o
8482
obj-$(CONFIG_PAGE_POISONING) += page_poison.o
85-
obj-$(CONFIG_SLAB) += slab.o
86-
obj-$(CONFIG_SLUB) += slub.o
8783
obj-$(CONFIG_KASAN) += kasan/
8884
obj-$(CONFIG_KFENCE) += kfence/
8985
obj-$(CONFIG_KMSAN) += kmsan/

0 commit comments

Comments (0)