Harden module auto-loading #19

Closed. Wants to merge 104 commits.
104 commits
0abdd0a
make DEFAULT_MMAP_MIN_ADDR match LSM_MMAP_MIN_ADDR
thestinger May 27, 2017
663bfcf
enable HARDENED_USERCOPY by default
thestinger May 29, 2017
164db07
disable HARDENED_USERCOPY_FALLBACK by default
thestinger Apr 26, 2018
ffc82ed
enable SECURITY_DMESG_RESTRICT by default
thestinger May 3, 2017
75275ce
set kptr_restrict=2 by default
thestinger May 3, 2017
bd5a1f1
enable DEBUG_LIST by default
thestinger May 3, 2017
914bee4
enable BUG_ON_DATA_CORRUPTION by default
thestinger May 29, 2017
e04e564
enable ARM64_SW_TTBR0_PAN by default
thestinger Feb 25, 2018
7b3bdf8
arm64: enable RANDOMIZE_BASE by default
thestinger Feb 25, 2018
44c598e
enable SLAB_FREELIST_RANDOM by default
thestinger May 3, 2017
786923d
enable SLAB_FREELIST_HARDENED by default
thestinger Aug 20, 2017
f50117b
disable SLAB_MERGE_DEFAULT by default
thestinger Jul 8, 2017
8cd07cf
enable REFCOUNT_FULL by default
thestinger Jan 3, 2018
547b680
enable FORTIFY_SOURCE by default
thestinger May 8, 2017
03f7f04
enable PANIC_ON_OOPS by default
thestinger May 3, 2017
d7fbdf6
stop hiding SLUB_DEBUG behind EXPERT
thestinger May 15, 2017
31391f1
stop hiding X86_16BIT behind EXPERT
thestinger May 4, 2017
5340267
disable X86_16BIT by default
thestinger May 4, 2017
10d5628
stop hiding MODIFY_LDT_SYSCALL behind EXPERT
thestinger May 4, 2017
6d229af
disable MODIFY_LDT_SYSCALL by default
thestinger May 4, 2017
ecf6366
set LEGACY_VSYSCALL_NONE by default
thestinger May 29, 2017
280d02b
stop hiding AIO behind EXPERT
Bernhard40 Oct 6, 2017
a25aa13
disable AIO by default
Bernhard40 Oct 6, 2017
c8640a1
remove SYSVIPC from arm64/x86_64 defconfigs
thestinger Feb 25, 2018
b302e02
disable DEVPORT by default
thestinger May 27, 2017
faa8f6f
disable PROC_VMCORE by default
thestinger May 27, 2017
d528ed8
disable NFS_DEBUG by default
thestinger May 28, 2017
58d5357
enable DEBUG_WX by default
thestinger May 29, 2017
3339464
disable LEGACY_PTYS by default
thestinger Jan 5, 2018
97bcde5
disable DEVMEM by default
thestinger Jan 5, 2018
af0a3aa
enable IO_STRICT_DEVMEM by default
thestinger Jan 5, 2018
3cca67a
disable COMPAT_BRK by default
thestinger May 7, 2017
1f2e0d9
use maximum supported mmap rnd entropy by default
thestinger May 7, 2017
1116919
enable protected_{symlinks,hardlinks} by default
thestinger May 30, 2017
14b5aa5
enable SECURITY by default
thestinger Feb 25, 2018
d3ebf9f
enable SECURITY_YAMA by default
thestinger May 29, 2017
011cb2d
enable SECURITY_NETWORK by default
thestinger Feb 25, 2018
b39b072
enable AUDIT by default
thestinger Feb 25, 2018
47cb028
enable SECURITY_SELINUX by default
thestinger Feb 25, 2018
4fe50d3
enable SYN_COOKIES by default
thestinger Jan 6, 2018
ff101e6
add __read_only for non-init related usage
thestinger May 7, 2017
85dfb0e
make sysctl constants read-only
thestinger May 7, 2017
858d78e
mark kernel_set_to_readonly as __ro_after_init
thestinger May 12, 2017
8c5d547
mark slub runtime configuration as __ro_after_init
thestinger May 14, 2017
48fe81e
add __ro_after_init to slab_nomerge and slab_state
thestinger May 3, 2017
6e43231
mark kmem_cache as __ro_after_init
thestinger May 28, 2017
47518f9
mark __supported_pte_mask as __ro_after_init
thestinger May 12, 2017
862d8c2
mark kobj_ns_type_register as only used for init
thestinger Jul 4, 2017
66390ab
mark open_softirq as only used for init
thestinger Jul 4, 2017
b4f5a5a
remove unused softirq_action callback parameter
thestinger Jul 4, 2017
54111a0
mark softirq_vec as __ro_after_init
thestinger Jul 4, 2017
07f1ce5
mm: slab: trigger BUG if requested object is not a slab page
thestinger Sep 17, 2019
632d9cf
bug on kmem_cache_free with the wrong cache
thestinger May 3, 2017
dc3ca52
bug on !PageSlab && !PageCompound in ksize
thestinger May 3, 2017
bd47e5d
mm: add support for verifying page sanitization
thestinger May 4, 2017
f51074a
slub: Extend init_on_free to slab caches with constructors
tsautereau-anssi Sep 20, 2019
f154147
mm: slub: add support for verifying slab sanitization
thestinger May 4, 2017
253ac25
slub: add multi-purpose random canaries
thestinger May 3, 2017
3a6bd94
security,perf: Allow further restriction of perf_event_open
bwhacks Jan 11, 2016
992dec3
enable SECURITY_PERF_EVENTS_RESTRICT by default
thestinger May 4, 2017
3fd874d
add sysctl to disallow unprivileged CLONE_NEWUSER by default
hallyn May 31, 2013
62892de
add kmalloc/krealloc alloc_size attributes
thestinger May 3, 2017
a1178c2
add vmalloc alloc_size attributes
thestinger May 3, 2017
2e95e8a
add kvmalloc alloc_size attribute
thestinger Jul 4, 2017
b04d447
add percpu alloc_size attributes
thestinger May 14, 2017
bc7c386
add alloc_pages_exact alloc_size attributes
thestinger May 14, 2017
6ee23a8
Add the extra_latent_entropy kernel parameter
ephox-gcc-plugins May 30, 2016
80e4b37
ata: avoid null pointer dereference on bug
thestinger May 16, 2017
548b764
sanity check for negative length in nla_memcpy
thestinger May 16, 2017
2deda6d
add page destructor sanity check
thestinger May 16, 2017
9ecadc0
PaX shadow cr4 sanity check (essentially a revert)
thestinger May 16, 2017
2657206
add writable function pointer detection
thestinger Jul 9, 2017
b47e286
support overriding early audit kernel cmdline
thestinger Jul 9, 2017
d052512
FORTIFY_SOURCE intra-object overflow checking
thestinger Jun 3, 2017
a19642f
Revert "mm: revert x86_64 and arm64 ELF_ET_DYN_BASE base changes"
thestinger Aug 27, 2017
080507e
x86_64: move vdso to mmap region from stack region
thestinger May 11, 2017
27922cd
x86: determine stack entropy based on mmap entropy
thestinger May 22, 2017
21bc624
arm64: determine stack entropy based on mmap entropy
thestinger May 22, 2017
7d7cac6
randomize lower bits of the argument block
thestinger May 11, 2017
8997e7a
x86_64: match arm64 brk randomization entropy
thestinger May 30, 2017
6630468
support randomizing the lower bits of brk
thestinger May 30, 2017
69e03a6
arm64: randomize lower bits of brk
thestinger Jun 1, 2017
c912522
x86: randomize lower bits of brk
thestinger Jun 1, 2017
4084d63
arm64: guarantee brk gap is at least one page
thestinger Jun 1, 2017
d1d5860
x86: guarantee brk gap is at least one page
thestinger Jun 1, 2017
4a655c0
x86_64: bound mmap between legacy/modern bases
thestinger Jul 4, 2017
0e708b8
restrict device timing side channels
thestinger May 16, 2017
4a1ffde
add toggle for disabling newly added USB devices
thestinger May 16, 2017
7793b68
hard-wire legacy checkreqprot option to 0
thestinger Feb 25, 2018
9f7420c
security: tty: Add owner user namespace to tty_struct
nmatt0 May 29, 2017
338baf8
security: tty: make TIOCSTI ioctl require CAP_SYS_ADMIN
nmatt0 May 29, 2017
b03b1c8
enable SECURITY_TIOCSTI_RESTRICT by default
thestinger May 4, 2017
fb6a4e2
disable unprivileged eBPF access by default
anthraxx May 7, 2018
afde57e
enable BPF JIT hardening by default (if available)
anthraxx May 7, 2018
f558875
enable protected_{fifos,regular} by default
anthraxx Nov 4, 2018
80c3240
Revert "mark kernel_set_to_readonly as __ro_after_init"
anthraxx Jan 13, 2019
3df7771
modpost: Add CONFIG_DEBUG_WRITABLE_FUNCTION_POINTERS_VERBOSE
tsautereau-anssi May 6, 2019
25c3f4b
mm: Fix extra_latent_entropy
tsautereau-anssi May 7, 2019
e188395
add CONFIG for unprivileged_userns_clone
anthraxx Jul 31, 2019
4525808
enable INIT_ON_ALLOC_DEFAULT_ON by default
anthraxx Sep 19, 2019
3d054c3
enable INIT_ON_FREE_DEFAULT_ON by default
anthraxx Sep 19, 2019
a3f0d0e
add CONFIG for unprivileged_userfaultfd
anthraxx Oct 1, 2019
f27a0b5
Harden module auto-loading
madaidan Nov 26, 2019
689e7b8
Restrict module auto-loading to CAP_SYS_MODULE
madaidan Dec 1, 2019
15 changes: 5 additions & 10 deletions Documentation/admin-guide/kernel-parameters.txt
@@ -505,16 +505,6 @@
nosocket -- Disable socket memory accounting.
nokmem -- Disable kernel memory accounting.

checkreqprot [SELINUX] Set initial checkreqprot flag value.
Format: { "0" | "1" }
See security/selinux/Kconfig help text.
0 -- check protection applied by kernel (includes
any implied execute protection).
1 -- check protection requested by application.
Default value is set via a kernel config option.
Value can be changed at runtime via
/selinux/checkreqprot.

cio_ignore= [S390]
See Documentation/s390/common_io.rst for details.
clk_ignore_unused
@@ -3315,6 +3305,11 @@
the specified number of seconds. This is to be used if
your oopses keep scrolling off the screen.

extra_latent_entropy
Enable a very simple form of latent entropy extraction
from the first 4GB of memory as the bootmem allocator
passes the memory pages to the buddy allocator.

pcbit= [HW,ISDN]

pcd. [PARIDE]
20 changes: 20 additions & 0 deletions Documentation/admin-guide/sysctl/kernel.rst
@@ -102,6 +102,7 @@ show up in /proc/sys/kernel:
- sysctl_writes_strict
- tainted ==> Documentation/admin-guide/tainted-kernels.rst
- threads-max
- tiocsti_restrict
- unknown_nmi_panic
- watchdog
- watchdog_thresh
@@ -1114,6 +1115,25 @@ thread structures would occupy too much (more than 1/8th) of the
available RAM pages threads-max is reduced accordingly.


tiocsti_restrict:
=================

This toggle indicates whether unprivileged users are prevented from using the
TIOCSTI ioctl to inject commands into other processes which share a tty
session.

When tiocsti_restrict is set to (0) there are no restrictions (except the
default restriction of only being able to inject commands into one's own
tty). When tiocsti_restrict is set to (1), users must have CAP_SYS_ADMIN to
use the TIOCSTI ioctl.

When user namespaces are in use, the check for the capability CAP_SYS_ADMIN is
done against the user namespace that originally opened the tty.

The kernel config option CONFIG_SECURITY_TIOCSTI_RESTRICT sets the default
value of tiocsti_restrict.


unknown_nmi_panic:
==================

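For context on the behaviour the new tiocsti_restrict sysctl documents above, here is a minimal user-space sketch (an illustration, not part of this pull request) of the TIOCSTI injection primitive: each ioctl pushes one character into the terminal's input queue as if it had been typed. With tiocsti_restrict=1 the same call fails with EPERM unless the caller holds CAP_SYS_ADMIN in the user namespace that opened the tty.

    #include <stdio.h>
    #include <string.h>
    #include <fcntl.h>
    #include <unistd.h>
    #include <sys/ioctl.h>      /* TIOCSTI */

    int main(void)
    {
        const char *cmd = "id\n";                 /* keystrokes to inject */
        int fd = open("/dev/tty", O_RDWR);        /* the caller's own controlling tty */

        if (fd < 0) {
            perror("open /dev/tty");
            return 1;
        }
        for (size_t i = 0; i < strlen(cmd); i++) {
            /* Fails with EPERM when tiocsti_restrict=1 and CAP_SYS_ADMIN is absent. */
            if (ioctl(fd, TIOCSTI, &cmd[i]) < 0) {
                perror("ioctl(TIOCSTI)");
                close(fd);
                return 1;
            }
        }
        close(fd);
        return 0;
    }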
5 changes: 3 additions & 2 deletions arch/Kconfig
@@ -643,7 +643,7 @@ config ARCH_MMAP_RND_BITS
int "Number of bits to use for ASLR of mmap base address" if EXPERT
range ARCH_MMAP_RND_BITS_MIN ARCH_MMAP_RND_BITS_MAX
default ARCH_MMAP_RND_BITS_DEFAULT if ARCH_MMAP_RND_BITS_DEFAULT
default ARCH_MMAP_RND_BITS_MIN
default ARCH_MMAP_RND_BITS_MAX
depends on HAVE_ARCH_MMAP_RND_BITS
help
This value can be used to select the number of bits to use to
@@ -677,7 +677,7 @@ config ARCH_MMAP_RND_COMPAT_BITS
int "Number of bits to use for ASLR of mmap base address for compatible applications" if EXPERT
range ARCH_MMAP_RND_COMPAT_BITS_MIN ARCH_MMAP_RND_COMPAT_BITS_MAX
default ARCH_MMAP_RND_COMPAT_BITS_DEFAULT if ARCH_MMAP_RND_COMPAT_BITS_DEFAULT
default ARCH_MMAP_RND_COMPAT_BITS_MIN
default ARCH_MMAP_RND_COMPAT_BITS_MAX
depends on HAVE_ARCH_MMAP_RND_COMPAT_BITS
help
This value can be used to select the number of bits to use to
@@ -888,6 +888,7 @@ config ARCH_HAS_REFCOUNT

config REFCOUNT_FULL
bool "Perform full reference count validation at the expense of speed"
default y
help
Enabling this switches the refcounting infrastructure from a fast
unchecked atomic_t implementation to a fully state checked
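The switch from the *_MIN to the *_MAX defaults above matters because mmap_rnd_bits (and its compat counterpart) directly sizes the random offset added to the mmap base. Roughly, the existing per-arch helper behaves like the following sketch (paraphrased from arch_mmap_rnd(), not part of this diff), so defaulting to the architecture maximum uses all of the available ASLR entropy:

    /* Sketch of how mmap_rnd_bits feeds mmap-base ASLR (paraphrase of arch_mmap_rnd()). */
    static unsigned long arch_mmap_rnd_sketch(void)
    {
        unsigned long rnd;

        rnd = get_random_long() & ((1UL << mmap_rnd_bits) - 1);
        return rnd << PAGE_SHIFT;   /* page-granular offset applied to the mmap base */
    }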
2 changes: 2 additions & 0 deletions arch/arm64/Kconfig
@@ -1121,6 +1121,7 @@ config RODATA_FULL_DEFAULT_ENABLED

config ARM64_SW_TTBR0_PAN
bool "Emulate Privileged Access Never using TTBR0_EL1 switching"
default y
help
Enabling this option prevents the kernel from accessing
user-space memory directly by pointing TTBR0_EL1 to a reserved
@@ -1497,6 +1498,7 @@ config RANDOMIZE_BASE
bool "Randomize the address of the kernel image"
select ARM64_MODULE_PLTS if MODULES
select RELOCATABLE
default y
help
Randomizes the virtual address at which the kernel image is
loaded, as a security feature that deters exploit attempts
1 change: 1 addition & 0 deletions arch/arm64/Kconfig.debug
@@ -43,6 +43,7 @@ config ARM64_RANDOMIZE_TEXT_OFFSET
config DEBUG_WX
bool "Warn on W+X mappings at boot"
select ARM64_PTDUMP_CORE
default y
---help---
Generate a warning if any W+X mappings are found at boot.

1 change: 0 additions & 1 deletion arch/arm64/configs/defconfig
@@ -1,4 +1,3 @@
CONFIG_SYSVIPC=y
CONFIG_POSIX_MQUEUE=y
CONFIG_AUDIT=y
CONFIG_NO_HZ_IDLE=y
14 changes: 5 additions & 9 deletions arch/arm64/include/asm/elf.h
@@ -103,14 +103,10 @@

/*
* This is the base location for PIE (ET_DYN with INTERP) loads. On
* 64-bit, this is above 4GB to leave the entire 32-bit address
* 64-bit, this is raised to 4GB to leave the entire 32-bit address
* space open for things that want to use the area for 32-bit pointers.
*/
#ifdef CONFIG_ARM64_FORCE_52BIT
#define ELF_ET_DYN_BASE (2 * TASK_SIZE_64 / 3)
#else
#define ELF_ET_DYN_BASE (2 * DEFAULT_MAP_WINDOW_64 / 3)
#endif /* CONFIG_ARM64_FORCE_52BIT */
#define ELF_ET_DYN_BASE 0x100000000UL

#ifndef __ASSEMBLY__

@@ -164,10 +160,10 @@ extern int arch_setup_additional_pages(struct linux_binprm *bprm,
/* 1GB of VA */
#ifdef CONFIG_COMPAT
#define STACK_RND_MASK (test_thread_flag(TIF_32BIT) ? \
0x7ff >> (PAGE_SHIFT - 12) : \
0x3ffff >> (PAGE_SHIFT - 12))
((1UL << mmap_rnd_compat_bits) - 1) >> (PAGE_SHIFT - 12) : \
((1UL << mmap_rnd_bits) - 1) >> (PAGE_SHIFT - 12))
#else
#define STACK_RND_MASK (0x3ffff >> (PAGE_SHIFT - 12))
#define STACK_RND_MASK (((1UL << mmap_rnd_bits) - 1) >> (PAGE_SHIFT - 12))
#endif

#ifdef __AARCH64EB__
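STACK_RND_MASK is consumed by the generic randomize_stack_top() when the stack is placed at execve time, roughly as sketched below (a paraphrase, not part of this diff). Tying the mask to mmap_rnd_bits therefore raises stack-base entropy from the fixed 18 bits (0x3ffff) to whatever the ASLR configuration provides, for example 33 bits on a typical 48-bit-VA, 4 KiB-page arm64 build.

    /* Paraphrase of randomize_stack_top(): STACK_RND_MASK now tracks mmap entropy. */
    unsigned long randomize_stack_top_sketch(unsigned long stack_top)
    {
        unsigned long rnd = 0;

        if (current->flags & PF_RANDOMIZE) {
            rnd = get_random_long() & STACK_RND_MASK;   /* randomness in pages */
            rnd <<= PAGE_SHIFT;
        }
        return PAGE_ALIGN(stack_top) - rnd;   /* stack grows down */
    }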
4 changes: 2 additions & 2 deletions arch/arm64/kernel/process.c
@@ -549,9 +549,9 @@ unsigned long arch_align_stack(unsigned long sp)
unsigned long arch_randomize_brk(struct mm_struct *mm)
{
if (is_compat_task())
return randomize_page(mm->brk, SZ_32M);
return mm->brk + get_random_long() % SZ_32M + PAGE_SIZE;
else
return randomize_page(mm->brk, SZ_1G);
return mm->brk + get_random_long() % SZ_1G + PAGE_SIZE;
}

/*
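The new formula changes brk placement in two ways worth spelling out (the same pattern is applied to x86 later in this diff): the random offset is byte-granular rather than page-aligned, so the low bits of the heap base are randomized too, and the added PAGE_SIZE term guarantees at least one unmapped page between the ELF brk and the randomized heap. A rough illustration of the resulting range (illustrative restatement, not code from the patch):

    /* Illustration: on 64-bit, arch_randomize_brk() now returns a value in
     *   [mm->brk + PAGE_SIZE, mm->brk + SZ_1G + PAGE_SIZE)
     * instead of the old page-aligned randomize_page(mm->brk, SZ_1G), which
     * could return mm->brk itself, i.e. a zero gap. */
    unsigned long lo = mm->brk + PAGE_SIZE;            /* guaranteed one-page gap */
    unsigned long hi = mm->brk + SZ_1G + PAGE_SIZE;    /* exclusive upper bound */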
8 changes: 3 additions & 5 deletions arch/x86/Kconfig
@@ -1210,8 +1210,7 @@ config VM86
default X86_LEGACY_VM86

config X86_16BIT
bool "Enable support for 16-bit segments" if EXPERT
default y
bool "Enable support for 16-bit segments"
depends on MODIFY_LDT_SYSCALL
---help---
This option is required by programs like Wine to run 16-bit
@@ -2349,7 +2348,7 @@ config COMPAT_VDSO
choice
prompt "vsyscall table for legacy applications"
depends on X86_64
default LEGACY_VSYSCALL_XONLY
default LEGACY_VSYSCALL_NONE
help
Legacy user code that does not know how to find the vDSO expects
to be able to issue three syscalls by calling fixed addresses in
@@ -2445,8 +2444,7 @@ config CMDLINE_OVERRIDE
be set to 'N' under normal conditions.

config MODIFY_LDT_SYSCALL
bool "Enable the LDT (local descriptor table)" if EXPERT
default y
bool "Enable the LDT (local descriptor table)"
---help---
Linux can allow user programs to install a per-process x86
Local Descriptor Table (LDT) using the modify_ldt(2) system
1 change: 1 addition & 0 deletions arch/x86/Kconfig.debug
@@ -91,6 +91,7 @@ config EFI_PGT_DUMP
config DEBUG_WX
bool "Warn on W+X mappings at boot"
select X86_PTDUMP_CORE
default y
---help---
Generate a warning if any W+X mappings are found at boot.

1 change: 0 additions & 1 deletion arch/x86/configs/x86_64_defconfig
@@ -1,5 +1,4 @@
# CONFIG_LOCALVERSION_AUTO is not set
CONFIG_SYSVIPC=y
CONFIG_POSIX_MQUEUE=y
CONFIG_BSD_PROCESS_ACCT=y
CONFIG_TASKSTATS=y
48 changes: 1 addition & 47 deletions arch/x86/entry/vdso/vma.c
@@ -198,55 +198,9 @@ static int map_vdso(const struct vdso_image *image, unsigned long addr)
}

#ifdef CONFIG_X86_64
/*
* Put the vdso above the (randomized) stack with another randomized
* offset. This way there is no hole in the middle of address space.
* To save memory make sure it is still in the same PTE as the stack
* top. This doesn't give that many random bits.
*
* Note that this algorithm is imperfect: the distribution of the vdso
* start address within a PMD is biased toward the end.
*
* Only used for the 64-bit and x32 vdsos.
*/
static unsigned long vdso_addr(unsigned long start, unsigned len)
{
unsigned long addr, end;
unsigned offset;

/*
* Round up the start address. It can start out unaligned as a result
* of stack start randomization.
*/
start = PAGE_ALIGN(start);

/* Round the lowest possible end address up to a PMD boundary. */
end = (start + len + PMD_SIZE - 1) & PMD_MASK;
if (end >= TASK_SIZE_MAX)
end = TASK_SIZE_MAX;
end -= len;

if (end > start) {
offset = get_random_int() % (((end - start) >> PAGE_SHIFT) + 1);
addr = start + (offset << PAGE_SHIFT);
} else {
addr = start;
}

/*
* Forcibly align the final address in case we have a hardware
* issue that requires alignment for performance reasons.
*/
addr = align_vdso_addr(addr);

return addr;
}

static int map_vdso_randomized(const struct vdso_image *image)
{
unsigned long addr = vdso_addr(current->mm->start_stack, image->size-image->sym_vvar_start);

return map_vdso(image, addr);
return map_vdso(image, 0);
}
#endif

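With the bespoke vdso_addr() removed, map_vdso_randomized() passes a zero address hint, so placement falls through to the generic allocation path inside map_vdso(). The practical effect, matching the "x86_64: move vdso to mmap region from stack region" commit, is that the vdso inherits the full mmap-base ASLR instead of the few, biased bits the old stack-adjacent scheme provided. Roughly, the surviving path looks like this sketch (a paraphrase of map_vdso(), which is unchanged by this hunk):

    /* Paraphrase of the generic placement now used for the 64-bit vdso. */
    addr = get_unmapped_area(NULL, 0, image->size - image->sym_vvar_start, 0, 0);
    if (IS_ERR_VALUE(addr))
        return addr;   /* propagate the allocation failure */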
15 changes: 9 additions & 6 deletions arch/x86/include/asm/elf.h
@@ -248,11 +248,11 @@ extern int force_personality32;

/*
* This is the base location for PIE (ET_DYN with INTERP) loads. On
* 64-bit, this is above 4GB to leave the entire 32-bit address
* 64-bit, this is raised to 4GB to leave the entire 32-bit address
* space open for things that want to use the area for 32-bit pointers.
*/
#define ELF_ET_DYN_BASE (mmap_is_ia32() ? 0x000400000UL : \
(DEFAULT_MAP_WINDOW / 3 * 2))
0x100000000UL)

/* This yields a mask that user programs can use to figure out what
instruction set this CPU supports. This could be done in user space,
@@ -312,8 +312,8 @@ extern bool mmap_address_hint_valid(unsigned long addr, unsigned long len);

#ifdef CONFIG_X86_32

#define __STACK_RND_MASK(is32bit) (0x7ff)
#define STACK_RND_MASK (0x7ff)
#define __STACK_RND_MASK(is32bit) ((1UL << mmap_rnd_bits) - 1)
#define STACK_RND_MASK ((1UL << mmap_rnd_bits) - 1)

#define ARCH_DLINFO ARCH_DLINFO_IA32

@@ -322,7 +322,11 @@ extern bool mmap_address_hint_valid(unsigned long addr, unsigned long len);
#else /* CONFIG_X86_32 */

/* 1GB for 64bit, 8MB for 32bit */
#define __STACK_RND_MASK(is32bit) ((is32bit) ? 0x7ff : 0x3fffff)
#ifdef CONFIG_COMPAT
#define __STACK_RND_MASK(is32bit) ((is32bit) ? (1UL << mmap_rnd_compat_bits) - 1 : (1UL << mmap_rnd_bits) - 1)
#else
#define __STACK_RND_MASK(is32bit) ((1UL << mmap_rnd_bits) - 1)
#endif
#define STACK_RND_MASK __STACK_RND_MASK(mmap_is_ia32())

#define ARCH_DLINFO \
@@ -380,5 +384,4 @@ struct va_alignment {
} ____cacheline_aligned;

extern struct va_alignment va_align;
extern unsigned long align_vdso_addr(unsigned long);
#endif /* _ASM_X86_ELF_H */
4 changes: 4 additions & 0 deletions arch/x86/include/asm/tlbflush.h
@@ -296,6 +296,7 @@ static inline void cr4_set_bits(unsigned long mask)

local_irq_save(flags);
cr4 = this_cpu_read(cpu_tlbstate.cr4);
BUG_ON(cr4 != __read_cr4());
if ((cr4 | mask) != cr4)
__cr4_set(cr4 | mask);
local_irq_restore(flags);
@@ -308,6 +309,7 @@ static inline void cr4_clear_bits(unsigned long mask)

local_irq_save(flags);
cr4 = this_cpu_read(cpu_tlbstate.cr4);
BUG_ON(cr4 != __read_cr4());
if ((cr4 & ~mask) != cr4)
__cr4_set(cr4 & ~mask);
local_irq_restore(flags);
@@ -318,6 +320,7 @@ static inline void cr4_toggle_bits_irqsoff(unsigned long mask)
unsigned long cr4;

cr4 = this_cpu_read(cpu_tlbstate.cr4);
BUG_ON(cr4 != __read_cr4());
__cr4_set(cr4 ^ mask);
}

@@ -424,6 +427,7 @@ static inline void __native_flush_tlb_global(void)
raw_local_irq_save(flags);

cr4 = this_cpu_read(cpu_tlbstate.cr4);
BUG_ON(cr4 != __read_cr4());
/* toggle PGE */
native_write_cr4(cr4 ^ X86_CR4_PGE);
/* write old PGE again and flush TLBs */
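The BUG_ON lines added above implement the shadow-CR4 sanity check from the commit list: the kernel keeps a per-CPU copy of CR4 in cpu_tlbstate.cr4, and every read-modify-write of CR4 now verifies that the cached copy still matches the hardware register. An exploit that toggles bits such as SMEP or SMAP by writing CR4 directly, bypassing these helpers, desynchronizes the two, so the next cr4_set_bits()/cr4_clear_bits() call hits BUG() instead of silently proceeding with the tampered value. The invariant being enforced is simply:

    /* Invariant checked on every CR4 update (illustrative restatement). */
    BUG_ON(this_cpu_read(cpu_tlbstate.cr4) != __read_cr4());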
1 change: 0 additions & 1 deletion arch/x86/kernel/cpu/common.c
@@ -1853,7 +1853,6 @@ void cpu_init(void)
wrmsrl(MSR_KERNEL_GS_BASE, 0);
barrier();

x86_configure_nx();
x2apic_setup();

/*
7 changes: 6 additions & 1 deletion arch/x86/kernel/process.c
@@ -42,6 +42,8 @@
#include <asm/prctl.h>
#include <asm/spec-ctrl.h>
#include <asm/proto.h>
#include <asm/elf.h>
#include <linux/sizes.h>

#include "process.h"

@@ -798,7 +800,10 @@ unsigned long arch_align_stack(unsigned long sp)

unsigned long arch_randomize_brk(struct mm_struct *mm)
{
return randomize_page(mm->brk, 0x02000000);
if (mmap_is_ia32())
return mm->brk + get_random_long() % SZ_32M + PAGE_SIZE;
else
return mm->brk + get_random_long() % SZ_1G + PAGE_SIZE;
}

/*