1 change: 1 addition & 0 deletions crates/cranelift/src/bounds_checks.rs
@@ -402,6 +402,7 @@ fn bounds_check_field_access(
if can_use_virtual_memory
&& heap.memory.minimum_byte_size().unwrap_or(u64::MAX) <= memory_reservation
&& !heap.memory.memory_may_move(env.tunables())
+ && memory_reservation >= offset_and_size
{
let adjusted_bound = memory_reservation.checked_sub(offset_and_size).unwrap();
let adjusted_bound_value = builder
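The new clause protects the line that immediately follows it in this hunk: `memory_reservation.checked_sub(offset_and_size).unwrap()` is only safe if the reservation is at least as large as the static offset plus access size. A minimal sketch of the invariant (a hypothetical free function, not the actual code in `bounds_checks.rs`):

```rust
/// Sketch of why the extra `>=` guard matters: the adjusted bound is only
/// computed once underflow has been ruled out.
fn adjusted_bound(memory_reservation: u64, offset_and_size: u64) -> Option<u64> {
    if memory_reservation >= offset_and_size {
        // Safe: `checked_sub` is guaranteed to return `Some` here.
        memory_reservation.checked_sub(offset_and_size)
    } else {
        // Without the new condition, a static offset + access size larger than
        // the reservation would hit `checked_sub(..).unwrap()` and panic.
        None
    }
}
```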
2 changes: 1 addition & 1 deletion crates/environ/src/types.rs
@@ -2152,7 +2152,7 @@ impl Memory {
pub fn can_elide_bounds_check(&self, tunables: &Tunables, host_page_size_log2: u8) -> bool {
self.can_use_virtual_memory(tunables, host_page_size_log2)
&& self.idx_type == IndexType::I32
- && tunables.memory_reservation >= (1 << 32)
+ && tunables.memory_reservation + tunables.memory_guard_size >= (1 << 32)
}

/// Returns the static size of this heap in bytes at runtime, if available.
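The relaxed predicate counts the guard region toward the 4 GiB requirement, so a small reservation backed by a large guard can still elide bounds checks for 32-bit memories. A rough sketch using the constants from the new disassembly test below (the field names mirror `Tunables`, but this is not the real implementation):

```rust
/// Simplified elision predicate; assumes a 32-bit index type and that virtual
/// memory can be used, which the real method checks separately.
fn can_elide_bounds_check(memory_reservation: u64, memory_guard_size: u64) -> bool {
    // Any 32-bit index is at most u32::MAX, so if reservation + guard covers
    // the full 4 GiB range, every access either lands inside the reservation
    // or faults in the guard pages.
    memory_reservation + memory_guard_size >= 1 << 32
}

fn main() {
    // Flags used by tests/disas/bounds-check.wat below:
    // memory-reservation = 0x8000000 (128 MiB), memory-guard-size = 0x100000000 (4 GiB).
    assert!(can_elide_bounds_check(0x800_0000, 0x1_0000_0000));
    // Before this change only the reservation was considered, so this
    // configuration would not have qualified.
    assert!(0x800_0000u64 < 1 << 32);
}
```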
56 changes: 56 additions & 0 deletions tests/disas/bounds-check.wat
@@ -0,0 +1,56 @@
;;! test = "optimize"
;;! target = "x86_64"
;;! flags = ["-Omemory-reservation=0x8000000", "-Omemory-guard-size=0x100000000", "-Omemory-may-move=n"]

(module
(memory 16)
(func $store (param i32)
;; No offset. But because we have a 4 GiB guard, this needs no bounds check.
local.get 0
i32.const 0
i32.store8 0

;; The greatest possible offset that can ever be in bounds. Again, no
;; bounds check.
local.get 0
i32.const 0
i32.store8 0 offset=134217727

;; The greatest encodable offset. This will never be in bounds, given
;; our memory reservation size, so optimization isn't a concern.
local.get 0
i32.const 0
i32.store8 0 offset=4294967295
)
(export "store" (func $store))
)
;; function u0:0(i64 vmctx, i64, i32) tail {
;; gv0 = vmctx
;; gv1 = load.i64 notrap aligned readonly gv0+8
;; gv2 = load.i64 notrap aligned gv1+16
;; gv3 = vmctx
;; gv4 = load.i64 notrap aligned gv3+64
;; gv5 = load.i64 notrap aligned readonly can_move checked gv3+56
;; stack_limit = gv2
;;
;; block0(v0: i64, v1: i64, v2: i32):
;; @002a v3 = iconst.i32 0
;; @002c v5 = load.i64 notrap aligned readonly can_move checked v0+56
;; @002c v4 = uextend.i64 v2
;; @002c v6 = iadd v5, v4
;; @002c istore8 little heap v3, v6 ; v3 = 0
;; @0033 v11 = iconst.i64 0x07ff_ffff
;; @0033 v12 = iadd v6, v11 ; v11 = 0x07ff_ffff
;; @0033 istore8 little heap v3, v12 ; v3 = 0
;; @003d v15 = load.i64 notrap aligned v0+64
;; @003d v16 = icmp ugt v4, v15
;; @003d v21 = iconst.i64 0
;; @003d v19 = iconst.i64 0xffff_ffff
;; @003d v20 = iadd v6, v19 ; v19 = 0xffff_ffff
;; @003d v22 = select_spectre_guard v16, v21, v20 ; v21 = 0
;; @003d istore8 little heap v3, v22 ; v3 = 0
;; @0044 jump block1
;;
;; block1:
;; @0044 return
;; }
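The three stores above exercise each side of the new condition; the arithmetic, with constants taken from the test flags (illustrative code only):

```rust
fn main() {
    let memory_reservation: u64 = 0x800_0000; // -Omemory-reservation=0x8000000
    // store8 offset=134217727: offset + 1 byte equals the reservation exactly,
    // so `memory_reservation >= offset_and_size` holds and, combined with the
    // 4 GiB guard, the access compiles to a plain `iadd` (see @0033).
    assert_eq!(134_217_727u64 + 1, memory_reservation);
    // store8 offset=4294967295: offset + 1 byte exceeds the reservation, so the
    // access keeps a dynamic comparison against the loaded heap bound plus a
    // `select_spectre_guard` (see @003d).
    assert!(4_294_967_295u64 + 1 > memory_reservation);
}
```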
27 changes: 11 additions & 16 deletions tests/disas/winch/x64/load/v128_load32x2_s_oob_avx.wat
@@ -14,27 +14,22 @@
;; movq 0x10(%r11), %r11
;; addq $0x10, %r11
;; cmpq %rsp, %r11
- ;; ja 0x72
+ ;; ja 0x64
;; 1c: movq %rdi, %r14
;; subq $0x10, %rsp
;; movq %rdi, 8(%rsp)
;; movq %rsi, (%rsp)
;; movl $0xfff9, %eax
- ;; movl $0x10000, %ecx
- ;; movl %eax, %edx
- ;; addq $8, %rdx
- ;; jb 0x74
- ;; 48: cmpq %rcx, %rdx
- ;; ja 0x76
- ;; 51: movq 0x38(%r14), %rbx
- ;; addq %rax, %rbx
- ;; movl $0, %esi
- ;; cmpq %rcx, %rdx
- ;; cmovaq %rsi, %rbx
- ;; vpmovsxdq (%rbx), %xmm0
+ ;; cmpq $0xfff8, %rax
+ ;; ja 0x66
+ ;; 40: movq 0x38(%r14), %rcx
+ ;; addq %rax, %rcx
+ ;; movl $0, %edx
+ ;; cmpq $0xfff8, %rax
+ ;; cmovaq %rdx, %rcx
+ ;; vpmovsxdq (%rcx), %xmm0
;; addq $0x10, %rsp
;; popq %rbp
;; retq
- ;; 72: ud2
- ;; 74: ud2
- ;; 76: ud2
+ ;; 64: ud2
+ ;; 66: ud2
27 changes: 11 additions & 16 deletions tests/disas/winch/x64/load/v128_load32x2_u_oob_avx.wat
@@ -14,27 +14,22 @@
;; movq 0x10(%r11), %r11
;; addq $0x10, %r11
;; cmpq %rsp, %r11
- ;; ja 0x72
+ ;; ja 0x64
;; 1c: movq %rdi, %r14
;; subq $0x10, %rsp
;; movq %rdi, 8(%rsp)
;; movq %rsi, (%rsp)
;; movl $0xfff9, %eax
- ;; movl $0x10000, %ecx
- ;; movl %eax, %edx
- ;; addq $8, %rdx
- ;; jb 0x74
- ;; 48: cmpq %rcx, %rdx
- ;; ja 0x76
- ;; 51: movq 0x38(%r14), %rbx
- ;; addq %rax, %rbx
- ;; movl $0, %esi
- ;; cmpq %rcx, %rdx
- ;; cmovaq %rsi, %rbx
- ;; vpmovzxdq (%rbx), %xmm0
+ ;; cmpq $0xfff8, %rax
+ ;; ja 0x66
+ ;; 40: movq 0x38(%r14), %rcx
+ ;; addq %rax, %rcx
+ ;; movl $0, %edx
+ ;; cmpq $0xfff8, %rax
+ ;; cmovaq %rdx, %rcx
+ ;; vpmovzxdq (%rcx), %xmm0
;; addq $0x10, %rsp
;; popq %rbp
;; retq
- ;; 72: ud2
- ;; 74: ud2
- ;; 76: ud2
+ ;; 64: ud2
+ ;; 66: ud2
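Both regenerated expectations (this file and the signed variant above) show the same simplification: a single `cmpq $0xfff8, %rax` replaces the old add-then-compare sequence and its extra overflow trap. A sketch of the arithmetic, assuming the access in the `.wat` source (not shown in this diff) uses a constant index of 0xfff9 with offset 0 and an 8-byte `v128.load32x2`:

```rust
fn main() {
    let static_bound: u64 = 0x10000; // one 64 KiB Wasm page
    let access_size: u64 = 8;        // v128.load32x2 reads 8 bytes
    // Winch now folds `bound - (offset + access_size)` at compile time and
    // emits one unsigned compare of the index against it ...
    let adjusted_bound = static_bound - access_size;
    assert_eq!(adjusted_bound, 0xfff8);
    // ... which still classifies the index 0xfff9 as out of bounds, without
    // the old runtime addition, overflow branch, and second comparison.
    assert!(0xfff9u64 > adjusted_bound);
}
```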
151 changes: 76 additions & 75 deletions winch/codegen/src/codegen/mod.rs
@@ -695,6 +695,8 @@ where
.can_elide_bounds_check(self.tunables, self.env.page_size_log2);

let addr = if offset_with_access_size > heap.memory.maximum_byte_size().unwrap_or(u64::MAX)
+ || (!self.tunables.memory_may_move
+ && offset_with_access_size > self.tunables.memory_reservation)
{
// Detect at compile time if the access is out of bounds.
// Doing so will put the compiler in an unreachable code state,
@@ -706,7 +708,80 @@
self.masm.trap(TrapCode::HEAP_OUT_OF_BOUNDS)?;
self.context.reachable = false;
None
- } else if !can_elide_bounds_check {
+
+ // Account for the case in which we can completely elide the bounds
+ // checks.
+ //
+ // This case makes use of the fact that if a memory access uses
+ // a 32-bit index, then we can be certain that
+ //
+ // index <= u32::MAX
+ //
+ // Therefore if any 32-bit index access occurs in the region
+ // represented by
+ //
+ // bound + guard_size - (offset + access_size)
+ //
+ // we are certain that it's in bounds or that the underlying virtual
+ // memory subsystem will report an illegal access at runtime.
+ //
+ // Note:
+ //
+ // * bound - (offset + access_size) cannot wrap, because it's checked
+ // in the condition above.
+ // * bound + heap.offset_guard_size is guaranteed to not overflow if
+ // the heap configuration is correct, given that its address must
+ // fit in 64 bits.
+ // * If the heap type is 32-bit, the offset is at most u32::MAX, so
+ // no adjustment is needed as part of
+ // [bounds::ensure_index_and_offset].
+ } else if can_elide_bounds_check
+ && u64::from(u32::MAX)
+ <= self.tunables.memory_reservation + self.tunables.memory_guard_size
+ - offset_with_access_size
+ {
+ assert!(can_elide_bounds_check);
+ assert!(heap.index_type() == WasmValType::I32);
+ let addr = self.context.any_gpr(self.masm)?;
+ bounds::load_heap_addr_unchecked(self.masm, &heap, index, offset, addr, ptr_size)?;
+ Some(addr)
+
+ // Account for the case of a static memory size. The access is out
+ // of bounds if:
+ //
+ // index > bound - (offset + access_size)
+ //
+ // bound - (offset + access_size) cannot wrap, because the case where
+ // (offset + access_size) > bound is handled above.
+ } else if let Some(static_size) = heap.memory.static_heap_size() {
+ let bounds = Bounds::from_u64(static_size);
+ let addr = bounds::load_heap_addr_checked(
+ self.masm,
+ &mut self.context,
+ ptr_size,
+ &heap,
+ enable_spectre_mitigation,
+ bounds,
+ index,
+ offset,
+ |masm, bounds, index| {
+ let adjusted_bounds = bounds.as_u64() - offset_with_access_size;
+ let index_reg = index.as_typed_reg().reg;
+ masm.cmp(
+ index_reg,
+ RegImm::i64(adjusted_bounds as i64),
+ // Similar to the dynamic heap case, even though the
+ // offset and access size are bound through the heap
+ // type, when added they can overflow, resulting in
+ // an erroneous comparison, therefore we rely on the
+ // target pointer size.
+ ptr_size,
+ )?;
+ Ok(IntCmpKind::GtU)
+ },
+ )?;
+ Some(addr)
+ } else {
// Account for the general case for bounds-checked memories. The
// access is out of bounds if:
// * index + offset + access_size overflows
@@ -784,80 +859,6 @@ where
self.context.free_reg(bounds.as_typed_reg().reg);
self.context.free_reg(index_offset_and_access_size);
Some(addr)

- // Account for the case in which we can completely elide the bounds
- // checks.
- //
- // This case, makes use of the fact that if a memory access uses
- // a 32-bit index, then we be certain that
- //
- // index <= u32::MAX
- //
- // Therefore if any 32-bit index access occurs in the region
- // represented by
- //
- // bound + guard_size - (offset + access_size)
- //
- // We are certain that it's in bounds or that the underlying virtual
- // memory subsystem will report an illegal access at runtime.
- //
- // Note:
- //
- // * bound - (offset + access_size) cannot wrap, because it's checked
- // in the condition above.
- // * bound + heap.offset_guard_size is guaranteed to not overflow if
- // the heap configuration is correct, given that it's address must
- // fit in 64-bits.
- // * If the heap type is 32-bits, the offset is at most u32::MAX, so
- // no adjustment is needed as part of
- // [bounds::ensure_index_and_offset].
- } else if u64::from(u32::MAX)
- <= self.tunables.memory_reservation + self.tunables.memory_guard_size
- - offset_with_access_size
- {
- assert!(can_elide_bounds_check);
- assert!(heap.index_type() == WasmValType::I32);
- let addr = self.context.any_gpr(self.masm)?;
- bounds::load_heap_addr_unchecked(self.masm, &heap, index, offset, addr, ptr_size)?;
- Some(addr)
-
- // Account for the all remaining cases, aka. The access is out
- // of bounds if:
- //
- // index > bound - (offset + access_size)
- //
- // bound - (offset + access_size) cannot wrap, because we already
- // checked that (offset + access_size) > bound, above.
- } else {
- assert!(can_elide_bounds_check);
- assert!(heap.index_type() == WasmValType::I32);
- let bounds = Bounds::from_u64(self.tunables.memory_reservation);
- let addr = bounds::load_heap_addr_checked(
- self.masm,
- &mut self.context,
- ptr_size,
- &heap,
- enable_spectre_mitigation,
- bounds,
- index,
- offset,
- |masm, bounds, index| {
- let adjusted_bounds = bounds.as_u64() - offset_with_access_size;
- let index_reg = index.as_typed_reg().reg;
- masm.cmp(
- index_reg,
- RegImm::i64(adjusted_bounds as i64),
- // Similar to the dynamic heap case, even though the
- // offset and access size are bound through the heap
- // type, when added they can overflow, resulting in
- // an erroneous comparison, therefore we rely on the
- // target pointer size.
- ptr_size,
- )?;
- Ok(IntCmpKind::GtU)
- },
- )?;
- Some(addr)
};

self.context.free_reg(index.as_typed_reg().reg);
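Taken together, the reordered branches amount to a four-way decision before any address is materialized. The sketch below condenses that order into plain Rust (a hypothetical signature; the real code also threads the macro-assembler, register allocation, and Spectre mitigation through each arm):

```rust
/// Which bounds-check strategy Winch picks for a memory access (sketch).
enum BoundsCheck {
    /// Statically out of bounds: emit an unconditional trap.
    TrapAlways,
    /// Reservation + guard covers every 32-bit index: no runtime check.
    Elided,
    /// Static heap size: compare the index against `bound - (offset + size)`.
    StaticBound(u64),
    /// General case: load the live heap bound and check at runtime.
    Dynamic,
}

fn pick_strategy(
    offset_with_access_size: u64,
    max_byte_size: u64,
    memory_may_move: bool,
    memory_reservation: u64,
    memory_guard_size: u64,
    can_elide_bounds_check: bool,
    static_heap_size: Option<u64>,
) -> BoundsCheck {
    if offset_with_access_size > max_byte_size
        || (!memory_may_move && offset_with_access_size > memory_reservation)
    {
        BoundsCheck::TrapAlways
    } else if can_elide_bounds_check
        && u64::from(u32::MAX)
            <= memory_reservation + memory_guard_size - offset_with_access_size
    {
        BoundsCheck::Elided
    } else if let Some(static_size) = static_heap_size {
        // Mirrors `bounds.as_u64() - offset_with_access_size` in the closure above.
        BoundsCheck::StaticBound(static_size - offset_with_access_size)
    } else {
        BoundsCheck::Dynamic
    }
}
```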