@@ -695,6 +695,8 @@ where
             .can_elide_bounds_check(self.tunables, self.env.page_size_log2);
 
         let addr = if offset_with_access_size > heap.memory.maximum_byte_size().unwrap_or(u64::MAX)
+            || (!self.tunables.memory_may_move
+                && offset_with_access_size > self.tunables.memory_reservation)
         {
             // Detect at compile time if the access is out of bounds.
             // Doing so will put the compiler in an unreachable code state,
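The new disjunct widens the compile-time trap to memories that can never move: when the memory is pinned to a fixed reservation, a constant `offset + access_size` beyond `memory_reservation` can never be in bounds. A standalone restatement of the predicate, as a sketch (the function and parameter names are illustrative, not part of the commit):

```rust
// Hypothetical restatement of the compile-time out-of-bounds test;
// parameters mirror the values the diff reads off `heap` and
// `self.tunables`.
fn statically_out_of_bounds(
    offset_with_access_size: u64,
    maximum_byte_size: Option<u64>, // heap.memory.maximum_byte_size()
    memory_may_move: bool,          // self.tunables.memory_may_move
    memory_reservation: u64,        // self.tunables.memory_reservation
) -> bool {
    // Trap at compile time if the constant part of the address exceeds
    // the memory's maximum size, or, for memories pinned to a fixed
    // reservation, exceeds that reservation.
    offset_with_access_size > maximum_byte_size.unwrap_or(u64::MAX)
        || (!memory_may_move && offset_with_access_size > memory_reservation)
}
```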
@@ -706,7 +708,80 @@ where
             self.masm.trap(TrapCode::HEAP_OUT_OF_BOUNDS)?;
             self.context.reachable = false;
             None
-        } else if !can_elide_bounds_check {
+
+        // Account for the case in which we can completely elide the bounds
+        // checks.
+        //
+        // This case makes use of the fact that if a memory access uses
+        // a 32-bit index, then we can be certain that
+        //
+        //      index <= u32::MAX
+        //
+        // Therefore, if any 32-bit index access occurs in the region
+        // represented by
+        //
+        //      bound + guard_size - (offset + access_size)
+        //
+        // we are certain that it's in bounds or that the underlying virtual
+        // memory subsystem will report an illegal access at runtime.
+        //
+        // Note:
+        //
+        // * bound - (offset + access_size) cannot wrap, because the
+        //   condition above ensures (offset + access_size) <= bound.
+        // * bound + heap.offset_guard_size is guaranteed not to overflow if
+        //   the heap configuration is correct, given that its address must
+        //   fit in 64 bits.
+        // * If the heap type is 32 bits, the offset is at most u32::MAX, so
+        //   no adjustment is needed as part of
+        //   [bounds::ensure_index_and_offset].
+        } else if can_elide_bounds_check
+            && u64::from(u32::MAX)
+                <= self.tunables.memory_reservation + self.tunables.memory_guard_size
+                    - offset_with_access_size
+        {
+            assert!(can_elide_bounds_check);
+            assert!(heap.index_type() == WasmValType::I32);
+            let addr = self.context.any_gpr(self.masm)?;
+            bounds::load_heap_addr_unchecked(self.masm, &heap, index, offset, addr, ptr_size)?;
+            Some(addr)
+
+        // Account for the case of a static memory size. The access is out
+        // of bounds if:
+        //
+        //      index > bound - (offset + access_size)
+        //
+        // bound - (offset + access_size) cannot wrap, because the case where
+        // (offset + access_size) > bound was already handled above.
+        } else if let Some(static_size) = heap.memory.static_heap_size() {
+            let bounds = Bounds::from_u64(static_size);
+            let addr = bounds::load_heap_addr_checked(
+                self.masm,
+                &mut self.context,
+                ptr_size,
+                &heap,
+                enable_spectre_mitigation,
+                bounds,
+                index,
+                offset,
+                |masm, bounds, index| {
+                    let adjusted_bounds = bounds.as_u64() - offset_with_access_size;
+                    let index_reg = index.as_typed_reg().reg;
+                    masm.cmp(
+                        index_reg,
+                        RegImm::i64(adjusted_bounds as i64),
+                        // Similar to the dynamic heap case: even though the
+                        // offset and access size are bounded by the heap
+                        // type, their sum can overflow and produce an
+                        // erroneous comparison, so we rely on the
+                        // target pointer size.
+                        ptr_size,
+                    )?;
+                    Ok(IntCmpKind::GtU)
+                },
+            )?;
+            Some(addr)
+        } else {
             // Account for the general case for bounds-checked memories. The
             // access is out of bounds if:
             // * index + offset + access_size overflows
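The relocated elision branch hinges on one inequality: a 32-bit index can be at most `u32::MAX`, so if `u32::MAX <= memory_reservation + memory_guard_size - (offset + access_size)`, every reachable address falls inside reserved or guard pages and no runtime check is emitted. A minimal numeric sketch of that predicate, with hypothetical names (`can_fully_elide`, `reservation`, and `guard_size` are illustrative, not from the diff):

```rust
// Sketch: can every 32-bit-indexed access with this constant
// offset + access_size be proven in bounds by reservation + guard alone?
fn can_fully_elide(reservation: u64, guard_size: u64, offset_with_access_size: u64) -> bool {
    // The earlier compile-time trap guarantees
    // offset_with_access_size <= reservation, so this cannot wrap.
    u64::from(u32::MAX) <= reservation + guard_size - offset_with_access_size
}

fn main() {
    // A 4 GiB reservation plus a 32 MiB guard covers any u32 index for a
    // small constant offset, so the bounds check disappears entirely.
    assert!(can_fully_elide(1 << 32, 32 << 20, 8));
    // A bare 64 KiB reservation with no guard cannot; a check remains.
    assert!(!can_fully_elide(64 << 10, 0, 8));
}
```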
@@ -784,80 +859,6 @@ where
             self.context.free_reg(bounds.as_typed_reg().reg);
             self.context.free_reg(index_offset_and_access_size);
             Some(addr)
-
-        // Account for the case in which we can completely elide the bounds
-        // checks.
-        //
-        // This case makes use of the fact that if a memory access uses
-        // a 32-bit index, then we can be certain that
-        //
-        //      index <= u32::MAX
-        //
-        // Therefore, if any 32-bit index access occurs in the region
-        // represented by
-        //
-        //      bound + guard_size - (offset + access_size)
-        //
-        // we are certain that it's in bounds or that the underlying virtual
-        // memory subsystem will report an illegal access at runtime.
-        //
-        // Note:
-        //
-        // * bound - (offset + access_size) cannot wrap, because the
-        //   condition above ensures (offset + access_size) <= bound.
-        // * bound + heap.offset_guard_size is guaranteed not to overflow if
-        //   the heap configuration is correct, given that its address must
-        //   fit in 64 bits.
-        // * If the heap type is 32 bits, the offset is at most u32::MAX, so
-        //   no adjustment is needed as part of
-        //   [bounds::ensure_index_and_offset].
-        } else if u64::from(u32::MAX)
-            <= self.tunables.memory_reservation + self.tunables.memory_guard_size
-                - offset_with_access_size
-        {
-            assert!(can_elide_bounds_check);
-            assert!(heap.index_type() == WasmValType::I32);
-            let addr = self.context.any_gpr(self.masm)?;
-            bounds::load_heap_addr_unchecked(self.masm, &heap, index, offset, addr, ptr_size)?;
-            Some(addr)
-
-        // Account for all remaining cases; i.e., the access is out
-        // of bounds if:
-        //
-        //      index > bound - (offset + access_size)
-        //
-        // bound - (offset + access_size) cannot wrap, because the case where
-        // (offset + access_size) > bound was already handled above.
-        } else {
-            assert!(can_elide_bounds_check);
-            assert!(heap.index_type() == WasmValType::I32);
-            let bounds = Bounds::from_u64(self.tunables.memory_reservation);
-            let addr = bounds::load_heap_addr_checked(
-                self.masm,
-                &mut self.context,
-                ptr_size,
-                &heap,
-                enable_spectre_mitigation,
-                bounds,
-                index,
-                offset,
-                |masm, bounds, index| {
-                    let adjusted_bounds = bounds.as_u64() - offset_with_access_size;
-                    let index_reg = index.as_typed_reg().reg;
-                    masm.cmp(
-                        index_reg,
-                        RegImm::i64(adjusted_bounds as i64),
-                        // Similar to the dynamic heap case: even though the
-                        // offset and access size are bounded by the heap
-                        // type, their sum can overflow and produce an
-                        // erroneous comparison, so we rely on the
-                        // target pointer size.
-                        ptr_size,
-                    )?;
-                    Ok(IntCmpKind::GtU)
-                },
-            )?;
-            Some(addr)
         };
 
         self.context.free_reg(index.as_typed_reg().reg);
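For statically sized heaps, the emitted check folds the constant `offset + access_size` into the bound once, leaving a single unsigned (`GtU`) pointer-width comparison against the index. A loose model of the trap condition, with an assumed `traps` helper (not from the diff):

```rust
// Model of the runtime check for a statically sized heap: the access
// traps when index > bound - (offset + access_size), compared unsigned.
fn traps(index: u64, bound: u64, offset_with_access_size: u64) -> bool {
    // Cannot wrap: the compile-time branch already rejected
    // offset_with_access_size > bound.
    index > bound - offset_with_access_size
}

fn main() {
    let bound = 64 << 10; // one 64 KiB Wasm page of static heap
    assert!(!traps(0, bound, 8)); // low addresses are fine
    assert!(!traps(bound - 8, bound, 8)); // last fully in-bounds index
    assert!(traps(bound - 7, bound, 8)); // one byte past the end traps
}
```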