diff --git a/Makefile b/Makefile
index 0bed927..59332ff 100755
--- a/Makefile
+++ b/Makefile
@@ -6,6 +6,7 @@ REPORT_WITH_OFFSET ?= 0
 ENABLE_COVERAGE ?= 1
 COVERAGE_MEMORY_ERRORS ?= 1
 COVERAGE_CONTROL_FLOW_ERRORS ?= 1
+SEED_NON_SPECULATIVE_ERRORS ?= 1
 ENABLE_SANITY_CHECKS ?= 1
 ENABLE_STATS ?= 0
 ENABLE_SEQUENTIAL_SIMULATION ?= 0
diff --git a/install/patches/honggfuzz/instrument.c b/install/patches/honggfuzz/instrument.c
index f7383d0..21ca46f 100644
--- a/install/patches/honggfuzz/instrument.c
+++ b/install/patches/honggfuzz/instrument.c
@@ -99,6 +99,12 @@ void specfuzz_cov_vuln(uintptr_t pc) {
     }
 }
 
+// Adds current input to corpus
+void specfuzz_seed_input() {
+    // TODO: less lazy implementation
+    ATOMIC_PRE_INC_RELAXED(feedback->pidFeedbackPc[my_thread_no]);
+}
+
 __attribute__((preserve_most))
 void specfuzz_cov_trace_pc(uintptr_t pc) {
     // quick path - check the cache
diff --git a/src/SpecFuzzPass.cpp b/src/SpecFuzzPass.cpp
index 3eca0c2..ac015c1 100644
--- a/src/SpecFuzzPass.cpp
+++ b/src/SpecFuzzPass.cpp
@@ -633,17 +633,140 @@ auto X86SpecFuzzPass::visitWrite(MachineInstr &MI, MachineBasicBlock &Parent) ->
         .add(MI.getOperand(MemRefBegin + X86::AddrSegmentReg));
     restoreRegister(Parent, MI, Loc, X86::RSP, "checkpoint_sp");
-
+
+    MachineMemOperand *MMO = *MI.memoperands_begin();
+    uint64_t width = MMO->getSize();
+
+    LLVM_DEBUG(dbgs() << "Store's width: " << width << "\n");
+
+    BuildMI(Parent, MI, Loc, TII->get(X86::PUSH64i8))
+        .addImm((width > 8)? 8 : width);
+
     // PUSH %TmpReg
     BuildMI(Parent, MI, Loc, TII->get(X86::PUSH64r), TmpReg);
-
-    // PUSH (%TmpReg)
-    BuildMI(Parent, MI, Loc, TII->get(X86::PUSH64rmm), TmpReg)
-        .addImm(1).addReg(0)
-        .addImm(0).addReg(0);
+
+
+    switch (width) {
+        case 1:
+            preserveRegister(Parent, MI, Loc, X86::R14, "tmp_gpr2");
+
+            // Immediate is arbitrary
+            BuildMI(Parent, MI, Loc, TII->get(X86::PUSH64i8))
+                .addImm(0);
+
+            BuildMI(Parent, MI, Loc, TII->get(X86::MOV8rm), X86::R14B)
+                .addReg(TmpReg).addImm(1)
+                .addReg(0).addImm(0)
+                .addReg(0);
+
+            BuildMI(Parent, MI, Loc, TII->get(X86::MOV8mr))
+                .addReg(X86::RSP).addImm(1)
+                .addReg(0).addImm(0)
+                .addReg(0)
+                .addReg(X86::R14B);
+
+            restoreRegister(Parent, MI, Loc, X86::R14, "tmp_gpr2");
+
+            break;
+
+        case 2:
+            preserveRegister(Parent, MI, Loc, X86::R14, "tmp_gpr2");
+
+            // Immediate is arbitrary
+            BuildMI(Parent, MI, Loc, TII->get(X86::PUSH64i8))
+                .addImm(0);
+
+            BuildMI(Parent, MI, Loc, TII->get(X86::MOV16rm), X86::R14W)
+                .addReg(TmpReg).addImm(1)
+                .addReg(0).addImm(0)
+                .addReg(0);
+
+            BuildMI(Parent, MI, Loc, TII->get(X86::MOV16mr))
+                .addReg(X86::RSP).addImm(1)
+                .addReg(0).addImm(0)
+                .addReg(0)
+                .addReg(X86::R14W);
+
+            restoreRegister(Parent, MI, Loc, X86::R14, "tmp_gpr2");
+
+            break;
+
+        case 4:
+            preserveRegister(Parent, MI, Loc, X86::R14, "tmp_gpr2");
+
+            // Immediate is arbitrary
+            BuildMI(Parent, MI, Loc, TII->get(X86::PUSH64i8))
+                .addImm(0);
+
+            BuildMI(Parent, MI, Loc, TII->get(X86::MOV32rm), X86::R14D)
+                .addReg(TmpReg).addImm(1)
+                .addReg(0).addImm(0)
+                .addReg(0);
+
+            BuildMI(Parent, MI, Loc, TII->get(X86::MOV32mr))
+                .addReg(X86::RSP).addImm(1)
+                .addReg(0).addImm(0)
+                .addReg(0)
+                .addReg(X86::R14D);
+
+            restoreRegister(Parent, MI, Loc, X86::R14, "tmp_gpr2");
+
+            break;
+
+        case 8:
+        case 16:
+        case 32:
+            // PUSH (%TmpReg)
+            BuildMI(Parent, MI, Loc, TII->get(X86::PUSH64rmm), TmpReg)
+                .addImm(1).addReg(0)
+                .addImm(0).addReg(0);
+
+            if (width == 8) break;
+
+            BuildMI(Parent, MI, Loc, TII->get(X86::LEA64r), TmpReg)
+                .addReg(TmpReg).addImm(1)
+                .addReg(0).addImm(8)
+                .addReg(0);
+
+            BuildMI(Parent, MI, Loc, TII->get(X86::PUSH64i8))
+                .addImm(8);
+
+            // PUSH %TmpReg
+            BuildMI(Parent, MI, Loc, TII->get(X86::PUSH64r), TmpReg);
+
+            // PUSH (%TmpReg)
+            BuildMI(Parent, MI, Loc, TII->get(X86::PUSH64rmm), TmpReg)
+                .addImm(1).addReg(0)
+                .addImm(0).addReg(0);
+
+            if (width == 16) { LLVM_DEBUG(dbgs() << "  The store is 128-bit wide\n"); break; }
+
+            BuildMI(Parent, MI, Loc, TII->get(X86::LEA64r), TmpReg)
+                .addReg(TmpReg).addImm(1)
+                .addReg(0).addImm(8)
+                .addReg(0);
+
+            BuildMI(Parent, MI, Loc, TII->get(X86::PUSH64i8))
+                .addImm(8);
+
+            // PUSH %TmpReg
+            BuildMI(Parent, MI, Loc, TII->get(X86::PUSH64r), TmpReg);
+
+            // PUSH (%TmpReg)
+            BuildMI(Parent, MI, Loc, TII->get(X86::PUSH64rmm), TmpReg)
+                .addImm(1).addReg(0)
+                .addImm(0).addReg(0);
+
+            LLVM_DEBUG(dbgs() << "  The store is 256-bit wide\n");
+            break;
+
+        default:
+            llvm_unreachable("Unknown width");
+            break;
+    }
 
 
     // SSE stores are 128-bit wide
-    if (Desc.TSFlags >> X86II::SSEDomainShift & 3) { // NOLINT
+    /*if (Desc.TSFlags >> X86II::SSEDomainShift & 3) { // NOLINT
         LLVM_DEBUG(dbgs() << "  The store is 128-bit wide\n");
 
         // LEAQ 8(%TmpReg), %TmpReg
@@ -651,6 +774,9 @@ auto X86SpecFuzzPass::visitWrite(MachineInstr &MI, MachineBasicBlock &Parent) ->
             .addReg(TmpReg).addImm(1)
             .addReg(0).addImm(8)
             .addReg(0);
+
+        BuildMI(Parent, MI, Loc, TII->get(X86::PUSH64i8))
+            .addImm(8);
 
         // PUSH %TmpReg
         BuildMI(Parent, MI, Loc, TII->get(X86::PUSH64r), TmpReg);
@@ -659,7 +785,7 @@ auto X86SpecFuzzPass::visitWrite(MachineInstr &MI, MachineBasicBlock &Parent) ->
         BuildMI(Parent, MI, Loc, TII->get(X86::PUSH64rmm), TmpReg)
             .addImm(1).addReg(0)
             .addImm(0).addReg(0);
-    }
+    }*/
 
     preserveRegister(Parent, MI, Loc, X86::RSP, "checkpoint_sp");
     restoreRegister(Parent, MI, Loc, TmpReg, "tmp_gpr1");
@@ -685,6 +811,9 @@ auto X86SpecFuzzPass::visitPush(MachineInstr &MI, MachineBasicBlock &Parent) ->
 
     restoreRegister(Parent, MI, Loc, X86::RSP, "checkpoint_sp");
 
+    BuildMI(Parent, MI, Loc, TII->get(X86::PUSH64i8))
+        .addImm(8);
+
     // PUSH %TmpReg
     BuildMI(Parent, MI, Loc, TII->get(X86::PUSH64r), TmpReg);
 
@@ -1042,6 +1171,7 @@ auto X86SpecFuzzPass::getCallTargetType(MachineInstr &MI) -> X86SpecFuzzPass::Ca
         "__asan_set_shadow_f8",
 
         "__asan_frame_malloc_0",
+        "__asan_stack_malloc_0",
         "__asan_stack_malloc_1",
         "__asan_stack_malloc_2",
         "__asan_stack_malloc_3",
diff --git a/src/specfuzz_init.c b/src/specfuzz_init.c
index 0e9b7d4..639de4d 100644
--- a/src/specfuzz_init.c
+++ b/src/specfuzz_init.c
@@ -48,26 +48,48 @@ void specfuzz_handler(int signo, siginfo_t *siginfo, void *ucontext) {
 #if ENABLE_SANITY_CHECKS == 1
     if (inside_handler != 0) {
         fprintf(stderr, "\n[SF] Error: Fault inside the signal handler\n");
+#if SEED_NON_SPECULATIVE_ERRORS == 1
+        specfuzz_seed_input();
+#endif
         abort();
     }
     inside_handler = 1;
 
     if (nesting_level <= 0x0) {
         fprintf(stderr, "[SF] Error: Signal handler called outside speculation\n");
+#if SEED_NON_SPECULATIVE_ERRORS == 1
+        specfuzz_seed_input();
+#endif
         abort();
     }
 
     if (checkpoint_sp > &checkpoint_stack || checkpoint_sp < &checkpoint_stack_bottom) {
         fprintf(stderr, "[SF] Error: checkpoint_sp is corrupted\n");
+#if SEED_NON_SPECULATIVE_ERRORS == 1
+        specfuzz_seed_input();
+#endif
         abort();
     }
 
     if ((uint64_t *) uc_gregs[REG_RSP] <= &specfuzz_rtl_frame
         && (uint64_t *) uc_gregs[REG_RSP] >= &specfuzz_rtl_frame_bottom) {
         fprintf(stderr, "[SF] Error: a signal caught within the SpecFuzz runtime\n");
+#if SEED_NON_SPECULATIVE_ERRORS == 1
+        specfuzz_seed_input();
+#endif
         abort();
     }
+
+    if (in_rlbk) {
+        fprintf(stderr, "[SF] Error: a signal caught within SpecFuzz's rollback\n");
+#if SEED_NON_SPECULATIVE_ERRORS == 1
+        specfuzz_seed_input();
 #endif
+        abort();
+    }
+#endif
+
+
 
     if (siginfo->si_signo == SIGFPE) {
         STAT_INCREMENT(stat_signal_misc);
diff --git a/src/specfuzz_rtl.S b/src/specfuzz_rtl.S
index 4c5f637..ac8bbcc 100644
--- a/src/specfuzz_rtl.S
+++ b/src/specfuzz_rtl.S
@@ -23,8 +23,9 @@
 .extern stderr
 .extern specfuzz_cov_trace_pc
 .extern specfuzz_cov_vuln
+.extern specfuzz_seed_input
 
-.set CHECKPOINT_STACK_SIZE, (4096 * 25)
+.set CHECKPOINT_STACK_SIZE, (4096 * 64)
 .set SPECULATION_WINDOW, 250
 #ifndef MAX_NESTING_LEVEL
 .set MAX_NESTING_LEVEL, 1
@@ -146,6 +147,7 @@ asan_rtl_frame: .quad 0
 
 // Error messages
 error_checkpoint_stack_overflow: .string "[SF] Error: overflow of Checkpoint Stack\n"
+error_corrupted_checkpoint_stack: .string "[SF] Error: Corruption of Checkpoint Stack\n"
 error_branch_table_overflow: .string "[SF] Error: too many Branch Table collisions\n"
 asan_detected_real_overflow: .string "[SF] Error: That was a real, non-speculative overflow\n"
 error_corrupted_nesting: .string "[SF] Error: nesting_level is corrupted (negative)\n"
@@ -190,6 +192,9 @@ simulation_id: .quad 0
 debug_rollback_depth: .string "[SF] rlbk: 0x%llx %lld %lld %lld\n"
 #endif
 
+.globl in_rlbk
+in_rlbk: .quad 0
+
 //===------------------------------------------------------------------------===//
 // Checkpoint and rollback
 //===------------------------------------------------------------------------===//
@@ -477,6 +482,8 @@ specfuzz_rlbk:
     // Check that we're not overflowing
    movq checkpoint_sp, %rsp
     ASSERT %rsp jle $checkpoint_stack_bottom specfuzz_exit_state_overflow
+
+    movq $1, in_rlbk
 
     // Rewind the Store Log:
     // - First, a special case: a segfault might have been triggered right after
@@ -487,16 +494,59 @@ specfuzz_rlbk:
     je .L2
     movq (%rsp), %rbx
     movq 8(%rsp), %rcx
-    cmp %rbx, (%rcx)
-    jne .L1
-    addq $16, %rsp
+    movq 16(%rsp), %rdx
+
+    cmpq $8, %rdx
+    je .L12.quad_word
+    cmpq $4, %rdx
+    je .L12.double_word
+    cmpq $2, %rdx
+    je .L12.word
+    ASSERT %rdx jne $1 specfuzz_exit_state_corruption
+    jmp .L12.byte
+
+    .L12.quad_word:
+    cmp %rbx, (%rcx)
+    jne .L1
+    jmp .L13
+    .L12.double_word:
+    cmpl %ebx, (%rcx)
+    jne .L1
+    jmp .L13
+    .L12.word:
+    cmpw %bx, (%rcx)
+    jne .L1
+    jmp .L13
+    .L12.byte:
+    cmpb %bl, (%rcx)
+    jne .L1
+
+    .L13:
+    addq $24, %rsp
 
     // - now, the actual rewind
     .L1:
     cmp store_log_bp, %rsp
     je .L2
     popq %rbx   // value
     popq %rcx   // address
-    movq %rbx, (%rcx)
+    popq %rdx   // size
+    cmpq $8, %rdx
+    je .L1.quad_word
+    cmpq $4, %rdx
+    je .L1.double_word
+    cmpq $2, %rdx
+    je .L1.word
+    ASSERT %rdx jne $1 specfuzz_exit_state_corruption
+    jmp .L1.byte
+
+    .L1.quad_word:
+    movq %rbx, (%rcx)
+    .L1.double_word:
+    movl %ebx, (%rcx)
+    .L1.word:
+    movw %bx, (%rcx)
+    .L1.byte:
+    movb %bl, (%rcx)
     jmp .L1
     .L2:
@@ -559,6 +609,8 @@ specfuzz_rlbk:
     // Restore the original value of eflags
     pushq tmp_eflags
     popfq
+
+    movq $0, in_rlbk
     ret
 
 // Finish the simulation
@@ -779,20 +831,39 @@ specfuzz_cov_trace_pc_wrapper:
 /// specfuzz_exit_*: Exit with an error message
 ///
 specfuzz_exit_unknown_corruption:
+#if SEED_NON_SPECULATIVE_ERRORS == 1
+    call specfuzz_seed_input
+#endif
     movl $error_not_speculative, %edi
     jmp specfuzz_exit
 
 specfuzz_exit_state_overflow:
+#if SEED_NON_SPECULATIVE_ERRORS == 1
+    call specfuzz_seed_input
+#endif
     movl $error_checkpoint_stack_overflow, %edi
     jmp specfuzz_exit
 
 specfuzz_exit_asan_overflow:
+#if SEED_NON_SPECULATIVE_ERRORS == 1
+    call specfuzz_seed_input
+#endif
     movl $asan_detected_real_overflow, %edi
     jmp specfuzz_exit
 
 specfuzz_exit_corrupted_nesting_level:
+#if SEED_NON_SPECULATIVE_ERRORS == 1
+    call specfuzz_seed_input
+#endif
     movl $error_corrupted_nesting, %edi
     jmp specfuzz_exit
+
+specfuzz_exit_state_corruption:
+#if SEED_NON_SPECULATIVE_ERRORS == 1
+    call specfuzz_seed_input
+#endif
+    movl $error_corrupted_checkpoint_stack, %edi
+    jmp specfuzz_exit
 
 .type specfuzz_exit, @function
 specfuzz_exit:
diff --git a/src/specfuzz_rtl.h b/src/specfuzz_rtl.h
index d8cfcc2..33b6592 100644
--- a/src/specfuzz_rtl.h
+++ b/src/specfuzz_rtl.h
@@ -39,8 +39,11 @@ extern uint64_t stat_signal_misc;
 extern uint64_t stat_simulation_disables;
 extern uint64_t stat_skiped_due_to_disabled;
 
+extern uint64_t in_rlbk;
+
 extern void specfuzz_rlbk_forced(void);
 
+
 // Coverage
 void specfuzz_cov_init();
 __attribute__((weak)) __attribute__((preserve_most))
@@ -50,5 +53,6 @@ void specfuzz_cov_vuln(uintptr_t pc);
 __attribute__((weak)) __attribute__((preserve_most))
 struct map_entry_t *get_hash_map_entry(uintptr_t pc);
 
+void specfuzz_seed_input();
 
 #endif //SPECFUZZ_RTL_H
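Note (illustration only, not part of the patch): the C sketch below models the size-tagged store-log entries that the instrumented PUSH sequence now creates (size pushed first, then the address, then the saved value on top) and the size-aware rewind that specfuzz_rlbk performs in assembly. The struct and function names (store_log_entry, rewind_store_log) are invented for the example; the real log lives on the checkpoint stack and is walked by the .L1 loop.

#include <stdint.h>

/* One 24-byte checkpoint-stack record, ordered as it appears in memory
 * from the top of the stack downward. */
struct store_log_entry {
    uint64_t value;    /* saved memory contents; only the low `size` bytes are meaningful */
    uint64_t address;  /* location that was overwritten during the simulation */
    uint64_t size;     /* 1, 2, 4, or 8; wider stores are split into 8-byte entries */
};

/* C model of the .L1 rewind loop: walk entries from the top of the store
 * log down to store_log_bp and restore exactly `size` bytes for each one. */
static void rewind_store_log(struct store_log_entry *top,
                             struct store_log_entry *bottom) {
    for (; top != bottom; top++) {
        switch (top->size) {
        case 8: *(uint64_t *)top->address = top->value;           break;
        case 4: *(uint32_t *)top->address = (uint32_t)top->value; break;
        case 2: *(uint16_t *)top->address = (uint16_t)top->value; break;
        case 1: *(uint8_t  *)top->address = (uint8_t)top->value;  break;
        default:
            /* corrupted entry; the runtime bails out via specfuzz_exit_state_corruption */
            return;
        }
    }
}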
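A minimal sketch (not the patch itself) of the SEED_NON_SPECULATIVE_ERRORS path: report the error, optionally call the new specfuzz_seed_input() hook so honggfuzz keeps the offending input in its corpus, then abort. The report_and_abort() wrapper is hypothetical; the real patch inlines the same #if guard before each abort() in specfuzz_init.c and before each specfuzz_exit_* error label in specfuzz_rtl.S.

#include <stdio.h>
#include <stdlib.h>

/* Provided by the patched honggfuzz instrument.c */
extern void specfuzz_seed_input();

/* Hypothetical helper showing the guarded seeding pattern */
static void report_and_abort(const char *msg) {
    fprintf(stderr, "%s", msg);
#if SEED_NON_SPECULATIVE_ERRORS == 1
    /* Mark the current input as interesting so the fuzzer re-queues it,
     * keeping the non-speculative error reproducible from the corpus. */
    specfuzz_seed_input();
#endif
    abort();
}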