diff --git a/Makefile b/Makefile
index 0bed927..7553018 100755
--- a/Makefile
+++ b/Makefile
@@ -6,6 +6,7 @@ REPORT_WITH_OFFSET ?= 0
 ENABLE_COVERAGE ?= 1
 COVERAGE_MEMORY_ERRORS ?= 1
 COVERAGE_CONTROL_FLOW_ERRORS ?= 1
+SEED_NON_SPECULATIVE_ERRORS ?= 1
 ENABLE_SANITY_CHECKS ?= 1
 ENABLE_STATS ?= 0
 ENABLE_SEQUENTIAL_SIMULATION ?= 0
@@ -21,7 +22,7 @@ RUNTIME_CONFIGURATION := -DMAX_NESTING_LEVEL=$(MAX_NESTING_LEVEL)\
     -DENABLE_SANITY_CHECKS=$(ENABLE_SANITY_CHECKS)\
     -DENABLE_STATS=$(ENABLE_STATS) -DENABLE_SEQUENTIAL_SIMULATION=$(ENABLE_SEQUENTIAL_SIMULATION)\
     -DDUMP_COVERAGE_AT_EXIT=$(DUMP_COVERAGE_AT_EXIT) -DPRINT_ROLLABACK_STATS=$(PRINT_ROLLABACK_STATS)\
-    -DREPORT_CONTROL_FLOW_ERRORS=$(REPORT_CONTROL_FLOW_ERRORS)
+    -DREPORT_CONTROL_FLOW_ERRORS=$(REPORT_CONTROL_FLOW_ERRORS) -DSEED_NON_SPECULATIVE_ERRORS=$(SEED_NON_SPECULATIVE_ERRORS)
 
 # Paths
 LLVM_CONFIG ?= llvm-7.0.1-config
@@ -68,14 +69,14 @@ rebuild_llvm:
 	make -j -C $(LLVM_BUILD)
 
 install_specfuzz:
-	cp -u install/wrapper.sh /usr/bin/clang-sf
-	cp -u install/wrapper.sh /usr/bin/clang-sf++
-	sed -i -e 's:/clang$$:/clang++:g' /usr/bin/clang-sf++
+	sudo cp -u install/wrapper.sh /usr/bin/clang-sf
+	sudo cp -u install/wrapper.sh /usr/bin/clang-sf++
+	sudo sed -i -e 's:/clang$$:/clang++:g' /usr/bin/clang-sf++
 
 install_tools: analyzer hongg
 
 analyzer: postprocessing/analyzer.py
-	cp $< /usr/bin/analyzer
+	sudo cp $< /usr/bin/analyzer
 
 hongg: check_hongg_path patch_hongg rebuild_hongg
 
@@ -95,7 +96,7 @@ patch_hongg: $(HONGG_PATCH)
 
 rebuild_hongg:
 	CC=${CLANG} CFLAGS=-ggdb make -C $(HONGG_SRC) -j4
-	make -C $(HONGG_SRC) install
+	sudo make -C $(HONGG_SRC) install
 
 test:
 	cd tests && ./run.bats
diff --git a/install/patches/honggfuzz/fuzz.c b/install/patches/honggfuzz/fuzz.c
index 36d0a73..a6c4861 100644
--- a/install/patches/honggfuzz/fuzz.c
+++ b/install/patches/honggfuzz/fuzz.c
@@ -585,7 +585,7 @@ static void* fuzz_threadNew(void* arg) {
         map_entry_t entry = coverage_map_conflicts[i];
         if (entry.count == 0)
             continue;
-        uint64_t address = (entry.tag << COVERAGE_INDEX_WIDTH) + i;
+        uint64_t address = entry.tag;
         LOG_I("[SF], 0x%lx: %d", address, entry.count);
     }
     //hash_map_usage(run.global->feedback.feedbackMap);
diff --git a/install/patches/honggfuzz/instrument.c b/install/patches/honggfuzz/instrument.c
index f7383d0..e509cf2 100644
--- a/install/patches/honggfuzz/instrument.c
+++ b/install/patches/honggfuzz/instrument.c
@@ -99,6 +99,12 @@ void specfuzz_cov_vuln(uintptr_t pc) {
     }
 }
 
+// Adds the current input to the corpus
+void specfuzz_seed_input() {
+    // TODO: less lazy implementation
+    ATOMIC_PRE_INC_RELAXED(feedback->pidFeedbackPc[my_thread_no]);
+}
+
 __attribute__((preserve_most))
 void specfuzz_cov_trace_pc(uintptr_t pc) {
     // quick path - check the cache
@@ -158,6 +164,7 @@ static map_entry_t *get_hash_map_entry(uintptr_t pc) {
         // hash conflict
         map_entry_t *coverage_map_conflicts = &coverage_map[COVERAGE_MAP_HASHMAP_SIZE];
+        tag = pc;  // tag and pc are assumed to have the same width (uint64_t == unsigned long)
         do {
             if (entry->next == 0) {
                 // create a new entry
                 uint32_t top = feedback->cmpMapPcTop;
diff --git a/src/SpecFuzzPass.cpp b/src/SpecFuzzPass.cpp
index 3eca0c2..afc4a23 100644
--- a/src/SpecFuzzPass.cpp
+++ b/src/SpecFuzzPass.cpp
@@ -594,6 +594,8 @@ auto X86SpecFuzzPass::visitReturn(MachineInstr &MI, MachineBasicBlock &Parent) -
 /// | MOVQ %r15, tmp_gpr1        // reserve the value of r15
 /// | LEAQ 8(%rsp), %r15         // store the address
 /// | MOVQ checkpoint_sp, %rsp
+/// | PUSH $0
+/// | PUSH $WRITING_WIDTH
 /// | PUSH %r15
 /// | PUSH (%r15)                // store the original value
 /// | MOVQ %rsp, checkpoint_sp   // restore stack
@@ -633,17 +635,154 @@ auto X86SpecFuzzPass::visitWrite(MachineInstr &MI, MachineBasicBlock &Parent) ->
         .add(MI.getOperand(MemRefBegin + X86::AddrSegmentReg));
     restoreRegister(Parent, MI, Loc, X86::RSP, "checkpoint_sp");
-    
+
+    MachineMemOperand *MMO = *MI.memoperands_begin();
+    uint64_t width = MMO->getSize();
+
+    LLVM_DEBUG(dbgs() << "Store's width: " << width << "\n");
+
+    // push an arbitrary value to keep the checkpoint stack 16-byte aligned
+    BuildMI(Parent, MI, Loc, TII->get(X86::PUSH64i8))
+        .addImm(0);
+
+    BuildMI(Parent, MI, Loc, TII->get(X86::PUSH64i8))
+        .addImm((width > 8) ? 8 : width);
+
     // PUSH %TmpReg
     BuildMI(Parent, MI, Loc, TII->get(X86::PUSH64r), TmpReg);
-
-    // PUSH (%TmpReg)
-    BuildMI(Parent, MI, Loc, TII->get(X86::PUSH64rmm), TmpReg)
-        .addImm(1).addReg(0)
-        .addImm(0).addReg(0);
+
+
+    switch (width) {
+    case 1:
+        preserveRegister(Parent, MI, Loc, X86::R14, "tmp_gpr2");
+
+        // push a placeholder; it is overwritten with the logged value below
+        BuildMI(Parent, MI, Loc, TII->get(X86::PUSH64i8))
+            .addImm(0);
+
+        BuildMI(Parent, MI, Loc, TII->get(X86::MOV8rm), X86::R14B)
+            .addReg(TmpReg).addImm(1)
+            .addReg(0).addImm(0)
+            .addReg(0);
+
+        BuildMI(Parent, MI, Loc, TII->get(X86::MOV8mr))
+            .addReg(X86::RSP).addImm(1)
+            .addReg(0).addImm(0)
+            .addReg(0)
+            .addReg(X86::R14B);
+
+        restoreRegister(Parent, MI, Loc, X86::R14, "tmp_gpr2");
+
+        break;
+
+    case 2:
+        preserveRegister(Parent, MI, Loc, X86::R14, "tmp_gpr2");
+
+        // push a placeholder; it is overwritten with the logged value below
+        BuildMI(Parent, MI, Loc, TII->get(X86::PUSH64i8))
+            .addImm(0);
+
+        BuildMI(Parent, MI, Loc, TII->get(X86::MOV16rm), X86::R14W)
+            .addReg(TmpReg).addImm(1)
+            .addReg(0).addImm(0)
+            .addReg(0);
+
+        BuildMI(Parent, MI, Loc, TII->get(X86::MOV16mr))
+            .addReg(X86::RSP).addImm(1)
+            .addReg(0).addImm(0)
+            .addReg(0)
+            .addReg(X86::R14W);
+
+        restoreRegister(Parent, MI, Loc, X86::R14, "tmp_gpr2");
+
+        break;
+
+    case 4:
+        preserveRegister(Parent, MI, Loc, X86::R14, "tmp_gpr2");
+
+        // push a placeholder; it is overwritten with the logged value below
+        BuildMI(Parent, MI, Loc, TII->get(X86::PUSH64i8))
+            .addImm(0);
+
+        BuildMI(Parent, MI, Loc, TII->get(X86::MOV32rm), X86::R14D)
+            .addReg(TmpReg).addImm(1)
+            .addReg(0).addImm(0)
+            .addReg(0);
+
+        BuildMI(Parent, MI, Loc, TII->get(X86::MOV32mr))
+            .addReg(X86::RSP).addImm(1)
+            .addReg(0).addImm(0)
+            .addReg(0)
+            .addReg(X86::R14D);
+
+        restoreRegister(Parent, MI, Loc, X86::R14, "tmp_gpr2");
+
+        break;
+
+    case 8:
+    case 16:
+    case 32:
+        // PUSH (%TmpReg)
+        BuildMI(Parent, MI, Loc, TII->get(X86::PUSH64rmm), TmpReg)
+            .addImm(1).addReg(0)
+            .addImm(0).addReg(0);
+
+        if (width == 8) break;
+
+        // otherwise this is an SSE or AVX store; repeat the logging for each quadword
+
+        BuildMI(Parent, MI, Loc, TII->get(X86::LEA64r), TmpReg)
+            .addReg(TmpReg).addImm(1)
+            .addReg(0).addImm(8)
+            .addReg(0);
+
+        // push an arbitrary value to keep the checkpoint stack 16-byte aligned
+        BuildMI(Parent, MI, Loc, TII->get(X86::PUSH64i8))
+            .addImm(0);
+
+        BuildMI(Parent, MI, Loc, TII->get(X86::PUSH64i8))
+            .addImm(8);
+
+        // PUSH %TmpReg
+        BuildMI(Parent, MI, Loc, TII->get(X86::PUSH64r), TmpReg);
+
+        // PUSH (%TmpReg)
+        BuildMI(Parent, MI, Loc, TII->get(X86::PUSH64rmm), TmpReg)
+            .addImm(1).addReg(0)
+            .addImm(0).addReg(0);
+
+        if (width == 16) { LLVM_DEBUG(dbgs() << " The store is 128-bit wide\n"); break; }
+
+        BuildMI(Parent, MI, Loc, TII->get(X86::LEA64r), TmpReg)
+            .addReg(TmpReg).addImm(1)
+            .addReg(0).addImm(8)
+            .addReg(0);
+
+        // push an arbitrary value to keep the checkpoint stack 16-byte aligned
+        BuildMI(Parent, MI, Loc, TII->get(X86::PUSH64i8))
+            .addImm(0);
+
+        BuildMI(Parent, MI, Loc, TII->get(X86::PUSH64i8))
+            .addImm(8);
+
+        // PUSH %TmpReg
+        BuildMI(Parent, MI, Loc, TII->get(X86::PUSH64r), TmpReg);
+
+        // PUSH (%TmpReg)
+        BuildMI(Parent, MI, Loc, TII->get(X86::PUSH64rmm), TmpReg)
+            .addImm(1).addReg(0)
+            .addImm(0).addReg(0);
+
+        LLVM_DEBUG(dbgs() << " The store is 256-bit wide\n");
+        break;
+
+    default:
+        llvm_unreachable("Unknown width");
+        break;
+    }
 
     // SSE stores are 128-bit wide
-    if (Desc.TSFlags >> X86II::SSEDomainShift & 3) { // NOLINT
+    /*if (Desc.TSFlags >> X86II::SSEDomainShift & 3) { // NOLINT
         LLVM_DEBUG(dbgs() << " The store is 128-bit wide\n");
 
         // LEAQ 8(%TmpReg), %TmpReg
@@ -651,6 +790,9 @@ auto X86SpecFuzzPass::visitWrite(MachineInstr &MI, MachineBasicBlock &Parent) ->
             .addReg(TmpReg).addImm(1)
             .addReg(0).addImm(8)
             .addReg(0);
+
+        BuildMI(Parent, MI, Loc, TII->get(X86::PUSH64i8))
+            .addImm(8);
 
         // PUSH %TmpReg
         BuildMI(Parent, MI, Loc, TII->get(X86::PUSH64r), TmpReg);
@@ -659,7 +801,7 @@ auto X86SpecFuzzPass::visitWrite(MachineInstr &MI, MachineBasicBlock &Parent) ->
         BuildMI(Parent, MI, Loc, TII->get(X86::PUSH64rmm), TmpReg)
            .addImm(1).addReg(0)
            .addImm(0).addReg(0);
-    }
+    }*/
 
     preserveRegister(Parent, MI, Loc, X86::RSP, "checkpoint_sp");
     restoreRegister(Parent, MI, Loc, TmpReg, "tmp_gpr1");
@@ -684,6 +826,13 @@ auto X86SpecFuzzPass::visitPush(MachineInstr &MI, MachineBasicBlock &Parent) ->
         .addReg(0);
     restoreRegister(Parent, MI, Loc, X86::RSP, "checkpoint_sp");
+
+    // push an arbitrary value to keep the checkpoint stack 16-byte aligned
+    BuildMI(Parent, MI, Loc, TII->get(X86::PUSH64i8))
+        .addImm(0);
+
+    BuildMI(Parent, MI, Loc, TII->get(X86::PUSH64i8))
+        .addImm(8);
 
     // PUSH %TmpReg
     BuildMI(Parent, MI, Loc, TII->get(X86::PUSH64r), TmpReg);
@@ -1042,6 +1191,7 @@ auto X86SpecFuzzPass::getCallTargetType(MachineInstr &MI) -> X86SpecFuzzPass::Ca
         "__asan_set_shadow_f8",
 
         "__asan_frame_malloc_0",
+        "__asan_stack_malloc_0",
         "__asan_stack_malloc_1",
         "__asan_stack_malloc_2",
         "__asan_stack_malloc_3",
diff --git a/src/specfuzz_init.c b/src/specfuzz_init.c
index 0e9b7d4..6a8dae8 100644
--- a/src/specfuzz_init.c
+++ b/src/specfuzz_init.c
@@ -48,27 +48,57 @@ void specfuzz_handler(int signo, siginfo_t *siginfo, void *ucontext) {
 #if ENABLE_SANITY_CHECKS == 1
     if (inside_handler != 0) {
         fprintf(stderr, "\n[SF] Error: Fault inside the signal handler\n");
+#if SEED_NON_SPECULATIVE_ERRORS == 1
+        specfuzz_seed_input();
+#endif
         abort();
     }
     inside_handler = 1;
 
     if (nesting_level <= 0x0) {
         fprintf(stderr, "[SF] Error: Signal handler called outside speculation\n");
+#if SEED_NON_SPECULATIVE_ERRORS == 1
+        specfuzz_seed_input();
+#endif
         abort();
     }
 
     if (checkpoint_sp > &checkpoint_stack ||
         checkpoint_sp < &checkpoint_stack_bottom) {
         fprintf(stderr, "[SF] Error: checkpoint_sp is corrupted\n");
+#if SEED_NON_SPECULATIVE_ERRORS == 1
+        specfuzz_seed_input();
+#endif
         abort();
     }
 
     if ((uint64_t *) uc_gregs[REG_RSP] <= &specfuzz_rtl_frame
         && (uint64_t *) uc_gregs[REG_RSP] >= &specfuzz_rtl_frame_bottom) {
         fprintf(stderr, "[SF] Error: a signal caught within the SpecFuzz runtime\n");
+#if SEED_NON_SPECULATIVE_ERRORS == 1
+        specfuzz_seed_input();
+#endif
+        abort();
+    }
+
+    if (specfuzz_executing_rollback) {
+        fprintf(stderr, "[SF] Error: a signal caught within SpecFuzz's rollback\n");
+#if SEED_NON_SPECULATIVE_ERRORS == 1
+        specfuzz_seed_input();
+#endif
+        abort();
+    }
+
+    if (specfuzz_executing_checkpoint) {
+        fprintf(stderr, "[SF] Error: a signal caught within SpecFuzz's checkpoint\n");
+#if SEED_NON_SPECULATIVE_ERRORS == 1
+        specfuzz_seed_input();
+#endif
         abort();
     }
 #endif
+
+
     if (siginfo->si_signo == SIGFPE) {
         STAT_INCREMENT(stat_signal_misc);
     } else if (context->uc_mcontext.gregs[REG_RSP] >= (long long) &asan_rtl_frame_bottom &&
diff --git a/src/specfuzz_rtl.S b/src/specfuzz_rtl.S
index 4c5f637..edd3d49 100644
--- a/src/specfuzz_rtl.S
+++ b/src/specfuzz_rtl.S
@@ -23,8 +23,9 @@
 .extern stderr
 .extern specfuzz_cov_trace_pc
 .extern specfuzz_cov_vuln
+.extern specfuzz_seed_input
 
-.set CHECKPOINT_STACK_SIZE, (4096 * 25)
+.set CHECKPOINT_STACK_SIZE, (4096 * 64)
 .set SPECULATION_WINDOW, 250
 #ifndef MAX_NESTING_LEVEL
 .set MAX_NESTING_LEVEL, 1
@@ -146,6 +147,7 @@ asan_rtl_frame: .quad 0
 
 // Error messages
 error_checkpoint_stack_overflow: .string "[SF] Error: overflow of Checkpoint Stack\n"
+error_corrupted_checkpoint_stack: .string "[SF] Error: Corruption of Checkpoint Stack\n"
 error_branch_table_overflow: .string "[SF] Error: too many Branch Table collisions\n"
 asan_detected_real_overflow: .string "[SF] Error: That was a real, non-speculative overflow\n"
 error_corrupted_nesting: .string "[SF] Error: nesting_level is corrupted (negative)\n"
@@ -190,6 +192,12 @@ simulation_id: .quad 0
 debug_rollback_depth: .string "[SF] rlbk: 0x%llx %lld %lld %lld\n"
 #endif
 
+.globl specfuzz_executing_rollback
+specfuzz_executing_rollback: .quad 0
+
+.globl specfuzz_executing_checkpoint
+specfuzz_executing_checkpoint: .quad 0
+
 //===------------------------------------------------------------------------===//
 // Checkpoint and rollback
 //===------------------------------------------------------------------------===//
@@ -206,6 +214,7 @@ debug_rollback_depth: .string "[SF] rlbk: 0x%llx %lld %lld
 .globl specfuzz_chkp
 .type specfuzz_chkp, @function
 specfuzz_chkp:
+    movq $1, specfuzz_executing_checkpoint
     push %r15
     movq %r15, tmp_gpr1
@@ -214,13 +223,15 @@ specfuzz_chkp:
     xorq %r15, %r15
     movq (%rsp), %r15
     movq %r15, tmp_eflags
-    
+
     // do not start a simulation if it is globally disabled
     cmpq $0, disable_speculation
     jg specfuzz_chkp.disabled_simulation
 
     // check if it's time to rollback
+    movq $0, specfuzz_executing_checkpoint
     call specfuzz_rlbk_if_done
+    movq $1, specfuzz_executing_checkpoint
 
     ASSERT nesting_level jl $0 specfuzz_exit_corrupted_nesting_level
     // do not start a new simulation if we've reached the max nesting depth
@@ -353,13 +364,16 @@ specfuzz_chkp:
     // the next instruction, we'll also skip the restoration to the application stack.
     // Thus, we have to do it here
     movq current_rsp, %rsp
-    
+
+    movq $0, specfuzz_executing_checkpoint
+
     // Return
     jmpq *simulation_start_address
 
 specfuzz_chkp.no_simulation:
     popfq
     popq %r15
+    movq $0, specfuzz_executing_checkpoint
     ret
 
 specfuzz_chkp.disabled_simulation:
@@ -370,6 +384,7 @@ specfuzz_chkp.disabled_simulation:
 
     popfq
     popq %r15
+    movq $0, specfuzz_executing_checkpoint
     ret
 
 
@@ -477,26 +492,76 @@ specfuzz_rlbk:
     // Check that we're not overflowing
     movq checkpoint_sp, %rsp
     ASSERT %rsp jle $checkpoint_stack_bottom specfuzz_exit_state_overflow
+
+    movq $1, specfuzz_executing_rollback
 
     // Rewind the Store Log:
     //  - First, a special case: a segfault might have been triggered right after
     //    the checkpoint, if the page is labeled as read-only
     //    Thus, attempting to restore the value will cause another segfault
     //    In this case, ignore the broken entry:  checkpoint_sp++
+    //  The Store Log consists of 4-quadword entries: (padding, write_width, address, value)
+    //  The rewind below dispatches on the write_width value (loaded into %rdx)
+    .L14:
     cmp store_log_bp, %rsp
     je .L2
     movq (%rsp), %rbx
     movq 8(%rsp), %rcx
-    cmp %rbx, (%rcx)
-    jne .L1
-    addq $16, %rsp
+    movq 16(%rsp), %rdx
+
+    cmpq $8, %rdx
+    je .L12.quad_word
+    cmpq $4, %rdx
+    je .L12.double_word
+    cmpq $2, %rdx
+    je .L12.word
+    ASSERT %rdx jne $1 specfuzz_exit_state_corruption
+    jmp .L12.byte
+
+    .L12.quad_word:
+    cmp %rbx, (%rcx)
+    jne .L1
+    addq $32, %rsp
+    jmp .L14
+    .L12.double_word:
+    cmpl %ebx, (%rcx)
+    jne .L1
+    jmp .L13
+    .L12.word:
+    cmpw %bx, (%rcx)
+    jne .L1
+    jmp .L13
+    .L12.byte:
+    cmpb %bl, (%rcx)
+    jne .L1
+
+    .L13:
+    addq $32, %rsp
 
     //  - now, the actual rewind
     .L1:
     cmp store_log_bp, %rsp
     je .L2
     popq %rbx // value
     popq %rcx // address
-    movq %rbx, (%rcx)
+    popq %rdx // size
+    cmpq $8, %rdx
+    je .L1.quad_word
+    cmpq $4, %rdx
+    je .L1.double_word
+    cmpq $2, %rdx
+    je .L1.word
+    ASSERT %rdx jne $1 specfuzz_exit_state_corruption
+    jmp .L1.byte
+
+    .L1.quad_word:
+    movq %rbx, (%rcx)  // falls through; the narrower stores below rewrite the same low bytes
+    .L1.double_word:
+    movl %ebx, (%rcx)
+    .L1.word:
+    movw %bx, (%rcx)
+    .L1.byte:
+    movb %bl, (%rcx)
+    addq $8, %rsp  // skip the alignment padding
     jmp .L1
 
     .L2:
@@ -559,6 +624,8 @@ specfuzz_rlbk:
     // Restore the original value of eflags
     pushq tmp_eflags
     popfq
+
+    movq $0, specfuzz_executing_rollback
     ret
 
 // Finish the simulation
@@ -779,20 +846,39 @@ specfuzz_cov_trace_pc_wrapper:
 /// specfuzz_exit_*: Exit with an error message
 ///
 specfuzz_exit_unknown_corruption:
+#if SEED_NON_SPECULATIVE_ERRORS == 1
+    call specfuzz_seed_input
+#endif
     movl $error_not_speculative, %edi
     jmp specfuzz_exit
 
 specfuzz_exit_state_overflow:
+#if SEED_NON_SPECULATIVE_ERRORS == 1
+    call specfuzz_seed_input
+#endif
     movl $error_checkpoint_stack_overflow, %edi
     jmp specfuzz_exit
 
 specfuzz_exit_asan_overflow:
+#if SEED_NON_SPECULATIVE_ERRORS == 1
+    call specfuzz_seed_input
+#endif
     movl $asan_detected_real_overflow, %edi
     jmp specfuzz_exit
 
 specfuzz_exit_corrupted_nesting_level:
+#if SEED_NON_SPECULATIVE_ERRORS == 1
+    call specfuzz_seed_input
+#endif
     movl $error_corrupted_nesting, %edi
     jmp specfuzz_exit
+
+specfuzz_exit_state_corruption:
+#if SEED_NON_SPECULATIVE_ERRORS == 1
+    call specfuzz_seed_input
+#endif
+    movl $error_corrupted_checkpoint_stack, %edi
+    jmp specfuzz_exit
 
 .type specfuzz_exit, @function
 specfuzz_exit:
diff --git a/src/specfuzz_rtl.h b/src/specfuzz_rtl.h
index d8cfcc2..0cc0d4b 100644
--- a/src/specfuzz_rtl.h
+++ b/src/specfuzz_rtl.h
@@ -39,8 +39,12 @@ extern uint64_t stat_signal_misc;
 extern uint64_t stat_simulation_disables;
 extern uint64_t stat_skiped_due_to_disabled;
 
+extern uint64_t specfuzz_executing_rollback;
+extern uint64_t specfuzz_executing_checkpoint;
+
 extern void specfuzz_rlbk_forced(void);
+
 
 // Coverage
 void specfuzz_cov_init();
 __attribute__((weak)) __attribute__((preserve_most))
@@ -50,5 +54,6 @@ void specfuzz_cov_vuln(uintptr_t pc);
 __attribute__((weak)) __attribute__((preserve_most))
 struct map_entry_t *get_hash_map_entry(uintptr_t pc);
 
+void specfuzz_seed_input();
 
 #endif //SPECFUZZ_RTL_H
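
Note (illustration only, not part of the patch): the width-aware checkpointing above logs every speculative store as four quadwords pushed onto the checkpoint stack (padding, write width, address, original value), splitting SSE/AVX stores into multiple 8-byte entries, and the rollback in specfuzz_rtl.S replays the log with a store of the recorded width. The C sketch below mirrors that rewind loop; the names (store_log_entry_t, rollback_store_log) and the contiguous-array layout are assumptions made for illustration, not part of the SpecFuzz runtime.

#include <stdint.h>

// One Store Log entry, fields listed in push order (hypothetical C view of the layout).
typedef struct {
    uint64_t padding;  // keeps the checkpoint stack 16-byte aligned
    uint64_t width;    // 1, 2, 4, or 8 bytes logged by this entry
    uint64_t address;  // location that was speculatively overwritten
    uint64_t value;    // original bytes, kept in the low part of the quadword
} store_log_entry_t;

// Restores memory from the log, newest entry first, analogous to the .L1 loop above.
static void rollback_store_log(store_log_entry_t *newest, store_log_entry_t *end) {
    for (store_log_entry_t *e = newest; e != end; e++) {
        switch (e->width) {
        case 8: *(uint64_t *) (uintptr_t) e->address = e->value; break;
        case 4: *(uint32_t *) (uintptr_t) e->address = (uint32_t) e->value; break;
        case 2: *(uint16_t *) (uintptr_t) e->address = (uint16_t) e->value; break;
        default: *(uint8_t *) (uintptr_t) e->address = (uint8_t) e->value; break;
        }
    }
}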