; Regression test: LVI (Load Value Injection) hardening of inline assembly.
; Verifies that hardened loads get a trailing lfence (X86 prefix) and that
; non-hardenable instructions emit a warning (WARN prefix).
; RUN: llc -verify-machineinstrs -mtriple=x86_64-unknown -mattr=+lvi-load-hardening -mattr=+lvi-cfi -x86-experimental-lvi-inline-asm-hardening < %s -o %t.out 2> %t.err
; RUN: FileCheck %s --check-prefix=X86 < %t.out
; RUN: FileCheck %s --check-prefix=WARN < %t.err

; Test module-level assembly
module asm "pop %rbx"
module asm "ret"
; WARN: warning: Instruction may be vulnerable to LVI
; WARN-NEXT: ret
; WARN-NEXT: ^
; WARN-NEXT: note: See https://software.intel.com/security-software-guidance/insights/deep-dive-load-value-injection#specialinstructions for more information
; Function Attrs: noinline nounwind optnone uwtable
define dso_local void @test_inline_asm() {
entry:
; X86-LABEL: test_inline_asm:
  call void asm sideeffect "mov 0x3fed(%rip),%rax", "~{dirflag},~{fpsr},~{flags}"() #1
; X86: movq 16365(%rip), %rax
; X86-NEXT: lfence
  call void asm sideeffect "movdqa 0x0(%rip),%xmm0", "~{dirflag},~{fpsr},~{flags}"() #1
; X86: movdqa (%rip), %xmm0
; X86-NEXT: lfence
  call void asm sideeffect "movslq 0x3e5d(%rip),%rbx", "~{dirflag},~{fpsr},~{flags}"() #1
; X86: movslq 15965(%rip), %rbx
; X86-NEXT: lfence
  call void asm sideeffect "mov (%r12,%rax,8),%rax", "~{dirflag},~{fpsr},~{flags}"() #1
; X86: movq (%r12,%rax,8), %rax
; X86-NEXT: lfence
  call void asm sideeffect "movq (24)(%rsi), %r11", "~{dirflag},~{fpsr},~{flags}"() #1
; X86: movq 24(%rsi), %r11
; X86-NEXT: lfence
  call void asm sideeffect "cmove %r12,%rax", "~{dirflag},~{fpsr},~{flags}"() #1
; X86: cmoveq %r12, %rax
; X86-NOT: lfence
  call void asm sideeffect "cmove (%r12),%rax", "~{dirflag},~{fpsr},~{flags}"() #1
; X86: cmoveq (%r12), %rax
; X86-NEXT: lfence
  call void asm sideeffect "pop %rbx", "~{dirflag},~{fpsr},~{flags}"() #1
; X86: popq %rbx
; X86-NEXT: lfence
  call void asm sideeffect "popq %rbx", "~{dirflag},~{fpsr},~{flags}"() #1
; X86: popq %rbx
; X86-NEXT: lfence
  call void asm sideeffect "xchg (%r12),%rax", "~{dirflag},~{fpsr},~{flags}"() #1
; X86: xchgq %rax, (%r12)
; X86-NEXT: lfence
  call void asm sideeffect "cmpxchg %r12,(%rax)", "~{dirflag},~{fpsr},~{flags}"() #1
; X86: cmpxchgq %r12, (%rax)
; X86-NEXT: lfence
  call void asm sideeffect "vpxor (%rcx,%rdx,1),%ymm1,%ymm0", "~{dirflag},~{fpsr},~{flags}"() #1
; X86: vpxor (%rcx,%rdx), %ymm1, %ymm0
; X86-NEXT: lfence
  call void asm sideeffect "vpmuludq 0x20(%rsi),%ymm0,%ymm12", "~{dirflag},~{fpsr},~{flags}"() #1
; X86: vpmuludq 32(%rsi), %ymm0, %ymm12
; X86-NEXT: lfence
  call void asm sideeffect "vpexpandq 0x40(%rdi),%zmm8{%k2}{z}", "~{dirflag},~{fpsr},~{flags}"() #1
; X86: vpexpandq 64(%rdi), %zmm8 {%k2} {z}
; X86-NEXT: lfence
  call void asm sideeffect "addq (%r12),%rax", "~{dirflag},~{fpsr},~{flags}"() #1
; X86: addq (%r12), %rax
; X86-NEXT: lfence
  call void asm sideeffect "subq Lpoly+0(%rip), %rax", "~{dirflag},~{fpsr},~{flags}"() #1
; X86: subq Lpoly+0(%rip), %rax
; X86-NEXT: lfence
  call void asm sideeffect "adcq %r12,(%rax)", "~{dirflag},~{fpsr},~{flags}"() #1
; X86: adcq %r12, (%rax)
; X86-NEXT: lfence
  call void asm sideeffect "negq (%rax)", "~{dirflag},~{fpsr},~{flags}"() #1
; X86: negq (%rax)
; X86-NEXT: lfence
  call void asm sideeffect "incq %rax", "~{dirflag},~{fpsr},~{flags}"() #1
; X86: incq %rax
; X86-NOT: lfence
  call void asm sideeffect "mulq (%rax)", "~{dirflag},~{fpsr},~{flags}"() #1
; X86: mulq (%rax)
; X86-NEXT: lfence
  call void asm sideeffect "imulq (%rax),%rdx", "~{dirflag},~{fpsr},~{flags}"() #1
; X86: imulq (%rax), %rdx
; X86-NEXT: lfence
  call void asm sideeffect "shlq $$1,(%rax)", "~{dirflag},~{fpsr},~{flags}"() #1
; X86: shlq (%rax)
; X86-NEXT: lfence
  call void asm sideeffect "shrq $$1,(%rax)", "~{dirflag},~{fpsr},~{flags}"() #1
; X86: shrq (%rax)
; X86-NEXT: lfence
  call void asm sideeffect "repz cmpsb %es:(%rdi),%ds:(%rsi)", "~{dirflag},~{fpsr},~{flags}"() #1
; WARN: warning: Instruction may be vulnerable to LVI
; WARN-NEXT: repz cmpsb %es:(%rdi),%ds:(%rsi)
; WARN-NEXT: ^
; WARN-NEXT: note: See https://software.intel.com/security-software-guidance/insights/deep-dive-load-value-injection#specialinstructions for more information
; X86: rep cmpsb %es:(%rdi), %ds:(%rsi)
; X86-NOT: lfence
  call void asm sideeffect "repnz scasb", "~{dirflag},~{fpsr},~{flags}"() #1
; WARN: warning: Instruction may be vulnerable to LVI
; WARN-NEXT: repnz scasb
; WARN-NEXT: ^
; WARN-NEXT: note: See https://software.intel.com/security-software-guidance/insights/deep-dive-load-value-injection#specialinstructions for more information
; X86: repne scasb %es:(%rdi), %al
; X86-NOT: lfence
  call void asm sideeffect "repnz", ""() #1
; WARN: warning: Instruction may be vulnerable to LVI
; WARN-NEXT: repnz
; WARN-NEXT: ^
; WARN-NEXT: note: See https://software.intel.com/security-software-guidance/insights/deep-dive-load-value-injection#specialinstructions for more information
  call void asm sideeffect "pinsrw $$0x6,(%eax),%xmm0", "~{dirflag},~{fpsr},~{flags}"() #1
; X86: pinsrw $6, (%eax), %xmm0
; X86-NEXT: lfence
  call void asm sideeffect "ret", "~{dirflag},~{fpsr},~{flags}"() #1
; WARN: warning: Instruction may be vulnerable to LVI
; WARN-NEXT: ret
; WARN-NEXT: ^
; WARN-NEXT: note: See https://software.intel.com/security-software-guidance/insights/deep-dive-load-value-injection#specialinstructions for more information
; X86: retq
; X86-NOT: lfence
  call void asm sideeffect "ret $$8", "~{dirflag},~{fpsr},~{flags}"() #1
; WARN: warning: Instruction may be vulnerable to LVI
; WARN-NEXT: ret $8
; WARN-NEXT: ^
; WARN-NEXT: note: See https://software.intel.com/security-software-guidance/insights/deep-dive-load-value-injection#specialinstructions for more information
; X86: retq $8
; X86-NOT: lfence
  call void asm sideeffect "jmpq *(%rdx)", "~{dirflag},~{fpsr},~{flags}"() #1
; WARN: warning: Instruction may be vulnerable to LVI
; WARN-NEXT: jmpq *(%rdx)
; WARN-NEXT: ^
; WARN-NEXT: note: See https://software.intel.com/security-software-guidance/insights/deep-dive-load-value-injection#specialinstructions for more information
; X86: jmpq *(%rdx)
; X86-NOT: lfence
  call void asm sideeffect "jmpq *0x100(%rdx)", "~{dirflag},~{fpsr},~{flags}"() #1
; WARN: warning: Instruction may be vulnerable to LVI
; WARN-NEXT: jmpq *0x100(%rdx)
; WARN-NEXT: ^
; WARN-NEXT: note: See https://software.intel.com/security-software-guidance/insights/deep-dive-load-value-injection#specialinstructions for more information
; X86: jmpq *256(%rdx)
; X86-NOT: lfence
  call void asm sideeffect "callq *200(%rdx)", "~{dirflag},~{fpsr},~{flags}"() #1
; WARN: warning: Instruction may be vulnerable to LVI
; WARN-NEXT: callq *200(%rdx)
; WARN-NEXT: ^
; WARN-NEXT: note: See https://software.intel.com/security-software-guidance/insights/deep-dive-load-value-injection#specialinstructions for more information
; X86: callq *200(%rdx)
; X86-NOT: lfence
  call void asm sideeffect "fldt 0x8(%rbp)", "~{dirflag},~{fpsr},~{flags}"() #1
; X86: fldt 8(%rbp)
; X86-NEXT: lfence
  call void asm sideeffect "fld %st(0)", "~{dirflag},~{fpsr},~{flags}"() #1
; X86: fld %st(0)
; X86-NOT: lfence
; Test assembler macros
  call void asm sideeffect ".macro mplus1 x\0Aincq (\5Cx)\0A.endm\0Amplus1 %rcx", "~{dirflag},~{fpsr},~{flags}"() #1
; X86: incq (%rcx)
; X86-NEXT: lfence
  ret void
}

attributes #1 = { nounwind }