// SPDX-License-Identifier: GPL-2.0
/*
 * This is for all the tests related to logic bugs (e.g. bad dereferences,
 * bad alignment, bad loops, bad locking, bad scheduling, deep stacks, and
 * lockups) along with other things that don't fit well into existing LKDTM
 * test source files.
 */
#include "lkdtm.h"
#include <linux/list.h>
#include <linux/sched.h>
#include <linux/sched/signal.h>
#include <linux/sched/task_stack.h>
#include <linux/uaccess.h>
#include <linux/slab.h>

#if IS_ENABLED(CONFIG_X86_32) && !IS_ENABLED(CONFIG_UML)
#include <asm/desc.h>
#endif

struct lkdtm_list {
        struct list_head node;
};

/*
 * Make sure our attempts to overrun the kernel stack don't trigger
 * a compiler warning when CONFIG_FRAME_WARN is set. Then make sure we
 * recurse past the end of THREAD_SIZE by default.
 */
#if defined(CONFIG_FRAME_WARN) && (CONFIG_FRAME_WARN > 0)
#define REC_STACK_SIZE (_AC(CONFIG_FRAME_WARN, UL) / 2)
#else
#define REC_STACK_SIZE (THREAD_SIZE / 8)
#endif
#define REC_NUM_DEFAULT ((THREAD_SIZE / REC_STACK_SIZE) * 2)

static int recur_count = REC_NUM_DEFAULT;

static DEFINE_SPINLOCK(lock_me_up);

/*
 * Make sure the compiler does not optimize this function or stack frame away:
 * - function marked noinline
 * - stack variables are marked volatile
 * - stack variables are written (memset()) and read (pr_info())
 * - function has external effects (pr_info())
 */
static int noinline recursive_loop(int remaining)
{
        volatile char buf[REC_STACK_SIZE];

        memset((void *)buf, remaining & 0xFF, sizeof(buf));
        pr_info("loop %d/%d ...\n", (int)buf[remaining % sizeof(buf)],
                recur_count);
        if (!remaining)
                return 0;
        else
                return recursive_loop(remaining - 1);
}

/* If the depth is negative, use the default, otherwise keep the parameter. */
void __init lkdtm_bugs_init(int *recur_param)
{
        if (*recur_param < 0)
                *recur_param = recur_count;
        else
                recur_count = *recur_param;
}
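
/* Take the system down immediately with a forced panic(). */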
void lkdtm_PANIC(void)
{
        panic("dumptest");
}

void lkdtm_BUG(void)
{
        BUG();
}

static int warn_counter;

void lkdtm_WARNING(void)
{
        WARN_ON(++warn_counter);
}

void lkdtm_WARNING_MESSAGE(void)
{
        WARN(1, "Warning message trigger count: %d\n", ++warn_counter);
}

void lkdtm_EXCEPTION(void)
{
        *((volatile int *) 0) = 0;
}

void lkdtm_LOOP(void)
{
        for (;;)
                ;
}
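
/*
 * Consume the whole kernel stack via recursion; with CONFIG_VMAP_STACK
 * this is expected to fault on the stack's guard page rather than
 * silently run past THREAD_SIZE.
 */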
void lkdtm_EXHAUST_STACK(void)
{
        pr_info("Calling function with %lu frame size to depth %d ...\n",
                        REC_STACK_SIZE, recur_count);
        recursive_loop(recur_count);
        pr_info("FAIL: survived without exhausting stack?!\n");
}
static noinline void __lkdtm_CORRUPT_STACK(void *stack)
{
        memset(stack, '\xff', 64);
}

/* This should trip the stack canary, not corrupt the return address. */
noinline void lkdtm_CORRUPT_STACK(void)
{
        /* Use default char array length that triggers stack protection. */
        char data[8] __aligned(sizeof(void *));

        pr_info("Corrupting stack containing char array ...\n");
        __lkdtm_CORRUPT_STACK((void *)&data);
}

/* Same as above but will only get a canary with -fstack-protector-strong */
noinline void lkdtm_CORRUPT_STACK_STRONG(void)
{
        union {
                unsigned short shorts[4];
                unsigned long *ptr;
        } data __aligned(sizeof(void *));

        pr_info("Corrupting stack containing union ...\n");
        __lkdtm_CORRUPT_STACK((void *)&data);
}

void lkdtm_UNALIGNED_LOAD_STORE_WRITE(void)
{
        static u8 data[5] __attribute__((aligned(4))) = {1, 2, 3, 4, 5};
        u32 *p;
        u32 val = 0x12345678;

        p = (u32 *)(data + 1);
        if (*p == 0)
                val = 0x87654321;
        *p = val;
}
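
/* Spin with preemption disabled to trip the soft-lockup detector. */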
void lkdtm_SOFTLOCKUP(void)
{
        preempt_disable();
        for (;;)
                cpu_relax();
}
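
/* Spin with interrupts disabled to trip the hard-lockup detector (NMI watchdog). */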
void lkdtm_HARDLOCKUP(void)
{
        local_irq_disable();
        for (;;)
                cpu_relax();
}

void lkdtm_SPINLOCKUP(void)
{
        /* Must be called twice to trigger. */
        spin_lock(&lock_me_up);
        /* Let sparse know we intended to exit holding the lock. */
        __release(&lock_me_up);
}
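
/* Block in TASK_UNINTERRUPTIBLE to trip the hung-task detector (khungtaskd). */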
void lkdtm_HUNG_TASK(void)
{
        set_current_state(TASK_UNINTERRUPTIBLE);
        schedule();
}
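
/* Shared state for the signed and unsigned overflow tests below. */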
volatile unsigned int huge = INT_MAX - 2;
volatile unsigned int ignored;
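
/*
 * Starting from INT_MAX - 2, the "+ 1" below is fine but the "+ 4" walks
 * past INT_MAX, which overflow instrumentation (e.g. UBSAN) can catch.
 */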
void lkdtm_OVERFLOW_SIGNED(void)
{
        int value;

        value = huge;
        pr_info("Normal signed addition ...\n");
        value += 1;
        ignored = value;

        pr_info("Overflowing signed addition ...\n");
        value += 4;
        ignored = value;
}
void lkdtm_OVERFLOW_UNSIGNED(void)
{
        unsigned int value;

        value = huge;
        pr_info("Normal unsigned addition ...\n");
        value += 1;
        ignored = value;

        pr_info("Overflowing unsigned addition ...\n");
        value += 4;
        ignored = value;
}

/* Intentionally using old-style flex array definition of 1 byte. */
struct array_bounds_flex_array {
        int flags;
        int count;
        int data[1];
};

struct array_bounds {
        int flags;
        int count;
        int data[8];
        int padding;
};
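
/*
 * With array-bounds instrumentation (e.g. CONFIG_UBSAN_BOUNDS), the
 * fixed-size "checked" member should trap when indexed past its end,
 * while the old-style flex array must stay uninstrumented.
 */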
void lkdtm_ARRAY_BOUNDS(void)
{
        struct array_bounds_flex_array *not_checked;
        struct array_bounds *checked;
        volatile int i;

        not_checked = kmalloc(sizeof(*not_checked) * 2, GFP_KERNEL);
        checked = kmalloc(sizeof(*checked) * 2, GFP_KERNEL);

        pr_info("Array access within bounds ...\n");
        /* For both, touch all bytes in the actual member size. */
        for (i = 0; i < sizeof(checked->data); i++)
                checked->data[i] = 'A';
        /*
         * For the uninstrumented flex array member, also touch 1 byte
         * beyond to verify it is correctly uninstrumented.
         */
        for (i = 0; i < sizeof(not_checked->data) + 1; i++)
                not_checked->data[i] = 'A';

        pr_info("Array access beyond bounds ...\n");
        for (i = 0; i < sizeof(checked->data) + 1; i++)
                checked->data[i] = 'B';

        kfree(not_checked);
        kfree(checked);
        pr_err("FAIL: survived array bounds overflow!\n");
}
void lkdtm_CORRUPT_LIST_ADD(void)
{
        /*
         * Initially, an empty list via LIST_HEAD:
         *      test_head.next = &test_head
         *      test_head.prev = &test_head
         */
        LIST_HEAD(test_head);
        struct lkdtm_list good, bad;
        void *target[2] = { };
        void *redirection = &target;

        pr_info("attempting good list addition\n");

        /*
         * Adding to the list performs these actions:
         *      test_head.next->prev = &good.node
         *      good.node.next = test_head.next
         *      good.node.prev = &test_head
         *      test_head.next = &good.node
         */
        list_add(&good.node, &test_head);

        pr_info("attempting corrupted list addition\n");
        /*
         * In simulating this "write what where" primitive, the "what" is
         * the address of &bad.node, and the "where" is the address held
         * by "redirection".
         */
        test_head.next = redirection;
        list_add(&bad.node, &test_head);

        if (target[0] == NULL && target[1] == NULL)
                pr_err("Overwrite did not happen, but no BUG?!\n");
        else
                pr_err("list_add() corruption not detected!\n");
}

void lkdtm_CORRUPT_LIST_DEL(void)
{
        LIST_HEAD(test_head);
        struct lkdtm_list item;
        void *target[2] = { };
        void *redirection = &target;

        list_add(&item.node, &test_head);

        pr_info("attempting good list removal\n");
        list_del(&item.node);

        pr_info("attempting corrupted list removal\n");
        list_add(&item.node, &test_head);

        /* As with the list_add() test above, this corrupts "next". */
        item.node.next = redirection;
        list_del(&item.node);

        if (target[0] == NULL && target[1] == NULL)
                pr_err("Overwrite did not happen, but no BUG?!\n");
        else
                pr_err("list_del() corruption not detected!\n");
}

/* Test if an unbalanced set_fs(KERNEL_DS)/set_fs(USER_DS) check exists. */
void lkdtm_CORRUPT_USER_DS(void)
{
        pr_info("setting bad task size limit\n");
        set_fs(KERNEL_DS);

        /* Make sure we do not keep running with a KERNEL_DS! */
        force_sig(SIGKILL);
}

/* Test that VMAP_STACK is actually allocating with a leading guard page */
void lkdtm_STACK_GUARD_PAGE_LEADING(void)
{
        const unsigned char *stack = task_stack_page(current);
        const unsigned char *ptr = stack - 1;
        volatile unsigned char byte;

        pr_info("attempting bad read from page below current stack\n");

        byte = *ptr;

        pr_err("FAIL: accessed page before stack! (byte: %x)\n", byte);
}

/* Test that VMAP_STACK is actually allocating with a trailing guard page */
void lkdtm_STACK_GUARD_PAGE_TRAILING(void)
{
        const unsigned char *stack = task_stack_page(current);
        const unsigned char *ptr = stack + THREAD_SIZE;
        volatile unsigned char byte;

        pr_info("attempting bad read from page above current stack\n");

        byte = *ptr;

        pr_err("FAIL: accessed page after stack! (byte: %x)\n", byte);
}
void lkdtm_UNSET_SMEP(void)
{
#if IS_ENABLED(CONFIG_X86_64) && !IS_ENABLED(CONFIG_UML)
#define MOV_CR4_DEPTH 64
        void (*direct_write_cr4)(unsigned long val);
        unsigned char *insn;
        unsigned long cr4;
        int i;

        cr4 = native_read_cr4();

        if ((cr4 & X86_CR4_SMEP) != X86_CR4_SMEP) {
                pr_err("FAIL: SMEP not in use\n");
                return;
        }
        cr4 &= ~(X86_CR4_SMEP);

        pr_info("trying to clear SMEP normally\n");
        native_write_cr4(cr4);
        if (cr4 == native_read_cr4()) {
                pr_err("FAIL: pinning SMEP failed!\n");
                cr4 |= X86_CR4_SMEP;
                pr_info("restoring SMEP\n");
                native_write_cr4(cr4);
                return;
        }
        pr_info("ok: SMEP did not get cleared\n");

        /*
         * To test the post-write pinning verification we need to call
         * directly into the middle of native_write_cr4() where the
         * cr4 write happens, skipping any pinning. This searches for
         * the cr4 writing instruction.
         */
        insn = (unsigned char *)native_write_cr4;
        for (i = 0; i < MOV_CR4_DEPTH; i++) {
                /* mov %rdi, %cr4 */
                if (insn[i] == 0x0f && insn[i+1] == 0x22 && insn[i+2] == 0xe7)
                        break;
                /* mov %rdi,%rax; mov %rax, %cr4 */
                if (insn[i]   == 0x48 && insn[i+1] == 0x89 &&
                    insn[i+2] == 0xf8 && insn[i+3] == 0x0f &&
                    insn[i+4] == 0x22 && insn[i+5] == 0xe0)
                        break;
        }
        if (i >= MOV_CR4_DEPTH) {
                pr_info("ok: cannot locate cr4 writing call gadget\n");
                return;
        }
        direct_write_cr4 = (void *)(insn + i);

        pr_info("trying to clear SMEP with call gadget\n");
        direct_write_cr4(cr4);
        if (native_read_cr4() & X86_CR4_SMEP) {
                pr_info("ok: SMEP removal was reverted\n");
        } else {
                pr_err("FAIL: cleared SMEP not detected!\n");
                cr4 |= X86_CR4_SMEP;
                pr_info("restoring SMEP\n");
                native_write_cr4(cr4);
        }
#else
        pr_err("XFAIL: this test is x86_64-only\n");
#endif
}

void lkdtm_DOUBLE_FAULT(void)
{
#if IS_ENABLED(CONFIG_X86_32) && !IS_ENABLED(CONFIG_UML)
        /*
         * Trigger #DF by setting the stack limit to zero. This clobbers
         * a GDT TLS slot, which is okay because the current task will die
         * anyway due to the double fault.
         */
        struct desc_struct d = {
                .type = 3,      /* expand-up, writable, accessed data */
                .p = 1,         /* present */
                .d = 1,         /* 32-bit */
                .g = 0,         /* limit in bytes */
                .s = 1,         /* not system */
        };

        local_irq_disable();
        write_gdt_entry(get_cpu_gdt_rw(smp_processor_id()),
                        GDT_ENTRY_TLS_MIN, &d, DESCTYPE_S);

        /*
         * Put our zero-limit segment in SS and then trigger a fault. The
         * 4-byte access to (%esp) will fault with #SS, and the attempt to
         * deliver the fault will recursively cause #SS and result in #DF.
         * This whole process happens while NMIs and MCEs are blocked by the
         * MOV SS window. This is nice because an NMI with an invalid SS
         * would also double-fault, resulting in the NMI or MCE being lost.
         */
        asm volatile ("movw %0, %%ss; addl $0, (%%esp)" ::
                      "r" ((unsigned short)(GDT_ENTRY_TLS_MIN << 3)));

        pr_err("FAIL: tried to double fault but didn't die\n");
#else
        pr_err("XFAIL: this test is ia32-only\n");
#endif
}
#ifdef CONFIG_ARM64
static noinline void change_pac_parameters(void)
{
        if (IS_ENABLED(CONFIG_ARM64_PTR_AUTH)) {
                /* Reset the keys of the current task */
                ptrauth_thread_init_kernel(current);
                ptrauth_thread_switch_kernel(current);
        }
}
#endif

noinline void lkdtm_CORRUPT_PAC(void)
{
#ifdef CONFIG_ARM64
#define CORRUPT_PAC_ITERATE 10
        int i;

        if (!IS_ENABLED(CONFIG_ARM64_PTR_AUTH))
                pr_err("FAIL: kernel not built with CONFIG_ARM64_PTR_AUTH\n");

        if (!system_supports_address_auth()) {
                pr_err("FAIL: CPU lacks pointer authentication feature\n");
                return;
        }

        pr_info("changing PAC parameters to force function return failure...\n");
        /*
         * PAC is a hash value computed from input keys, return address and
         * stack pointer. Since the PAC has fewer bits than a full pointer,
         * collisions are possible, so iterate a few times to reduce the
         * chance of surviving by collision.
         */
        for (i = 0; i < CORRUPT_PAC_ITERATE; i++)
                change_pac_parameters();

        pr_err("FAIL: survived PAC changes! Kernel may be unstable from here\n");
#else
        pr_err("XFAIL: this test is arm64-only\n");
#endif
}