// SPDX-License-Identifier: GPL-2.0
/*
 * unaligned.c: Unaligned load/store trap handling with special
 * cases for the kernel to do them more quickly.
 */

#include <linux/jiffies.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/mm.h>
#include <linux/extable.h>
#include <asm/asi.h>
#include <asm/ptrace.h>
#include <asm/pstate.h>
#include <asm/processor.h>
#include <linux/uaccess.h>
#include <linux/smp.h>
#include <linux/bitops.h>
#include <linux/perf_event.h>
#include <linux/ratelimit.h>
#include <linux/context_tracking.h>
#include <asm/fpumacro.h>
#include <asm/cacheflush.h>
#include <asm/setup.h>

#include "entry.h"
#include "kernel.h"

enum direction {
	load,    /* ld, ldd, ldh, ldsh */
	store,   /* st, std, sth, stsh */
	both,    /* Swap, ldstub, cas, ... */
	fpld,
	fpst,
	invalid,
};

static inline enum direction decode_direction(unsigned int insn)
{
	unsigned long tmp = (insn >> 21) & 1;

	if (!tmp)
		return load;
	else {
		switch ((insn>>19)&0xf) {
		case 15: /* swap* */
			return both;
		default:
			return store;
		}
	}
}
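
/* Worked example (assuming the standard SPARC V9 op3 encodings, which
 * this file does not spell out): LDX has op3 0x0b, so insn bit 21 is
 * clear and it decodes as 'load'; STX has op3 0x0e, so bit 21 is set
 * and the low op3 nibble (14) falls through to 'store'; SWAP has op3
 * 0x0f, whose low nibble 15 hits the case above and decodes as 'both'.
 */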

/* 16 = double-word, 8 = extra-word, 4 = word, 2 = half-word */
static inline int decode_access_size(struct pt_regs *regs, unsigned int insn)
{
	unsigned int tmp;

	tmp = ((insn >> 19) & 0xf);
	if (tmp == 11 || tmp == 14) /* ldx/stx */
		return 8;
	tmp &= 3;
	if (!tmp)
		return 4;
	else if (tmp == 3)
		return 16;	/* ldd/std - Although it is actually 8 */
	else if (tmp == 2)
		return 2;
	else {
		printk("Impossible unaligned trap. insn=%08x\n", insn);
		die_if_kernel("Byte sized unaligned access?!?!", regs);

		/* GCC should never warn that control reaches the end
		 * of this function without returning a value because
		 * die_if_kernel() is marked with attribute 'noreturn'.
		 * Alas, some versions do...
		 */
		return 0;
	}
}
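
/* E.g. (standard V9 op3 values assumed): ldx/stx (op3 11/14) report
 * 8 bytes; lduw (op3 0, low two bits 00) reports 4; lduh (op3 2)
 * reports 2; ldd/std (low two bits 11) report the special value 16,
 * which do_int_store() below folds back into one 8-byte access.
 */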

static inline int decode_asi(unsigned int insn, struct pt_regs *regs)
{
	if (insn & 0x800000) {
		if (insn & 0x2000)
			return (unsigned char)(regs->tstate >> 24);	/* %asi */
		else
			return (unsigned char)(insn >> 5);	/* imm_asi */
	} else
		return ASI_P;
}

/* 0x400000 = signed, 0 = unsigned */
static inline int decode_signedness(unsigned int insn)
{
	return (insn & 0x400000);
}
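
/* E.g. (standard V9 encodings assumed): ldsh has op3 0x0a, so bit 22
 * (0x400000) is set and the emulated load sign-extends; lduh has op3
 * 0x02, the bit is clear, and the load zero-extends.
 */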

static inline void maybe_flush_windows(unsigned int rs1, unsigned int rs2,
					unsigned int rd, int from_kernel)
{
	if (rs2 >= 16 || rs1 >= 16 || rd >= 16) {
		if (from_kernel != 0)
			__asm__ __volatile__("flushw");
		else
			flushw_user();
	}
}

static inline long sign_extend_imm13(long imm)
{
	return imm << 51 >> 51;
}
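
/* Worked example: a long is 64 bits here, so "imm << 51" moves bit 12
 * of the 13-bit immediate into the sign bit and the arithmetic shift
 * back smears it across the upper bits.  sign_extend_imm13(0x1fff)
 * yields -1, while sign_extend_imm13(0x0fff) yields 4095.
 */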

static unsigned long fetch_reg(unsigned int reg, struct pt_regs *regs)
{
	unsigned long value, fp;

	if (reg < 16)
		return (!reg ? 0 : regs->u_regs[reg]);

	fp = regs->u_regs[UREG_FP];

	if (regs->tstate & TSTATE_PRIV) {
		struct reg_window *win;
		win = (struct reg_window *)(fp + STACK_BIAS);
		value = win->locals[reg - 16];
	} else if (!test_thread_64bit_stack(fp)) {
		struct reg_window32 __user *win32;
		win32 = (struct reg_window32 __user *)((unsigned long)((u32)fp));
		get_user(value, &win32->locals[reg - 16]);
	} else {
		struct reg_window __user *win;
		win = (struct reg_window __user *)(fp + STACK_BIAS);
		get_user(value, &win->locals[reg - 16]);
	}
	return value;
}
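
/* Note: registers 0-15 (%g0-%g7, %o0-%o7) live in pt_regs itself,
 * while 16-31 (%l0-%l7, %i0-%i7) live in the register window the
 * trapped code saved on its stack; indexing locals[reg - 16] reaches
 * the ins as well since they sit directly after the locals in
 * reg_window.  Compat (32-bit stack) tasks use the narrow
 * reg_window32 layout with no STACK_BIAS.
 */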

static unsigned long *fetch_reg_addr(unsigned int reg, struct pt_regs *regs)
{
	unsigned long fp;

	if (reg < 16)
		return &regs->u_regs[reg];

	fp = regs->u_regs[UREG_FP];

	if (regs->tstate & TSTATE_PRIV) {
		struct reg_window *win;
		win = (struct reg_window *)(fp + STACK_BIAS);
		return &win->locals[reg - 16];
	} else if (!test_thread_64bit_stack(fp)) {
		struct reg_window32 *win32;
		win32 = (struct reg_window32 *)((unsigned long)((u32)fp));
		return (unsigned long *)&win32->locals[reg - 16];
	} else {
		struct reg_window *win;
		win = (struct reg_window *)(fp + STACK_BIAS);
		return &win->locals[reg - 16];
	}
}

unsigned long compute_effective_address(struct pt_regs *regs,
					unsigned int insn, unsigned int rd)
{
	int from_kernel = (regs->tstate & TSTATE_PRIV) != 0;
	unsigned int rs1 = (insn >> 14) & 0x1f;
	unsigned int rs2 = insn & 0x1f;
	unsigned long addr;

	if (insn & 0x2000) {
		maybe_flush_windows(rs1, 0, rd, from_kernel);
		addr = (fetch_reg(rs1, regs) + sign_extend_imm13(insn));
	} else {
		maybe_flush_windows(rs1, rs2, rd, from_kernel);
		addr = (fetch_reg(rs1, regs) + fetch_reg(rs2, regs));
	}

	if (!from_kernel && test_thread_flag(TIF_32BIT))
		addr &= 0xffffffff;

	return addr;
}
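
/* E.g. "ldx [%i1 + 8], %o0" has the i bit (insn bit 13, 0x2000) set
 * and takes the rs1 + simm13 path above, while "ldx [%i1 + %i2], %o0"
 * has it clear and takes the rs1 + rs2 path; for 32-bit tasks the
 * result is truncated to 32 bits to match what the hardware would do.
 */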

/* This is just to make gcc think die_if_kernel does return... */
static void __used unaligned_panic(char *str, struct pt_regs *regs)
{
	die_if_kernel(str, regs);
}

extern int do_int_load(unsigned long *dest_reg, int size,
		       unsigned long *saddr, int is_signed, int asi);

extern int __do_int_store(unsigned long *dst_addr, int size,
			  unsigned long src_val, int asi);

static inline int do_int_store(int reg_num, int size, unsigned long *dst_addr,
			       struct pt_regs *regs, int asi, int orig_asi)
{
	unsigned long zero = 0;
	unsigned long *src_val_p = &zero;
	unsigned long src_val;

	if (size == 16) {
		size = 8;
		zero = (((long)(reg_num ?
			(unsigned int)fetch_reg(reg_num, regs) : 0)) << 32) |
			(unsigned int)fetch_reg(reg_num + 1, regs);
	} else if (reg_num) {
		src_val_p = fetch_reg_addr(reg_num, regs);
	}
	src_val = *src_val_p;
	if (unlikely(asi != orig_asi)) {
		switch (size) {
		case 2:
			src_val = swab16(src_val);
			break;
		case 4:
			src_val = swab32(src_val);
			break;
		case 8:
			src_val = swab64(src_val);
			break;
		case 16:
		default:
			BUG();
			break;
		}
	}
	return __do_int_store(dst_addr, size, src_val, asi);
}
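
/* The size == 16 case handles std: the 32-bit values of %rd and
 * %rd+1 are packed into one 64-bit quantity and emitted as a single
 * 8-byte store, matching the "16 actually means 8" note in
 * decode_access_size() above.
 */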

static inline void advance(struct pt_regs *regs)
{
	regs->tpc = regs->tnpc;
	regs->tnpc += 4;
	if (test_thread_flag(TIF_32BIT)) {
		regs->tpc &= 0xffffffff;
		regs->tnpc &= 0xffffffff;
	}
}
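
/* Emulation has to honor the delay-slot PC pair: the next instruction
 * is whatever tnpc already points at (tpc + 4 normally, but a branch
 * target when the trapped instruction sat in a delay slot), so we
 * step tpc to tnpc rather than simply adding 4 to tpc.
 */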

static inline int floating_point_load_or_store_p(unsigned int insn)
{
	return (insn >> 24) & 1;
}

static inline int ok_for_kernel(unsigned int insn)
{
	return !floating_point_load_or_store_p(insn);
}

static void kernel_mna_trap_fault(int fixup_tstate_asi)
{
	struct pt_regs *regs = current_thread_info()->kern_una_regs;
	unsigned int insn = current_thread_info()->kern_una_insn;
	const struct exception_table_entry *entry;

	entry = search_exception_tables(regs->tpc);
	if (!entry) {
		unsigned long address;

		address = compute_effective_address(regs, insn,
						    ((insn >> 25) & 0x1f));
		if (address < PAGE_SIZE) {
			printk(KERN_ALERT "Unable to handle kernel NULL "
			       "pointer dereference in mna handler");
		} else
			printk(KERN_ALERT "Unable to handle kernel paging "
			       "request in mna handler");
		printk(KERN_ALERT " at virtual address %016lx\n", address);
		printk(KERN_ALERT "current->{active_,}mm->context = %016lx\n",
		       (current->mm ? CTX_HWBITS(current->mm->context) :
			CTX_HWBITS(current->active_mm->context)));
		printk(KERN_ALERT "current->{active_,}mm->pgd = %016lx\n",
		       (current->mm ? (unsigned long) current->mm->pgd :
			(unsigned long) current->active_mm->pgd));
		die_if_kernel("Oops", regs);
		/* Not reached */
	}

	regs->tpc = entry->fixup;
	regs->tnpc = regs->tpc + 4;

	if (fixup_tstate_asi) {
		regs->tstate &= ~TSTATE_ASI;
		regs->tstate |= (ASI_AIUS << 24UL);
	}
}
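
/* If the faulting access came from a get_user()/put_user() style
 * accessor, the exception table supplies a fixup address; branching
 * there makes the accessor return -EFAULT instead of oopsing.  The
 * fixup_tstate_asi case also resets the saved %asi to ASI_AIUS, the
 * ASI such user accessors run with.
 */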

static void log_unaligned(struct pt_regs *regs)
{
	static DEFINE_RATELIMIT_STATE(ratelimit, 5 * HZ, 5);

	if (__ratelimit(&ratelimit)) {
		printk("Kernel unaligned access at TPC[%lx] %pS\n",
		       regs->tpc, (void *) regs->tpc);
	}
}
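
/* Rate-limited to a burst of 5 messages per 5 * HZ jiffies (five
 * seconds), so a hot loop of unaligned accesses cannot flood the
 * console.
 */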

asmlinkage void kernel_unaligned_trap(struct pt_regs *regs, unsigned int insn)
{
	enum direction dir = decode_direction(insn);
	int size = decode_access_size(regs, insn);
	int orig_asi, asi;

	current_thread_info()->kern_una_regs = regs;
	current_thread_info()->kern_una_insn = insn;

	orig_asi = asi = decode_asi(insn, regs);

	/* If this is a {get,put}_user() on an unaligned userspace pointer,
	 * just signal a fault and do not log the event.
	 */
	if (asi == ASI_AIUS) {
		kernel_mna_trap_fault(0);
		return;
	}

	log_unaligned(regs);

	if (!ok_for_kernel(insn) || dir == both) {
		printk("Unsupported unaligned load/store trap for kernel "
		       "at <%016lx>.\n", regs->tpc);
		unaligned_panic("Kernel does fpu/atomic "
				"unaligned load/store.", regs);

		kernel_mna_trap_fault(0);
	} else {
		unsigned long addr, *reg_addr;
		int err;

		addr = compute_effective_address(regs, insn,
						 ((insn >> 25) & 0x1f));
		perf_sw_event(PERF_COUNT_SW_ALIGNMENT_FAULTS, 1, regs, addr);
		switch (asi) {
		case ASI_NL:
		case ASI_AIUPL:
		case ASI_AIUSL:
		case ASI_PL:
		case ASI_SL:
		case ASI_PNFL:
		case ASI_SNFL:
			/* Little-endian ASI: do the access big-endian
			 * and byte-swap the result afterwards.
			 */
			asi &= ~0x08;
			break;
		}
		switch (dir) {
		case load:
			reg_addr = fetch_reg_addr(((insn>>25)&0x1f), regs);
			err = do_int_load(reg_addr, size,
					  (unsigned long *) addr,
					  decode_signedness(insn), asi);
			if (likely(!err) && unlikely(asi != orig_asi)) {
				unsigned long val_in = *reg_addr;
				switch (size) {
				case 2:
					val_in = swab16(val_in);
					break;
				case 4:
					val_in = swab32(val_in);
					break;
				case 8:
					val_in = swab64(val_in);
					break;
				case 16:
				default:
					BUG();
					break;
				}
				*reg_addr = val_in;
			}
			break;

		case store:
			err = do_int_store(((insn>>25)&0x1f), size,
					   (unsigned long *) addr, regs,
					   asi, orig_asi);
			break;

		default:
			panic("Impossible kernel unaligned trap.");
			/* Not reached... */
		}
		if (unlikely(err))
			kernel_mna_trap_fault(1);
		else
			advance(regs);
	}
}

int handle_popc(u32 insn, struct pt_regs *regs)
{
	int from_kernel = (regs->tstate & TSTATE_PRIV) != 0;
	int ret, rd = ((insn >> 25) & 0x1f);
	u64 value;

	perf_sw_event(PERF_COUNT_SW_EMULATION_FAULTS, 1, regs, 0);
	if (insn & 0x2000) {
		maybe_flush_windows(0, 0, rd, from_kernel);
		value = sign_extend_imm13(insn);
	} else {
		maybe_flush_windows(0, insn & 0x1f, rd, from_kernel);
		value = fetch_reg(insn & 0x1f, regs);
	}
	ret = hweight64(value);
	if (rd < 16) {
		if (rd)
			regs->u_regs[rd] = ret;
	} else {
		unsigned long fp = regs->u_regs[UREG_FP];

		if (!test_thread_64bit_stack(fp)) {
			struct reg_window32 __user *win32;
			win32 = (struct reg_window32 __user *)((unsigned long)((u32)fp));
			put_user(ret, &win32->locals[rd - 16]);
		} else {
			struct reg_window __user *win;
			win = (struct reg_window __user *)(fp + STACK_BIAS);
			put_user(ret, &win->locals[rd - 16]);
		}
	}
	advance(regs);
	return 1;
}
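
/* On chips without a hardware popc this instruction traps and the
 * bits are counted here in software.  Example: emulating
 * "popc %o1, %o0" with %o1 == 0xff00ff stores hweight64(0xff00ff),
 * i.e. 16, into %o0.
 */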

extern void do_fpother(struct pt_regs *regs);
extern void do_privact(struct pt_regs *regs);
extern void sun4v_data_access_exception(struct pt_regs *regs,
					unsigned long addr,
					unsigned long type_ctx);

int handle_ldf_stq(u32 insn, struct pt_regs *regs)
{
	unsigned long addr = compute_effective_address(regs, insn, 0);
	int freg;
	struct fpustate *f = FPUSTATE;
	int asi = decode_asi(insn, regs);
	int flag;

	perf_sw_event(PERF_COUNT_SW_EMULATION_FAULTS, 1, regs, 0);

	save_and_clear_fpu();
	current_thread_info()->xfsr[0] &= ~0x1c000;
	if (insn & 0x200000) {
		/* STQ */
		u64 first = 0, second = 0;

		freg = ((insn >> 25) & 0x1e) | ((insn >> 20) & 0x20);
		flag = (freg < 32) ? FPRS_DL : FPRS_DU;
		if (freg & 3) {
			current_thread_info()->xfsr[0] |= (6 << 14) /* invalid_fp_register */;
			do_fpother(regs);
			return 0;
		}
		if (current_thread_info()->fpsaved[0] & flag) {
			first = *(u64 *)&f->regs[freg];
			second = *(u64 *)&f->regs[freg+2];
		}
		if (asi < 0x80) {
			do_privact(regs);
			return 1;
		}
		switch (asi) {
		case ASI_P:
		case ASI_S: break;
		case ASI_PL:
		case ASI_SL:
			{
				/* Need to convert endians */
				u64 tmp = __swab64p(&first);

				first = __swab64p(&second);
				second = tmp;
				break;
			}
		default:
			if (tlb_type == hypervisor)
				sun4v_data_access_exception(regs, addr, 0);
			else
				spitfire_data_access_exception(regs, 0, addr);
			return 1;
		}
		if (put_user (first >> 32, (u32 __user *)addr) ||
		    __put_user ((u32)first, (u32 __user *)(addr + 4)) ||
		    __put_user (second >> 32, (u32 __user *)(addr + 8)) ||
		    __put_user ((u32)second, (u32 __user *)(addr + 12))) {
			if (tlb_type == hypervisor)
				sun4v_data_access_exception(regs, addr, 0);
			else
				spitfire_data_access_exception(regs, 0, addr);
			return 1;
		}
	} else {
		/* LDF, LDDF, LDQF */
		u32 data[4] __attribute__ ((aligned(8)));
		int size, i;
		int err;

		if (asi < 0x80) {
			do_privact(regs);
			return 1;
		} else if (asi > ASI_SNFL) {
			if (tlb_type == hypervisor)
				sun4v_data_access_exception(regs, addr, 0);
			else
				spitfire_data_access_exception(regs, 0, addr);
			return 1;
		}
		switch (insn & 0x180000) {
		case 0x000000: size = 1; break;
		case 0x100000: size = 4; break;
		default: size = 2; break;
		}
		if (size == 1)
			freg = (insn >> 25) & 0x1f;
		else
			freg = ((insn >> 25) & 0x1e) | ((insn >> 20) & 0x20);
		flag = (freg < 32) ? FPRS_DL : FPRS_DU;

		for (i = 0; i < size; i++)
			data[i] = 0;

		err = get_user (data[0], (u32 __user *) addr);
		if (!err) {
			for (i = 1; i < size; i++)
				err |= __get_user (data[i], (u32 __user *)(addr + 4*i));
		}
		if (err && !(asi & 0x2 /* NF */)) {
			if (tlb_type == hypervisor)
				sun4v_data_access_exception(regs, addr, 0);
			else
				spitfire_data_access_exception(regs, 0, addr);
			return 1;
		}
		if (asi & 0x8) /* Little */ {
			u64 tmp;

			switch (size) {
			case 1: data[0] = le32_to_cpup(data + 0); break;
			default:*(u64 *)(data + 0) = le64_to_cpup((u64 *)(data + 0));
				break;
			case 4: tmp = le64_to_cpup((u64 *)(data + 0));
				*(u64 *)(data + 0) = le64_to_cpup((u64 *)(data + 2));
				*(u64 *)(data + 2) = tmp;
				break;
			}
		}
		if (!(current_thread_info()->fpsaved[0] & FPRS_FEF)) {
			current_thread_info()->fpsaved[0] = FPRS_FEF;
			current_thread_info()->gsr[0] = 0;
		}
		if (!(current_thread_info()->fpsaved[0] & flag)) {
			if (freg < 32)
				memset(f->regs, 0, 32*sizeof(u32));
			else
				memset(f->regs+32, 0, 32*sizeof(u32));
		}
		memcpy(f->regs + freg, data, size * 4);
		current_thread_info()->fpsaved[0] |= flag;
	}
	advance(regs);
	return 1;
}
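
/* FP register numbering example: for double/quad operands the rd
 * field's low bit serves as bit 5 of the register number, which is
 * what ((insn >> 25) & 0x1e) | ((insn >> 20) & 0x20) computes straight
 * from the instruction word; e.g. rd == 3 (binary 00011) selects
 * %f34, since (3 & 0x1e) | ((3 & 1) << 5) == 34.
 */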

void handle_ld_nf(u32 insn, struct pt_regs *regs)
{
	int rd = ((insn >> 25) & 0x1f);
	int from_kernel = (regs->tstate & TSTATE_PRIV) != 0;
	unsigned long *reg;

	perf_sw_event(PERF_COUNT_SW_EMULATION_FAULTS, 1, regs, 0);

	maybe_flush_windows(0, 0, rd, from_kernel);
	reg = fetch_reg_addr(rd, regs);
	if (from_kernel || rd < 16) {
		reg[0] = 0;
		if ((insn & 0x780000) == 0x180000)
			reg[1] = 0;
	} else if (!test_thread_64bit_stack(regs->u_regs[UREG_FP])) {
		put_user(0, (int __user *) reg);
		if ((insn & 0x780000) == 0x180000)
			put_user(0, ((int __user *) reg) + 1);
	} else {
		put_user(0, (unsigned long __user *) reg);
		if ((insn & 0x780000) == 0x180000)
			put_user(0, (unsigned long __user *) reg + 1);
	}
	advance(regs);
}

void handle_lddfmna(struct pt_regs *regs, unsigned long sfar, unsigned long sfsr)
{
	enum ctx_state prev_state = exception_enter();
	unsigned long pc = regs->tpc;
	unsigned long tstate = regs->tstate;
	u32 insn;
	u64 value;
	u8 freg;
	int flag;
	struct fpustate *f = FPUSTATE;

	if (tstate & TSTATE_PRIV)
		die_if_kernel("lddfmna from kernel", regs);
	perf_sw_event(PERF_COUNT_SW_ALIGNMENT_FAULTS, 1, regs, sfar);
	if (test_thread_flag(TIF_32BIT))
		pc = (u32)pc;
	if (get_user(insn, (u32 __user *) pc) != -EFAULT) {
		int asi = decode_asi(insn, regs);
		u32 first, second;
		int err;

		if ((asi > ASI_SNFL) ||
		    (asi < ASI_P))
			goto daex;
		first = second = 0;
		err = get_user(first, (u32 __user *)sfar);
		if (!err)
			err = get_user(second, (u32 __user *)(sfar + 4));
		if (err) {
			if (!(asi & 0x2))
				goto daex;
			first = second = 0;
		}
		save_and_clear_fpu();
		freg = ((insn >> 25) & 0x1e) | ((insn >> 20) & 0x20);
		value = (((u64)first) << 32) | second;
		if (asi & 0x8) /* Little */
			value = __swab64p(&value);
		flag = (freg < 32) ? FPRS_DL : FPRS_DU;
		if (!(current_thread_info()->fpsaved[0] & FPRS_FEF)) {
			current_thread_info()->fpsaved[0] = FPRS_FEF;
			current_thread_info()->gsr[0] = 0;
		}
		if (!(current_thread_info()->fpsaved[0] & flag)) {
			if (freg < 32)
				memset(f->regs, 0, 32*sizeof(u32));
			else
				memset(f->regs+32, 0, 32*sizeof(u32));
		}
		*(u64 *)(f->regs + freg) = value;
		current_thread_info()->fpsaved[0] |= flag;
	} else {
daex:
		if (tlb_type == hypervisor)
			sun4v_data_access_exception(regs, sfar, sfsr);
		else
			spitfire_data_access_exception(regs, sfsr, sfar);
		goto out;
	}
	advance(regs);
out:
	exception_exit(prev_state);
}

void handle_stdfmna(struct pt_regs *regs, unsigned long sfar, unsigned long sfsr)
{
	enum ctx_state prev_state = exception_enter();
	unsigned long pc = regs->tpc;
	unsigned long tstate = regs->tstate;
	u32 insn;
	u64 value;
	u8 freg;
	int flag;
	struct fpustate *f = FPUSTATE;

	if (tstate & TSTATE_PRIV)
		die_if_kernel("stdfmna from kernel", regs);
	perf_sw_event(PERF_COUNT_SW_ALIGNMENT_FAULTS, 1, regs, sfar);
	if (test_thread_flag(TIF_32BIT))
		pc = (u32)pc;
	if (get_user(insn, (u32 __user *) pc) != -EFAULT) {
		int asi = decode_asi(insn, regs);
		freg = ((insn >> 25) & 0x1e) | ((insn >> 20) & 0x20);
		value = 0;
		flag = (freg < 32) ? FPRS_DL : FPRS_DU;
		if ((asi > ASI_SNFL) ||
		    (asi < ASI_P))
			goto daex;
		save_and_clear_fpu();
		if (current_thread_info()->fpsaved[0] & flag)
			value = *(u64 *)&f->regs[freg];
		switch (asi) {
		case ASI_P:
		case ASI_S: break;
		case ASI_PL:
		case ASI_SL:
			value = __swab64p(&value); break;
		default: goto daex;
		}
		if (put_user (value >> 32, (u32 __user *) sfar) ||
		    __put_user ((u32)value, (u32 __user *)(sfar + 4)))
			goto daex;
	} else {
daex:
		if (tlb_type == hypervisor)
			sun4v_data_access_exception(regs, sfar, sfsr);
		else
			spitfire_data_access_exception(regs, sfsr, sfar);
		goto out;
	}
	advance(regs);
out:
	exception_exit(prev_state);
}