/*
 * Handle unaligned accesses by emulation.
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License. See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 1996, 1998, 1999, 2002 by Ralf Baechle
 * Copyright (C) 1999 Silicon Graphics, Inc.
 * Copyright (C) 2014 Imagination Technologies Ltd.
 *
 * This file contains the exception handler for the address error exception
 * with the special capability to execute faulting instructions in software.
 * The handler does not try to handle the case when the program counter
 * points to an address not aligned to a word boundary.
 * Putting data at unaligned addresses is a bad practice even on Intel, where
 * only the performance is affected. Much worse is that such code is
 * non-portable. Because several programs die on MIPS due to alignment
 * problems, I decided to implement this handler anyway, though I originally
 * didn't intend to do this at all for user code.
 *
 * For now I enable fixing of address errors by default to make life easier.
 * I intend, however, to disable this at some point in the future, when the
 * alignment problems with user programs have been fixed. For programmers
 * this is the right way round.
 * Fixing address errors is a per-process option. The option is inherited
 * across fork(2) and execve(2) calls. If you really want to use the
 * option in your user programs - I strongly discourage the use of the
 * software emulation - use the following code in your userland stuff:
 *
 * #include <sys/sysmips.h>
 *
 * sysmips(MIPS_FIXADE, x);
 *
 * The argument x is 0 to disable the software emulation; any other value
 * enables it.
 * Below is a little program to play around with this feature.
 *
 * #include <stdio.h>
 * #include <sys/sysmips.h>
 *
 * struct foo {
 *         unsigned char bar[8];
 * };
 *
 * main(int argc, char *argv[])
 * {
 *         struct foo x = {0, 1, 2, 3, 4, 5, 6, 7};
 *         unsigned int *p = (unsigned int *) (x.bar + 3);
 *         int i;
 *
 *         if (argc > 1)
 *                 sysmips(MIPS_FIXADE, atoi(argv[1]));
 *
 *         printf("*p = %08lx\n", *p);
 *
 *         *p = 0xdeadface;
 *
 *         for (i = 0; i <= 7; i++)
 *                 printf("%02x ", x.bar[i]);
 *         printf("\n");
 * }
 * Coprocessor loads are not supported; I think this case is unimportant
 * in practice.
 *
 * TODO: Handle ndc (attempted store to doubleword in uncached memory)
 *	 exception for the R6000.
 *	 A store crossing a page boundary might be executed only partially.
 *	 Undo the partial store in this case.
 */
#include <linux/context_tracking.h>
#include <linux/signal.h>
#include <linux/smp.h>
#include <linux/sched.h>
#include <linux/debugfs.h>
#include <linux/perf_event.h>

#include <asm/branch.h>
#include <asm/byteorder.h>
#include <asm/debug.h>
#include <asm/fpu_emulator.h>

#include <linux/uaccess.h>
#define STR(x)	__STR(x)
#define __STR(x)  #x

enum {
	UNALIGNED_ACTION_QUIET,
	UNALIGNED_ACTION_SIGNAL,
	UNALIGNED_ACTION_SHOW,
};

#ifdef CONFIG_DEBUG_FS
static u32 unaligned_instructions;
static u32 unaligned_action;
#else
#define unaligned_action UNALIGNED_ACTION_QUIET
#endif
extern void show_registers(struct pt_regs *regs);
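
/*
 * The load/store emulation macros below open-code an unaligned access as
 * a sequence of narrower accesses (or lwl/lwr-style pairs).  Every
 * potentially faulting instruction carries a numeric local label, a
 * .fixup stub that loads -EFAULT into the result operand, and an
 * __ex_table entry pairing the two, so that a fault inside the sequence
 * turns into an error return instead of an oops.  The 'type' parameter
 * selects the kernel or EVA user variant of the access instruction.
 */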
#ifdef __BIG_ENDIAN
#define _LoadHW(addr, value, res, type)  \
do {								\
		__asm__ __volatile__ (".set\tnoat\n"	\
			"1:\t"type##_lb("%0", "0(%2)")"\n"	\
			"2:\t"type##_lbu("$1", "1(%2)")"\n\t"	\
			"sll\t%0, 0x8\n\t"			\
			"or\t%0, $1\n\t"			\
			"li\t%1, 0\n"				\
			"3:\t.set\tat\n\t"			\
			".insn\n\t"				\
			".section\t.fixup,\"ax\"\n\t"		\
			"4:\tli\t%1, %3\n\t"			\
			"j\t3b\n\t"				\
			".previous\n\t"				\
			".section\t__ex_table,\"a\"\n\t"	\
			STR(PTR)"\t1b, 4b\n\t"			\
			STR(PTR)"\t2b, 4b\n\t"			\
			".previous"				\
			: "=&r" (value), "=r" (res)		\
			: "r" (addr), "i" (-EFAULT));		\
} while (0)
#ifdef CONFIG_CPU_HAS_LOAD_STORE_LR
#define _LoadW(addr, value, res, type)   \
		__asm__ __volatile__ (			\
			"1:\t"type##_lwl("%0", "(%2)")"\n"	\
			"2:\t"type##_lwr("%0", "3(%2)")"\n\t"	\
			".section\t.fixup,\"ax\"\n\t"		\
			"4:\tli\t%1, %3\n\t"			\
			".section\t__ex_table,\"a\"\n\t"	\
			STR(PTR)"\t1b, 4b\n\t"			\
			STR(PTR)"\t2b, 4b\n\t"			\
			: "=&r" (value), "=r" (res)		\
			: "r" (addr), "i" (-EFAULT));

#else /* !CONFIG_CPU_HAS_LOAD_STORE_LR */
/* For CPUs without lwl instruction */
#define _LoadW(addr, value, res, type) \
		__asm__ __volatile__ (			\
			"1:"type##_lb("%0", "0(%2)")"\n\t"	\
			"2:"type##_lbu("$1", "1(%2)")"\n\t"	\
			"3:"type##_lbu("$1", "2(%2)")"\n\t"	\
			"4:"type##_lbu("$1", "3(%2)")"\n\t"	\
			".section\t.fixup,\"ax\"\n\t"		\
			"11:\tli\t%1, %3\n\t"			\
			".section\t__ex_table,\"a\"\n\t"	\
			STR(PTR)"\t1b, 11b\n\t"			\
			STR(PTR)"\t2b, 11b\n\t"			\
			STR(PTR)"\t3b, 11b\n\t"			\
			STR(PTR)"\t4b, 11b\n\t"			\
			: "=&r" (value), "=r" (res)		\
			: "r" (addr), "i" (-EFAULT));

#endif /* !CONFIG_CPU_HAS_LOAD_STORE_LR */
#define _LoadHWU(addr, value, res, type) \
		__asm__ __volatile__ (			\
			"1:\t"type##_lbu("%0", "0(%2)")"\n"	\
			"2:\t"type##_lbu("$1", "1(%2)")"\n\t"	\
			".section\t.fixup,\"ax\"\n\t"		\
			"4:\tli\t%1, %3\n\t"			\
			".section\t__ex_table,\"a\"\n\t"	\
			STR(PTR)"\t1b, 4b\n\t"			\
			STR(PTR)"\t2b, 4b\n\t"			\
			: "=&r" (value), "=r" (res)		\
			: "r" (addr), "i" (-EFAULT));
#ifdef CONFIG_CPU_HAS_LOAD_STORE_LR
#define _LoadWU(addr, value, res, type)  \
		__asm__ __volatile__ (			\
			"1:\t"type##_lwl("%0", "(%2)")"\n"	\
			"2:\t"type##_lwr("%0", "3(%2)")"\n\t"	\
			"dsll\t%0, %0, 32\n\t"			\
			"dsrl\t%0, %0, 32\n\t"			\
			"\t.section\t.fixup,\"ax\"\n\t"		\
			"4:\tli\t%1, %3\n\t"			\
			".section\t__ex_table,\"a\"\n\t"	\
			STR(PTR)"\t1b, 4b\n\t"			\
			STR(PTR)"\t2b, 4b\n\t"			\
			: "=&r" (value), "=r" (res)		\
			: "r" (addr), "i" (-EFAULT));

#define _LoadDW(addr, value, res)  \
		__asm__ __volatile__ (			\
			"1:\tldl\t%0, (%2)\n"			\
			"2:\tldr\t%0, 7(%2)\n\t"		\
			"\t.section\t.fixup,\"ax\"\n\t"		\
			"4:\tli\t%1, %3\n\t"			\
			".section\t__ex_table,\"a\"\n\t"	\
			STR(PTR)"\t1b, 4b\n\t"			\
			STR(PTR)"\t2b, 4b\n\t"			\
			: "=&r" (value), "=r" (res)		\
			: "r" (addr), "i" (-EFAULT));
#else /* !CONFIG_CPU_HAS_LOAD_STORE_LR */
/* For CPUs without lwl and ldl instructions */
#define _LoadWU(addr, value, res, type) \
		__asm__ __volatile__ (			\
			"1:"type##_lbu("%0", "0(%2)")"\n\t"	\
			"2:"type##_lbu("$1", "1(%2)")"\n\t"	\
			"3:"type##_lbu("$1", "2(%2)")"\n\t"	\
			"4:"type##_lbu("$1", "3(%2)")"\n\t"	\
			".section\t.fixup,\"ax\"\n\t"		\
			"11:\tli\t%1, %3\n\t"			\
			".section\t__ex_table,\"a\"\n\t"	\
			STR(PTR)"\t1b, 11b\n\t"			\
			STR(PTR)"\t2b, 11b\n\t"			\
			STR(PTR)"\t3b, 11b\n\t"			\
			STR(PTR)"\t4b, 11b\n\t"			\
			: "=&r" (value), "=r" (res)		\
			: "r" (addr), "i" (-EFAULT));

#define _LoadDW(addr, value, res)  \
		__asm__ __volatile__ (			\
			"1:lb\t%0, 0(%2)\n\t"			\
			"2:lbu\t$1, 1(%2)\n\t"			\
			"dsll\t%0, 0x8\n\t"			\
			"3:lbu\t$1, 2(%2)\n\t"			\
			"dsll\t%0, 0x8\n\t"			\
			"4:lbu\t$1, 3(%2)\n\t"			\
			"dsll\t%0, 0x8\n\t"			\
			"5:lbu\t$1, 4(%2)\n\t"			\
			"dsll\t%0, 0x8\n\t"			\
			"6:lbu\t$1, 5(%2)\n\t"			\
			"dsll\t%0, 0x8\n\t"			\
			"7:lbu\t$1, 6(%2)\n\t"			\
			"dsll\t%0, 0x8\n\t"			\
			"8:lbu\t$1, 7(%2)\n\t"			\
			"dsll\t%0, 0x8\n\t"			\
			".section\t.fixup,\"ax\"\n\t"		\
			"11:\tli\t%1, %3\n\t"			\
			".section\t__ex_table,\"a\"\n\t"	\
			STR(PTR)"\t1b, 11b\n\t"			\
			STR(PTR)"\t2b, 11b\n\t"			\
			STR(PTR)"\t3b, 11b\n\t"			\
			STR(PTR)"\t4b, 11b\n\t"			\
			STR(PTR)"\t5b, 11b\n\t"			\
			STR(PTR)"\t6b, 11b\n\t"			\
			STR(PTR)"\t7b, 11b\n\t"			\
			STR(PTR)"\t8b, 11b\n\t"			\
			: "=&r" (value), "=r" (res)		\
			: "r" (addr), "i" (-EFAULT));

#endif /* !CONFIG_CPU_HAS_LOAD_STORE_LR */
#define _StoreHW(addr, value, res, type) \
		__asm__ __volatile__ (			\
			"1:\t"type##_sb("%1", "1(%2)")"\n"	\
			"srl\t$1, %1, 0x8\n"			\
			"2:\t"type##_sb("$1", "0(%2)")"\n"	\
			".section\t.fixup,\"ax\"\n\t"		\
			"4:\tli\t%0, %3\n\t"			\
			".section\t__ex_table,\"a\"\n\t"	\
			STR(PTR)"\t1b, 4b\n\t"			\
			STR(PTR)"\t2b, 4b\n\t"			\
			: "r" (value), "r" (addr), "i" (-EFAULT));
#ifdef CONFIG_CPU_HAS_LOAD_STORE_LR
#define _StoreW(addr, value, res, type)  \
		__asm__ __volatile__ (			\
			"1:\t"type##_swl("%1", "(%2)")"\n"	\
			"2:\t"type##_swr("%1", "3(%2)")"\n\t"	\
			".section\t.fixup,\"ax\"\n\t"		\
			"4:\tli\t%0, %3\n\t"			\
			".section\t__ex_table,\"a\"\n\t"	\
			STR(PTR)"\t1b, 4b\n\t"			\
			STR(PTR)"\t2b, 4b\n\t"			\
			: "r" (value), "r" (addr), "i" (-EFAULT));

#define _StoreDW(addr, value, res) \
		__asm__ __volatile__ (			\
			"1:\tsdl\t%1, (%2)\n"			\
			"2:\tsdr\t%1, 7(%2)\n\t"		\
			".section\t.fixup,\"ax\"\n\t"		\
			"4:\tli\t%0, %3\n\t"			\
			".section\t__ex_table,\"a\"\n\t"	\
			STR(PTR)"\t1b, 4b\n\t"			\
			STR(PTR)"\t2b, 4b\n\t"			\
			: "r" (value), "r" (addr), "i" (-EFAULT));
#else /* !CONFIG_CPU_HAS_LOAD_STORE_LR */
/* For CPUs without swl and sdl instructions */
#define _StoreW(addr, value, res, type)  \
		__asm__ __volatile__ (			\
			"1:"type##_sb("%1", "3(%2)")"\n\t"	\
			"srl\t$1, %1, 0x8\n\t"			\
			"2:"type##_sb("$1", "2(%2)")"\n\t"	\
			"srl\t$1, $1, 0x8\n\t"			\
			"3:"type##_sb("$1", "1(%2)")"\n\t"	\
			"srl\t$1, $1, 0x8\n\t"			\
			"4:"type##_sb("$1", "0(%2)")"\n\t"	\
			".section\t.fixup,\"ax\"\n\t"		\
			"11:\tli\t%0, %3\n\t"			\
			".section\t__ex_table,\"a\"\n\t"	\
			STR(PTR)"\t1b, 11b\n\t"			\
			STR(PTR)"\t2b, 11b\n\t"			\
			STR(PTR)"\t3b, 11b\n\t"			\
			STR(PTR)"\t4b, 11b\n\t"			\
			: "r" (value), "r" (addr), "i" (-EFAULT));

#define _StoreDW(addr, value, res) \
		__asm__ __volatile__ (			\
			"1:sb\t%1, 7(%2)\n\t"			\
			"dsrl\t$1, %1, 0x8\n\t"			\
			"2:sb\t$1, 6(%2)\n\t"			\
			"dsrl\t$1, $1, 0x8\n\t"			\
			"3:sb\t$1, 5(%2)\n\t"			\
			"dsrl\t$1, $1, 0x8\n\t"			\
			"4:sb\t$1, 4(%2)\n\t"			\
			"dsrl\t$1, $1, 0x8\n\t"			\
			"5:sb\t$1, 3(%2)\n\t"			\
			"dsrl\t$1, $1, 0x8\n\t"			\
			"6:sb\t$1, 2(%2)\n\t"			\
			"dsrl\t$1, $1, 0x8\n\t"			\
			"7:sb\t$1, 1(%2)\n\t"			\
			"dsrl\t$1, $1, 0x8\n\t"			\
			"8:sb\t$1, 0(%2)\n\t"			\
			"dsrl\t$1, $1, 0x8\n\t"			\
			".section\t.fixup,\"ax\"\n\t"		\
			"11:\tli\t%0, %3\n\t"			\
			".section\t__ex_table,\"a\"\n\t"	\
			STR(PTR)"\t1b, 11b\n\t"			\
			STR(PTR)"\t2b, 11b\n\t"			\
			STR(PTR)"\t3b, 11b\n\t"			\
			STR(PTR)"\t4b, 11b\n\t"			\
			STR(PTR)"\t5b, 11b\n\t"			\
			STR(PTR)"\t6b, 11b\n\t"			\
			STR(PTR)"\t7b, 11b\n\t"			\
			STR(PTR)"\t8b, 11b\n\t"			\
			: "r" (value), "r" (addr), "i" (-EFAULT));

#endif /* !CONFIG_CPU_HAS_LOAD_STORE_LR */
#else /* __BIG_ENDIAN */
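
/*
 * The little-endian variants below mirror the big-endian ones above; only
 * the byte offsets within the accessed operand are reversed.
 */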
#define _LoadHW(addr, value, res, type) \
		__asm__ __volatile__ (".set\tnoat\n"	\
			"1:\t"type##_lb("%0", "1(%2)")"\n"	\
			"2:\t"type##_lbu("$1", "0(%2)")"\n\t"	\
			".section\t.fixup,\"ax\"\n\t"		\
			"4:\tli\t%1, %3\n\t"			\
			".section\t__ex_table,\"a\"\n\t"	\
			STR(PTR)"\t1b, 4b\n\t"			\
			STR(PTR)"\t2b, 4b\n\t"			\
			: "=&r" (value), "=r" (res)		\
			: "r" (addr), "i" (-EFAULT));
#ifdef CONFIG_CPU_HAS_LOAD_STORE_LR
#define _LoadW(addr, value, res, type) \
		__asm__ __volatile__ (			\
			"1:\t"type##_lwl("%0", "3(%2)")"\n"	\
			"2:\t"type##_lwr("%0", "(%2)")"\n\t"	\
			".section\t.fixup,\"ax\"\n\t"		\
			"4:\tli\t%1, %3\n\t"			\
			".section\t__ex_table,\"a\"\n\t"	\
			STR(PTR)"\t1b, 4b\n\t"			\
			STR(PTR)"\t2b, 4b\n\t"			\
			: "=&r" (value), "=r" (res)		\
			: "r" (addr), "i" (-EFAULT));

#else /* !CONFIG_CPU_HAS_LOAD_STORE_LR */
/* For CPUs without lwl instruction */
#define _LoadW(addr, value, res, type) \
		__asm__ __volatile__ (			\
			"1:"type##_lb("%0", "3(%2)")"\n\t"	\
			"2:"type##_lbu("$1", "2(%2)")"\n\t"	\
			"3:"type##_lbu("$1", "1(%2)")"\n\t"	\
			"4:"type##_lbu("$1", "0(%2)")"\n\t"	\
			".section\t.fixup,\"ax\"\n\t"		\
			"11:\tli\t%1, %3\n\t"			\
			".section\t__ex_table,\"a\"\n\t"	\
			STR(PTR)"\t1b, 11b\n\t"			\
			STR(PTR)"\t2b, 11b\n\t"			\
			STR(PTR)"\t3b, 11b\n\t"			\
			STR(PTR)"\t4b, 11b\n\t"			\
			: "=&r" (value), "=r" (res)		\
			: "r" (addr), "i" (-EFAULT));

#endif /* !CONFIG_CPU_HAS_LOAD_STORE_LR */
#define _LoadHWU(addr, value, res, type) \
		__asm__ __volatile__ (			\
			"1:\t"type##_lbu("%0", "1(%2)")"\n"	\
			"2:\t"type##_lbu("$1", "0(%2)")"\n\t"	\
			".section\t.fixup,\"ax\"\n\t"		\
			"4:\tli\t%1, %3\n\t"			\
			".section\t__ex_table,\"a\"\n\t"	\
			STR(PTR)"\t1b, 4b\n\t"			\
			STR(PTR)"\t2b, 4b\n\t"			\
			: "=&r" (value), "=r" (res)		\
			: "r" (addr), "i" (-EFAULT));
#ifdef CONFIG_CPU_HAS_LOAD_STORE_LR
#define _LoadWU(addr, value, res, type) \
		__asm__ __volatile__ (			\
			"1:\t"type##_lwl("%0", "3(%2)")"\n"	\
			"2:\t"type##_lwr("%0", "(%2)")"\n\t"	\
			"dsll\t%0, %0, 32\n\t"			\
			"dsrl\t%0, %0, 32\n\t"			\
			"\t.section\t.fixup,\"ax\"\n\t"		\
			"4:\tli\t%1, %3\n\t"			\
			".section\t__ex_table,\"a\"\n\t"	\
			STR(PTR)"\t1b, 4b\n\t"			\
			STR(PTR)"\t2b, 4b\n\t"			\
			: "=&r" (value), "=r" (res)		\
			: "r" (addr), "i" (-EFAULT));

#define _LoadDW(addr, value, res)  \
		__asm__ __volatile__ (			\
			"1:\tldl\t%0, 7(%2)\n"			\
			"2:\tldr\t%0, (%2)\n\t"			\
			"\t.section\t.fixup,\"ax\"\n\t"		\
			"4:\tli\t%1, %3\n\t"			\
			".section\t__ex_table,\"a\"\n\t"	\
			STR(PTR)"\t1b, 4b\n\t"			\
			STR(PTR)"\t2b, 4b\n\t"			\
			: "=&r" (value), "=r" (res)		\
			: "r" (addr), "i" (-EFAULT));
#else /* !CONFIG_CPU_HAS_LOAD_STORE_LR */
/* For CPUs without lwl and ldl instructions */
#define _LoadWU(addr, value, res, type) \
		__asm__ __volatile__ (			\
			"1:"type##_lbu("%0", "3(%2)")"\n\t"	\
			"2:"type##_lbu("$1", "2(%2)")"\n\t"	\
			"3:"type##_lbu("$1", "1(%2)")"\n\t"	\
			"4:"type##_lbu("$1", "0(%2)")"\n\t"	\
			".section\t.fixup,\"ax\"\n\t"		\
			"11:\tli\t%1, %3\n\t"			\
			".section\t__ex_table,\"a\"\n\t"	\
			STR(PTR)"\t1b, 11b\n\t"			\
			STR(PTR)"\t2b, 11b\n\t"			\
			STR(PTR)"\t3b, 11b\n\t"			\
			STR(PTR)"\t4b, 11b\n\t"			\
			: "=&r" (value), "=r" (res)		\
			: "r" (addr), "i" (-EFAULT));

#define _LoadDW(addr, value, res)  \
		__asm__ __volatile__ (			\
			"1:lb\t%0, 7(%2)\n\t"			\
			"2:lbu\t$1, 6(%2)\n\t"			\
			"dsll\t%0, 0x8\n\t"			\
			"3:lbu\t$1, 5(%2)\n\t"			\
			"dsll\t%0, 0x8\n\t"			\
			"4:lbu\t$1, 4(%2)\n\t"			\
			"dsll\t%0, 0x8\n\t"			\
			"5:lbu\t$1, 3(%2)\n\t"			\
			"dsll\t%0, 0x8\n\t"			\
			"6:lbu\t$1, 2(%2)\n\t"			\
			"dsll\t%0, 0x8\n\t"			\
			"7:lbu\t$1, 1(%2)\n\t"			\
			"dsll\t%0, 0x8\n\t"			\
			"8:lbu\t$1, 0(%2)\n\t"			\
			"dsll\t%0, 0x8\n\t"			\
			".section\t.fixup,\"ax\"\n\t"		\
			"11:\tli\t%1, %3\n\t"			\
			".section\t__ex_table,\"a\"\n\t"	\
			STR(PTR)"\t1b, 11b\n\t"			\
			STR(PTR)"\t2b, 11b\n\t"			\
			STR(PTR)"\t3b, 11b\n\t"			\
			STR(PTR)"\t4b, 11b\n\t"			\
			STR(PTR)"\t5b, 11b\n\t"			\
			STR(PTR)"\t6b, 11b\n\t"			\
			STR(PTR)"\t7b, 11b\n\t"			\
			STR(PTR)"\t8b, 11b\n\t"			\
			: "=&r" (value), "=r" (res)		\
			: "r" (addr), "i" (-EFAULT));

#endif /* !CONFIG_CPU_HAS_LOAD_STORE_LR */
#define _StoreHW(addr, value, res, type) \
		__asm__ __volatile__ (			\
			"1:\t"type##_sb("%1", "0(%2)")"\n"	\
			"srl\t$1, %1, 0x8\n"			\
			"2:\t"type##_sb("$1", "1(%2)")"\n"	\
			".section\t.fixup,\"ax\"\n\t"		\
			"4:\tli\t%0, %3\n\t"			\
			".section\t__ex_table,\"a\"\n\t"	\
			STR(PTR)"\t1b, 4b\n\t"			\
			STR(PTR)"\t2b, 4b\n\t"			\
			: "r" (value), "r" (addr), "i" (-EFAULT));
#ifdef CONFIG_CPU_HAS_LOAD_STORE_LR
#define _StoreW(addr, value, res, type) \
		__asm__ __volatile__ (			\
			"1:\t"type##_swl("%1", "3(%2)")"\n"	\
			"2:\t"type##_swr("%1", "(%2)")"\n\t"	\
			".section\t.fixup,\"ax\"\n\t"		\
			"4:\tli\t%0, %3\n\t"			\
			".section\t__ex_table,\"a\"\n\t"	\
			STR(PTR)"\t1b, 4b\n\t"			\
			STR(PTR)"\t2b, 4b\n\t"			\
			: "r" (value), "r" (addr), "i" (-EFAULT));

#define _StoreDW(addr, value, res) \
		__asm__ __volatile__ (			\
			"1:\tsdl\t%1, 7(%2)\n"			\
			"2:\tsdr\t%1, (%2)\n\t"			\
			".section\t.fixup,\"ax\"\n\t"		\
			"4:\tli\t%0, %3\n\t"			\
			".section\t__ex_table,\"a\"\n\t"	\
			STR(PTR)"\t1b, 4b\n\t"			\
			STR(PTR)"\t2b, 4b\n\t"			\
			: "r" (value), "r" (addr), "i" (-EFAULT));
#else /* !CONFIG_CPU_HAS_LOAD_STORE_LR */
/* For CPUs without swl and sdl instructions */
#define _StoreW(addr, value, res, type) \
		__asm__ __volatile__ (			\
			"1:"type##_sb("%1", "0(%2)")"\n\t"	\
			"srl\t$1, %1, 0x8\n\t"			\
			"2:"type##_sb("$1", "1(%2)")"\n\t"	\
			"srl\t$1, $1, 0x8\n\t"			\
			"3:"type##_sb("$1", "2(%2)")"\n\t"	\
			"srl\t$1, $1, 0x8\n\t"			\
			"4:"type##_sb("$1", "3(%2)")"\n\t"	\
			".section\t.fixup,\"ax\"\n\t"		\
			"11:\tli\t%0, %3\n\t"			\
			".section\t__ex_table,\"a\"\n\t"	\
			STR(PTR)"\t1b, 11b\n\t"			\
			STR(PTR)"\t2b, 11b\n\t"			\
			STR(PTR)"\t3b, 11b\n\t"			\
			STR(PTR)"\t4b, 11b\n\t"			\
			: "r" (value), "r" (addr), "i" (-EFAULT));

#define _StoreDW(addr, value, res) \
		__asm__ __volatile__ (			\
			"1:sb\t%1, 0(%2)\n\t"			\
			"dsrl\t$1, %1, 0x8\n\t"			\
			"2:sb\t$1, 1(%2)\n\t"			\
			"dsrl\t$1, $1, 0x8\n\t"			\
			"3:sb\t$1, 2(%2)\n\t"			\
			"dsrl\t$1, $1, 0x8\n\t"			\
			"4:sb\t$1, 3(%2)\n\t"			\
			"dsrl\t$1, $1, 0x8\n\t"			\
			"5:sb\t$1, 4(%2)\n\t"			\
			"dsrl\t$1, $1, 0x8\n\t"			\
			"6:sb\t$1, 5(%2)\n\t"			\
			"dsrl\t$1, $1, 0x8\n\t"			\
			"7:sb\t$1, 6(%2)\n\t"			\
			"dsrl\t$1, $1, 0x8\n\t"			\
			"8:sb\t$1, 7(%2)\n\t"			\
			"dsrl\t$1, $1, 0x8\n\t"			\
			".section\t.fixup,\"ax\"\n\t"		\
			"11:\tli\t%0, %3\n\t"			\
			".section\t__ex_table,\"a\"\n\t"	\
			STR(PTR)"\t1b, 11b\n\t"			\
			STR(PTR)"\t2b, 11b\n\t"			\
			STR(PTR)"\t3b, 11b\n\t"			\
			STR(PTR)"\t4b, 11b\n\t"			\
			STR(PTR)"\t5b, 11b\n\t"			\
			STR(PTR)"\t6b, 11b\n\t"			\
			STR(PTR)"\t7b, 11b\n\t"			\
			STR(PTR)"\t8b, 11b\n\t"			\
			: "r" (value), "r" (addr), "i" (-EFAULT));

#endif /* !CONFIG_CPU_HAS_LOAD_STORE_LR */
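
/*
 * Wrappers: the unsuffixed forms use the normal kernel load/store
 * instructions, while the E-suffixed forms expand to the EVA user
 * variants (LBE/LBUE/LHE/... under CONFIG_EVA) so that user addresses
 * are accessed with user privileges.
 */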
#define LoadHWU(addr, value, res)	_LoadHWU(addr, value, res, kernel)
#define LoadHWUE(addr, value, res)	_LoadHWU(addr, value, res, user)
#define LoadWU(addr, value, res)	_LoadWU(addr, value, res, kernel)
#define LoadWUE(addr, value, res)	_LoadWU(addr, value, res, user)
#define LoadHW(addr, value, res)	_LoadHW(addr, value, res, kernel)
#define LoadHWE(addr, value, res)	_LoadHW(addr, value, res, user)
#define LoadW(addr, value, res)		_LoadW(addr, value, res, kernel)
#define LoadWE(addr, value, res)	_LoadW(addr, value, res, user)
#define LoadDW(addr, value, res)	_LoadDW(addr, value, res)

#define StoreHW(addr, value, res)	_StoreHW(addr, value, res, kernel)
#define StoreHWE(addr, value, res)	_StoreHW(addr, value, res, user)
#define StoreW(addr, value, res)	_StoreW(addr, value, res, kernel)
#define StoreWE(addr, value, res)	_StoreW(addr, value, res, user)
#define StoreDW(addr, value, res)	_StoreDW(addr, value, res)
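
/*
 * emulate_load_store_insn() decodes the classic-MIPS instruction that
 * caused the address error and redoes the access with the fixup-protected
 * macros above, advancing the EPC past the emulated instruction on
 * success.
 */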
static void emulate_load_store_insn(struct pt_regs *regs,
	void __user *addr, unsigned int __user *pc)
{
	unsigned long origpc, orig31, value;
	union mips_instruction insn;
	unsigned int res;

	origpc = (unsigned long)pc;
	orig31 = regs->regs[31];

	perf_sw_event(PERF_COUNT_SW_EMULATION_FAULTS, 1, regs, 0);

	/*
	 * This load never faults.
	 */
	__get_user(insn.word, pc);
	switch (insn.i_format.opcode) {
	/*
	 * These are instructions that a compiler doesn't generate.  We
	 * can assume therefore that the code is MIPS-aware and
	 * really buggy.  Emulating these instructions would break the
	 * semantics anyway.
	 */

	/*
	 * For these instructions the only way to create an address
	 * error is an attempted access to kernel/supervisor address
	 * space.
	 */

	/*
	 * The remaining opcodes are the ones that are really of
	 * interest.
	 */
	case spec3_op:
		if (insn.dsp_format.func == lx_op) {
			switch (insn.dsp_format.op) {
			case lwx_op:
				if (!access_ok(addr, 4))
					goto sigbus;
				LoadW(addr, value, res);
				compute_return_epc(regs);
				regs->regs[insn.dsp_format.rd] = value;
				break;
			case lhx_op:
				if (!access_ok(addr, 2))
					goto sigbus;
				LoadHW(addr, value, res);
				compute_return_epc(regs);
				regs->regs[insn.dsp_format.rd] = value;
				break;
			default:
				goto sigill;
			}
		}
		/*
		 * we can land here only from kernel accessing user
		 * memory, so we need to "switch" the address limit to
		 * user space, so that address check can work properly.
		 */
		switch (insn.spec3_format.func) {
		case lhe_op:
			if (!access_ok(addr, 2)) {
				goto sigbus;
			}
			LoadHWE(addr, value, res);
			compute_return_epc(regs);
			regs->regs[insn.spec3_format.rt] = value;
			break;
		case lwe_op:
			if (!access_ok(addr, 4)) {
				goto sigbus;
			}
			LoadWE(addr, value, res);
			compute_return_epc(regs);
			regs->regs[insn.spec3_format.rt] = value;
			break;
		case lhue_op:
			if (!access_ok(addr, 2)) {
				goto sigbus;
			}
			LoadHWUE(addr, value, res);
			compute_return_epc(regs);
			regs->regs[insn.spec3_format.rt] = value;
			break;
		case she_op:
			if (!access_ok(addr, 2)) {
				goto sigbus;
			}
			compute_return_epc(regs);
			value = regs->regs[insn.spec3_format.rt];
			StoreHWE(addr, value, res);
			break;
		case swe_op:
			if (!access_ok(addr, 4)) {
				goto sigbus;
			}
			compute_return_epc(regs);
			value = regs->regs[insn.spec3_format.rt];
			StoreWE(addr, value, res);
			break;
		}
	case lh_op:
		if (!access_ok(addr, 2))
			goto sigbus;

		if (IS_ENABLED(CONFIG_EVA)) {
			if (uaccess_kernel())
				LoadHW(addr, value, res);
			else
				LoadHWE(addr, value, res);
		} else {
			LoadHW(addr, value, res);
		}

		compute_return_epc(regs);
		regs->regs[insn.i_format.rt] = value;
		break;

	case lw_op:
		if (!access_ok(addr, 4))
			goto sigbus;

		if (IS_ENABLED(CONFIG_EVA)) {
			if (uaccess_kernel())
				LoadW(addr, value, res);
			else
				LoadWE(addr, value, res);
		} else {
			LoadW(addr, value, res);
		}

		compute_return_epc(regs);
		regs->regs[insn.i_format.rt] = value;
		break;

	case lhu_op:
		if (!access_ok(addr, 2))
			goto sigbus;

		if (IS_ENABLED(CONFIG_EVA)) {
			if (uaccess_kernel())
				LoadHWU(addr, value, res);
			else
				LoadHWUE(addr, value, res);
		} else {
			LoadHWU(addr, value, res);
		}

		compute_return_epc(regs);
		regs->regs[insn.i_format.rt] = value;
		break;
	case lwu_op:
#ifdef CONFIG_64BIT
		/*
		 * A 32-bit kernel might be running on a 64-bit processor.  But
		 * if we're on a 32-bit processor and an i-cache incoherency
		 * or race makes us see a 64-bit instruction here the sdl/sdr
		 * would blow up, so for now we don't handle unaligned 64-bit
		 * instructions on 32-bit kernels.
		 */
		if (!access_ok(addr, 4))
			goto sigbus;

		LoadWU(addr, value, res);
		compute_return_epc(regs);
		regs->regs[insn.i_format.rt] = value;
		break;
#endif /* CONFIG_64BIT */

		/* Cannot handle 64-bit instructions in 32-bit kernel */
		goto sigill;
	case ld_op:
#ifdef CONFIG_64BIT
		/*
		 * A 32-bit kernel might be running on a 64-bit processor.  But
		 * if we're on a 32-bit processor and an i-cache incoherency
		 * or race makes us see a 64-bit instruction here the sdl/sdr
		 * would blow up, so for now we don't handle unaligned 64-bit
		 * instructions on 32-bit kernels.
		 */
		if (!access_ok(addr, 8))
			goto sigbus;

		LoadDW(addr, value, res);
		compute_return_epc(regs);
		regs->regs[insn.i_format.rt] = value;
		break;
#endif /* CONFIG_64BIT */

		/* Cannot handle 64-bit instructions in 32-bit kernel */
		goto sigill;
	case sh_op:
		if (!access_ok(addr, 2))
			goto sigbus;

		compute_return_epc(regs);
		value = regs->regs[insn.i_format.rt];

		if (IS_ENABLED(CONFIG_EVA)) {
			if (uaccess_kernel())
				StoreHW(addr, value, res);
			else
				StoreHWE(addr, value, res);
		} else {
			StoreHW(addr, value, res);
		}
		break;

	case sw_op:
		if (!access_ok(addr, 4))
			goto sigbus;

		compute_return_epc(regs);
		value = regs->regs[insn.i_format.rt];

		if (IS_ENABLED(CONFIG_EVA)) {
			if (uaccess_kernel())
				StoreW(addr, value, res);
			else
				StoreWE(addr, value, res);
		} else {
			StoreW(addr, value, res);
		}
		break;
	case sd_op:
#ifdef CONFIG_64BIT
		/*
		 * A 32-bit kernel might be running on a 64-bit processor.  But
		 * if we're on a 32-bit processor and an i-cache incoherency
		 * or race makes us see a 64-bit instruction here the sdl/sdr
		 * would blow up, so for now we don't handle unaligned 64-bit
		 * instructions on 32-bit kernels.
		 */
		if (!access_ok(addr, 8))
			goto sigbus;

		compute_return_epc(regs);
		value = regs->regs[insn.i_format.rt];
		StoreDW(addr, value, res);
		break;
#endif /* CONFIG_64BIT */

		/* Cannot handle 64-bit instructions in 32-bit kernel */
		goto sigill;
#ifdef CONFIG_MIPS_FP_SUPPORT

	case lwc1_op:
	case ldc1_op:
	case swc1_op:
	case sdc1_op:
	case cop1x_op: {
		void __user *fault_addr = NULL;

		die_if_kernel("Unaligned FP access in kernel code", regs);
		BUG_ON(!used_math());

		res = fpu_emulator_cop1Handler(regs, &current->thread.fpu, 1,
					       &fault_addr);
		own_fpu(1);	/* Restore FPU state. */

		/* Signal if something went wrong. */
		process_fpemu_return(res, fault_addr, 0);
		break;
	}
#endif /* CONFIG_MIPS_FP_SUPPORT */
#ifdef CONFIG_CPU_HAS_MSA

	case msa_op: {
		unsigned int wd, preempted;
		enum msa_2b_fmt df;
		union fpureg *fpr;

		/*
		 * If we've reached this point then userland should have taken
		 * the MSA disabled exception & initialised vector context at
		 * some point in the past.
		 */
		BUG_ON(!thread_msa_context_live());

		df = insn.msa_mi10_format.df;
		wd = insn.msa_mi10_format.wd;
		fpr = &current->thread.fpu.fpr[wd];
		switch (insn.msa_mi10_format.func) {
		case msa_ld_op:
			if (!access_ok(addr, sizeof(*fpr)))
				goto sigbus;

			do {
				/*
				 * If we have live MSA context keep track of
				 * whether we get preempted in order to avoid
				 * the register context we load being clobbered
				 * by the live context as it's saved during
				 * preemption. If we don't have live context
				 * then it can't be saved to clobber the value
				 * we load.
				 */
				preempted = test_thread_flag(TIF_USEDMSA);

				res = __copy_from_user_inatomic(fpr, addr,
								sizeof(*fpr));

				/*
				 * Update the hardware register if it is in use
				 * by the task in this quantum, in order to
				 * avoid having to save & restore the whole
				 * vector context.
				 */
				if (test_thread_flag(TIF_USEDMSA)) {
					write_msa_wr(wd, fpr, df);
					preempted = 0;
				}
			} while (preempted);
			break;
		case msa_st_op:
			if (!access_ok(addr, sizeof(*fpr)))
				goto sigbus;

			/*
			 * Update from the hardware register if it is in use by
			 * the task in this quantum, in order to avoid having to
			 * save & restore the whole vector context.
			 */
			if (test_thread_flag(TIF_USEDMSA))
				read_msa_wr(wd, fpr, df);

			res = __copy_to_user_inatomic(addr, fpr, sizeof(*fpr));
			break;

		default:
			goto sigbus;
		}

		compute_return_epc(regs);
		break;
	}
#endif /* CONFIG_CPU_HAS_MSA */
#ifndef CONFIG_CPU_MIPSR6
	/*
	 * COP2 is available to implementor for application specific use.
	 * It's up to applications to register a notifier chain and do
	 * whatever they have to do, including possible sending of signals.
	 *
	 * This instruction has been reallocated in Release 6.
	 */
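	/*
	 * A platform can hook these events via the COP2 notifier chain.
	 * A minimal sketch (assuming <asm/cop2.h>, whose cu2_notifier()
	 * helper registers on that chain; 'my_cu2_call' is a hypothetical
	 * handler):
	 *
	 * static int my_cu2_call(struct notifier_block *nfb,
	 *                        unsigned long action, void *data)
	 * {
	 *         switch (action) {
	 *         case CU2_LWC2_OP:
	 *                 ...     // emulate, or send a signal
	 *                 return NOTIFY_STOP;
	 *         }
	 *         return NOTIFY_OK;
	 * }
	 *
	 * static int __init my_cu2_setup(void)
	 * {
	 *         return cu2_notifier(my_cu2_call, 0);
	 * }
	 */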
	case lwc2_op:
		cu2_notifier_call_chain(CU2_LWC2_OP, regs);
		break;

	case ldc2_op:
		cu2_notifier_call_chain(CU2_LDC2_OP, regs);
		break;

	case swc2_op:
		cu2_notifier_call_chain(CU2_SWC2_OP, regs);
		break;

	case sdc2_op:
		cu2_notifier_call_chain(CU2_SDC2_OP, regs);
		break;
#endif /* !CONFIG_CPU_MIPSR6 */
	default:
		/*
		 * Pheeee...  We encountered a yet unknown instruction or
		 * cache coherence problem.  Die sucker, die ...
		 */
		goto sigill;
	}
#ifdef CONFIG_DEBUG_FS
	unaligned_instructions++;
#endif

	return;

fault:
	/* roll back jump/branch */
	regs->cp0_epc = origpc;
	regs->regs[31] = orig31;
	/* Did we have an exception handler installed? */
	if (fixup_exception(regs))
		return;

	die_if_kernel("Unhandled kernel unaligned access", regs);
	force_sig(SIGSEGV, current);

	return;

sigbus:
	die_if_kernel("Unhandled kernel unaligned access", regs);
	force_sig(SIGBUS, current);

	return;

sigill:
	die_if_kernel
	    ("Unhandled kernel unaligned access or invalid instruction", regs);
	force_sig(SIGILL, current);
}
/* Recode table from 16-bit register notation to 32-bit GPR. */
static const int reg16to32[] = { 16, 17, 2, 3, 4, 5, 6, 7 };

/* Recode table from 16-bit STORE register notation to 32-bit GPR. */
static const int reg16to32st[] = { 0, 17, 2, 3, 4, 5, 6, 7 };
static void emulate_load_store_microMIPS(struct pt_regs *regs,
					 void __user *addr)
{
	unsigned long value;
	unsigned int res;
	int i;
	unsigned int reg = 0, rvar;
	unsigned long orig31;
	unsigned short __user *pc16;
	unsigned short halfword;
	unsigned int word;
	unsigned long origpc, contpc;
	union mips_instruction insn;
	struct mm_decoded_insn mminsn;

	origpc = regs->cp0_epc;
	orig31 = regs->regs[31];

	mminsn.micro_mips_mode = 1;

	/*
	 * This load never faults.
	 */
	pc16 = (unsigned short __user *)msk_isa16_mode(regs->cp0_epc);
	__get_user(halfword, pc16);
	pc16++;
	contpc = regs->cp0_epc + 2;
	word = ((unsigned int)halfword << 16);
	mminsn.pc_inc = 2;
	if (!mm_insn_16bit(halfword)) {
		__get_user(halfword, pc16);
		pc16++;
		contpc = regs->cp0_epc + 4;
		mminsn.pc_inc = 4;
		word |= halfword;
	}
	mminsn.insn = word;

	if (get_user(halfword, pc16))
		goto fault;
	pc16++;
	mminsn.next_pc_inc = 2;
	word = ((unsigned int)halfword << 16);

	if (!mm_insn_16bit(halfword)) {
		if (get_user(halfword, pc16))
			goto fault;
		mminsn.next_pc_inc = 4;
		word |= halfword;
	}
	mminsn.next_insn = word;

	insn = (union mips_instruction)(mminsn.insn);
	if (mm_isBranchInstr(regs, mminsn, &contpc))
		insn = (union mips_instruction)(mminsn.next_insn);
	/* Parse instruction to find what to do */

	switch (insn.mm_i_format.opcode) {
		switch (insn.mm_x_format.func) {
			reg = insn.mm_x_format.rd;

		switch (insn.mm_m_format.func) {
			reg = insn.mm_m_format.rd;
			if (!access_ok(addr, 8))
				goto sigbus;

			LoadW(addr, value, res);
			regs->regs[reg] = value;
			addr += 4;
			LoadW(addr, value, res);
			regs->regs[reg + 1] = value;

			reg = insn.mm_m_format.rd;
			if (!access_ok(addr, 8))
				goto sigbus;

			value = regs->regs[reg];
			StoreW(addr, value, res);
			addr += 4;
			value = regs->regs[reg + 1];
			StoreW(addr, value, res);

#ifdef CONFIG_64BIT
			reg = insn.mm_m_format.rd;
			if (!access_ok(addr, 16))
				goto sigbus;

			LoadDW(addr, value, res);
			regs->regs[reg] = value;
			addr += 8;
			LoadDW(addr, value, res);
			regs->regs[reg + 1] = value;
#endif /* CONFIG_64BIT */

#ifdef CONFIG_64BIT
			reg = insn.mm_m_format.rd;
			if (!access_ok(addr, 16))
				goto sigbus;

			value = regs->regs[reg];
			StoreDW(addr, value, res);
			addr += 8;
			value = regs->regs[reg + 1];
			StoreDW(addr, value, res);
#endif /* CONFIG_64BIT */
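
			/*
			 * LWM/SWM (and LDM/SDM) register lists: judging from
			 * the decoding below, the low nibble of 'rd' gives
			 * the number of registers starting at $16, the list
			 * value 9 additionally selects $30, and bit 4
			 * selects $31; anything else is rejected with
			 * SIGILL.
			 */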
			reg = insn.mm_m_format.rd;
			rvar = reg & 0xf;
			if ((rvar > 9) || !reg)
				goto sigill;
			if (reg & 0x10) {
				if (!access_ok(addr, 4 * (rvar + 1)))
					goto sigbus;
			} else {
				if (!access_ok(addr, 4 * rvar))
					goto sigbus;
			}
			for (i = 16; rvar; rvar--, i++) {
				LoadW(addr, value, res);
				addr += 4;
				regs->regs[i] = value;
			}
			if ((reg & 0xf) == 9) {
				LoadW(addr, value, res);
				addr += 4;
				regs->regs[30] = value;
			}
			if (reg & 0x10) {
				LoadW(addr, value, res);
				regs->regs[31] = value;
			}

			reg = insn.mm_m_format.rd;
			rvar = reg & 0xf;
			if ((rvar > 9) || !reg)
				goto sigill;
			if (reg & 0x10) {
				if (!access_ok(addr, 4 * (rvar + 1)))
					goto sigbus;
			} else {
				if (!access_ok(addr, 4 * rvar))
					goto sigbus;
			}
			for (i = 16; rvar; rvar--, i++) {
				value = regs->regs[i];
				StoreW(addr, value, res);
				addr += 4;
			}
			if ((reg & 0xf) == 9) {
				value = regs->regs[30];
				StoreW(addr, value, res);
				addr += 4;
			}
			if (reg & 0x10) {
				value = regs->regs[31];
				StoreW(addr, value, res);
			}

#ifdef CONFIG_64BIT
			reg = insn.mm_m_format.rd;
			rvar = reg & 0xf;
			if ((rvar > 9) || !reg)
				goto sigill;
			if (reg & 0x10) {
				if (!access_ok(addr, 8 * (rvar + 1)))
					goto sigbus;
			} else {
				if (!access_ok(addr, 8 * rvar))
					goto sigbus;
			}
			for (i = 16; rvar; rvar--, i++) {
				LoadDW(addr, value, res);
				addr += 8;
				regs->regs[i] = value;
			}
			if ((reg & 0xf) == 9) {
				LoadDW(addr, value, res);
				addr += 8;
				regs->regs[30] = value;
			}
			if (reg & 0x10) {
				LoadDW(addr, value, res);
				regs->regs[31] = value;
			}
#endif /* CONFIG_64BIT */

#ifdef CONFIG_64BIT
			reg = insn.mm_m_format.rd;
			rvar = reg & 0xf;
			if ((rvar > 9) || !reg)
				goto sigill;
			if (reg & 0x10) {
				if (!access_ok(addr, 8 * (rvar + 1)))
					goto sigbus;
			} else {
				if (!access_ok(addr, 8 * rvar))
					goto sigbus;
			}
			for (i = 16; rvar; rvar--, i++) {
				value = regs->regs[i];
				StoreDW(addr, value, res);
				addr += 8;
			}
			if ((reg & 0xf) == 9) {
				value = regs->regs[30];
				StoreDW(addr, value, res);
				addr += 8;
			}
			if (reg & 0x10) {
				value = regs->regs[31];
				StoreDW(addr, value, res);
			}
#endif /* CONFIG_64BIT */
		/* LWC2, SWC2, LDC2, SDC2 are not serviced */
		goto sigbus;

		switch (insn.mm_m_format.func) {
			reg = insn.mm_m_format.rd;
		/* LL, SC, LLD, SCD are not serviced */
		goto sigbus;

#ifdef CONFIG_MIPS_FP_SUPPORT
		switch (insn.mm_x_format.func) {
	case mm_swc132_op: {
		void __user *fault_addr = NULL;

		/* roll back jump/branch */
		regs->cp0_epc = origpc;
		regs->regs[31] = orig31;

		die_if_kernel("Unaligned FP access in kernel code", regs);
		BUG_ON(!used_math());
		BUG_ON(!is_fpu_owner());

		res = fpu_emulator_cop1Handler(regs, &current->thread.fpu, 1,
					       &fault_addr);
		own_fpu(1);	/* restore FPU state */

		/* If something went wrong, signal */
		process_fpemu_return(res, fault_addr, 0);
	}
#endif /* CONFIG_MIPS_FP_SUPPORT */
		reg = insn.mm_i_format.rt;

		reg = insn.mm_i_format.rt;

		reg = insn.mm_i_format.rt;

		reg = insn.mm_i_format.rt;

		reg = insn.mm_i_format.rt;

		reg = insn.mm_i_format.rt;

		reg = insn.mm_i_format.rt;
		switch (insn.mm16_m_format.func) {
			reg = insn.mm16_m_format.rlist;
			rvar = reg + 1;
			if (!access_ok(addr, 4 * rvar))
				goto sigbus;

			for (i = 16; rvar; rvar--, i++) {
				LoadW(addr, value, res);
				addr += 4;
				regs->regs[i] = value;
			}
			LoadW(addr, value, res);
			regs->regs[31] = value;

			reg = insn.mm16_m_format.rlist;
			rvar = reg + 1;
			if (!access_ok(addr, 4 * rvar))
				goto sigbus;

			for (i = 16; rvar; rvar--, i++) {
				value = regs->regs[i];
				StoreW(addr, value, res);
				addr += 4;
			}
			value = regs->regs[31];
			StoreW(addr, value, res);
		reg = reg16to32[insn.mm16_rb_format.rt];

		reg = reg16to32[insn.mm16_rb_format.rt];

		reg = reg16to32st[insn.mm16_rb_format.rt];

		reg = reg16to32st[insn.mm16_rb_format.rt];

		reg = insn.mm16_r5_format.rt;

		reg = insn.mm16_r5_format.rt;

		reg = reg16to32[insn.mm16_r3_format.rt];

	if (!access_ok(addr, 2))
		goto sigbus;

	LoadHW(addr, value, res);
	regs->regs[reg] = value;

	if (!access_ok(addr, 2))
		goto sigbus;

	LoadHWU(addr, value, res);
	regs->regs[reg] = value;

	if (!access_ok(addr, 4))
		goto sigbus;

	LoadW(addr, value, res);
	regs->regs[reg] = value;
#ifdef CONFIG_64BIT
	/*
	 * A 32-bit kernel might be running on a 64-bit processor.  But
	 * if we're on a 32-bit processor and an i-cache incoherency
	 * or race makes us see a 64-bit instruction here the sdl/sdr
	 * would blow up, so for now we don't handle unaligned 64-bit
	 * instructions on 32-bit kernels.
	 */
	if (!access_ok(addr, 4))
		goto sigbus;

	LoadWU(addr, value, res);
	regs->regs[reg] = value;
#endif /* CONFIG_64BIT */

	/* Cannot handle 64-bit instructions in 32-bit kernel */
	goto sigill;
#ifdef CONFIG_64BIT
	/*
	 * A 32-bit kernel might be running on a 64-bit processor.  But
	 * if we're on a 32-bit processor and an i-cache incoherency
	 * or race makes us see a 64-bit instruction here the sdl/sdr
	 * would blow up, so for now we don't handle unaligned 64-bit
	 * instructions on 32-bit kernels.
	 */
	if (!access_ok(addr, 8))
		goto sigbus;

	LoadDW(addr, value, res);
	regs->regs[reg] = value;
#endif /* CONFIG_64BIT */

	/* Cannot handle 64-bit instructions in 32-bit kernel */
	goto sigill;
	if (!access_ok(addr, 2))
		goto sigbus;

	value = regs->regs[reg];
	StoreHW(addr, value, res);

	if (!access_ok(addr, 4))
		goto sigbus;

	value = regs->regs[reg];
	StoreW(addr, value, res);
#ifdef CONFIG_64BIT
	/*
	 * A 32-bit kernel might be running on a 64-bit processor.  But
	 * if we're on a 32-bit processor and an i-cache incoherency
	 * or race makes us see a 64-bit instruction here the sdl/sdr
	 * would blow up, so for now we don't handle unaligned 64-bit
	 * instructions on 32-bit kernels.
	 */
	if (!access_ok(addr, 8))
		goto sigbus;

	value = regs->regs[reg];
	StoreDW(addr, value, res);
#endif /* CONFIG_64BIT */

	/* Cannot handle 64-bit instructions in 32-bit kernel */
	goto sigill;
	regs->cp0_epc = contpc;	/* advance or branch */

#ifdef CONFIG_DEBUG_FS
	unaligned_instructions++;
#endif

	return;

fault:
	/* roll back jump/branch */
	regs->cp0_epc = origpc;
	regs->regs[31] = orig31;
	/* Did we have an exception handler installed? */
	if (fixup_exception(regs))
		return;

	die_if_kernel("Unhandled kernel unaligned access", regs);
	force_sig(SIGSEGV, current);

	return;

sigbus:
	die_if_kernel("Unhandled kernel unaligned access", regs);
	force_sig(SIGBUS, current);

	return;

sigill:
	die_if_kernel
	    ("Unhandled kernel unaligned access or invalid instruction", regs);
	force_sig(SIGILL, current);
}
static void emulate_load_store_MIPS16e(struct pt_regs *regs, void __user *addr)
{
	unsigned long value;
	unsigned int res;
	int reg;
	unsigned long orig31;
	unsigned short __user *pc16;
	unsigned long origpc;
	union mips16e_instruction mips16inst, oldinst;
	unsigned int opcode;
	int extended = 0;

	origpc = regs->cp0_epc;
	orig31 = regs->regs[31];
	pc16 = (unsigned short __user *)msk_isa16_mode(origpc);
	/*
	 * This load never faults.
	 */
	__get_user(mips16inst.full, pc16);
	oldinst = mips16inst;

	/* skip EXTEND instruction */
	if (mips16inst.ri.opcode == MIPS16e_extend_op) {
		extended = 1;
		pc16++;
		__get_user(mips16inst.full, pc16);
	} else if (delay_slot(regs)) {
		/* skip jump instructions */
		/* JAL/JALX are 32 bits but have OPCODE in first short int */
		if (mips16inst.ri.opcode == MIPS16e_jal_op)
			pc16++;
		pc16++;
		if (get_user(mips16inst.full, pc16))
			goto sigbus;
	}
	opcode = mips16inst.ri.opcode;
	switch (opcode) {
	case MIPS16e_i64_op:	/* I64 or RI64 instruction */
		switch (mips16inst.i64.func) {	/* I64/RI64 func field check */

		case MIPS16e_ldpc_func:
		case MIPS16e_ldsp_func:
			reg = reg16to32[mips16inst.ri64.ry];
			goto loadDW;

		case MIPS16e_sdsp_func:
			reg = reg16to32[mips16inst.ri64.ry];
			goto writeDW;

		case MIPS16e_sdrasp_func:
			reg = 29;	/* GPRSP */
			goto writeDW;
		}

		goto sigbus;
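
	/*
	 * With MIPS16e2, extended LWSP/SWSP encodings can denote halfword
	 * accesses via imm[7:5]; the recoding of 'opcode' below routes
	 * those to the lh/lhu/sh handlers.
	 */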
	case MIPS16e_swsp_op:
		reg = reg16to32[mips16inst.ri.rx];
		if (extended && cpu_has_mips16e2)
			switch (mips16inst.ri.imm >> 5) {
				opcode = MIPS16e_sh_op;
			}
		break;

	case MIPS16e_lwpc_op:
		reg = reg16to32[mips16inst.ri.rx];
		break;

	case MIPS16e_lwsp_op:
		reg = reg16to32[mips16inst.ri.rx];
		if (extended && cpu_has_mips16e2)
			switch (mips16inst.ri.imm >> 5) {
				opcode = MIPS16e_lh_op;

				opcode = MIPS16e_lhu_op;
			}
		break;

	case MIPS16e_i8_op:
		if (mips16inst.i8.func != MIPS16e_swrasp_func)
			goto sigbus;
		reg = 29;	/* GPRSP */
		break;

	default:
		reg = reg16to32[mips16inst.rri.ry];
		break;
	}
	switch (opcode) {

	case MIPS16e_lb_op:
	case MIPS16e_lbu_op:
	case MIPS16e_sb_op:
		goto sigbus;

	case MIPS16e_lh_op:
		if (!access_ok(addr, 2))
			goto sigbus;

		LoadHW(addr, value, res);
		MIPS16e_compute_return_epc(regs, &oldinst);
		regs->regs[reg] = value;
		break;

	case MIPS16e_lhu_op:
		if (!access_ok(addr, 2))
			goto sigbus;

		LoadHWU(addr, value, res);
		MIPS16e_compute_return_epc(regs, &oldinst);
		regs->regs[reg] = value;
		break;

	case MIPS16e_lw_op:
	case MIPS16e_lwpc_op:
	case MIPS16e_lwsp_op:
		if (!access_ok(addr, 4))
			goto sigbus;

		LoadW(addr, value, res);
		MIPS16e_compute_return_epc(regs, &oldinst);
		regs->regs[reg] = value;
		break;
	case MIPS16e_lwu_op:
#ifdef CONFIG_64BIT
		/*
		 * A 32-bit kernel might be running on a 64-bit processor.  But
		 * if we're on a 32-bit processor and an i-cache incoherency
		 * or race makes us see a 64-bit instruction here the sdl/sdr
		 * would blow up, so for now we don't handle unaligned 64-bit
		 * instructions on 32-bit kernels.
		 */
		if (!access_ok(addr, 4))
			goto sigbus;

		LoadWU(addr, value, res);
		MIPS16e_compute_return_epc(regs, &oldinst);
		regs->regs[reg] = value;
		break;
#endif /* CONFIG_64BIT */

		/* Cannot handle 64-bit instructions in 32-bit kernel */
		goto sigbus;
	case MIPS16e_ld_op:
loadDW:
#ifdef CONFIG_64BIT
		/*
		 * A 32-bit kernel might be running on a 64-bit processor.  But
		 * if we're on a 32-bit processor and an i-cache incoherency
		 * or race makes us see a 64-bit instruction here the sdl/sdr
		 * would blow up, so for now we don't handle unaligned 64-bit
		 * instructions on 32-bit kernels.
		 */
		if (!access_ok(addr, 8))
			goto sigbus;

		LoadDW(addr, value, res);
		MIPS16e_compute_return_epc(regs, &oldinst);
		regs->regs[reg] = value;
		break;
#endif /* CONFIG_64BIT */

		/* Cannot handle 64-bit instructions in 32-bit kernel */
		goto sigbus;
	case MIPS16e_sh_op:
		if (!access_ok(addr, 2))
			goto sigbus;

		MIPS16e_compute_return_epc(regs, &oldinst);
		value = regs->regs[reg];
		StoreHW(addr, value, res);
		break;

	case MIPS16e_sw_op:
	case MIPS16e_swsp_op:
	case MIPS16e_i8_op:	/* actually - MIPS16e_swrasp_func */
		if (!access_ok(addr, 4))
			goto sigbus;

		MIPS16e_compute_return_epc(regs, &oldinst);
		value = regs->regs[reg];
		StoreW(addr, value, res);
		break;
	case MIPS16e_sd_op:
writeDW:
#ifdef CONFIG_64BIT
		/*
		 * A 32-bit kernel might be running on a 64-bit processor.  But
		 * if we're on a 32-bit processor and an i-cache incoherency
		 * or race makes us see a 64-bit instruction here the sdl/sdr
		 * would blow up, so for now we don't handle unaligned 64-bit
		 * instructions on 32-bit kernels.
		 */
		if (!access_ok(addr, 8))
			goto sigbus;

		MIPS16e_compute_return_epc(regs, &oldinst);
		value = regs->regs[reg];
		StoreDW(addr, value, res);
		break;
#endif /* CONFIG_64BIT */

		/* Cannot handle 64-bit instructions in 32-bit kernel */
		goto sigbus;
	default:
		/*
		 * Pheeee...  We encountered a yet unknown instruction or
		 * cache coherence problem.  Die sucker, die ...
		 */
		goto sigill;
	}
#ifdef CONFIG_DEBUG_FS
	unaligned_instructions++;
#endif

	return;

fault:
	/* roll back jump/branch */
	regs->cp0_epc = origpc;
	regs->regs[31] = orig31;
	/* Did we have an exception handler installed? */
	if (fixup_exception(regs))
		return;

	die_if_kernel("Unhandled kernel unaligned access", regs);
	force_sig(SIGSEGV, current);

	return;

sigbus:
	die_if_kernel("Unhandled kernel unaligned access", regs);
	force_sig(SIGBUS, current);

	return;

sigill:
	die_if_kernel
	    ("Unhandled kernel unaligned access or invalid instruction", regs);
	force_sig(SIGILL, current);
}
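
/*
 * do_ade() is the common C handler behind the address-error exception
 * vectors (AdEL/AdES).  It forwards the fault as a signal where emulation
 * is disabled or impossible, and otherwise dispatches to one of the three
 * emulators above according to the ISA mode of the faulting code.
 */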
asmlinkage void do_ade(struct pt_regs *regs)
{
	enum ctx_state prev_state;
	unsigned int __user *pc;
	mm_segment_t seg;

	prev_state = exception_enter();
	perf_sw_event(PERF_COUNT_SW_ALIGNMENT_FAULTS,
			1, regs, regs->cp0_badvaddr);
	/*
	 * Did we catch a fault trying to load an instruction?
	 */
	if (regs->cp0_badvaddr == regs->cp0_epc)
		goto sigbus;

	if (user_mode(regs) && !test_thread_flag(TIF_FIXADE))
		goto sigbus;
	if (unaligned_action == UNALIGNED_ACTION_SIGNAL)
		goto sigbus;

	/*
	 * Do branch emulation only if we didn't forward the exception.
	 * This is all so very ugly ...
	 */

	/*
	 * Are we running in microMIPS mode?
	 */
	if (get_isa16_mode(regs->cp0_epc)) {
		/*
		 * Did we catch a fault trying to load an instruction in
		 * 16-bit mode?
		 */
		if (regs->cp0_badvaddr == msk_isa16_mode(regs->cp0_epc))
			goto sigbus;
		if (unaligned_action == UNALIGNED_ACTION_SHOW)
			show_registers(regs);

		if (cpu_has_mmips) {
			seg = get_fs();
			if (!user_mode(regs))
				set_fs(KERNEL_DS);
			emulate_load_store_microMIPS(regs,
				(void __user *)regs->cp0_badvaddr);
			set_fs(seg);

			return;
		}

		if (cpu_has_mips16) {
			seg = get_fs();
			if (!user_mode(regs))
				set_fs(KERNEL_DS);
			emulate_load_store_MIPS16e(regs,
				(void __user *)regs->cp0_badvaddr);
			set_fs(seg);

			return;
		}

		goto sigbus;
	}

	if (unaligned_action == UNALIGNED_ACTION_SHOW)
		show_registers(regs);
	pc = (unsigned int __user *)exception_epc(regs);

	seg = get_fs();
	if (!user_mode(regs))
		set_fs(KERNEL_DS);
	emulate_load_store_insn(regs, (void __user *)regs->cp0_badvaddr, pc);
	set_fs(seg);

	return;

sigbus:
	die_if_kernel("Kernel unaligned instruction access", regs);
	force_sig(SIGBUS, current);

	/*
	 * XXX On return from the signal handler we should advance the epc
	 */
	exception_exit(prev_state);
}
#ifdef CONFIG_DEBUG_FS
static int __init debugfs_unaligned(void)
{
	struct dentry *d;

	if (!mips_debugfs_dir)
		return -ENODEV;
	d = debugfs_create_u32("unaligned_instructions", S_IRUGO,
			       mips_debugfs_dir, &unaligned_instructions);
	if (!d)
		return -ENOMEM;
	d = debugfs_create_u32("unaligned_action", S_IRUGO | S_IWUSR,
			       mips_debugfs_dir, &unaligned_action);
	if (!d)
		return -ENOMEM;
	return 0;
}
arch_initcall(debugfs_unaligned);
#endif
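
/*
 * With CONFIG_DEBUG_FS the emulation is observable and tunable at run
 * time; a minimal usage sketch (assuming debugfs is mounted at
 * /sys/kernel/debug):
 *
 *   cat /sys/kernel/debug/mips/unaligned_instructions
 *   echo 2 > /sys/kernel/debug/mips/unaligned_action
 *
 * where the values 0, 1 and 2 correspond to UNALIGNED_ACTION_QUIET,
 * _SIGNAL and _SHOW above.
 */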