1 /* SPDX-License-Identifier: GPL-2.0-or-later */
3 * Linux/PA-RISC Project (http://www.parisc-linux.org/)
5 * kernel entry points (interruptions, system call wrappers)
6 * Copyright (C) 1999,2000 Philipp Rumpf
7 * Copyright (C) 1999 SuSE GmbH Nuernberg
8 * Copyright (C) 2000 Hewlett-Packard (John Marvin)
9 * Copyright (C) 1999 Hewlett-Packard (Frank Rowand)
12 #include <asm/asm-offsets.h>
14 /* we have the following possibilities to act on an interruption:
15 * - handle in assembly and use shadowed registers only
16 * - save registers to kernel stack and handle in assembly or C */
20 #include <asm/cache.h> /* for L1_CACHE_SHIFT */
21 #include <asm/assembly.h> /* for LDREG/STREG defines */
22 #include <asm/signal.h>
23 #include <asm/unistd.h>
25 #include <asm/traps.h>
26 #include <asm/thread_info.h>
27 #include <asm/alternative.h>
29 #include <linux/linkage.h>
30 #include <linux/pgtable.h>
38 /* Get aligned page_table_lock address for this mm from cr28/tr4 */
43 /* space_to_prot macro creates a prot id from a space id */
45 #if (SPACEID_SHIFT) == 0
46 .macro space_to_prot spc prot
47 depd,z \spc,62,31,\prot
50 .macro space_to_prot spc prot
51 extrd,u \spc,(64 - (SPACEID_SHIFT)),32,\prot
55 * The "get_stack" macros are responsible for determining the
59 * Already using a kernel stack, so call the
60 * get_stack_use_r30 macro to push a pt_regs structure
61 * on the stack, and store registers there.
63 * Need to set up a kernel stack, so call the
64 * get_stack_use_cr30 macro to set up a pointer
65 * to the pt_regs structure contained within the
66 * task pointer pointed to by cr30. Load the stack
67 * pointer from the task structure.
69 * Note that we use shadowed registers for temps until
70 * we can save %r26 and %r29. %r26 is used to preserve
71 * %r8 (a shadowed register) which temporarily contained
72 * either the fault type ("code") or the eirr. We need
73 * to use a non-shadowed register to carry the value over
74 * the rfir in virt_map. We use %r26 since this value winds
75 * up being passed as the argument to either do_cpu_irq_mask
76 * or handle_interruption. %r29 is used to hold a pointer
77 * the register save area, and once again, it needs to
78 * be a non-shadowed register so that it survives the rfir.
81 .macro get_stack_use_cr30
83 /* we save the registers in the task struct */
/* Set up a kernel stack from the task pointer in cr30 (see block
 * comment above): %r30 gets the task stack plus one pt_regs frame and
 * the shadowed temps are spilled into the task's register save area.
 * NOTE(review): several lines are elided in this view (the fetch of
 * cr30 and the first spills) — confirm against the full source. */
87 tophys %r1,%r9 /* task_struct */
88 LDREG TASK_STACK(%r9),%r30 /* base of this task's kernel stack */
89 ldo PT_SZ_ALGN(%r30),%r30 /* reserve an aligned pt_regs frame */
90 mtsp %r0,%sr7 /* clear sr7 after kernel stack was set! */
92 ldo TASK_REGS(%r9),%r9 /* %r9 = &task pt_regs save area */
93 STREG %r17,PT_GR30(%r9) /* presumably old %r30 — TODO confirm */
94 STREG %r29,PT_GR29(%r9)
95 STREG %r26,PT_GR26(%r9)
96 STREG %r16,PT_SR7(%r9) /* presumably old sr7 — TODO confirm */
100 .macro get_stack_use_r30
102 /* we put a struct pt_regs on the stack and save the registers there */
/* Already on a kernel stack: push a pt_regs frame at %r30 and spill
 * the shadowed temps into it.
 * NOTE(review): lines are elided in this view — %r9 presumably points
 * at the new frame and %r1 holds the old %r30; confirm vs. full
 * source. */
106 ldo PT_SZ_ALGN(%r30),%r30 /* advance sp past the new frame */
107 STREG %r1,PT_GR30(%r9)
108 STREG %r29,PT_GR29(%r9)
109 STREG %r26,PT_GR26(%r9)
110 STREG %r16,PT_SR7(%r9)
115 LDREG PT_GR1(%r29), %r1
116 LDREG PT_GR30(%r29),%r30
117 LDREG PT_GR29(%r29),%r29
120 /* default interruption handler
121 * (calls traps.c:handle_interruption) */
128 /* Interrupt interruption handler
129 * (calls irq.c:do_cpu_irq_mask) */
136 .import os_hpmc, code
140 nop /* must be a NOP, will be patched later */
141 load32 PA(os_hpmc), %r3
144 .word 0 /* checksum (will be patched) */
145 .word 0 /* address of handler */
146 .word 0 /* length of handler */
150 * Performance Note: Instructions will be moved up into
151 * this part of the code later on, once we are sure
152 * that the tlb miss handlers are close to final form.
155 /* Register definitions for tlb miss handler macros */
157 va = r8 /* virtual address for which the trap occurred */
158 spc = r24 /* space for which the trap occurred */
163 * itlb miss interruption handler (parisc 1.1 - 32 bit)
177 * itlb miss interruption handler (parisc 2.0)
194 * naitlb miss interruption handler (parisc 1.1 - 32 bit)
197 .macro naitlb_11 code
208 * naitlb miss interruption handler (parisc 2.0)
211 .macro naitlb_20 code
226 * dtlb miss interruption handler (parisc 1.1 - 32 bit)
240 * dtlb miss interruption handler (parisc 2.0)
257 /* nadtlb miss interruption handler (parisc 1.1 - 32 bit) */
259 .macro nadtlb_11 code
269 /* nadtlb miss interruption handler (parisc 2.0) */
271 .macro nadtlb_20 code
286 * dirty bit trap interruption handler (parisc 1.1 - 32 bit)
300 * dirty bit trap interruption handler (parisc 2.0)
316 /* In LP64, the space contains part of the upper 32 bits of the
317 * fault. We have to extract this and place it in the va,
318 * zeroing the corresponding bits in the space register */
319 .macro space_adjust spc,va,tmp
321 extrd,u \spc,63,SPACEID_SHIFT,\tmp
322 depd %r0,63,SPACEID_SHIFT,\spc
323 depd \tmp,31,SPACEID_SHIFT,\va
327 .import swapper_pg_dir,code
329 /* Get the pgd. For faults on space zero (kernel space), this
330 * is simply swapper_pg_dir. For user space faults, the
331 * pgd is stored in %cr25 */
332 .macro get_pgd spc,reg
/* Kernel faults (space 0) use swapper_pg_dir; user faults use the pgd
 * held in %cr25 (per the comment above; that load is elided in this
 * view of the source). */
333 ldil L%PA(swapper_pg_dir),\reg
334 ldo R%PA(swapper_pg_dir)(\reg),\reg
335 or,COND(=) %r0,\spc,%r0 /* nullify next insn when \spc == 0 */
340 space_check(spc,tmp,fault)
342 spc - The space we saw the fault with.
343 tmp - The place to store the current space.
344 fault - Function to call on failure.
346 Only allow faults on different spaces from the
347 currently active one if we're the kernel
350 .macro space_check spc,tmp,fault
/* Branch to \fault unless the faulting space matches the currently
 * active one, or we are executing as kernel (space 0).
 * NOTE(review): the read of the current space into \tmp is elided in
 * this view — confirm vs. full source. */
352 /* check against %r0 which is same value as LINUX_GATEWAY_SPACE */
353 or,COND(<>) %r0,\spc,%r0 /* user may execute gateway page
354 * as kernel, so defeat the space
357 or,COND(=) %r0,\tmp,%r0 /* nullify if executing as kernel */
358 cmpb,COND(<>),n \tmp,\spc,\fault
361 /* Look up a PTE in a 2-Level scheme (faulting at each
362 * level if the entry isn't present
364 * NOTE: we use ldw even for LP64, since the short pointers
365 * can address up to 1TB
/* Index the pmd by \va, jump to \fault if the entry is absent, then
 * compute the address of the pte within the page table.
 * NOTE(review): several #else/#endif arms and load lines are elided
 * in this view — confirm against the full source. */
367 .macro L2_ptep pmd,pte,index,va,fault
368 #if CONFIG_PGTABLE_LEVELS == 3
369 extru \va,31-ASM_PMD_SHIFT,ASM_BITS_PER_PMD,\index
371 # if defined(CONFIG_64BIT)
372 extrd,u \va,63-ASM_PGDIR_SHIFT,ASM_BITS_PER_PGD,\index
374 # if PAGE_SIZE > 4096
375 extru \va,31-ASM_PGDIR_SHIFT,32-ASM_PGDIR_SHIFT,\index
377 extru \va,31-ASM_PGDIR_SHIFT,ASM_BITS_PER_PGD,\index
381 dep %r0,31,PAGE_SHIFT,\pmd /* clear offset */
382 #if CONFIG_PGTABLE_LEVELS < 3
385 ldw,s \index(\pmd),\pmd /* shifted-index load of the pmd entry */
386 bb,>=,n \pmd,_PxD_PRESENT_BIT,\fault /* not present -> fault */
387 dep %r0,31,PxD_FLAG_SHIFT,\pmd /* clear flags */
388 SHLREG \pmd,PxD_VALUE_SHIFT,\pmd
389 extru \va,31-PAGE_SHIFT,ASM_BITS_PER_PTE,\index
390 dep %r0,31,PAGE_SHIFT,\pmd /* clear offset */
391 shladd \index,BITS_PER_PTE_ENTRY,\pmd,\pmd /* pmd is now pte */
394 /* Look up PTE in a 3-Level scheme. */
/* Resolve the top (pgd) level by hand, then delegate the remaining
 * walk to L2_ptep. Faults to \fault when the pgd entry is absent. */
395 .macro L3_ptep pgd,pte,index,va,fault
396 #if CONFIG_PGTABLE_LEVELS == 3
398 extrd,u \va,63-ASM_PGDIR_SHIFT,ASM_BITS_PER_PGD,\index
399 ldw,s \index(\pgd),\pgd /* load the pgd entry */
400 bb,>=,n \pgd,_PxD_PRESENT_BIT,\fault /* not present -> fault */
401 shld \pgd,PxD_VALUE_SHIFT,\pgd /* entry -> next-level base addr */
403 L2_ptep \pgd,\pte,\index,\va,\fault /* finish with the 2-level walk */
406 /* Acquire page_table_lock and check page is present. */
/* Kernel-space faults (spc == 0) skip the lock and just test the pte.
 * NOTE(review): the load of the lock address into \tmp before the
 * spin loop is elided in this view — confirm vs. full source. */
407 .macro ptl_lock spc,ptp,pte,tmp,tmp1,fault
408 #ifdef CONFIG_TLB_PTLOCK
409 98: cmpib,COND(=),n 0,\spc,2f /* kernel space: skip the lock */
411 1: LDCW 0(\tmp),\tmp1 /* ldcw = atomic load-and-clear; 0 => held */
412 cmpib,COND(=) 0,\tmp1,1b /* spin until the lock is taken */
415 bb,<,n \pte,_PAGE_PRESENT_BIT,3f
418 99: ALTERNATIVE(98b, 99b, ALT_COND_NO_SMP, INSN_NOP) /* NOP on UP */
420 2: LDREG 0(\ptp),\pte /* unlocked (kernel) path: load the pte */
421 bb,>=,n \pte,_PAGE_PRESENT_BIT,\fault /* not present -> fault */
425 /* Release page_table_lock without reloading lock address.
426 Note that the values in the register spc are limited to
427 NR_SPACE_IDS (262144). Thus, the stw instruction always
428 stores a nonzero value even when register spc is 64 bits.
429 We use an ordered store to ensure all prior accesses are
430 performed prior to releasing the lock. */
431 .macro ptl_unlock0 spc,tmp
432 #ifdef CONFIG_TLB_PTLOCK
/* Kernel space never took the lock, so nullify the releasing store.
 * NOTE(review): the ordered stw described above is elided in this
 * view — confirm vs. full source. */
433 98: or,COND(=) %r0,\spc,%r0
435 99: ALTERNATIVE(98b, 99b, ALT_COND_NO_SMP, INSN_NOP)
439 /* Release page_table_lock. */
440 .macro ptl_unlock1 spc,tmp
441 #ifdef CONFIG_TLB_PTLOCK
/* Reloads the lock address (line elided here), then releases via
 * ptl_unlock0. */
443 ptl_unlock0 \spc,\tmp
444 99: ALTERNATIVE(98b, 99b, ALT_COND_NO_SMP, INSN_NOP)
448 /* Set the _PAGE_ACCESSED bit of the PTE. Be clever and
449 * don't needlessly dirty the cache line if it was already set */
450 .macro update_accessed ptp,pte,tmp,tmp1
451 ldi _PAGE_ACCESSED,\tmp1
/* The and,COND(<>) nullifies the store-back (elided in this view)
 * when the accessed bit was already set, avoiding a cache dirty. */
453 and,COND(<>) \tmp1,\pte,%r0
457 /* Set the dirty bit (and accessed bit). No need to be
458 * clever, this is only used from the dirty fault */
459 .macro update_dirty ptp,pte,tmp
/* OR/store-back of these bits into the pte is elided in this view. */
460 ldi _PAGE_ACCESSED|_PAGE_DIRTY,\tmp
465 /* We have (depending on the page size):
466 * - 38 to 52-bit Physical Page Number
467 * - 12 to 26-bit page offset
469 /* bitshift difference between a PFN (based on kernel's PAGE_SIZE)
470 * to a CPU TLB 4k PFN (4k => 12 bits to shift) */
471 #define PAGE_ADD_SHIFT (PAGE_SHIFT-12)
472 #define PAGE_ADD_HUGE_SHIFT (REAL_HPAGE_SHIFT-12)
474 /* Drop prot bits and convert to page addr for iitlbt and idtlbt */
/* Extract the PFN from the pte and deposit the page-size encoding
 * expected by the PA 2.0 TLB-insert instructions. With hugepages
 * enabled, the default encoding is overwritten by the huge encoding
 * when _PAGE_HPAGE is set (the copy of \pte into \tmp is elided in
 * this view — confirm vs. full source). */
475 .macro convert_for_tlb_insert20 pte,tmp
476 #ifdef CONFIG_HUGETLB_PAGE
478 extrd,u \tmp,(63-ASM_PFN_PTE_SHIFT)+(63-58)+PAGE_ADD_SHIFT,\
479 64-PAGE_SHIFT-PAGE_ADD_SHIFT,\pte
481 depdi _PAGE_SIZE_ENCODING_DEFAULT,63,\
482 (63-58)+PAGE_ADD_SHIFT,\pte
483 extrd,u,*= \tmp,_PAGE_HPAGE_BIT+32,1,%r0 /* skip next if not huge */
484 depdi _HUGE_PAGE_SIZE_ENCODING_DEFAULT,63,\
485 (63-58)+PAGE_ADD_HUGE_SHIFT,\pte
486 #else /* Huge pages disabled */
487 extrd,u \pte,(63-ASM_PFN_PTE_SHIFT)+(63-58)+PAGE_ADD_SHIFT,\
488 64-PAGE_SHIFT-PAGE_ADD_SHIFT,\pte
489 depdi _PAGE_SIZE_ENCODING_DEFAULT,63,\
490 (63-58)+PAGE_ADD_SHIFT,\pte
494 /* Convert the pte and prot to tlb insertion values. How
495 * this happens is quite subtle, read below */
/* NOTE(review): several deposit instructions between the nullifying
 * extrd,u,*= tests are elided in this view — confirm vs. full
 * source before relying on the exact prot layout. */
496 .macro make_insert_tlb spc,pte,prot,tmp
497 space_to_prot \spc \prot /* create prot id from space */
498 /* The following is the real subtlety. This is depositing
499 * T <-> _PAGE_REFTRAP
501 * B <-> _PAGE_DMB (memory break)
503 * Then incredible subtlety: The access rights are
504 * _PAGE_GATEWAY, _PAGE_EXEC and _PAGE_WRITE
505 * See 3-14 of the parisc 2.0 manual
507 * Finally, _PAGE_READ goes in the top bit of PL1 (so we
508 * trigger an access rights trap in user space if the user
509 * tries to read an unreadable page */
512 /* PAGE_USER indicates the page can be read with user privileges,
513 * so deposit X1|11 to PL1|PL2 (remember the upper bit of PL1
514 * contains _PAGE_READ) */
515 extrd,u,*= \pte,_PAGE_USER_BIT+32,1,%r0
517 /* If we're a gateway page, drop PL2 back to zero for promotion
518 * to kernel privilege (so we can execute the page as kernel).
519 * Any privilege promotion page always denies read and write */
520 extrd,u,*= \pte,_PAGE_GATEWAY_BIT+32,1,%r0
521 depd %r0,11,2,\prot /* If Gateway, Set PL2 to 0 */
523 /* Enforce uncacheable pages.
524 * This should ONLY be used for MMIO on PA 2.0 machines.
525 * Memory/DMA is cache coherent on all PA2.0 machines we support
526 * (that means T-class is NOT supported) and the memory controllers
527 * on most of those machines only handles cache transactions.
529 extrd,u,*= \pte,_PAGE_NO_CACHE_BIT+32,1,%r0
532 /* Drop prot bits and convert to page addr for iitlbt and idtlbt */
533 convert_for_tlb_insert20 \pte \tmp
536 /* Identical macro to make_insert_tlb above, except it
537 * makes the tlb entry for the differently formatted pa11
538 * insertion instructions */
/* NOTE(review): the deposit instructions nullified by the extru,=
 * tests below are elided in this view — confirm vs. full source. */
539 .macro make_insert_tlb_11 spc,pte,prot
540 zdep \spc,30,15,\prot /* prot id from the (32-bit) space id */
542 extru,= \pte,_PAGE_NO_CACHE_BIT,1,%r0
544 extru,= \pte,_PAGE_USER_BIT,1,%r0
545 depi 7,11,3,\prot /* Set for user space (1 rsvd for read) */
546 extru,= \pte,_PAGE_GATEWAY_BIT,1,%r0
547 depi 0,11,2,\prot /* If Gateway, Set PL2 to 0 */
549 /* Get rid of prot bits and convert to page addr for iitlba */
551 depi 0,31,ASM_PFN_PTE_SHIFT,\pte
552 SHRREG \pte,(ASM_PFN_PTE_SHIFT-(31-26)),\pte
555 /* This is for ILP32 PA2.0 only. The TLB insertion needs
556 * to extend into I/O space if the address is 0xfXXXXXXX
557 * so we extend the f's into the top word of the pte in
/* NOTE(review): an instruction between the two extracts is elided in
 * this view — confirm the exact conditional sequence vs. full
 * source. */
559 .macro f_extend pte,tmp
560 extrd,s \pte,42,4,\tmp /* signed extract to test the top nibble */
562 extrd,s \pte,63,25,\pte /* sign-extend the f's into the top word */
565 /* The alias region is an 8MB aligned 16MB to do clear and
566 * copy user pages at addresses congruent with the user
569 * To use the alias page, you set %r26 up with the to TLB
570 * entry (identifying the physical page) and %r23 up with
571 * the from tlb entry (or nothing if only a to entry---for
572 * clear_user_page_asm) */
/* NOTE(review): this macro is heavily elided in this view — the
 * range comparison setup, the \patype dispatch (#if arms around the
 * depd,z/depw,z pair and the .error) and the from/to selection
 * branches are partly missing; confirm all control flow vs. full
 * source before editing. */
573 .macro do_alias spc,tmp,tmp1,va,pte,prot,fault,patype
574 cmpib,COND(<>),n 0,\spc,\fault /* only kernel may use the alias */
575 ldil L%(TMPALIAS_MAP_START),\tmp
576 #if defined(CONFIG_64BIT) && (TMPALIAS_MAP_START >= 0x80000000)
577 /* on LP64, ldi will sign extend into the upper 32 bits,
578 * which is behaviour we don't want */
583 cmpb,COND(<>),n \tmp,\tmp1,\fault /* va outside alias region */
584 mfctl %cr19,\tmp /* iir */
585 /* get the opcode (first six bits) into \tmp */
586 extrw,u \tmp,5,6,\tmp
588 * Only setting the T bit prevents data cache movein
589 * Setting access rights to zero prevents instruction cache movein
591 * Note subtlety here: _PAGE_GATEWAY, _PAGE_EXEC and _PAGE_WRITE go
592 * to type field and _PAGE_READ goes to top bit of PL1
594 ldi (_PAGE_REFTRAP|_PAGE_READ|_PAGE_WRITE),\prot
596 * so if the opcode is one (i.e. this is a memory management
597 * instruction) nullify the next load so \prot is only T.
598 * Otherwise this is a normal data operation
600 cmpiclr,= 0x01,\tmp,%r0
601 ldi (_PAGE_DIRTY|_PAGE_READ|_PAGE_WRITE),\prot
603 depd,z \prot,8,7,\prot
606 depw,z \prot,8,7,\prot
608 .error "undefined PA type to do_alias"
612 * OK, it is in the temp alias region, check whether "from" or "to".
613 * Check "subtle" note in pacache.S re: r23/r26.
616 extrd,u,*= \va,41,1,%r0
618 extrw,u,= \va,9,1,%r0
620 or,COND(tr) %r23,%r0,\pte /* select the "from" entry (%r23) */
626 * Fault_vectors are architecturally required to be aligned on a 2K
633 ENTRY(fault_vector_20)
634 /* First vector is invalid (0) */
635 .ascii "cows can fly"
644 itlb_20 PARISC_ITLB_TRAP
676 ENTRY(fault_vector_11)
677 /* First vector is invalid (0) */
678 .ascii "cows can fly"
687 itlb_11 PARISC_ITLB_TRAP
716 /* Fault vector is separately protected and *must* be on its own page */
719 .import handle_interruption,code
720 .import do_cpu_irq_mask,code
725 * copy_thread moved args into task save area.
728 ENTRY(ret_from_kernel_thread)
729 /* Call schedule_tail first though */
730 BL schedule_tail, %r2
/* NOTE(review): the delay-slot instruction and the call into the
 * thread function are elided in this view — confirm vs. full
 * source. */
733 mfctl %cr30,%r1 /* task_struct */
734 LDREG TASK_PT_GR25(%r1), %r26 /* arg copy_thread saved for us */
736 LDREG TASK_PT_GR27(%r1), %r27
738 LDREG TASK_PT_GR26(%r1), %r1 /* presumably the thread function
                                  * pointer — TODO confirm */
741 b finish_child_return
743 END(ret_from_kernel_thread)
747 * struct task_struct *_switch_to(struct task_struct *prev,
748 * struct task_struct *next)
750 * switch kernel stacks and return prev */
/* prev arrives in %r26, next in %r25 (standard parisc arg regs).
 * NOTE(review): the callee-saved register save/restore around the
 * switch is elided in this view — confirm vs. full source. */
751 ENTRY_CFI(_switch_to)
752 STREG %r2, -RP_OFFSET(%r30) /* save our return pointer */
757 load32 _switch_to_ret, %r2
759 STREG %r2, TASK_PT_KPC(%r26) /* prev will resume at _switch_to_ret */
760 LDREG TASK_PT_KPC(%r25), %r2 /* next's saved kernel PC */
762 STREG %r30, TASK_PT_KSP(%r26) /* park prev's kernel SP */
763 LDREG TASK_PT_KSP(%r25), %r30 /* switch to next's kernel SP */
767 ENTRY(_switch_to_ret)
768 mtctl %r0, %cr0 /* Needed for single stepping */
772 LDREG -RP_OFFSET(%r30), %r2 /* reload return pointer */
775 ENDPROC_CFI(_switch_to)
778 * Common rfi return path for interruptions, kernel execve, and
779 * sys_rt_sigreturn (sometimes). The sys_rt_sigreturn syscall will
780 * return via this path if the signal was received when the process
781 * was running; if the process was blocked on a syscall then the
782 * normal syscall_exit path is used. All syscalls for traced
783 processes exit via intr_restore.
785 * XXX If any syscalls that change a processes space id ever exit
786 * this way, then we will need to copy %sr3 in to PT_SR[3..7], and
793 ENTRY_CFI(syscall_exit_rfi)
794 mfctl %cr30,%r16 /* task_struct */
795 ldo TASK_REGS(%r16),%r16
796 /* Force iaoq to userspace, as the user has had access to our current
797 * context via sigcontext. Also Filter the PSW for the same reason.
799 LDREG PT_IAOQ0(%r16),%r19
800 depi PRIV_USER,31,2,%r19
801 STREG %r19,PT_IAOQ0(%r16)
802 LDREG PT_IAOQ1(%r16),%r19
803 depi PRIV_USER,31,2,%r19
804 STREG %r19,PT_IAOQ1(%r16)
805 LDREG PT_PSW(%r16),%r19
806 load32 USER_PSW_MASK,%r1
808 load32 USER_PSW_HI_MASK,%r20
811 and %r19,%r1,%r19 /* Mask out bits that user shouldn't play with */
813 or %r19,%r1,%r19 /* Make sure default USER_PSW bits are set */
814 STREG %r19,PT_PSW(%r16)
817 * If we aren't being traced, we never saved space registers
818 * (we don't store them in the sigcontext), so set them
819 * to "proper" values now (otherwise we'll wind up restoring
820 * whatever was last stored in the task structure, which might
821 * be inconsistent if an interrupt occurred while on the gateway
822 * page). Note that we may be "trashing" values the user put in
823 * them, but we don't support the user changing them.
826 STREG %r0,PT_SR2(%r16)
828 STREG %r19,PT_SR0(%r16)
829 STREG %r19,PT_SR1(%r16)
830 STREG %r19,PT_SR3(%r16)
831 STREG %r19,PT_SR4(%r16)
832 STREG %r19,PT_SR5(%r16)
833 STREG %r19,PT_SR6(%r16)
834 STREG %r19,PT_SR7(%r16)
837 /* check for reschedule */
839 LDREG TASK_TI_FLAGS(%r1),%r19 /* sched.h: TIF_NEED_RESCHED */
840 bb,<,n %r19,31-TIF_NEED_RESCHED,intr_do_resched /* forward */
842 .import do_notify_resume,code
846 LDREG TASK_TI_FLAGS(%r1),%r19
847 ldi (_TIF_USER_WORK_MASK & ~_TIF_NEED_RESCHED), %r20
848 and,COND(<>) %r19, %r20, %r0
849 b,n intr_restore /* skip past if we've nothing to do */
851 /* This check is critical to having LWS
852 * working. The IASQ is zero on the gateway
853 * page and we cannot deliver any signals until
854 * we get off the gateway page.
856 * Only do signals if we are returning to user space
858 LDREG PT_IASQ0(%r16), %r20
859 cmpib,COND(=),n LINUX_GATEWAY_SPACE, %r20, intr_restore /* forward */
860 LDREG PT_IASQ1(%r16), %r20
861 cmpib,COND(=),n LINUX_GATEWAY_SPACE, %r20, intr_restore /* forward */
863 copy %r0, %r25 /* long in_syscall = 0 */
865 ldo -16(%r30),%r29 /* Reference param save area */
868 /* NOTE: We need to enable interrupts if we have to deliver
869 * signals. We used to do this earlier but it caused kernel
870 * stack overflows. */
873 BL do_notify_resume,%r2
874 copy %r16, %r26 /* struct pt_regs *regs */
880 ldo PT_FR31(%r29),%r1
884 /* inverse of virt_map */
886 rsm PSW_SM_QUIET,%r0 /* prepare for rfi */
889 /* Restore space id's and special cr's from PT_REGS
890 * structure pointed to by r29
894 /* IMPORTANT: rest_stack restores r29 last (we are using it)!
895 * It also restores r1 and r30.
902 #ifndef CONFIG_PREEMPTION
903 # define intr_do_preempt intr_restore
904 #endif /* !CONFIG_PREEMPTION */
906 .import schedule,code
908 /* Only call schedule on return to userspace. If we're returning
909 * to kernel space, we may schedule if CONFIG_PREEMPTION, otherwise
910 * we jump back to intr_restore.
912 LDREG PT_IASQ0(%r16), %r20
913 cmpib,COND(=) 0, %r20, intr_do_preempt
915 LDREG PT_IASQ1(%r16), %r20
916 cmpib,COND(=) 0, %r20, intr_do_preempt
919 /* NOTE: We need to enable interrupts if we schedule. We used
920 * to do this earlier but it caused kernel stack overflows. */
924 ldo -16(%r30),%r29 /* Reference param save area */
927 ldil L%intr_check_sig, %r2
931 load32 schedule, %r20
934 ldo R%intr_check_sig(%r2), %r2
936 /* preempt the current task on returning to kernel
937 * mode from an interrupt, iff need_resched is set,
938 * and preempt_count is 0. otherwise, we continue on
939 * our merry way back to the current running task.
941 #ifdef CONFIG_PREEMPTION
942 .import preempt_schedule_irq,code
944 rsm PSW_SM_I, %r0 /* disable interrupts */
946 /* current_thread_info()->preempt_count */
948 ldw TI_PRE_COUNT(%r1), %r19
949 cmpib,<> 0, %r19, intr_restore /* if preempt_count > 0 */
950 nop /* prev insn branched backwards */
952 /* check if we interrupted a critical path */
953 LDREG PT_PSW(%r16), %r20
954 bb,<,n %r20, 31 - PSW_SM_I, intr_restore
957 /* ssm PSW_SM_I done later in intr_restore */
958 #ifdef CONFIG_MLONGCALLS
959 ldil L%intr_restore, %r2
960 load32 preempt_schedule_irq, %r1
962 ldo R%intr_restore(%r2), %r2
964 ldil L%intr_restore, %r1
965 BL preempt_schedule_irq, %r2
966 ldo R%intr_restore(%r1), %r2
968 #endif /* CONFIG_PREEMPTION */
971 * External interrupts.
975 cmpib,COND(=),n 0,%r16,1f
987 ldo PT_FR0(%r29), %r24
992 copy %r29, %r26 /* arg0 is pt_regs */
993 copy %r29, %r16 /* save pt_regs */
995 ldil L%intr_return, %r2
998 ldo -16(%r30),%r29 /* Reference param save area */
1002 ldo R%intr_return(%r2), %r2 /* return to intr_return, not here */
1003 ENDPROC_CFI(syscall_exit_rfi)
1006 /* Generic interruptions (illegal insn, unaligned, page fault, etc) */
1008 ENTRY_CFI(intr_save) /* for os_hpmc */
1010 cmpib,COND(=),n 0,%r16,1f
1022 /* If this trap is a itlb miss, skip saving/adjusting isr/ior */
1023 cmpib,COND(=),n PARISC_ITLB_TRAP,%r26,skip_save_ior
1027 nop /* serialize mfctl on PA 2.0 to avoid 4 cycle penalty */
1033 * If the interrupted code was running with W bit off (32 bit),
1034 * clear the b bits (bits 0 & 1) in the ior.
1035 * save_specials left ipsw value in r8 for us to test.
1037 extrd,u,*<> %r8,PSW_W_BIT,1,%r0
1040 /* adjust isr/ior: get high bits from isr and deposit in ior */
1041 space_adjust %r16,%r17,%r1
1043 STREG %r16, PT_ISR(%r29)
1044 STREG %r17, PT_IOR(%r29)
1046 #if 0 && defined(CONFIG_64BIT)
1047 /* Revisit when we have 64-bit code above 4Gb */
1051 /* We have a itlb miss, and when executing code above 4 Gb on ILP64, we
1052 * need to adjust iasq/iaoq here in the same way we adjusted isr/ior
1055 extrd,u,* %r8,PSW_W_BIT,1,%r1
1056 cmpib,COND(=),n 1,%r1,intr_save2
1057 LDREG PT_IASQ0(%r29), %r16
1058 LDREG PT_IAOQ0(%r29), %r17
1059 /* adjust iasq/iaoq */
1060 space_adjust %r16,%r17,%r1
1061 STREG %r16, PT_IASQ0(%r29)
1062 STREG %r17, PT_IAOQ0(%r29)
1071 ldo PT_FR0(%r29), %r25
1076 copy %r29, %r25 /* arg1 is pt_regs */
1078 ldo -16(%r30),%r29 /* Reference param save area */
1081 ldil L%intr_check_sig, %r2
1082 copy %r25, %r16 /* save pt_regs */
1084 b handle_interruption
1085 ldo R%intr_check_sig(%r2), %r2
1086 ENDPROC_CFI(intr_save)
1090 * Note for all tlb miss handlers:
1092 * cr24 contains a pointer to the kernel address space
1095 * cr25 contains a pointer to the current user address
1096 * space page directory.
1098 * sr3 will contain the space id of the user address space
1099 * of the current running thread while that thread is
1100 * running in the kernel.
1104 * register number allocations. Note that these are all
1105 * in the shadowed registers
1108 t0 = r1 /* temporary register 0 */
1109 va = r8 /* virtual address for which the trap occurred */
1110 t1 = r9 /* temporary register 1 */
1111 pte = r16 /* pte/phys page # */
1112 prot = r17 /* prot bits */
1113 spc = r24 /* space for which the trap occurred */
1114 ptp = r25 /* page directory/page table pointer */
1119 space_adjust spc,va,t0
1121 space_check spc,t0,dtlb_fault
1123 L3_ptep ptp,pte,t0,va,dtlb_check_alias_20w
1125 ptl_lock spc,ptp,pte,t0,t1,dtlb_check_alias_20w
1126 update_accessed ptp,pte,t0,t1
1128 make_insert_tlb spc,pte,prot,t1
1136 dtlb_check_alias_20w:
1137 do_alias spc,t0,t1,va,pte,prot,dtlb_fault,20
1145 space_adjust spc,va,t0
1147 space_check spc,t0,nadtlb_fault
1149 L3_ptep ptp,pte,t0,va,nadtlb_check_alias_20w
1151 ptl_lock spc,ptp,pte,t0,t1,nadtlb_check_alias_20w
1152 update_accessed ptp,pte,t0,t1
1154 make_insert_tlb spc,pte,prot,t1
1162 nadtlb_check_alias_20w:
1163 do_alias spc,t0,t1,va,pte,prot,nadtlb_emulate,20
1175 space_check spc,t0,dtlb_fault
1177 L2_ptep ptp,pte,t0,va,dtlb_check_alias_11
1179 ptl_lock spc,ptp,pte,t0,t1,dtlb_check_alias_11
1180 update_accessed ptp,pte,t0,t1
1182 make_insert_tlb_11 spc,pte,prot
1184 mfsp %sr1,t1 /* Save sr1 so we can use it in tlb inserts */
1187 idtlba pte,(%sr1,va)
1188 idtlbp prot,(%sr1,va)
1190 mtsp t1, %sr1 /* Restore sr1 */
1196 dtlb_check_alias_11:
1197 do_alias spc,t0,t1,va,pte,prot,dtlb_fault,11
1208 space_check spc,t0,nadtlb_fault
1210 L2_ptep ptp,pte,t0,va,nadtlb_check_alias_11
1212 ptl_lock spc,ptp,pte,t0,t1,nadtlb_check_alias_11
1213 update_accessed ptp,pte,t0,t1
1215 make_insert_tlb_11 spc,pte,prot
1217 mfsp %sr1,t1 /* Save sr1 so we can use it in tlb inserts */
1220 idtlba pte,(%sr1,va)
1221 idtlbp prot,(%sr1,va)
1223 mtsp t1, %sr1 /* Restore sr1 */
1229 nadtlb_check_alias_11:
1230 do_alias spc,t0,t1,va,pte,prot,nadtlb_emulate,11
1239 space_adjust spc,va,t0
1241 space_check spc,t0,dtlb_fault
1243 L2_ptep ptp,pte,t0,va,dtlb_check_alias_20
1245 ptl_lock spc,ptp,pte,t0,t1,dtlb_check_alias_20
1246 update_accessed ptp,pte,t0,t1
1248 make_insert_tlb spc,pte,prot,t1
1258 dtlb_check_alias_20:
1259 do_alias spc,t0,t1,va,pte,prot,dtlb_fault,20
1269 space_check spc,t0,nadtlb_fault
1271 L2_ptep ptp,pte,t0,va,nadtlb_check_alias_20
1273 ptl_lock spc,ptp,pte,t0,t1,nadtlb_check_alias_20
1274 update_accessed ptp,pte,t0,t1
1276 make_insert_tlb spc,pte,prot,t1
1286 nadtlb_check_alias_20:
1287 do_alias spc,t0,t1,va,pte,prot,nadtlb_emulate,20
1299 * Non access misses can be caused by fdc,fic,pdc,lpa,probe and
1300 * probei instructions. We don't want to fault for these
1301 * instructions (not only does it not make sense, it can cause
1302 * deadlocks, since some flushes are done with the mmap
1303 * semaphore held). If the translation doesn't exist, we can't
1304 * insert a translation, so have to emulate the side effects
1305 * of the instruction. Since we don't insert a translation
1306 * we can get a lot of faults during a flush loop, so it makes
1307 * sense to try to do it here with minimum overhead. We only
1308 * emulate fdc,fic,pdc,probew,prober instructions whose base
1309 * and index registers are not shadowed. We defer everything
1310 * else to the "slow" path.
1313 mfctl %cr19,%r9 /* Get iir */
1315 /* PA 2.0 Arch Ref. Book pg 382 has a good description of the insn bits.
1316 Checks for fdc,fdce,pdc,"fic,4f",prober,probeir,probew, probeiw */
1318 /* Checks for fdc,fdce,pdc,"fic,4f" only */
1321 cmpb,<>,n %r16,%r17,nadtlb_probe_check
1322 bb,>=,n %r9,26,nadtlb_nullify /* m bit not set, just nullify */
1323 BL get_register,%r25
1324 extrw,u %r9,15,5,%r8 /* Get index register # */
1325 cmpib,COND(=),n -1,%r1,nadtlb_fault /* have to use slow path */
1327 BL get_register,%r25
1328 extrw,u %r9,10,5,%r8 /* Get base register # */
1329 cmpib,COND(=),n -1,%r1,nadtlb_fault /* have to use slow path */
1330 BL set_register,%r25
1331 add,l %r1,%r24,%r1 /* doesn't affect c/b bits */
1336 or %r8,%r9,%r8 /* Set PSW_N */
1343 When there is no translation for the probe address then we
1344 must nullify the insn and return zero in the target register.
1345 This will indicate to the calling code that it does not have
1346 write/read privileges to this address.
1348 This should technically work for prober and probew in PA 1.1,
1349 and also probe,r and probe,w in PA 2.0
1351 WARNING: USE ONLY NON-SHADOW REGISTERS WITH PROBE INSN!
1352 THE SLOW-PATH EMULATION HAS NOT BEEN WRITTEN YET.
1358 cmpb,<>,n %r16,%r17,nadtlb_fault /* Must be probe,[rw]*/
1359 BL get_register,%r25 /* Find the target register */
1360 extrw,u %r9,31,5,%r8 /* Get target register */
1361 cmpib,COND(=),n -1,%r1,nadtlb_fault /* have to use slow path */
1362 BL set_register,%r25
1363 copy %r0,%r1 /* Write zero to target register */
1364 b nadtlb_nullify /* Nullify return insn */
1372 * I miss is a little different, since we allow users to fault
1373 * on the gateway page which is in the kernel address space.
1376 space_adjust spc,va,t0
1378 space_check spc,t0,itlb_fault
1380 L3_ptep ptp,pte,t0,va,itlb_fault
1382 ptl_lock spc,ptp,pte,t0,t1,itlb_fault
1383 update_accessed ptp,pte,t0,t1
1385 make_insert_tlb spc,pte,prot,t1
1396 * I miss is a little different, since we allow users to fault
1397 * on the gateway page which is in the kernel address space.
1400 space_adjust spc,va,t0
1402 space_check spc,t0,naitlb_fault
1404 L3_ptep ptp,pte,t0,va,naitlb_check_alias_20w
1406 ptl_lock spc,ptp,pte,t0,t1,naitlb_check_alias_20w
1407 update_accessed ptp,pte,t0,t1
1409 make_insert_tlb spc,pte,prot,t1
1417 naitlb_check_alias_20w:
1418 do_alias spc,t0,t1,va,pte,prot,naitlb_fault,20
1430 space_check spc,t0,itlb_fault
1432 L2_ptep ptp,pte,t0,va,itlb_fault
1434 ptl_lock spc,ptp,pte,t0,t1,itlb_fault
1435 update_accessed ptp,pte,t0,t1
1437 make_insert_tlb_11 spc,pte,prot
1439 mfsp %sr1,t1 /* Save sr1 so we can use it in tlb inserts */
1442 iitlba pte,(%sr1,va)
1443 iitlbp prot,(%sr1,va)
1445 mtsp t1, %sr1 /* Restore sr1 */
1454 space_check spc,t0,naitlb_fault
1456 L2_ptep ptp,pte,t0,va,naitlb_check_alias_11
1458 ptl_lock spc,ptp,pte,t0,t1,naitlb_check_alias_11
1459 update_accessed ptp,pte,t0,t1
1461 make_insert_tlb_11 spc,pte,prot
1463 mfsp %sr1,t1 /* Save sr1 so we can use it in tlb inserts */
1466 iitlba pte,(%sr1,va)
1467 iitlbp prot,(%sr1,va)
1469 mtsp t1, %sr1 /* Restore sr1 */
1475 naitlb_check_alias_11:
1476 do_alias spc,t0,t1,va,pte,prot,itlb_fault,11
1478 iitlba pte,(%sr0, va)
1479 iitlbp prot,(%sr0, va)
1488 space_check spc,t0,itlb_fault
1490 L2_ptep ptp,pte,t0,va,itlb_fault
1492 ptl_lock spc,ptp,pte,t0,t1,itlb_fault
1493 update_accessed ptp,pte,t0,t1
1495 make_insert_tlb spc,pte,prot,t1
1508 space_check spc,t0,naitlb_fault
1510 L2_ptep ptp,pte,t0,va,naitlb_check_alias_20
1512 ptl_lock spc,ptp,pte,t0,t1,naitlb_check_alias_20
1513 update_accessed ptp,pte,t0,t1
1515 make_insert_tlb spc,pte,prot,t1
1525 naitlb_check_alias_20:
1526 do_alias spc,t0,t1,va,pte,prot,naitlb_fault,20
1538 space_adjust spc,va,t0
1540 space_check spc,t0,dbit_fault
1542 L3_ptep ptp,pte,t0,va,dbit_fault
1544 ptl_lock spc,ptp,pte,t0,t1,dbit_fault
1545 update_dirty ptp,pte,t1
1547 make_insert_tlb spc,pte,prot,t1
1560 space_check spc,t0,dbit_fault
1562 L2_ptep ptp,pte,t0,va,dbit_fault
1564 ptl_lock spc,ptp,pte,t0,t1,dbit_fault
1565 update_dirty ptp,pte,t1
1567 make_insert_tlb_11 spc,pte,prot
1569 mfsp %sr1,t1 /* Save sr1 so we can use it in tlb inserts */
1572 idtlba pte,(%sr1,va)
1573 idtlbp prot,(%sr1,va)
1575 mtsp t1, %sr1 /* Restore sr1 */
1584 space_check spc,t0,dbit_fault
1586 L2_ptep ptp,pte,t0,va,dbit_fault
1588 ptl_lock spc,ptp,pte,t0,t1,dbit_fault
1589 update_dirty ptp,pte,t1
1591 make_insert_tlb spc,pte,prot,t1
1602 .import handle_interruption,code
1606 ldi 31,%r8 /* Use an unused code */
1614 ldi PARISC_ITLB_TRAP,%r8
1628 /* Register saving semantics for system calls:
1630 %r1 clobbered by system call macro in userspace
1631 %r2 saved in PT_REGS by gateway page
1632 %r3 - %r18 preserved by C code (saved by signal code)
1633 %r19 - %r20 saved in PT_REGS by gateway page
1634 %r21 - %r22 non-standard syscall args
1635 stored in kernel stack by gateway page
1636 %r23 - %r26 arg3-arg0, saved in PT_REGS by gateway page
1637 %r27 - %r30 saved in PT_REGS by gateway page
1638 %r31 syscall return pointer
1641 /* Floating point registers (FIXME: what do we do with these?)
1643 %fr0 - %fr3 status/exception, not preserved
1644 %fr4 - %fr7 arguments
1645 %fr8 - %fr11 not preserved by C code
1646 %fr12 - %fr21 preserved by C code
1647 %fr22 - %fr31 not preserved by C code
1650 .macro reg_save regs
/* Spill the C callee-saved registers %r3-%r18 into the pt_regs frame
 * pointed to by \regs (per the register-saving table above, these are
 * preserved by C code and saved by the signal code). */
1651 STREG %r3, PT_GR3(\regs)
1652 STREG %r4, PT_GR4(\regs)
1653 STREG %r5, PT_GR5(\regs)
1654 STREG %r6, PT_GR6(\regs)
1655 STREG %r7, PT_GR7(\regs)
1656 STREG %r8, PT_GR8(\regs)
1657 STREG %r9, PT_GR9(\regs)
1658 STREG %r10,PT_GR10(\regs)
1659 STREG %r11,PT_GR11(\regs)
1660 STREG %r12,PT_GR12(\regs)
1661 STREG %r13,PT_GR13(\regs)
1662 STREG %r14,PT_GR14(\regs)
1663 STREG %r15,PT_GR15(\regs)
1664 STREG %r16,PT_GR16(\regs)
1665 STREG %r17,PT_GR17(\regs)
1666 STREG %r18,PT_GR18(\regs)
1669 .macro reg_restore regs
/* Inverse of reg_save: reload the callee-saved registers %r3-%r18
 * from the pt_regs frame pointed to by \regs. */
1670 LDREG PT_GR3(\regs), %r3
1671 LDREG PT_GR4(\regs), %r4
1672 LDREG PT_GR5(\regs), %r5
1673 LDREG PT_GR6(\regs), %r6
1674 LDREG PT_GR7(\regs), %r7
1675 LDREG PT_GR8(\regs), %r8
1676 LDREG PT_GR9(\regs), %r9
1677 LDREG PT_GR10(\regs),%r10
1678 LDREG PT_GR11(\regs),%r11
1679 LDREG PT_GR12(\regs),%r12
1680 LDREG PT_GR13(\regs),%r13
1681 LDREG PT_GR14(\regs),%r14
1682 LDREG PT_GR15(\regs),%r15
1683 LDREG PT_GR16(\regs),%r16
1684 LDREG PT_GR17(\regs),%r17
1685 LDREG PT_GR18(\regs),%r18
1688 .macro fork_like name
/* Emit sys_<name>_wrapper: record state in the task's pt_regs, then
 * branch to the real sys_<name>.
 * NOTE(review): the fetch of cr30 into %r1 and a reg_save are elided
 * in this view — confirm vs. full source. */
1689 ENTRY_CFI(sys_\name\()_wrapper)
1691 ldo TASK_REGS(%r1),%r1 /* %r1 = &task pt_regs */
1694 ldil L%sys_\name, %r31
1695 be R%sys_\name(%sr4,%r31) /* tail-branch to the real syscall */
1696 STREG %r28, PT_CR27(%r1) /* executed in the branch delay slot */
1697 ENDPROC_CFI(sys_\name\()_wrapper)
/*
 * Child-side return path for fork-like syscalls (fragment — the label
 * that precedes line 1705 and several interior lines are missing from
 * this view).  Calls schedule_tail, then reloads the pt_regs pointer
 * and PT_CR27 into %r3.
 */
1705 /* Set the return value for the child */
1707 BL schedule_tail, %r2
1709 finish_child_return:
1711 ldo TASK_REGS(%r1),%r1 /* get pt regs */
1713 LDREG PT_CR27(%r1), %r3
/*
 * sys_rt_sigreturn wrapper: computes the pt_regs pointer argument,
 * saves the return pointer, calls the C sys_rt_sigreturn, then
 * restores %r2 and reloads pt_regs state.  Registers are NOT saved
 * here because the C code restores them from the sigcontext.
 */
1720 ENTRY_CFI(sys_rt_sigreturn_wrapper)
1722 ldo TASK_REGS(%r26),%r26 /* get pt regs */
1723 /* Don't save regs, we are going to restore them from sigcontext. */
1724 STREG %r2, -RP_OFFSET(%r30)
/* NOTE(review): the two BL sys_rt_sigreturn sequences below appear to
 * be alternate 64-bit/32-bit arms of a conditional block whose
 * #ifdef/#else lines are missing from this view — confirm. */
1726 ldo FRAME_SIZE(%r30), %r30
1727 BL sys_rt_sigreturn,%r2
1728 ldo -16(%r30),%r29 /* Reference param save area */
1730 BL sys_rt_sigreturn,%r2
1731 ldo FRAME_SIZE(%r30), %r30
1734 ldo -FRAME_SIZE(%r30), %r30
1735 LDREG -RP_OFFSET(%r30), %r2
1737 /* FIXME: I think we need to restore a few more things here. */
1739 ldo TASK_REGS(%r1),%r1 /* get pt regs */
1742 /* If the signal was received while the process was blocked on a
1743 * syscall, then r2 will take us to syscall_exit; otherwise r2 will
1744 * take us to syscall_exit_rfi and on to intr_return.
1747 LDREG PT_GR28(%r1),%r28 /* reload original r28 for syscall_exit */
1748 ENDPROC_CFI(sys_rt_sigreturn_wrapper)
/*
 * Syscall exit path: saves the return value, checks for pending
 * reschedule and signal/notify work, then restores user register
 * state and returns to user space via an external branch.  Several
 * labels (e.g. syscall_check_sig, syscall_restore) sit in lines
 * missing from this view.
 */
1751 /* NOTE: Not all syscalls exit this way. rt_sigreturn will exit
1752 * via syscall_exit_rfi if the signal was received while the process
1756 /* save return value now */
1758 STREG %r28,TASK_PT_GR28(%r1)
1760 /* Seems to me that dp could be wrong here, if the syscall involved
1761 * calling a module, and nothing got round to restoring dp on return.
/* Reschedule check: branch out if TIF_NEED_RESCHED is set. */
1765 syscall_check_resched:
1767 /* check for reschedule */
1769 LDREG TASK_TI_FLAGS(%r19),%r19 /* long */
1770 bb,<,n %r19, 31-TIF_NEED_RESCHED, syscall_do_resched /* forward */
1772 .import do_signal,code
/* Signal/notify check: skip to restore if no user-work flags set. */
1775 LDREG TASK_TI_FLAGS(%r19),%r19
1776 ldi (_TIF_USER_WORK_MASK & ~_TIF_NEED_RESCHED), %r26
1777 and,COND(<>) %r19, %r26, %r0
1778 b,n syscall_restore /* skip past if we've nothing to do */
1781 /* Save callee-save registers (for sigcontext).
1782 * FIXME: After this point the process structure should be
1783 * consistent with all the relevant state of the process
1784 * before the syscall. We need to verify this.
1787 ldo TASK_REGS(%r1), %r26 /* struct pt_regs *regs */
1791 ldo -16(%r30),%r29 /* Reference param save area */
/* do_notify_resume(regs, in_syscall=1); %r25 set in delay slot. */
1794 BL do_notify_resume,%r2
1795 ldi 1, %r25 /* long in_syscall = 1 */
1798 ldo TASK_REGS(%r1), %r20 /* reload pt_regs */
1801 b,n syscall_check_sig
/* Restore path: if any syscall-trace flag is set, take the slow
 * RFI-based return instead (syscall_restore_rfi). */
1806 /* Are we being ptraced? */
1807 LDREG TASK_TI_FLAGS(%r1),%r19
1808 ldi _TIF_SYSCALL_TRACE_MASK,%r2
1809 and,COND(=) %r19,%r2,%r0
1810 b,n syscall_restore_rfi
1812 ldo TASK_PT_FR31(%r1),%r19 /* reload fpregs */
1815 LDREG TASK_PT_SAR(%r1),%r19 /* restore SAR */
/* Reload user caller-saved registers from the task's pt_regs. */
1818 LDREG TASK_PT_GR2(%r1),%r2 /* restore user rp */
1819 LDREG TASK_PT_GR19(%r1),%r19
1820 LDREG TASK_PT_GR20(%r1),%r20
1821 LDREG TASK_PT_GR21(%r1),%r21
1822 LDREG TASK_PT_GR22(%r1),%r22
1823 LDREG TASK_PT_GR23(%r1),%r23
1824 LDREG TASK_PT_GR24(%r1),%r24
1825 LDREG TASK_PT_GR25(%r1),%r25
1826 LDREG TASK_PT_GR26(%r1),%r26
1827 LDREG TASK_PT_GR27(%r1),%r27 /* restore user dp */
1828 LDREG TASK_PT_GR28(%r1),%r28 /* syscall return value */
1829 LDREG TASK_PT_GR29(%r1),%r29
1830 LDREG TASK_PT_GR31(%r1),%r31 /* restore syscall rp */
1832 /* NOTE: We use rsm/ssm pair to make this operation atomic */
1833 LDREG TASK_PT_GR30(%r1),%r1 /* Get user sp */
1835 copy %r1,%r30 /* Restore user sp */
1836 mfsp %sr3,%r1 /* Get user space id */
1837 mtsp %r1,%sr7 /* Restore sr7 */
1840 /* Set sr2 to zero for userspace syscalls to work. */
/* Propagate the user space id into sr4-sr6 as well. */
1842 mtsp %r1,%sr4 /* Restore sr4 */
1843 mtsp %r1,%sr5 /* Restore sr5 */
1844 mtsp %r1,%sr6 /* Restore sr6 */
1846 depi PRIV_USER,31,2,%r31 /* ensure return to user mode. */
1849 /* decide whether to reset the wide mode bit
1851 * For a syscall, the W bit is stored in the lowest bit
1852 * of sp. Extract it and reset W if it is zero */
1853 extrd,u,*<> %r30,63,1,%r1
1855 /* now reset the lowest bit of sp if it was set */
1858 be,n 0(%sr3,%r31) /* return to user space */
1860 /* We have to return via an RFI, so that PSW T and R bits can be set
1862 * This sets up pt_regs so we can return via intr_restore, which is not
1863 * the most efficient way of doing things, but it works.
/*
 * syscall_restore_rfi: slow return path used when the task is being
 * traced/single-stepped.  Builds a new PSW (optionally setting the R
 * and T bits from TIF_SINGLESTEP/TIF_BLOCKSTEP), stores the user
 * space id into all saved space registers, saves r3-r18 if they were
 * not saved at syscall entry, and fixes up IAOQ0/IAOQ1 so the common
 * intr_restore path can RFI back to user mode.
 */
1865 syscall_restore_rfi:
1866 ldo -1(%r0),%r2 /* Set recovery cntr to -1 */
1867 mtctl %r2,%cr0 /* for immediate trap */
1868 LDREG TASK_PT_PSW(%r1),%r2 /* Get old PSW */
1869 ldi 0x0b,%r20 /* Create new PSW */
1870 depi -1,13,1,%r20 /* C, Q, D, and I bits */
1872 /* The values of SINGLESTEP_BIT and BLOCKSTEP_BIT are
1873 * set in thread_info.h and converted to PA bitmap
1874 * numbers in asm-offsets.c */
1876 /* if ((%r19.SINGLESTEP_BIT)) { %r20.27=1} */
1877 extru,= %r19,TIF_SINGLESTEP_PA_BIT,1,%r0
1878 depi -1,27,1,%r20 /* R bit */
1880 /* if ((%r19.BLOCKSTEP_BIT)) { %r20.7=1} */
1881 extru,= %r19,TIF_BLOCKSTEP_PA_BIT,1,%r0
1882 depi -1,7,1,%r20 /* T bit */
1884 STREG %r20,TASK_PT_PSW(%r1)
1886 /* Always store space registers, since sr3 can be changed (e.g. fork) */
/* NOTE(review): the instruction loading %r25 with the user space id
 * is in lines missing from this view — confirm against full source. */
1889 STREG %r25,TASK_PT_SR3(%r1)
1890 STREG %r25,TASK_PT_SR4(%r1)
1891 STREG %r25,TASK_PT_SR5(%r1)
1892 STREG %r25,TASK_PT_SR6(%r1)
1893 STREG %r25,TASK_PT_SR7(%r1)
1894 STREG %r25,TASK_PT_IASQ0(%r1)
1895 STREG %r25,TASK_PT_IASQ1(%r1)
1898 /* Now if old D bit is clear, it means we didn't save all registers
1899 * on syscall entry, so do that now. This only happens on TRACEME
1900 * calls, or if someone attached to us while we were on a syscall.
1901 * We could make this more efficient by not saving r3-r18, but
1902 * then we wouldn't be able to use the common intr_restore path.
1903 * It is only for traced processes anyway, so performance is not
1906 bb,< %r2,30,pt_regs_ok /* Branch if D set */
1907 ldo TASK_REGS(%r1),%r25
1908 reg_save %r25 /* Save r3 to r18 */
1910 /* Save the current sr */
1912 STREG %r2,TASK_PT_SR0(%r1)
1914 /* Save the scratch sr */
1916 STREG %r2,TASK_PT_SR1(%r1)
1918 /* sr2 should be set to zero for userspace syscalls */
1919 STREG %r0,TASK_PT_SR2(%r1)
/* D-bit-clear case: derive IAOQ0/IAOQ1 from the saved syscall rp. */
1921 LDREG TASK_PT_GR31(%r1),%r2
1922 depi PRIV_USER,31,2,%r2 /* ensure return to user mode. */
1923 STREG %r2,TASK_PT_IAOQ0(%r1)
1925 STREG %r2,TASK_PT_IAOQ1(%r1)
/* pt_regs_ok tail: registers were already saved; just force the
 * privilege bits of the saved IAOQ values to user mode. */
1930 LDREG TASK_PT_IAOQ0(%r1),%r2
1931 depi PRIV_USER,31,2,%r2 /* ensure return to user mode. */
1932 STREG %r2,TASK_PT_IAOQ0(%r1)
1933 LDREG TASK_PT_IAOQ1(%r1),%r2
1934 depi PRIV_USER,31,2,%r2
1935 STREG %r2,TASK_PT_IAOQ1(%r1)
/*
 * syscall_do_resched: tail-jump into schedule() with the return
 * address set to syscall_check_resched, so after rescheduling the
 * whole exit-work check is repeated.
 */
1940 load32 syscall_check_resched,%r2 /* if resched, we start over again */
1941 load32 schedule,%r19
1942 bv %r0(%r19) /* jumps to schedule() */
1944 ldo -16(%r30),%r29 /* Reference param save area */
1951 #ifdef CONFIG_FUNCTION_TRACER
/*
 * mcount entry used by compiler-inserted profiling calls.  Packed so
 * the mcount and ftrace_stub entry instructions share one L1 cache
 * line; branches to ftrace_function_trampoline with the caller's
 * original %sp in %arg2 (set in the delay slot).
 */
1953 .import ftrace_function_trampoline,code
1954 .align L1_CACHE_BYTES
1955 ENTRY_CFI(mcount, caller)
1957 .export _mcount,data
1959 * The 64bit mcount() function pointer needs 4 dwords, of which the
1960 * first two are free. We optimize it here and put 2 instructions for
1961 * calling mcount(), and 2 instructions for ftrace_stub(). That way we
1962 * have all on one L1 cacheline.
1965 b ftrace_function_trampoline
1966 copy %r3, %arg2 /* caller original %sp */
1969 .type ftrace_stub, @function
/* Slot reserved for the global gp value (filled in by head.S). */
1978 .dword 0 /* code in head.S puts value of global gp here */
1982 #ifdef CONFIG_DYNAMIC_FTRACE
/* Frame is doubled in one configuration (the #if arm selecting it is
 * missing from this view — presumably CONFIG_64BIT; confirm). */
1985 #define FTRACE_FRAME_SIZE (2*FRAME_SIZE)
1987 #define FTRACE_FRAME_SIZE FRAME_SIZE
/*
 * ftrace_caller: dynamic-ftrace entry.  Saves the caller-visible
 * scratch registers into its own frame, calls
 * ftrace_function_trampoline with %r23 = 0 (no pt_regs), restores
 * everything, and returns to the traced function.
 */
1989 ENTRY_CFI(ftrace_caller, caller,frame=FTRACE_FRAME_SIZE,CALLS,SAVE_RP,SAVE_SP)
1991 .global ftrace_caller
1993 STREG %r3, -FTRACE_FRAME_SIZE+1*REG_SZ(%sp)
1994 ldo -FTRACE_FRAME_SIZE(%sp), %r3
1995 STREG %rp, -RP_OFFSET(%r3)
1997 /* Offset 0 is already allocated for %r1 */
1998 STREG %r23, 2*REG_SZ(%r3)
1999 STREG %r24, 3*REG_SZ(%r3)
2000 STREG %r25, 4*REG_SZ(%r3)
2001 STREG %r26, 5*REG_SZ(%r3)
2002 STREG %r28, 6*REG_SZ(%r3)
2003 STREG %r29, 7*REG_SZ(%r3)
2005 STREG %r19, 8*REG_SZ(%r3)
2006 STREG %r20, 9*REG_SZ(%r3)
2007 STREG %r21, 10*REG_SZ(%r3)
2008 STREG %r22, 11*REG_SZ(%r3)
2009 STREG %r27, 12*REG_SZ(%r3)
2010 STREG %r31, 13*REG_SZ(%r3)
/* Call the trampoline; %r23 == 0 tells it no pt_regs were captured. */
2017 ldi 0, %r23 /* no pt_regs */
2018 b,l ftrace_function_trampoline, %rp
/* Restore everything saved above, in the same slots. */
2021 LDREG -RP_OFFSET(%r3), %rp
2022 LDREG 2*REG_SZ(%r3), %r23
2023 LDREG 3*REG_SZ(%r3), %r24
2024 LDREG 4*REG_SZ(%r3), %r25
2025 LDREG 5*REG_SZ(%r3), %r26
2026 LDREG 6*REG_SZ(%r3), %r28
2027 LDREG 7*REG_SZ(%r3), %r29
2029 LDREG 8*REG_SZ(%r3), %r19
2030 LDREG 9*REG_SZ(%r3), %r20
2031 LDREG 10*REG_SZ(%r3), %r21
2032 LDREG 11*REG_SZ(%r3), %r22
2033 LDREG 12*REG_SZ(%r3), %r27
2034 LDREG 13*REG_SZ(%r3), %r31
2036 LDREG 1*REG_SZ(%r3), %r3
2038 LDREGM -FTRACE_FRAME_SIZE(%sp), %r1
2039 /* Adjust return point to jump back to beginning of traced function */
2043 ENDPROC_CFI(ftrace_caller)
2045 #ifdef CONFIG_HAVE_DYNAMIC_FTRACE_WITH_REGS
/*
 * ftrace_regs_caller: like ftrace_caller, but additionally captures a
 * full pt_regs (all GRs %r2-%r31 plus SAR) on the stack and passes it
 * to ftrace_function_trampoline as %arg3, so the tracer may inspect
 * and modify register state.  Everything is restored afterwards.
 */
2046 ENTRY_CFI(ftrace_regs_caller,caller,frame=FTRACE_FRAME_SIZE+PT_SZ_ALGN,
2047 CALLS,SAVE_RP,SAVE_SP)
2049 .global ftrace_regs_caller
2051 ldo -FTRACE_FRAME_SIZE(%sp), %r1
2052 STREG %rp, -RP_OFFSET(%r1)
/* Reserve pt_regs space on the stack; %r1 points at the area
 * (the instruction computing %r1 from %sp is in missing lines). */
2055 ldo PT_SZ_ALGN(%sp), %sp
2057 STREG %rp, PT_GR2(%r1)
2058 STREG %r3, PT_GR3(%r1)
2059 STREG %r4, PT_GR4(%r1)
2060 STREG %r5, PT_GR5(%r1)
2061 STREG %r6, PT_GR6(%r1)
2062 STREG %r7, PT_GR7(%r1)
2063 STREG %r8, PT_GR8(%r1)
2064 STREG %r9, PT_GR9(%r1)
2065 STREG %r10, PT_GR10(%r1)
2066 STREG %r11, PT_GR11(%r1)
2067 STREG %r12, PT_GR12(%r1)
2068 STREG %r13, PT_GR13(%r1)
2069 STREG %r14, PT_GR14(%r1)
2070 STREG %r15, PT_GR15(%r1)
2071 STREG %r16, PT_GR16(%r1)
2072 STREG %r17, PT_GR17(%r1)
2073 STREG %r18, PT_GR18(%r1)
2074 STREG %r19, PT_GR19(%r1)
2075 STREG %r20, PT_GR20(%r1)
2076 STREG %r21, PT_GR21(%r1)
2077 STREG %r22, PT_GR22(%r1)
2078 STREG %r23, PT_GR23(%r1)
2079 STREG %r24, PT_GR24(%r1)
2080 STREG %r25, PT_GR25(%r1)
2081 STREG %r26, PT_GR26(%r1)
2082 STREG %r27, PT_GR27(%r1)
2083 STREG %r28, PT_GR28(%r1)
2084 STREG %r29, PT_GR29(%r1)
2085 STREG %r30, PT_GR30(%r1)
2086 STREG %r31, PT_GR31(%r1)
/* NOTE(review): %r26 is stored into PT_SAR — presumably %r26 holds
 * the SAR value at this point (mfctl in missing lines); confirm. */
2088 STREG %r26, PT_SAR(%r1)
2091 LDREG -FTRACE_FRAME_SIZE-PT_SZ_ALGN(%sp), %r25
2093 ldo -FTRACE_FRAME_SIZE(%r1), %arg2
2094 b,l ftrace_function_trampoline, %rp
2095 copy %r1, %arg3 /* struct pt_regs */
/* Recompute the pt_regs pointer and restore all saved state. */
2097 ldo -PT_SZ_ALGN(%sp), %r1
2099 LDREG PT_SAR(%r1), %rp
2102 LDREG PT_GR2(%r1), %rp
2103 LDREG PT_GR3(%r1), %r3
2104 LDREG PT_GR4(%r1), %r4
2105 LDREG PT_GR5(%r1), %r5
2106 LDREG PT_GR6(%r1), %r6
2107 LDREG PT_GR7(%r1), %r7
2108 LDREG PT_GR8(%r1), %r8
2109 LDREG PT_GR9(%r1), %r9
2110 LDREG PT_GR10(%r1),%r10
2111 LDREG PT_GR11(%r1),%r11
2112 LDREG PT_GR12(%r1),%r12
2113 LDREG PT_GR13(%r1),%r13
2114 LDREG PT_GR14(%r1),%r14
2115 LDREG PT_GR15(%r1),%r15
2116 LDREG PT_GR16(%r1),%r16
2117 LDREG PT_GR17(%r1),%r17
2118 LDREG PT_GR18(%r1),%r18
2119 LDREG PT_GR19(%r1),%r19
2120 LDREG PT_GR20(%r1),%r20
2121 LDREG PT_GR21(%r1),%r21
2122 LDREG PT_GR22(%r1),%r22
2123 LDREG PT_GR23(%r1),%r23
2124 LDREG PT_GR24(%r1),%r24
2125 LDREG PT_GR25(%r1),%r25
2126 LDREG PT_GR26(%r1),%r26
2127 LDREG PT_GR27(%r1),%r27
2128 LDREG PT_GR28(%r1),%r28
2129 LDREG PT_GR29(%r1),%r29
2130 LDREG PT_GR30(%r1),%r30
2131 LDREG PT_GR31(%r1),%r31
/* Release the pt_regs area and the ftrace frame. */
2133 ldo -PT_SZ_ALGN(%sp), %sp
2134 LDREGM -FTRACE_FRAME_SIZE(%sp), %r1
2135 /* Adjust return point to jump back to beginning of traced function */
2139 ENDPROC_CFI(ftrace_regs_caller)
2144 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
/*
 * return_to_handler: function-graph-tracer return trampoline.  The
 * traced function "returns" here; this saves the return values
 * (stores in missing lines), calls ftrace_return_to_handler to get
 * the real return address, restores the values and returns there.
 */
2146 ENTRY_CFI(return_to_handler, caller,frame=FRAME_SIZE)
2147 .export parisc_return_to_handler,data
2148 parisc_return_to_handler:
2150 STREG %r0,-RP_OFFSET(%sp) /* store 0 as %rp */
2152 STREGM %r1,FRAME_SIZE(%sp)
2160 /* call ftrace_return_to_handler(0) */
2161 .import ftrace_return_to_handler,code
2162 load32 ftrace_return_to_handler,%ret0
2163 load32 .Lftrace_ret,%r2
2165 ldo -16(%sp),%ret1 /* Reference param save area */
2174 /* restore original return values */
2178 /* return from function */
2184 LDREGM -FRAME_SIZE(%sp),%r3
2185 ENDPROC_CFI(return_to_handler)
2187 #endif /* CONFIG_FUNCTION_GRAPH_TRACER */
2189 #endif /* CONFIG_FUNCTION_TRACER */
2191 #ifdef CONFIG_IRQSTACKS
2192 /* void call_on_stack(unsigned long param1, void *func,
2193 unsigned long new_stack) */
/*
 * Switches %sp onto the stack supplied in %arg2 (allocating two
 * frames), calls func (%arg1) with param1 still in %arg0, then
 * restores the previous %sp and %rp from the frame marker.  The
 * 64-bit arm always dereferences the function descriptor; the 32-bit
 * arm does so only when the PLABEL bit is set.
 */
2194 ENTRY_CFI(call_on_stack, FRAME=2*FRAME_SIZE,CALLS,SAVE_RP,SAVE_SP)
2195 ENTRY(_call_on_stack)
2198 /* Regarding the HPPA calling conventions for function pointers,
2199 we assume the PIC register is not changed across call. For
2200 CONFIG_64BIT, the argument pointer is left to point at the
2201 argument region allocated for the call to call_on_stack. */
2203 /* Switch to new stack. We allocate two frames. */
2204 ldo 2*FRAME_SIZE(%arg2), %sp
2205 # ifdef CONFIG_64BIT
2206 /* Save previous stack pointer and return pointer in frame marker */
2207 STREG %rp, -FRAME_SIZE-RP_OFFSET(%sp)
2208 /* Calls always use function descriptor */
2209 LDREG 16(%arg1), %arg1
2211 STREG %r1, -FRAME_SIZE-REG_SZ(%sp)
2212 LDREG -FRAME_SIZE-RP_OFFSET(%sp), %rp
2214 LDREG -FRAME_SIZE-REG_SZ(%sp), %sp
2216 /* Save previous stack pointer and return pointer in frame marker */
2217 STREG %r1, -FRAME_SIZE-REG_SZ(%sp)
2218 STREG %rp, -FRAME_SIZE-RP_OFFSET(%sp)
2219 /* Calls use function descriptor if PLABEL bit is set */
2220 bb,>=,n %arg1, 30, 1f
2222 LDREG 0(%arg1), %arg1
2224 be,l 0(%sr4,%arg1), %sr0, %r31
2226 LDREG -FRAME_SIZE-RP_OFFSET(%sp), %rp
2228 LDREG -FRAME_SIZE-REG_SZ(%sp), %sp
2229 # endif /* CONFIG_64BIT */
2230 ENDPROC_CFI(call_on_stack)
2231 #endif /* CONFIG_IRQSTACKS */
2233 ENTRY_CFI(get_register)
2235 * get_register is used by the non access tlb miss handlers to
2236 * copy the value of the general register specified in r8 into
2237 * r1. This routine can't be used for shadowed registers, since
2238 * the rfir will restore the original value. So, for the shadowed
2239 * registers we put a -1 into r1 to indicate that the register
2240 * should not be used (the register being copied could also have
2241 * a -1 in it, but that is OK, it just means that we will have
2242 * to use the slow path instead).
/*
 * Jump table: one bv-return per register number; the copy into %r1
 * for each case sits in the branch delay slot (those delay-slot
 * instructions are not visible in this chunk).  %r25 holds the
 * return address used by all entries.
 */
2246 bv %r0(%r25) /* r0 */
2248 bv %r0(%r25) /* r1 - shadowed */
2250 bv %r0(%r25) /* r2 */
2252 bv %r0(%r25) /* r3 */
2254 bv %r0(%r25) /* r4 */
2256 bv %r0(%r25) /* r5 */
2258 bv %r0(%r25) /* r6 */
2260 bv %r0(%r25) /* r7 */
2262 bv %r0(%r25) /* r8 - shadowed */
2264 bv %r0(%r25) /* r9 - shadowed */
2266 bv %r0(%r25) /* r10 */
2268 bv %r0(%r25) /* r11 */
2270 bv %r0(%r25) /* r12 */
2272 bv %r0(%r25) /* r13 */
2274 bv %r0(%r25) /* r14 */
2276 bv %r0(%r25) /* r15 */
2278 bv %r0(%r25) /* r16 - shadowed */
2280 bv %r0(%r25) /* r17 - shadowed */
2282 bv %r0(%r25) /* r18 */
2284 bv %r0(%r25) /* r19 */
2286 bv %r0(%r25) /* r20 */
2288 bv %r0(%r25) /* r21 */
2290 bv %r0(%r25) /* r22 */
2292 bv %r0(%r25) /* r23 */
2294 bv %r0(%r25) /* r24 - shadowed */
2296 bv %r0(%r25) /* r25 - shadowed */
2298 bv %r0(%r25) /* r26 */
2300 bv %r0(%r25) /* r27 */
2302 bv %r0(%r25) /* r28 */
2304 bv %r0(%r25) /* r29 */
2306 bv %r0(%r25) /* r30 */
2308 bv %r0(%r25) /* r31 */
2310 ENDPROC_CFI(get_register)
2313 ENTRY_CFI(set_register)
2315 * set_register is used by the non access tlb miss handlers to
2316 * copy the value of r1 into the general register specified in
/*
 * Jump table mirroring get_register: one bv-return per register
 * number, with the copy from %r1 into the target register in the
 * branch delay slot (delay-slot instructions not visible in this
 * chunk).  %r25 holds the return address used by all entries.
 */
2321 bv %r0(%r25) /* r0 (silly, but it is a place holder) */
2323 bv %r0(%r25) /* r1 */
2325 bv %r0(%r25) /* r2 */
2327 bv %r0(%r25) /* r3 */
2329 bv %r0(%r25) /* r4 */
2331 bv %r0(%r25) /* r5 */
2333 bv %r0(%r25) /* r6 */
2335 bv %r0(%r25) /* r7 */
2337 bv %r0(%r25) /* r8 */
2339 bv %r0(%r25) /* r9 */
2341 bv %r0(%r25) /* r10 */
2343 bv %r0(%r25) /* r11 */
2345 bv %r0(%r25) /* r12 */
2347 bv %r0(%r25) /* r13 */
2349 bv %r0(%r25) /* r14 */
2351 bv %r0(%r25) /* r15 */
2353 bv %r0(%r25) /* r16 */
2355 bv %r0(%r25) /* r17 */
2357 bv %r0(%r25) /* r18 */
2359 bv %r0(%r25) /* r19 */
2361 bv %r0(%r25) /* r20 */
2363 bv %r0(%r25) /* r21 */
2365 bv %r0(%r25) /* r22 */
2367 bv %r0(%r25) /* r23 */
2369 bv %r0(%r25) /* r24 */
2371 bv %r0(%r25) /* r25 */
2373 bv %r0(%r25) /* r26 */
2375 bv %r0(%r25) /* r27 */
2377 bv %r0(%r25) /* r28 */
2379 bv %r0(%r25) /* r29 */
2381 bv %r0(%r25) /* r30 */
2383 bv %r0(%r25) /* r31 */
2385 ENDPROC_CFI(set_register)