1 /* SPDX-License-Identifier: GPL-2.0-or-later
4 * linux/arch/m68k/kernel/entry.S
6 * Copyright (C) 1991, 1992 Linus Torvalds
8 * Linux/m68k support by Hamish Macdonald
10 * 68060 fixes by Jesper Skov
15 * entry.S contains the system-call and fault low-level handling routines.
16 * This also contains the timer-interrupt handler, as well as all interrupts
17 * and faults that can result in a task-switch.
19 * NOTE: This code handles signal-recognition, which happens every time
20 * after a timer-interrupt and after each system call.
25 * 12/03/96 Jes: Currently we only support m68k single-cpu systems, so
26 * all pointers that used to be 'current' are now entry
27 * number 0 in the 'current_set' list.
29 * 6/05/00 RZ: added writeback completion after return from sighandler
33 #include <linux/linkage.h>
34 #include <asm/errno.h>
35 #include <asm/setup.h>
36 #include <asm/traps.h>
37 #include <asm/unistd.h>
38 #include <asm/asm-offsets.h>
39 #include <asm/entry.h>
| Entry points exported to C code and to the vector-table setup
| elsewhere in arch/m68k (the *_fixup symbols are runtime patch points,
| see auto_inthandler/user_inthandler below).
41 .globl system_call, buserr, trap, resume
43 .globl __sys_fork, __sys_clone, __sys_vfork
45 .globl auto_irqhandler_fixup
46 .globl user_irqvec_fixup
| NOTE(review): fragment — each pea pushes the address of the pt_regs
| area (SWITCH_STACK_SIZE above %sp) as an argument to a C helper;
| presumably the __sys_clone/__sys_vfork wrappers — confirm against
| the full file.
57 pea %sp@(SWITCH_STACK_SIZE)
70 pea %sp@(SWITCH_STACK_SIZE)
| sys_sigreturn body (entry label not visible in this chunk):
| set %a1 = switch_stack, %a0 = pt_regs, then open an 84-byte gap
| below %sp so the C handler can rebuild a larger replacement
| exception frame in place (see the stack layout comments at the
| rt_sigreturn path below).
77 movel %sp,%a1 | switch_stack pointer
78 lea %sp@(SWITCH_STACK_SIZE),%a0 | pt_regs pointer
79 lea %sp@(-84),%sp | leave a gap
| NOTE(review): 84 presumably covers the worst-case frame growth —
| confirm against the exception frame format sizes.
83 jra 1f | shared with rt_sigreturn()
85 ENTRY(sys_rt_sigreturn)
| Same setup as sys_sigreturn: %a1 = switch_stack, %a0 = pt_regs,
| then leave a gap for a possibly larger replacement exception frame
| built by the C do_rt_sigreturn().
87 movel %sp,%a1 | switch_stack pointer
88 lea %sp@(SWITCH_STACK_SIZE),%a0 | pt_regs pointer
89 lea %sp@(-84),%sp | leave a gap
| Stack layout before the C helper runs:
93 | [original pt_regs address] [original switch_stack address]
94 | [gap] [switch_stack] [pt_regs] [exception frame]
| ... and after it returns:
99 | [original pt_regs address] [original switch_stack address]
100 | [unused part of the gap] [moved switch_stack] [moved pt_regs]
101 | [replacement exception frame]
102 | return value of do_{rt_,}sigreturn() points to moved switch_stack.
| %d0 holds that pointer, so this single move both switches to the
| moved copy and drops everything below it.
104 movel %d0,%sp | discard the leftover junk
106 | stack contents now is just [syscall return address] [pt_regs] [frame]
| Reload the user-visible %d0 from the restored pt_regs (the +4 skips
| the syscall return address on top).
108 movel %sp@(PT_OFF_D0+4),%d0
| Bus-error path fragment (entry label not visible): push pt_regs as
| the argument to the C handler, then take the common return path.
114 movel %sp,%sp@- | stack frame pointer argument
117 jra ret_from_exception
| Generic trap path fragment (entry label not visible): same shape as
| the bus-error path — pt_regs argument, common exception return.
122 movel %sp,%sp@- | stack frame pointer argument
125 jra ret_from_exception
127 | After a fork we jump here directly from resume,
128 | so that %d1 contains the previous task
129 | schedule_tail now used regardless of CONFIG_SMP
| (ret_from_fork fragment — the schedule_tail call itself is elided
| in this chunk.)
134 jra ret_from_exception
136 ENTRY(ret_from_kernel_thread)
137 | a3 contains the kernel thread payload, d7 - its argument
| (the schedule_tail call and the payload invocation are elided in
| this chunk; only the common return remains visible)
143 jra ret_from_exception
| ColdFire / no-MMU variants of the low-level handlers.
145 #if defined(CONFIG_COLDFIRE) || !defined(CONFIG_MMU)
147 #ifdef TRAP_DBG_INTERRUPT
| Debug-interrupt trap: hand pt_regs to the C handler, return via the
| common exception path (handler call elided in this chunk).
153 movel %sp,%sp@- /* stack frame pointer argument */
156 jra ret_from_exception
160 /* save top of frame */
| Push the common return path so the dispatched routine "returns"
| straight into ret_from_exception (dispatch target not visible here).
164 pea ret_from_exception
| Userspace signal-return trampolines: load the sigreturn syscall
| number into %d0 (the trap #0 re-entering the kernel is elided in
| this chunk).
167 ENTRY(ret_from_user_signal)
168 moveq #__NR_sigreturn,%d0
| rt variant uses movel, not moveq: moveq only encodes immediates in
| -128..127 and __NR_rt_sigreturn is larger.
171 ENTRY(ret_from_user_rt_signal)
172 movel #__NR_rt_sigreturn,%d0
| Traced-syscall slow path fragments (do_trace_entry / do_trace_exit).
| Pre-set the return value so a denied/skipped syscall reads -ENOSYS.
178 movel #-ENOSYS,%sp@(PT_OFF_D0)| needed for strace
181 jbsr syscall_trace_enter
| syscall_trace_enter returns -1 to deny the syscall; adding 1 makes
| that the only input that sets Z, saving a cmpil against -1.
184 addql #1,%d0 | optimization for cmpil #-1,%d0
| Re-fetch the (possibly ptrace-modified) syscall number and bounds-
| check it; out-of-range numbers fall through to -ENOSYS.
186 movel %sp@(PT_OFF_ORIG_D0),%d0
187 cmpl #NR_syscalls,%d0
191 movel #-ENOSYS,%sp@(PT_OFF_D0)
197 jbsr syscall_trace_leave
200 jra .Lret_from_exception
| system_call dispatch fragment. Record the exception frame base so
| ptrace/signal code can find the registers.
209 movel %sp,%curptr@(TASK_THREAD+THREAD_ESP0)
| Quick byte test of the thread-info flag word: any pending work
| (trace etc.) diverts to the slow path (branch elided in this chunk).
212 tstb %a1@(TINFO_FLAGS+2)
214 | seccomp filter active?
215 btst #5,%a1@(TINFO_FLAGS+2)
| Bounds-check the syscall number before indexing the table.
217 cmpl #NR_syscalls,%d0
| Indirect call through sys_call_table[%d0] (longword entries).
220 jbsr @(sys_call_table,%d0:l:4)@(0)
221 movel %d0,%sp@(PT_OFF_D0) | save the return value
| On the way out, any set thread-info flag bit takes the
| syscall_exit_work path.
224 movel %curptr@(TASK_STACK),%a1
225 movew %a1@(TINFO_FLAGS+2),%d0
226 jne syscall_exit_work
| syscall_exit_work fragment: bit 5 of the high SR byte is the
| supervisor bit — set means we would return to kernel context.
230 btst #5,%sp@(PT_OFF_SR) | check if returning to kernel
231 bnes 1b | if so, skip resched, signals
241 ENTRY(ret_from_exception)
242 .Lret_from_exception:
| Bit 5 of the upper SR byte (= SR bit 13) is the supervisor bit:
| set means the exception interrupted kernel code, so skip all
| userspace bookkeeping.
243 btst #5,%sp@(PT_OFF_SR) | check if returning to kernel
244 bnes 1f | if so, skip resched, signals
245 | only allow interrupts when we are really the last one on the
246 | kernel stack, otherwise stack overflow can occur during
247 | heavy interrupt load
| Check the pending-work byte of thread-info flags.
251 movel %curptr@(TASK_STACK),%a1
252 moveb %a1@(TINFO_FLAGS+3),%d0
| Keep THREAD_ESP0 in sync with the current exception frame.
258 movel %sp,%curptr@(TASK_THREAD+THREAD_ESP0)
| do_signal_return fragment: call do_notify_resume(regs) with a dummy
| return-address slot below the argument, then loop back to re-check
| for further work.
267 subql #4,%sp | dummy return address
269 pea %sp@(SWITCH_STACK_SIZE)
270 bsrl do_notify_resume
274 jbra resume_userspace
| do_delayed_trace fragment: bit 7 of the high SR byte is the trace
| bit (SR bit 15); clear it before resuming.
277 bclr #7,%sp@(PT_OFF_SR) | clear trace bit in SR
284 jbra resume_userspace
287 /* This is the main interrupt handler for autovector interrupts */
289 ENTRY(auto_inthandler)
292 | put exception # in d0
| Extract the 10-bit vector offset from the frame's format/vector
| word (bits 4..13 of PT_OFF_FORMATVEC).
293 bfextu %sp@(PT_OFF_FORMATVEC){#4,#10},%d0
297 movel %d0,%sp@- | put vector # on stack
| Patch point: the symbol addresses the operand of the following jsr
| (. + 2 skips the opcode word) so setup code can retarget the
| handler at runtime.
298 auto_irqhandler_fixup = . + 2
299 jsr do_IRQ | process the IRQ
300 addql #8,%sp | pop parameters off stack
301 jra ret_from_exception
303 /* Handler for user defined interrupt vectors */
305 ENTRY(user_inthandler)
308 | put exception # in d0
309 bfextu %sp@(PT_OFF_FORMATVEC){#4,#10},%d0
| Patch point: boot code rewrites the instruction word 2 bytes past
| this label to rebase the user vector number (the patched
| instruction itself is elided in this chunk).
310 user_irqvec_fixup = . + 2
314 movel %d0,%sp@- | put vector # on stack
315 jsr do_IRQ | process the IRQ
316 addql #8,%sp | pop parameters off stack
317 jra ret_from_exception
319 /* Handler for uninitialized and spurious interrupts */
321 ENTRY(bad_inthandler)
| (register save and the call to the bad-interrupt C handler are
| elided in this chunk; only the common return remains)
328 jra ret_from_exception
332 * Beware - when entering resume, prev (the current task) is
333 * in a0, next (the new task) is in a1, so don't change these
334 * registers until their contents are no longer needed.
| ---- save state of prev (%a0) ----
| Save the status register into the thread struct.
338 movew %sr,%a0@(TASK_THREAD+THREAD_SR)
340 /* save fs (sfc,%dfc) (may be pointing to kernel memory) */
342 movew %d0,%a0@(TASK_THREAD+THREAD_FC)
345 /* it is better to use a movel here instead of a movew 8*) */
347 movel %d0,%a0@(TASK_THREAD+THREAD_USP)
349 /* save non-scratch registers on stack */
352 /* save current kernel stack pointer */
353 movel %sp,%a0@(TASK_THREAD+THREAD_KSP)
355 /* save floating point context */
356 #ifndef CONFIG_M68KFPU_EMU_ONLY
357 #ifdef CONFIG_M68KFPU_EMU
| fsave dumps the FPU's internal state frame; a null frame means the
| FPU holds no live context.
361 fsave %a0@(TASK_THREAD+THREAD_FPSTATE)
363 #if defined(CONFIG_M68060)
364 #if !defined(CPU_M68060_ONLY)
| Runtime CPU check: bit 3 of m68k_cputype's low byte flags a 68060.
365 btst #3,m68k_cputype+3
368 /* The 060 FPU keeps status in bits 15-8 of the first longword */
369 tstb %a0@(TASK_THREAD+THREAD_FPSTATE+2)
371 #if !defined(CPU_M68060_ONLY)
374 #endif /* CONFIG_M68060 */
375 #if !defined(CPU_M68060_ONLY)
| Non-060: a zero first byte of the fsave frame marks an empty FPU
| state, so the programmer-model save below can be skipped (the
| conditional branch is elided in this chunk).
376 1: tstb %a0@(TASK_THREAD+THREAD_FPSTATE)
| Save the programmer-visible FPU model: data registers and the
| control/status/instruction-address registers.
379 2: fmovemx %fp0-%fp7,%a0@(TASK_THREAD+THREAD_FPREG)
380 fmoveml %fpcr/%fpsr/%fpiar,%a0@(TASK_THREAD+THREAD_FPCNTL)
382 #endif /* CONFIG_M68KFPU_EMU_ONLY */
383 /* Return previous task in %d1 */
386 /* switch to new task (a1 contains new task) */
| ---- restore state of next (%a1): mirror of the save sequence ----
389 /* restore floating point context */
390 #ifndef CONFIG_M68KFPU_EMU_ONLY
391 #ifdef CONFIG_M68KFPU_EMU
395 #if defined(CONFIG_M68060)
396 #if !defined(CPU_M68060_ONLY)
397 btst #3,m68k_cputype+3
400 /* The 060 FPU keeps status in bits 15-8 of the first longword */
401 tstb %a1@(TASK_THREAD+THREAD_FPSTATE+2)
403 #if !defined(CPU_M68060_ONLY)
406 #endif /* CONFIG_M68060 */
407 #if !defined(CPU_M68060_ONLY)
408 1: tstb %a1@(TASK_THREAD+THREAD_FPSTATE)
| Reload the programmer-model FPU state, then frestore the internal
| frame last so the FPU leaves the null/idle state only when a full
| context exists.
411 2: fmovemx %a1@(TASK_THREAD+THREAD_FPREG),%fp0-%fp7
412 fmoveml %a1@(TASK_THREAD+THREAD_FPCNTL),%fpcr/%fpsr/%fpiar
413 3: frestore %a1@(TASK_THREAD+THREAD_FPSTATE)
415 #endif /* CONFIG_M68KFPU_EMU_ONLY */
417 /* restore the kernel stack pointer */
418 movel %a1@(TASK_THREAD+THREAD_KSP),%sp
420 /* restore non-scratch registers */
423 /* restore user stack pointer */
424 movel %a1@(TASK_THREAD+THREAD_USP),%a0
427 /* restore fs (sfc,%dfc) */
428 movew %a1@(TASK_THREAD+THREAD_FC),%a0
432 /* restore status register */
433 movew %a1@(TASK_THREAD+THREAD_SR),%d0
439 #endif /* CONFIG_MMU && !CONFIG_COLDFIRE */