Commit | Line | Data |
---|---|---|
1da177e4 LT |
1 | /* |
2 | * This file is subject to the terms and conditions of the GNU General Public | |
3 | * License. See the file "COPYING" in the main directory of this archive | |
4 | * for more details. | |
5 | * | |
6 | * Copyright (C) 1994 - 2000, 2001, 2003 Ralf Baechle | |
7 | * Copyright (C) 1999, 2000 Silicon Graphics, Inc. | |
8 | * Copyright (C) 2001 MIPS Technologies, Inc. | |
9 | */ | |
1da177e4 LT |
10 | |
11 | #include <asm/asm.h> | |
12 | #include <asm/asmmacro.h> | |
13 | #include <asm/regdef.h> | |
14 | #include <asm/mipsregs.h> | |
15 | #include <asm/stackframe.h> | |
16 | #include <asm/isadep.h> | |
17 | #include <asm/thread_info.h> | |
18 | #include <asm/war.h> | |
41c594ab RB |
19 | #ifdef CONFIG_MIPS_MT_SMTC |
20 | #include <asm/mipsmtregs.h> | |
21 | #endif | |
# ---------------------------------------------------------------------------
# Preemption plumbing.  Without CONFIG_PREEMPT, resume_kernel is aliased to
# restore_all (a return to kernel mode needs no preemption check), and
# ret_from_exception below just masks interrupts ("preempt stop") before
# branching into the common IRQ-exit path.  With CONFIG_PREEMPT,
# __ret_from_irq is aliased to ret_from_exception instead, and a real
# resume_kernel is defined further down in this file.
# ---------------------------------------------------------------------------
1da177e4 | 22 | |
f431baa5 | 23 | #ifndef CONFIG_PREEMPT
1da177e4 | 24 | #define resume_kernel restore_all
7da8a581 FBH |
25 | #else
26 | #define __ret_from_irq ret_from_exception | |
1da177e4 LT |
27 | #endif
28 | ||
29 | .text | |
30 | .align 5 | |
7da8a581 | 31 | #ifndef CONFIG_PREEMPT
1da177e4 | 32 | FEXPORT(ret_from_exception)
7da8a581 FBH |
33 | local_irq_disable # preempt stop
34 | b __ret_from_irq | |
f431baa5 | 35 | #endif
# ---------------------------------------------------------------------------
# Common exception/IRQ exit path.  $28 (gp) holds the current thread_info
# pointer while in the kernel, so the TI_* offsets below index into it.
# ret_from_irq restores the saved pt_regs pointer into TI_REGS, then the code
# checks PT_STATUS on the stack: if the KU_USER bit is clear we are returning
# to kernel mode (resume_kernel), otherwise we disable interrupts and sample
# TI_FLAGS to see whether any _TIF_WORK_MASK work is pending before the final
# register restore in restore_all.
# ---------------------------------------------------------------------------
7da8a581 FBH |
36 | FEXPORT(ret_from_irq)
37 | LONG_S s0, TI_REGS($28) | |
38 | FEXPORT(__ret_from_irq) | |
c90e6fbb DA |
39 | /*
40 | * We can be coming here from a syscall done in the kernel space, | |
41 | * e.g. a failed kernel_execve(). | |
42 | */ | |
43 | resume_userspace_check: | |
1da177e4 LT |
44 | LONG_L t0, PT_STATUS(sp) # returning to kernel mode?
45 | andi t0, t0, KU_USER | |
46 | beqz t0, resume_kernel | |
47 | ||
c2648527 TS |
48 | resume_userspace:
49 | local_irq_disable # make sure we dont miss an | |
1da177e4 LT |
50 | # interrupt setting need_resched
51 | # between sampling and return | |
52 | LONG_L a2, TI_FLAGS($28) # current->work | |
c2648527 TS |
53 | andi t0, a2, _TIF_WORK_MASK # (ignoring syscall_trace)
54 | bnez t0, work_pending | |
1da177e4 LT |
55 | j restore_all
56 | ||
# ---------------------------------------------------------------------------
# Kernel preemption check (only built with CONFIG_PREEMPT; otherwise
# resume_kernel is #defined to restore_all above).  With IRQs off: skip
# preemption if preempt_count (TI_PRE_COUNT) is non-zero, if TIF_NEED_RESCHED
# is clear, or if the interrupted context had interrupts disabled (low bit of
# the saved PT_STATUS).  Otherwise call preempt_schedule_irq and re-check,
# since new resched requests may arrive while we were scheduled out.
# ---------------------------------------------------------------------------
57 | #ifdef CONFIG_PREEMPT | |
c2648527 | 58 | resume_kernel:
a18815ab | 59 | local_irq_disable
1da177e4 LT |
60 | lw t0, TI_PRE_COUNT($28)
61 | bnez t0, restore_all | |
62 | need_resched: | |
63 | LONG_L t0, TI_FLAGS($28) | |
64 | andi t1, t0, _TIF_NEED_RESCHED | |
65 | beqz t1, restore_all | |
66 | LONG_L t0, PT_STATUS(sp) # Interrupts off? | |
67 | andi t0, 1 | |
68 | beqz t0, restore_all | |
a18815ab | 69 | jal preempt_schedule_irq
cdaed73a | 70 | b need_resched
1da177e4 LT |
71 | #endif
72 | ||
# ---------------------------------------------------------------------------
# First return path for a freshly created kernel thread: finish the context
# switch (schedule_tail with a0 = prev task), then call the thread function
# held in s0 with its argument from s1, and fall into the syscall exit path
# if that function ever returns.
# ---------------------------------------------------------------------------
8f54bcac AV |
73 | FEXPORT(ret_from_kernel_thread)
74 | jal schedule_tail # a0 = struct task_struct *prev | |
75 | move a0, s1 | |
76 | jal s0 | |
9b0e5d42 | 77 | j syscall_exit
8f54bcac | 78 | |
# ---------------------------------------------------------------------------
# ret_from_fork: first userspace return of a forked task; schedule_tail
# completes the context switch, then execution falls through into
# syscall_exit.  syscall_exit samples TI_FLAGS with IRQs disabled and
# diverts to syscall_exit_work if any _TIF_ALLWORK_MASK bit (resched,
# signals, tracing, ...) is set; otherwise it falls into restore_all.
# ---------------------------------------------------------------------------
1da177e4 | 79 | FEXPORT(ret_from_fork)
36c8b586 | 80 | jal schedule_tail # a0 = struct task_struct *prev
1da177e4 LT |
81 | |
82 | FEXPORT(syscall_exit) | |
83 | local_irq_disable # make sure need_resched and | |
84 | # signals dont change between | |
85 | # sampling and return | |
86 | LONG_L a2, TI_FLAGS($28) # current->work | |
87 | li t0, _TIF_ALLWORK_MASK | |
88 | and t0, a2, t0 | |
89 | bnez t0, syscall_exit_work | |
90 | ||
# ---------------------------------------------------------------------------
# Final register restore and eret.  restore_all rebuilds the full frame
# (temp + at + static registers) before falling into restore_partial, which
# only restores the "some" set and the stack pointer.
#
# The SMTC section first (optionally, under IM_BACKSTOP) re-arms interrupt
# mask bits that were parked in CP0_TCCONTEXT while temporarily masked,
# using DMT/EMT to quiesce other threads and _ehb/mips_ihb barriers around
# the CP0 updates, then runs any deferred IPIs (deferred_smtc_ipi) with
# TI_REGS temporarily pointing at the current frame.
#
# Under CONFIG_TRACE_IRQFLAGS, restore_partial tells lockdep whether IRQs
# will be on after eret by testing the saved PT_STATUS IE bit (IEP on
# R3000/TX39 class CPUs) and calling trace_hardirqs_on/off; the caller-saved
# sets are re-saved/restored around those calls since they clobber them.
# ---------------------------------------------------------------------------
02f884ed | 91 | restore_all: # restore full frame
41c594ab | 92 | #ifdef CONFIG_MIPS_MT_SMTC
0db34215 | 93 | #ifdef CONFIG_MIPS_MT_SMTC_IM_BACKSTOP
41c594ab RB |
94 | /* Re-arm any temporarily masked interrupts not explicitly "acked" */
95 | mfc0 v0, CP0_TCSTATUS | |
96 | ori v1, v0, TCSTATUS_IXMT | |
97 | mtc0 v1, CP0_TCSTATUS | |
98 | andi v0, TCSTATUS_IXMT | |
4277ff5e | 99 | _ehb
41c594ab RB |
100 | mfc0 t0, CP0_TCCONTEXT
101 | DMT 9 # dmt t1 | |
102 | jal mips_ihb | |
103 | mfc0 t2, CP0_STATUS | |
104 | andi t3, t0, 0xff00 | |
105 | or t2, t2, t3 | |
106 | mtc0 t2, CP0_STATUS | |
4277ff5e | 107 | _ehb
41c594ab RB |
108 | andi t1, t1, VPECONTROL_TE
109 | beqz t1, 1f | |
110 | EMT | |
111 | 1: | |
112 | mfc0 v1, CP0_TCSTATUS | |
477654fc | 113 | /* We set IXMT above, XOR should clear it here */
41c594ab RB |
114 | xori v1, v1, TCSTATUS_IXMT
115 | or v1, v0, v1 | |
116 | mtc0 v1, CP0_TCSTATUS | |
4277ff5e | 117 | _ehb
41c594ab RB |
118 | xor t0, t0, t3
119 | mtc0 t0, CP0_TCCONTEXT | |
0db34215 | 120 | #endif /* CONFIG_MIPS_MT_SMTC_IM_BACKSTOP */
d2bb01b0 KK |
121 | /* Detect and execute deferred IPI "interrupts" */
122 | LONG_L s0, TI_REGS($28) | |
123 | LONG_S sp, TI_REGS($28) | |
124 | jal deferred_smtc_ipi | |
125 | LONG_S s0, TI_REGS($28) | |
41c594ab | 126 | #endif /* CONFIG_MIPS_MT_SMTC */
1da177e4 LT |
127 | .set noat
128 | RESTORE_TEMP | |
129 | RESTORE_AT | |
130 | RESTORE_STATIC | |
02f884ed | 131 | restore_partial: # restore partial frame
192ef366 RB |
132 | #ifdef CONFIG_TRACE_IRQFLAGS
133 | SAVE_STATIC | |
134 | SAVE_AT | |
135 | SAVE_TEMP | |
136 | LONG_L v0, PT_STATUS(sp) | |
cbde5ebc CD |
137 | #if defined(CONFIG_CPU_R3000) || defined(CONFIG_CPU_TX39XX)
138 | and v0, ST0_IEP | |
139 | #else | |
140 | and v0, ST0_IE | |
141 | #endif | |
192ef366 RB |
142 | beqz v0, 1f
143 | jal trace_hardirqs_on | |
144 | b 2f | |
145 | 1: jal trace_hardirqs_off | |
146 | 2: | |
147 | RESTORE_TEMP | |
148 | RESTORE_AT | |
149 | RESTORE_STATIC | |
150 | #endif | |
1da177e4 LT |
151 | RESTORE_SOME
152 | RESTORE_SP_AND_RET | |
153 | .set at | |
154 | ||
# ---------------------------------------------------------------------------
# Pending-work loop before returning to userspace (entered with IRQs off and
# a2 = TI_FLAGS).  If TIF_NEED_RESCHED is set, call schedule() and re-sample
# the flags, looping until no resched is pending; any remaining work bits
# (signals / notify-resume) are handed to do_notify_resume with a0 = the
# pt_regs on the stack, after which we re-run resume_userspace_check because
# the notification handler may have changed the return context.
# ---------------------------------------------------------------------------
c2648527 TS |
155 | work_pending:
156 | andi t0, a2, _TIF_NEED_RESCHED # a2 is preloaded with TI_FLAGS | |
1da177e4 LT |
157 | beqz t0, work_notifysig
158 | work_resched: | |
159 | jal schedule | |
160 | ||
c2648527 | 161 | local_irq_disable # make sure need_resched and
1da177e4 LT |
162 | # signals dont change between
163 | # sampling and return | |
164 | LONG_L a2, TI_FLAGS($28) | |
165 | andi t0, a2, _TIF_WORK_MASK # is there any work to be done | |
166 | # other than syscall tracing? | |
167 | beqz t0, restore_all | |
168 | andi t0, a2, _TIF_NEED_RESCHED | |
169 | bnez t0, work_resched | |
170 | ||
171 | work_notifysig: # deal with pending signals and | |
172 | # notify-resume requests | |
173 | move a0, sp | |
174 | li a1, 0 | |
175 | jal do_notify_resume # a2 already loaded | |
c90e6fbb | 176 | j resume_userspace_check
1da177e4 | 177 | |
# ---------------------------------------------------------------------------
# syscall_exit_partial: like syscall_exit but for a frame whose static
# registers were not saved; if no _TIF_ALLWORK_MASK bit is set it can take
# the cheap restore_partial path, otherwise it saves the static set and
# falls into syscall_exit_work.  syscall_exit_work bails to resume_kernel
# for in-kernel "syscalls" (KU_USER clear in the saved PT_STATUS), routes
# non-trace work to work_pending, and otherwise re-enables IRQs and calls
# syscall_trace_leave(pt_regs) before resuming userspace.
# ---------------------------------------------------------------------------
02f884ed AV |
178 | FEXPORT(syscall_exit_partial)
179 | local_irq_disable # make sure need_resched doesn't | |
180 | # change between and return | |
181 | LONG_L a2, TI_FLAGS($28) # current->work | |
182 | li t0, _TIF_ALLWORK_MASK | |
183 | and t0, a2 | |
184 | beqz t0, restore_partial | |
1da177e4 | 185 | SAVE_STATIC
c2648527 | 186 | syscall_exit_work:
f76f3308 AV |
187 | LONG_L t0, PT_STATUS(sp) # returning to kernel mode?
188 | andi t0, t0, KU_USER | |
189 | beqz t0, resume_kernel | |
c19c20ac | 190 | li t0, _TIF_WORK_SYSCALL_EXIT
c2648527 TS |
191 | and t0, a2 # a2 is preloaded with TI_FLAGS
192 | beqz t0, work_pending # trace bit set? | |
8b659a39 | 193 | local_irq_enable # could let syscall_trace_leave()
1da177e4 LT |
194 | # call schedule() instead
195 | move a0, sp | |
8b659a39 | 196 | jal syscall_trace_leave
1da177e4 | 197 | b resume_userspace
# ---------------------------------------------------------------------------
# mips_ihb: instruction hazard barrier for MIPS32R2 / MT cores.  jr.hb
# returns through ra while clearing instruction hazards, so callers see the
# effects of prior CP0/TLB writes; the trailing nop fills the delay slot.
# Used above after DMT in the SMTC backstop path.
# ---------------------------------------------------------------------------
bce1a286 RB |
198 | |
199 | #if defined(CONFIG_CPU_MIPSR2) || defined(CONFIG_MIPS_MT) | |
200 | ||
201 | /* | |
202 | * MIPS32R2 Instruction Hazard Barrier - must be called | |
203 | * | |
204 | * For C code use the inline version named instruction_hazard(). | |
205 | */ | |
206 | LEAF(mips_ihb) | |
207 | .set mips32r2 | |
208 | jr.hb ra | |
209 | nop | |
210 | END(mips_ihb) | |
211 | ||
212 | #endif /* CONFIG_CPU_MIPSR2 or CONFIG_MIPS_MT */