/*
 *  linux/arch/arm/kernel/entry-common.S
 *
 *  Copyright (C) 2000 Russell King
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <asm/assembler.h>
#include <asm/unistd.h>
#include <asm/ftrace.h>
#include <asm/unwind.h>

#ifdef CONFIG_NEED_RET_TO_USER
#include <mach/entry-macro.S>
#else
	.macro	arch_ret_to_user, tmp1, tmp2
	.endm
#endif
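
/*
 * Platforms that select CONFIG_NEED_RET_TO_USER supply arch_ret_to_user
 * in their <mach/entry-macro.S>; for everyone else the stub above
 * expands to nothing.
 */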

#include "entry-header.S"


	.align	5
#if !(IS_ENABLED(CONFIG_TRACE_IRQFLAGS) || IS_ENABLED(CONFIG_CONTEXT_TRACKING))
/*
 * This is the fast syscall return path.  We do as little as possible here,
 * such as avoiding writing r0 to the stack.  We only use this path if we
 * have tracing and context tracking disabled - the overheads from those
 * features make this path too inefficient.
 */
ret_fast_syscall:
 UNWIND(.fnstart	)
 UNWIND(.cantunwind	)
	disable_irq_notrace			@ disable interrupts
	ldr	r1, [tsk, #TI_FLAGS]		@ re-check for syscall tracing
	tst	r1, #_TIF_SYSCALL_WORK | _TIF_WORK_MASK
	bne	fast_work_pending

	/* perform architecture specific actions before user return */
	arch_ret_to_user r1, lr

	restore_user_regs fast = 1, offset = S_OFF
 UNWIND(.fnend		)
ENDPROC(ret_fast_syscall)

/* Ok, we need to do extra processing; enter the slow path. */
fast_work_pending:
	str	r0, [sp, #S_R0+S_OFF]!		@ returned r0
	/* fall through to work_pending */
#else
/*
 * The "replacement" ret_fast_syscall for when tracing or context tracking
 * is enabled.  As we will need to call out to some C functions, we save
 * r0 first to avoid needing to save registers around each C function call.
 */
ret_fast_syscall:
 UNWIND(.fnstart	)
 UNWIND(.cantunwind	)
	str	r0, [sp, #S_R0 + S_OFF]!	@ save returned r0
	disable_irq_notrace			@ disable interrupts
	ldr	r1, [tsk, #TI_FLAGS]		@ re-check for syscall tracing
	tst	r1, #_TIF_SYSCALL_WORK | _TIF_WORK_MASK
	beq	no_work_pending
 UNWIND(.fnend		)
ENDPROC(ret_fast_syscall)

	/* Slower path - fall through to work_pending */
#endif

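/*
 * Both routes into this code have already saved r0 back to the stack, so
 * a traced syscall can take the "nosave" trace-exit path below without
 * storing it again.
 */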
	tst	r1, #_TIF_SYSCALL_WORK
	bne	__sys_trace_return_nosave
slow_work_pending:
	mov	r0, sp				@ 'regs'
	mov	r2, why				@ 'syscall'
	bl	do_work_pending
	cmp	r0, #0
	beq	no_work_pending
	movlt	scno, #(__NR_restart_syscall - __NR_SYSCALL_BASE)
	ldmia	sp, {r0 - r6}			@ have to reload r0 - r6
	b	local_restart			@ ... and off we go
ENDPROC(ret_fast_syscall)

/*
 * "slow" syscall return path.  "why" tells us if this was a real syscall.
 * IRQs may be enabled here, so always disable them.  Note that we use the
 * "notrace" version to avoid calling into the tracing code unnecessarily.
 * do_work_pending() will update this state if necessary.
 */
ENTRY(ret_to_user)
ret_slow_syscall:
	disable_irq_notrace			@ disable interrupts
ENTRY(ret_to_user_from_irq)
	ldr	r1, [tsk, #TI_FLAGS]
	tst	r1, #_TIF_WORK_MASK
	bne	slow_work_pending
no_work_pending:
	asm_trace_hardirqs_on save = 0

	/* perform architecture specific actions before user return */
	arch_ret_to_user r1, lr
	ct_user_enter save = 0

	restore_user_regs fast = 0, offset = 0
ENDPROC(ret_to_user_from_irq)
ENDPROC(ret_to_user)

/*
 * This is how we return from a fork.
 */
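/*
 * copy_thread() gives a kernel thread its entry point in r5 and its
 * argument in r4; a user-space fork child arrives with r5 == 0 and
 * simply takes the slow syscall return path.
 */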
ENTRY(ret_from_fork)
	bl	schedule_tail
	cmp	r5, #0
	movne	r0, r4
	badrne	lr, 1f
	retne	r5
1:	get_thread_info tsk
	b	ret_slow_syscall
ENDPROC(ret_from_fork)

	.equ NR_syscalls,0
#define CALL(x) .equ NR_syscalls,NR_syscalls+1
#include "calls.S"
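/*
 * calls.S expands to one CALL(sys_xxx) per syscall, so with the CALL()
 * definition above, including it here simply counts the entries into
 * NR_syscalls.  It is included again below, with CALL() redefined to
 * emit .long values, to build the actual tables.
 */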

/*
 * Ensure that the syscall table we just counted is the same size as
 * __NR_syscalls, which is the value the rest of the system sees.
 */
.ifne NR_syscalls - __NR_syscalls
.error "__NR_syscalls is not equal to the size of the syscall table"
.endif

#undef CALL
#define CALL(x) .long x

/*=============================================================================
 * SWI handler
 *-----------------------------------------------------------------------------
 */

	.align	5
ENTRY(vector_swi)
#ifdef CONFIG_CPU_V7M
	v7m_exception_entry
#else
	sub	sp, sp, #S_FRAME_SIZE
	stmia	sp, {r0 - r12}			@ Calling r0 - r12
 ARM(	add	r8, sp, #S_PC		)
 ARM(	stmdb	r8, {sp, lr}^		)	@ Calling sp, lr
 THUMB(	mov	r8, sp			)
 THUMB(	store_user_sp_lr r8, r10, S_SP	)	@ calling sp, lr
	mrs	r8, spsr			@ called from non-FIQ mode, so ok.
	str	lr, [sp, #S_PC]			@ Save calling PC
	str	r8, [sp, #S_PSR]		@ Save CPSR
	str	r0, [sp, #S_OLD_R0]		@ Save OLD_R0
#endif
	zero_fp
	alignment_trap r10, ip, __cr_alignment
	enable_irq
	ct_user_exit
	get_thread_info tsk

	/*
	 * Get the system call number.
	 */

#if defined(CONFIG_OABI_COMPAT)

	/*
	 * If we have CONFIG_OABI_COMPAT then we need to look at the swi
	 * value to determine if it is an EABI or an old ABI call.
	 */
#ifdef CONFIG_ARM_THUMB
	tst	r8, #PSR_T_BIT
	movne	r10, #0				@ no thumb OABI emulation
 USER(	ldreq	r10, [lr, #-4]		)	@ get SWI instruction
#else
 USER(	ldr	r10, [lr, #-4]		)	@ get SWI instruction
#endif
 ARM_BE8(rev	r10, r10)			@ little endian instruction

#elif defined(CONFIG_AEABI)

	/*
	 * Pure EABI user space always puts the syscall number into scno (r7).
	 */
#elif defined(CONFIG_ARM_THUMB)
	/* Legacy ABI only, possibly thumb mode. */
	tst	r8, #PSR_T_BIT			@ this is SPSR from save_user_regs
	addne	scno, r7, #__NR_SYSCALL_BASE	@ put OS number in
 USER(	ldreq	scno, [lr, #-4]		)

#else
	/* Legacy ABI only. */
 USER(	ldr	scno, [lr, #-4]		)	@ get SWI instruction
#endif

	uaccess_disable tbl

	adr	tbl, sys_call_table		@ load syscall table pointer

#if defined(CONFIG_OABI_COMPAT)
	/*
	 * If the swi argument is zero, this is an EABI call and we do nothing.
	 *
	 * If this is an old ABI call, get the syscall number into scno and
	 * get the old ABI syscall table address.
	 */
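	/*
	 * (An old ABI swi encodes 0x900000 + NR in the instruction's
	 * 24-bit comment field, so masking off the condition/opcode bits
	 * leaves zero only for an EABI "swi 0".)
	 */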
	bics	r10, r10, #0xff000000
	eorne	scno, r10, #__NR_OABI_SYSCALL_BASE
	ldrne	tbl, =sys_oabi_call_table
#elif !defined(CONFIG_AEABI)
	bic	scno, scno, #0xff000000		@ mask off SWI op-code
	eor	scno, scno, #__NR_SYSCALL_BASE	@ check OS number
#endif

local_restart:
	ldr	r10, [tsk, #TI_FLAGS]		@ check for syscall tracing
	stmdb	sp!, {r4, r5}			@ push fifth and sixth args

	tst	r10, #_TIF_SYSCALL_WORK		@ are we tracing syscalls?
	bne	__sys_trace

	cmp	scno, #NR_syscalls		@ check upper syscall limit
	badr	lr, ret_fast_syscall		@ return address
	ldrcc	pc, [tbl, scno, lsl #2]		@ call sys_* routine

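	/*
	 * The syscall number is out of range: check whether it is an ARM
	 * private syscall (__ARM_NR_BASE upwards) before giving up.
	 */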
	add	r1, sp, #S_OFF
2:	cmp	scno, #(__ARM_NR_BASE - __NR_SYSCALL_BASE)
	eor	r0, scno, #__NR_SYSCALL_BASE	@ put OS number back
	bcs	arm_syscall
	mov	why, #0				@ no longer a real syscall
	b	sys_ni_syscall			@ not private func

#if defined(CONFIG_OABI_COMPAT) || !defined(CONFIG_AEABI)
	/*
	 * We failed to handle a fault trying to access the page
	 * containing the swi instruction, but we're not really in a
	 * position to return -EFAULT.  Instead, return back to the
	 * instruction and re-enter the user fault handling path trying
	 * to page it in.  This will likely result in sending SEGV to the
	 * current task.
	 */
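	/*
	 * (Each USER() load above emits an __ex_table entry directing a
	 * faulting access to this 9001 fixup.)
	 */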
9001:
	sub	lr, lr, #4
	str	lr, [sp, #S_PC]
	b	ret_fast_syscall
#endif
ENDPROC(vector_swi)

	/*
	 * This is the really slow path.  We're going to be doing
	 * context switches, and waiting for our parent to respond.
	 */
__sys_trace:
	mov	r1, scno
	add	r0, sp, #S_OFF
	bl	syscall_trace_enter

	badr	lr, __sys_trace_return		@ return address
	mov	scno, r0			@ syscall number (possibly new)
	add	r1, sp, #S_R0 + S_OFF		@ pointer to regs
	cmp	scno, #NR_syscalls		@ check upper syscall limit
	ldmccia	r1, {r0 - r6}			@ have to reload r0 - r6
	stmccia	sp, {r4, r5}			@ and update the stack args
	ldrcc	pc, [tbl, scno, lsl #2]		@ call sys_* routine
	cmp	scno, #-1			@ skip the syscall?
	bne	2b
	add	sp, sp, #S_OFF			@ restore stack
	b	ret_slow_syscall

__sys_trace_return:
	str	r0, [sp, #S_R0 + S_OFF]!	@ save returned r0
	mov	r0, sp
	bl	syscall_trace_exit
	b	ret_slow_syscall

__sys_trace_return_nosave:
	enable_irq_notrace
	mov	r0, sp
	bl	syscall_trace_exit
	b	ret_slow_syscall

	.align	5
#ifdef CONFIG_ALIGNMENT_TRAP
	.type	__cr_alignment, #object
__cr_alignment:
	.word	cr_alignment
#endif
	.ltorg

/*
 * This is the syscall table declaration for native ABI syscalls.
 * With EABI a couple of syscalls are obsolete and defined as sys_ni_syscall.
 */
#define ABI(native, compat) native
#ifdef CONFIG_AEABI
#define OBSOLETE(syscall) sys_ni_syscall
#else
#define OBSOLETE(syscall) syscall
#endif

	.type	sys_call_table, #object
ENTRY(sys_call_table)
#include "calls.S"
#undef ABI
#undef OBSOLETE

/*============================================================================
 * Special system call wrappers
 */
@ r0 = syscall number
@ r8 = syscall table
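@
@ sys_syscall is the indirect syscall(2) entry point: the real syscall
@ number arrives in r0, so every argument is shifted down one register
@ before redispatching through the table.
@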
sys_syscall:
		bic	scno, r0, #__NR_OABI_SYSCALL_BASE
		cmp	scno, #__NR_syscall - __NR_SYSCALL_BASE
		cmpne	scno, #NR_syscalls	@ check range
		stmloia	sp, {r5, r6}		@ shuffle args
		movlo	r0, r1
		movlo	r1, r2
		movlo	r2, r3
		movlo	r3, r4
		ldrlo	pc, [tbl, scno, lsl #2]
		b	sys_ni_syscall
ENDPROC(sys_syscall)

sys_sigreturn_wrapper:
		add	r0, sp, #S_OFF
		mov	why, #0		@ prevent syscall restart handling
		b	sys_sigreturn
ENDPROC(sys_sigreturn_wrapper)

sys_rt_sigreturn_wrapper:
		add	r0, sp, #S_OFF
		mov	why, #0		@ prevent syscall restart handling
		b	sys_rt_sigreturn
ENDPROC(sys_rt_sigreturn_wrapper)

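/*
 * User space's struct statfs64 carries four bytes of tail padding that
 * the kernel's packed ARM definition does not, so a size argument of 88
 * is adjusted to the 84 bytes sys_statfs64/sys_fstatfs64 expect.
 */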
sys_statfs64_wrapper:
		teq	r1, #88
		moveq	r1, #84
		b	sys_statfs64
ENDPROC(sys_statfs64_wrapper)

sys_fstatfs64_wrapper:
		teq	r1, #88
		moveq	r1, #84
		b	sys_fstatfs64
ENDPROC(sys_fstatfs64_wrapper)

/*
 * Note: off_4k (r5) is always in units of 4K.  If we can't do the requested
 * offset, we return -EINVAL.
 */
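/*
 * (For example, with 16K pages PAGE_SHIFT == 14, so off_4k must be a
 * multiple of 4 before it can be converted to units of PAGE_SIZE.)
 */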
sys_mmap2:
#if PAGE_SHIFT > 12
		tst	r5, #PGOFF_MASK
		moveq	r5, r5, lsr #PAGE_SHIFT - 12
		streq	r5, [sp, #4]
		beq	sys_mmap_pgoff
		mov	r0, #-EINVAL
		ret	lr
#else
		str	r5, [sp, #4]
		b	sys_mmap_pgoff
#endif
ENDPROC(sys_mmap2)

#ifdef CONFIG_OABI_COMPAT

/*
 * These are syscalls with argument register differences
 */

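/*
 * For instance, sys_pread64(fd, buf, count, pos): EABI aligns the 64-bit
 * pos to a register pair, leaving it in the stacked fifth/sixth argument
 * slots, while the legacy ABI passed it in r3 + r4 - hence the stores
 * over the stack args below.
 */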
sys_oabi_pread64:
		stmia	sp, {r3, r4}
		b	sys_pread64
ENDPROC(sys_oabi_pread64)

sys_oabi_pwrite64:
		stmia	sp, {r3, r4}
		b	sys_pwrite64
ENDPROC(sys_oabi_pwrite64)

sys_oabi_truncate64:
		mov	r3, r2
		mov	r2, r1
		b	sys_truncate64
ENDPROC(sys_oabi_truncate64)

sys_oabi_ftruncate64:
		mov	r3, r2
		mov	r2, r1
		b	sys_ftruncate64
ENDPROC(sys_oabi_ftruncate64)

sys_oabi_readahead:
		str	r3, [sp]
		mov	r3, r2
		mov	r2, r1
		b	sys_readahead
ENDPROC(sys_oabi_readahead)

/*
 * Let's declare a second syscall table for old ABI binaries
 * using the compatibility syscall entries.
 */
#define ABI(native, compat) compat
#define OBSOLETE(syscall) syscall

	.type	sys_oabi_call_table, #object
ENTRY(sys_oabi_call_table)
#include "calls.S"
#undef ABI
#undef OBSOLETE

#endif