/*
 *  linux/arch/arm/kernel/entry-common.S
 *
 *  Copyright (C) 2000 Russell King
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <asm/unistd.h>
#include <asm/ftrace.h>
#include <asm/unwind.h>

#ifdef CONFIG_NEED_RET_TO_USER
#include <mach/entry-macro.S>
#else
	.macro	arch_ret_to_user, tmp1, tmp2
	.endm
#endif

#include "entry-header.S"


	.align	5
/*
 * This is the fast syscall return path.  We do as little as
 * possible here, and this includes saving r0 back into the SVC
 * stack.
 */
ret_fast_syscall:
 UNWIND(.fnstart	)
 UNWIND(.cantunwind	)
	disable_irq				@ disable interrupts
	ldr	r1, [tsk, #TI_FLAGS]
	tst	r1, #_TIF_WORK_MASK
	bne	fast_work_pending
#if defined(CONFIG_IRQSOFF_TRACER)
	asm_trace_hardirqs_on
#endif

	/* perform architecture specific actions before user return */
	arch_ret_to_user r1, lr

	restore_user_regs fast = 1, offset = S_OFF
 UNWIND(.fnend		)

/*
 * Ok, we need to do extra processing, enter the slow path.
 */
fast_work_pending:
	str	r0, [sp, #S_R0+S_OFF]!		@ returned r0
work_pending:
	mov	r0, sp				@ 'regs'
	mov	r2, why				@ 'syscall'
	bl	do_work_pending
	cmp	r0, #0
	beq	no_work_pending
	movlt	scno, #(__NR_restart_syscall - __NR_SYSCALL_BASE)
	ldmia	sp, {r0 - r6}			@ have to reload r0 - r6
	b	local_restart			@ ... and off we go

/*
 * "slow" syscall return path.  "why" tells us if this was a real syscall.
 */
ENTRY(ret_to_user)
ret_slow_syscall:
	disable_irq				@ disable interrupts
ENTRY(ret_to_user_from_irq)
	ldr	r1, [tsk, #TI_FLAGS]
	tst	r1, #_TIF_WORK_MASK
	bne	work_pending
no_work_pending:
#if defined(CONFIG_IRQSOFF_TRACER)
	asm_trace_hardirqs_on
#endif
	/* perform architecture specific actions before user return */
	arch_ret_to_user r1, lr

	restore_user_regs fast = 0, offset = 0
ENDPROC(ret_to_user_from_irq)
ENDPROC(ret_to_user)

/*
 * This is how we return from a fork.
 */
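/*
 * Kernel threads also pass through here: copy_thread() leaves the
 * thread function in r5 and its argument in r4, while for a user
 * fork r5 is zero and we fall through to the slow return path.
 */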
ENTRY(ret_from_fork)
	bl	schedule_tail
	cmp	r5, #0
	movne	r0, r4
	movne	lr, pc
	movne	pc, r5
	get_thread_info tsk
	b	ret_slow_syscall
ENDPROC(ret_from_fork)

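/*
 * Count the syscall table entries: each CALL(x) in calls.S bumps
 * NR_syscalls by one, so after the include NR_syscalls holds the
 * number of entries in the table.
 */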
	.equ NR_syscalls,0
#define CALL(x) .equ NR_syscalls,NR_syscalls+1
#include "calls.S"

/*
 * Ensure that the size of the system call table equals __NR_syscalls,
 * which is the value the rest of the system sees.
 */
.ifne NR_syscalls - __NR_syscalls
.error "__NR_syscalls is not equal to the size of the syscall table"
.endif

#undef CALL
#define CALL(x) .long x

#ifdef CONFIG_FUNCTION_TRACER
/*
 * When compiling with -pg, gcc inserts a call to the mcount routine at the
 * start of every function.  In mcount, apart from the function's address (in
 * lr), we need to get hold of the function's caller's address.
 *
 * Older GCCs (pre-4.4) inserted a call to a routine called mcount like this:
 *
 *	bl	mcount
 *
 * These versions have the limitation that in order for the mcount routine to
 * be able to determine the function's caller's address, an APCS-style frame
 * pointer (which is set up with something like the code below) is required.
 *
 *	mov	ip, sp
 *	push	{fp, ip, lr, pc}
 *	sub	fp, ip, #4
 *
 * With EABI, these frame pointers are not available unless -mapcs-frame is
 * specified, and if building as Thumb-2, not even then.
 *
 * Newer GCCs (4.4+) solve this problem by introducing a new version of mcount,
 * with call sites like:
 *
 *	push	{lr}
 *	bl	__gnu_mcount_nc
 *
 * With these compilers, frame pointers are not necessary.
 *
 * mcount can be thought of as a function called in the middle of a subroutine
 * call.  As such, it needs to be transparent for both the caller and the
 * callee: the original lr needs to be restored when leaving mcount, and no
 * registers should be clobbered.  (In the __gnu_mcount_nc implementation, we
 * clobber the ip register.  This is OK because the ARM calling convention
 * allows it to be clobbered in subroutines and doesn't use it to hold
 * parameters.)
 *
 * When using dynamic ftrace, we patch out the mcount call by a "mov r0, r0"
 * for the mcount case, and a "pop {lr}" for the __gnu_mcount_nc case (see
 * arch/arm/kernel/ftrace.c).
 */

#ifndef CONFIG_OLD_MCOUNT
#if (__GNUC__ < 4 || (__GNUC__ == 4 && __GNUC_MINOR__ < 4))
#error Ftrace requires CONFIG_FRAME_POINTER=y with GCC older than 4.4.0.
#endif
#endif

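/*
 * Turn a return address in rn into the address of the mcount call
 * site: clear a possible Thumb bit and step back over the size of
 * one mcount call instruction.
 */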
	.macro mcount_adjust_addr rd, rn
	bic	\rd, \rn, #1		@ clear the Thumb bit if present
	sub	\rd, \rd, #MCOUNT_INSN_SIZE
	.endm

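/*
 * Common mcount body, instantiated for both entry points (mcount with
 * the "_old" suffix, __gnu_mcount_nc without).  If no tracer is
 * registered, ftrace_trace_function still points at ftrace_stub and we
 * return immediately; otherwise the registered tracer is called with
 * the instrumented function's address in r0 and its parent's in r1.
 */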
	.macro __mcount suffix
	mcount_enter
	ldr	r0, =ftrace_trace_function
	ldr	r2, [r0]
	adr	r0, .Lftrace_stub
	cmp	r0, r2
	bne	1f

#ifdef CONFIG_FUNCTION_GRAPH_TRACER
	ldr	r1, =ftrace_graph_return
	ldr	r2, [r1]
	cmp	r0, r2
	bne	ftrace_graph_caller\suffix

	ldr	r1, =ftrace_graph_entry
	ldr	r2, [r1]
	ldr	r0, =ftrace_graph_entry_stub
	cmp	r0, r2
	bne	ftrace_graph_caller\suffix
#endif

	mcount_exit

1:	mcount_get_lr	r1			@ lr of instrumented func
	mcount_adjust_addr	r0, lr		@ instrumented function
	adr	lr, BSYM(2f)
	mov	pc, r2
2:	mcount_exit
	.endm

	.macro __ftrace_caller suffix
	mcount_enter

	mcount_get_lr	r1			@ lr of instrumented func
	mcount_adjust_addr	r0, lr		@ instrumented function

	.globl ftrace_call\suffix
ftrace_call\suffix:
	bl	ftrace_stub

#ifdef CONFIG_FUNCTION_GRAPH_TRACER
	.globl ftrace_graph_call\suffix
ftrace_graph_call\suffix:
	mov	r0, r0
#endif

	mcount_exit
	.endm

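/*
 * Hand over to the graph tracer: prepare_ftrace_return() gets a
 * pointer to the saved parent lr in r0, the instrumented function's
 * address in r1 and the frame pointer in r2, so that it can redirect
 * the return path through return_to_handler below.
 */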
	.macro __ftrace_graph_caller
	sub	r0, fp, #4		@ &lr of instrumented routine (&parent)
#ifdef CONFIG_DYNAMIC_FTRACE
	@ called from __ftrace_caller, saved in mcount_enter
	ldr	r1, [sp, #16]		@ instrumented routine (func)
	mcount_adjust_addr	r1, r1
#else
	@ called from __mcount, untouched in lr
	mcount_adjust_addr	r1, lr	@ instrumented routine (func)
#endif
	mov	r2, fp			@ frame pointer
	bl	prepare_ftrace_return
	mcount_exit
	.endm

#ifdef CONFIG_OLD_MCOUNT
/*
 * mcount
 */

.macro mcount_enter
	stmdb	sp!, {r0-r3, lr}
.endm

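@ The APCS prologue (mov ip, sp; push {fp, ip, lr, pc}; sub fp, ip, #4)
@ leaves the instrumented function's saved lr at fp - 4.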
.macro mcount_get_lr reg
	ldr	\reg, [fp, #-4]
.endm

.macro mcount_exit
	ldr	lr, [fp, #-4]
	ldmia	sp!, {r0-r3, pc}
.endm

ENTRY(mcount)
#ifdef CONFIG_DYNAMIC_FTRACE
	stmdb	sp!, {lr}
	ldr	lr, [fp, #-4]
	ldmia	sp!, {pc}
#else
	__mcount _old
#endif
ENDPROC(mcount)

#ifdef CONFIG_DYNAMIC_FTRACE
ENTRY(ftrace_caller_old)
	__ftrace_caller _old
ENDPROC(ftrace_caller_old)
#endif

#ifdef CONFIG_FUNCTION_GRAPH_TRACER
ENTRY(ftrace_graph_caller_old)
	__ftrace_graph_caller
ENDPROC(ftrace_graph_caller_old)
#endif

.purgem mcount_enter
.purgem mcount_get_lr
.purgem mcount_exit
#endif

/*
 * __gnu_mcount_nc
 */

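/*
 * The call site pushed {lr} before branching here, so after
 * mcount_enter the instrumented function's original lr sits at
 * sp + 20, above the five registers saved below.  mcount_exit pops
 * the return address into ip and the original lr back into lr.
 */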
.macro mcount_enter
	stmdb	sp!, {r0-r3, lr}
.endm

.macro mcount_get_lr reg
	ldr	\reg, [sp, #20]
.endm

.macro mcount_exit
	ldmia	sp!, {r0-r3, ip, lr}
	mov	pc, ip
.endm

ENTRY(__gnu_mcount_nc)
#ifdef CONFIG_DYNAMIC_FTRACE
	mov	ip, lr
	ldmia	sp!, {lr}
	mov	pc, ip
#else
	__mcount
#endif
ENDPROC(__gnu_mcount_nc)

#ifdef CONFIG_DYNAMIC_FTRACE
ENTRY(ftrace_caller)
	__ftrace_caller
ENDPROC(ftrace_caller)
#endif

#ifdef CONFIG_FUNCTION_GRAPH_TRACER
ENTRY(ftrace_graph_caller)
	__ftrace_graph_caller
ENDPROC(ftrace_graph_caller)
#endif

.purgem mcount_enter
.purgem mcount_get_lr
.purgem mcount_exit

#ifdef CONFIG_FUNCTION_GRAPH_TRACER
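/*
 * prepare_ftrace_return() patches an instrumented function's return
 * address to point here; ftrace_return_to_handler() hands back the
 * original address so the function returns to its real caller.
 */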
	.globl return_to_handler
return_to_handler:
	stmdb	sp!, {r0-r3}
	mov	r0, fp			@ frame pointer
	bl	ftrace_return_to_handler
	mov	lr, r0			@ r0 has real ret addr
	ldmia	sp!, {r0-r3}
	mov	pc, lr
#endif

ENTRY(ftrace_stub)
.Lftrace_stub:
	mov	pc, lr
ENDPROC(ftrace_stub)

#endif /* CONFIG_FUNCTION_TRACER */

/*=============================================================================
 * SWI handler
 *-----------------------------------------------------------------------------
 */

	.align	5
ENTRY(vector_swi)
	sub	sp, sp, #S_FRAME_SIZE
	stmia	sp, {r0 - r12}			@ Calling r0 - r12
 ARM(	add	r8, sp, #S_PC		)
 ARM(	stmdb	r8, {sp, lr}^		)	@ Calling sp, lr
 THUMB(	mov	r8, sp			)
 THUMB(	store_user_sp_lr r8, r10, S_SP	)	@ calling sp, lr
	mrs	r8, spsr			@ called from non-FIQ mode, so ok.
	str	lr, [sp, #S_PC]			@ Save calling PC
	str	r8, [sp, #S_PSR]		@ Save CPSR
	str	r0, [sp, #S_OLD_R0]		@ Save OLD_R0
	zero_fp

	/*
	 * Get the system call number.
	 */

#if defined(CONFIG_OABI_COMPAT)

	/*
	 * If we have CONFIG_OABI_COMPAT then we need to look at the swi
	 * value to determine if it is an EABI or an old ABI call.
	 */
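	/*
	 * For instance, an old ABI call site encodes the number in the
	 * instruction itself:
	 *
	 *	swi	#(__NR_OABI_SYSCALL_BASE + n)
	 *
	 * while an EABI one passes it in r7 and always uses "swi #0":
	 *
	 *	mov	r7, #n
	 *	swi	#0
	 */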
#ifdef CONFIG_ARM_THUMB
	tst	r8, #PSR_T_BIT
	movne	r10, #0				@ no thumb OABI emulation
	ldreq	r10, [lr, #-4]			@ get SWI instruction
#else
	ldr	r10, [lr, #-4]			@ get SWI instruction
#endif
#ifdef CONFIG_CPU_ENDIAN_BE8
	rev	r10, r10			@ little endian instruction
#endif

#elif defined(CONFIG_AEABI)

	/*
	 * Pure EABI user space always puts the syscall number into scno (r7).
	 */
#elif defined(CONFIG_ARM_THUMB)
	/* Legacy ABI only, possibly thumb mode. */
	tst	r8, #PSR_T_BIT			@ this is SPSR from save_user_regs
	addne	scno, r7, #__NR_SYSCALL_BASE	@ put OS number in
	ldreq	scno, [lr, #-4]

#else
	/* Legacy ABI only. */
	ldr	scno, [lr, #-4]			@ get SWI instruction
#endif

#ifdef CONFIG_ALIGNMENT_TRAP
	ldr	ip, __cr_alignment
	ldr	ip, [ip]
	mcr	p15, 0, ip, c1, c0		@ update control register
#endif
	enable_irq

	get_thread_info tsk
	adr	tbl, sys_call_table		@ load syscall table pointer

#if defined(CONFIG_OABI_COMPAT)
	/*
	 * If the swi argument is zero, this is an EABI call and we do nothing.
	 *
	 * If this is an old ABI call, get the syscall number into scno and
	 * get the old ABI syscall table address.
	 */
	bics	r10, r10, #0xff000000
	eorne	scno, r10, #__NR_OABI_SYSCALL_BASE
	ldrne	tbl, =sys_oabi_call_table
#elif !defined(CONFIG_AEABI)
	bic	scno, scno, #0xff000000		@ mask off SWI op-code
	eor	scno, scno, #__NR_SYSCALL_BASE	@ check OS number
#endif

local_restart:
	ldr	r10, [tsk, #TI_FLAGS]		@ check for syscall tracing
	stmdb	sp!, {r4, r5}			@ push fifth and sixth args

#ifdef CONFIG_SECCOMP
	tst	r10, #_TIF_SECCOMP
	beq	1f
	mov	r0, scno
	bl	__secure_computing
	add	r0, sp, #S_R0 + S_OFF		@ pointer to regs
	ldmia	r0, {r0 - r3}			@ have to reload r0 - r3
1:
#endif

	tst	r10, #_TIF_SYSCALL_WORK		@ are we tracing syscalls?
	bne	__sys_trace

	cmp	scno, #NR_syscalls		@ check upper syscall limit
	adr	lr, BSYM(ret_fast_syscall)	@ return address
	ldrcc	pc, [tbl, scno, lsl #2]		@ call sys_* routine

	add	r1, sp, #S_OFF
2:	mov	why, #0				@ no longer a real syscall
	cmp	scno, #(__ARM_NR_BASE - __NR_SYSCALL_BASE)
	eor	r0, scno, #__NR_SYSCALL_BASE	@ put OS number back
	bcs	arm_syscall
	b	sys_ni_syscall			@ not private func
ENDPROC(vector_swi)

	/*
	 * This is the really slow path.  We're going to be doing
	 * context switches, and waiting for our parent to respond.
	 */
__sys_trace:
	mov	r1, scno
	add	r0, sp, #S_OFF
	bl	syscall_trace_enter

	adr	lr, BSYM(__sys_trace_return)	@ return address
	mov	scno, r0			@ syscall number (possibly new)
	add	r1, sp, #S_R0 + S_OFF		@ pointer to regs
	cmp	scno, #NR_syscalls		@ check upper syscall limit
	ldmccia	r1, {r0 - r6}			@ have to reload r0 - r6
	stmccia	sp, {r4, r5}			@ and update the stack args
	ldrcc	pc, [tbl, scno, lsl #2]		@ call sys_* routine
	b	2b

__sys_trace_return:
	str	r0, [sp, #S_R0 + S_OFF]!	@ save returned r0
	mov	r1, scno
	mov	r0, sp
	bl	syscall_trace_exit
	b	ret_slow_syscall

	.align	5
#ifdef CONFIG_ALIGNMENT_TRAP
	.type	__cr_alignment, #object
__cr_alignment:
	.word	cr_alignment
#endif
	.ltorg

/*
 * This is the syscall table declaration for native ABI syscalls.
 * With EABI a couple of syscalls are obsolete and defined as sys_ni_syscall.
 */
#define ABI(native, compat) native
#ifdef CONFIG_AEABI
#define OBSOLETE(syscall) sys_ni_syscall
#else
#define OBSOLETE(syscall) syscall
#endif

	.type	sys_call_table, #object
ENTRY(sys_call_table)
#include "calls.S"
#undef ABI
#undef OBSOLETE

/*============================================================================
 * Special system call wrappers
 */
@ r0 = syscall number
@ r8 = syscall table
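/*
 * sys_syscall implements the indirect syscall(2) call: the real
 * syscall number arrives in r0 (with the OABI base masked off below),
 * so shift the arguments down one register, moving r5/r6 into the
 * stack slots, before dispatching through the table.
 */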
sys_syscall:
	bic	scno, r0, #__NR_OABI_SYSCALL_BASE
	cmp	scno, #__NR_syscall - __NR_SYSCALL_BASE
	cmpne	scno, #NR_syscalls		@ check range
	stmloia	sp, {r5, r6}			@ shuffle args
	movlo	r0, r1
	movlo	r1, r2
	movlo	r2, r3
	movlo	r3, r4
	ldrlo	pc, [tbl, scno, lsl #2]
	b	sys_ni_syscall
ENDPROC(sys_syscall)

sys_fork_wrapper:
	add	r0, sp, #S_OFF
	b	sys_fork
ENDPROC(sys_fork_wrapper)

sys_vfork_wrapper:
	add	r0, sp, #S_OFF
	b	sys_vfork
ENDPROC(sys_vfork_wrapper)

sys_clone_wrapper:
	add	ip, sp, #S_OFF
	str	ip, [sp, #4]
	b	sys_clone
ENDPROC(sys_clone_wrapper)

sys_sigreturn_wrapper:
	add	r0, sp, #S_OFF
	mov	why, #0		@ prevent syscall restart handling
	b	sys_sigreturn
ENDPROC(sys_sigreturn_wrapper)

sys_rt_sigreturn_wrapper:
	add	r0, sp, #S_OFF
	mov	why, #0		@ prevent syscall restart handling
	b	sys_rt_sigreturn
ENDPROC(sys_rt_sigreturn_wrapper)

sys_sigaltstack_wrapper:
	ldr	r2, [sp, #S_OFF + S_SP]
	b	do_sigaltstack
ENDPROC(sys_sigaltstack_wrapper)

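/*
 * Old ABI structure padding makes sizeof(struct statfs64) 88 bytes
 * there, while the kernel (and EABI) use 84; fix up the size argument
 * so that sys_(f)statfs64 accepts the call.
 */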
sys_statfs64_wrapper:
	teq	r1, #88
	moveq	r1, #84
	b	sys_statfs64
ENDPROC(sys_statfs64_wrapper)

sys_fstatfs64_wrapper:
	teq	r1, #88
	moveq	r1, #84
	b	sys_fstatfs64
ENDPROC(sys_fstatfs64_wrapper)

/*
 * Note: off_4k (r5) is always units of 4K.  If we can't do the requested
 * offset, we return EINVAL.
 */
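/*
 * (With 16K pages, for example, only off_4k values that are a multiple
 * of 4 describe a whole page; the PGOFF_MASK test below rejects the
 * rest.)
 */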
sys_mmap2:
#if PAGE_SHIFT > 12
	tst	r5, #PGOFF_MASK
	moveq	r5, r5, lsr #PAGE_SHIFT - 12
	streq	r5, [sp, #4]
	beq	sys_mmap_pgoff
	mov	r0, #-EINVAL
	mov	pc, lr
#else
	str	r5, [sp, #4]
	b	sys_mmap_pgoff
#endif
ENDPROC(sys_mmap2)

#ifdef CONFIG_OABI_COMPAT

/*
 * These are syscalls with argument register differences
 */

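/*
 * EABI aligns 64-bit arguments to an even/odd register pair, so for
 * example sys_pread64() expects its loff_t in the stack slots that
 * local_restart filled from r4/r5; old ABI callers pass it unaligned
 * in r3/r4, which the wrappers below shuffle into place.
 */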
sys_oabi_pread64:
	stmia	sp, {r3, r4}
	b	sys_pread64
ENDPROC(sys_oabi_pread64)

sys_oabi_pwrite64:
	stmia	sp, {r3, r4}
	b	sys_pwrite64
ENDPROC(sys_oabi_pwrite64)

sys_oabi_truncate64:
	mov	r3, r2
	mov	r2, r1
	b	sys_truncate64
ENDPROC(sys_oabi_truncate64)

sys_oabi_ftruncate64:
	mov	r3, r2
	mov	r2, r1
	b	sys_ftruncate64
ENDPROC(sys_oabi_ftruncate64)

sys_oabi_readahead:
	str	r3, [sp]
	mov	r3, r2
	mov	r2, r1
	b	sys_readahead
ENDPROC(sys_oabi_readahead)

/*
 * Let's declare a second syscall table for old ABI binaries
 * using the compatibility syscall entries.
 */
#define ABI(native, compat) compat
#define OBSOLETE(syscall) syscall

	.type	sys_oabi_call_table, #object
ENTRY(sys_oabi_call_table)
#include "calls.S"
#undef ABI
#undef OBSOLETE

#endif