1 /* SPDX-License-Identifier: GPL-2.0 */
3 /* S390 low-level entry points.
5 * Copyright IBM Corp. 1999, 2012 */
12 #include <linux/init.h>
13 #include <linux/linkage.h>
14 #include <asm/alternative-asm.h>
15 #include <asm/processor.h>
16 #include <asm/cache.h>
17 #include <asm/dwarf.h>
18 #include <asm/errno.h>
19 #include <asm/ptrace.h>
20 #include <asm/thread_info.h>
21 #include <asm/asm-offsets.h>
22 #include <asm/unistd.h>
26 #include <asm/vx-insn.h>
27 #include <asm/setup.h>
29 #include <asm/export.h>
30 #include <asm/nospec-insn.h>
# Offsets of the saved general purpose registers inside struct pt_regs:
# one 8-byte slot per gpr, starting at __PT_GPRS (gpr0).
33 __PT_R1 = __PT_GPRS + 8
34 __PT_R2 = __PT_GPRS + 16
35 __PT_R3 = __PT_GPRS + 24
36 __PT_R4 = __PT_GPRS + 32
37 __PT_R5 = __PT_GPRS + 40
38 __PT_R6 = __PT_GPRS + 48
39 __PT_R7 = __PT_GPRS + 56
40 __PT_R8 = __PT_GPRS + 64
41 __PT_R9 = __PT_GPRS + 72
42 __PT_R10 = __PT_GPRS + 80
43 __PT_R11 = __PT_GPRS + 88
44 __PT_R12 = __PT_GPRS + 96
45 __PT_R13 = __PT_GPRS + 104
46 __PT_R14 = __PT_GPRS + 112
47 __PT_R15 = __PT_GPRS + 120
# Kernel stack geometry: a stack covers THREAD_SIZE_ORDER pages.
# STACK_INIT is the initial stack pointer offset, leaving room at the
# top of the stack for a standard call frame plus a struct pt_regs.
49 STACK_SHIFT = PAGE_SHIFT + THREAD_SIZE_ORDER
50 STACK_SIZE = 1 << STACK_SHIFT
51 STACK_INIT = STACK_SIZE - STACK_FRAME_OVERHEAD - __PT_SIZE
# Lowcore offset of the LPP save word, used by the
# ".insn s,0xb2800000,_LPP_OFFSET" alternatives further down.
53 _LPP_OFFSET = __LC_LPP
# Breaking-event-address register (BEAR) helpers. Each ALTERNATIVE is
# patched in only when facility 193 is installed; without it the site
# assembles to nothing (or the plain fallback).
# NOTE(review): opcode 0xb201 appears to be STBEAR and 0xb200 LBEAR —
# confirm against the z/Architecture Principles of Operation.
56 ALTERNATIVE "", ".insn s,0xb2010000,\address", 193
60 ALTERNATIVE "", ".insn s,0xb2000000,\address", 193
# LPSWEY (facility 193): load PSW from \address; falls back to a branch
# to the provided \lpswe code when the facility is not installed.
63 .macro LPSWEY address,lpswe
64 ALTERNATIVE "b \lpswe", ".insn siy,0xeb0000000071,\address,0", 193
# Copy the last-breaking-event address from the lowcore into the
# pt_regs addressed by \reg (facility 193 only).
68 ALTERNATIVE "", __stringify(mvc __PT_LAST_BREAK(8,\reg),__LC_LAST_BREAK), 193
# CHECK_STACK: test-under-mask on %r15 against the stack guard zone;
# only active with CONFIG_CHECK_STACK (overflow path is elided here).
71 .macro CHECK_STACK savearea
72 #ifdef CONFIG_CHECK_STACK
73 tml %r15,STACK_SIZE - CONFIG_STACK_GUARD
# CHECK_VMAP_STACK: with CONFIG_VMAP_STACK, round the stack pointer
# down to its stack base (in %r14) and verify it matches one of the
# known per-cpu stacks; mismatch means a stack overflow.
79 .macro CHECK_VMAP_STACK savearea,oklabel
80 #ifdef CONFIG_VMAP_STACK
82 nill %r14,0x10000 - STACK_SIZE # round %r14 down to stack base
84 clg %r14,__LC_KERNEL_STACK
86 clg %r14,__LC_ASYNC_STACK
88 clg %r14,__LC_MCCK_STACK
90 clg %r14,__LC_NODAT_STACK
92 clg %r14,__LC_RESTART_STACK
# Store clock into \savearea: STCKF (0xb27c) when facility 25
# (store-clock-fast) is installed, plain STCK (0xb205) otherwise.
102 ALTERNATIVE ".insn s,0xb2050000,\savearea", \
103 ".insn s,0xb27c0000,\savearea", 25
107 * The TSTMSK macro generates a test-under-mask instruction by
108 * calculating the memory offset for the specified mask value.
109 * Mask value can be any constant. The macro shifts the mask
110 * value to calculate the memory offset for the test-under-mask
# Recursive macro: each expansion peels one byte off \mask while
# advancing \bytepos; it errors out if the mask is zero or if more
# than one byte of the mask is populated (TM tests a single byte).
113 .macro TSTMSK addr, mask, size=8, bytepos=0
114 .if (\bytepos < \size) && (\mask >> 8)
116 .error "Mask exceeds byte boundary"
118 TSTMSK \addr, "(\mask >> 8)", \size, "(\bytepos + 1)"
122 .error "Mask must not be zero"
124 off = \size - \bytepos - 1 # byte offset of the mask within the field
# Branch-prediction control (Spectre mitigation). 0xb2e8 is the PPA
# (perform-processor-assist) instruction; the c000/d000 forms are the
# two assist function codes used to disable/re-enable prediction.
# All sites are patched in only when facility 82 is installed.
129 ALTERNATIVE "", ".long 0xb2e8c000", 82
133 ALTERNATIVE "", ".long 0xb2e8d000", 82
# BPENTER: on kernel entry, re-enable branch prediction unless one of
# the \tif_mask isolation flags is set for the current context.
136 .macro BPENTER tif_ptr,tif_mask
137 ALTERNATIVE "TSTMSK \tif_ptr,\tif_mask; jz .+8; .long 0xb2e8d000", \
# BPEXIT: on kernel exit, set branch-prediction state according to the
# isolation flags (inverse polarity of BPENTER).
141 .macro BPEXIT tif_ptr,tif_mask
142 TSTMSK \tif_ptr,\tif_mask
143 ALTERNATIVE "jz .+8; .long 0xb2e8c000", \
144 "jnz .+8; .long 0xb2e8d000", 82
148 * The CHKSTG macro jumps to the provided label in case the
149 * machine check interruption code reports one of unrecoverable
151 * - Storage error uncorrected
152 * - Storage key error uncorrected
153 * - Storage degradation with Failing-storage-address validity
155 .macro CHKSTG errlabel
# Each TSTMSK below tests one class of storage error in the machine
# check interruption code; the branches to \errlabel are elided here.
156 TSTMSK __LC_MCCK_CODE,(MCCK_CODE_STG_ERROR|MCCK_CODE_STG_KEY_ERROR)
158 TSTMSK __LC_MCCK_CODE,MCCK_CODE_STG_DEGRAD
160 TSTMSK __LC_MCCK_CODE,MCCK_CODE_STG_FAIL_ADDR
165 #if IS_ENABLED(CONFIG_KVM)
167 * The OUTSIDE macro jumps to the provided label in case the value
168 * in the provided register is outside of the provided range. The
169 * macro is useful for checking whether a PSW stored in a register
170 * pair points inside or outside of a block of instructions.
171 * @reg: register to check
172 * @start: start of the range
173 * @end: end of the range
174 * @outside_label: jump here if @reg is outside of [@start..@end)
176 .macro OUTSIDE reg,start,end,outside_label
180 lghi %r13,\end - \start # range length (compare/branch elided)
# SIEEXIT: leave SIE context — clear the "in SIE" bit in the control
# block, switch back to the kernel address space, and continue at
# sie_exit.
186 lg %r9,__SF_SIE_CONTROL(%r15) # get control block pointer
187 ni __SIE_PROG0C+3(%r9),0xfe # no longer in SIE
188 lctlg %c1,%c1,__LC_KERNEL_ASCE # load primary asce
189 larl %r9,sie_exit # skip forward to sie_exit
# Generate expoline branch thunks for indirect branches via %r14/%r13
# (Spectre v2 mitigation).
194 GEN_BR_THUNK %r14,%r13
196 .section .kprobes.text, "ax"
199 * This nop exists only in order to avoid that __bpon starts at
200 * the beginning of the kprobes text section. In that case we would
201 * have several symbols at the same address. E.g. objdump would take
202 * an arbitrary symbol name when disassembling this code.
203 * With the added nop in between the __bpon symbol is unique
215 * Scheduler resume function, called by switch_to
216 * gpr2 = (task_struct *) prev
217 * gpr3 = (task_struct *) next
# Saves prev's callee context on its stack, records prev's kernel
# stack pointer in its thread struct, then installs next's task
# struct, kernel stack and saved registers.
222 stmg %r6,%r15,__SF_GPRS(%r15) # store gprs of prev task
223 lghi %r4,__TASK_stack
224 lghi %r1,__TASK_thread
226 stg %r15,__THREAD_ksp(%r1,%r2) # store kernel stack of prev
227 lg %r15,0(%r4,%r3) # start of kernel stack of next
228 agr %r15,%r5 # end of kernel stack of next
229 stg %r3,__LC_CURRENT # store task struct of next
230 stg %r15,__LC_KERNEL_STACK # store end of kernel stack
231 lg %r15,__THREAD_ksp(%r1,%r3) # load kernel stack of next
233 mvc __LC_CURRENT_PID(4,%r0),0(%r3) # store pid of next
234 lmg %r6,%r15,__SF_GPRS(%r15) # load gprs of next task
# Set the LPP program parameter when facility 40 is installed.
235 ALTERNATIVE "", ".insn s,0xb2800000,_LPP_OFFSET", 40
239 #if IS_ENABLED(CONFIG_KVM)
241 * sie64a calling convention:
242 * %r2 pointer to sie control block
243 * %r3 guest register save area
# Enters SIE (guest execution) and, after interception, restores the
# host context and returns the exit reason code in %r2.
246 stmg %r6,%r14,__SF_GPRS(%r15) # save kernel registers
248 stg %r2,__SF_SIE_CONTROL(%r15) # save control block pointer
249 stg %r3,__SF_SIE_SAVEAREA(%r15) # save guest register save area
250 xc __SF_SIE_REASON(8,%r15),__SF_SIE_REASON(%r15) # reason code = 0
251 mvc __SF_SIE_FLAGS(8,%r15),__TI_flags(%r12) # copy thread flags
252 lmg %r0,%r13,0(%r3) # load guest gprs 0-13
253 lg %r14,__LC_GMAP # get gmap pointer
256 lctlg %c1,%c1,__GMAP_ASCE(%r14) # load primary asce
258 lg %r14,__SF_SIE_CONTROL(%r15) # get control block pointer
259 oi __SIE_PROG0C+3(%r14),1 # we are going into SIE now
260 tm __SIE_PROG20+3(%r14),3 # last exit...
262 TSTMSK __LC_CPU_FLAGS,_CIF_FPU
263 jo .Lsie_skip # exit if fp/vx regs changed
264 BPEXIT __SF_SIE_FLAGS(%r15),(_TIF_ISOLATE_BP|_TIF_ISOLATE_BP_GUEST)
268 BPENTER __SF_SIE_FLAGS(%r15),(_TIF_ISOLATE_BP|_TIF_ISOLATE_BP_GUEST)
270 ni __SIE_PROG0C+3(%r14),0xfe # no longer in SIE
271 lctlg %c1,%c1,__LC_KERNEL_ASCE # load primary asce
273 # some program checks are suppressing. C code (e.g. do_protection_exception)
274 # will rewind the PSW by the ILC, which is often 4 bytes in case of SIE. There
275 # are some corner cases (e.g. runtime instrumentation) where ILC is unpredictable.
276 # Other instructions between sie64a and .Lsie_done should not cause program
277 # interrupts. So lets use 3 nops as a landing pad for all possible rewinds.
286 lg %r14,__SF_SIE_SAVEAREA(%r15) # load guest register save area
287 stmg %r0,%r13,0(%r14) # save guest gprs 0-13
288 xgr %r0,%r0 # clear guest registers to
289 xgr %r1,%r1 # prevent speculative use
293 lmg %r6,%r14,__SF_GPRS(%r15) # restore kernel registers
294 lg %r2,__SF_SIE_REASON(%r15) # return exit reason code
298 stg %r14,__SF_SIE_REASON(%r15) # set exit reason code
# Exception table: any fault on the rewind landing pads or at sie_exit
# is redirected to .Lsie_fault.
301 EX_TABLE(.Lrewind_pad6,.Lsie_fault)
302 EX_TABLE(.Lrewind_pad4,.Lsie_fault)
303 EX_TABLE(.Lrewind_pad2,.Lsie_fault)
304 EX_TABLE(sie_exit,.Lsie_fault)
306 EXPORT_SYMBOL(sie64a)
307 EXPORT_SYMBOL(sie_exit)
311 * SVC interrupt handler routine. System calls are synchronous events and
312 * are entered with interrupts disabled.
# Entry: save user gprs and last-break address, switch to the kernel
# address space and kernel stack, build a pt_regs, then call the C
# syscall dispatcher. Exit restores user state and returns via LPSWEY.
316 stpt __LC_SYS_ENTER_TIMER
317 stmg %r8,%r15,__LC_SAVE_AREA_SYNC
321 STBEAR __LC_LAST_BREAK
322 lctlg %c1,%c1,__LC_KERNEL_ASCE
324 lg %r15,__LC_KERNEL_STACK
325 xc __SF_BACKCHAIN(8,%r15),__SF_BACKCHAIN(%r15)
326 stmg %r0,%r7,STACK_FRAME_OVERHEAD+__PT_R0(%r15)
327 BPENTER __TI_flags(%r12),_TIF_ISOLATE_BP
328 # clear user controlled register to prevent speculative use
339 la %r2,STACK_FRAME_OVERHEAD(%r15) # pointer to pt_regs
340 mvc __PT_R8(64,%r2),__LC_SAVE_AREA_SYNC
343 brasl %r14,__do_syscall
344 lctlg %c1,%c1,__LC_USER_ASCE
345 mvc __LC_RETURN_PSW(16),STACK_FRAME_OVERHEAD+__PT_PSW(%r15)
346 BPEXIT __TI_flags(%r12),_TIF_ISOLATE_BP
347 LBEAR STACK_FRAME_OVERHEAD+__PT_LAST_BREAK(%r15)
348 lmg %r0,%r15,STACK_FRAME_OVERHEAD+__PT_R0(%r15)
350 LPSWEY __LC_RETURN_PSW,__LC_RETURN_LPSWE
354 # a new process exits the kernel with ret_from_fork
# Calls the C-level __ret_from_fork, then performs the same return-to-
# user sequence as the syscall exit path above.
358 brasl %r14,__ret_from_fork
359 lctlg %c1,%c1,__LC_USER_ASCE
360 mvc __LC_RETURN_PSW(16),STACK_FRAME_OVERHEAD+__PT_PSW(%r15)
361 BPEXIT __TI_flags(%r12),_TIF_ISOLATE_BP
362 LBEAR STACK_FRAME_OVERHEAD+__PT_LAST_BREAK(%r15)
363 lmg %r0,%r15,STACK_FRAME_OVERHEAD+__PT_R0(%r15)
365 LPSWEY __LC_RETURN_PSW,__LC_RETURN_LPSWE
366 ENDPROC(ret_from_fork)
369 * Program check handler routine
# Distinguishes faults from user space, kernel space and SIE guest
# context, builds a pt_regs on the appropriate stack, and calls the C
# handler __do_pgm_check. A PER event on an svc instruction is
# redirected to the syscall path (.Lpgm_svcper).
372 ENTRY(pgm_check_handler)
373 stpt __LC_SYS_ENTER_TIMER
375 stmg %r8,%r15,__LC_SAVE_AREA_SYNC
378 lmg %r8,%r9,__LC_PGM_OLD_PSW
379 tmhh %r8,0x0001 # coming from user space?
381 lctlg %c1,%c1,__LC_KERNEL_ASCE
382 j 3f # -> fault in user space
384 #if IS_ENABLED(CONFIG_KVM)
385 # cleanup critical section for program checks in sie64a
386 OUTSIDE %r9,.Lsie_gmap,.Lsie_done,1f
388 lghi %r10,_PIF_GUEST_FAULT
390 1: tmhh %r8,0x4000 # PER bit set in old PSW ?
391 jnz 2f # -> enabled, can't be a double fault
392 tm __LC_PGM_ILC+3,0x80 # check for per exception
393 jnz .Lpgm_svcper # -> single stepped svc
394 2: CHECK_STACK __LC_SAVE_AREA_SYNC
395 aghi %r15,-(STACK_FRAME_OVERHEAD + __PT_SIZE)
396 # CHECK_VMAP_STACK branches to stack_overflow or 4f
397 CHECK_VMAP_STACK __LC_SAVE_AREA_SYNC,4f
398 3: BPENTER __TI_flags(%r12),_TIF_ISOLATE_BP
399 lg %r15,__LC_KERNEL_STACK
400 4: la %r11,STACK_FRAME_OVERHEAD(%r15)
401 stg %r10,__PT_FLAGS(%r11)
402 xc __SF_BACKCHAIN(8,%r15),__SF_BACKCHAIN(%r15)
403 stmg %r0,%r7,__PT_R0(%r11)
404 mvc __PT_R8(64,%r11),__LC_SAVE_AREA_SYNC
405 mvc __PT_LAST_BREAK(8,%r11),__LC_PGM_LAST_BREAK
406 stmg %r8,%r9,__PT_PSW(%r11)
408 # clear user controlled registers to prevent speculative use
417 brasl %r14,__do_pgm_check
418 tmhh %r8,0x0001 # returning to user space?
419 jno .Lpgm_exit_kernel
420 lctlg %c1,%c1,__LC_USER_ASCE
421 BPEXIT __TI_flags(%r12),_TIF_ISOLATE_BP
424 mvc __LC_RETURN_PSW(16),STACK_FRAME_OVERHEAD+__PT_PSW(%r15)
425 LBEAR STACK_FRAME_OVERHEAD+__PT_LAST_BREAK(%r15)
426 lmg %r0,%r15,STACK_FRAME_OVERHEAD+__PT_R0(%r15)
427 LPSWEY __LC_RETURN_PSW,__LC_RETURN_LPSWE
430 # single stepped system call
# Rebuild a return PSW that resumes at the svc handler so the PER
# event is delivered after the syscall entry (branch to .Lsysc_per).
433 mvc __LC_RETURN_PSW(8),__LC_SVC_NEW_PSW
435 stg %r14,__LC_RETURN_PSW+8
437 LBEAR __LC_PGM_LAST_BREAK
438 LPSWEY __LC_RETURN_PSW,__LC_RETURN_LPSWE # branch to .Lsysc_per
439 ENDPROC(pgm_check_handler)
442 * Interrupt handler macro used for external and IO interrupts.
# Generates a complete asynchronous interrupt entry/exit routine:
# \name      - symbol of the generated handler
# \lc_old_psw - lowcore location of the interrupted PSW
# \handler   - C function called with a pt_regs pointer in %r2
444 .macro INT_HANDLER name,lc_old_psw,handler
447 stpt __LC_SYS_ENTER_TIMER
448 STBEAR __LC_LAST_BREAK
450 stmg %r8,%r15,__LC_SAVE_AREA_ASYNC
452 lmg %r8,%r9,\lc_old_psw
453 tmhh %r8,0x0001 # interrupting from user ?
455 #if IS_ENABLED(CONFIG_KVM)
# If the interrupt hit inside the SIE critical section, leave SIE
# context first.
456 OUTSIDE %r9,.Lsie_gmap,.Lsie_done,0f
457 BPENTER __SF_SIE_FLAGS(%r15),(_TIF_ISOLATE_BP|_TIF_ISOLATE_BP_GUEST)
460 0: CHECK_STACK __LC_SAVE_AREA_ASYNC
461 aghi %r15,-(STACK_FRAME_OVERHEAD + __PT_SIZE)
463 1: BPENTER __TI_flags(%r12),_TIF_ISOLATE_BP
464 lctlg %c1,%c1,__LC_KERNEL_ASCE
465 lg %r15,__LC_KERNEL_STACK
466 2: xc __SF_BACKCHAIN(8,%r15),__SF_BACKCHAIN(%r15)
467 la %r11,STACK_FRAME_OVERHEAD(%r15)
468 stmg %r0,%r7,__PT_R0(%r11)
469 # clear user controlled registers to prevent speculative use
478 xc __PT_FLAGS(8,%r11),__PT_FLAGS(%r11)
479 mvc __PT_R8(64,%r11),__LC_SAVE_AREA_ASYNC
481 stmg %r8,%r9,__PT_PSW(%r11)
482 tm %r8,0x0001 # coming from user space?
484 lctlg %c1,%c1,__LC_KERNEL_ASCE
485 1: lgr %r2,%r11 # pass pointer to pt_regs
487 mvc __LC_RETURN_PSW(16),__PT_PSW(%r11)
488 tmhh %r8,0x0001 # returning to user ?
490 lctlg %c1,%c1,__LC_USER_ASCE
491 BPEXIT __TI_flags(%r12),_TIF_ISOLATE_BP
493 2: LBEAR __PT_LAST_BREAK(%r11)
494 lmg %r0,%r15,__PT_R0(%r11)
495 LPSWEY __LC_RETURN_PSW,__LC_RETURN_LPSWE
# Instantiate the handlers for external and I/O interrupts.
499 INT_HANDLER ext_int_handler,__LC_EXT_OLD_PSW,do_ext_irq
500 INT_HANDLER io_int_handler,__LC_IO_OLD_PSW,do_io_irq
# psw_idle: enter an enabled wait PSW. Saves the resume address
# (psw_idle_exit), records clock/timer/MT-cycle state in the idle data
# area (%r2), flags the cpu as enabled-wait, then loads the wait PSW.
506 stg %r14,(__SF_GPRS+8*8)(%r15)
507 stg %r3,__SF_EMPTY(%r15)
508 larl %r1,psw_idle_exit
509 stg %r1,__SF_EMPTY+8(%r15)
510 larl %r1,smp_cpu_mtid
# 0xeb...17 is STCCTM: store multithreading cycle counters on entry.
514 .insn rsy,0xeb0000000017,%r1,5,__MT_CYCLES_ENTER(%r2)
516 oi __LC_CPU_FLAGS+7,_CIF_ENABLED_WAIT
518 STCK __CLOCK_IDLE_ENTER(%r2)
519 stpt __TIMER_IDLE_ENTER(%r2)
520 lpswe __SF_EMPTY(%r15)
527 * Machine check handler routines
# Revalidates cpu state from the lowcore save areas (machine checks can
# corrupt registers), panics on unrecoverable damage, handles machine
# checks taken while in SIE, then calls the C handlers on the dedicated
# machine check stack before switching to the kernel stack.
529 ENTRY(mcck_int_handler)
532 la %r1,4095 # validate r1
533 spt __LC_CPU_TIMER_SAVE_AREA-4095(%r1) # validate cpu timer
534 LBEAR __LC_LAST_BREAK_SAVE_AREA-4095(%r1) # validate bear
535 lmg %r0,%r15,__LC_GPREGS_SAVE_AREA-4095(%r1)# validate gprs
537 lmg %r8,%r9,__LC_MCK_OLD_PSW
538 TSTMSK __LC_MCCK_CODE,MCCK_CODE_SYSTEM_DAMAGE
539 jo .Lmcck_panic # yes -> rest of mcck code invalid
540 TSTMSK __LC_MCCK_CODE,MCCK_CODE_CR_VALID
541 jno .Lmcck_panic # control registers invalid -> panic
543 lctlg %c0,%c15,__LC_CREGS_SAVE_AREA-4095(%r14) # validate ctl regs
# Pick the most plausible timer value when the saved cpu timer is not
# flagged valid: compare enter/exit/last-update timers.
545 lghi %r14,__LC_CPU_TIMER_SAVE_AREA
546 mvc __LC_MCCK_ENTER_TIMER(8),0(%r14)
547 TSTMSK __LC_MCCK_CODE,MCCK_CODE_CPU_TIMER_VALID
549 la %r14,__LC_SYS_ENTER_TIMER
550 clc 0(8,%r14),__LC_EXIT_TIMER
552 la %r14,__LC_EXIT_TIMER
553 1: clc 0(8,%r14),__LC_LAST_UPDATE_TIMER
555 la %r14,__LC_LAST_UPDATE_TIMER
557 mvc __LC_MCCK_ENTER_TIMER(8),0(%r14)
558 3: TSTMSK __LC_MCCK_CODE,MCCK_CODE_PSW_MWP_VALID
560 tmhh %r8,0x0001 # interrupting from user ?
562 TSTMSK __LC_MCCK_CODE,MCCK_CODE_PSW_IA_VALID
564 #if IS_ENABLED(CONFIG_KVM)
# Machine check while running a guest in SIE: flag it for the C code
# and check for unrecoverable storage errors.
565 OUTSIDE %r9,.Lsie_gmap,.Lsie_done,6f
566 OUTSIDE %r9,.Lsie_entry,.Lsie_skip,4f
567 oi __LC_CPU_FLAGS+7, _CIF_MCCK_GUEST
569 4: CHKSTG .Lmcck_panic
570 5: larl %r14,.Lstosm_tmp
571 stosm 0(%r14),0x04 # turn dat on, keep irqs off
572 BPENTER __SF_SIE_FLAGS(%r15),(_TIF_ISOLATE_BP|_TIF_ISOLATE_BP_GUEST)
576 6: CHKSTG .Lmcck_panic
577 larl %r14,.Lstosm_tmp
578 stosm 0(%r14),0x04 # turn dat on, keep irqs off
579 tmhh %r8,0x0001 # interrupting from user ?
581 BPENTER __TI_flags(%r12),_TIF_ISOLATE_BP
# Build a pt_regs on the dedicated machine check stack and call the
# C-level handlers.
583 lg %r15,__LC_MCCK_STACK
584 la %r11,STACK_FRAME_OVERHEAD(%r15)
585 stctg %c1,%c1,__PT_CR1(%r11)
586 lctlg %c1,%c1,__LC_KERNEL_ASCE
587 xc __SF_BACKCHAIN(8,%r15),__SF_BACKCHAIN(%r15)
588 lghi %r14,__LC_GPREGS_SAVE_AREA+64
589 stmg %r0,%r7,__PT_R0(%r11)
590 # clear user controlled registers to prevent speculative use
599 mvc __PT_R8(64,%r11),0(%r14)
600 stmg %r8,%r9,__PT_PSW(%r11)
601 xc __PT_FLAGS(8,%r11),__PT_FLAGS(%r11)
602 xc __SF_BACKCHAIN(8,%r15),__SF_BACKCHAIN(%r15)
603 lgr %r2,%r11 # pass pointer to pt_regs
604 brasl %r14,s390_do_machine_check
# Copy the pt_regs over to the kernel stack and run the second-stage
# handler there.
607 lg %r1,__LC_KERNEL_STACK # switch to kernel stack
608 mvc STACK_FRAME_OVERHEAD(__PT_SIZE,%r1),0(%r11)
609 xc __SF_BACKCHAIN(8,%r1),__SF_BACKCHAIN(%r1)
610 la %r11,STACK_FRAME_OVERHEAD(%r1)
612 brasl %r14,s390_handle_mcck
614 lctlg %c1,%c1,__PT_CR1(%r11)
615 lmg %r0,%r10,__PT_R0(%r11)
616 mvc __LC_RETURN_MCCK_PSW(16),__PT_PSW(%r11) # move return PSW
617 tm __LC_RETURN_MCCK_PSW+1,0x01 # returning to user ?
619 BPEXIT __TI_flags(%r12),_TIF_ISOLATE_BP
621 0: ALTERNATIVE "", __stringify(lghi %r12,__LC_LAST_BREAK_SAVE_AREA),193
623 lmg %r11,%r15,__PT_R11(%r11)
624 LPSWEY __LC_RETURN_MCCK_PSW,__LC_RETURN_MCCK_LPSWE
628 * Iterate over all possible CPU addresses in the range 0..0xffff
629 * and stop each CPU using signal processor. Use compare and swap
630 * to allow just one CPU-stopper and prevent concurrent CPUs from
631 * stopping each other while leaving the others running.
636 cs %r5,%r6,0(%r7) # single CPU-stopper only
639 stap 0(%r7) # this CPU address
643 sll %r0,16 # CPU counter
644 lhi %r3,0 # next CPU address
647 1: sigp %r1,%r3,SIGP_STOP # stop next CPU
651 3: sigp %r1,%r4,SIGP_STOP # stop this CPU
654 ENDPROC(mcck_int_handler)
# Restart interrupt handler: runs the function stored in the lowcore
# restart block (__LC_RESTART_FN) on the restart stack, optionally
# stopping the requesting source cpu first, then stops this cpu.
656 ENTRY(restart_int_handler)
657 ALTERNATIVE "", ".insn s,0xb2800000,_LPP_OFFSET", 40
658 stg %r15,__LC_SAVE_AREA_RESTART
659 TSTMSK __LC_RESTART_FLAGS,RESTART_FLAG_CTLREGS,4
662 lctlg %c0,%c15,__LC_CREGS_SAVE_AREA-4095(%r15)
663 0: larl %r15,.Lstosm_tmp
664 stosm 0(%r15),0x04 # turn dat on, keep irqs off
665 lg %r15,__LC_RESTART_STACK
# Build a pt_regs on the restart stack for the called function.
666 xc STACK_FRAME_OVERHEAD(__PT_SIZE,%r15),STACK_FRAME_OVERHEAD(%r15)
667 stmg %r0,%r14,STACK_FRAME_OVERHEAD+__PT_R0(%r15)
668 mvc STACK_FRAME_OVERHEAD+__PT_R15(8,%r15),__LC_SAVE_AREA_RESTART
669 mvc STACK_FRAME_OVERHEAD+__PT_PSW(16,%r15),__LC_RST_OLD_PSW
670 xc 0(STACK_FRAME_OVERHEAD,%r15),0(%r15)
671 lg %r1,__LC_RESTART_FN # load fn, parm & source cpu
672 lg %r2,__LC_RESTART_DATA
673 lgf %r3,__LC_RESTART_SOURCE
674 ltgr %r3,%r3 # test source cpu address
675 jm 1f # negative -> skip source stop
676 0: sigp %r4,%r3,SIGP_SENSE # sigp sense to source cpu
677 brc 10,0b # wait for status stored
678 1: basr %r14,%r1 # call function
679 stap __SF_EMPTY(%r15) # store cpu address
680 llgh %r3,__SF_EMPTY(%r15)
681 2: sigp %r4,%r3,SIGP_STOP # sigp stop to current cpu
684 ENDPROC(restart_int_handler)
686 .section .kprobes.text, "ax"
688 #if defined(CONFIG_CHECK_STACK) || defined(CONFIG_VMAP_STACK)
690 * The synchronous or the asynchronous stack overflowed. We are dead.
691 * No need to properly save the registers, we are going to panic anyway.
692 * Setup a pt_regs so that show_trace can provide a good call trace.
694 ENTRY(stack_overflow)
695 lg %r15,__LC_NODAT_STACK # change to panic stack
696 la %r11,STACK_FRAME_OVERHEAD(%r15)
697 stmg %r0,%r7,__PT_R0(%r11)
698 stmg %r8,%r9,__PT_PSW(%r11)
# %r14 is expected to point at the lowcore save area of the faulting
# context here (set by the CHECK_*STACK caller).
699 mvc __PT_R8(64,%r11),0(%r14)
700 stg %r10,__PT_ORIG_GPR2(%r11) # store last break to orig_gpr2
701 xc __SF_BACKCHAIN(8,%r15),__SF_BACKCHAIN(%r15)
702 lgr %r2,%r11 # pass pointer to pt_regs
703 jg kernel_stack_overflow # tail-call, never returns
704 ENDPROC(stack_overflow)
# System call tables: generated from asm/syscall_table.h by redefining
# the SYSCALL() macro — once for the native 64-bit entry points and
# once for the 31-bit compat ("emu") entry points.
712 .section .rodata, "a"
713 #define SYSCALL(esame,emu) .quad __s390x_ ## esame
714 .globl sys_call_table
716 #include "asm/syscall_table.h"
721 #define SYSCALL(esame,emu) .quad __s390_ ## emu
722 .globl sys_call_table_emu
724 #include "asm/syscall_table.h"