/*
 * File:	mca.c
 * Purpose:	Generic MCA handling layer
 *
 * Copyright (C) 2003 Hewlett-Packard Co
 *	David Mosberger-Tang <[email protected]>
 *
 * Copyright (C) 2002 Dell Inc.
 * Copyright (C) Matt Domsch <[email protected]>
 *
 * Copyright (C) 2002 Intel
 * Copyright (C) Jenna Hall <[email protected]>
 *
 * Copyright (C) 2001 Intel
 * Copyright (C) Fred Lewis <[email protected]>
 *
 * Copyright (C) 2000 Intel
 * Copyright (C) Chuck Fleckenstein <[email protected]>
 *
 * Copyright (C) 1999, 2004-2008 Silicon Graphics, Inc.
 * Copyright (C) Vijay Chander <[email protected]>
 *
 * Copyright (C) 2006 FUJITSU LIMITED
 * Copyright (C) Hidetoshi Seto <[email protected]>
 *
 * 2000-03-29 Chuck Fleckenstein <[email protected]>
 *	      Fixed PAL/SAL update issues, began MCA bug fixes, logging issues,
 *	      added min save state dump, added INIT handler.
 *
 * 2001-01-03 Fred Lewis <[email protected]>
 *	      Added setup of CMCI and CPEI IRQs, logging of corrected platform
 *	      errors, completed code for logging of corrected & uncorrected
 *	      machine check errors, and updated for conformance with Nov. 2000
 *	      revision of the SAL 3.0 spec.
 *
 * 2002-01-04 Jenna Hall <[email protected]>
 *	      Aligned MCA stack to 16 bytes, added platform vs. CPU error flag,
 *	      set SAL default return values, changed error record structure to
 *	      linked list, added init call to sal_get_state_info_size().
 *
 * 2002-03-25 Matt Domsch <[email protected]>
 *	      GUID cleanups.
 *
 * 2003-04-15 David Mosberger-Tang <[email protected]>
 *	      Added INIT backtrace support.
 *
 * 2003-12-08 Keith Owens <[email protected]>
 *	      smp_call_function() must not be called from interrupt context
 *	      (can deadlock on tasklist_lock).
 *	      Use keventd to call smp_call_function().
 *
 * 2004-02-01 Keith Owens <[email protected]>
 *	      Avoid deadlock when using printk() for MCA and INIT records.
 *	      Delete all record printing code, moved to salinfo_decode in user
 *	      space.  Mark variables and functions static where possible.
 *	      Delete dead variables and functions.  Reorder to remove the need
 *	      for forward declarations and to consolidate related code.
 *
 * 2005-08-12 Keith Owens <[email protected]>
 *	      Convert MCA/INIT handlers to use per event stacks and SAL/OS
 *	      state.
 *
 * 2005-10-07 Keith Owens <[email protected]>
 *	      Add notify_die() hooks.
 *
 * 2006-09-15 Hidetoshi Seto <[email protected]>
 *	      Add printing support for MCA/INIT.
 *
 * 2007-04-27 Russ Anderson <[email protected]>
 *	      Support multiple cpus going through OS_MCA in the same event.
 */
#include <linux/jiffies.h>
#include <linux/types.h>
#include <linux/init.h>
#include <linux/sched.h>
#include <linux/interrupt.h>
#include <linux/irq.h>
#include <linux/bootmem.h>
#include <linux/acpi.h>
#include <linux/timer.h>
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/smp.h>
#include <linux/workqueue.h>
#include <linux/cpumask.h>
#include <linux/kdebug.h>
#include <linux/cpu.h>

#include <asm/delay.h>
#include <asm/machvec.h>
#include <asm/meminit.h>
#include <asm/page.h>
#include <asm/ptrace.h>
#include <asm/system.h>
#include <asm/sal.h>
#include <asm/mca.h>
#include <asm/kexec.h>

#include <asm/irq.h>
#include <asm/hw_irq.h>
#include <asm/tlb.h>

#include "mca_drv.h"
#include "entry.h"

#if defined(IA64_MCA_DEBUG_INFO)
# define IA64_MCA_DEBUG(fmt...)	printk(fmt)
#else
# define IA64_MCA_DEBUG(fmt...)
#endif

/* Used by mca_asm.S */
DEFINE_PER_CPU(u64, ia64_mca_data);		/* == __per_cpu_mca[smp_processor_id()] */
DEFINE_PER_CPU(u64, ia64_mca_per_cpu_pte);	/* PTE to map per-CPU area */
DEFINE_PER_CPU(u64, ia64_mca_pal_pte);		/* PTE to map PAL code */
DEFINE_PER_CPU(u64, ia64_mca_pal_base);		/* vaddr PAL code granule */
DEFINE_PER_CPU(u64, ia64_mca_tr_reload);	/* Flag for TR reload */

unsigned long __per_cpu_mca[NR_CPUS];

/* In mca_asm.S */
extern void			ia64_os_init_dispatch_monarch (void);
extern void			ia64_os_init_dispatch_slave (void);

static int monarch_cpu = -1;

static ia64_mc_info_t		ia64_mc_info;

#define MAX_CPE_POLL_INTERVAL (15*60*HZ) /* 15 minutes */
#define MIN_CPE_POLL_INTERVAL (2*60*HZ)  /* 2 minutes */
#define CMC_POLL_INTERVAL     (1*60*HZ)  /* 1 minute */
#define CPE_HISTORY_LENGTH    5
#define CMC_HISTORY_LENGTH    5

#ifdef CONFIG_ACPI
static struct timer_list cpe_poll_timer;
#endif
static struct timer_list cmc_poll_timer;
/*
 * This variable tells whether we are currently in polling mode.
 * Start with this in the wrong state so we won't play w/ timers
 * before the system is ready.
 */
static int cmc_polling_enabled = 1;

/*
 * Clearing this variable prevents CPE polling from getting activated
 * in mca_late_init.  Use it if your system doesn't provide a CPEI,
 * but encounters problems retrieving CPE logs.  This should only be
 * necessary for debugging.
 */
static int cpe_poll_enabled = 1;

extern void salinfo_log_wakeup(int type, u8 *buffer, u64 size, int irqsafe);

static int mca_init __initdata;

/*
 * limited & delayed printing support for MCA/INIT handler
 */

#define mprintk(fmt...) ia64_mca_printk(fmt)

#define MLOGBUF_SIZE (512+256*NR_CPUS)
#define MLOGBUF_MSGMAX 256
static char mlogbuf[MLOGBUF_SIZE];
static DEFINE_SPINLOCK(mlogbuf_wlock);	/* mca context only */
static DEFINE_SPINLOCK(mlogbuf_rlock);	/* normal context only */
static unsigned long mlogbuf_start;
static unsigned long mlogbuf_end;
static unsigned int mlogbuf_finished = 0;
static unsigned long mlogbuf_timestamp = 0;

static int loglevel_save = -1;
#define BREAK_LOGLEVEL(__console_loglevel)		\
	oops_in_progress = 1;				\
	if (loglevel_save < 0)				\
		loglevel_save = __console_loglevel;	\
	__console_loglevel = 15;

#define RESTORE_LOGLEVEL(__console_loglevel)		\
	if (loglevel_save >= 0) {			\
		__console_loglevel = loglevel_save;	\
		loglevel_save = -1;			\
	}						\
	mlogbuf_finished = 0;				\
	oops_in_progress = 0;

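/*
 * Note (illustrative): BREAK_LOGLEVEL/RESTORE_LOGLEVEL deliberately expand
 * to multiple statements without a do { } while (0) wrapper, so they must
 * only be used as full statements, never inside an unbraced if/else.
 * Forcing console_loglevel to 15 and setting oops_in_progress makes the
 * console print everything and bust its locks, which is what an MCA/INIT
 * handler that may have interrupted printk() needs.
 */
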
/*
 * Push messages into buffer, print them later if not urgent.
 */
void ia64_mca_printk(const char *fmt, ...)
{
	va_list args;
	int printed_len;
	char temp_buf[MLOGBUF_MSGMAX];
	char *p;

	va_start(args, fmt);
	printed_len = vscnprintf(temp_buf, sizeof(temp_buf), fmt, args);
	va_end(args);

	/* Copy the output into mlogbuf */
	if (oops_in_progress) {
		/* mlogbuf was abandoned, use printk directly instead. */
		printk("%s", temp_buf);	/* "%s" guards against '%' in the message */
	} else {
		spin_lock(&mlogbuf_wlock);
		for (p = temp_buf; *p; p++) {
			unsigned long next = (mlogbuf_end + 1) % MLOGBUF_SIZE;
			if (next != mlogbuf_start) {
				mlogbuf[mlogbuf_end] = *p;
				mlogbuf_end = next;
			} else {
				/* buffer full */
				break;
			}
		}
		mlogbuf[mlogbuf_end] = '\0';
		spin_unlock(&mlogbuf_wlock);
	}
}
EXPORT_SYMBOL(ia64_mca_printk);
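
/*
 * Typical use from MCA/INIT context (illustrative sketch, not taken from
 * this file): handlers log through mprintk() so nothing touches the
 * console locks while the machine is unstable, and a normal-context
 * reader such as salinfod later drains the ring buffer:
 *
 *	mprintk(KERN_INFO "OS MCA: cpu %d checked in\n", cpu);
 *	...
 *	ia64_mlogbuf_dump();	(from normal context only)
 */
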

/*
 * Print buffered messages.
 *  NOTE: call this after returning to normal context (e.g. from salinfod).
 */
void ia64_mlogbuf_dump(void)
{
	char temp_buf[MLOGBUF_MSGMAX];
	char *p;
	unsigned long index;
	unsigned long flags;
	unsigned int printed_len;

	/* Get output from mlogbuf */
	while (mlogbuf_start != mlogbuf_end) {
		temp_buf[0] = '\0';
		p = temp_buf;
		printed_len = 0;

		spin_lock_irqsave(&mlogbuf_rlock, flags);

		index = mlogbuf_start;
		while (index != mlogbuf_end) {
			*p = mlogbuf[index];
			index = (index + 1) % MLOGBUF_SIZE;
			if (!*p)
				break;
			p++;
			if (++printed_len >= MLOGBUF_MSGMAX - 1)
				break;
		}
		*p = '\0';
		if (temp_buf[0])
			printk("%s", temp_buf);	/* "%s" guards against '%' */
		mlogbuf_start = index;

		mlogbuf_timestamp = 0;
		spin_unlock_irqrestore(&mlogbuf_rlock, flags);
	}
}
EXPORT_SYMBOL(ia64_mlogbuf_dump);

/*
 * Call this if the system is going down, or if messages must be flushed
 * to the console immediately (e.g. recovery failed, a crash dump is
 * about to be invoked, a long-wait rendezvous, etc.).
 *  NOTE: this should be called from the monarch.
 */
static void ia64_mlogbuf_finish(int wait)
{
	BREAK_LOGLEVEL(console_loglevel);

	spin_lock_init(&mlogbuf_rlock);
	ia64_mlogbuf_dump();
	printk(KERN_EMERG "mlogbuf_finish: printing switched to urgent mode, "
		"MCA/INIT might be dodgy or fail.\n");

	if (!wait)
		return;

	/* wait for console */
	printk("Delaying for 5 seconds...\n");
	udelay(5*1000000);

	mlogbuf_finished = 1;
}

/*
 * Print buffered messages from INIT context.
 */
static void ia64_mlogbuf_dump_from_init(void)
{
	if (mlogbuf_finished)
		return;

	if (mlogbuf_timestamp &&
			time_before(jiffies, mlogbuf_timestamp + 30 * HZ)) {
		printk(KERN_ERR "INIT: mlogbuf_dump is interrupted by INIT "
			"and the system seems to be messed up.\n");
		ia64_mlogbuf_finish(0);
		return;
	}

	if (!spin_trylock(&mlogbuf_rlock)) {
		printk(KERN_ERR "INIT: mlogbuf_dump is interrupted by INIT. "
			"Generated messages other than stack dump will be "
			"buffered to mlogbuf and will be printed later.\n");
		printk(KERN_ERR "INIT: If messages are not printed after "
			"this INIT, wait 30 seconds and assert INIT again.\n");
		if (!mlogbuf_timestamp)
			mlogbuf_timestamp = jiffies;
		return;
	}
	spin_unlock(&mlogbuf_rlock);
	ia64_mlogbuf_dump();
}

static inline void
ia64_mca_spin(const char *func)
{
	if (monarch_cpu == smp_processor_id())
		ia64_mlogbuf_finish(0);
	mprintk(KERN_EMERG "%s: spinning here, not returning to SAL\n", func);
	while (1)
		cpu_relax();
}
/*
 * IA64_MCA log support
 */
#define IA64_MAX_LOGS		2	/* Double-buffering for nested MCAs */
#define IA64_MAX_LOG_TYPES	4	/* MCA, INIT, CMC, CPE */

typedef struct ia64_state_log_s
{
	spinlock_t	isl_lock;
	int		isl_index;
	unsigned long	isl_count;
	ia64_err_rec_t	*isl_log[IA64_MAX_LOGS]; /* need space to store header + error log */
} ia64_state_log_t;

static ia64_state_log_t ia64_state_log[IA64_MAX_LOG_TYPES];

#define IA64_LOG_ALLOCATE(it, size) \
	{ia64_state_log[it].isl_log[IA64_LOG_CURR_INDEX(it)] = \
		(ia64_err_rec_t *)alloc_bootmem(size); \
	ia64_state_log[it].isl_log[IA64_LOG_NEXT_INDEX(it)] = \
		(ia64_err_rec_t *)alloc_bootmem(size);}
#define IA64_LOG_LOCK_INIT(it) spin_lock_init(&ia64_state_log[it].isl_lock)
#define IA64_LOG_LOCK(it)      spin_lock_irqsave(&ia64_state_log[it].isl_lock, s)
#define IA64_LOG_UNLOCK(it)    spin_unlock_irqrestore(&ia64_state_log[it].isl_lock,s)
#define IA64_LOG_NEXT_INDEX(it)    ia64_state_log[it].isl_index
#define IA64_LOG_CURR_INDEX(it)    1 - ia64_state_log[it].isl_index
#define IA64_LOG_INDEX_INC(it) \
	{ia64_state_log[it].isl_index = 1 - ia64_state_log[it].isl_index; \
	ia64_state_log[it].isl_count++;}
#define IA64_LOG_INDEX_DEC(it) \
	ia64_state_log[it].isl_index = 1 - ia64_state_log[it].isl_index
#define IA64_LOG_NEXT_BUFFER(it)   (void *)((ia64_state_log[it].isl_log[IA64_LOG_NEXT_INDEX(it)]))
#define IA64_LOG_CURR_BUFFER(it)   (void *)((ia64_state_log[it].isl_log[IA64_LOG_CURR_INDEX(it)]))
#define IA64_LOG_COUNT(it)         ia64_state_log[it].isl_count

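/*
 * Example of the double-buffer flip (illustrative): with isl_index == 0,
 * NEXT is buffer 0 and CURR is buffer 1.  ia64_log_get() writes the new
 * record into NEXT, then IA64_LOG_INDEX_INC() flips isl_index to 1, so
 * the record just written becomes CURR while the other buffer is free to
 * receive a nested MCA's record.
 */
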
/*
 * ia64_log_init
 *	Reset the OS ia64 log buffer
 * Inputs:	info_type (SAL_INFO_TYPE_{MCA,INIT,CMC,CPE})
 * Outputs:	None
 */
static void __init
ia64_log_init(int sal_info_type)
{
	u64	max_size = 0;

	IA64_LOG_NEXT_INDEX(sal_info_type) = 0;
	IA64_LOG_LOCK_INIT(sal_info_type);

	// SAL will tell us the maximum size of any error record of this type
	max_size = ia64_sal_get_state_info_size(sal_info_type);
	if (!max_size)
		/* alloc_bootmem() doesn't like zero-sized allocations! */
		return;

	// set up OS data structures to hold error info
	IA64_LOG_ALLOCATE(sal_info_type, max_size);
	memset(IA64_LOG_CURR_BUFFER(sal_info_type), 0, max_size);
	memset(IA64_LOG_NEXT_BUFFER(sal_info_type), 0, max_size);
}

/*
 * ia64_log_get
 *
 *	Get the current MCA log from SAL and copy it into the OS log buffer.
 *
 * Inputs:	info_type (SAL_INFO_TYPE_{MCA,INIT,CMC,CPE})
 *		irq_safe  whether you can use printk at this point
 * Outputs:	size	  (total record length)
 *		*buffer	  (ptr to error record)
 */
static u64
ia64_log_get(int sal_info_type, u8 **buffer, int irq_safe)
{
	sal_log_record_header_t     *log_buffer;
	u64                         total_len = 0;
	unsigned long               s;

	IA64_LOG_LOCK(sal_info_type);

	/* Get the process state information */
	log_buffer = IA64_LOG_NEXT_BUFFER(sal_info_type);

	total_len = ia64_sal_get_state_info(sal_info_type, (u64 *)log_buffer);

	if (total_len) {
		IA64_LOG_INDEX_INC(sal_info_type);
		IA64_LOG_UNLOCK(sal_info_type);
		if (irq_safe) {
			IA64_MCA_DEBUG("%s: SAL error record type %d retrieved. Record length = %ld\n",
				       __func__, sal_info_type, total_len);
		}
		*buffer = (u8 *) log_buffer;
		return total_len;
	} else {
		IA64_LOG_UNLOCK(sal_info_type);
		return 0;
	}
}

/*
 * ia64_mca_log_sal_error_record
 *
 *	This function retrieves a specified error record type from SAL
 *	and wakes up any processes waiting for error records.
 *
 * Inputs:	sal_info_type	(Type of error record MCA/CMC/CPE)
 *		FIXME: remove MCA and irq_safe.
 */
static void
ia64_mca_log_sal_error_record(int sal_info_type)
{
	u8 *buffer;
	sal_log_record_header_t *rh;
	u64 size;
	int irq_safe = sal_info_type != SAL_INFO_TYPE_MCA;
#ifdef IA64_MCA_DEBUG_INFO
	static const char * const rec_name[] = { "MCA", "INIT", "CMC", "CPE" };
#endif

	size = ia64_log_get(sal_info_type, &buffer, irq_safe);
	if (!size)
		return;

	salinfo_log_wakeup(sal_info_type, buffer, size, irq_safe);

	if (irq_safe)
		IA64_MCA_DEBUG("CPU %d: SAL log contains %s error record\n",
			smp_processor_id(),
			sal_info_type < ARRAY_SIZE(rec_name) ? rec_name[sal_info_type] : "UNKNOWN");

	/* Clear logs from corrected errors in case there's no user-level logger */
	rh = (sal_log_record_header_t *)buffer;
	if (rh->severity == sal_log_severity_corrected)
		ia64_sal_clear_state_info(sal_info_type);
}

/*
 * search_mca_table
 *	See if the MCA surfaced in an instruction range
 *	that has been tagged as recoverable.
 *
 * Inputs:
 *	first	First address range to check
 *	last	Last address range to check
 *	ip	Instruction pointer, address we are looking for
 *
 * Return value:
 *	1 on Success (in the table) / 0 on Failure (not in the table)
 */
int
search_mca_table (const struct mca_table_entry *first,
		  const struct mca_table_entry *last,
		  unsigned long ip)
{
	const struct mca_table_entry *curr;
	u64 curr_start, curr_end;

	curr = first;
	while (curr <= last) {
		curr_start = (u64) &curr->start_addr + curr->start_addr;
		curr_end = (u64) &curr->end_addr + curr->end_addr;

		if ((ip >= curr_start) && (ip <= curr_end)) {
			return 1;
		}
		curr++;
	}
	return 0;
}

/* Given an address, look for it in the mca tables. */
int mca_recover_range(unsigned long addr)
{
	extern struct mca_table_entry __start___mca_table[];
	extern struct mca_table_entry __stop___mca_table[];

	return search_mca_table(__start___mca_table, __stop___mca_table-1, addr);
}
EXPORT_SYMBOL_GPL(mca_recover_range);
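
/*
 * Note on the table encoding (illustrative): each mca_table_entry stores
 * its start/end as offsets relative to the entry's own address, in the
 * same style as the kernel exception tables.  Adding the field's address
 * to its value reconstructs the absolute instruction address, which keeps
 * the table position-independent.
 */
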

#ifdef CONFIG_ACPI

int cpe_vector = -1;
int ia64_cpe_irq = -1;

static irqreturn_t
ia64_mca_cpe_int_handler (int cpe_irq, void *arg)
{
	static unsigned long	cpe_history[CPE_HISTORY_LENGTH];
	static int		index;
	static DEFINE_SPINLOCK(cpe_history_lock);

	IA64_MCA_DEBUG("%s: received interrupt vector = %#x on CPU %d\n",
		       __func__, cpe_irq, smp_processor_id());

	/* SAL spec states this should run w/ interrupts enabled */
	local_irq_enable();

	spin_lock(&cpe_history_lock);
	if (!cpe_poll_enabled && cpe_vector >= 0) {

		int i, count = 1; /* we know 1 happened now */
		unsigned long now = jiffies;

		for (i = 0; i < CPE_HISTORY_LENGTH; i++) {
			if (now - cpe_history[i] <= HZ)
				count++;
		}

		IA64_MCA_DEBUG(KERN_INFO "CPE threshold %d/%d\n", count, CPE_HISTORY_LENGTH);
		if (count >= CPE_HISTORY_LENGTH) {

			cpe_poll_enabled = 1;
			spin_unlock(&cpe_history_lock);
			disable_irq_nosync(local_vector_to_irq(IA64_CPE_VECTOR));

			/*
			 * Corrected errors will still be corrected, but
			 * make sure there's a log somewhere that indicates
			 * something is generating more than we can handle.
			 */
			printk(KERN_WARNING "WARNING: Switching to polling CPE handler; error records may be lost\n");

			mod_timer(&cpe_poll_timer, jiffies + MIN_CPE_POLL_INTERVAL);

			/* lock already released, get out now */
			goto out;
		} else {
			cpe_history[index++] = now;
			if (index == CPE_HISTORY_LENGTH)
				index = 0;
		}
	}
	spin_unlock(&cpe_history_lock);
out:
	/* Get the CPE error record and log it */
	ia64_mca_log_sal_error_record(SAL_INFO_TYPE_CPE);

	return IRQ_HANDLED;
}

#endif /* CONFIG_ACPI */
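
/*
 * Throttling logic above (illustrative summary): cpe_history[] keeps the
 * jiffies timestamps of the most recent CPE interrupts.  If, counting the
 * current one, CPE_HISTORY_LENGTH events have landed within the last
 * second, the vector is judged to be storming: the CPE interrupt is
 * disabled and the driver falls back to timer-driven polling via
 * cpe_poll_timer.
 */
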

#ifdef CONFIG_ACPI
/*
 * ia64_mca_register_cpev
 *
 *	Register the corrected platform error vector with SAL.
 *
 * Inputs
 *	cpev	Corrected Platform Error Vector number
 *
 * Outputs
 *	None
 */
void
ia64_mca_register_cpev (int cpev)
{
	/* Register the CPE interrupt vector with SAL */
	struct ia64_sal_retval isrv;

	isrv = ia64_sal_mc_set_params(SAL_MC_PARAM_CPE_INT, SAL_MC_PARAM_MECHANISM_INT, cpev, 0, 0);
	if (isrv.status) {
		printk(KERN_ERR "Failed to register Corrected Platform "
		       "Error interrupt vector with SAL (status %ld)\n", isrv.status);
		return;
	}

	IA64_MCA_DEBUG("%s: corrected platform error "
		       "vector %#x registered\n", __func__, cpev);
}
#endif /* CONFIG_ACPI */

/*
 * ia64_mca_cmc_vector_setup
 *
 *	Setup the corrected machine check vector register in the processor.
 *	(The interrupt is masked on boot; ia64_mca_late_init unmasks it.)
 *	This function is invoked on a per-processor basis.
 *
 * Inputs
 *	None
 *
 * Outputs
 *	None
 */
void __cpuinit
ia64_mca_cmc_vector_setup (void)
{
	cmcv_reg_t	cmcv;

	cmcv.cmcv_regval	= 0;
	cmcv.cmcv_mask		= 1;        /* Mask/disable interrupt at first */
	cmcv.cmcv_vector	= IA64_CMC_VECTOR;
	ia64_setreg(_IA64_REG_CR_CMCV, cmcv.cmcv_regval);

	IA64_MCA_DEBUG("%s: CPU %d corrected machine check vector %#x registered.\n",
		       __func__, smp_processor_id(), IA64_CMC_VECTOR);

	IA64_MCA_DEBUG("%s: CPU %d CMCV = %#016lx\n",
		       __func__, smp_processor_id(), ia64_getreg(_IA64_REG_CR_CMCV));
}

/*
 * ia64_mca_cmc_vector_disable
 *
 *	Mask the corrected machine check vector register in the processor.
 *	This function is invoked on a per-processor basis.
 *
 * Inputs
 *	dummy(unused)
 *
 * Outputs
 *	None
 */
static void
ia64_mca_cmc_vector_disable (void *dummy)
{
	cmcv_reg_t	cmcv;

	cmcv.cmcv_regval = ia64_getreg(_IA64_REG_CR_CMCV);

	cmcv.cmcv_mask = 1; /* Mask/disable interrupt */
	ia64_setreg(_IA64_REG_CR_CMCV, cmcv.cmcv_regval);

	IA64_MCA_DEBUG("%s: CPU %d corrected machine check vector %#x disabled.\n",
		       __func__, smp_processor_id(), cmcv.cmcv_vector);
}

/*
 * ia64_mca_cmc_vector_enable
 *
 *	Unmask the corrected machine check vector register in the processor.
 *	This function is invoked on a per-processor basis.
 *
 * Inputs
 *	dummy(unused)
 *
 * Outputs
 *	None
 */
static void
ia64_mca_cmc_vector_enable (void *dummy)
{
	cmcv_reg_t	cmcv;

	cmcv.cmcv_regval = ia64_getreg(_IA64_REG_CR_CMCV);

	cmcv.cmcv_mask = 0; /* Unmask/enable interrupt */
	ia64_setreg(_IA64_REG_CR_CMCV, cmcv.cmcv_regval);

	IA64_MCA_DEBUG("%s: CPU %d corrected machine check vector %#x enabled.\n",
		       __func__, smp_processor_id(), cmcv.cmcv_vector);
}

/*
 * ia64_mca_cmc_vector_disable_keventd
 *
 * Called via keventd (smp_call_function() is not safe in interrupt context) to
 * disable the cmc interrupt vector.
 */
static void
ia64_mca_cmc_vector_disable_keventd(struct work_struct *unused)
{
	on_each_cpu(ia64_mca_cmc_vector_disable, NULL, 1, 0);
}

/*
 * ia64_mca_cmc_vector_enable_keventd
 *
 * Called via keventd (smp_call_function() is not safe in interrupt context) to
 * enable the cmc interrupt vector.
 */
static void
ia64_mca_cmc_vector_enable_keventd(struct work_struct *unused)
{
	on_each_cpu(ia64_mca_cmc_vector_enable, NULL, 1, 0);
}

/*
 * ia64_mca_wakeup
 *
 *	Send an inter-cpu interrupt to wake up a particular cpu.
 *
 * Inputs  :	cpuid
 * Outputs :	None
 */
static void
ia64_mca_wakeup(int cpu)
{
	platform_send_ipi(cpu, IA64_MCA_WAKEUP_VECTOR, IA64_IPI_DM_INT, 0);
}

/*
 * ia64_mca_wakeup_all
 *
 *	Wakeup all the slave cpus which have rendez'ed previously.
 *
 * Inputs  :	None
 * Outputs :	None
 */
static void
ia64_mca_wakeup_all(void)
{
	int cpu;

	/* Clear the Rendez checkin flag for all cpus */
	for_each_online_cpu(cpu) {
		if (ia64_mc_info.imi_rendez_checkin[cpu] == IA64_MCA_RENDEZ_CHECKIN_DONE)
			ia64_mca_wakeup(cpu);
	}

}

/*
 * ia64_mca_rendez_interrupt_handler
 *
 *	This is the handler used to put slave processors into a spin loop
 *	while the monarch processor does the MCA handling, and later to wake
 *	each slave up once the monarch is done.  The state
 *	IA64_MCA_RENDEZ_CHECKIN_DONE indicates the cpu is rendez'ed in SAL.
 *	The state IA64_MCA_RENDEZ_CHECKIN_NOTDONE indicates the cpu has come
 *	out of OS rendezvous.
 *
 * Inputs  :	None
 * Outputs :	None
 */
static irqreturn_t
ia64_mca_rendez_int_handler(int rendez_irq, void *arg)
{
	unsigned long flags;
	int cpu = smp_processor_id();
	struct ia64_mca_notify_die nd =
		{ .sos = NULL, .monarch_cpu = &monarch_cpu };

	/* Mask all interrupts */
	local_irq_save(flags);
	if (notify_die(DIE_MCA_RENDZVOUS_ENTER, "MCA", get_irq_regs(),
		       (long)&nd, 0, 0) == NOTIFY_STOP)
		ia64_mca_spin(__func__);

	ia64_mc_info.imi_rendez_checkin[cpu] = IA64_MCA_RENDEZ_CHECKIN_DONE;
	/* Register with the SAL monarch that the slave has
	 * reached SAL
	 */
	ia64_sal_mc_rendez();

	if (notify_die(DIE_MCA_RENDZVOUS_PROCESS, "MCA", get_irq_regs(),
		       (long)&nd, 0, 0) == NOTIFY_STOP)
		ia64_mca_spin(__func__);

	/* Wait for the monarch cpu to exit. */
	while (monarch_cpu != -1)
		cpu_relax();	/* spin until monarch leaves */

	if (notify_die(DIE_MCA_RENDZVOUS_LEAVE, "MCA", get_irq_regs(),
		       (long)&nd, 0, 0) == NOTIFY_STOP)
		ia64_mca_spin(__func__);

	ia64_mc_info.imi_rendez_checkin[cpu] = IA64_MCA_RENDEZ_CHECKIN_NOTDONE;
	/* Enable all interrupts */
	local_irq_restore(flags);
	return IRQ_HANDLED;
}

/*
 * ia64_mca_wakeup_int_handler
 *
 *	The interrupt handler for processing the inter-cpu interrupt to the
 *	slave cpu which was spinning in the rendez loop.
 *	Since this spinning is done by turning off the interrupts and
 *	polling on the wakeup-interrupt bit in the IRR, there is
 *	nothing useful to be done in the handler.
 *
 * Inputs  :	wakeup_irq	(Wakeup-interrupt bit)
 *		arg		(Interrupt handler specific argument)
 * Outputs :	None
 */
static irqreturn_t
ia64_mca_wakeup_int_handler(int wakeup_irq, void *arg)
{
	return IRQ_HANDLED;
}

/* Function pointer for extra MCA recovery */
int (*ia64_mca_ucmc_extension)
	(void*,struct ia64_sal_os_state*)
	= NULL;

int
ia64_reg_MCA_extension(int (*fn)(void *, struct ia64_sal_os_state *))
{
	if (ia64_mca_ucmc_extension)
		return 1;

	ia64_mca_ucmc_extension = fn;
	return 0;
}

void
ia64_unreg_MCA_extension(void)
{
	if (ia64_mca_ucmc_extension)
		ia64_mca_ucmc_extension = NULL;
}

EXPORT_SYMBOL(ia64_reg_MCA_extension);
EXPORT_SYMBOL(ia64_unreg_MCA_extension);
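
/*
 * Illustrative registration sketch (my_mca_handler is hypothetical; the
 * in-tree user of this hook is the mca_drv recovery driver).  The
 * extension is handed the current MCA record buffer and SAL/OS state;
 * a nonzero return tells ia64_mca_handler() the error was recovered:
 *
 *	static int my_mca_handler(void *rec, struct ia64_sal_os_state *sos)
 *	{
 *		return 0;	(nonzero would mean "error recovered")
 *	}
 *
 *	if (ia64_reg_MCA_extension(my_mca_handler))
 *		printk(KERN_ERR "MCA extension already registered\n");
 */
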

static inline void
copy_reg(const u64 *fr, u64 fnat, u64 *tr, u64 *tnat)
{
	u64 fslot, tslot, nat;
	*tr = *fr;
	fslot = ((unsigned long)fr >> 3) & 63;
	tslot = ((unsigned long)tr >> 3) & 63;
	*tnat &= ~(1UL << tslot);
	nat = (fnat >> fslot) & 1;
	*tnat |= (nat << tslot);
}
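
/*
 * Worked example for copy_reg (illustrative): the RSE keeps one NAT bit
 * per 8-byte slot, and a register's NAT bit index within its collection
 * is given by bits 3..8 of the register's address, i.e. (addr >> 3) & 63.
 * Copying a value from address fr to tr therefore means reading bit
 * ((u64)fr >> 3) & 63 of the source NAT collection and depositing it at
 * bit ((u64)tr >> 3) & 63 of the target collection, which is exactly
 * what the function does.
 */
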

/* Change the comm field on the MCA/INIT task to include the pid that
 * was interrupted, it makes for easier debugging.  If that pid was 0
 * (swapper or nested MCA/INIT) then use the start of the previous comm
 * field suffixed with its cpu.
 */

static void
ia64_mca_modify_comm(const struct task_struct *previous_current)
{
	char *p, comm[sizeof(current->comm)];
	if (previous_current->pid)
		snprintf(comm, sizeof(comm), "%s %d",
			current->comm, previous_current->pid);
	else {
		int l;
		if ((p = strchr(previous_current->comm, ' ')))
			l = p - previous_current->comm;
		else
			l = strlen(previous_current->comm);
		snprintf(comm, sizeof(comm), "%s %*s %d",
			current->comm, l, previous_current->comm,
			task_thread_info(previous_current)->cpu);
	}
	memcpy(current->comm, comm, sizeof(current->comm));
}

/* On entry to this routine, we are running on the per cpu stack, see
 * mca_asm.h.  The original stack has not been touched by this event.  Some of
 * the original stack's registers will be in the RBS on this stack.  This stack
 * also contains a partial pt_regs and switch_stack, the rest of the data is in
 * PAL minstate.
 *
 * The first thing to do is modify the original stack to look like a blocked
 * task so we can run backtrace on the original task.  Also mark the per cpu
 * stack as current to ensure that we use the correct task state, it also means
 * that we can do backtrace on the MCA/INIT handler code itself.
 */

static struct task_struct *
ia64_mca_modify_original_stack(struct pt_regs *regs,
		const struct switch_stack *sw,
		struct ia64_sal_os_state *sos,
		const char *type)
{
	char *p;
	ia64_va va;
	extern char ia64_leave_kernel[];	/* Need asm address, not function descriptor */
	const pal_min_state_area_t *ms = sos->pal_min_state;
	struct task_struct *previous_current;
	struct pt_regs *old_regs;
	struct switch_stack *old_sw;
	unsigned size = sizeof(struct pt_regs) +
		sizeof(struct switch_stack) + 16;
	u64 *old_bspstore, *old_bsp;
	u64 *new_bspstore, *new_bsp;
	u64 old_unat, old_rnat, new_rnat, nat;
	u64 slots, loadrs = regs->loadrs;
	u64 r12 = ms->pmsa_gr[12-1], r13 = ms->pmsa_gr[13-1];
	u64 ar_bspstore = regs->ar_bspstore;
	u64 ar_bsp = regs->ar_bspstore + (loadrs >> 16);
	const u64 *bank;
	const char *msg;
	int cpu = smp_processor_id();

	previous_current = curr_task(cpu);
	set_curr_task(cpu, current);
	if ((p = strchr(current->comm, ' ')))
		*p = '\0';

	/* Best effort attempt to cope with MCA/INIT delivered while in
	 * physical mode.
	 */
	regs->cr_ipsr = ms->pmsa_ipsr;
	if (ia64_psr(regs)->dt == 0) {
		va.l = r12;
		if (va.f.reg == 0) {
			va.f.reg = 7;
			r12 = va.l;
		}
		va.l = r13;
		if (va.f.reg == 0) {
			va.f.reg = 7;
			r13 = va.l;
		}
	}
	if (ia64_psr(regs)->rt == 0) {
		va.l = ar_bspstore;
		if (va.f.reg == 0) {
			va.f.reg = 7;
			ar_bspstore = va.l;
		}
		va.l = ar_bsp;
		if (va.f.reg == 0) {
			va.f.reg = 7;
			ar_bsp = va.l;
		}
	}

	/* mca_asm.S ia64_old_stack() cannot assume that the dirty registers
	 * have been copied to the old stack, the old stack may fail the
	 * validation tests below.  So ia64_old_stack() must restore the dirty
	 * registers from the new stack.  The old and new bspstore probably
	 * have different alignments, so loadrs calculated on the old bsp
	 * cannot be used to restore from the new bsp.  Calculate a suitable
	 * loadrs for the new stack and save it in the new pt_regs, where
	 * ia64_old_stack() can get it.
	 */
	old_bspstore = (u64 *)ar_bspstore;
	old_bsp = (u64 *)ar_bsp;
	slots = ia64_rse_num_regs(old_bspstore, old_bsp);
	new_bspstore = (u64 *)((u64)current + IA64_RBS_OFFSET);
	new_bsp = ia64_rse_skip_regs(new_bspstore, slots);
	regs->loadrs = (new_bsp - new_bspstore) * 8 << 16;

	/* Verify the previous stack state before we change it */
	if (user_mode(regs)) {
		msg = "occurred in user space";
		/* previous_current is guaranteed to be valid when the task was
		 * in user space, so ...
		 */
		ia64_mca_modify_comm(previous_current);
		goto no_mod;
	}

	if (r13 != sos->prev_IA64_KR_CURRENT) {
		msg = "inconsistent previous current and r13";
		goto no_mod;
	}

	if (!mca_recover_range(ms->pmsa_iip)) {
		if ((r12 - r13) >= KERNEL_STACK_SIZE) {
			msg = "inconsistent r12 and r13";
			goto no_mod;
		}
		if ((ar_bspstore - r13) >= KERNEL_STACK_SIZE) {
			msg = "inconsistent ar.bspstore and r13";
			goto no_mod;
		}
		va.p = old_bspstore;
		if (va.f.reg < 5) {
			msg = "old_bspstore is in the wrong region";
			goto no_mod;
		}
		if ((ar_bsp - r13) >= KERNEL_STACK_SIZE) {
			msg = "inconsistent ar.bsp and r13";
			goto no_mod;
		}
		size += (ia64_rse_skip_regs(old_bspstore, slots) - old_bspstore) * 8;
		if (ar_bspstore + size > r12) {
			msg = "no room for blocked state";
			goto no_mod;
		}
	}

	ia64_mca_modify_comm(previous_current);

	/* Make the original task look blocked.  First stack a struct pt_regs,
	 * describing the state at the time of interrupt.  mca_asm.S built a
	 * partial pt_regs, copy it and fill in the blanks using minstate.
	 */
	p = (char *)r12 - sizeof(*regs);
	old_regs = (struct pt_regs *)p;
	memcpy(old_regs, regs, sizeof(*regs));
	/* If ipsr.ic then use pmsa_{iip,ipsr,ifs}, else use
	 * pmsa_{xip,xpsr,xfs}
	 */
	if (ia64_psr(regs)->ic) {
		old_regs->cr_iip = ms->pmsa_iip;
		old_regs->cr_ipsr = ms->pmsa_ipsr;
		old_regs->cr_ifs = ms->pmsa_ifs;
	} else {
		old_regs->cr_iip = ms->pmsa_xip;
		old_regs->cr_ipsr = ms->pmsa_xpsr;
		old_regs->cr_ifs = ms->pmsa_xfs;
	}
	old_regs->pr = ms->pmsa_pr;
	old_regs->b0 = ms->pmsa_br0;
	old_regs->loadrs = loadrs;
	old_regs->ar_rsc = ms->pmsa_rsc;
	old_unat = old_regs->ar_unat;
	copy_reg(&ms->pmsa_gr[1-1], ms->pmsa_nat_bits, &old_regs->r1, &old_unat);
	copy_reg(&ms->pmsa_gr[2-1], ms->pmsa_nat_bits, &old_regs->r2, &old_unat);
	copy_reg(&ms->pmsa_gr[3-1], ms->pmsa_nat_bits, &old_regs->r3, &old_unat);
	copy_reg(&ms->pmsa_gr[8-1], ms->pmsa_nat_bits, &old_regs->r8, &old_unat);
	copy_reg(&ms->pmsa_gr[9-1], ms->pmsa_nat_bits, &old_regs->r9, &old_unat);
	copy_reg(&ms->pmsa_gr[10-1], ms->pmsa_nat_bits, &old_regs->r10, &old_unat);
	copy_reg(&ms->pmsa_gr[11-1], ms->pmsa_nat_bits, &old_regs->r11, &old_unat);
	copy_reg(&ms->pmsa_gr[12-1], ms->pmsa_nat_bits, &old_regs->r12, &old_unat);
	copy_reg(&ms->pmsa_gr[13-1], ms->pmsa_nat_bits, &old_regs->r13, &old_unat);
	copy_reg(&ms->pmsa_gr[14-1], ms->pmsa_nat_bits, &old_regs->r14, &old_unat);
	copy_reg(&ms->pmsa_gr[15-1], ms->pmsa_nat_bits, &old_regs->r15, &old_unat);
	if (ia64_psr(old_regs)->bn)
		bank = ms->pmsa_bank1_gr;
	else
		bank = ms->pmsa_bank0_gr;
	copy_reg(&bank[16-16], ms->pmsa_nat_bits, &old_regs->r16, &old_unat);
	copy_reg(&bank[17-16], ms->pmsa_nat_bits, &old_regs->r17, &old_unat);
	copy_reg(&bank[18-16], ms->pmsa_nat_bits, &old_regs->r18, &old_unat);
	copy_reg(&bank[19-16], ms->pmsa_nat_bits, &old_regs->r19, &old_unat);
	copy_reg(&bank[20-16], ms->pmsa_nat_bits, &old_regs->r20, &old_unat);
	copy_reg(&bank[21-16], ms->pmsa_nat_bits, &old_regs->r21, &old_unat);
	copy_reg(&bank[22-16], ms->pmsa_nat_bits, &old_regs->r22, &old_unat);
	copy_reg(&bank[23-16], ms->pmsa_nat_bits, &old_regs->r23, &old_unat);
	copy_reg(&bank[24-16], ms->pmsa_nat_bits, &old_regs->r24, &old_unat);
	copy_reg(&bank[25-16], ms->pmsa_nat_bits, &old_regs->r25, &old_unat);
	copy_reg(&bank[26-16], ms->pmsa_nat_bits, &old_regs->r26, &old_unat);
	copy_reg(&bank[27-16], ms->pmsa_nat_bits, &old_regs->r27, &old_unat);
	copy_reg(&bank[28-16], ms->pmsa_nat_bits, &old_regs->r28, &old_unat);
	copy_reg(&bank[29-16], ms->pmsa_nat_bits, &old_regs->r29, &old_unat);
	copy_reg(&bank[30-16], ms->pmsa_nat_bits, &old_regs->r30, &old_unat);
	copy_reg(&bank[31-16], ms->pmsa_nat_bits, &old_regs->r31, &old_unat);

	/* Next stack a struct switch_stack.  mca_asm.S built a partial
	 * switch_stack, copy it and fill in the blanks using pt_regs and
	 * minstate.
	 *
	 * In the synthesized switch_stack, b0 points to ia64_leave_kernel,
	 * ar.pfs is set to 0.
	 *
	 * unwind.c::unw_unwind() does special processing for interrupt frames.
	 * It checks if the PRED_NON_SYSCALL predicate is set, if the predicate
	 * is clear then unw_unwind() does _not_ adjust bsp over pt_regs.  Not
	 * that this is documented, of course.  Set PRED_NON_SYSCALL in the
	 * switch_stack on the original stack so it will unwind correctly when
	 * unwind.c reads pt_regs.
	 *
	 * thread.ksp is updated to point to the synthesized switch_stack.
	 */
	p -= sizeof(struct switch_stack);
	old_sw = (struct switch_stack *)p;
	memcpy(old_sw, sw, sizeof(*sw));
	old_sw->caller_unat = old_unat;
	old_sw->ar_fpsr = old_regs->ar_fpsr;
	copy_reg(&ms->pmsa_gr[4-1], ms->pmsa_nat_bits, &old_sw->r4, &old_unat);
	copy_reg(&ms->pmsa_gr[5-1], ms->pmsa_nat_bits, &old_sw->r5, &old_unat);
	copy_reg(&ms->pmsa_gr[6-1], ms->pmsa_nat_bits, &old_sw->r6, &old_unat);
	copy_reg(&ms->pmsa_gr[7-1], ms->pmsa_nat_bits, &old_sw->r7, &old_unat);
	old_sw->b0 = (u64)ia64_leave_kernel;
	old_sw->b1 = ms->pmsa_br1;
	old_sw->ar_pfs = 0;
	old_sw->ar_unat = old_unat;
	old_sw->pr = old_regs->pr | (1UL << PRED_NON_SYSCALL);
	previous_current->thread.ksp = (u64)p - 16;

	/* Finally copy the original stack's registers back to its RBS.
	 * Registers from ar.bspstore through ar.bsp at the time of the event
	 * are in the current RBS, copy them back to the original stack.  The
	 * copy must be done register by register because the original bspstore
	 * and the current one have different alignments, so the saved RNAT
	 * data occurs at different places.
	 *
	 * mca_asm does cover, so the old_bsp already includes all registers at
	 * the time of MCA/INIT.  It also does flushrs, so all registers before
	 * this function have been written to backing store on the MCA/INIT
	 * stack.
	 */
	new_rnat = ia64_get_rnat(ia64_rse_rnat_addr(new_bspstore));
	old_rnat = regs->ar_rnat;
	while (slots--) {
		if (ia64_rse_is_rnat_slot(new_bspstore)) {
			new_rnat = ia64_get_rnat(new_bspstore++);
		}
		if (ia64_rse_is_rnat_slot(old_bspstore)) {
			*old_bspstore++ = old_rnat;
			old_rnat = 0;
		}
		nat = (new_rnat >> ia64_rse_slot_num(new_bspstore)) & 1UL;
		old_rnat &= ~(1UL << ia64_rse_slot_num(old_bspstore));
		old_rnat |= (nat << ia64_rse_slot_num(old_bspstore));
		*old_bspstore++ = *new_bspstore++;
	}
	old_sw->ar_bspstore = (unsigned long)old_bspstore;
	old_sw->ar_rnat = old_rnat;

	sos->prev_task = previous_current;
	return previous_current;

no_mod:
	printk(KERN_INFO "cpu %d, %s %s, original stack not modified\n",
			smp_processor_id(), type, msg);
	return previous_current;
}
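
/*
 * Resulting layout on the original kernel stack (illustrative): below the
 * interrupted task's r12 we now have [struct pt_regs][struct switch_stack],
 * with previous_current->thread.ksp pointing 16 bytes below the
 * switch_stack, exactly as if the task had blocked in the kernel -- which
 * is what lets the ordinary unwinder backtrace it.
 */
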

/* The monarch/slave interaction is based on monarch_cpu and requires that all
 * slaves have entered rendezvous before the monarch leaves.  If any cpu has
 * not entered rendezvous yet then wait a bit.  The assumption is that any
 * slave that has not rendezvoused after a reasonable time is never going to do
 * so.  In this context, slave includes cpus that respond to the MCA rendezvous
 * interrupt, as well as cpus that receive the INIT slave event.
 */

static void
ia64_wait_for_slaves(int monarch, const char *type)
{
	int c, i, wait;

	/*
	 * wait 5 seconds total for slaves (arbitrary)
	 */
	for (i = 0; i < 5000; i++) {
		wait = 0;
		for_each_online_cpu(c) {
			if (c == monarch)
				continue;
			if (ia64_mc_info.imi_rendez_checkin[c]
					== IA64_MCA_RENDEZ_CHECKIN_NOTDONE) {
				udelay(1000);		/* short wait */
				wait = 1;
				break;
			}
		}
		if (!wait)
			goto all_in;
	}

	/*
	 * Maybe slave(s) dead.  Print buffered messages immediately.
	 */
	ia64_mlogbuf_finish(0);
	mprintk(KERN_INFO "OS %s slave did not rendezvous on cpu", type);
	for_each_online_cpu(c) {
		if (c == monarch)
			continue;
		if (ia64_mc_info.imi_rendez_checkin[c] == IA64_MCA_RENDEZ_CHECKIN_NOTDONE)
			mprintk(" %d", c);
	}
	mprintk("\n");
	return;

all_in:
	mprintk(KERN_INFO "All OS %s slaves have reached rendezvous\n", type);
	return;
}

/* mca_insert_tr
 *
 *	Switch rid when a TR reload is needed.
 *	iord: 0x1: itr, 0x2: dtr
 *
 */
static void mca_insert_tr(u64 iord)
{

	int i;
	u64 old_rr;
	struct ia64_tr_entry *p;
	unsigned long psr;
	int cpu = smp_processor_id();

	psr = ia64_clear_ic();
	for (i = IA64_TR_ALLOC_BASE; i < IA64_TR_ALLOC_MAX; i++) {
		p = &__per_cpu_idtrs[cpu][iord-1][i];
		if (p->pte & 0x1) {
			old_rr = ia64_get_rr(p->ifa);
			if (old_rr != p->rr) {
				ia64_set_rr(p->ifa, p->rr);
				ia64_srlz_d();
			}
			ia64_ptr(iord, p->ifa, p->itir >> 2);
			ia64_srlz_i();
			if (iord & 0x1) {
				ia64_itr(0x1, i, p->ifa, p->pte, p->itir >> 2);
				ia64_srlz_i();
			}
			if (iord & 0x2) {
				ia64_itr(0x2, i, p->ifa, p->pte, p->itir >> 2);
				ia64_srlz_i();
			}
			if (old_rr != p->rr) {
				ia64_set_rr(p->ifa, old_rr);
				ia64_srlz_d();
			}
		}
	}
	ia64_set_psr(psr);
}
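
/*
 * Sequence note (illustrative): for each valid tracked translation the
 * loop above temporarily installs the entry's region register value,
 * purges any stale translation for that address (ia64_ptr), re-inserts
 * the instruction and/or data TR (ia64_itr), and then restores the
 * original region register -- all with interruption collection cleared
 * (ia64_clear_ic) so the TLB operations cannot be interrupted mid-sequence.
 */
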

/*
 * ia64_mca_handler
 *
 *	This is the uncorrectable machine check handler called from the OS_MCA
 *	dispatch code, which is in turn called from SAL_CHECK().
 *	This is the place where the core of OS MCA handling is done.
 *	Right now the logs are extracted and displayed in a well-defined
 *	format.  This handler code is supposed to be run only on the
 *	monarch processor.  Once the monarch is done with MCA handling,
 *	further MCA logging is enabled by clearing the logs.
 *	The monarch also has the duty of sending wakeup-IPIs to pull the
 *	slave processors out of the rendezvous spinloop.
 *
 *	If multiple processors call into OS_MCA, the first will become
 *	the monarch.  Subsequent cpus will be recorded in the mca_cpu
 *	bitmask.  After the first monarch has processed its MCA, it
 *	will wake up the next cpu in the mca_cpu bitmask and then go
 *	into the rendezvous loop.  When all processors have serviced
 *	their MCA, the last monarch frees up the rest of the processors.
 */
void
ia64_mca_handler(struct pt_regs *regs, struct switch_stack *sw,
		 struct ia64_sal_os_state *sos)
{
	int recover, cpu = smp_processor_id();
	struct task_struct *previous_current;
	struct ia64_mca_notify_die nd =
		{ .sos = sos, .monarch_cpu = &monarch_cpu };
	static atomic_t mca_count;
	static cpumask_t mca_cpu;

	if (atomic_add_return(1, &mca_count) == 1) {
		monarch_cpu = cpu;
		sos->monarch = 1;
	} else {
		cpu_set(cpu, mca_cpu);
		sos->monarch = 0;
	}
	mprintk(KERN_INFO "Entered OS MCA handler. PSP=%lx cpu=%d "
		"monarch=%ld\n", sos->proc_state_param, cpu, sos->monarch);

	previous_current = ia64_mca_modify_original_stack(regs, sw, sos, "MCA");

	if (notify_die(DIE_MCA_MONARCH_ENTER, "MCA", regs, (long)&nd, 0, 0)
			== NOTIFY_STOP)
		ia64_mca_spin(__func__);

	ia64_mc_info.imi_rendez_checkin[cpu] = IA64_MCA_RENDEZ_CHECKIN_CONCURRENT_MCA;
	if (sos->monarch) {
		ia64_wait_for_slaves(cpu, "MCA");

		/* Wakeup all the processors which are spinning in the
		 * rendezvous loop.  They will leave SAL, then spin in the OS
		 * with interrupts disabled until this monarch cpu leaves the
		 * MCA handler.  That gets control back to the OS so we can
		 * backtrace the other cpus, backtrace when spinning in SAL
		 * does not work.
		 */
		ia64_mca_wakeup_all();
	} else {
		while (cpu_isset(cpu, mca_cpu))
			cpu_relax();	/* spin until monarch wakes us */
	}

	if (notify_die(DIE_MCA_MONARCH_PROCESS, "MCA", regs, (long)&nd, 0, 0)
			== NOTIFY_STOP)
		ia64_mca_spin(__func__);

	/* Get the MCA error record and log it */
	ia64_mca_log_sal_error_record(SAL_INFO_TYPE_MCA);

	/* MCA error recovery */
	recover = (ia64_mca_ucmc_extension
		&& ia64_mca_ucmc_extension(
			IA64_LOG_CURR_BUFFER(SAL_INFO_TYPE_MCA),
			sos));

	if (recover) {
		sal_log_record_header_t *rh = IA64_LOG_CURR_BUFFER(SAL_INFO_TYPE_MCA);
		rh->severity = sal_log_severity_corrected;
		ia64_sal_clear_state_info(SAL_INFO_TYPE_MCA);
		sos->os_status = IA64_MCA_CORRECTED;
	} else {
		/* Dump buffered message to console */
		ia64_mlogbuf_finish(1);
	}

	if (__get_cpu_var(ia64_mca_tr_reload)) {
		mca_insert_tr(0x1); /*Reload dynamic itrs*/
		mca_insert_tr(0x2); /*Reload dynamic dtrs*/
	}

	if (notify_die(DIE_MCA_MONARCH_LEAVE, "MCA", regs, (long)&nd, 0, recover)
			== NOTIFY_STOP)
		ia64_mca_spin(__func__);

	if (atomic_dec_return(&mca_count) > 0) {
		int i;

		/* wake up the next monarch cpu,
		 * and put this cpu in the rendez loop.
		 */
		for_each_online_cpu(i) {
			if (cpu_isset(i, mca_cpu)) {
				monarch_cpu = i;
				cpu_clear(i, mca_cpu);	/* wake next cpu */
				while (monarch_cpu != -1)
					cpu_relax();	/* spin until last cpu leaves */
				set_curr_task(cpu, previous_current);
				ia64_mc_info.imi_rendez_checkin[cpu]
						= IA64_MCA_RENDEZ_CHECKIN_NOTDONE;
				return;
			}
		}
	}
	set_curr_task(cpu, previous_current);
	ia64_mc_info.imi_rendez_checkin[cpu] = IA64_MCA_RENDEZ_CHECKIN_NOTDONE;
	monarch_cpu = -1;	/* This frees the slaves and previous monarchs */
}
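
/*
 * Handoff sketch for three concurrent MCAs (illustrative): cpu A arrives
 * first and becomes monarch; cpus B and C set their bits in mca_cpu and
 * spin.  A processes its MCA, clears B's bit (making B the new monarch)
 * and spins on monarch_cpu.  B repeats this for C.  C, the last monarch,
 * finds mca_count drained and sets monarch_cpu = -1, which releases every
 * cpu still spinning -- A, B, and all the rendezvous slaves.
 */
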

static DECLARE_WORK(cmc_disable_work, ia64_mca_cmc_vector_disable_keventd);
static DECLARE_WORK(cmc_enable_work, ia64_mca_cmc_vector_enable_keventd);

/*
 * ia64_mca_cmc_int_handler
 *
 *	This is the corrected machine check interrupt handler.
 *	Right now the logs are extracted and displayed in a well-defined
 *	format.
 *
 * Inputs
 *	interrupt number
 *	client data arg ptr
 *
 * Outputs
 *	None
 */
static irqreturn_t
ia64_mca_cmc_int_handler(int cmc_irq, void *arg)
{
	static unsigned long	cmc_history[CMC_HISTORY_LENGTH];
	static int		index;
	static DEFINE_SPINLOCK(cmc_history_lock);

	IA64_MCA_DEBUG("%s: received interrupt vector = %#x on CPU %d\n",
		       __func__, cmc_irq, smp_processor_id());

	/* SAL spec states this should run w/ interrupts enabled */
	local_irq_enable();

	spin_lock(&cmc_history_lock);
	if (!cmc_polling_enabled) {
		int i, count = 1; /* we know 1 happened now */
		unsigned long now = jiffies;

		for (i = 0; i < CMC_HISTORY_LENGTH; i++) {
			if (now - cmc_history[i] <= HZ)
				count++;
		}

		IA64_MCA_DEBUG(KERN_INFO "CMC threshold %d/%d\n", count, CMC_HISTORY_LENGTH);
		if (count >= CMC_HISTORY_LENGTH) {

			cmc_polling_enabled = 1;
			spin_unlock(&cmc_history_lock);
			/* If we're being hit with CMC interrupts, we won't
			 * ever execute the schedule_work() below.  Need to
			 * disable CMC interrupts on this processor now.
			 */
			ia64_mca_cmc_vector_disable(NULL);
			schedule_work(&cmc_disable_work);

			/*
			 * Corrected errors will still be corrected, but
			 * make sure there's a log somewhere that indicates
			 * something is generating more than we can handle.
			 */
			printk(KERN_WARNING "WARNING: Switching to polling CMC handler; error records may be lost\n");

			mod_timer(&cmc_poll_timer, jiffies + CMC_POLL_INTERVAL);

			/* lock already released, get out now */
			goto out;
		} else {
			cmc_history[index++] = now;
			if (index == CMC_HISTORY_LENGTH)
				index = 0;
		}
	}
	spin_unlock(&cmc_history_lock);
out:
	/* Get the CMC error record and log it */
	ia64_mca_log_sal_error_record(SAL_INFO_TYPE_CMC);

	return IRQ_HANDLED;
}

/*
 * ia64_mca_cmc_int_caller
 *
 *	Triggered by sw interrupt from CMC polling routine.  Calls
 *	real interrupt handler and either triggers a sw interrupt
 *	on the next cpu or does cleanup at the end.
 *
 * Inputs
 *	interrupt number
 *	client data arg ptr
 * Outputs
 *	handled
 */
static irqreturn_t
ia64_mca_cmc_int_caller(int cmc_irq, void *arg)
{
	static int start_count = -1;
	unsigned int cpuid;

	cpuid = smp_processor_id();

	/* If first cpu, update count */
	if (start_count == -1)
		start_count = IA64_LOG_COUNT(SAL_INFO_TYPE_CMC);

	ia64_mca_cmc_int_handler(cmc_irq, arg);

	for (++cpuid ; cpuid < NR_CPUS && !cpu_online(cpuid) ; cpuid++);

	if (cpuid < NR_CPUS) {
		platform_send_ipi(cpuid, IA64_CMCP_VECTOR, IA64_IPI_DM_INT, 0);
	} else {
		/* If no log record, switch out of polling mode */
		if (start_count == IA64_LOG_COUNT(SAL_INFO_TYPE_CMC)) {

			printk(KERN_WARNING "Returning to interrupt driven CMC handler\n");
			schedule_work(&cmc_enable_work);
			cmc_polling_enabled = 0;

		} else {

			mod_timer(&cmc_poll_timer, jiffies + CMC_POLL_INTERVAL);
		}

		start_count = -1;
	}

	return IRQ_HANDLED;
}
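
/*
 * Polling cascade (illustrative): cmc_poll_timer fires and sends
 * IA64_CMCP_VECTOR to the first online cpu.  Each cpu runs the handler,
 * then forwards the same IPI to the next online cpu.  The last cpu
 * compares the log count with the count recorded by the first cpu; if
 * nothing new was logged during the sweep, interrupt-driven mode is
 * restored, otherwise the timer is re-armed for another sweep.
 */
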

/*
 * ia64_mca_cmc_poll
 *
 *	Poll for Corrected Machine Checks (CMCs)
 *
 * Inputs:	dummy(unused)
 * Outputs:	None
 *
 */
static void
ia64_mca_cmc_poll (unsigned long dummy)
{
	/* Trigger a CMC interrupt cascade */
	platform_send_ipi(first_cpu(cpu_online_map), IA64_CMCP_VECTOR, IA64_IPI_DM_INT, 0);
}

/*
 * ia64_mca_cpe_int_caller
 *
 *	Triggered by sw interrupt from CPE polling routine.  Calls
 *	real interrupt handler and either triggers a sw interrupt
 *	on the next cpu or does cleanup at the end.
 *
 * Inputs
 *	interrupt number
 *	client data arg ptr
 * Outputs
 *	handled
 */
#ifdef CONFIG_ACPI

static irqreturn_t
ia64_mca_cpe_int_caller(int cpe_irq, void *arg)
{
	static int start_count = -1;
	static int poll_time = MIN_CPE_POLL_INTERVAL;
	unsigned int cpuid;

	cpuid = smp_processor_id();

	/* If first cpu, update count */
	if (start_count == -1)
		start_count = IA64_LOG_COUNT(SAL_INFO_TYPE_CPE);

	ia64_mca_cpe_int_handler(cpe_irq, arg);

	for (++cpuid ; cpuid < NR_CPUS && !cpu_online(cpuid) ; cpuid++);

	if (cpuid < NR_CPUS) {
		platform_send_ipi(cpuid, IA64_CPEP_VECTOR, IA64_IPI_DM_INT, 0);
	} else {
		/*
		 * If a log was recorded, increase our polling frequency,
		 * otherwise, backoff or return to interrupt mode.
		 */
		if (start_count != IA64_LOG_COUNT(SAL_INFO_TYPE_CPE)) {
			poll_time = max(MIN_CPE_POLL_INTERVAL, poll_time / 2);
		} else if (cpe_vector < 0) {
			poll_time = min(MAX_CPE_POLL_INTERVAL, poll_time * 2);
		} else {
			poll_time = MIN_CPE_POLL_INTERVAL;

			printk(KERN_WARNING "Returning to interrupt driven CPE handler\n");
			enable_irq(local_vector_to_irq(IA64_CPE_VECTOR));
			cpe_poll_enabled = 0;
		}

		if (cpe_poll_enabled)
			mod_timer(&cpe_poll_timer, jiffies + poll_time);
		start_count = -1;
	}

	return IRQ_HANDLED;
}

/*
 * ia64_mca_cpe_poll
 *
 *	Poll for Corrected Platform Errors (CPEs), trigger interrupt
 *	on first cpu, from there it will trickle through all the cpus.
 *
 * Inputs:	dummy(unused)
 * Outputs:	None
 *
 */
static void
ia64_mca_cpe_poll (unsigned long dummy)
{
	/* Trigger a CPE interrupt cascade */
	platform_send_ipi(first_cpu(cpu_online_map), IA64_CPEP_VECTOR, IA64_IPI_DM_INT, 0);
}

#endif /* CONFIG_ACPI */
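
/*
 * Backoff policy above (illustrative): each completed polling sweep
 * halves the CPE poll interval (down to MIN_CPE_POLL_INTERVAL) when new
 * records were found, doubles it (up to MAX_CPE_POLL_INTERVAL) when the
 * platform has no CPE interrupt at all, and drops back to interrupt
 * mode when a quiet sweep shows the interrupt storm has passed.
 */
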

static int
default_monarch_init_process(struct notifier_block *self, unsigned long val, void *data)
{
	int c;
	struct task_struct *g, *t;
	if (val != DIE_INIT_MONARCH_PROCESS)
		return NOTIFY_DONE;
#ifdef CONFIG_KEXEC
	if (atomic_read(&kdump_in_progress))
		return NOTIFY_DONE;
#endif

	/*
	 * FIXME: mlogbuf will brim over with INIT stack dumps.
	 * To enable show_stack from INIT, we use oops_in_progress which should
	 * be used in real oops.  This may leave something wrong after INIT.
	 */
	BREAK_LOGLEVEL(console_loglevel);
	ia64_mlogbuf_dump_from_init();

	printk(KERN_ERR "Processes interrupted by INIT -");
	for_each_online_cpu(c) {
		struct ia64_sal_os_state *s;
		t = __va(__per_cpu_mca[c] + IA64_MCA_CPU_INIT_STACK_OFFSET);
		s = (struct ia64_sal_os_state *)((char *)t + MCA_SOS_OFFSET);
		g = s->prev_task;
		if (g) {
			if (g->pid)
				printk(" %d", g->pid);
			else
				printk(" %d (cpu %d task 0x%p)", g->pid, task_cpu(g), g);
		}
	}
	printk("\n\n");
	if (read_trylock(&tasklist_lock)) {
		do_each_thread (g, t) {
			printk("\nBacktrace of pid %d (%s)\n", t->pid, t->comm);
			show_stack(t, NULL);
		} while_each_thread (g, t);
		read_unlock(&tasklist_lock);
	}
	/* FIXME: This will not restore zapped printk locks. */
	RESTORE_LOGLEVEL(console_loglevel);
	return NOTIFY_DONE;
}
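
/*
 * Note (illustrative): the loop above finds each cpu's interrupted task
 * without taking any locks -- every cpu's INIT event saved its SAL/OS
 * state at a fixed offset inside that cpu's MCA/INIT area
 * (__per_cpu_mca[c] + IA64_MCA_CPU_INIT_STACK_OFFSET + MCA_SOS_OFFSET),
 * so the monarch can simply read sos->prev_task for every online cpu.
 */
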

/*
 * C portion of the OS INIT handler
 *
 * Called from ia64_os_init_dispatch
 *
 * Inputs: pointer to pt_regs where processor info was saved.  SAL/OS state for
 * this event.  This code is used for both monarch and slave INIT events, see
 * sos->monarch.
 *
 * All INIT events switch to the INIT stack and change the previous process to
 * blocked status.  If one of the INIT events is the monarch then we are
 * probably processing the nmi button/command.  Use the monarch cpu to dump all
 * the processes.  The slave INIT events all spin until the monarch cpu
 * returns.  We can also get INIT slave events for MCA, in which case the MCA
 * process is the monarch.
 */

void
ia64_init_handler(struct pt_regs *regs, struct switch_stack *sw,
		  struct ia64_sal_os_state *sos)
{
	static atomic_t slaves;
	static atomic_t monarchs;
	struct task_struct *previous_current;
	int cpu = smp_processor_id();
	struct ia64_mca_notify_die nd =
		{ .sos = sos, .monarch_cpu = &monarch_cpu };

	(void) notify_die(DIE_INIT_ENTER, "INIT", regs, (long)&nd, 0, 0);

	mprintk(KERN_INFO "Entered OS INIT handler. PSP=%lx cpu=%d monarch=%ld\n",
		sos->proc_state_param, cpu, sos->monarch);
	salinfo_log_wakeup(SAL_INFO_TYPE_INIT, NULL, 0, 0);

	previous_current = ia64_mca_modify_original_stack(regs, sw, sos, "INIT");
	sos->os_status = IA64_INIT_RESUME;

	/* FIXME: Workaround for broken proms that drive all INIT events as
	 * slaves.  The last slave that enters is promoted to be a monarch.
	 * Remove this code in September 2006, that gives platforms a year to
	 * fix their proms and get their customers updated.
	 */
	if (!sos->monarch && atomic_add_return(1, &slaves) == num_online_cpus()) {
		mprintk(KERN_WARNING "%s: Promoting cpu %d to monarch.\n",
			__func__, cpu);
		atomic_dec(&slaves);
		sos->monarch = 1;
	}

	/* FIXME: Workaround for broken proms that drive all INIT events as
	 * monarchs.  Second and subsequent monarchs are demoted to slaves.
	 * Remove this code in September 2006, that gives platforms a year to
	 * fix their proms and get their customers updated.
	 */
	if (sos->monarch && atomic_add_return(1, &monarchs) > 1) {
		mprintk(KERN_WARNING "%s: Demoting cpu %d to slave.\n",
			__func__, cpu);
		atomic_dec(&monarchs);
		sos->monarch = 0;
	}

	if (!sos->monarch) {
		ia64_mc_info.imi_rendez_checkin[cpu] = IA64_MCA_RENDEZ_CHECKIN_INIT;
		while (monarch_cpu == -1)
			cpu_relax();	/* spin until monarch enters */
		if (notify_die(DIE_INIT_SLAVE_ENTER, "INIT", regs, (long)&nd, 0, 0)
				== NOTIFY_STOP)
			ia64_mca_spin(__func__);
		if (notify_die(DIE_INIT_SLAVE_PROCESS, "INIT", regs, (long)&nd, 0, 0)
				== NOTIFY_STOP)
			ia64_mca_spin(__func__);
		while (monarch_cpu != -1)
			cpu_relax();	/* spin until monarch leaves */
		if (notify_die(DIE_INIT_SLAVE_LEAVE, "INIT", regs, (long)&nd, 0, 0)
				== NOTIFY_STOP)
			ia64_mca_spin(__func__);
		mprintk("Slave on cpu %d returning to normal service.\n", cpu);
		set_curr_task(cpu, previous_current);
		ia64_mc_info.imi_rendez_checkin[cpu] = IA64_MCA_RENDEZ_CHECKIN_NOTDONE;
		atomic_dec(&slaves);
		return;
	}

	monarch_cpu = cpu;
	if (notify_die(DIE_INIT_MONARCH_ENTER, "INIT", regs, (long)&nd, 0, 0)
			== NOTIFY_STOP)
		ia64_mca_spin(__func__);

	/*
	 * Wait for a bit.  On some machines (e.g., HP's zx2000 and zx6000),
	 * INIT can be generated via the BMC's command-line interface, but
	 * since the console is on the same serial line, the user will need
	 * some time to switch out of the BMC before the dump begins.
	 */
	mprintk("Delaying for 5 seconds...\n");
	udelay(5*1000000);
	ia64_wait_for_slaves(cpu, "INIT");
	/* If nobody intercepts DIE_INIT_MONARCH_PROCESS then we drop through
	 * to default_monarch_init_process() above and just print all the
	 * tasks.
	 */
	if (notify_die(DIE_INIT_MONARCH_PROCESS, "INIT", regs, (long)&nd, 0, 0)
			== NOTIFY_STOP)
		ia64_mca_spin(__func__);
	if (notify_die(DIE_INIT_MONARCH_LEAVE, "INIT", regs, (long)&nd, 0, 0)
			== NOTIFY_STOP)
		ia64_mca_spin(__func__);
	mprintk("\nINIT dump complete.  Monarch on cpu %d returning to normal service.\n", cpu);
	atomic_dec(&monarchs);
	set_curr_task(cpu, previous_current);
	monarch_cpu = -1;
	return;
}
1732
static int __init
ia64_mca_disable_cpe_polling(char *str)
{
	cpe_poll_enabled = 0;
	return 1;
}

__setup("disable_cpe_poll", ia64_mca_disable_cpe_polling);

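/* Usage example: booting with "disable_cpe_poll" on the kernel command line
 * clears cpe_poll_enabled, so ia64_mca_late_init() will not fall back to the
 * CPE polling timer on platforms without CPEI support.
 */
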
static struct irqaction cmci_irqaction = {
	.handler = ia64_mca_cmc_int_handler,
	.flags   = IRQF_DISABLED,
	.name    = "cmc_hndlr"
};

static struct irqaction cmcp_irqaction = {
	.handler = ia64_mca_cmc_int_caller,
	.flags   = IRQF_DISABLED,
	.name    = "cmc_poll"
};

static struct irqaction mca_rdzv_irqaction = {
	.handler = ia64_mca_rendez_int_handler,
	.flags   = IRQF_DISABLED,
	.name    = "mca_rdzv"
};

static struct irqaction mca_wkup_irqaction = {
	.handler = ia64_mca_wakeup_int_handler,
	.flags   = IRQF_DISABLED,
	.name    = "mca_wkup"
};

#ifdef CONFIG_ACPI
static struct irqaction mca_cpe_irqaction = {
	.handler = ia64_mca_cpe_int_handler,
	.flags   = IRQF_DISABLED,
	.name    = "cpe_hndlr"
};

static struct irqaction mca_cpep_irqaction = {
	.handler = ia64_mca_cpe_int_caller,
	.flags   = IRQF_DISABLED,
	.name    = "cpe_poll"
};
#endif /* CONFIG_ACPI */

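/* These irqactions are wired up later: the CMC, CMC poll, rendezvous and
 * wakeup handlers (and the CPE poll caller) via register_percpu_irq() in
 * ia64_mca_init(), and cpe_hndlr via setup_irq() in ia64_mca_late_init()
 * when the platform provides a CPEI vector.
 */
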
/* Minimal format of the MCA/INIT stacks.  The pseudo processes that run on
 * these stacks can never sleep, they cannot return from the kernel to user
 * space, and they do not appear in a normal ps listing.  So there is no need
 * to format most of the fields.
 */

static void __cpuinit
format_mca_init_stack(void *mca_data, unsigned long offset,
		const char *type, int cpu)
{
	struct task_struct *p = (struct task_struct *)((char *)mca_data + offset);
	struct thread_info *ti;

	memset(p, 0, KERNEL_STACK_SIZE);
	ti = task_thread_info(p);
	ti->flags = _TIF_MCA_INIT;
	ti->preempt_count = 1;
	ti->task = p;
	ti->cpu = cpu;
	p->stack = ti;
	p->state = TASK_UNINTERRUPTIBLE;
	cpu_set(cpu, p->cpus_allowed);
	INIT_LIST_HEAD(&p->tasks);
	p->parent = p->real_parent = p->group_leader = p;
	INIT_LIST_HEAD(&p->children);
	INIT_LIST_HEAD(&p->sibling);
	strncpy(p->comm, type, sizeof(p->comm)-1);
}

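/* format_mca_init_stack() above is used by ia64_mca_cpu_init() to carve an
 * "MCA" and an "INIT" pseudo task out of each cpu's ia64_mca_cpu area;
 * TASK_UNINTERRUPTIBLE plus _TIF_MCA_INIT presumably keeps the scheduler
 * and signal code away from these tasks.
 */
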
/* Caller prevents this from being called after init */
static void * __init_refok mca_bootmem(void)
{
	return __alloc_bootmem(sizeof(struct ia64_mca_cpu),
			       KERNEL_STACK_SIZE, 0);
}

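/* A sketch of the allocation strategy, inferred from the first_time flag
 * below: the very first allocation happens at boot, when the page allocator
 * is presumably not usable yet, hence bootmem; cpus brought online later
 * allocate with alloc_pages_node() instead.
 */
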
/* Do per-CPU MCA-related initialization. */
void __cpuinit
ia64_mca_cpu_init(void *cpu_data)
{
	void *pal_vaddr;
	void *data;
	long sz = sizeof(struct ia64_mca_cpu);
	int cpu = smp_processor_id();
	static int first_time = 1;

	/*
	 * The structure will already be allocated if this cpu has been
	 * online and was then offlined.
	 */
	if (__per_cpu_mca[cpu]) {
		data = __va(__per_cpu_mca[cpu]);
	} else {
		if (first_time) {
			data = mca_bootmem();
			first_time = 0;
		} else
			data = page_address(alloc_pages_node(numa_node_id(),
					GFP_KERNEL, get_order(sz)));
		if (!data)
			panic("Could not allocate MCA memory for cpu %d\n",
			      cpu);
	}
	format_mca_init_stack(data, offsetof(struct ia64_mca_cpu, mca_stack),
		"MCA", cpu);
	format_mca_init_stack(data, offsetof(struct ia64_mca_cpu, init_stack),
		"INIT", cpu);
	__get_cpu_var(ia64_mca_data) = __per_cpu_mca[cpu] = __pa(data);

	/*
	 * Stash away a copy of the PTE needed to map the per-CPU page.
	 * We may need it during MCA recovery.
	 */
	__get_cpu_var(ia64_mca_per_cpu_pte) =
		pte_val(mk_pte_phys(__pa(cpu_data), PAGE_KERNEL));

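	/* Presumably this stashed PTE lets the low-level MCA code remap the
	 * per-cpu area itself when the normal page tables cannot be trusted;
	 * only the stashing side lives in this function.
	 */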
	/*
	 * Also, stash away a copy of the PAL address and the PTE
	 * needed to map it.
	 */
	pal_vaddr = efi_get_pal_addr();
	if (!pal_vaddr)
		return;
	__get_cpu_var(ia64_mca_pal_base) =
		GRANULEROUNDDOWN((unsigned long) pal_vaddr);
	__get_cpu_var(ia64_mca_pal_pte) = pte_val(mk_pte_phys(__pa(pal_vaddr),
							      PAGE_KERNEL));
}

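/* CPU hotplug support: when a cpu comes (back) online, re-enable its CMC
 * interrupt vector via an IPI, unless we have already fallen back to CMC
 * polling mode.  mca_cpu_callback is registered as a hotcpu notifier in
 * ia64_mca_late_init().
 */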
static void __cpuinit ia64_mca_cmc_vector_adjust(void *dummy)
{
	unsigned long flags;

	local_irq_save(flags);
	if (!cmc_polling_enabled)
		ia64_mca_cmc_vector_enable(NULL);
	local_irq_restore(flags);
}

static int __cpuinit mca_cpu_callback(struct notifier_block *nfb,
				      unsigned long action,
				      void *hcpu)
{
	int hotcpu = (unsigned long) hcpu;

	switch (action) {
	case CPU_ONLINE:
	case CPU_ONLINE_FROZEN:
		smp_call_function_single(hotcpu, ia64_mca_cmc_vector_adjust,
					 NULL, 1, 0);
		break;
	}
	return NOTIFY_OK;
}

static struct notifier_block mca_cpu_notifier __cpuinitdata = {
	.notifier_call = mca_cpu_callback
};

/*
 * ia64_mca_init
 *
 * Do all the system level mca specific initialization.
 *
 *	1. Register spinloop and wakeup request interrupt vectors
 *
 *	2. Register OS_MCA handler entry point
 *
 *	3. Register OS_INIT handler entry point
 *
 *	4. Initialize MCA/CMC/INIT related log buffers maintained by the OS.
 *
 * Note that this initialization is done very early, before some kernel
 * services are available.
 *
 * Inputs  : None
 *
 * Outputs : None
 */
void __init
ia64_mca_init(void)
{
	ia64_fptr_t *init_hldlr_ptr_monarch = (ia64_fptr_t *)ia64_os_init_dispatch_monarch;
	ia64_fptr_t *init_hldlr_ptr_slave = (ia64_fptr_t *)ia64_os_init_dispatch_slave;
	ia64_fptr_t *mca_hldlr_ptr = (ia64_fptr_t *)ia64_os_mca_dispatch;
	int i;
	s64 rc;
	struct ia64_sal_retval isrv;
	u64 timeout = IA64_MCA_RENDEZ_TIMEOUT;	/* platform specific */
	static struct notifier_block default_init_monarch_nb = {
		.notifier_call = default_monarch_init_process,
		.priority = 0 /* we need to be notified last */
	};

	IA64_MCA_DEBUG("%s: begin\n", __func__);

	/* Clear the Rendez checkin flag for all cpus */
	for (i = 0; i < NR_CPUS; i++)
		ia64_mc_info.imi_rendez_checkin[i] = IA64_MCA_RENDEZ_CHECKIN_NOTDONE;

	/*
	 * Register the rendezvous spinloop and wakeup mechanism with SAL
	 */

	/* Register the rendezvous interrupt vector with SAL */
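	/* SAL signals an unacceptable timeout with status -2 and suggests an
	 * alternative in v0; the loop below retries with the suggested value
	 * until the registration either succeeds or fails hard.
	 */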
	while (1) {
		isrv = ia64_sal_mc_set_params(SAL_MC_PARAM_RENDEZ_INT,
					      SAL_MC_PARAM_MECHANISM_INT,
					      IA64_MCA_RENDEZ_VECTOR,
					      timeout,
					      SAL_MC_PARAM_RZ_ALWAYS);
		rc = isrv.status;
		if (rc == 0)
			break;
		if (rc == -2) {
			printk(KERN_INFO "Increasing MCA rendezvous timeout from "
				"%ld to %ld milliseconds\n", timeout, isrv.v0);
			timeout = isrv.v0;
			(void) notify_die(DIE_MCA_NEW_TIMEOUT, "MCA", NULL, timeout, 0, 0);
			continue;
		}
		printk(KERN_ERR "Failed to register rendezvous interrupt "
		       "with SAL (status %ld)\n", rc);
		return;
	}

	/* Register the wakeup interrupt vector with SAL */
	isrv = ia64_sal_mc_set_params(SAL_MC_PARAM_RENDEZ_WAKEUP,
				      SAL_MC_PARAM_MECHANISM_INT,
				      IA64_MCA_WAKEUP_VECTOR,
				      0, 0);
	rc = isrv.status;
	if (rc) {
		printk(KERN_ERR "Failed to register wakeup interrupt with SAL "
		       "(status %ld)\n", rc);
		return;
	}

	IA64_MCA_DEBUG("%s: registered MCA rendezvous spinloop and wakeup mech.\n", __func__);

	ia64_mc_info.imi_mca_handler = ia64_tpa(mca_hldlr_ptr->fp);
	/*
	 * XXX - disable SAL checksum by setting size to 0; should be
	 * ia64_tpa(ia64_os_mca_dispatch_end) - ia64_tpa(ia64_os_mca_dispatch);
	 */
	ia64_mc_info.imi_mca_handler_size = 0;

	/* Register the os mca handler with SAL */
	if ((rc = ia64_sal_set_vectors(SAL_VECTOR_OS_MCA,
				       ia64_mc_info.imi_mca_handler,
				       ia64_tpa(mca_hldlr_ptr->gp),
				       ia64_mc_info.imi_mca_handler_size,
				       0, 0, 0)))
	{
		printk(KERN_ERR "Failed to register OS MCA handler with SAL "
		       "(status %ld)\n", rc);
		return;
	}

	IA64_MCA_DEBUG("%s: registered OS MCA handler with SAL at 0x%lx, gp = 0x%lx\n", __func__,
		       ia64_mc_info.imi_mca_handler, ia64_tpa(mca_hldlr_ptr->gp));

	/*
	 * XXX - disable SAL checksum by setting size to 0; should be
	 * size of the actual init handler in mca_asm.S.
	 */
	ia64_mc_info.imi_monarch_init_handler = ia64_tpa(init_hldlr_ptr_monarch->fp);
	ia64_mc_info.imi_monarch_init_handler_size = 0;
	ia64_mc_info.imi_slave_init_handler = ia64_tpa(init_hldlr_ptr_slave->fp);
	ia64_mc_info.imi_slave_init_handler_size = 0;

	IA64_MCA_DEBUG("%s: OS INIT handler at %lx\n", __func__,
		       ia64_mc_info.imi_monarch_init_handler);

	/* Register the os init handler with SAL */
	if ((rc = ia64_sal_set_vectors(SAL_VECTOR_OS_INIT,
				       ia64_mc_info.imi_monarch_init_handler,
				       ia64_tpa(ia64_getreg(_IA64_REG_GP)),
				       ia64_mc_info.imi_monarch_init_handler_size,
				       ia64_mc_info.imi_slave_init_handler,
				       ia64_tpa(ia64_getreg(_IA64_REG_GP)),
				       ia64_mc_info.imi_slave_init_handler_size)))
	{
		printk(KERN_ERR "Failed to register m/s INIT handlers with SAL "
		       "(status %ld)\n", rc);
		return;
	}

	if (register_die_notifier(&default_init_monarch_nb)) {
		printk(KERN_ERR "Failed to register default monarch INIT process\n");
		return;
	}

	IA64_MCA_DEBUG("%s: registered OS INIT handler with SAL\n", __func__);

	/*
	 * Configure the CMCI/P vector and handler.  Interrupts for CMC are
	 * per-processor, so AP CMC interrupts are setup in smp_callin() (smpboot.c).
	 */
	register_percpu_irq(IA64_CMC_VECTOR, &cmci_irqaction);
	register_percpu_irq(IA64_CMCP_VECTOR, &cmcp_irqaction);
	ia64_mca_cmc_vector_setup();	/* Setup vector on BSP */

	/* Setup the MCA rendezvous interrupt vector */
	register_percpu_irq(IA64_MCA_RENDEZ_VECTOR, &mca_rdzv_irqaction);

	/* Setup the MCA wakeup interrupt vector */
	register_percpu_irq(IA64_MCA_WAKEUP_VECTOR, &mca_wkup_irqaction);

#ifdef CONFIG_ACPI
	/* Setup the CPEI/P handler */
	register_percpu_irq(IA64_CPEP_VECTOR, &mca_cpep_irqaction);
#endif

	/* Initialize the areas set aside by the OS to buffer the
	 * platform/processor error states for MCA/INIT/CMC
	 * handling.
	 */
	ia64_log_init(SAL_INFO_TYPE_MCA);
	ia64_log_init(SAL_INFO_TYPE_INIT);
	ia64_log_init(SAL_INFO_TYPE_CMC);
	ia64_log_init(SAL_INFO_TYPE_CPE);

	mca_init = 1;
	printk(KERN_INFO "MCA related initialization done\n");
}

/*
 * ia64_mca_late_init
 *
 * Opportunity to setup things that require initialization later
 * than ia64_mca_init.  Setup a timer to poll for CPEs if the
 * platform doesn't support an interrupt driven mechanism.
 *
 * Inputs  : None
 * Outputs : Status
 */
static int __init
ia64_mca_late_init(void)
{
	if (!mca_init)
		return 0;

	register_hotcpu_notifier(&mca_cpu_notifier);

	/* Setup the CMCI/P vector and handler */
	init_timer(&cmc_poll_timer);
	cmc_poll_timer.function = ia64_mca_cmc_poll;

	/* Unmask/enable the vector */
	cmc_polling_enabled = 0;
	schedule_work(&cmc_enable_work);
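	/* cmc_enable_work is deferred to a workqueue, presumably so the
	 * vector can be enabled on each cpu from process context rather
	 * than from this initcall directly.
	 */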

	IA64_MCA_DEBUG("%s: CMCI/P setup and enabled.\n", __func__);

#ifdef CONFIG_ACPI
	/* Setup the CPEI/P vector and handler */
	cpe_vector = acpi_request_vector(ACPI_INTERRUPT_CPEI);
	init_timer(&cpe_poll_timer);
	cpe_poll_timer.function = ia64_mca_cpe_poll;

	{
		irq_desc_t *desc;
		unsigned int irq;

		if (cpe_vector >= 0) {
			/* If platform supports CPEI, enable the irq. */
			irq = local_vector_to_irq(cpe_vector);
			if (irq > 0) {
				cpe_poll_enabled = 0;
				desc = irq_desc + irq;
				desc->status |= IRQ_PER_CPU;
				setup_irq(irq, &mca_cpe_irqaction);
				ia64_cpe_irq = irq;
				ia64_mca_register_cpev(cpe_vector);
				IA64_MCA_DEBUG("%s: CPEI/P setup and enabled.\n",
					       __func__);
				return 0;
			}
			printk(KERN_ERR "%s: Failed to find irq for CPE "
					"interrupt handler, vector %d\n",
					__func__, cpe_vector);
		}
		/* If platform doesn't support CPEI, get the timer going. */
		if (cpe_poll_enabled) {
			ia64_mca_cpe_poll(0UL);
			IA64_MCA_DEBUG("%s: CPEP setup and enabled.\n", __func__);
		}
	}
#endif

	return 0;
}

device_initcall(ia64_mca_late_init);