// SPDX-License-Identifier: GPL-2.0-or-later

#include <linux/delay.h>
#include <linux/list.h>
#include <linux/sched.h>
#include <linux/semaphore.h>
#include <linux/pci.h>
#include <linux/slab.h>
#include <linux/kthread.h>
#include <asm/eeh_event.h>
#include <asm/ppc-pci.h>

/*
 * EEH error states may be detected within exception handlers;
 * however, the recovery processing needs to occur asynchronously
 * in a normal kernel context and not an interrupt context.
 * This pair of routines creates an event and queues it onto a
 * work-queue, where a worker thread can drive recovery.
 */

static DEFINE_SPINLOCK(eeh_eventlist_lock);
static DECLARE_COMPLETION(eeh_eventlist_event);
static LIST_HEAD(eeh_eventlist);
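
/*
 * The event list is a simple producer/consumer queue:
 * __eeh_send_failure_event() adds events to eeh_eventlist under
 * eeh_eventlist_lock and signals eeh_eventlist_event; the "eehd"
 * kernel thread below waits on that completion and drains the list.
 */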

/**
 * eeh_event_handler - Dispatch EEH events.
 * @dummy: unused
 *
 * The detection of a frozen slot can occur inside an interrupt,
 * where it can be hard to do anything about it. The goal of this
 * routine is to pull these detection events out of the context
 * of the interrupt handler, and re-dispatch them for processing
 * at a later time in a normal context.
 */
static int eeh_event_handler(void *dummy)
{
	unsigned long flags;
	struct eeh_event *event;

	while (!kthread_should_stop()) {
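		/* Sleep until a producer signals that an event has been queued */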
		if (wait_for_completion_interruptible(&eeh_eventlist_event))
			break;

		/* Fetch EEH event from the queue */
		spin_lock_irqsave(&eeh_eventlist_lock, flags);
		event = NULL;
		if (!list_empty(&eeh_eventlist)) {
			event = list_entry(eeh_eventlist.next,
					   struct eeh_event, list);
			list_del(&event->list);
		}
		spin_unlock_irqrestore(&eeh_eventlist_lock, flags);
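
		/* Nothing to do if the queue was empty (the event may have been removed) */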
		if (!event)
			continue;

		/* We might have an event without a binding PE */
		if (event->pe)
			eeh_handle_normal_event(event->pe);
		else
			eeh_handle_special_event();

		kfree(event);
	}

	return 0;
}

/**
 * eeh_event_init - Start kernel thread to handle EEH events
 *
 * This routine is called to start the kernel thread for processing
 * EEH events.
 */
int eeh_event_init(void)
{
	struct task_struct *t;
	int ret = 0;

	t = kthread_run(eeh_event_handler, NULL, "eehd");
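	/* kthread_run() returns an ERR_PTR(), not NULL, on failure */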
	if (IS_ERR(t)) {
		ret = PTR_ERR(t);
		pr_err("%s: Failed to start EEH daemon (%d)\n",
		       __func__, ret);
		return ret;
	}

	return 0;
}

/**
 * __eeh_send_failure_event - Generate a PCI error event
 * @pe: EEH PE
 *
 * This routine can be called within an interrupt context;
 * the actual event will be delivered in a normal context
 * (from a workqueue).
 */
int __eeh_send_failure_event(struct eeh_pe *pe)
{
	unsigned long flags;
	struct eeh_event *event;
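
	/* May be called in interrupt context, so use a non-sleeping allocation */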
	event = kzalloc(sizeof(*event), GFP_ATOMIC);
	if (!event) {
		pr_err("EEH: out of memory, event not handled\n");
		return -ENOMEM;
	}
	event->pe = pe;

	/*
	 * Mark the PE as recovering before inserting it in the queue.
	 * This prevents the PE from being free()ed by a hotplug driver
	 * while the PE is sitting in the event queue.
	 */
	if (pe) {
#ifdef CONFIG_STACKTRACE
		/*
		 * Save the current stack trace so we can dump it from the
		 * event handler thread.
		 */
		pe->trace_entries = stack_trace_save(pe->stack_trace,
					 ARRAY_SIZE(pe->stack_trace), 0);
#endif /* CONFIG_STACKTRACE */

		eeh_pe_state_mark(pe, EEH_PE_RECOVERING);
	}

	/* We may or may not be called in an interrupt context */
	spin_lock_irqsave(&eeh_eventlist_lock, flags);
	list_add(&event->list, &eeh_eventlist);
	spin_unlock_irqrestore(&eeh_eventlist_lock, flags);

	/* For the EEH daemon to kick in */
	complete(&eeh_eventlist_event);

	return 0;
}

int eeh_send_failure_event(struct eeh_pe *pe)
{
	/*
	 * If we've manually suppressed recovery events via debugfs
	 * then just drop it on the floor.
	 */
	if (eeh_debugfs_no_recover) {
		pr_err("EEH: Event dropped due to no_recover setting\n");
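		/* Report success: the event is dropped deliberately, not due to an error */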
		return 0;
	}

	return __eeh_send_failure_event(pe);
}

/**
 * eeh_remove_event - Remove EEH event from the queue
 * @pe: Event binding to the PE
 * @force: Event will be removed unconditionally
 *
 * On the PowerNV platform, subsequent events may turn out to be
 * part of an earlier one. Such follow-on events are pure duplicates
 * and unnecessary, so they should be removed from the queue.
 */
void eeh_remove_event(struct eeh_pe *pe, bool force)
{
	unsigned long flags;
	struct eeh_event *event, *tmp;

	/*
	 * A NULL PE means either a dead IOC, or that the callers are
	 * sure they can report all existing errors, so every queued
	 * event is removed.
	 *
	 * Unless "force" is set, events whose associated PE has
	 * already been isolated are kept in the queue, so that those
	 * events are not lost.
	 */
	spin_lock_irqsave(&eeh_eventlist_lock, flags);
	list_for_each_entry_safe(event, tmp, &eeh_eventlist, list) {
		if (!force && event->pe &&
		    (event->pe->state & EEH_PE_ISOLATED))
			continue;

		if (!pe) {
			list_del(&event->list);
			kfree(event);
		} else if (pe->type & EEH_PE_PHB) {
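			/* A PHB PE matches every queued event on the same PHB */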
			if (event->pe && event->pe->phb == pe->phb) {
				list_del(&event->list);
				kfree(event);
			}
		} else if (event->pe == pe) {
			list_del(&event->list);
			kfree(event);
		}
	}
	spin_unlock_irqrestore(&eeh_eventlist_lock, flags);
}