drivers/scsi/libsas/sas_event.c
// SPDX-License-Identifier: GPL-2.0
/*
 * Serial Attached SCSI (SAS) Event processing
 *
 * Copyright (C) 2005 Adaptec, Inc.  All rights reserved.
 * Copyright (C) 2005 Luben Tuikov <[email protected]>
 */

#include <linux/export.h>
#include <scsi/scsi_host.h>
#include "sas_internal.h"

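/*
 * sas_queue_work - queue a sas_work item on the HA event workqueue
 *
 * Returns 0 if the HA is not registered, 1 if the work was parked on
 * ha->defer_q while draining, otherwise the queue_work() return value.
 * Called with ha->lock held.
 */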
int sas_queue_work(struct sas_ha_struct *ha, struct sas_work *sw)
{
        /* it's added to the defer_q when draining, so report success */
        int rc = 1;

        if (!test_bit(SAS_HA_REGISTERED, &ha->state))
                return 0;

        if (test_bit(SAS_HA_DRAINING, &ha->state)) {
                /* add it to the defer list, if not already pending */
                if (list_empty(&sw->drain_node))
                        list_add_tail(&sw->drain_node, &ha->defer_q);
        } else
                rc = queue_work(ha->event_q, &sw->work);

        return rc;
}

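/*
 * sas_queue_event - wrapper around sas_queue_work() that takes ha->lock
 * with interrupts disabled, so events can be queued from any context
 */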
static int sas_queue_event(int event, struct sas_work *work,
                           struct sas_ha_struct *ha)
{
        unsigned long flags;
        int rc;

        spin_lock_irqsave(&ha->lock, flags);
        rc = sas_queue_work(ha, work);
        spin_unlock_irqrestore(&ha->lock, flags);

        return rc;
}

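/*
 * sas_queue_deferred_work - requeue events parked on ha->defer_q; any
 * event that can no longer be queued drops its runtime PM reference and
 * is freed
 */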
void sas_queue_deferred_work(struct sas_ha_struct *ha)
{
        struct sas_work *sw, *_sw;
        int ret;

        spin_lock_irq(&ha->lock);
        list_for_each_entry_safe(sw, _sw, &ha->defer_q, drain_node) {
                list_del_init(&sw->drain_node);
                ret = sas_queue_work(ha, sw);
                if (ret != 1) {
                        pm_runtime_put(ha->dev);
                        sas_free_event(to_asd_sas_event(&sw->work));
                }
        }
        spin_unlock_irq(&ha->lock);
}

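/*
 * __sas_drain_work - flush the event and discovery workqueues
 *
 * SAS_HA_DRAINING diverts newly submitted work to ha->defer_q, and the
 * empty ha->lock critical section lets in-flight submitters finish before
 * the workqueues are drained.  Deferred work is requeued afterwards.
 */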
void __sas_drain_work(struct sas_ha_struct *ha)
{
        set_bit(SAS_HA_DRAINING, &ha->state);
        /* flush submitters */
        spin_lock_irq(&ha->lock);
        spin_unlock_irq(&ha->lock);

        drain_workqueue(ha->event_q);
        drain_workqueue(ha->disco_q);

        clear_bit(SAS_HA_DRAINING, &ha->state);
        sas_queue_deferred_work(ha);
}

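/*
 * sas_drain_work - drain libsas workqueues under ha->drain_mutex; a no-op
 * once the HA has been unregistered
 */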
int sas_drain_work(struct sas_ha_struct *ha)
{
        int err;

        err = mutex_lock_interruptible(&ha->drain_mutex);
        if (err)
                return err;
        if (test_bit(SAS_HA_REGISTERED, &ha->state))
                __sas_drain_work(ha);
        mutex_unlock(&ha->drain_mutex);

        return 0;
}
EXPORT_SYMBOL_GPL(sas_drain_work);

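/*
 * sas_disable_revalidation - set SAS_HA_ATA_EH_ACTIVE under disco_mutex to
 * hold off domain revalidation while ATA error handling runs
 */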
void sas_disable_revalidation(struct sas_ha_struct *ha)
{
        mutex_lock(&ha->disco_mutex);
        set_bit(SAS_HA_ATA_EH_ACTIVE, &ha->state);
        mutex_unlock(&ha->disco_mutex);
}

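/*
 * sas_enable_revalidation - clear SAS_HA_ATA_EH_ACTIVE and replay any
 * revalidation requests that were pending by notifying a broadcast event
 * on the first phy of each affected port
 */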
void sas_enable_revalidation(struct sas_ha_struct *ha)
{
        int i;

        mutex_lock(&ha->disco_mutex);
        clear_bit(SAS_HA_ATA_EH_ACTIVE, &ha->state);
        for (i = 0; i < ha->num_phys; i++) {
                struct asd_sas_port *port = ha->sas_port[i];
                const int ev = DISCE_REVALIDATE_DOMAIN;
                struct sas_discovery *d = &port->disc;
                struct asd_sas_phy *sas_phy;

                if (!test_and_clear_bit(ev, &d->pending))
                        continue;

                spin_lock(&port->phy_list_lock);
                if (list_empty(&port->phy_list)) {
                        spin_unlock(&port->phy_list_lock);
                        continue;
                }

                sas_phy = container_of(port->phy_list.next, struct asd_sas_phy,
                                       port_phy_el);
                spin_unlock(&port->phy_list_lock);
                sas_notify_port_event(sas_phy,
                                      PORTE_BROADCAST_RCVD, GFP_KERNEL);
        }
        mutex_unlock(&ha->disco_mutex);
}

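/*
 * sas_port_event_worker - dispatch a queued port event to its handler,
 * then drop the runtime PM reference taken in sas_notify_port_event()
 * and free the event
 */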
static void sas_port_event_worker(struct work_struct *work)
{
        struct asd_sas_event *ev = to_asd_sas_event(work);
        struct asd_sas_phy *phy = ev->phy;
        struct sas_ha_struct *ha = phy->ha;

        sas_port_event_fns[ev->event](work);
        pm_runtime_put(ha->dev);
        sas_free_event(ev);
}

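/*
 * sas_phy_event_worker - dispatch a queued phy event to its handler,
 * then drop the runtime PM reference taken in sas_notify_phy_event()
 * and free the event
 */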
static void sas_phy_event_worker(struct work_struct *work)
{
        struct asd_sas_event *ev = to_asd_sas_event(work);
        struct asd_sas_phy *phy = ev->phy;
        struct sas_ha_struct *ha = phy->ha;

        sas_phy_event_fns[ev->event](work);
        pm_runtime_put(ha->dev);
        sas_free_event(ev);
}

/* defer events of new phys while the HA is still resuming */
static bool sas_defer_event(struct asd_sas_phy *phy, struct asd_sas_event *ev)
{
        struct sas_ha_struct *ha = phy->ha;
        unsigned long flags;
        bool deferred = false;

        spin_lock_irqsave(&ha->lock, flags);
        if (test_bit(SAS_HA_RESUMING, &ha->state) && !phy->suspended) {
                struct sas_work *sw = &ev->work;

                list_add_tail(&sw->drain_node, &ha->defer_q);
                deferred = true;
        }
        spin_unlock_irqrestore(&ha->lock, flags);
        return deferred;
}

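/*
 * sas_notify_port_event - notify libsas of a port event on a phy
 *
 * Allocates an event with @gfp_flags, pins the device with
 * pm_runtime_get_noresume() (released by the worker), and either defers
 * the event while the HA is resuming or queues it.  Returns -ENOMEM on
 * allocation failure, 0 if deferred, otherwise the queueing result.
 */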
int sas_notify_port_event(struct asd_sas_phy *phy, enum port_event event,
                          gfp_t gfp_flags)
{
        struct sas_ha_struct *ha = phy->ha;
        struct asd_sas_event *ev;
        int ret;

        BUG_ON(event >= PORT_NUM_EVENTS);

        ev = sas_alloc_event(phy, gfp_flags);
        if (!ev)
                return -ENOMEM;

        /* balanced by the pm_runtime_put() in sas_port_event_worker() */
        pm_runtime_get_noresume(ha->dev);

        INIT_SAS_EVENT(ev, sas_port_event_worker, phy, event);

        if (sas_defer_event(phy, ev))
                return 0;

        ret = sas_queue_event(event, &ev->work, ha);
        if (ret != 1) {
                pm_runtime_put(ha->dev);
                sas_free_event(ev);
        }

        return ret;
}
EXPORT_SYMBOL_GPL(sas_notify_port_event);

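/*
 * sas_notify_phy_event - notify libsas of a phy event on a phy
 *
 * Identical flow to sas_notify_port_event(), but validates against
 * PHY_NUM_EVENTS and dispatches through sas_phy_event_worker().
 */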
int sas_notify_phy_event(struct asd_sas_phy *phy, enum phy_event event,
                         gfp_t gfp_flags)
{
        struct sas_ha_struct *ha = phy->ha;
        struct asd_sas_event *ev;
        int ret;

        BUG_ON(event >= PHY_NUM_EVENTS);

        ev = sas_alloc_event(phy, gfp_flags);
        if (!ev)
                return -ENOMEM;

        /* balanced by the pm_runtime_put() in sas_phy_event_worker() */
        pm_runtime_get_noresume(ha->dev);

        INIT_SAS_EVENT(ev, sas_phy_event_worker, phy, event);

        if (sas_defer_event(phy, ev))
                return 0;

        ret = sas_queue_event(event, &ev->work, ha);
        if (ret != 1) {
                pm_runtime_put(ha->dev);
                sas_free_event(ev);
        }

        return ret;
}
EXPORT_SYMBOL_GPL(sas_notify_phy_event);