]> Git Repo - linux.git/blob - drivers/net/ethernet/amd/pds_core/adminq.c
dma-mapping: don't return errors from dma_set_max_seg_size
[linux.git] / drivers / net / ethernet / amd / pds_core / adminq.c
1 // SPDX-License-Identifier: GPL-2.0
2 /* Copyright(c) 2023 Advanced Micro Devices, Inc */
3
4 #include <linux/dynamic_debug.h>
5
6 #include "core.h"
7
/* Per-request context for a caller sleeping on an AdminQ command.
 *
 * NOTE(review): this is allocated on the requester's stack in
 * pdsc_adminq_post(), and q_info->wc holds a pointer to it while the
 * command is in flight.  If the requester times out and returns before
 * the firmware completes the command, pdsc_process_adminq() could still
 * dereference the stale pointer -- confirm the timeout path cannot race
 * with completion processing.
 */
struct pdsc_wait_context {
	struct pdsc_qcq *qcq;			/* queue pair the request was posted to */
	struct completion wait_completion;	/* signaled by pdsc_process_adminq() */
};
12
13 static int pdsc_process_notifyq(struct pdsc_qcq *qcq)
14 {
15         union pds_core_notifyq_comp *comp;
16         struct pdsc *pdsc = qcq->pdsc;
17         struct pdsc_cq *cq = &qcq->cq;
18         struct pdsc_cq_info *cq_info;
19         int nq_work = 0;
20         u64 eid;
21
22         cq_info = &cq->info[cq->tail_idx];
23         comp = cq_info->comp;
24         eid = le64_to_cpu(comp->event.eid);
25         while (eid > pdsc->last_eid) {
26                 u16 ecode = le16_to_cpu(comp->event.ecode);
27
28                 switch (ecode) {
29                 case PDS_EVENT_LINK_CHANGE:
30                         dev_info(pdsc->dev, "NotifyQ LINK_CHANGE ecode %d eid %lld\n",
31                                  ecode, eid);
32                         pdsc_notify(PDS_EVENT_LINK_CHANGE, comp);
33                         break;
34
35                 case PDS_EVENT_RESET:
36                         dev_info(pdsc->dev, "NotifyQ RESET ecode %d eid %lld\n",
37                                  ecode, eid);
38                         pdsc_notify(PDS_EVENT_RESET, comp);
39                         break;
40
41                 case PDS_EVENT_XCVR:
42                         dev_info(pdsc->dev, "NotifyQ XCVR ecode %d eid %lld\n",
43                                  ecode, eid);
44                         break;
45
46                 default:
47                         dev_info(pdsc->dev, "NotifyQ ecode %d eid %lld\n",
48                                  ecode, eid);
49                         break;
50                 }
51
52                 pdsc->last_eid = eid;
53                 cq->tail_idx = (cq->tail_idx + 1) & (cq->num_descs - 1);
54                 cq_info = &cq->info[cq->tail_idx];
55                 comp = cq_info->comp;
56                 eid = le64_to_cpu(comp->event.eid);
57
58                 nq_work++;
59         }
60
61         qcq->accum_work += nq_work;
62
63         return nq_work;
64 }
65
66 static bool pdsc_adminq_inc_if_up(struct pdsc *pdsc)
67 {
68         if (pdsc->state & BIT_ULL(PDSC_S_STOPPING_DRIVER) ||
69             pdsc->state & BIT_ULL(PDSC_S_FW_DEAD))
70                 return false;
71
72         return refcount_inc_not_zero(&pdsc->adminq_refcnt);
73 }
74
/**
 * pdsc_process_adminq() - service NotifyQ events and AdminQ completions
 * @qcq: the AdminQ queue/completion-queue pair
 *
 * Runs from the workqueue item scheduled by pdsc_adminq_isr().  The two
 * queues share an interrupt, so NotifyQ events are drained first, then
 * each cleanable AdminQ completion is copied out to the requester's
 * buffer and the requester is woken.  Finally, one interrupt credit per
 * completion is returned and the interrupt is re-armed.
 */
void pdsc_process_adminq(struct pdsc_qcq *qcq)
{
	union pds_core_adminq_comp *comp;
	struct pdsc_queue *q = &qcq->q;
	struct pdsc *pdsc = qcq->pdsc;
	struct pdsc_cq *cq = &qcq->cq;
	struct pdsc_q_info *q_info;
	unsigned long irqflags;
	int nq_work = 0;
	int aq_work = 0;

	/* Don't process AdminQ when it's not up */
	if (!pdsc_adminq_inc_if_up(pdsc)) {
		dev_err(pdsc->dev, "%s: called while adminq is unavailable\n",
			__func__);
		return;
	}

	/* Check for NotifyQ event */
	nq_work = pdsc_process_notifyq(&pdsc->notifyqcq);

	/* Check for empty queue, which can happen if the interrupt was
	 * for a NotifyQ event and there are no new AdminQ completions.
	 */
	if (q->tail_idx == q->head_idx)
		goto credits;

	/* Find the first completion to clean,
	 * run the callback in the related q_info,
	 * and continue while we still match done color
	 */
	spin_lock_irqsave(&pdsc->adminq_lock, irqflags);
	comp = cq->info[cq->tail_idx].comp;
	while (pdsc_color_match(comp->color, cq->done_color)) {
		q_info = &q->info[q->tail_idx];
		q->tail_idx = (q->tail_idx + 1) & (q->num_descs - 1);

		/* Copy out the completion data */
		memcpy(q_info->dest, comp, sizeof(*comp));

		/* Wake the requester sleeping in pdsc_adminq_post().
		 * NOTE(review): q_info->wc points at the requester's
		 * on-stack wait context; if the requester already timed
		 * out and returned, this touches freed stack memory --
		 * confirm the timeout path cannot race with this wakeup.
		 */
		complete_all(&q_info->wc->wait_completion);

		/* done_color flips each time the completion ring wraps */
		if (cq->tail_idx == cq->num_descs - 1)
			cq->done_color = !cq->done_color;
		cq->tail_idx = (cq->tail_idx + 1) & (cq->num_descs - 1);
		comp = cq->info[cq->tail_idx].comp;

		aq_work++;
	}
	spin_unlock_irqrestore(&pdsc->adminq_lock, irqflags);

	qcq->accum_work += aq_work;

credits:
	/* Return the interrupt credits, one for each completion */
	pds_core_intr_credits(&pdsc->intr_ctrl[qcq->intx],
			      nq_work + aq_work,
			      PDS_CORE_INTR_CRED_REARM);
	refcount_dec(&pdsc->adminq_refcnt);
}
135
136 void pdsc_work_thread(struct work_struct *work)
137 {
138         struct pdsc_qcq *qcq = container_of(work, struct pdsc_qcq, work);
139
140         pdsc_process_adminq(qcq);
141 }
142
143 irqreturn_t pdsc_adminq_isr(int irq, void *data)
144 {
145         struct pdsc *pdsc = data;
146         struct pdsc_qcq *qcq;
147
148         /* Don't process AdminQ when it's not up */
149         if (!pdsc_adminq_inc_if_up(pdsc)) {
150                 dev_err(pdsc->dev, "%s: called while adminq is unavailable\n",
151                         __func__);
152                 return IRQ_HANDLED;
153         }
154
155         qcq = &pdsc->adminqcq;
156         queue_work(pdsc->wq, &qcq->work);
157         refcount_dec(&pdsc->adminq_refcnt);
158
159         return IRQ_HANDLED;
160 }
161
/**
 * __pdsc_adminq_post() - post one command descriptor to the AdminQ
 * @pdsc: device private data
 * @qcq:  AdminQ queue/completion-queue pair
 * @cmd:  command to copy into the descriptor ring
 * @comp: caller's buffer that will receive the completion data
 * @wc:   caller's wait context, stashed in q_info for the cleaner
 *
 * Under adminq_lock: verifies ring space and firmware liveness, fills the
 * descriptor at head, advances head, and rings the doorbell.
 *
 * Return: the descriptor index used on success, -ENOSPC when the ring is
 * full, or -ENXIO when the firmware is not running.
 */
static int __pdsc_adminq_post(struct pdsc *pdsc,
			      struct pdsc_qcq *qcq,
			      union pds_core_adminq_cmd *cmd,
			      union pds_core_adminq_comp *comp,
			      struct pdsc_wait_context *wc)
{
	struct pdsc_queue *q = &qcq->q;
	struct pdsc_q_info *q_info;
	unsigned long irqflags;
	unsigned int avail;
	int index;
	int ret;

	spin_lock_irqsave(&pdsc->adminq_lock, irqflags);

	/* Check for space in the queue: free slots between head and tail,
	 * keeping one slot unused so head == tail always means "empty".
	 */
	avail = q->tail_idx;
	if (q->head_idx >= avail)
		avail += q->num_descs - q->head_idx - 1;
	else
		avail -= q->head_idx + 1;
	if (!avail) {
		ret = -ENOSPC;
		goto err_out_unlock;
	}

	/* Check that the FW is running */
	if (!pdsc_is_fw_running(pdsc)) {
		/* info_regs may be NULL if the BARs were never mapped */
		if (pdsc->info_regs) {
			u8 fw_status =
				ioread8(&pdsc->info_regs->fw_status);

			dev_info(pdsc->dev, "%s: post failed - fw not running %#02x:\n",
				 __func__, fw_status);
		} else {
			dev_info(pdsc->dev, "%s: post failed - BARs not setup\n",
				 __func__);
		}
		ret = -ENXIO;

		goto err_out_unlock;
	}

	/* Post the request: record where the completion data and wakeup
	 * should go, then copy the command into the descriptor slot.
	 */
	index = q->head_idx;
	q_info = &q->info[index];
	q_info->wc = wc;
	q_info->dest = comp;
	memcpy(q_info->desc, cmd, sizeof(*cmd));

	dev_dbg(pdsc->dev, "head_idx %d tail_idx %d\n",
		q->head_idx, q->tail_idx);
	dev_dbg(pdsc->dev, "post admin queue command:\n");
	dynamic_hex_dump("cmd ", DUMP_PREFIX_OFFSET, 16, 1,
			 cmd, sizeof(*cmd), true);

	q->head_idx = (q->head_idx + 1) & (q->num_descs - 1);

	/* Tell the hardware about the new descriptor */
	pds_core_dbell_ring(pdsc->kern_dbpage,
			    q->hw_type, q->dbval | q->head_idx);
	ret = index;

err_out_unlock:
	spin_unlock_irqrestore(&pdsc->adminq_lock, irqflags);
	return ret;
}
228
/**
 * pdsc_adminq_post() - post an AdminQ command and wait for its completion
 * @pdsc: device private data
 * @cmd:  command to send
 * @comp: buffer to receive the completion data
 * @fast_poll: when true, keep the 1 ms poll interval instead of backing off
 *
 * Posts @cmd and sleeps in short timeslices (so firmware death is noticed
 * early) until the completion arrives, the firmware stops running, or
 * devcmd_timeout seconds elapse.  On -ENXIO or -ETIMEDOUT the health
 * worker is scheduled to start recovery.
 *
 * Return: 0 on success, or a negative errno.
 */
int pdsc_adminq_post(struct pdsc *pdsc,
		     union pds_core_adminq_cmd *cmd,
		     union pds_core_adminq_comp *comp,
		     bool fast_poll)
{
	/* NOTE(review): on-stack wait context; __pdsc_adminq_post() leaves a
	 * pointer to it in the ring's q_info.  On the timeout path below we
	 * return while the descriptor may still be pending in firmware --
	 * confirm a late completion cannot dereference this freed stack slot.
	 */
	struct pdsc_wait_context wc = {
		.wait_completion =
			COMPLETION_INITIALIZER_ONSTACK(wc.wait_completion),
	};
	unsigned long poll_interval = 1;	/* msecs; doubles unless fast_poll */
	unsigned long poll_jiffies;
	unsigned long time_limit;
	unsigned long time_start;
	unsigned long time_done;
	unsigned long remaining;
	int err = 0;
	int index;

	/* Refuse new commands while stopping or after FW death */
	if (!pdsc_adminq_inc_if_up(pdsc)) {
		dev_dbg(pdsc->dev, "%s: preventing adminq cmd %u\n",
			__func__, cmd->opcode);
		return -ENXIO;
	}

	wc.qcq = &pdsc->adminqcq;
	index = __pdsc_adminq_post(pdsc, &pdsc->adminqcq, cmd, comp, &wc);
	if (index < 0) {
		err = index;
		goto err_out;
	}

	time_start = jiffies;
	time_limit = time_start + HZ * pdsc->devcmd_timeout;
	do {
		/* Timeslice the actual wait to catch IO errors etc early */
		poll_jiffies = msecs_to_jiffies(poll_interval);
		remaining = wait_for_completion_timeout(&wc.wait_completion,
							poll_jiffies);
		if (remaining)
			break;

		/* Between timeslices, see if the firmware died on us */
		if (!pdsc_is_fw_running(pdsc)) {
			if (pdsc->info_regs) {
				u8 fw_status =
					ioread8(&pdsc->info_regs->fw_status);

				dev_dbg(pdsc->dev, "%s: post wait failed - fw not running %#02x:\n",
					__func__, fw_status);
			} else {
				dev_dbg(pdsc->dev, "%s: post wait failed - BARs not setup\n",
					__func__);
			}
			err = -ENXIO;
			break;
		}

		/* When fast_poll is not requested, prevent aggressive polling
		 * on failures due to timeouts by doing exponential back off.
		 */
		if (!fast_poll && poll_interval < PDSC_ADMINQ_MAX_POLL_INTERVAL)
			poll_interval <<= 1;
	} while (time_before(jiffies, time_limit));
	time_done = jiffies;
	dev_dbg(pdsc->dev, "%s: elapsed %d msecs\n",
		__func__, jiffies_to_msecs(time_done - time_start));

	/* Check the results */
	if (time_after_eq(time_done, time_limit))
		err = -ETIMEDOUT;

	dev_dbg(pdsc->dev, "read admin queue completion idx %d:\n", index);
	dynamic_hex_dump("comp ", DUMP_PREFIX_OFFSET, 16, 1,
			 comp, sizeof(*comp), true);

	/* Only trust comp->status if the completion actually arrived */
	if (remaining && comp->status)
		err = pdsc_err_to_errno(comp->status);

err_out:
	if (err) {
		/* NOTE(review): comp->status is logged here even on paths
		 * where no completion was written (timeout, post failure),
		 * so the value may be whatever the caller passed in.
		 */
		dev_dbg(pdsc->dev, "%s: opcode %d status %d err %pe\n",
			__func__, cmd->opcode, comp->status, ERR_PTR(err));
		/* Kick off health checking / recovery on fatal-looking errors */
		if (err == -ENXIO || err == -ETIMEDOUT)
			queue_work(pdsc->wq, &pdsc->health_work);
	}

	refcount_dec(&pdsc->adminq_refcnt);

	return err;
}
This page took 0.051623 seconds and 4 git commands to generate.