drivers/net/ethernet/pensando/ionic/ionic_lif.c
1 // SPDX-License-Identifier: GPL-2.0
2 /* Copyright(c) 2017 - 2019 Pensando Systems, Inc */
3
4 #include <linux/ethtool.h>
5 #include <linux/printk.h>
6 #include <linux/dynamic_debug.h>
7 #include <linux/netdevice.h>
8 #include <linux/etherdevice.h>
9 #include <linux/if_vlan.h>
10 #include <linux/rtnetlink.h>
11 #include <linux/interrupt.h>
12 #include <linux/pci.h>
13 #include <linux/cpumask.h>
14 #include <linux/crash_dump.h>
15 #include <linux/vmalloc.h>
16
17 #include "ionic.h"
18 #include "ionic_bus.h"
19 #include "ionic_dev.h"
20 #include "ionic_lif.h"
21 #include "ionic_txrx.h"
22 #include "ionic_ethtool.h"
23 #include "ionic_debugfs.h"
24
25 /* queuetype support level */
26 static const u8 ionic_qtype_versions[IONIC_QTYPE_MAX] = {
27         [IONIC_QTYPE_ADMINQ]  = 0,   /* 0 = Base version with CQ support */
28         [IONIC_QTYPE_NOTIFYQ] = 0,   /* 0 = Base version */
29         [IONIC_QTYPE_RXQ]     = 2,   /* 0 = Base version with CQ+SG support
30                                       * 2 =       ... with CMB rings
31                                       */
32         [IONIC_QTYPE_TXQ]     = 3,   /* 0 = Base version with CQ+SG support
33                                       * 1 =       ... with Tx SG version 1
34                                       * 3 =       ... with CMB rings
35                                       */
36 };
37
38 static void ionic_link_status_check(struct ionic_lif *lif);
39 static void ionic_lif_handle_fw_down(struct ionic_lif *lif);
40 static void ionic_lif_handle_fw_up(struct ionic_lif *lif);
41 static void ionic_lif_set_netdev_info(struct ionic_lif *lif);
42
43 static void ionic_txrx_deinit(struct ionic_lif *lif);
44 static int ionic_txrx_init(struct ionic_lif *lif);
45 static int ionic_start_queues(struct ionic_lif *lif);
46 static void ionic_stop_queues(struct ionic_lif *lif);
47 static void ionic_lif_queue_identify(struct ionic_lif *lif);
48
49 static int ionic_xdp_queues_config(struct ionic_lif *lif);
50 static void ionic_xdp_unregister_rxq_info(struct ionic_queue *q);
51
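/* Dynamic interrupt moderation (DIM) worker: convert the moderation
 * profile chosen by net_dim into a hardware coalesce value and program
 * it only when it differs from the value already in use.
 */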
52 static void ionic_dim_work(struct work_struct *work)
53 {
54         struct dim *dim = container_of(work, struct dim, work);
55         struct dim_cq_moder cur_moder;
56         struct ionic_intr_info *intr;
57         struct ionic_qcq *qcq;
58         struct ionic_lif *lif;
59         struct ionic_queue *q;
60         u32 new_coal;
61
62         qcq = container_of(dim, struct ionic_qcq, dim);
63         q = &qcq->q;
64         if (q->type == IONIC_QTYPE_RXQ)
65                 cur_moder = net_dim_get_rx_moderation(dim->mode, dim->profile_ix);
66         else
67                 cur_moder = net_dim_get_tx_moderation(dim->mode, dim->profile_ix);
68         lif = q->lif;
69         new_coal = ionic_coal_usec_to_hw(lif->ionic, cur_moder.usec);
70         new_coal = new_coal ? new_coal : 1;
71
72         intr = &qcq->intr;
73         if (intr->dim_coal_hw != new_coal) {
74                 intr->dim_coal_hw = new_coal;
75
76                 ionic_intr_coal_init(lif->ionic->idev.intr_ctrl,
77                                      intr->index, intr->dim_coal_hw);
78         }
79
80         dim->state = DIM_START_MEASURE;
81 }
82
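/* Drain the lif's deferred work list, handling rx-mode updates, link
 * status checks, and firmware up/down transitions in process context.
 */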
83 static void ionic_lif_deferred_work(struct work_struct *work)
84 {
85         struct ionic_lif *lif = container_of(work, struct ionic_lif, deferred.work);
86         struct ionic_deferred *def = &lif->deferred;
87         struct ionic_deferred_work *w = NULL;
88
89         do {
90                 spin_lock_bh(&def->lock);
91                 if (!list_empty(&def->list)) {
92                         w = list_first_entry(&def->list,
93                                              struct ionic_deferred_work, list);
94                         list_del(&w->list);
95                 }
96                 spin_unlock_bh(&def->lock);
97
98                 if (!w)
99                         break;
100
101                 switch (w->type) {
102                 case IONIC_DW_TYPE_RX_MODE:
103                         ionic_lif_rx_mode(lif);
104                         break;
105                 case IONIC_DW_TYPE_LINK_STATUS:
106                         ionic_link_status_check(lif);
107                         break;
108                 case IONIC_DW_TYPE_LIF_RESET:
109                         if (w->fw_status) {
110                                 ionic_lif_handle_fw_up(lif);
111                         } else {
112                                 ionic_lif_handle_fw_down(lif);
113
114                                 /* Fire off another watchdog to see
115                                  * if the FW is already back rather than
116                                  * waiting another whole cycle
117                                  */
118                                 mod_timer(&lif->ionic->watchdog_timer, jiffies + 1);
119                         }
120                         break;
121                 default:
122                         break;
123                 }
124                 kfree(w);
125                 w = NULL;
126         } while (true);
127 }
128
129 void ionic_lif_deferred_enqueue(struct ionic_deferred *def,
130                                 struct ionic_deferred_work *work)
131 {
132         spin_lock_bh(&def->lock);
133         list_add_tail(&work->list, &def->list);
134         spin_unlock_bh(&def->lock);
135         schedule_work(&def->work);
136 }
137
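/* Read the link status reported in the lif info block and bring the
 * carrier and data queues up or down to match, unless the lif has been
 * marked broken.
 */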
138 static void ionic_link_status_check(struct ionic_lif *lif)
139 {
140         struct net_device *netdev = lif->netdev;
141         u16 link_status;
142         bool link_up;
143
144         if (!test_bit(IONIC_LIF_F_LINK_CHECK_REQUESTED, lif->state))
145                 return;
146
147         /* Don't put carrier back up if we're in a broken state */
148         if (test_bit(IONIC_LIF_F_BROKEN, lif->state)) {
149                 clear_bit(IONIC_LIF_F_LINK_CHECK_REQUESTED, lif->state);
150                 return;
151         }
152
153         link_status = le16_to_cpu(lif->info->status.link_status);
154         link_up = link_status == IONIC_PORT_OPER_STATUS_UP;
155
156         if (link_up) {
157                 int err = 0;
158
159                 if (netdev->flags & IFF_UP && netif_running(netdev)) {
160                         mutex_lock(&lif->queue_lock);
161                         err = ionic_start_queues(lif);
162                         if (err && err != -EBUSY) {
163                                 netdev_err(netdev,
164                                            "Failed to start queues: %d\n", err);
165                                 set_bit(IONIC_LIF_F_BROKEN, lif->state);
166                                 netif_carrier_off(lif->netdev);
167                         }
168                         mutex_unlock(&lif->queue_lock);
169                 }
170
171                 if (!err && !netif_carrier_ok(netdev)) {
172                         ionic_port_identify(lif->ionic);
173                         netdev_info(netdev, "Link up - %d Gbps\n",
174                                     le32_to_cpu(lif->info->status.link_speed) / 1000);
175                         netif_carrier_on(netdev);
176                 }
177         } else {
178                 if (netif_carrier_ok(netdev)) {
179                         lif->link_down_count++;
180                         netdev_info(netdev, "Link down\n");
181                         netif_carrier_off(netdev);
182                 }
183
184                 if (netdev->flags & IFF_UP && netif_running(netdev)) {
185                         mutex_lock(&lif->queue_lock);
186                         ionic_stop_queues(lif);
187                         mutex_unlock(&lif->queue_lock);
188                 }
189         }
190
191         clear_bit(IONIC_LIF_F_LINK_CHECK_REQUESTED, lif->state);
192 }
193
194 void ionic_link_status_check_request(struct ionic_lif *lif, bool can_sleep)
195 {
196         struct ionic_deferred_work *work;
197
198         /* we only need one request outstanding at a time */
199         if (test_and_set_bit(IONIC_LIF_F_LINK_CHECK_REQUESTED, lif->state))
200                 return;
201
202         if (!can_sleep) {
203                 work = kzalloc(sizeof(*work), GFP_ATOMIC);
204                 if (!work) {
205                         clear_bit(IONIC_LIF_F_LINK_CHECK_REQUESTED, lif->state);
206                         return;
207                 }
208
209                 work->type = IONIC_DW_TYPE_LINK_STATUS;
210                 ionic_lif_deferred_enqueue(&lif->deferred, work);
211         } else {
212                 ionic_link_status_check(lif);
213         }
214 }
215
216 static void ionic_napi_deadline(struct timer_list *timer)
217 {
218         struct ionic_qcq *qcq = container_of(timer, struct ionic_qcq, napi_deadline);
219
220         napi_schedule(&qcq->napi);
221 }
222
223 static irqreturn_t ionic_isr(int irq, void *data)
224 {
225         struct napi_struct *napi = data;
226
227         napi_schedule_irqoff(napi);
228
229         return IRQ_HANDLED;
230 }
231
232 static int ionic_request_irq(struct ionic_lif *lif, struct ionic_qcq *qcq)
233 {
234         struct ionic_intr_info *intr = &qcq->intr;
235         struct device *dev = lif->ionic->dev;
236         struct ionic_queue *q = &qcq->q;
237         const char *name;
238
239         if (lif->registered)
240                 name = lif->netdev->name;
241         else
242                 name = dev_name(dev);
243
244         snprintf(intr->name, sizeof(intr->name),
245                  "%s-%s-%s", IONIC_DRV_NAME, name, q->name);
246
247         return devm_request_irq(dev, intr->vector, ionic_isr,
248                                 0, intr->name, &qcq->napi);
249 }
250
251 static int ionic_intr_alloc(struct ionic_lif *lif, struct ionic_intr_info *intr)
252 {
253         struct ionic *ionic = lif->ionic;
254         int index;
255
256         index = find_first_zero_bit(ionic->intrs, ionic->nintrs);
257         if (index == ionic->nintrs) {
258                 netdev_warn(lif->netdev, "%s: no intr, index=%d nintrs=%d\n",
259                             __func__, index, ionic->nintrs);
260                 return -ENOSPC;
261         }
262
263         set_bit(index, ionic->intrs);
264         ionic_intr_init(&ionic->idev, intr, index);
265
266         return 0;
267 }
268
269 static void ionic_intr_free(struct ionic *ionic, int index)
270 {
271         if (index != IONIC_INTR_INDEX_NOT_ASSIGNED && index < ionic->nintrs)
272                 clear_bit(index, ionic->intrs);
273 }
274
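/* Ask the device to enable the queue, then enable NAPI and unmask the
 * queue's interrupt so traffic can flow.
 */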
275 static int ionic_qcq_enable(struct ionic_qcq *qcq)
276 {
277         struct ionic_queue *q = &qcq->q;
278         struct ionic_lif *lif = q->lif;
279         struct ionic_dev *idev;
280         struct device *dev;
281
282         struct ionic_admin_ctx ctx = {
283                 .work = COMPLETION_INITIALIZER_ONSTACK(ctx.work),
284                 .cmd.q_control = {
285                         .opcode = IONIC_CMD_Q_CONTROL,
286                         .lif_index = cpu_to_le16(lif->index),
287                         .type = q->type,
288                         .index = cpu_to_le32(q->index),
289                         .oper = IONIC_Q_ENABLE,
290                 },
291         };
292         int ret;
293
294         idev = &lif->ionic->idev;
295         dev = lif->ionic->dev;
296
297         dev_dbg(dev, "q_enable.index %d q_enable.qtype %d\n",
298                 ctx.cmd.q_control.index, ctx.cmd.q_control.type);
299
300         if (qcq->flags & IONIC_QCQ_F_INTR)
301                 ionic_intr_clean(idev->intr_ctrl, qcq->intr.index);
302
303         ret = ionic_adminq_post_wait(lif, &ctx);
304         if (ret)
305                 return ret;
306
307         if (qcq->napi.poll)
308                 napi_enable(&qcq->napi);
309
310         if (qcq->flags & IONIC_QCQ_F_INTR) {
311                 irq_set_affinity_hint(qcq->intr.vector,
312                                       &qcq->intr.affinity_mask);
313                 ionic_intr_mask(idev->intr_ctrl, qcq->intr.index,
314                                 IONIC_INTR_MASK_CLEAR);
315         }
316
317         return 0;
318 }
319
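/* Quiesce NAPI and mask the queue's interrupt, then ask the device to
 * disable the queue.  If the firmware is already known to be
 * unreachable, skip the adminq command and return the earlier error.
 */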
320 static int ionic_qcq_disable(struct ionic_lif *lif, struct ionic_qcq *qcq, int fw_err)
321 {
322         struct ionic_queue *q;
323
324         struct ionic_admin_ctx ctx = {
325                 .work = COMPLETION_INITIALIZER_ONSTACK(ctx.work),
326                 .cmd.q_control = {
327                         .opcode = IONIC_CMD_Q_CONTROL,
328                         .oper = IONIC_Q_DISABLE,
329                 },
330         };
331
332         if (!qcq) {
333                 netdev_err(lif->netdev, "%s: bad qcq\n", __func__);
334                 return -ENXIO;
335         }
336
337         q = &qcq->q;
338
339         if (qcq->flags & IONIC_QCQ_F_INTR) {
340                 struct ionic_dev *idev = &lif->ionic->idev;
341
342                 cancel_work_sync(&qcq->dim.work);
343                 ionic_intr_mask(idev->intr_ctrl, qcq->intr.index,
344                                 IONIC_INTR_MASK_SET);
345                 synchronize_irq(qcq->intr.vector);
346                 irq_set_affinity_hint(qcq->intr.vector, NULL);
347                 napi_disable(&qcq->napi);
348                 del_timer_sync(&qcq->napi_deadline);
349         }
350
351         /* If there was a previous fw communication error, don't bother
352          * sending the adminq command; just return the same error value.
353          */
354         if (fw_err == -ETIMEDOUT || fw_err == -ENXIO)
355                 return fw_err;
356
357         ctx.cmd.q_control.lif_index = cpu_to_le16(lif->index);
358         ctx.cmd.q_control.type = q->type;
359         ctx.cmd.q_control.index = cpu_to_le32(q->index);
360         dev_dbg(lif->ionic->dev, "q_disable.index %d q_disable.qtype %d\n",
361                 ctx.cmd.q_control.index, ctx.cmd.q_control.type);
362
363         return ionic_adminq_post_wait(lif, &ctx);
364 }
365
366 static void ionic_lif_qcq_deinit(struct ionic_lif *lif, struct ionic_qcq *qcq)
367 {
368         struct ionic_dev *idev = &lif->ionic->idev;
369
370         if (!qcq)
371                 return;
372
373         if (!(qcq->flags & IONIC_QCQ_F_INITED))
374                 return;
375
376         if (qcq->flags & IONIC_QCQ_F_INTR) {
377                 ionic_intr_mask(idev->intr_ctrl, qcq->intr.index,
378                                 IONIC_INTR_MASK_SET);
379                 netif_napi_del(&qcq->napi);
380         }
381
382         qcq->flags &= ~IONIC_QCQ_F_INITED;
383 }
384
385 static void ionic_qcq_intr_free(struct ionic_lif *lif, struct ionic_qcq *qcq)
386 {
387         if (!(qcq->flags & IONIC_QCQ_F_INTR) || qcq->intr.vector == 0)
388                 return;
389
390         irq_set_affinity_hint(qcq->intr.vector, NULL);
391         devm_free_irq(lif->ionic->dev, qcq->intr.vector, &qcq->napi);
392         qcq->intr.vector = 0;
393         ionic_intr_free(lif->ionic, qcq->intr.index);
394         qcq->intr.index = IONIC_INTR_INDEX_NOT_ASSIGNED;
395 }
396
397 static void ionic_qcq_free(struct ionic_lif *lif, struct ionic_qcq *qcq)
398 {
399         struct device *dev = lif->ionic->dev;
400
401         if (!qcq)
402                 return;
403
404         ionic_debugfs_del_qcq(qcq);
405
406         if (qcq->q_base) {
407                 dma_free_coherent(dev, qcq->q_size, qcq->q_base, qcq->q_base_pa);
408                 qcq->q_base = NULL;
409                 qcq->q_base_pa = 0;
410         }
411
412         if (qcq->cmb_q_base) {
413                 iounmap(qcq->cmb_q_base);
414                 ionic_put_cmb(lif, qcq->cmb_pgid, qcq->cmb_order);
415                 qcq->cmb_pgid = 0;
416                 qcq->cmb_order = 0;
417                 qcq->cmb_q_base = NULL;
418                 qcq->cmb_q_base_pa = 0;
419         }
420
421         if (qcq->cq_base) {
422                 dma_free_coherent(dev, qcq->cq_size, qcq->cq_base, qcq->cq_base_pa);
423                 qcq->cq_base = NULL;
424                 qcq->cq_base_pa = 0;
425         }
426
427         if (qcq->sg_base) {
428                 dma_free_coherent(dev, qcq->sg_size, qcq->sg_base, qcq->sg_base_pa);
429                 qcq->sg_base = NULL;
430                 qcq->sg_base_pa = 0;
431         }
432
433         ionic_xdp_unregister_rxq_info(&qcq->q);
434         ionic_qcq_intr_free(lif, qcq);
435
436         vfree(qcq->q.info);
437         qcq->q.info = NULL;
438 }
439
440 void ionic_qcqs_free(struct ionic_lif *lif)
441 {
442         struct device *dev = lif->ionic->dev;
443         struct ionic_qcq *adminqcq;
444         unsigned long irqflags;
445
446         if (lif->notifyqcq) {
447                 ionic_qcq_free(lif, lif->notifyqcq);
448                 devm_kfree(dev, lif->notifyqcq);
449                 lif->notifyqcq = NULL;
450         }
451
452         if (lif->adminqcq) {
453                 spin_lock_irqsave(&lif->adminq_lock, irqflags);
454                 adminqcq = READ_ONCE(lif->adminqcq);
455                 lif->adminqcq = NULL;
456                 spin_unlock_irqrestore(&lif->adminq_lock, irqflags);
457                 if (adminqcq) {
458                         ionic_qcq_free(lif, adminqcq);
459                         devm_kfree(dev, adminqcq);
460                 }
461         }
462
463         if (lif->rxqcqs) {
464                 devm_kfree(dev, lif->rxqstats);
465                 lif->rxqstats = NULL;
466                 devm_kfree(dev, lif->rxqcqs);
467                 lif->rxqcqs = NULL;
468         }
469
470         if (lif->txqcqs) {
471                 devm_kfree(dev, lif->txqstats);
472                 lif->txqstats = NULL;
473                 devm_kfree(dev, lif->txqcqs);
474                 lif->txqcqs = NULL;
475         }
476 }
477
478 static void ionic_link_qcq_interrupts(struct ionic_qcq *src_qcq,
479                                       struct ionic_qcq *n_qcq)
480 {
481         n_qcq->intr.vector = src_qcq->intr.vector;
482         n_qcq->intr.index = src_qcq->intr.index;
483         n_qcq->napi_qcq = src_qcq->napi_qcq;
484 }
485
486 static int ionic_alloc_qcq_interrupt(struct ionic_lif *lif, struct ionic_qcq *qcq)
487 {
488         int err;
489
490         if (!(qcq->flags & IONIC_QCQ_F_INTR)) {
491                 qcq->intr.index = IONIC_INTR_INDEX_NOT_ASSIGNED;
492                 return 0;
493         }
494
495         err = ionic_intr_alloc(lif, &qcq->intr);
496         if (err) {
497                 netdev_warn(lif->netdev, "no intr for %s: %d\n",
498                             qcq->q.name, err);
499                 goto err_out;
500         }
501
502         err = ionic_bus_get_irq(lif->ionic, qcq->intr.index);
503         if (err < 0) {
504                 netdev_warn(lif->netdev, "no vector for %s: %d\n",
505                             qcq->q.name, err);
506                 goto err_out_free_intr;
507         }
508         qcq->intr.vector = err;
509         ionic_intr_mask_assert(lif->ionic->idev.intr_ctrl, qcq->intr.index,
510                                IONIC_INTR_MASK_SET);
511
512         err = ionic_request_irq(lif, qcq);
513         if (err) {
514                 netdev_warn(lif->netdev, "irq request failed %d\n", err);
515                 goto err_out_free_intr;
516         }
517
518         /* try to get the irq on the local numa node first */
519         qcq->intr.cpu = cpumask_local_spread(qcq->intr.index,
520                                              dev_to_node(lif->ionic->dev));
521         if (qcq->intr.cpu != -1)
522                 cpumask_set_cpu(qcq->intr.cpu, &qcq->intr.affinity_mask);
523
524         netdev_dbg(lif->netdev, "%s: Interrupt index %d\n", qcq->q.name, qcq->intr.index);
525         return 0;
526
527 err_out_free_intr:
528         ionic_intr_free(lif->ionic, qcq->intr.index);
529 err_out:
530         return err;
531 }
532
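/* Allocate a queue/completion-queue pair: the descriptor rings in
 * coherent DMA memory (plus on-chip CMB memory for the q descriptors
 * when requested), an optional scatter-gather ring, and the interrupt
 * that will service it.
 */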
533 static int ionic_qcq_alloc(struct ionic_lif *lif, unsigned int type,
534                            unsigned int index,
535                            const char *name, unsigned int flags,
536                            unsigned int num_descs, unsigned int desc_size,
537                            unsigned int cq_desc_size,
538                            unsigned int sg_desc_size,
539                            unsigned int desc_info_size,
540                            unsigned int pid, struct ionic_qcq **qcq)
541 {
542         struct ionic_dev *idev = &lif->ionic->idev;
543         struct device *dev = lif->ionic->dev;
544         struct ionic_qcq *new;
545         int err;
546
547         *qcq = NULL;
548
549         new = devm_kzalloc(dev, sizeof(*new), GFP_KERNEL);
550         if (!new) {
551                 netdev_err(lif->netdev, "Cannot allocate queue structure\n");
552                 err = -ENOMEM;
553                 goto err_out;
554         }
555
556         new->q.dev = dev;
557         new->flags = flags;
558
559         new->q.info = vcalloc(num_descs, desc_info_size);
560         if (!new->q.info) {
561                 netdev_err(lif->netdev, "Cannot allocate queue info\n");
562                 err = -ENOMEM;
563                 goto err_out_free_qcq;
564         }
565
566         new->q.type = type;
567         new->q.max_sg_elems = lif->qtype_info[type].max_sg_elems;
568
569         err = ionic_q_init(lif, idev, &new->q, index, name, num_descs,
570                            desc_size, sg_desc_size, pid);
571         if (err) {
572                 netdev_err(lif->netdev, "Cannot initialize queue\n");
573                 goto err_out_free_q_info;
574         }
575
576         err = ionic_alloc_qcq_interrupt(lif, new);
577         if (err)
578                 goto err_out_free_q_info;
579
580         err = ionic_cq_init(lif, &new->cq, &new->intr, num_descs, cq_desc_size);
581         if (err) {
582                 netdev_err(lif->netdev, "Cannot initialize completion queue\n");
583                 goto err_out_free_irq;
584         }
585
586         if (flags & IONIC_QCQ_F_NOTIFYQ) {
587                 int q_size;
588
589                 /* q & cq need to be contiguous in NotifyQ, so alloc it all in q
590                  * and don't alloc the cq separately.  We leave new->cq_size and
591                  * new->cq_base as 0 to be sure we don't try to free them later.
592                  */
593                 q_size = ALIGN(num_descs * desc_size, PAGE_SIZE);
594                 new->q_size = PAGE_SIZE + q_size +
595                               ALIGN(num_descs * cq_desc_size, PAGE_SIZE);
596                 new->q_base = dma_alloc_coherent(dev, new->q_size,
597                                                  &new->q_base_pa, GFP_KERNEL);
598                 if (!new->q_base) {
599                         netdev_err(lif->netdev, "Cannot allocate qcq DMA memory\n");
600                         err = -ENOMEM;
601                         goto err_out_free_irq;
602                 }
603                 new->q.base = PTR_ALIGN(new->q_base, PAGE_SIZE);
604                 new->q.base_pa = ALIGN(new->q_base_pa, PAGE_SIZE);
605
606                 /* Base the NotifyQ cq.base off of the ALIGNed q.base */
607                 new->cq.base = PTR_ALIGN(new->q.base + q_size, PAGE_SIZE);
608                 new->cq.base_pa = ALIGN(new->q_base_pa + q_size, PAGE_SIZE);
609                 new->cq.bound_q = &new->q;
610         } else {
611                 /* regular DMA q descriptors */
612                 new->q_size = PAGE_SIZE + (num_descs * desc_size);
613                 new->q_base = dma_alloc_coherent(dev, new->q_size, &new->q_base_pa,
614                                                  GFP_KERNEL);
615                 if (!new->q_base) {
616                         netdev_err(lif->netdev, "Cannot allocate queue DMA memory\n");
617                         err = -ENOMEM;
618                         goto err_out_free_irq;
619                 }
620                 new->q.base = PTR_ALIGN(new->q_base, PAGE_SIZE);
621                 new->q.base_pa = ALIGN(new->q_base_pa, PAGE_SIZE);
622
623                 if (flags & IONIC_QCQ_F_CMB_RINGS) {
624                         /* on-chip CMB q descriptors */
625                         new->cmb_q_size = num_descs * desc_size;
626                         new->cmb_order = order_base_2(new->cmb_q_size / PAGE_SIZE);
627
628                         err = ionic_get_cmb(lif, &new->cmb_pgid, &new->cmb_q_base_pa,
629                                             new->cmb_order);
630                         if (err) {
631                                 netdev_err(lif->netdev,
632                                            "Cannot allocate queue order %d from cmb: err %d\n",
633                                            new->cmb_order, err);
634                                 goto err_out_free_q;
635                         }
636
637                         new->cmb_q_base = ioremap_wc(new->cmb_q_base_pa, new->cmb_q_size);
638                         if (!new->cmb_q_base) {
639                                 netdev_err(lif->netdev, "Cannot map queue from cmb\n");
640                                 ionic_put_cmb(lif, new->cmb_pgid, new->cmb_order);
641                                 err = -ENOMEM;
642                                 goto err_out_free_q;
643                         }
644
645                         new->cmb_q_base_pa -= idev->phy_cmb_pages;
646                         new->q.cmb_base = new->cmb_q_base;
647                         new->q.cmb_base_pa = new->cmb_q_base_pa;
648                 }
649
650                 /* cq DMA descriptors */
651                 new->cq_size = PAGE_SIZE + (num_descs * cq_desc_size);
652                 new->cq_base = dma_alloc_coherent(dev, new->cq_size, &new->cq_base_pa,
653                                                   GFP_KERNEL);
654                 if (!new->cq_base) {
655                         netdev_err(lif->netdev, "Cannot allocate cq DMA memory\n");
656                         err = -ENOMEM;
657                         goto err_out_free_q;
658                 }
659                 new->cq.base = PTR_ALIGN(new->cq_base, PAGE_SIZE);
660                 new->cq.base_pa = ALIGN(new->cq_base_pa, PAGE_SIZE);
661                 new->cq.bound_q = &new->q;
662         }
663
664         if (flags & IONIC_QCQ_F_SG) {
665                 new->sg_size = PAGE_SIZE + (num_descs * sg_desc_size);
666                 new->sg_base = dma_alloc_coherent(dev, new->sg_size, &new->sg_base_pa,
667                                                   GFP_KERNEL);
668                 if (!new->sg_base) {
669                         netdev_err(lif->netdev, "Cannot allocate sg DMA memory\n");
670                         err = -ENOMEM;
671                         goto err_out_free_cq;
672                 }
673                 new->q.sg_base = PTR_ALIGN(new->sg_base, PAGE_SIZE);
674                 new->q.sg_base_pa = ALIGN(new->sg_base_pa, PAGE_SIZE);
675         }
676
677         INIT_WORK(&new->dim.work, ionic_dim_work);
678         new->dim.mode = DIM_CQ_PERIOD_MODE_START_FROM_CQE;
679
680         *qcq = new;
681
682         return 0;
683
684 err_out_free_cq:
685         dma_free_coherent(dev, new->cq_size, new->cq_base, new->cq_base_pa);
686 err_out_free_q:
687         if (new->cmb_q_base) {
688                 iounmap(new->cmb_q_base);
689                 ionic_put_cmb(lif, new->cmb_pgid, new->cmb_order);
690         }
691         dma_free_coherent(dev, new->q_size, new->q_base, new->q_base_pa);
692 err_out_free_irq:
693         if (flags & IONIC_QCQ_F_INTR) {
694                 devm_free_irq(dev, new->intr.vector, &new->napi);
695                 ionic_intr_free(lif->ionic, new->intr.index);
696         }
697 err_out_free_q_info:
698         vfree(new->q.info);
699 err_out_free_qcq:
700         devm_kfree(dev, new);
701 err_out:
702         dev_err(dev, "qcq alloc of %s%d failed %d\n", name, index, err);
703         return err;
704 }
705
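/* Allocate the adminq, the optional notifyq (which shares the adminq
 * interrupt), and the arrays that hold the per-queue tx/rx qcq and
 * stats pointers.
 */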
706 static int ionic_qcqs_alloc(struct ionic_lif *lif)
707 {
708         struct device *dev = lif->ionic->dev;
709         unsigned int flags;
710         int err;
711
712         flags = IONIC_QCQ_F_INTR;
713         err = ionic_qcq_alloc(lif, IONIC_QTYPE_ADMINQ, 0, "admin", flags,
714                               IONIC_ADMINQ_LENGTH,
715                               sizeof(struct ionic_admin_cmd),
716                               sizeof(struct ionic_admin_comp),
717                               0,
718                               sizeof(struct ionic_admin_desc_info),
719                               lif->kern_pid, &lif->adminqcq);
720         if (err)
721                 return err;
722         ionic_debugfs_add_qcq(lif, lif->adminqcq);
723
724         if (lif->ionic->nnqs_per_lif) {
725                 flags = IONIC_QCQ_F_NOTIFYQ;
726                 err = ionic_qcq_alloc(lif, IONIC_QTYPE_NOTIFYQ, 0, "notifyq",
727                                       flags, IONIC_NOTIFYQ_LENGTH,
728                                       sizeof(struct ionic_notifyq_cmd),
729                                       sizeof(union ionic_notifyq_comp),
730                                       0,
731                                       sizeof(struct ionic_admin_desc_info),
732                                       lif->kern_pid, &lif->notifyqcq);
733                 if (err)
734                         goto err_out;
735                 ionic_debugfs_add_qcq(lif, lif->notifyqcq);
736
737                 /* Let the notifyq ride on the adminq interrupt */
738                 ionic_link_qcq_interrupts(lif->adminqcq, lif->notifyqcq);
739         }
740
741         err = -ENOMEM;
742         lif->txqcqs = devm_kcalloc(dev, lif->ionic->ntxqs_per_lif,
743                                    sizeof(*lif->txqcqs), GFP_KERNEL);
744         if (!lif->txqcqs)
745                 goto err_out;
746         lif->rxqcqs = devm_kcalloc(dev, lif->ionic->nrxqs_per_lif,
747                                    sizeof(*lif->rxqcqs), GFP_KERNEL);
748         if (!lif->rxqcqs)
749                 goto err_out;
750
751         lif->txqstats = devm_kcalloc(dev, lif->ionic->ntxqs_per_lif + 1,
752                                      sizeof(*lif->txqstats), GFP_KERNEL);
753         if (!lif->txqstats)
754                 goto err_out;
755         lif->rxqstats = devm_kcalloc(dev, lif->ionic->nrxqs_per_lif + 1,
756                                      sizeof(*lif->rxqstats), GFP_KERNEL);
757         if (!lif->rxqstats)
758                 goto err_out;
759
760         return 0;
761
762 err_out:
763         ionic_qcqs_free(lif);
764         return err;
765 }
766
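/* Reset the ring indices and zero the descriptor memory before the
 * queue is (re)initialized on the device.
 */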
767 static void ionic_qcq_sanitize(struct ionic_qcq *qcq)
768 {
769         qcq->q.tail_idx = 0;
770         qcq->q.head_idx = 0;
771         qcq->cq.tail_idx = 0;
772         qcq->cq.done_color = 1;
773         memset(qcq->q_base, 0, qcq->q_size);
774         if (qcq->cmb_q_base)
775                 memset_io(qcq->cmb_q_base, 0, qcq->cmb_q_size);
776         memset(qcq->cq_base, 0, qcq->cq_size);
777         memset(qcq->sg_base, 0, qcq->sg_size);
778 }
779
780 static int ionic_lif_txq_init(struct ionic_lif *lif, struct ionic_qcq *qcq)
781 {
782         struct device *dev = lif->ionic->dev;
783         struct ionic_queue *q = &qcq->q;
784         struct ionic_cq *cq = &qcq->cq;
785         struct ionic_admin_ctx ctx = {
786                 .work = COMPLETION_INITIALIZER_ONSTACK(ctx.work),
787                 .cmd.q_init = {
788                         .opcode = IONIC_CMD_Q_INIT,
789                         .lif_index = cpu_to_le16(lif->index),
790                         .type = q->type,
791                         .ver = lif->qtype_info[q->type].version,
792                         .index = cpu_to_le32(q->index),
793                         .flags = cpu_to_le16(IONIC_QINIT_F_IRQ |
794                                              IONIC_QINIT_F_SG),
795                         .intr_index = cpu_to_le16(qcq->intr.index),
796                         .pid = cpu_to_le16(q->pid),
797                         .ring_size = ilog2(q->num_descs),
798                         .ring_base = cpu_to_le64(q->base_pa),
799                         .cq_ring_base = cpu_to_le64(cq->base_pa),
800                         .sg_ring_base = cpu_to_le64(q->sg_base_pa),
801                         .features = cpu_to_le64(q->features),
802                 },
803         };
804         int err;
805
806         if (qcq->flags & IONIC_QCQ_F_CMB_RINGS) {
807                 ctx.cmd.q_init.flags |= cpu_to_le16(IONIC_QINIT_F_CMB);
808                 ctx.cmd.q_init.ring_base = cpu_to_le64(qcq->cmb_q_base_pa);
809         }
810
811         dev_dbg(dev, "txq_init.pid %d\n", ctx.cmd.q_init.pid);
812         dev_dbg(dev, "txq_init.index %d\n", ctx.cmd.q_init.index);
813         dev_dbg(dev, "txq_init.ring_base 0x%llx\n", ctx.cmd.q_init.ring_base);
814         dev_dbg(dev, "txq_init.ring_size %d\n", ctx.cmd.q_init.ring_size);
815         dev_dbg(dev, "txq_init.cq_ring_base 0x%llx\n", ctx.cmd.q_init.cq_ring_base);
816         dev_dbg(dev, "txq_init.sg_ring_base 0x%llx\n", ctx.cmd.q_init.sg_ring_base);
817         dev_dbg(dev, "txq_init.flags 0x%x\n", ctx.cmd.q_init.flags);
818         dev_dbg(dev, "txq_init.ver %d\n", ctx.cmd.q_init.ver);
819         dev_dbg(dev, "txq_init.intr_index %d\n", ctx.cmd.q_init.intr_index);
820
821         ionic_qcq_sanitize(qcq);
822
823         err = ionic_adminq_post_wait(lif, &ctx);
824         if (err)
825                 return err;
826
827         q->hw_type = ctx.comp.q_init.hw_type;
828         q->hw_index = le32_to_cpu(ctx.comp.q_init.hw_index);
829         q->dbval = IONIC_DBELL_QID(q->hw_index);
830
831         dev_dbg(dev, "txq->hw_type %d\n", q->hw_type);
832         dev_dbg(dev, "txq->hw_index %d\n", q->hw_index);
833
834         q->dbell_deadline = IONIC_TX_DOORBELL_DEADLINE;
835         q->dbell_jiffies = jiffies;
836
837         if (test_bit(IONIC_LIF_F_SPLIT_INTR, lif->state)) {
838                 netif_napi_add(lif->netdev, &qcq->napi, ionic_tx_napi);
839                 qcq->napi_qcq = qcq;
840                 timer_setup(&qcq->napi_deadline, ionic_napi_deadline, 0);
841         }
842
843         qcq->flags |= IONIC_QCQ_F_INITED;
844
845         return 0;
846 }
847
848 static int ionic_lif_rxq_init(struct ionic_lif *lif, struct ionic_qcq *qcq)
849 {
850         struct device *dev = lif->ionic->dev;
851         struct ionic_queue *q = &qcq->q;
852         struct ionic_cq *cq = &qcq->cq;
853         struct ionic_admin_ctx ctx = {
854                 .work = COMPLETION_INITIALIZER_ONSTACK(ctx.work),
855                 .cmd.q_init = {
856                         .opcode = IONIC_CMD_Q_INIT,
857                         .lif_index = cpu_to_le16(lif->index),
858                         .type = q->type,
859                         .ver = lif->qtype_info[q->type].version,
860                         .index = cpu_to_le32(q->index),
861                         .flags = cpu_to_le16(IONIC_QINIT_F_IRQ),
862                         .intr_index = cpu_to_le16(cq->bound_intr->index),
863                         .pid = cpu_to_le16(q->pid),
864                         .ring_size = ilog2(q->num_descs),
865                         .ring_base = cpu_to_le64(q->base_pa),
866                         .cq_ring_base = cpu_to_le64(cq->base_pa),
867                         .sg_ring_base = cpu_to_le64(q->sg_base_pa),
868                         .features = cpu_to_le64(q->features),
869                 },
870         };
871         int err;
872
873         q->partner = &lif->txqcqs[q->index]->q;
874         q->partner->partner = q;
875
876         if (!lif->xdp_prog ||
877             (lif->xdp_prog->aux && lif->xdp_prog->aux->xdp_has_frags))
878                 ctx.cmd.q_init.flags |= cpu_to_le16(IONIC_QINIT_F_SG);
879
880         if (qcq->flags & IONIC_QCQ_F_CMB_RINGS) {
881                 ctx.cmd.q_init.flags |= cpu_to_le16(IONIC_QINIT_F_CMB);
882                 ctx.cmd.q_init.ring_base = cpu_to_le64(qcq->cmb_q_base_pa);
883         }
884
885         dev_dbg(dev, "rxq_init.pid %d\n", ctx.cmd.q_init.pid);
886         dev_dbg(dev, "rxq_init.index %d\n", ctx.cmd.q_init.index);
887         dev_dbg(dev, "rxq_init.ring_base 0x%llx\n", ctx.cmd.q_init.ring_base);
888         dev_dbg(dev, "rxq_init.ring_size %d\n", ctx.cmd.q_init.ring_size);
889         dev_dbg(dev, "rxq_init.flags 0x%x\n", ctx.cmd.q_init.flags);
890         dev_dbg(dev, "rxq_init.ver %d\n", ctx.cmd.q_init.ver);
891         dev_dbg(dev, "rxq_init.intr_index %d\n", ctx.cmd.q_init.intr_index);
892
893         ionic_qcq_sanitize(qcq);
894
895         err = ionic_adminq_post_wait(lif, &ctx);
896         if (err)
897                 return err;
898
899         q->hw_type = ctx.comp.q_init.hw_type;
900         q->hw_index = le32_to_cpu(ctx.comp.q_init.hw_index);
901         q->dbval = IONIC_DBELL_QID(q->hw_index);
902
903         dev_dbg(dev, "rxq->hw_type %d\n", q->hw_type);
904         dev_dbg(dev, "rxq->hw_index %d\n", q->hw_index);
905
906         q->dbell_deadline = IONIC_RX_MIN_DOORBELL_DEADLINE;
907         q->dbell_jiffies = jiffies;
908
909         if (test_bit(IONIC_LIF_F_SPLIT_INTR, lif->state))
910                 netif_napi_add(lif->netdev, &qcq->napi, ionic_rx_napi);
911         else
912                 netif_napi_add(lif->netdev, &qcq->napi, ionic_txrx_napi);
913
914         qcq->napi_qcq = qcq;
915         timer_setup(&qcq->napi_deadline, ionic_napi_deadline, 0);
916
917         qcq->flags |= IONIC_QCQ_F_INITED;
918
919         return 0;
920 }
921
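/* Create the dedicated tx queue used for hardware-timestamped packets,
 * sharing the adminq interrupt, and bring it up if the netdev is
 * already running.
 */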
922 int ionic_lif_create_hwstamp_txq(struct ionic_lif *lif)
923 {
924         unsigned int num_desc, desc_sz, comp_sz, sg_desc_sz;
925         unsigned int txq_i, flags;
926         struct ionic_qcq *txq;
927         u64 features;
928         int err;
929
930         if (lif->hwstamp_txq)
931                 return 0;
932
933         features = IONIC_Q_F_2X_CQ_DESC | IONIC_TXQ_F_HWSTAMP;
934
935         num_desc = IONIC_MIN_TXRX_DESC;
936         desc_sz = sizeof(struct ionic_txq_desc);
937         comp_sz = 2 * sizeof(struct ionic_txq_comp);
938
939         if (lif->qtype_info[IONIC_QTYPE_TXQ].version >= 1 &&
940             lif->qtype_info[IONIC_QTYPE_TXQ].sg_desc_sz == sizeof(struct ionic_txq_sg_desc_v1))
941                 sg_desc_sz = sizeof(struct ionic_txq_sg_desc_v1);
942         else
943                 sg_desc_sz = sizeof(struct ionic_txq_sg_desc);
944
945         txq_i = lif->ionic->ntxqs_per_lif;
946         flags = IONIC_QCQ_F_TX_STATS | IONIC_QCQ_F_SG;
947
948         err = ionic_qcq_alloc(lif, IONIC_QTYPE_TXQ, txq_i, "hwstamp_tx", flags,
949                               num_desc, desc_sz, comp_sz, sg_desc_sz,
950                               sizeof(struct ionic_tx_desc_info),
951                               lif->kern_pid, &txq);
952         if (err)
953                 goto err_qcq_alloc;
954
955         txq->q.features = features;
956
957         ionic_link_qcq_interrupts(lif->adminqcq, txq);
958         ionic_debugfs_add_qcq(lif, txq);
959
960         lif->hwstamp_txq = txq;
961
962         if (netif_running(lif->netdev)) {
963                 err = ionic_lif_txq_init(lif, txq);
964                 if (err)
965                         goto err_qcq_init;
966
967                 if (test_bit(IONIC_LIF_F_UP, lif->state)) {
968                         err = ionic_qcq_enable(txq);
969                         if (err)
970                                 goto err_qcq_enable;
971                 }
972         }
973
974         return 0;
975
976 err_qcq_enable:
977         ionic_lif_qcq_deinit(lif, txq);
978 err_qcq_init:
979         lif->hwstamp_txq = NULL;
980         ionic_debugfs_del_qcq(txq);
981         ionic_qcq_free(lif, txq);
982         devm_kfree(lif->ionic->dev, txq);
983 err_qcq_alloc:
984         return err;
985 }
986
987 int ionic_lif_create_hwstamp_rxq(struct ionic_lif *lif)
988 {
989         unsigned int num_desc, desc_sz, comp_sz, sg_desc_sz;
990         unsigned int rxq_i, flags;
991         struct ionic_qcq *rxq;
992         u64 features;
993         int err;
994
995         if (lif->hwstamp_rxq)
996                 return 0;
997
998         features = IONIC_Q_F_2X_CQ_DESC | IONIC_RXQ_F_HWSTAMP;
999
1000         num_desc = IONIC_MIN_TXRX_DESC;
1001         desc_sz = sizeof(struct ionic_rxq_desc);
1002         comp_sz = 2 * sizeof(struct ionic_rxq_comp);
1003         sg_desc_sz = sizeof(struct ionic_rxq_sg_desc);
1004
1005         rxq_i = lif->ionic->nrxqs_per_lif;
1006         flags = IONIC_QCQ_F_RX_STATS | IONIC_QCQ_F_SG;
1007
1008         err = ionic_qcq_alloc(lif, IONIC_QTYPE_RXQ, rxq_i, "hwstamp_rx", flags,
1009                               num_desc, desc_sz, comp_sz, sg_desc_sz,
1010                               sizeof(struct ionic_rx_desc_info),
1011                               lif->kern_pid, &rxq);
1012         if (err)
1013                 goto err_qcq_alloc;
1014
1015         rxq->q.features = features;
1016
1017         ionic_link_qcq_interrupts(lif->adminqcq, rxq);
1018         ionic_debugfs_add_qcq(lif, rxq);
1019
1020         lif->hwstamp_rxq = rxq;
1021
1022         if (netif_running(lif->netdev)) {
1023                 err = ionic_lif_rxq_init(lif, rxq);
1024                 if (err)
1025                         goto err_qcq_init;
1026
1027                 if (test_bit(IONIC_LIF_F_UP, lif->state)) {
1028                         ionic_rx_fill(&rxq->q);
1029                         err = ionic_qcq_enable(rxq);
1030                         if (err)
1031                                 goto err_qcq_enable;
1032                 }
1033         }
1034
1035         return 0;
1036
1037 err_qcq_enable:
1038         ionic_lif_qcq_deinit(lif, rxq);
1039 err_qcq_init:
1040         lif->hwstamp_rxq = NULL;
1041         ionic_debugfs_del_qcq(rxq);
1042         ionic_qcq_free(lif, rxq);
1043         devm_kfree(lif->ionic->dev, rxq);
1044 err_qcq_alloc:
1045         return err;
1046 }
1047
1048 int ionic_lif_config_hwstamp_rxq_all(struct ionic_lif *lif, bool rx_all)
1049 {
1050         struct ionic_queue_params qparam;
1051
1052         ionic_init_queue_params(lif, &qparam);
1053
1054         if (rx_all)
1055                 qparam.rxq_features = IONIC_Q_F_2X_CQ_DESC | IONIC_RXQ_F_HWSTAMP;
1056         else
1057                 qparam.rxq_features = 0;
1058
1059         /* if we're not running, just set the values and return */
1060         if (!netif_running(lif->netdev)) {
1061                 lif->rxq_features = qparam.rxq_features;
1062                 return 0;
1063         }
1064
1065         return ionic_reconfigure_queues(lif, &qparam);
1066 }
1067
1068 int ionic_lif_set_hwstamp_txmode(struct ionic_lif *lif, u16 txstamp_mode)
1069 {
1070         struct ionic_admin_ctx ctx = {
1071                 .work = COMPLETION_INITIALIZER_ONSTACK(ctx.work),
1072                 .cmd.lif_setattr = {
1073                         .opcode = IONIC_CMD_LIF_SETATTR,
1074                         .index = cpu_to_le16(lif->index),
1075                         .attr = IONIC_LIF_ATTR_TXSTAMP,
1076                         .txstamp_mode = cpu_to_le16(txstamp_mode),
1077                 },
1078         };
1079
1080         return ionic_adminq_post_wait(lif, &ctx);
1081 }
1082
1083 static void ionic_lif_del_hwstamp_rxfilt(struct ionic_lif *lif)
1084 {
1085         struct ionic_admin_ctx ctx = {
1086                 .work = COMPLETION_INITIALIZER_ONSTACK(ctx.work),
1087                 .cmd.rx_filter_del = {
1088                         .opcode = IONIC_CMD_RX_FILTER_DEL,
1089                         .lif_index = cpu_to_le16(lif->index),
1090                 },
1091         };
1092         struct ionic_rx_filter *f;
1093         u32 filter_id;
1094         int err;
1095
1096         spin_lock_bh(&lif->rx_filters.lock);
1097
1098         f = ionic_rx_filter_rxsteer(lif);
1099         if (!f) {
1100                 spin_unlock_bh(&lif->rx_filters.lock);
1101                 return;
1102         }
1103
1104         filter_id = f->filter_id;
1105         ionic_rx_filter_free(lif, f);
1106
1107         spin_unlock_bh(&lif->rx_filters.lock);
1108
1109         netdev_dbg(lif->netdev, "rx_filter del RXSTEER (id %d)\n", filter_id);
1110
1111         ctx.cmd.rx_filter_del.filter_id = cpu_to_le32(filter_id);
1112
1113         err = ionic_adminq_post_wait(lif, &ctx);
1114         if (err && err != -EEXIST)
1115                 netdev_dbg(lif->netdev, "failed to delete rx_filter RXSTEER (id %d)\n", filter_id);
1116 }
1117
1118 static int ionic_lif_add_hwstamp_rxfilt(struct ionic_lif *lif, u64 pkt_class)
1119 {
1120         struct ionic_admin_ctx ctx = {
1121                 .work = COMPLETION_INITIALIZER_ONSTACK(ctx.work),
1122                 .cmd.rx_filter_add = {
1123                         .opcode = IONIC_CMD_RX_FILTER_ADD,
1124                         .lif_index = cpu_to_le16(lif->index),
1125                         .match = cpu_to_le16(IONIC_RX_FILTER_STEER_PKTCLASS),
1126                         .pkt_class = cpu_to_le64(pkt_class),
1127                 },
1128         };
1129         u8 qtype;
1130         u32 qid;
1131         int err;
1132
1133         if (!lif->hwstamp_rxq)
1134                 return -EINVAL;
1135
1136         qtype = lif->hwstamp_rxq->q.type;
1137         ctx.cmd.rx_filter_add.qtype = qtype;
1138
1139         qid = lif->hwstamp_rxq->q.index;
1140         ctx.cmd.rx_filter_add.qid = cpu_to_le32(qid);
1141
1142         netdev_dbg(lif->netdev, "rx_filter add RXSTEER\n");
1143         err = ionic_adminq_post_wait(lif, &ctx);
1144         if (err && err != -EEXIST)
1145                 return err;
1146
1147         spin_lock_bh(&lif->rx_filters.lock);
1148         err = ionic_rx_filter_save(lif, 0, qid, 0, &ctx, IONIC_FILTER_STATE_SYNCED);
1149         spin_unlock_bh(&lif->rx_filters.lock);
1150
1151         return err;
1152 }
1153
1154 int ionic_lif_set_hwstamp_rxfilt(struct ionic_lif *lif, u64 pkt_class)
1155 {
1156         ionic_lif_del_hwstamp_rxfilt(lif);
1157
1158         if (!pkt_class)
1159                 return 0;
1160
1161         return ionic_lif_add_hwstamp_rxfilt(lif, pkt_class);
1162 }
1163
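/* NAPI poll for the adminq interrupt, which also services the notifyq
 * and, when present, the hwstamp tx/rx queues that share it.
 */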
1164 static int ionic_adminq_napi(struct napi_struct *napi, int budget)
1165 {
1166         struct ionic_intr_info *intr = napi_to_cq(napi)->bound_intr;
1167         struct ionic_lif *lif = napi_to_cq(napi)->lif;
1168         struct ionic_dev *idev = &lif->ionic->idev;
1169         unsigned long irqflags;
1170         unsigned int flags = 0;
1171         bool resched = false;
1172         int rx_work = 0;
1173         int tx_work = 0;
1174         int n_work = 0;
1175         int a_work = 0;
1176         int work_done;
1177         int credits;
1178
1179         if (lif->notifyqcq && lif->notifyqcq->flags & IONIC_QCQ_F_INITED)
1180                 n_work = ionic_cq_service(&lif->notifyqcq->cq, budget,
1181                                           ionic_notifyq_service, NULL, NULL);
1182
1183         spin_lock_irqsave(&lif->adminq_lock, irqflags);
1184         if (lif->adminqcq && lif->adminqcq->flags & IONIC_QCQ_F_INITED)
1185                 a_work = ionic_cq_service(&lif->adminqcq->cq, budget,
1186                                           ionic_adminq_service, NULL, NULL);
1187         spin_unlock_irqrestore(&lif->adminq_lock, irqflags);
1188
1189         if (lif->hwstamp_rxq)
1190                 rx_work = ionic_cq_service(&lif->hwstamp_rxq->cq, budget,
1191                                            ionic_rx_service, NULL, NULL);
1192
1193         if (lif->hwstamp_txq)
1194                 tx_work = ionic_tx_cq_service(&lif->hwstamp_txq->cq, budget);
1195
1196         work_done = max(max(n_work, a_work), max(rx_work, tx_work));
1197         if (work_done < budget && napi_complete_done(napi, work_done)) {
1198                 flags |= IONIC_INTR_CRED_UNMASK;
1199                 intr->rearm_count++;
1200         }
1201
1202         if (work_done || flags) {
1203                 flags |= IONIC_INTR_CRED_RESET_COALESCE;
1204                 credits = n_work + a_work + rx_work + tx_work;
1205                 ionic_intr_credits(idev->intr_ctrl, intr->index, credits, flags);
1206         }
1207
1208         if (!a_work && ionic_adminq_poke_doorbell(&lif->adminqcq->q))
1209                 resched = true;
1210         if (lif->hwstamp_rxq && !rx_work && ionic_rxq_poke_doorbell(&lif->hwstamp_rxq->q))
1211                 resched = true;
1212         if (lif->hwstamp_txq && !tx_work && ionic_txq_poke_doorbell(&lif->hwstamp_txq->q))
1213                 resched = true;
1214         if (resched)
1215                 mod_timer(&lif->adminqcq->napi_deadline,
1216                           jiffies + IONIC_NAPI_DEADLINE);
1217
1218         return work_done;
1219 }
1220
1221 void ionic_get_stats64(struct net_device *netdev,
1222                        struct rtnl_link_stats64 *ns)
1223 {
1224         struct ionic_lif *lif = netdev_priv(netdev);
1225         struct ionic_lif_stats *ls;
1226
1227         memset(ns, 0, sizeof(*ns));
1228         ls = &lif->info->stats;
1229
1230         ns->rx_packets = le64_to_cpu(ls->rx_ucast_packets) +
1231                          le64_to_cpu(ls->rx_mcast_packets) +
1232                          le64_to_cpu(ls->rx_bcast_packets);
1233
1234         ns->tx_packets = le64_to_cpu(ls->tx_ucast_packets) +
1235                          le64_to_cpu(ls->tx_mcast_packets) +
1236                          le64_to_cpu(ls->tx_bcast_packets);
1237
1238         ns->rx_bytes = le64_to_cpu(ls->rx_ucast_bytes) +
1239                        le64_to_cpu(ls->rx_mcast_bytes) +
1240                        le64_to_cpu(ls->rx_bcast_bytes);
1241
1242         ns->tx_bytes = le64_to_cpu(ls->tx_ucast_bytes) +
1243                        le64_to_cpu(ls->tx_mcast_bytes) +
1244                        le64_to_cpu(ls->tx_bcast_bytes);
1245
1246         ns->rx_dropped = le64_to_cpu(ls->rx_ucast_drop_packets) +
1247                          le64_to_cpu(ls->rx_mcast_drop_packets) +
1248                          le64_to_cpu(ls->rx_bcast_drop_packets);
1249
1250         ns->tx_dropped = le64_to_cpu(ls->tx_ucast_drop_packets) +
1251                          le64_to_cpu(ls->tx_mcast_drop_packets) +
1252                          le64_to_cpu(ls->tx_bcast_drop_packets);
1253
1254         ns->multicast = le64_to_cpu(ls->rx_mcast_packets);
1255
1256         ns->rx_over_errors = le64_to_cpu(ls->rx_queue_empty);
1257
1258         ns->rx_missed_errors = le64_to_cpu(ls->rx_dma_error) +
1259                                le64_to_cpu(ls->rx_queue_disabled) +
1260                                le64_to_cpu(ls->rx_desc_fetch_error) +
1261                                le64_to_cpu(ls->rx_desc_data_error);
1262
1263         ns->tx_aborted_errors = le64_to_cpu(ls->tx_dma_error) +
1264                                 le64_to_cpu(ls->tx_queue_disabled) +
1265                                 le64_to_cpu(ls->tx_desc_fetch_error) +
1266                                 le64_to_cpu(ls->tx_desc_data_error);
1267
1268         ns->rx_errors = ns->rx_over_errors +
1269                         ns->rx_missed_errors;
1270
1271         ns->tx_errors = ns->tx_aborted_errors;
1272 }
1273
1274 static int ionic_addr_add(struct net_device *netdev, const u8 *addr)
1275 {
1276         return ionic_lif_list_addr(netdev_priv(netdev), addr, ADD_ADDR);
1277 }
1278
1279 static int ionic_addr_del(struct net_device *netdev, const u8 *addr)
1280 {
1281         /* Don't delete our own address from the uc list */
1282         if (ether_addr_equal(addr, netdev->dev_addr))
1283                 return 0;
1284
1285         return ionic_lif_list_addr(netdev_priv(netdev), addr, DEL_ADDR);
1286 }
1287
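/* Compute the desired rx_mode from the netdev flags and the filter
 * table state and push it to the device if it changed.
 */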
1288 void ionic_lif_rx_mode(struct ionic_lif *lif)
1289 {
1290         struct net_device *netdev = lif->netdev;
1291         unsigned int nfilters;
1292         unsigned int nd_flags;
1293         char buf[128];
1294         u16 rx_mode;
1295         int i;
1296 #define REMAIN(__x) (sizeof(buf) - (__x))
1297
1298         mutex_lock(&lif->config_lock);
1299
1300         /* grab the flags once for local use */
1301         nd_flags = netdev->flags;
1302
1303         rx_mode = IONIC_RX_MODE_F_UNICAST;
1304         rx_mode |= (nd_flags & IFF_MULTICAST) ? IONIC_RX_MODE_F_MULTICAST : 0;
1305         rx_mode |= (nd_flags & IFF_BROADCAST) ? IONIC_RX_MODE_F_BROADCAST : 0;
1306         rx_mode |= (nd_flags & IFF_PROMISC) ? IONIC_RX_MODE_F_PROMISC : 0;
1307         rx_mode |= (nd_flags & IFF_ALLMULTI) ? IONIC_RX_MODE_F_ALLMULTI : 0;
1308
1309         /* sync the filters */
1310         ionic_rx_filter_sync(lif);
1311
1312         /* Check for filter overflow: if the unicast/multicast or vlan
1313          * filter tables are full, fall back to NIC PROMISC and ALLMULTI.
1314          * Otherwise, keep PROMISC and ALLMULTI only when the netdev
1315          * flags still request them, so the NIC can drop back out of
1316          * promiscuous mode once the overflow condition clears.
1317          */
1318         nfilters = le32_to_cpu(lif->identity->eth.max_ucast_filters);
1319
1320         if (((lif->nucast + lif->nmcast) >= nfilters) ||
1321             (lif->max_vlans && lif->nvlans >= lif->max_vlans)) {
1322                 rx_mode |= IONIC_RX_MODE_F_PROMISC;
1323                 rx_mode |= IONIC_RX_MODE_F_ALLMULTI;
1324         } else {
1325                 if (!(nd_flags & IFF_PROMISC))
1326                         rx_mode &= ~IONIC_RX_MODE_F_PROMISC;
1327                 if (!(nd_flags & IFF_ALLMULTI))
1328                         rx_mode &= ~IONIC_RX_MODE_F_ALLMULTI;
1329         }
1330
1331         i = scnprintf(buf, sizeof(buf), "rx_mode 0x%04x -> 0x%04x:",
1332                       lif->rx_mode, rx_mode);
1333         if (rx_mode & IONIC_RX_MODE_F_UNICAST)
1334                 i += scnprintf(&buf[i], REMAIN(i), " RX_MODE_F_UNICAST");
1335         if (rx_mode & IONIC_RX_MODE_F_MULTICAST)
1336                 i += scnprintf(&buf[i], REMAIN(i), " RX_MODE_F_MULTICAST");
1337         if (rx_mode & IONIC_RX_MODE_F_BROADCAST)
1338                 i += scnprintf(&buf[i], REMAIN(i), " RX_MODE_F_BROADCAST");
1339         if (rx_mode & IONIC_RX_MODE_F_PROMISC)
1340                 i += scnprintf(&buf[i], REMAIN(i), " RX_MODE_F_PROMISC");
1341         if (rx_mode & IONIC_RX_MODE_F_ALLMULTI)
1342                 i += scnprintf(&buf[i], REMAIN(i), " RX_MODE_F_ALLMULTI");
1343         if (rx_mode & IONIC_RX_MODE_F_RDMA_SNIFFER)
1344                 i += scnprintf(&buf[i], REMAIN(i), " RX_MODE_F_RDMA_SNIFFER");
1345         netdev_dbg(netdev, "lif%d %s\n", lif->index, buf);
1346
1347         if (lif->rx_mode != rx_mode) {
1348                 struct ionic_admin_ctx ctx = {
1349                         .work = COMPLETION_INITIALIZER_ONSTACK(ctx.work),
1350                         .cmd.rx_mode_set = {
1351                                 .opcode = IONIC_CMD_RX_MODE_SET,
1352                                 .lif_index = cpu_to_le16(lif->index),
1353                         },
1354                 };
1355                 int err;
1356
1357                 ctx.cmd.rx_mode_set.rx_mode = cpu_to_le16(rx_mode);
1358                 err = ionic_adminq_post_wait(lif, &ctx);
1359                 if (err)
1360                         netdev_warn(netdev, "set rx_mode 0x%04x failed: %d\n",
1361                                     rx_mode, err);
1362                 else
1363                         lif->rx_mode = rx_mode;
1364         }
1365
1366         mutex_unlock(&lif->config_lock);
1367 }
1368
1369 static void ionic_ndo_set_rx_mode(struct net_device *netdev)
1370 {
1371         struct ionic_lif *lif = netdev_priv(netdev);
1372         struct ionic_deferred_work *work;
1373
1374         /* Sync the kernel filter list with the driver filter list */
1375         __dev_uc_sync(netdev, ionic_addr_add, ionic_addr_del);
1376         __dev_mc_sync(netdev, ionic_addr_add, ionic_addr_del);
1377
1378         /* Shove off the rest of the rxmode work to the work task
1379          * which will include syncing the filters to the firmware.
1380          */
1381         work = kzalloc(sizeof(*work), GFP_ATOMIC);
1382         if (!work) {
1383                 netdev_err(lif->netdev, "rxmode change dropped\n");
1384                 return;
1385         }
1386         work->type = IONIC_DW_TYPE_RX_MODE;
1387         netdev_dbg(lif->netdev, "deferred: rx_mode\n");
1388         ionic_lif_deferred_enqueue(&lif->deferred, work);
1389 }
1390
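/* Translate kernel netdev feature bits into the device's feature bits. */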
1391 static __le64 ionic_netdev_features_to_nic(netdev_features_t features)
1392 {
1393         u64 wanted = 0;
1394
1395         if (features & NETIF_F_HW_VLAN_CTAG_TX)
1396                 wanted |= IONIC_ETH_HW_VLAN_TX_TAG;
1397         if (features & NETIF_F_HW_VLAN_CTAG_RX)
1398                 wanted |= IONIC_ETH_HW_VLAN_RX_STRIP;
1399         if (features & NETIF_F_HW_VLAN_CTAG_FILTER)
1400                 wanted |= IONIC_ETH_HW_VLAN_RX_FILTER;
1401         if (features & NETIF_F_RXHASH)
1402                 wanted |= IONIC_ETH_HW_RX_HASH;
1403         if (features & NETIF_F_RXCSUM)
1404                 wanted |= IONIC_ETH_HW_RX_CSUM;
1405         if (features & NETIF_F_SG)
1406                 wanted |= IONIC_ETH_HW_TX_SG;
1407         if (features & NETIF_F_HW_CSUM)
1408                 wanted |= IONIC_ETH_HW_TX_CSUM;
1409         if (features & NETIF_F_TSO)
1410                 wanted |= IONIC_ETH_HW_TSO;
1411         if (features & NETIF_F_TSO6)
1412                 wanted |= IONIC_ETH_HW_TSO_IPV6;
1413         if (features & NETIF_F_TSO_ECN)
1414                 wanted |= IONIC_ETH_HW_TSO_ECN;
1415         if (features & NETIF_F_GSO_GRE)
1416                 wanted |= IONIC_ETH_HW_TSO_GRE;
1417         if (features & NETIF_F_GSO_GRE_CSUM)
1418                 wanted |= IONIC_ETH_HW_TSO_GRE_CSUM;
1419         if (features & NETIF_F_GSO_IPXIP4)
1420                 wanted |= IONIC_ETH_HW_TSO_IPXIP4;
1421         if (features & NETIF_F_GSO_IPXIP6)
1422                 wanted |= IONIC_ETH_HW_TSO_IPXIP6;
1423         if (features & NETIF_F_GSO_UDP_TUNNEL)
1424                 wanted |= IONIC_ETH_HW_TSO_UDP;
1425         if (features & NETIF_F_GSO_UDP_TUNNEL_CSUM)
1426                 wanted |= IONIC_ETH_HW_TSO_UDP_CSUM;
1427
1428         return cpu_to_le64(wanted);
1429 }
1430
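/* Ask the firmware for a feature set via LIF_SETATTR/IONIC_LIF_ATTR_FEATURES.
 * lif->hw_features ends up as the intersection of what was requested and
 * what the firmware acknowledged in the completion, so callers should
 * check lif->hw_features rather than assume the whole request took effect.
 */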
1431 static int ionic_set_nic_features(struct ionic_lif *lif,
1432                                   netdev_features_t features)
1433 {
1434         struct device *dev = lif->ionic->dev;
1435         struct ionic_admin_ctx ctx = {
1436                 .work = COMPLETION_INITIALIZER_ONSTACK(ctx.work),
1437                 .cmd.lif_setattr = {
1438                         .opcode = IONIC_CMD_LIF_SETATTR,
1439                         .index = cpu_to_le16(lif->index),
1440                         .attr = IONIC_LIF_ATTR_FEATURES,
1441                 },
1442         };
1443         u64 vlan_flags = IONIC_ETH_HW_VLAN_TX_TAG |
1444                          IONIC_ETH_HW_VLAN_RX_STRIP |
1445                          IONIC_ETH_HW_VLAN_RX_FILTER;
1446         u64 old_hw_features;
1447         int err;
1448
1449         ctx.cmd.lif_setattr.features = ionic_netdev_features_to_nic(features);
1450
1451         if (lif->phc)
1452                 ctx.cmd.lif_setattr.features |= cpu_to_le64(IONIC_ETH_HW_TIMESTAMP);
1453
1454         err = ionic_adminq_post_wait(lif, &ctx);
1455         if (err)
1456                 return err;
1457
1458         old_hw_features = lif->hw_features;
1459         lif->hw_features = le64_to_cpu(ctx.cmd.lif_setattr.features &
1460                                        ctx.comp.lif_setattr.features);
1461
1462         if ((old_hw_features ^ lif->hw_features) & IONIC_ETH_HW_RX_HASH)
1463                 ionic_lif_rss_config(lif, lif->rss_types, NULL, NULL);
1464
1465         if ((vlan_flags & le64_to_cpu(ctx.cmd.lif_setattr.features)) &&
1466             !(vlan_flags & le64_to_cpu(ctx.comp.lif_setattr.features)))
1467                 dev_info_once(lif->ionic->dev, "NIC does not support vlan offload, likely in SmartNIC mode\n");
1468
1469         if (lif->hw_features & IONIC_ETH_HW_VLAN_TX_TAG)
1470                 dev_dbg(dev, "feature ETH_HW_VLAN_TX_TAG\n");
1471         if (lif->hw_features & IONIC_ETH_HW_VLAN_RX_STRIP)
1472                 dev_dbg(dev, "feature ETH_HW_VLAN_RX_STRIP\n");
1473         if (lif->hw_features & IONIC_ETH_HW_VLAN_RX_FILTER)
1474                 dev_dbg(dev, "feature ETH_HW_VLAN_RX_FILTER\n");
1475         if (lif->hw_features & IONIC_ETH_HW_RX_HASH)
1476                 dev_dbg(dev, "feature ETH_HW_RX_HASH\n");
1477         if (lif->hw_features & IONIC_ETH_HW_TX_SG)
1478                 dev_dbg(dev, "feature ETH_HW_TX_SG\n");
1479         if (lif->hw_features & IONIC_ETH_HW_TX_CSUM)
1480                 dev_dbg(dev, "feature ETH_HW_TX_CSUM\n");
1481         if (lif->hw_features & IONIC_ETH_HW_RX_CSUM)
1482                 dev_dbg(dev, "feature ETH_HW_RX_CSUM\n");
1483         if (lif->hw_features & IONIC_ETH_HW_TSO)
1484                 dev_dbg(dev, "feature ETH_HW_TSO\n");
1485         if (lif->hw_features & IONIC_ETH_HW_TSO_IPV6)
1486                 dev_dbg(dev, "feature ETH_HW_TSO_IPV6\n");
1487         if (lif->hw_features & IONIC_ETH_HW_TSO_ECN)
1488                 dev_dbg(dev, "feature ETH_HW_TSO_ECN\n");
1489         if (lif->hw_features & IONIC_ETH_HW_TSO_GRE)
1490                 dev_dbg(dev, "feature ETH_HW_TSO_GRE\n");
1491         if (lif->hw_features & IONIC_ETH_HW_TSO_GRE_CSUM)
1492                 dev_dbg(dev, "feature ETH_HW_TSO_GRE_CSUM\n");
1493         if (lif->hw_features & IONIC_ETH_HW_TSO_IPXIP4)
1494                 dev_dbg(dev, "feature ETH_HW_TSO_IPXIP4\n");
1495         if (lif->hw_features & IONIC_ETH_HW_TSO_IPXIP6)
1496                 dev_dbg(dev, "feature ETH_HW_TSO_IPXIP6\n");
1497         if (lif->hw_features & IONIC_ETH_HW_TSO_UDP)
1498                 dev_dbg(dev, "feature ETH_HW_TSO_UDP\n");
1499         if (lif->hw_features & IONIC_ETH_HW_TSO_UDP_CSUM)
1500                 dev_dbg(dev, "feature ETH_HW_TSO_UDP_CSUM\n");
1501         if (lif->hw_features & IONIC_ETH_HW_TIMESTAMP)
1502                 dev_dbg(dev, "feature ETH_HW_TIMESTAMP\n");
1503
1504         return 0;
1505 }
1506
1507 static int ionic_init_nic_features(struct ionic_lif *lif)
1508 {
1509         struct net_device *netdev = lif->netdev;
1510         netdev_features_t features;
1511         int err;
1512
1513         /* set up what we expect to support by default */
1514         features = NETIF_F_HW_VLAN_CTAG_TX |
1515                    NETIF_F_HW_VLAN_CTAG_RX |
1516                    NETIF_F_HW_VLAN_CTAG_FILTER |
1517                    NETIF_F_SG |
1518                    NETIF_F_HW_CSUM |
1519                    NETIF_F_RXCSUM |
1520                    NETIF_F_TSO |
1521                    NETIF_F_TSO6 |
1522                    NETIF_F_TSO_ECN |
1523                    NETIF_F_GSO_GRE |
1524                    NETIF_F_GSO_GRE_CSUM |
1525                    NETIF_F_GSO_IPXIP4 |
1526                    NETIF_F_GSO_IPXIP6 |
1527                    NETIF_F_GSO_UDP_TUNNEL |
1528                    NETIF_F_GSO_UDP_TUNNEL_CSUM;
1529
1530         if (lif->nxqs > 1)
1531                 features |= NETIF_F_RXHASH;
1532
1533         err = ionic_set_nic_features(lif, features);
1534         if (err)
1535                 return err;
1536
1537         /* tell the netdev what we actually can support */
1538         netdev->features |= NETIF_F_HIGHDMA;
1539
1540         if (lif->hw_features & IONIC_ETH_HW_VLAN_TX_TAG)
1541                 netdev->hw_features |= NETIF_F_HW_VLAN_CTAG_TX;
1542         if (lif->hw_features & IONIC_ETH_HW_VLAN_RX_STRIP)
1543                 netdev->hw_features |= NETIF_F_HW_VLAN_CTAG_RX;
1544         if (lif->hw_features & IONIC_ETH_HW_VLAN_RX_FILTER)
1545                 netdev->hw_features |= NETIF_F_HW_VLAN_CTAG_FILTER;
1546         if (lif->hw_features & IONIC_ETH_HW_RX_HASH)
1547                 netdev->hw_features |= NETIF_F_RXHASH;
1548         if (lif->hw_features & IONIC_ETH_HW_TX_SG)
1549                 netdev->hw_features |= NETIF_F_SG;
1550
1551         if (lif->hw_features & IONIC_ETH_HW_TX_CSUM)
1552                 netdev->hw_enc_features |= NETIF_F_HW_CSUM;
1553         if (lif->hw_features & IONIC_ETH_HW_RX_CSUM)
1554                 netdev->hw_enc_features |= NETIF_F_RXCSUM;
1555         if (lif->hw_features & IONIC_ETH_HW_TSO)
1556                 netdev->hw_enc_features |= NETIF_F_TSO;
1557         if (lif->hw_features & IONIC_ETH_HW_TSO_IPV6)
1558                 netdev->hw_enc_features |= NETIF_F_TSO6;
1559         if (lif->hw_features & IONIC_ETH_HW_TSO_ECN)
1560                 netdev->hw_enc_features |= NETIF_F_TSO_ECN;
1561         if (lif->hw_features & IONIC_ETH_HW_TSO_GRE)
1562                 netdev->hw_enc_features |= NETIF_F_GSO_GRE;
1563         if (lif->hw_features & IONIC_ETH_HW_TSO_GRE_CSUM)
1564                 netdev->hw_enc_features |= NETIF_F_GSO_GRE_CSUM;
1565         if (lif->hw_features & IONIC_ETH_HW_TSO_IPXIP4)
1566                 netdev->hw_enc_features |= NETIF_F_GSO_IPXIP4;
1567         if (lif->hw_features & IONIC_ETH_HW_TSO_IPXIP6)
1568                 netdev->hw_enc_features |= NETIF_F_GSO_IPXIP6;
1569         if (lif->hw_features & IONIC_ETH_HW_TSO_UDP)
1570                 netdev->hw_enc_features |= NETIF_F_GSO_UDP_TUNNEL;
1571         if (lif->hw_features & IONIC_ETH_HW_TSO_UDP_CSUM)
1572                 netdev->hw_enc_features |= NETIF_F_GSO_UDP_TUNNEL_CSUM;
1573
1574         netdev->hw_features |= netdev->hw_enc_features;
1575         netdev->features |= netdev->hw_features;
1576         netdev->vlan_features |= netdev->features & ~NETIF_F_VLAN_FEATURES;
1577
1578         netdev->priv_flags |= IFF_UNICAST_FLT |
1579                               IFF_LIVE_ADDR_CHANGE;
1580
1581         netdev->xdp_features = NETDEV_XDP_ACT_BASIC    |
1582                                NETDEV_XDP_ACT_REDIRECT |
1583                                NETDEV_XDP_ACT_RX_SG    |
1584                                NETDEV_XDP_ACT_NDO_XMIT |
1585                                NETDEV_XDP_ACT_NDO_XMIT_SG;
1586
1587         return 0;
1588 }
1589
1590 static int ionic_set_features(struct net_device *netdev,
1591                               netdev_features_t features)
1592 {
1593         struct ionic_lif *lif = netdev_priv(netdev);
1594         int err;
1595
1596         netdev_dbg(netdev, "%s: lif->features=0x%08llx new_features=0x%08llx\n",
1597                    __func__, (u64)lif->netdev->features, (u64)features);
1598
1599         err = ionic_set_nic_features(lif, features);
1600
1601         return err;
1602 }
1603
1604 static int ionic_set_attr_mac(struct ionic_lif *lif, u8 *mac)
1605 {
1606         struct ionic_admin_ctx ctx = {
1607                 .work = COMPLETION_INITIALIZER_ONSTACK(ctx.work),
1608                 .cmd.lif_setattr = {
1609                         .opcode = IONIC_CMD_LIF_SETATTR,
1610                         .index = cpu_to_le16(lif->index),
1611                         .attr = IONIC_LIF_ATTR_MAC,
1612                 },
1613         };
1614
1615         ether_addr_copy(ctx.cmd.lif_setattr.mac, mac);
1616         return ionic_adminq_post_wait(lif, &ctx);
1617 }
1618
1619 static int ionic_get_attr_mac(struct ionic_lif *lif, u8 *mac_addr)
1620 {
1621         struct ionic_admin_ctx ctx = {
1622                 .work = COMPLETION_INITIALIZER_ONSTACK(ctx.work),
1623                 .cmd.lif_getattr = {
1624                         .opcode = IONIC_CMD_LIF_GETATTR,
1625                         .index = cpu_to_le16(lif->index),
1626                         .attr = IONIC_LIF_ATTR_MAC,
1627                 },
1628         };
1629         int err;
1630
1631         err = ionic_adminq_post_wait(lif, &ctx);
1632         if (err)
1633                 return err;
1634
1635         ether_addr_copy(mac_addr, ctx.comp.lif_getattr.mac);
1636         return 0;
1637 }
1638
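/* Program a new MAC address and read it back to confirm the change.
 * Returns a negative errno on command failure, 1 if the readback does not
 * match (older firmware silently ignoring the set), or 0 on success.
 */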
1639 static int ionic_program_mac(struct ionic_lif *lif, u8 *mac)
1640 {
1641         u8  get_mac[ETH_ALEN];
1642         int err;
1643
1644         err = ionic_set_attr_mac(lif, mac);
1645         if (err)
1646                 return err;
1647
1648         err = ionic_get_attr_mac(lif, get_mac);
1649         if (err)
1650                 return err;
1651
1652         /* Deal with older firmware that silently ignores the set attr mac:
1653          * it doesn't actually change the mac and doesn't return an error,
1654          * so we do the get attr to verify whether the set actually happened.
1655          */
1656         if (!ether_addr_equal(get_mac, mac))
1657                 return 1;
1658
1659         return 0;
1660 }
1661
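/* ndo_set_mac_address: program the new address into the firmware first,
 * then update the netdev and move the unicast filter from the old address
 * to the new one.  A positive return from ionic_program_mac() is only a
 * debug-level note about old firmware, not a failure.
 */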
1662 static int ionic_set_mac_address(struct net_device *netdev, void *sa)
1663 {
1664         struct ionic_lif *lif = netdev_priv(netdev);
1665         struct sockaddr *addr = sa;
1666         u8 *mac;
1667         int err;
1668
1669         mac = (u8 *)addr->sa_data;
1670         if (ether_addr_equal(netdev->dev_addr, mac))
1671                 return 0;
1672
1673         err = ionic_program_mac(lif, mac);
1674         if (err < 0)
1675                 return err;
1676
1677         if (err > 0)
1678                 netdev_dbg(netdev, "%s: SET and GET ATTR Mac are not equal - due to old FW running\n",
1679                            __func__);
1680
1681         err = eth_prepare_mac_addr_change(netdev, addr);
1682         if (err)
1683                 return err;
1684
1685         if (!is_zero_ether_addr(netdev->dev_addr)) {
1686                 netdev_info(netdev, "deleting mac addr %pM\n",
1687                             netdev->dev_addr);
1688                 ionic_lif_addr_del(netdev_priv(netdev), netdev->dev_addr);
1689         }
1690
1691         eth_commit_mac_addr_change(netdev, addr);
1692         netdev_info(netdev, "updating mac addr %pM\n", mac);
1693
1694         return ionic_lif_addr_add(netdev_priv(netdev), mac);
1695 }
1696
1697 void ionic_stop_queues_reconfig(struct ionic_lif *lif)
1698 {
1699         /* Stop and clean the queues before reconfiguration */
1700         netif_device_detach(lif->netdev);
1701         ionic_stop_queues(lif);
1702         ionic_txrx_deinit(lif);
1703 }
1704
1705 static int ionic_start_queues_reconfig(struct ionic_lif *lif)
1706 {
1707         int err;
1708
1709         /* Re-init the queues after reconfiguration */
1710
1711         /* The only way txrx_init can fail here is if communication
1712          * with FW is suddenly broken.  There's not much we can do
1713          * at this point - error messages have already been printed,
1714          * so we can continue on and the user can eventually do a
1715          * DOWN and UP to try to reset and clear the issue.
1716          */
1717         err = ionic_txrx_init(lif);
1718         ionic_link_status_check_request(lif, CAN_NOT_SLEEP);
1719         netif_device_attach(lif->netdev);
1720
1721         return err;
1722 }
1723
1724 static bool ionic_xdp_is_valid_mtu(struct ionic_lif *lif, u32 mtu,
1725                                    struct bpf_prog *xdp_prog)
1726 {
1727         if (!xdp_prog)
1728                 return true;
1729
1730         if (mtu <= IONIC_XDP_MAX_LINEAR_MTU)
1731                 return true;
1732
1733         if (xdp_prog->aux && xdp_prog->aux->xdp_has_frags)
1734                 return true;
1735
1736         return false;
1737 }
1738
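/* ndo_change_mtu: reject MTUs that an attached XDP program can't handle
 * (beyond the linear limit without frags support), push the new MTU to the
 * firmware with LIF_SETATTR/IONIC_LIF_ATTR_MTU, and if the interface is
 * running restart the queues under queue_lock to pick up the new size.
 */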
1739 static int ionic_change_mtu(struct net_device *netdev, int new_mtu)
1740 {
1741         struct ionic_lif *lif = netdev_priv(netdev);
1742         struct ionic_admin_ctx ctx = {
1743                 .work = COMPLETION_INITIALIZER_ONSTACK(ctx.work),
1744                 .cmd.lif_setattr = {
1745                         .opcode = IONIC_CMD_LIF_SETATTR,
1746                         .index = cpu_to_le16(lif->index),
1747                         .attr = IONIC_LIF_ATTR_MTU,
1748                         .mtu = cpu_to_le32(new_mtu),
1749                 },
1750         };
1751         struct bpf_prog *xdp_prog;
1752         int err;
1753
1754         xdp_prog = READ_ONCE(lif->xdp_prog);
1755         if (!ionic_xdp_is_valid_mtu(lif, new_mtu, xdp_prog))
1756                 return -EINVAL;
1757
1758         err = ionic_adminq_post_wait(lif, &ctx);
1759         if (err)
1760                 return err;
1761
1762         /* if we're not running, nothing more to do */
1763         if (!netif_running(netdev)) {
1764                 netdev->mtu = new_mtu;
1765                 return 0;
1766         }
1767
1768         mutex_lock(&lif->queue_lock);
1769         ionic_stop_queues_reconfig(lif);
1770         netdev->mtu = new_mtu;
1771         err = ionic_start_queues_reconfig(lif);
1772         mutex_unlock(&lif->queue_lock);
1773
1774         return err;
1775 }
1776
1777 static void ionic_tx_timeout_work(struct work_struct *ws)
1778 {
1779         struct ionic_lif *lif = container_of(ws, struct ionic_lif, tx_timeout_work);
1780         int err;
1781
1782         if (test_bit(IONIC_LIF_F_FW_RESET, lif->state))
1783                 return;
1784
1785         /* if we were stopped before this scheduled job was launched,
1786          * don't bother the queues as they are already stopped.
1787          */
1788         if (!netif_running(lif->netdev))
1789                 return;
1790
1791         mutex_lock(&lif->queue_lock);
1792         ionic_stop_queues_reconfig(lif);
1793         err = ionic_start_queues_reconfig(lif);
1794         mutex_unlock(&lif->queue_lock);
1795
1796         if (err)
1797                 dev_err(lif->ionic->dev, "%s: Restarting queues failed\n", __func__);
1798 }
1799
1800 static void ionic_tx_timeout(struct net_device *netdev, unsigned int txqueue)
1801 {
1802         struct ionic_lif *lif = netdev_priv(netdev);
1803
1804         netdev_info(lif->netdev, "Tx Timeout triggered - txq %d\n", txqueue);
1805         schedule_work(&lif->tx_timeout_work);
1806 }
1807
1808 static int ionic_vlan_rx_add_vid(struct net_device *netdev, __be16 proto,
1809                                  u16 vid)
1810 {
1811         struct ionic_lif *lif = netdev_priv(netdev);
1812         int err;
1813
1814         err = ionic_lif_vlan_add(lif, vid);
1815         if (err)
1816                 return err;
1817
1818         ionic_lif_rx_mode(lif);
1819
1820         return 0;
1821 }
1822
1823 static int ionic_vlan_rx_kill_vid(struct net_device *netdev, __be16 proto,
1824                                   u16 vid)
1825 {
1826         struct ionic_lif *lif = netdev_priv(netdev);
1827         int err;
1828
1829         err = ionic_lif_vlan_del(lif, vid);
1830         if (err)
1831                 return err;
1832
1833         ionic_lif_rx_mode(lif);
1834
1835         return 0;
1836 }
1837
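/* Push RSS settings to the firmware via LIF_SETATTR/IONIC_LIF_ATTR_RSS.
 * A NULL key or indir leaves the cached hash key or indirection table
 * contents unchanged, so a caller can reapply just the hash types, as is
 * done elsewhere in this file:
 *
 *      ionic_lif_rss_config(lif, lif->rss_types, NULL, NULL);
 */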
1838 int ionic_lif_rss_config(struct ionic_lif *lif, const u16 types,
1839                          const u8 *key, const u32 *indir)
1840 {
1841         struct ionic_admin_ctx ctx = {
1842                 .work = COMPLETION_INITIALIZER_ONSTACK(ctx.work),
1843                 .cmd.lif_setattr = {
1844                         .opcode = IONIC_CMD_LIF_SETATTR,
1845                         .attr = IONIC_LIF_ATTR_RSS,
1846                         .rss.addr = cpu_to_le64(lif->rss_ind_tbl_pa),
1847                 },
1848         };
1849         unsigned int i, tbl_sz;
1850
1851         if (lif->hw_features & IONIC_ETH_HW_RX_HASH) {
1852                 lif->rss_types = types;
1853                 ctx.cmd.lif_setattr.rss.types = cpu_to_le16(types);
1854         }
1855
1856         if (key)
1857                 memcpy(lif->rss_hash_key, key, IONIC_RSS_HASH_KEY_SIZE);
1858
1859         if (indir) {
1860                 tbl_sz = le16_to_cpu(lif->ionic->ident.lif.eth.rss_ind_tbl_sz);
1861                 for (i = 0; i < tbl_sz; i++)
1862                         lif->rss_ind_tbl[i] = indir[i];
1863         }
1864
1865         memcpy(ctx.cmd.lif_setattr.rss.key, lif->rss_hash_key,
1866                IONIC_RSS_HASH_KEY_SIZE);
1867
1868         return ionic_adminq_post_wait(lif, &ctx);
1869 }
1870
1871 static int ionic_lif_rss_init(struct ionic_lif *lif)
1872 {
1873         unsigned int tbl_sz;
1874         unsigned int i;
1875
1876         lif->rss_types = IONIC_RSS_TYPE_IPV4     |
1877                          IONIC_RSS_TYPE_IPV4_TCP |
1878                          IONIC_RSS_TYPE_IPV4_UDP |
1879                          IONIC_RSS_TYPE_IPV6     |
1880                          IONIC_RSS_TYPE_IPV6_TCP |
1881                          IONIC_RSS_TYPE_IPV6_UDP;
1882
1883         /* Fill indirection table with 'default' values */
1884         tbl_sz = le16_to_cpu(lif->ionic->ident.lif.eth.rss_ind_tbl_sz);
1885         for (i = 0; i < tbl_sz; i++)
1886                 lif->rss_ind_tbl[i] = ethtool_rxfh_indir_default(i, lif->nxqs);
1887
1888         return ionic_lif_rss_config(lif, lif->rss_types, NULL, NULL);
1889 }
1890
1891 static void ionic_lif_rss_deinit(struct ionic_lif *lif)
1892 {
1893         int tbl_sz;
1894
1895         tbl_sz = le16_to_cpu(lif->ionic->ident.lif.eth.rss_ind_tbl_sz);
1896         memset(lif->rss_ind_tbl, 0, tbl_sz);
1897         memset(lif->rss_hash_key, 0, IONIC_RSS_HASH_KEY_SIZE);
1898
1899         ionic_lif_rss_config(lif, 0x0, NULL, NULL);
1900 }
1901
1902 static void ionic_lif_quiesce(struct ionic_lif *lif)
1903 {
1904         struct ionic_admin_ctx ctx = {
1905                 .work = COMPLETION_INITIALIZER_ONSTACK(ctx.work),
1906                 .cmd.lif_setattr = {
1907                         .opcode = IONIC_CMD_LIF_SETATTR,
1908                         .index = cpu_to_le16(lif->index),
1909                         .attr = IONIC_LIF_ATTR_STATE,
1910                         .state = IONIC_LIF_QUIESCE,
1911                 },
1912         };
1913         int err;
1914
1915         err = ionic_adminq_post_wait(lif, &ctx);
1916         if (err)
1917                 netdev_dbg(lif->netdev, "lif quiesce failed %d\n", err);
1918 }
1919
1920 static void ionic_txrx_disable(struct ionic_lif *lif)
1921 {
1922         unsigned int i;
1923         int err = 0;
1924
1925         if (lif->txqcqs) {
1926                 for (i = 0; i < lif->nxqs; i++)
1927                         err = ionic_qcq_disable(lif, lif->txqcqs[i], err);
1928         }
1929
1930         if (lif->hwstamp_txq)
1931                 err = ionic_qcq_disable(lif, lif->hwstamp_txq, err);
1932
1933         if (lif->rxqcqs) {
1934                 for (i = 0; i < lif->nxqs; i++)
1935                         err = ionic_qcq_disable(lif, lif->rxqcqs[i], err);
1936         }
1937
1938         if (lif->hwstamp_rxq)
1939                 err = ionic_qcq_disable(lif, lif->hwstamp_rxq, err);
1940
1941         ionic_lif_quiesce(lif);
1942 }
1943
1944 static void ionic_txrx_deinit(struct ionic_lif *lif)
1945 {
1946         unsigned int i;
1947
1948         if (lif->txqcqs) {
1949                 for (i = 0; i < lif->nxqs && lif->txqcqs[i]; i++) {
1950                         ionic_lif_qcq_deinit(lif, lif->txqcqs[i]);
1951                         ionic_tx_flush(&lif->txqcqs[i]->cq);
1952                         ionic_tx_empty(&lif->txqcqs[i]->q);
1953                 }
1954         }
1955
1956         if (lif->rxqcqs) {
1957                 for (i = 0; i < lif->nxqs && lif->rxqcqs[i]; i++) {
1958                         ionic_lif_qcq_deinit(lif, lif->rxqcqs[i]);
1959                         ionic_rx_empty(&lif->rxqcqs[i]->q);
1960                 }
1961         }
1962         lif->rx_mode = 0;
1963
1964         if (lif->hwstamp_txq) {
1965                 ionic_lif_qcq_deinit(lif, lif->hwstamp_txq);
1966                 ionic_tx_flush(&lif->hwstamp_txq->cq);
1967                 ionic_tx_empty(&lif->hwstamp_txq->q);
1968         }
1969
1970         if (lif->hwstamp_rxq) {
1971                 ionic_lif_qcq_deinit(lif, lif->hwstamp_rxq);
1972                 ionic_rx_empty(&lif->hwstamp_rxq->q);
1973         }
1974 }
1975
1976 void ionic_txrx_free(struct ionic_lif *lif)
1977 {
1978         unsigned int i;
1979
1980         if (lif->txqcqs) {
1981                 for (i = 0; i < lif->ionic->ntxqs_per_lif && lif->txqcqs[i]; i++) {
1982                         ionic_qcq_free(lif, lif->txqcqs[i]);
1983                         devm_kfree(lif->ionic->dev, lif->txqcqs[i]);
1984                         lif->txqcqs[i] = NULL;
1985                 }
1986         }
1987
1988         if (lif->rxqcqs) {
1989                 for (i = 0; i < lif->ionic->nrxqs_per_lif && lif->rxqcqs[i]; i++) {
1990                         ionic_qcq_free(lif, lif->rxqcqs[i]);
1991                         devm_kfree(lif->ionic->dev, lif->rxqcqs[i]);
1992                         lif->rxqcqs[i] = NULL;
1993                 }
1994         }
1995
1996         if (lif->hwstamp_txq) {
1997                 ionic_qcq_free(lif, lif->hwstamp_txq);
1998                 devm_kfree(lif->ionic->dev, lif->hwstamp_txq);
1999                 lif->hwstamp_txq = NULL;
2000         }
2001
2002         if (lif->hwstamp_rxq) {
2003                 ionic_qcq_free(lif, lif->hwstamp_rxq);
2004                 devm_kfree(lif->ionic->dev, lif->hwstamp_rxq);
2005                 lif->hwstamp_rxq = NULL;
2006         }
2007 }
2008
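/* Allocate the tx and rx qcqs described by the current lif settings.  The
 * tx SG descriptor size depends on the negotiated TXQ version, CMB rings
 * and split-interrupt mode are reflected in the qcq flags, and any partial
 * allocation is unwound through ionic_txrx_free() on error.
 */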
2009 static int ionic_txrx_alloc(struct ionic_lif *lif)
2010 {
2011         unsigned int comp_sz, desc_sz, num_desc, sg_desc_sz;
2012         unsigned int flags, i;
2013         int err = 0;
2014
2015         num_desc = lif->ntxq_descs;
2016         desc_sz = sizeof(struct ionic_txq_desc);
2017         comp_sz = sizeof(struct ionic_txq_comp);
2018
2019         if (lif->qtype_info[IONIC_QTYPE_TXQ].version >= 1 &&
2020             lif->qtype_info[IONIC_QTYPE_TXQ].sg_desc_sz ==
2021                                           sizeof(struct ionic_txq_sg_desc_v1))
2022                 sg_desc_sz = sizeof(struct ionic_txq_sg_desc_v1);
2023         else
2024                 sg_desc_sz = sizeof(struct ionic_txq_sg_desc);
2025
2026         flags = IONIC_QCQ_F_TX_STATS | IONIC_QCQ_F_SG;
2027
2028         if (test_bit(IONIC_LIF_F_CMB_TX_RINGS, lif->state))
2029                 flags |= IONIC_QCQ_F_CMB_RINGS;
2030
2031         if (test_bit(IONIC_LIF_F_SPLIT_INTR, lif->state))
2032                 flags |= IONIC_QCQ_F_INTR;
2033
2034         for (i = 0; i < lif->nxqs; i++) {
2035                 err = ionic_qcq_alloc(lif, IONIC_QTYPE_TXQ, i, "tx", flags,
2036                                       num_desc, desc_sz, comp_sz, sg_desc_sz,
2037                                       sizeof(struct ionic_tx_desc_info),
2038                                       lif->kern_pid, &lif->txqcqs[i]);
2039                 if (err)
2040                         goto err_out;
2041
2042                 if (flags & IONIC_QCQ_F_INTR) {
2043                         ionic_intr_coal_init(lif->ionic->idev.intr_ctrl,
2044                                              lif->txqcqs[i]->intr.index,
2045                                              lif->tx_coalesce_hw);
2046                         if (test_bit(IONIC_LIF_F_TX_DIM_INTR, lif->state))
2047                                 lif->txqcqs[i]->intr.dim_coal_hw = lif->tx_coalesce_hw;
2048                 }
2049
2050                 ionic_debugfs_add_qcq(lif, lif->txqcqs[i]);
2051         }
2052
2053         flags = IONIC_QCQ_F_RX_STATS | IONIC_QCQ_F_SG | IONIC_QCQ_F_INTR;
2054
2055         if (test_bit(IONIC_LIF_F_CMB_RX_RINGS, lif->state))
2056                 flags |= IONIC_QCQ_F_CMB_RINGS;
2057
2058         num_desc = lif->nrxq_descs;
2059         desc_sz = sizeof(struct ionic_rxq_desc);
2060         comp_sz = sizeof(struct ionic_rxq_comp);
2061         sg_desc_sz = sizeof(struct ionic_rxq_sg_desc);
2062
2063         if (lif->rxq_features & IONIC_Q_F_2X_CQ_DESC)
2064                 comp_sz *= 2;
2065
2066         for (i = 0; i < lif->nxqs; i++) {
2067                 err = ionic_qcq_alloc(lif, IONIC_QTYPE_RXQ, i, "rx", flags,
2068                                       num_desc, desc_sz, comp_sz, sg_desc_sz,
2069                                       sizeof(struct ionic_rx_desc_info),
2070                                       lif->kern_pid, &lif->rxqcqs[i]);
2071                 if (err)
2072                         goto err_out;
2073
2074                 lif->rxqcqs[i]->q.features = lif->rxq_features;
2075
2076                 ionic_intr_coal_init(lif->ionic->idev.intr_ctrl,
2077                                      lif->rxqcqs[i]->intr.index,
2078                                      lif->rx_coalesce_hw);
2079                 if (test_bit(IONIC_LIF_F_RX_DIM_INTR, lif->state))
2080                         lif->rxqcqs[i]->intr.dim_coal_hw = lif->rx_coalesce_hw;
2081
2082                 if (!test_bit(IONIC_LIF_F_SPLIT_INTR, lif->state))
2083                         ionic_link_qcq_interrupts(lif->rxqcqs[i],
2084                                                   lif->txqcqs[i]);
2085
2086                 ionic_debugfs_add_qcq(lif, lif->rxqcqs[i]);
2087         }
2088
2089         return 0;
2090
2091 err_out:
2092         ionic_txrx_free(lif);
2093
2094         return err;
2095 }
2096
2097 static int ionic_txrx_init(struct ionic_lif *lif)
2098 {
2099         unsigned int i;
2100         int err;
2101
2102         for (i = 0; i < lif->nxqs; i++) {
2103                 err = ionic_lif_txq_init(lif, lif->txqcqs[i]);
2104                 if (err)
2105                         goto err_out;
2106
2107                 err = ionic_lif_rxq_init(lif, lif->rxqcqs[i]);
2108                 if (err) {
2109                         ionic_lif_qcq_deinit(lif, lif->txqcqs[i]);
2110                         goto err_out;
2111                 }
2112         }
2113
2114         if (lif->netdev->features & NETIF_F_RXHASH)
2115                 ionic_lif_rss_init(lif);
2116
2117         ionic_lif_rx_mode(lif);
2118
2119         return 0;
2120
2121 err_out:
2122         while (i--) {
2123                 ionic_lif_qcq_deinit(lif, lif->txqcqs[i]);
2124                 ionic_lif_qcq_deinit(lif, lif->rxqcqs[i]);
2125         }
2126
2127         return err;
2128 }
2129
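/* Enable the data path: for each queue pair, fill and enable the rx queue
 * before enabling its tx partner, then bring up the hwstamp queues if
 * present.  On error, everything enabled so far is disabled again and the
 * XDP rxq_info registration is reconfigured to match.
 */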
2130 static int ionic_txrx_enable(struct ionic_lif *lif)
2131 {
2132         int derr = 0;
2133         int i, err;
2134
2135         err = ionic_xdp_queues_config(lif);
2136         if (err)
2137                 return err;
2138
2139         for (i = 0; i < lif->nxqs; i++) {
2140                 if (!(lif->rxqcqs[i] && lif->txqcqs[i])) {
2141                         dev_err(lif->ionic->dev, "%s: bad qcq %d\n", __func__, i);
2142                         err = -ENXIO;
2143                         goto err_out;
2144                 }
2145
2146                 ionic_rx_fill(&lif->rxqcqs[i]->q);
2147                 err = ionic_qcq_enable(lif->rxqcqs[i]);
2148                 if (err)
2149                         goto err_out;
2150
2151                 err = ionic_qcq_enable(lif->txqcqs[i]);
2152                 if (err) {
2153                         derr = ionic_qcq_disable(lif, lif->rxqcqs[i], err);
2154                         goto err_out;
2155                 }
2156         }
2157
2158         if (lif->hwstamp_rxq) {
2159                 ionic_rx_fill(&lif->hwstamp_rxq->q);
2160                 err = ionic_qcq_enable(lif->hwstamp_rxq);
2161                 if (err)
2162                         goto err_out_hwstamp_rx;
2163         }
2164
2165         if (lif->hwstamp_txq) {
2166                 err = ionic_qcq_enable(lif->hwstamp_txq);
2167                 if (err)
2168                         goto err_out_hwstamp_tx;
2169         }
2170
2171         return 0;
2172
2173 err_out_hwstamp_tx:
2174         if (lif->hwstamp_rxq)
2175                 derr = ionic_qcq_disable(lif, lif->hwstamp_rxq, derr);
2176 err_out_hwstamp_rx:
2177         i = lif->nxqs;
2178 err_out:
2179         while (i--) {
2180                 derr = ionic_qcq_disable(lif, lif->txqcqs[i], derr);
2181                 derr = ionic_qcq_disable(lif, lif->rxqcqs[i], derr);
2182         }
2183
2184         ionic_xdp_queues_config(lif);
2185
2186         return err;
2187 }
2188
2189 static int ionic_start_queues(struct ionic_lif *lif)
2190 {
2191         int err;
2192
2193         if (test_bit(IONIC_LIF_F_BROKEN, lif->state))
2194                 return -EIO;
2195
2196         if (test_bit(IONIC_LIF_F_FW_RESET, lif->state))
2197                 return -EBUSY;
2198
2199         if (test_and_set_bit(IONIC_LIF_F_UP, lif->state))
2200                 return 0;
2201
2202         err = ionic_txrx_enable(lif);
2203         if (err) {
2204                 clear_bit(IONIC_LIF_F_UP, lif->state);
2205                 return err;
2206         }
2207         netif_tx_wake_all_queues(lif->netdev);
2208
2209         return 0;
2210 }
2211
2212 static int ionic_open(struct net_device *netdev)
2213 {
2214         struct ionic_lif *lif = netdev_priv(netdev);
2215         int err;
2216
2217         /* If recovering from a broken state, clear the bit and we'll try again */
2218         if (test_and_clear_bit(IONIC_LIF_F_BROKEN, lif->state))
2219                 netdev_info(netdev, "clearing broken state\n");
2220
2221         mutex_lock(&lif->queue_lock);
2222
2223         err = ionic_txrx_alloc(lif);
2224         if (err)
2225                 goto err_unlock;
2226
2227         err = ionic_txrx_init(lif);
2228         if (err)
2229                 goto err_txrx_free;
2230
2231         err = netif_set_real_num_tx_queues(netdev, lif->nxqs);
2232         if (err)
2233                 goto err_txrx_deinit;
2234
2235         err = netif_set_real_num_rx_queues(netdev, lif->nxqs);
2236         if (err)
2237                 goto err_txrx_deinit;
2238
2239         /* don't start the queues until we have link */
2240         if (netif_carrier_ok(netdev)) {
2241                 err = ionic_start_queues(lif);
2242                 if (err)
2243                         goto err_txrx_deinit;
2244         }
2245
2246         /* If hardware timestamping is enabled, but the queues were freed by
2247          * ionic_stop, those need to be reallocated and initialized, too.
2248          */
2249         ionic_lif_hwstamp_recreate_queues(lif);
2250
2251         mutex_unlock(&lif->queue_lock);
2252
2253         return 0;
2254
2255 err_txrx_deinit:
2256         ionic_txrx_deinit(lif);
2257 err_txrx_free:
2258         ionic_txrx_free(lif);
2259 err_unlock:
2260         mutex_unlock(&lif->queue_lock);
2261         return err;
2262 }
2263
2264 static void ionic_stop_queues(struct ionic_lif *lif)
2265 {
2266         if (!test_and_clear_bit(IONIC_LIF_F_UP, lif->state))
2267                 return;
2268
2269         netif_tx_disable(lif->netdev);
2270         ionic_txrx_disable(lif);
2271 }
2272
2273 static int ionic_stop(struct net_device *netdev)
2274 {
2275         struct ionic_lif *lif = netdev_priv(netdev);
2276
2277         if (test_bit(IONIC_LIF_F_FW_RESET, lif->state))
2278                 return 0;
2279
2280         mutex_lock(&lif->queue_lock);
2281         ionic_stop_queues(lif);
2282         ionic_txrx_deinit(lif);
2283         ionic_txrx_free(lif);
2284         mutex_unlock(&lif->queue_lock);
2285
2286         return 0;
2287 }
2288
2289 static int ionic_eth_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd)
2290 {
2291         struct ionic_lif *lif = netdev_priv(netdev);
2292
2293         switch (cmd) {
2294         case SIOCSHWTSTAMP:
2295                 return ionic_lif_hwstamp_set(lif, ifr);
2296         case SIOCGHWTSTAMP:
2297                 return ionic_lif_hwstamp_get(lif, ifr);
2298         default:
2299                 return -EOPNOTSUPP;
2300         }
2301 }
2302
2303 static int ionic_get_vf_config(struct net_device *netdev,
2304                                int vf, struct ifla_vf_info *ivf)
2305 {
2306         struct ionic_lif *lif = netdev_priv(netdev);
2307         struct ionic *ionic = lif->ionic;
2308         int ret = 0;
2309
2310         if (!netif_device_present(netdev))
2311                 return -EBUSY;
2312
2313         down_read(&ionic->vf_op_lock);
2314
2315         if (vf >= pci_num_vf(ionic->pdev) || !ionic->vfs) {
2316                 ret = -EINVAL;
2317         } else {
2318                 struct ionic_vf *vfdata = &ionic->vfs[vf];
2319
2320                 ivf->vf           = vf;
2321                 ivf->qos          = 0;
2322                 ivf->vlan         = le16_to_cpu(vfdata->vlanid);
2323                 ivf->spoofchk     = vfdata->spoofchk;
2324                 ivf->linkstate    = vfdata->linkstate;
2325                 ivf->max_tx_rate  = le32_to_cpu(vfdata->maxrate);
2326                 ivf->trusted      = vfdata->trusted;
2327                 ether_addr_copy(ivf->mac, vfdata->macaddr);
2328         }
2329
2330         up_read(&ionic->vf_op_lock);
2331         return ret;
2332 }
2333
2334 static int ionic_get_vf_stats(struct net_device *netdev, int vf,
2335                               struct ifla_vf_stats *vf_stats)
2336 {
2337         struct ionic_lif *lif = netdev_priv(netdev);
2338         struct ionic *ionic = lif->ionic;
2339         struct ionic_lif_stats *vs;
2340         int ret = 0;
2341
2342         if (!netif_device_present(netdev))
2343                 return -EBUSY;
2344
2345         down_read(&ionic->vf_op_lock);
2346
2347         if (vf >= pci_num_vf(ionic->pdev) || !ionic->vfs) {
2348                 ret = -EINVAL;
2349         } else {
2350                 memset(vf_stats, 0, sizeof(*vf_stats));
2351                 vs = &ionic->vfs[vf].stats;
2352
2353                 vf_stats->rx_packets = le64_to_cpu(vs->rx_ucast_packets);
2354                 vf_stats->tx_packets = le64_to_cpu(vs->tx_ucast_packets);
2355                 vf_stats->rx_bytes   = le64_to_cpu(vs->rx_ucast_bytes);
2356                 vf_stats->tx_bytes   = le64_to_cpu(vs->tx_ucast_bytes);
2357                 vf_stats->broadcast  = le64_to_cpu(vs->rx_bcast_packets);
2358                 vf_stats->multicast  = le64_to_cpu(vs->rx_mcast_packets);
2359                 vf_stats->rx_dropped = le64_to_cpu(vs->rx_ucast_drop_packets) +
2360                                        le64_to_cpu(vs->rx_mcast_drop_packets) +
2361                                        le64_to_cpu(vs->rx_bcast_drop_packets);
2362                 vf_stats->tx_dropped = le64_to_cpu(vs->tx_ucast_drop_packets) +
2363                                        le64_to_cpu(vs->tx_mcast_drop_packets) +
2364                                        le64_to_cpu(vs->tx_bcast_drop_packets);
2365         }
2366
2367         up_read(&ionic->vf_op_lock);
2368         return ret;
2369 }
2370
2371 static int ionic_set_vf_mac(struct net_device *netdev, int vf, u8 *mac)
2372 {
2373         struct ionic_vf_setattr_cmd vfc = { .attr = IONIC_VF_ATTR_MAC };
2374         struct ionic_lif *lif = netdev_priv(netdev);
2375         struct ionic *ionic = lif->ionic;
2376         int ret;
2377
2378         if (!(is_zero_ether_addr(mac) || is_valid_ether_addr(mac)))
2379                 return -EINVAL;
2380
2381         if (!netif_device_present(netdev))
2382                 return -EBUSY;
2383
2384         down_write(&ionic->vf_op_lock);
2385
2386         if (vf >= pci_num_vf(ionic->pdev) || !ionic->vfs) {
2387                 ret = -EINVAL;
2388         } else {
2389                 ether_addr_copy(vfc.macaddr, mac);
2390                 dev_dbg(ionic->dev, "%s: vf %d macaddr %pM\n",
2391                         __func__, vf, vfc.macaddr);
2392
2393                 ret = ionic_set_vf_config(ionic, vf, &vfc);
2394                 if (!ret)
2395                         ether_addr_copy(ionic->vfs[vf].macaddr, mac);
2396         }
2397
2398         up_write(&ionic->vf_op_lock);
2399         return ret;
2400 }
2401
2402 static int ionic_set_vf_vlan(struct net_device *netdev, int vf, u16 vlan,
2403                              u8 qos, __be16 proto)
2404 {
2405         struct ionic_vf_setattr_cmd vfc = { .attr = IONIC_VF_ATTR_VLAN };
2406         struct ionic_lif *lif = netdev_priv(netdev);
2407         struct ionic *ionic = lif->ionic;
2408         int ret;
2409
2410         /* until someday when we support qos */
2411         if (qos)
2412                 return -EINVAL;
2413
2414         if (vlan > 4095)
2415                 return -EINVAL;
2416
2417         if (proto != htons(ETH_P_8021Q))
2418                 return -EPROTONOSUPPORT;
2419
2420         if (!netif_device_present(netdev))
2421                 return -EBUSY;
2422
2423         down_write(&ionic->vf_op_lock);
2424
2425         if (vf >= pci_num_vf(ionic->pdev) || !ionic->vfs) {
2426                 ret = -EINVAL;
2427         } else {
2428                 vfc.vlanid = cpu_to_le16(vlan);
2429                 dev_dbg(ionic->dev, "%s: vf %d vlan %d\n",
2430                         __func__, vf, le16_to_cpu(vfc.vlanid));
2431
2432                 ret = ionic_set_vf_config(ionic, vf, &vfc);
2433                 if (!ret)
2434                         ionic->vfs[vf].vlanid = cpu_to_le16(vlan);
2435         }
2436
2437         up_write(&ionic->vf_op_lock);
2438         return ret;
2439 }
2440
2441 static int ionic_set_vf_rate(struct net_device *netdev, int vf,
2442                              int tx_min, int tx_max)
2443 {
2444         struct ionic_vf_setattr_cmd vfc = { .attr = IONIC_VF_ATTR_RATE };
2445         struct ionic_lif *lif = netdev_priv(netdev);
2446         struct ionic *ionic = lif->ionic;
2447         int ret;
2448
2449         /* setting a minimum tx rate is not supported */
2450         if (tx_min)
2451                 return -EINVAL;
2452
2453         if (!netif_device_present(netdev))
2454                 return -EBUSY;
2455
2456         down_write(&ionic->vf_op_lock);
2457
2458         if (vf >= pci_num_vf(ionic->pdev) || !ionic->vfs) {
2459                 ret = -EINVAL;
2460         } else {
2461                 vfc.maxrate = cpu_to_le32(tx_max);
2462                 dev_dbg(ionic->dev, "%s: vf %d maxrate %d\n",
2463                         __func__, vf, le32_to_cpu(vfc.maxrate));
2464
2465                 ret = ionic_set_vf_config(ionic, vf, &vfc);
2466                 if (!ret)
2467                         ionic->vfs[vf].maxrate = cpu_to_le32(tx_max);
2468         }
2469
2470         up_write(&ionic->vf_op_lock);
2471         return ret;
2472 }
2473
2474 static int ionic_set_vf_spoofchk(struct net_device *netdev, int vf, bool set)
2475 {
2476         struct ionic_vf_setattr_cmd vfc = { .attr = IONIC_VF_ATTR_SPOOFCHK };
2477         struct ionic_lif *lif = netdev_priv(netdev);
2478         struct ionic *ionic = lif->ionic;
2479         int ret;
2480
2481         if (!netif_device_present(netdev))
2482                 return -EBUSY;
2483
2484         down_write(&ionic->vf_op_lock);
2485
2486         if (vf >= pci_num_vf(ionic->pdev) || !ionic->vfs) {
2487                 ret = -EINVAL;
2488         } else {
2489                 vfc.spoofchk = set;
2490                 dev_dbg(ionic->dev, "%s: vf %d spoof %d\n",
2491                         __func__, vf, vfc.spoofchk);
2492
2493                 ret = ionic_set_vf_config(ionic, vf, &vfc);
2494                 if (!ret)
2495                         ionic->vfs[vf].spoofchk = set;
2496         }
2497
2498         up_write(&ionic->vf_op_lock);
2499         return ret;
2500 }
2501
2502 static int ionic_set_vf_trust(struct net_device *netdev, int vf, bool set)
2503 {
2504         struct ionic_vf_setattr_cmd vfc = { .attr = IONIC_VF_ATTR_TRUST };
2505         struct ionic_lif *lif = netdev_priv(netdev);
2506         struct ionic *ionic = lif->ionic;
2507         int ret;
2508
2509         if (!netif_device_present(netdev))
2510                 return -EBUSY;
2511
2512         down_write(&ionic->vf_op_lock);
2513
2514         if (vf >= pci_num_vf(ionic->pdev) || !ionic->vfs) {
2515                 ret = -EINVAL;
2516         } else {
2517                 vfc.trust = set;
2518                 dev_dbg(ionic->dev, "%s: vf %d trust %d\n",
2519                         __func__, vf, vfc.trust);
2520
2521                 ret = ionic_set_vf_config(ionic, vf, &vfc);
2522                 if (!ret)
2523                         ionic->vfs[vf].trusted = set;
2524         }
2525
2526         up_write(&ionic->vf_op_lock);
2527         return ret;
2528 }
2529
2530 static int ionic_set_vf_link_state(struct net_device *netdev, int vf, int set)
2531 {
2532         struct ionic_vf_setattr_cmd vfc = { .attr = IONIC_VF_ATTR_LINKSTATE };
2533         struct ionic_lif *lif = netdev_priv(netdev);
2534         struct ionic *ionic = lif->ionic;
2535         u8 vfls;
2536         int ret;
2537
2538         switch (set) {
2539         case IFLA_VF_LINK_STATE_ENABLE:
2540                 vfls = IONIC_VF_LINK_STATUS_UP;
2541                 break;
2542         case IFLA_VF_LINK_STATE_DISABLE:
2543                 vfls = IONIC_VF_LINK_STATUS_DOWN;
2544                 break;
2545         case IFLA_VF_LINK_STATE_AUTO:
2546                 vfls = IONIC_VF_LINK_STATUS_AUTO;
2547                 break;
2548         default:
2549                 return -EINVAL;
2550         }
2551
2552         if (!netif_device_present(netdev))
2553                 return -EBUSY;
2554
2555         down_write(&ionic->vf_op_lock);
2556
2557         if (vf >= pci_num_vf(ionic->pdev) || !ionic->vfs) {
2558                 ret = -EINVAL;
2559         } else {
2560                 vfc.linkstate = vfls;
2561                 dev_dbg(ionic->dev, "%s: vf %d linkstate %d\n",
2562                         __func__, vf, vfc.linkstate);
2563
2564                 ret = ionic_set_vf_config(ionic, vf, &vfc);
2565                 if (!ret)
2566                         ionic->vfs[vf].linkstate = set;
2567         }
2568
2569         up_write(&ionic->vf_op_lock);
2570         return ret;
2571 }
2572
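/* Re-apply any VF attributes the driver has cached (stats address, MAC,
 * VLAN, tx rate, spoof check, trust, link state) so the firmware's view
 * matches ionic->vfs[], then call ionic_vf_start().
 */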
2573 static void ionic_vf_attr_replay(struct ionic_lif *lif)
2574 {
2575         struct ionic_vf_setattr_cmd vfc = { };
2576         struct ionic *ionic = lif->ionic;
2577         struct ionic_vf *v;
2578         int i;
2579
2580         if (!ionic->vfs)
2581                 return;
2582
2583         down_read(&ionic->vf_op_lock);
2584
2585         for (i = 0; i < ionic->num_vfs; i++) {
2586                 v = &ionic->vfs[i];
2587
2588                 if (v->stats_pa) {
2589                         vfc.attr = IONIC_VF_ATTR_STATSADDR;
2590                         vfc.stats_pa = cpu_to_le64(v->stats_pa);
2591                         ionic_set_vf_config(ionic, i, &vfc);
2592                         vfc.stats_pa = 0;
2593                 }
2594
2595                 if (!is_zero_ether_addr(v->macaddr)) {
2596                         vfc.attr = IONIC_VF_ATTR_MAC;
2597                         ether_addr_copy(vfc.macaddr, v->macaddr);
2598                         ionic_set_vf_config(ionic, i, &vfc);
2599                         eth_zero_addr(vfc.macaddr);
2600                 }
2601
2602                 if (v->vlanid) {
2603                         vfc.attr = IONIC_VF_ATTR_VLAN;
2604                         vfc.vlanid = v->vlanid;
2605                         ionic_set_vf_config(ionic, i, &vfc);
2606                         vfc.vlanid = 0;
2607                 }
2608
2609                 if (v->maxrate) {
2610                         vfc.attr = IONIC_VF_ATTR_RATE;
2611                         vfc.maxrate = v->maxrate;
2612                         ionic_set_vf_config(ionic, i, &vfc);
2613                         vfc.maxrate = 0;
2614                 }
2615
2616                 if (v->spoofchk) {
2617                         vfc.attr = IONIC_VF_ATTR_SPOOFCHK;
2618                         vfc.spoofchk = v->spoofchk;
2619                         ionic_set_vf_config(ionic, i, &vfc);
2620                         vfc.spoofchk = 0;
2621                 }
2622
2623                 if (v->trusted) {
2624                         vfc.attr = IONIC_VF_ATTR_TRUST;
2625                         vfc.trust = v->trusted;
2626                         ionic_set_vf_config(ionic, i, &vfc);
2627                         vfc.trust = 0;
2628                 }
2629
2630                 if (v->linkstate) {
2631                         vfc.attr = IONIC_VF_ATTR_LINKSTATE;
2632                         vfc.linkstate = v->linkstate;
2633                         ionic_set_vf_config(ionic, i, &vfc);
2634                         vfc.linkstate = 0;
2635                 }
2636         }
2637
2638         up_read(&ionic->vf_op_lock);
2639
2640         ionic_vf_start(ionic);
2641 }
2642
2643 static void ionic_xdp_unregister_rxq_info(struct ionic_queue *q)
2644 {
2645         struct xdp_rxq_info *xi;
2646
2647         if (!q->xdp_rxq_info)
2648                 return;
2649
2650         xi = q->xdp_rxq_info;
2651         q->xdp_rxq_info = NULL;
2652
2653         xdp_rxq_info_unreg(xi);
2654         kfree(xi);
2655 }
2656
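/* Allocate and register an xdp_rxq_info for this rx queue using the
 * order-0 page memory model; any partial registration is torn down on
 * failure and the queue is left without XDP info.
 */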
2657 static int ionic_xdp_register_rxq_info(struct ionic_queue *q, unsigned int napi_id)
2658 {
2659         struct xdp_rxq_info *rxq_info;
2660         int err;
2661
2662         rxq_info = kzalloc(sizeof(*rxq_info), GFP_KERNEL);
2663         if (!rxq_info)
2664                 return -ENOMEM;
2665
2666         err = xdp_rxq_info_reg(rxq_info, q->lif->netdev, q->index, napi_id);
2667         if (err) {
2668                 dev_err(q->dev, "Queue %d xdp_rxq_info_reg failed, err %d\n",
2669                         q->index, err);
2670                 goto err_out;
2671         }
2672
2673         err = xdp_rxq_info_reg_mem_model(rxq_info, MEM_TYPE_PAGE_ORDER0, NULL);
2674         if (err) {
2675                 dev_err(q->dev, "Queue %d xdp_rxq_info_reg_mem_model failed, err %d\n",
2676                         q->index, err);
2677                 xdp_rxq_info_unreg(rxq_info);
2678                 goto err_out;
2679         }
2680
2681         q->xdp_rxq_info = rxq_info;
2682
2683         return 0;
2684
2685 err_out:
2686         kfree(rxq_info);
2687         return err;
2688 }
2689
2690 static int ionic_xdp_queues_config(struct ionic_lif *lif)
2691 {
2692         unsigned int i;
2693         int err;
2694
2695         if (!lif->rxqcqs)
2696                 return 0;
2697
2698         /* There's no need to rework memory if not going to/from a NULL program.
2699          * If there is no lif->xdp_prog, there should also be no q.xdp_rxq_info;
2700          * this way we don't need to keep an *xdp_prog in every queue struct.
2701          */
2702         if (!lif->xdp_prog == !lif->rxqcqs[0]->q.xdp_rxq_info)
2703                 return 0;
2704
2705         for (i = 0; i < lif->ionic->nrxqs_per_lif && lif->rxqcqs[i]; i++) {
2706                 struct ionic_queue *q = &lif->rxqcqs[i]->q;
2707
2708                 if (q->xdp_rxq_info) {
2709                         ionic_xdp_unregister_rxq_info(q);
2710                         continue;
2711                 }
2712
2713                 err = ionic_xdp_register_rxq_info(q, lif->rxqcqs[i]->napi.napi_id);
2714                 if (err) {
2715                         dev_err(lif->ionic->dev, "failed to register RX queue %d info for XDP, err %d\n",
2716                                 i, err);
2717                         goto err_out;
2718                 }
2719         }
2720
2721         return 0;
2722
2723 err_out:
2724         for (i = 0; i < lif->ionic->nrxqs_per_lif && lif->rxqcqs[i]; i++)
2725                 ionic_xdp_unregister_rxq_info(&lif->rxqcqs[i]->q);
2726
2727         return err;
2728 }
2729
2730 static int ionic_xdp_config(struct net_device *netdev, struct netdev_bpf *bpf)
2731 {
2732         struct ionic_lif *lif = netdev_priv(netdev);
2733         struct bpf_prog *old_prog;
2734         u32 maxfs;
2735
2736         if (test_bit(IONIC_LIF_F_SPLIT_INTR, lif->state)) {
2737 #define XDP_ERR_SPLIT "XDP not available with split Tx/Rx interrupts"
2738                 NL_SET_ERR_MSG_MOD(bpf->extack, XDP_ERR_SPLIT);
2739                 netdev_info(lif->netdev, XDP_ERR_SPLIT);
2740                 return -EOPNOTSUPP;
2741         }
2742
2743         if (!ionic_xdp_is_valid_mtu(lif, netdev->mtu, bpf->prog)) {
2744 #define XDP_ERR_MTU "MTU is too large for XDP without frags support"
2745                 NL_SET_ERR_MSG_MOD(bpf->extack, XDP_ERR_MTU);
2746                 netdev_info(lif->netdev, XDP_ERR_MTU);
2747                 return -EINVAL;
2748         }
2749
2750         maxfs = __le32_to_cpu(lif->identity->eth.max_frame_size) - VLAN_ETH_HLEN;
2751         if (bpf->prog && !(bpf->prog->aux && bpf->prog->aux->xdp_has_frags))
2752                 maxfs = min_t(u32, maxfs, IONIC_XDP_MAX_LINEAR_MTU);
2753         netdev->max_mtu = maxfs;
2754
2755         if (!netif_running(netdev)) {
2756                 old_prog = xchg(&lif->xdp_prog, bpf->prog);
2757         } else {
2758                 mutex_lock(&lif->queue_lock);
2759                 ionic_stop_queues_reconfig(lif);
2760                 old_prog = xchg(&lif->xdp_prog, bpf->prog);
2761                 ionic_start_queues_reconfig(lif);
2762                 mutex_unlock(&lif->queue_lock);
2763         }
2764
2765         if (old_prog)
2766                 bpf_prog_put(old_prog);
2767
2768         return 0;
2769 }
2770
2771 static int ionic_xdp(struct net_device *netdev, struct netdev_bpf *bpf)
2772 {
2773         switch (bpf->command) {
2774         case XDP_SETUP_PROG:
2775                 return ionic_xdp_config(netdev, bpf);
2776         default:
2777                 return -EINVAL;
2778         }
2779 }
2780
2781 static const struct net_device_ops ionic_netdev_ops = {
2782         .ndo_open               = ionic_open,
2783         .ndo_stop               = ionic_stop,
2784         .ndo_eth_ioctl          = ionic_eth_ioctl,
2785         .ndo_start_xmit         = ionic_start_xmit,
2786         .ndo_bpf                = ionic_xdp,
2787         .ndo_xdp_xmit           = ionic_xdp_xmit,
2788         .ndo_get_stats64        = ionic_get_stats64,
2789         .ndo_set_rx_mode        = ionic_ndo_set_rx_mode,
2790         .ndo_set_features       = ionic_set_features,
2791         .ndo_set_mac_address    = ionic_set_mac_address,
2792         .ndo_validate_addr      = eth_validate_addr,
2793         .ndo_tx_timeout         = ionic_tx_timeout,
2794         .ndo_change_mtu         = ionic_change_mtu,
2795         .ndo_vlan_rx_add_vid    = ionic_vlan_rx_add_vid,
2796         .ndo_vlan_rx_kill_vid   = ionic_vlan_rx_kill_vid,
2797         .ndo_set_vf_vlan        = ionic_set_vf_vlan,
2798         .ndo_set_vf_trust       = ionic_set_vf_trust,
2799         .ndo_set_vf_mac         = ionic_set_vf_mac,
2800         .ndo_set_vf_rate        = ionic_set_vf_rate,
2801         .ndo_set_vf_spoofchk    = ionic_set_vf_spoofchk,
2802         .ndo_get_vf_config      = ionic_get_vf_config,
2803         .ndo_set_vf_link_state  = ionic_set_vf_link_state,
2804         .ndo_get_vf_stats       = ionic_get_vf_stats,
2805 };
2806
2807 static int ionic_cmb_reconfig(struct ionic_lif *lif,
2808                               struct ionic_queue_params *qparam)
2809 {
2810         struct ionic_queue_params start_qparams;
2811         int err = 0;
2812
2813         /* When changing CMB queue parameters, we're using limited
2814          * on-device memory and don't have extra memory to use for
2815          * duplicate allocations, so we free it all first then
2816          * re-allocate with the new parameters.
2817          */
2818
2819         /* Checkpoint for possible unwind */
2820         ionic_init_queue_params(lif, &start_qparams);
2821
2822         /* Stop and free the queues */
2823         ionic_stop_queues_reconfig(lif);
2824         ionic_txrx_free(lif);
2825
2826         /* Set up new qparams */
2827         ionic_set_queue_params(lif, qparam);
2828
2829         if (netif_running(lif->netdev)) {
2830                 /* Alloc and start the new configuration */
2831                 err = ionic_txrx_alloc(lif);
2832                 if (err) {
2833                         dev_warn(lif->ionic->dev,
2834                                  "CMB reconfig failed, restoring values: %d\n", err);
2835
2836                         /* Back out the changes */
2837                         ionic_set_queue_params(lif, &start_qparams);
2838                         err = ionic_txrx_alloc(lif);
2839                         if (err) {
2840                                 dev_err(lif->ionic->dev,
2841                                         "CMB restore failed: %d\n", err);
2842                                 goto err_out;
2843                         }
2844                 }
2845
2846                 err = ionic_start_queues_reconfig(lif);
2847                 if (err) {
2848                         dev_err(lif->ionic->dev,
2849                                 "CMB reconfig failed: %d\n", err);
2850                         goto err_out;
2851                 }
2852         }
2853
2854 err_out:
2855         /* This was detached in ionic_stop_queues_reconfig() */
2856         netif_device_attach(lif->netdev);
2857
2858         return err;
2859 }
2860
2861 static void ionic_swap_queues(struct ionic_qcq *a, struct ionic_qcq *b)
2862 {
2863         /* only swapping the queues, not the napi, flags, or other stuff */
2864         swap(a->q.features,   b->q.features);
2865         swap(a->q.num_descs,  b->q.num_descs);
2866         swap(a->q.desc_size,  b->q.desc_size);
2867         swap(a->q.base,       b->q.base);
2868         swap(a->q.base_pa,    b->q.base_pa);
2869         swap(a->q.info,       b->q.info);
2870         swap(a->q.xdp_rxq_info, b->q.xdp_rxq_info);
2871         swap(a->q.partner,    b->q.partner);
2872         swap(a->q_base,       b->q_base);
2873         swap(a->q_base_pa,    b->q_base_pa);
2874         swap(a->q_size,       b->q_size);
2875
2876         swap(a->q.sg_desc_size, b->q.sg_desc_size);
2877         swap(a->q.sg_base,    b->q.sg_base);
2878         swap(a->q.sg_base_pa, b->q.sg_base_pa);
2879         swap(a->sg_base,      b->sg_base);
2880         swap(a->sg_base_pa,   b->sg_base_pa);
2881         swap(a->sg_size,      b->sg_size);
2882
2883         swap(a->cq.num_descs, b->cq.num_descs);
2884         swap(a->cq.desc_size, b->cq.desc_size);
2885         swap(a->cq.base,      b->cq.base);
2886         swap(a->cq.base_pa,   b->cq.base_pa);
2887         swap(a->cq_base,      b->cq_base);
2888         swap(a->cq_base_pa,   b->cq_base_pa);
2889         swap(a->cq_size,      b->cq_size);
2890
2891         ionic_debugfs_del_qcq(a);
2892         ionic_debugfs_add_qcq(a->q.lif, a);
2893 }
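
/* Note that ionic_swap_queues() exchanges only the descriptor rings, their
 * DMA addresses and the per-descriptor info between the live qcq and its
 * freshly allocated replacement; the napi context, interrupt binding and
 * qcq flags stay with the original qcq, which is what lets the swap happen
 * without touching the interrupt layout.  The debugfs entry is re-created
 * so it points at the new ring addresses.
 */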
2894
2895 int ionic_reconfigure_queues(struct ionic_lif *lif,
2896                              struct ionic_queue_params *qparam)
2897 {
2898         unsigned int comp_sz, desc_sz, num_desc, sg_desc_sz;
2899         struct ionic_qcq **tx_qcqs = NULL;
2900         struct ionic_qcq **rx_qcqs = NULL;
2901         unsigned int flags, i;
2902         int err = 0;
2903
2904         /* Are we changing q params while CMB is on? */
2905         if ((test_bit(IONIC_LIF_F_CMB_TX_RINGS, lif->state) && qparam->cmb_tx) ||
2906             (test_bit(IONIC_LIF_F_CMB_RX_RINGS, lif->state) && qparam->cmb_rx))
2907                 return ionic_cmb_reconfig(lif, qparam);
2908
2909         /* allocate temporary qcq arrays to hold new queue structs */
2910         if (qparam->nxqs != lif->nxqs || qparam->ntxq_descs != lif->ntxq_descs) {
2911                 tx_qcqs = devm_kcalloc(lif->ionic->dev, lif->ionic->ntxqs_per_lif,
2912                                        sizeof(struct ionic_qcq *), GFP_KERNEL);
2913                 if (!tx_qcqs) {
2914                         err = -ENOMEM;
2915                         goto err_out;
2916                 }
2917         }
2918         if (qparam->nxqs != lif->nxqs ||
2919             qparam->nrxq_descs != lif->nrxq_descs ||
2920             qparam->rxq_features != lif->rxq_features) {
2921                 rx_qcqs = devm_kcalloc(lif->ionic->dev, lif->ionic->nrxqs_per_lif,
2922                                        sizeof(struct ionic_qcq *), GFP_KERNEL);
2923                 if (!rx_qcqs) {
2924                         err = -ENOMEM;
2925                         goto err_out;
2926                 }
2927         }
2928
2929         /* allocate new desc_info and rings, but leave the interrupt setup
2930          * until later so as to not mess with the still-running queues
2931          */
2932         if (tx_qcqs) {
2933                 num_desc = qparam->ntxq_descs;
2934                 desc_sz = sizeof(struct ionic_txq_desc);
2935                 comp_sz = sizeof(struct ionic_txq_comp);
2936
2937                 if (lif->qtype_info[IONIC_QTYPE_TXQ].version >= 1 &&
2938                     lif->qtype_info[IONIC_QTYPE_TXQ].sg_desc_sz ==
2939                     sizeof(struct ionic_txq_sg_desc_v1))
2940                         sg_desc_sz = sizeof(struct ionic_txq_sg_desc_v1);
2941                 else
2942                         sg_desc_sz = sizeof(struct ionic_txq_sg_desc);
2943
2944                 for (i = 0; i < qparam->nxqs; i++) {
2945                         /* If missing, allocate a short placeholder qcq needed for the swap */
2946                         if (!lif->txqcqs[i]) {
2947                                 flags = IONIC_QCQ_F_TX_STATS | IONIC_QCQ_F_SG;
2948                                 err = ionic_qcq_alloc(lif, IONIC_QTYPE_TXQ, i, "tx", flags,
2949                                                       4, desc_sz, comp_sz, sg_desc_sz,
2950                                                       sizeof(struct ionic_tx_desc_info),
2951                                                       lif->kern_pid, &lif->txqcqs[i]);
2952                                 if (err)
2953                                         goto err_out;
2954                         }
2955
2956                         flags = lif->txqcqs[i]->flags & ~IONIC_QCQ_F_INTR;
2957                         err = ionic_qcq_alloc(lif, IONIC_QTYPE_TXQ, i, "tx", flags,
2958                                               num_desc, desc_sz, comp_sz, sg_desc_sz,
2959                                               sizeof(struct ionic_tx_desc_info),
2960                                               lif->kern_pid, &tx_qcqs[i]);
2961                         if (err)
2962                                 goto err_out;
2963                 }
2964         }
2965
2966         if (rx_qcqs) {
2967                 num_desc = qparam->nrxq_descs;
2968                 desc_sz = sizeof(struct ionic_rxq_desc);
2969                 comp_sz = sizeof(struct ionic_rxq_comp);
2970                 sg_desc_sz = sizeof(struct ionic_rxq_sg_desc);
2971
2972                 if (qparam->rxq_features & IONIC_Q_F_2X_CQ_DESC)
2973                         comp_sz *= 2;
2974
2975                 for (i = 0; i < qparam->nxqs; i++) {
2976                         /* If missing, allocate a short placeholder qcq needed for the swap */
2977                         if (!lif->rxqcqs[i]) {
2978                                 flags = IONIC_QCQ_F_RX_STATS | IONIC_QCQ_F_SG;
2979                                 err = ionic_qcq_alloc(lif, IONIC_QTYPE_RXQ, i, "rx", flags,
2980                                                       4, desc_sz, comp_sz, sg_desc_sz,
2981                                                       sizeof(struct ionic_rx_desc_info),
2982                                                       lif->kern_pid, &lif->rxqcqs[i]);
2983                                 if (err)
2984                                         goto err_out;
2985                         }
2986
2987                         flags = lif->rxqcqs[i]->flags & ~IONIC_QCQ_F_INTR;
2988                         err = ionic_qcq_alloc(lif, IONIC_QTYPE_RXQ, i, "rx", flags,
2989                                               num_desc, desc_sz, comp_sz, sg_desc_sz,
2990                                               sizeof(struct ionic_rx_desc_info),
2991                                               lif->kern_pid, &rx_qcqs[i]);
2992                         if (err)
2993                                 goto err_out;
2994
2995                         rx_qcqs[i]->q.features = qparam->rxq_features;
2996                 }
2997         }
2998
2999         /* stop and clean the queues */
3000         ionic_stop_queues_reconfig(lif);
3001
3002         if (qparam->nxqs != lif->nxqs) {
3003                 err = netif_set_real_num_tx_queues(lif->netdev, qparam->nxqs);
3004                 if (err)
3005                         goto err_out_reinit_unlock;
3006                 err = netif_set_real_num_rx_queues(lif->netdev, qparam->nxqs);
3007                 if (err) {
3008                         netif_set_real_num_tx_queues(lif->netdev, lif->nxqs);
3009                         goto err_out_reinit_unlock;
3010                 }
3011         }
3012
3013         /* swap new desc_info and rings, keeping existing interrupt config */
3014         if (tx_qcqs) {
3015                 lif->ntxq_descs = qparam->ntxq_descs;
3016                 for (i = 0; i < qparam->nxqs; i++)
3017                         ionic_swap_queues(lif->txqcqs[i], tx_qcqs[i]);
3018         }
3019
3020         if (rx_qcqs) {
3021                 lif->nrxq_descs = qparam->nrxq_descs;
3022                 for (i = 0; i < qparam->nxqs; i++)
3023                         ionic_swap_queues(lif->rxqcqs[i], rx_qcqs[i]);
3024         }
3025
3026         /* if we need to change the interrupt layout, this is the time */
3027         if (qparam->intr_split != test_bit(IONIC_LIF_F_SPLIT_INTR, lif->state) ||
3028             qparam->nxqs != lif->nxqs) {
3029                 if (qparam->intr_split) {
3030                         set_bit(IONIC_LIF_F_SPLIT_INTR, lif->state);
3031                 } else {
3032                         clear_bit(IONIC_LIF_F_SPLIT_INTR, lif->state);
3033                         lif->tx_coalesce_usecs = lif->rx_coalesce_usecs;
3034                         lif->tx_coalesce_hw = lif->rx_coalesce_hw;
3035                 }
3036
3037                 /* Clear existing interrupt assignments.  We check for NULL here
3038                  * because we're checking the whole array for potential qcqs, not
3039                  * just those qcqs that have just been set up.
3040                  */
3041                 for (i = 0; i < lif->ionic->ntxqs_per_lif; i++) {
3042                         if (lif->txqcqs[i])
3043                                 ionic_qcq_intr_free(lif, lif->txqcqs[i]);
3044                         if (lif->rxqcqs[i])
3045                                 ionic_qcq_intr_free(lif, lif->rxqcqs[i]);
3046                 }
3047
3048                 /* re-assign the interrupts */
3049                 for (i = 0; i < qparam->nxqs; i++) {
3050                         lif->rxqcqs[i]->flags |= IONIC_QCQ_F_INTR;
3051                         err = ionic_alloc_qcq_interrupt(lif, lif->rxqcqs[i]);
3052                         ionic_intr_coal_init(lif->ionic->idev.intr_ctrl,
3053                                              lif->rxqcqs[i]->intr.index,
3054                                              lif->rx_coalesce_hw);
3055
3056                         if (qparam->intr_split) {
3057                                 lif->txqcqs[i]->flags |= IONIC_QCQ_F_INTR;
3058                                 err = ionic_alloc_qcq_interrupt(lif, lif->txqcqs[i]);
3059                                 ionic_intr_coal_init(lif->ionic->idev.intr_ctrl,
3060                                                      lif->txqcqs[i]->intr.index,
3061                                                      lif->tx_coalesce_hw);
3062                                 if (test_bit(IONIC_LIF_F_TX_DIM_INTR, lif->state))
3063                                         lif->txqcqs[i]->intr.dim_coal_hw = lif->tx_coalesce_hw;
3064                         } else {
3065                                 lif->txqcqs[i]->flags &= ~IONIC_QCQ_F_INTR;
3066                                 ionic_link_qcq_interrupts(lif->rxqcqs[i], lif->txqcqs[i]);
3067                         }
3068                 }
3069         }
3070
3071         /* now we can rework the debugfs mappings */
3072         if (tx_qcqs) {
3073                 for (i = 0; i < qparam->nxqs; i++) {
3074                         ionic_debugfs_del_qcq(lif->txqcqs[i]);
3075                         ionic_debugfs_add_qcq(lif, lif->txqcqs[i]);
3076                 }
3077         }
3078
3079         if (rx_qcqs) {
3080                 for (i = 0; i < qparam->nxqs; i++) {
3081                         ionic_debugfs_del_qcq(lif->rxqcqs[i]);
3082                         ionic_debugfs_add_qcq(lif, lif->rxqcqs[i]);
3083                 }
3084         }
3085
3086         swap(lif->nxqs, qparam->nxqs);
3087         swap(lif->rxq_features, qparam->rxq_features);
3088
3089 err_out_reinit_unlock:
3090         /* re-init the queues, but don't lose an error code */
3091         if (err)
3092                 ionic_start_queues_reconfig(lif);
3093         else
3094                 err = ionic_start_queues_reconfig(lif);
3095
3096 err_out:
3097         /* free old allocs without cleaning intr */
3098         for (i = 0; i < qparam->nxqs; i++) {
3099                 if (tx_qcqs && tx_qcqs[i]) {
3100                         tx_qcqs[i]->flags &= ~IONIC_QCQ_F_INTR;
3101                         ionic_qcq_free(lif, tx_qcqs[i]);
3102                         devm_kfree(lif->ionic->dev, tx_qcqs[i]);
3103                         tx_qcqs[i] = NULL;
3104                 }
3105                 if (rx_qcqs && rx_qcqs[i]) {
3106                         rx_qcqs[i]->flags &= ~IONIC_QCQ_F_INTR;
3107                         ionic_qcq_free(lif, rx_qcqs[i]);
3108                         devm_kfree(lif->ionic->dev, rx_qcqs[i]);
3109                         rx_qcqs[i] = NULL;
3110                 }
3111         }
3112
3113         /* free q array */
3114         if (rx_qcqs) {
3115                 devm_kfree(lif->ionic->dev, rx_qcqs);
3116                 rx_qcqs = NULL;
3117         }
3118         if (tx_qcqs) {
3119                 devm_kfree(lif->ionic->dev, tx_qcqs);
3120                 tx_qcqs = NULL;
3121         }
3122
3123         /* clean the unused dma and info allocations when new set is smaller
3124          * than the full array, but leave the qcq shells in place
3125          */
3126         for (i = lif->nxqs; i < lif->ionic->ntxqs_per_lif; i++) {
3127                 if (lif->txqcqs && lif->txqcqs[i]) {
3128                         lif->txqcqs[i]->flags &= ~IONIC_QCQ_F_INTR;
3129                         ionic_qcq_free(lif, lif->txqcqs[i]);
3130                 }
3131
3132                 if (lif->rxqcqs && lif->rxqcqs[i]) {
3133                         lif->rxqcqs[i]->flags &= ~IONIC_QCQ_F_INTR;
3134                         ionic_qcq_free(lif, lif->rxqcqs[i]);
3135                 }
3136         }
3137
3138         if (err)
3139                 netdev_info(lif->netdev, "%s: failed %d\n", __func__, err);
3140
3141         return err;
3142 }
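
/* Usage sketch (hypothetical, in the spirit of the ethtool ring/channel
 * paths that drive this): callers snapshot the current parameters, change
 * only the fields of interest, and pass the result in.  Because the new
 * rings are allocated before traffic is stopped, a failed allocation
 * leaves the running queues untouched.
 *
 *	struct ionic_queue_params qparam;
 *	int err;
 *
 *	ionic_init_queue_params(lif, &qparam);
 *	qparam.nxqs = new_queue_count;		(hypothetical new value)
 *	err = ionic_reconfigure_queues(lif, &qparam);
 */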
3143
3144 int ionic_lif_alloc(struct ionic *ionic)
3145 {
3146         struct device *dev = ionic->dev;
3147         union ionic_lif_identity *lid;
3148         struct net_device *netdev;
3149         struct ionic_lif *lif;
3150         int tbl_sz;
3151         int err;
3152
3153         lid = kzalloc(sizeof(*lid), GFP_KERNEL);
3154         if (!lid)
3155                 return -ENOMEM;
3156
3157         netdev = alloc_etherdev_mqs(sizeof(*lif),
3158                                     ionic->ntxqs_per_lif, ionic->ntxqs_per_lif);
3159         if (!netdev) {
3160                 dev_err(dev, "Cannot allocate netdev, aborting\n");
3161                 err = -ENOMEM;
3162                 goto err_out_free_lid;
3163         }
3164
3165         SET_NETDEV_DEV(netdev, dev);
3166
3167         lif = netdev_priv(netdev);
3168         lif->netdev = netdev;
3169         ionic->lif = lif;
3170         lif->ionic = ionic;
3171         netdev->netdev_ops = &ionic_netdev_ops;
3172         ionic_ethtool_set_ops(netdev);
3173
3174         netdev->watchdog_timeo = 2 * HZ;
3175         netif_carrier_off(netdev);
3176
3177         lif->identity = lid;
3178         lif->lif_type = IONIC_LIF_TYPE_CLASSIC;
3179         err = ionic_lif_identify(ionic, lif->lif_type, lif->identity);
3180         if (err) {
3181                 dev_err(ionic->dev, "Cannot identify type %d: %d\n",
3182                         lif->lif_type, err);
3183                 goto err_out_free_netdev;
3184         }
3185         lif->netdev->min_mtu = max_t(unsigned int, ETH_MIN_MTU,
3186                                      le32_to_cpu(lif->identity->eth.min_frame_size));
3187         lif->netdev->max_mtu =
3188                 le32_to_cpu(lif->identity->eth.max_frame_size) - ETH_HLEN - VLAN_HLEN;
3189
3190         lif->neqs = ionic->neqs_per_lif;
3191         lif->nxqs = ionic->ntxqs_per_lif;
3192
3193         lif->index = 0;
3194
3195         if (is_kdump_kernel()) {
3196                 lif->ntxq_descs = IONIC_MIN_TXRX_DESC;
3197                 lif->nrxq_descs = IONIC_MIN_TXRX_DESC;
3198         } else {
3199                 lif->ntxq_descs = IONIC_DEF_TXRX_DESC;
3200                 lif->nrxq_descs = IONIC_DEF_TXRX_DESC;
3201         }
3202
3203         /* Convert the default coalesce value to actual hw resolution */
3204         lif->rx_coalesce_usecs = IONIC_ITR_COAL_USEC_DEFAULT;
3205         lif->rx_coalesce_hw = ionic_coal_usec_to_hw(lif->ionic,
3206                                                     lif->rx_coalesce_usecs);
3207         lif->tx_coalesce_usecs = lif->rx_coalesce_usecs;
3208         lif->tx_coalesce_hw = lif->rx_coalesce_hw;
3209         set_bit(IONIC_LIF_F_RX_DIM_INTR, lif->state);
3210         set_bit(IONIC_LIF_F_TX_DIM_INTR, lif->state);
3211
3212         snprintf(lif->name, sizeof(lif->name), "lif%u", lif->index);
3213
3214         mutex_init(&lif->queue_lock);
3215         mutex_init(&lif->config_lock);
3216
3217         spin_lock_init(&lif->adminq_lock);
3218
3219         spin_lock_init(&lif->deferred.lock);
3220         INIT_LIST_HEAD(&lif->deferred.list);
3221         INIT_WORK(&lif->deferred.work, ionic_lif_deferred_work);
3222
3223         /* allocate lif info */
3224         lif->info_sz = ALIGN(sizeof(*lif->info), PAGE_SIZE);
3225         lif->info = dma_alloc_coherent(dev, lif->info_sz,
3226                                        &lif->info_pa, GFP_KERNEL);
3227         if (!lif->info) {
3228                 dev_err(dev, "Failed to allocate lif info, aborting\n");
3229                 err = -ENOMEM;
3230                 goto err_out_free_mutex;
3231         }
3232
3233         ionic_debugfs_add_lif(lif);
3234
3235         /* allocate control queues and txrx queue arrays */
3236         ionic_lif_queue_identify(lif);
3237         err = ionic_qcqs_alloc(lif);
3238         if (err)
3239                 goto err_out_free_lif_info;
3240
3241         /* allocate rss indirection table */
3242         tbl_sz = le16_to_cpu(lif->ionic->ident.lif.eth.rss_ind_tbl_sz);
3243         lif->rss_ind_tbl_sz = sizeof(*lif->rss_ind_tbl) * tbl_sz;
3244         lif->rss_ind_tbl = dma_alloc_coherent(dev, lif->rss_ind_tbl_sz,
3245                                               &lif->rss_ind_tbl_pa,
3246                                               GFP_KERNEL);
3247
3248         if (!lif->rss_ind_tbl) {
3249                 err = -ENOMEM;
3250                 dev_err(dev, "Failed to allocate rss indirection table, aborting\n");
3251                 goto err_out_free_qcqs;
3252         }
3253         netdev_rss_key_fill(lif->rss_hash_key, IONIC_RSS_HASH_KEY_SIZE);
3254
3255         ionic_lif_alloc_phc(lif);
3256
3257         return 0;
3258
3259 err_out_free_qcqs:
3260         ionic_qcqs_free(lif);
3261 err_out_free_lif_info:
3262         dma_free_coherent(dev, lif->info_sz, lif->info, lif->info_pa);
3263         lif->info = NULL;
3264         lif->info_pa = 0;
3265 err_out_free_mutex:
3266         mutex_destroy(&lif->config_lock);
3267         mutex_destroy(&lif->queue_lock);
3268 err_out_free_netdev:
3269         free_netdev(lif->netdev);
3270         lif = NULL;
3271 err_out_free_lid:
3272         kfree(lid);
3273
3274         return err;
3275 }
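
/* The error labels above unwind in the reverse order of allocation
 * (qcqs, lif info, mutexes, netdev, identity buffer); anything added to
 * ionic_lif_alloc() should keep that symmetry so a partial failure never
 * leaks or double-frees a resource.
 */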
3276
3277 static void ionic_lif_reset(struct ionic_lif *lif)
3278 {
3279         struct ionic_dev *idev = &lif->ionic->idev;
3280
3281         if (!ionic_is_fw_running(idev))
3282                 return;
3283
3284         mutex_lock(&lif->ionic->dev_cmd_lock);
3285         ionic_dev_cmd_lif_reset(idev, lif->index);
3286         ionic_dev_cmd_wait(lif->ionic, DEVCMD_TIMEOUT);
3287         mutex_unlock(&lif->ionic->dev_cmd_lock);
3288 }
3289
3290 static void ionic_lif_handle_fw_down(struct ionic_lif *lif)
3291 {
3292         struct ionic *ionic = lif->ionic;
3293
3294         if (test_and_set_bit(IONIC_LIF_F_FW_RESET, lif->state))
3295                 return;
3296
3297         dev_info(ionic->dev, "FW Down: Stopping LIFs\n");
3298
3299         netif_device_detach(lif->netdev);
3300
3301         mutex_lock(&lif->queue_lock);
3302         if (test_bit(IONIC_LIF_F_UP, lif->state)) {
3303                 dev_info(ionic->dev, "Surprise FW stop, stopping queues\n");
3304                 ionic_stop_queues(lif);
3305         }
3306
3307         if (netif_running(lif->netdev)) {
3308                 ionic_txrx_deinit(lif);
3309                 ionic_txrx_free(lif);
3310         }
3311         ionic_lif_deinit(lif);
3312         ionic_reset(ionic);
3313         ionic_qcqs_free(lif);
3314
3315         mutex_unlock(&lif->queue_lock);
3316
3317         clear_bit(IONIC_LIF_F_FW_STOPPING, lif->state);
3318         dev_info(ionic->dev, "FW Down: LIFs stopped\n");
3319 }
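
/* Teardown order on a firmware-down event: detach the netdev so user space
 * can't restart traffic, then, under queue_lock so it can't race a queue
 * reconfiguration, stop the data queues, free the txrx resources, deinit
 * the LIF and its control queues, reset the device and free the qcq
 * shells.  ionic_restart_lif() below rebuilds the same pieces in the
 * opposite order once firmware comes back.
 */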
3320
3321 int ionic_restart_lif(struct ionic_lif *lif)
3322 {
3323         struct ionic *ionic = lif->ionic;
3324         int err;
3325
3326         mutex_lock(&lif->queue_lock);
3327
3328         if (test_and_clear_bit(IONIC_LIF_F_BROKEN, lif->state))
3329                 dev_info(ionic->dev, "FW Up: clearing broken state\n");
3330
3331         err = ionic_qcqs_alloc(lif);
3332         if (err)
3333                 goto err_unlock;
3334
3335         err = ionic_lif_init(lif);
3336         if (err)
3337                 goto err_qcqs_free;
3338
3339         ionic_vf_attr_replay(lif);
3340
3341         if (lif->registered)
3342                 ionic_lif_set_netdev_info(lif);
3343
3344         ionic_rx_filter_replay(lif);
3345
3346         if (netif_running(lif->netdev)) {
3347                 err = ionic_txrx_alloc(lif);
3348                 if (err)
3349                         goto err_lifs_deinit;
3350
3351                 err = ionic_txrx_init(lif);
3352                 if (err)
3353                         goto err_txrx_free;
3354         }
3355
3356         mutex_unlock(&lif->queue_lock);
3357
3358         clear_bit(IONIC_LIF_F_FW_RESET, lif->state);
3359         ionic_link_status_check_request(lif, CAN_SLEEP);
3360         netif_device_attach(lif->netdev);
3361
3362         return 0;
3363
3364 err_txrx_free:
3365         ionic_txrx_free(lif);
3366 err_lifs_deinit:
3367         ionic_lif_deinit(lif);
3368 err_qcqs_free:
3369         ionic_qcqs_free(lif);
3370 err_unlock:
3371         mutex_unlock(&lif->queue_lock);
3372
3373         return err;
3374 }
3375
3376 static void ionic_lif_handle_fw_up(struct ionic_lif *lif)
3377 {
3378         struct ionic *ionic = lif->ionic;
3379         int err;
3380
3381         if (!test_bit(IONIC_LIF_F_FW_RESET, lif->state))
3382                 return;
3383
3384         dev_info(ionic->dev, "FW Up: restarting LIFs\n");
3385
3386         /* This is a little different from what happens at
3387          * probe time because the LIF already exists so we
3388          * just need to reanimate it.
3389          */
3390         ionic_init_devinfo(ionic);
3391         err = ionic_identify(ionic);
3392         if (err)
3393                 goto err_out;
3394         err = ionic_port_identify(ionic);
3395         if (err)
3396                 goto err_out;
3397         err = ionic_port_init(ionic);
3398         if (err)
3399                 goto err_out;
3400
3401         err = ionic_restart_lif(lif);
3402         if (err)
3403                 goto err_out;
3404
3405         dev_info(ionic->dev, "FW Up: LIFs restarted\n");
3406
3407         /* restore the hardware timestamping queues */
3408         ionic_lif_hwstamp_replay(lif);
3409
3410         return;
3411
3412 err_out:
3413         dev_err(ionic->dev, "FW Up: LIFs restart failed - err %d\n", err);
3414 }
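
/* On firmware-up the device identity may have changed across the reset,
 * so the handler re-reads devinfo, device and port identity and re-inits
 * the port before rebuilding the LIF; the hardware timestamping queues
 * are replayed last because they depend on the restarted data queues.
 */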
3415
3416 void ionic_lif_free(struct ionic_lif *lif)
3417 {
3418         struct device *dev = lif->ionic->dev;
3419
3420         ionic_lif_free_phc(lif);
3421
3422         /* free rss indirection table */
3423         dma_free_coherent(dev, lif->rss_ind_tbl_sz, lif->rss_ind_tbl,
3424                           lif->rss_ind_tbl_pa);
3425         lif->rss_ind_tbl = NULL;
3426         lif->rss_ind_tbl_pa = 0;
3427
3428         /* free queues */
3429         ionic_qcqs_free(lif);
3430         if (!test_bit(IONIC_LIF_F_FW_RESET, lif->state))
3431                 ionic_lif_reset(lif);
3432
3433         /* free lif info */
3434         kfree(lif->identity);
3435         dma_free_coherent(dev, lif->info_sz, lif->info, lif->info_pa);
3436         lif->info = NULL;
3437         lif->info_pa = 0;
3438
3439         /* unmap doorbell page */
3440         ionic_bus_unmap_dbpage(lif->ionic, lif->kern_dbpage);
3441         lif->kern_dbpage = NULL;
3442
3443         mutex_destroy(&lif->config_lock);
3444         mutex_destroy(&lif->queue_lock);
3445
3446         /* free netdev & lif */
3447         ionic_debugfs_del_lif(lif);
3448         free_netdev(lif->netdev);
3449 }
3450
3451 void ionic_lif_deinit(struct ionic_lif *lif)
3452 {
3453         if (!test_and_clear_bit(IONIC_LIF_F_INITED, lif->state))
3454                 return;
3455
3456         if (!test_bit(IONIC_LIF_F_FW_RESET, lif->state)) {
3457                 cancel_work_sync(&lif->deferred.work);
3458                 cancel_work_sync(&lif->tx_timeout_work);
3459                 ionic_rx_filters_deinit(lif);
3460                 if (lif->netdev->features & NETIF_F_RXHASH)
3461                         ionic_lif_rss_deinit(lif);
3462         }
3463
3464         napi_disable(&lif->adminqcq->napi);
3465         ionic_lif_qcq_deinit(lif, lif->notifyqcq);
3466         ionic_lif_qcq_deinit(lif, lif->adminqcq);
3467
3468         ionic_lif_reset(lif);
3469 }
3470
3471 static int ionic_lif_adminq_init(struct ionic_lif *lif)
3472 {
3473         struct device *dev = lif->ionic->dev;
3474         struct ionic_q_init_comp comp;
3475         struct ionic_dev *idev;
3476         struct ionic_qcq *qcq;
3477         struct ionic_queue *q;
3478         int err;
3479
3480         idev = &lif->ionic->idev;
3481         qcq = lif->adminqcq;
3482         q = &qcq->q;
3483
3484         mutex_lock(&lif->ionic->dev_cmd_lock);
3485         ionic_dev_cmd_adminq_init(idev, qcq, lif->index, qcq->intr.index);
3486         err = ionic_dev_cmd_wait(lif->ionic, DEVCMD_TIMEOUT);
3487         ionic_dev_cmd_comp(idev, (union ionic_dev_cmd_comp *)&comp);
3488         mutex_unlock(&lif->ionic->dev_cmd_lock);
3489         if (err) {
3490                 netdev_err(lif->netdev, "adminq init failed %d\n", err);
3491                 return err;
3492         }
3493
3494         q->hw_type = comp.hw_type;
3495         q->hw_index = le32_to_cpu(comp.hw_index);
3496         q->dbval = IONIC_DBELL_QID(q->hw_index);
3497
3498         dev_dbg(dev, "adminq->hw_type %d\n", q->hw_type);
3499         dev_dbg(dev, "adminq->hw_index %d\n", q->hw_index);
3500
3501         q->dbell_deadline = IONIC_ADMIN_DOORBELL_DEADLINE;
3502         q->dbell_jiffies = jiffies;
3503
3504         netif_napi_add(lif->netdev, &qcq->napi, ionic_adminq_napi);
3505
3506         qcq->napi_qcq = qcq;
3507         timer_setup(&qcq->napi_deadline, ionic_napi_deadline, 0);
3508
3509         napi_enable(&qcq->napi);
3510
3511         if (qcq->flags & IONIC_QCQ_F_INTR) {
3512                 irq_set_affinity_hint(qcq->intr.vector,
3513                                       &qcq->intr.affinity_mask);
3514                 ionic_intr_mask(idev->intr_ctrl, qcq->intr.index,
3515                                 IONIC_INTR_MASK_CLEAR);
3516         }
3517
3518         qcq->flags |= IONIC_QCQ_F_INITED;
3519
3520         return 0;
3521 }
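
/* The adminq is the one queue that must be initialized over the
 * device-command register interface (ionic_dev_cmd_adminq_init), since
 * there is no adminq to post to yet; later queue inits, like the notifyq
 * below, go through ionic_adminq_post_wait() instead.  The napi_deadline
 * timer and dbell_deadline set up here appear to act as a watchdog so the
 * driver can revisit the queue if a doorbell seems to have gone
 * unserviced (see ionic_napi_deadline elsewhere in the driver).
 */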
3522
3523 static int ionic_lif_notifyq_init(struct ionic_lif *lif)
3524 {
3525         struct ionic_qcq *qcq = lif->notifyqcq;
3526         struct device *dev = lif->ionic->dev;
3527         struct ionic_queue *q = &qcq->q;
3528         int err;
3529
3530         struct ionic_admin_ctx ctx = {
3531                 .work = COMPLETION_INITIALIZER_ONSTACK(ctx.work),
3532                 .cmd.q_init = {
3533                         .opcode = IONIC_CMD_Q_INIT,
3534                         .lif_index = cpu_to_le16(lif->index),
3535                         .type = q->type,
3536                         .ver = lif->qtype_info[q->type].version,
3537                         .index = cpu_to_le32(q->index),
3538                         .flags = cpu_to_le16(IONIC_QINIT_F_IRQ |
3539                                              IONIC_QINIT_F_ENA),
3540                         .intr_index = cpu_to_le16(lif->adminqcq->intr.index),
3541                         .pid = cpu_to_le16(q->pid),
3542                         .ring_size = ilog2(q->num_descs),
3543                         .ring_base = cpu_to_le64(q->base_pa),
3544                 }
3545         };
3546
3547         dev_dbg(dev, "notifyq_init.pid %d\n", ctx.cmd.q_init.pid);
3548         dev_dbg(dev, "notifyq_init.index %d\n", ctx.cmd.q_init.index);
3549         dev_dbg(dev, "notifyq_init.ring_base 0x%llx\n", ctx.cmd.q_init.ring_base);
3550         dev_dbg(dev, "notifyq_init.ring_size %d\n", ctx.cmd.q_init.ring_size);
3551
3552         err = ionic_adminq_post_wait(lif, &ctx);
3553         if (err)
3554                 return err;
3555
3556         lif->last_eid = 0;
3557         q->hw_type = ctx.comp.q_init.hw_type;
3558         q->hw_index = le32_to_cpu(ctx.comp.q_init.hw_index);
3559         q->dbval = IONIC_DBELL_QID(q->hw_index);
3560
3561         dev_dbg(dev, "notifyq->hw_type %d\n", q->hw_type);
3562         dev_dbg(dev, "notifyq->hw_index %d\n", q->hw_index);
3563
3564         /* preset the callback info */
3565         q->admin_info[0].ctx = lif;
3566
3567         qcq->flags |= IONIC_QCQ_F_INITED;
3568
3569         return 0;
3570 }
3571
3572 static int ionic_station_set(struct ionic_lif *lif)
3573 {
3574         struct net_device *netdev = lif->netdev;
3575         struct ionic_admin_ctx ctx = {
3576                 .work = COMPLETION_INITIALIZER_ONSTACK(ctx.work),
3577                 .cmd.lif_getattr = {
3578                         .opcode = IONIC_CMD_LIF_GETATTR,
3579                         .index = cpu_to_le16(lif->index),
3580                         .attr = IONIC_LIF_ATTR_MAC,
3581                 },
3582         };
3583         u8 mac_address[ETH_ALEN];
3584         struct sockaddr addr;
3585         int err;
3586
3587         err = ionic_adminq_post_wait(lif, &ctx);
3588         if (err)
3589                 return err;
3590         netdev_dbg(lif->netdev, "found initial MAC addr %pM\n",
3591                    ctx.comp.lif_getattr.mac);
3592         ether_addr_copy(mac_address, ctx.comp.lif_getattr.mac);
3593
3594         if (is_zero_ether_addr(mac_address)) {
3595                 eth_hw_addr_random(netdev);
3596                 netdev_dbg(netdev, "Random Mac generated: %pM\n", netdev->dev_addr);
3597                 ether_addr_copy(mac_address, netdev->dev_addr);
3598
3599                 err = ionic_program_mac(lif, mac_address);
3600                 if (err < 0)
3601                         return err;
3602
3603                 if (err > 0) {
3604                         netdev_dbg(netdev, "%s: SET/GET ATTR MAC mismatch - likely due to old FW running\n",
3605                                    __func__);
3606                         return 0;
3607                 }
3608         }
3609
3610         if (!is_zero_ether_addr(netdev->dev_addr)) {
3611                 /* If the netdev mac is non-zero and doesn't match the default
3612                  * device address, it was set by something earlier and we're
3613                  * likely here again after a fw-upgrade reset.  We need to be
3614                  * sure the netdev mac is in our filter list.
3615                  */
3616                 if (!ether_addr_equal(mac_address, netdev->dev_addr))
3617                         ionic_lif_addr_add(lif, netdev->dev_addr);
3618         } else {
3619                 /* Update the netdev mac with the device's mac */
3620                 ether_addr_copy(addr.sa_data, mac_address);
3621                 addr.sa_family = AF_INET;
3622                 err = eth_prepare_mac_addr_change(netdev, &addr);
3623                 if (err) {
3624                         netdev_warn(lif->netdev, "ignoring bad MAC addr from NIC %pM - err %d\n",
3625                                     addr.sa_data, err);
3626                         return 0;
3627                 }
3628
3629                 eth_commit_mac_addr_change(netdev, &addr);
3630         }
3631
3632         netdev_dbg(lif->netdev, "adding station MAC addr %pM\n",
3633                    netdev->dev_addr);
3634         ionic_lif_addr_add(lif, netdev->dev_addr);
3635
3636         return 0;
3637 }
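
/* MAC selection above:
 *   - if the device reports an all-zeros MAC, a random one is generated
 *     and programmed back to the device (older firmware that can't echo
 *     it back is tolerated);
 *   - if the netdev already has a MAC (e.g. set by the admin before a
 *     fw-upgrade reset), it is kept and only re-added to the filter list;
 *   - otherwise the netdev adopts the device-provided MAC.
 */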
3638
3639 int ionic_lif_init(struct ionic_lif *lif)
3640 {
3641         struct ionic_dev *idev = &lif->ionic->idev;
3642         struct device *dev = lif->ionic->dev;
3643         struct ionic_lif_init_comp comp;
3644         int dbpage_num;
3645         int err;
3646
3647         mutex_lock(&lif->ionic->dev_cmd_lock);
3648         ionic_dev_cmd_lif_init(idev, lif->index, lif->info_pa);
3649         err = ionic_dev_cmd_wait(lif->ionic, DEVCMD_TIMEOUT);
3650         ionic_dev_cmd_comp(idev, (union ionic_dev_cmd_comp *)&comp);
3651         mutex_unlock(&lif->ionic->dev_cmd_lock);
3652         if (err)
3653                 return err;
3654
3655         lif->hw_index = le16_to_cpu(comp.hw_index);
3656
3657         /* now that we have the hw_index we can figure out our doorbell page */
3658         lif->dbid_count = le32_to_cpu(lif->ionic->ident.dev.ndbpgs_per_lif);
3659         if (!lif->dbid_count) {
3660                 dev_err(dev, "No doorbell pages, aborting\n");
3661                 return -EINVAL;
3662         }
3663
3664         lif->kern_pid = 0;
3665         dbpage_num = ionic_db_page_num(lif, lif->kern_pid);
3666         lif->kern_dbpage = ionic_bus_map_dbpage(lif->ionic, dbpage_num);
3667         if (!lif->kern_dbpage) {
3668                 dev_err(dev, "Cannot map dbpage, aborting\n");
3669                 return -ENOMEM;
3670         }
3671
3672         err = ionic_lif_adminq_init(lif);
3673         if (err)
3674                 goto err_out_adminq_deinit;
3675
3676         if (lif->ionic->nnqs_per_lif) {
3677                 err = ionic_lif_notifyq_init(lif);
3678                 if (err)
3679                         goto err_out_notifyq_deinit;
3680         }
3681
3682         if (test_bit(IONIC_LIF_F_FW_RESET, lif->state))
3683                 err = ionic_set_nic_features(lif, lif->netdev->features);
3684         else
3685                 err = ionic_init_nic_features(lif);
3686         if (err)
3687                 goto err_out_notifyq_deinit;
3688
3689         if (!test_bit(IONIC_LIF_F_FW_RESET, lif->state)) {
3690                 err = ionic_rx_filters_init(lif);
3691                 if (err)
3692                         goto err_out_notifyq_deinit;
3693         }
3694
3695         err = ionic_station_set(lif);
3696         if (err)
3697                 goto err_out_notifyq_deinit;
3698
3699         lif->rx_copybreak = IONIC_RX_COPYBREAK_DEFAULT;
3700
3701         set_bit(IONIC_LIF_F_INITED, lif->state);
3702
3703         INIT_WORK(&lif->tx_timeout_work, ionic_tx_timeout_work);
3704
3705         return 0;
3706
3707 err_out_notifyq_deinit:
3708         napi_disable(&lif->adminqcq->napi);
3709         ionic_lif_qcq_deinit(lif, lif->notifyqcq);
3710 err_out_adminq_deinit:
3711         ionic_lif_qcq_deinit(lif, lif->adminqcq);
3712         ionic_lif_reset(lif);
3713         ionic_bus_unmap_dbpage(lif->ionic, lif->kern_dbpage);
3714         lif->kern_dbpage = NULL;
3715
3716         return err;
3717 }
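
/* Init order matters here: the LIF_INIT dev-cmd returns hw_index, which
 * determines the doorbell page to map, which the adminq needs before the
 * notifyq, features, rx filters and station MAC can be layered on top of
 * it.  On a firmware reset the previously negotiated netdev features are
 * re-applied rather than re-negotiated, and the rx filters are replayed
 * from the saved list in ionic_restart_lif() rather than re-initialized
 * here.
 */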
3718
3719 static void ionic_lif_notify_work(struct work_struct *ws)
3720 {
3721 }
3722
3723 static void ionic_lif_set_netdev_info(struct ionic_lif *lif)
3724 {
3725         struct ionic_admin_ctx ctx = {
3726                 .work = COMPLETION_INITIALIZER_ONSTACK(ctx.work),
3727                 .cmd.lif_setattr = {
3728                         .opcode = IONIC_CMD_LIF_SETATTR,
3729                         .index = cpu_to_le16(lif->index),
3730                         .attr = IONIC_LIF_ATTR_NAME,
3731                 },
3732         };
3733
3734         strscpy(ctx.cmd.lif_setattr.name, lif->netdev->name,
3735                 sizeof(ctx.cmd.lif_setattr.name));
3736
3737         ionic_adminq_post_wait(lif, &ctx);
3738 }
3739
3740 static struct ionic_lif *ionic_netdev_lif(struct net_device *netdev)
3741 {
3742         if (!netdev || netdev->netdev_ops->ndo_start_xmit != ionic_start_xmit)
3743                 return NULL;
3744
3745         return netdev_priv(netdev);
3746 }
3747
3748 static int ionic_lif_notify(struct notifier_block *nb,
3749                             unsigned long event, void *info)
3750 {
3751         struct net_device *ndev = netdev_notifier_info_to_dev(info);
3752         struct ionic *ionic = container_of(nb, struct ionic, nb);
3753         struct ionic_lif *lif = ionic_netdev_lif(ndev);
3754
3755         if (!lif || lif->ionic != ionic)
3756                 return NOTIFY_DONE;
3757
3758         switch (event) {
3759         case NETDEV_CHANGENAME:
3760                 ionic_lif_set_netdev_info(lif);
3761                 break;
3762         }
3763
3764         return NOTIFY_DONE;
3765 }
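
/* Example of the notifier in action (hypothetical names): renaming the
 * interface with "ip link set eth0 name ionic0" raises NETDEV_CHANGENAME,
 * which lands here and pushes the new name to the device through
 * ionic_lif_set_netdev_info(), so the firmware's notion of the LIF name
 * tracks the host interface name.
 */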
3766
3767 int ionic_lif_register(struct ionic_lif *lif)
3768 {
3769         int err;
3770
3771         ionic_lif_register_phc(lif);
3772
3773         INIT_WORK(&lif->ionic->nb_work, ionic_lif_notify_work);
3774
3775         lif->ionic->nb.notifier_call = ionic_lif_notify;
3776
3777         err = register_netdevice_notifier(&lif->ionic->nb);
3778         if (err)
3779                 lif->ionic->nb.notifier_call = NULL;
3780
3781         /* only register LIF0 for now */
3782         err = register_netdev(lif->netdev);
3783         if (err) {
3784                 dev_err(lif->ionic->dev, "Cannot register net device, aborting\n");
3785                 ionic_lif_unregister_phc(lif);
3786                 return err;
3787         }
3788
3789         ionic_link_status_check_request(lif, CAN_SLEEP);
3790         lif->registered = true;
3791         ionic_lif_set_netdev_info(lif);
3792
3793         return 0;
3794 }
3795
3796 void ionic_lif_unregister(struct ionic_lif *lif)
3797 {
3798         if (lif->ionic->nb.notifier_call) {
3799                 unregister_netdevice_notifier(&lif->ionic->nb);
3800                 cancel_work_sync(&lif->ionic->nb_work);
3801                 lif->ionic->nb.notifier_call = NULL;
3802         }
3803
3804         if (lif->netdev->reg_state == NETREG_REGISTERED)
3805                 unregister_netdev(lif->netdev);
3806
3807         ionic_lif_unregister_phc(lif);
3808
3809         lif->registered = false;
3810 }
3811
3812 static void ionic_lif_queue_identify(struct ionic_lif *lif)
3813 {
3814         union ionic_q_identity __iomem *q_ident;
3815         struct ionic *ionic = lif->ionic;
3816         struct ionic_dev *idev;
3817         u16 max_frags;
3818         int qtype;
3819         int err;
3820
3821         idev = &lif->ionic->idev;
3822         q_ident = (union ionic_q_identity __iomem *)&idev->dev_cmd_regs->data;
3823
3824         for (qtype = 0; qtype < ARRAY_SIZE(ionic_qtype_versions); qtype++) {
3825                 struct ionic_qtype_info *qti = &lif->qtype_info[qtype];
3826
3827                 /* skip the queue types we don't know about */
3828                 switch (qtype) {
3829                 case IONIC_QTYPE_ADMINQ:
3830                 case IONIC_QTYPE_NOTIFYQ:
3831                 case IONIC_QTYPE_RXQ:
3832                 case IONIC_QTYPE_TXQ:
3833                         break;
3834                 default:
3835                         continue;
3836                 }
3837
3838                 memset(qti, 0, sizeof(*qti));
3839
3840                 mutex_lock(&ionic->dev_cmd_lock);
3841                 ionic_dev_cmd_queue_identify(idev, lif->lif_type, qtype,
3842                                              ionic_qtype_versions[qtype]);
3843                 err = ionic_dev_cmd_wait(ionic, DEVCMD_TIMEOUT);
3844                 if (!err) {
3845                         qti->version   = readb(&q_ident->version);
3846                         qti->supported = readb(&q_ident->supported);
3847                         qti->features  = readq(&q_ident->features);
3848                         qti->desc_sz   = readw(&q_ident->desc_sz);
3849                         qti->comp_sz   = readw(&q_ident->comp_sz);
3850                         qti->sg_desc_sz   = readw(&q_ident->sg_desc_sz);
3851                         qti->max_sg_elems = readw(&q_ident->max_sg_elems);
3852                         qti->sg_desc_stride = readw(&q_ident->sg_desc_stride);
3853                 }
3854                 mutex_unlock(&ionic->dev_cmd_lock);
3855
3856                 if (err == -EINVAL) {
3857                         dev_err(ionic->dev, "qtype %d not supported\n", qtype);
3858                         continue;
3859                 } else if (err == -EIO) {
3860                         dev_err(ionic->dev, "q_ident failed, not supported on older FW\n");
3861                         return;
3862                 } else if (err) {
3863                         dev_err(ionic->dev, "q_ident failed, qtype %d: %d\n",
3864                                 qtype, err);
3865                         return;
3866                 }
3867
3868                 dev_dbg(ionic->dev, " qtype[%d].version = %d\n",
3869                         qtype, qti->version);
3870                 dev_dbg(ionic->dev, " qtype[%d].supported = 0x%02x\n",
3871                         qtype, qti->supported);
3872                 dev_dbg(ionic->dev, " qtype[%d].features = 0x%04llx\n",
3873                         qtype, qti->features);
3874                 dev_dbg(ionic->dev, " qtype[%d].desc_sz = %d\n",
3875                         qtype, qti->desc_sz);
3876                 dev_dbg(ionic->dev, " qtype[%d].comp_sz = %d\n",
3877                         qtype, qti->comp_sz);
3878                 dev_dbg(ionic->dev, " qtype[%d].sg_desc_sz = %d\n",
3879                         qtype, qti->sg_desc_sz);
3880                 dev_dbg(ionic->dev, " qtype[%d].max_sg_elems = %d\n",
3881                         qtype, qti->max_sg_elems);
3882                 dev_dbg(ionic->dev, " qtype[%d].sg_desc_stride = %d\n",
3883                         qtype, qti->sg_desc_stride);
3884
3885                 if (qtype == IONIC_QTYPE_TXQ)
3886                         max_frags = IONIC_TX_MAX_FRAGS;
3887                 else if (qtype == IONIC_QTYPE_RXQ)
3888                         max_frags = IONIC_RX_MAX_FRAGS;
3889                 else
3890                         max_frags = 1;
3891
3892                 qti->max_sg_elems = min_t(u16, max_frags - 1, MAX_SKB_FRAGS);
3893                 dev_dbg(ionic->dev, "qtype %d max_sg_elems %d\n",
3894                         qtype, qti->max_sg_elems);
3895         }
3896 }
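
/* Version negotiation sketch: for each queue type the driver offers the
 * highest version it understands (ionic_qtype_versions[qtype]) and the
 * device answers in q_ident with what it actually supports; the result is
 * cached in lif->qtype_info and later decides, for example, whether the
 * TXQ uses the v1 SG descriptor layout in ionic_reconfigure_queues().
 */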
3897
3898 int ionic_lif_identify(struct ionic *ionic, u8 lif_type,
3899                        union ionic_lif_identity *lid)
3900 {
3901         struct ionic_dev *idev = &ionic->idev;
3902         size_t sz;
3903         int err;
3904
3905         sz = min(sizeof(*lid), sizeof(idev->dev_cmd_regs->data));
3906
3907         mutex_lock(&ionic->dev_cmd_lock);
3908         ionic_dev_cmd_lif_identify(idev, lif_type, IONIC_IDENTITY_VERSION_1);
3909         err = ionic_dev_cmd_wait(ionic, DEVCMD_TIMEOUT);
3910         memcpy_fromio(lid, &idev->dev_cmd_regs->data, sz);
3911         mutex_unlock(&ionic->dev_cmd_lock);
3912         if (err)
3913                 return err;
3914
3915         dev_dbg(ionic->dev, "capabilities 0x%llx\n",
3916                 le64_to_cpu(lid->capabilities));
3917
3918         dev_dbg(ionic->dev, "eth.max_ucast_filters %d\n",
3919                 le32_to_cpu(lid->eth.max_ucast_filters));
3920         dev_dbg(ionic->dev, "eth.max_mcast_filters %d\n",
3921                 le32_to_cpu(lid->eth.max_mcast_filters));
3922         dev_dbg(ionic->dev, "eth.features 0x%llx\n",
3923                 le64_to_cpu(lid->eth.config.features));
3924         dev_dbg(ionic->dev, "eth.queue_count[IONIC_QTYPE_ADMINQ] %d\n",
3925                 le32_to_cpu(lid->eth.config.queue_count[IONIC_QTYPE_ADMINQ]));
3926         dev_dbg(ionic->dev, "eth.queue_count[IONIC_QTYPE_NOTIFYQ] %d\n",
3927                 le32_to_cpu(lid->eth.config.queue_count[IONIC_QTYPE_NOTIFYQ]));
3928         dev_dbg(ionic->dev, "eth.queue_count[IONIC_QTYPE_RXQ] %d\n",
3929                 le32_to_cpu(lid->eth.config.queue_count[IONIC_QTYPE_RXQ]));
3930         dev_dbg(ionic->dev, "eth.queue_count[IONIC_QTYPE_TXQ] %d\n",
3931                 le32_to_cpu(lid->eth.config.queue_count[IONIC_QTYPE_TXQ]));
3932         dev_dbg(ionic->dev, "eth.config.name %s\n", lid->eth.config.name);
3933         dev_dbg(ionic->dev, "eth.config.mac %pM\n", lid->eth.config.mac);
3934         dev_dbg(ionic->dev, "eth.config.mtu %d\n",
3935                 le32_to_cpu(lid->eth.config.mtu));
3936
3937         return 0;
3938 }
3939
3940 int ionic_lif_size(struct ionic *ionic)
3941 {
3942         struct ionic_identity *ident = &ionic->ident;
3943         unsigned int nintrs, dev_nintrs;
3944         union ionic_lif_config *lc;
3945         unsigned int ntxqs_per_lif;
3946         unsigned int nrxqs_per_lif;
3947         unsigned int neqs_per_lif;
3948         unsigned int nnqs_per_lif;
3949         unsigned int nxqs, neqs;
3950         unsigned int min_intrs;
3951         int err;
3952
3953         /* retrieve basic values from FW */
3954         lc = &ident->lif.eth.config;
3955         dev_nintrs = le32_to_cpu(ident->dev.nintrs);
3956         neqs_per_lif = le32_to_cpu(ident->lif.rdma.eq_qtype.qid_count);
3957         nnqs_per_lif = le32_to_cpu(lc->queue_count[IONIC_QTYPE_NOTIFYQ]);
3958         ntxqs_per_lif = le32_to_cpu(lc->queue_count[IONIC_QTYPE_TXQ]);
3959         nrxqs_per_lif = le32_to_cpu(lc->queue_count[IONIC_QTYPE_RXQ]);
3960
3961         /* limit values to play nice with kdump */
3962         if (is_kdump_kernel()) {
3963                 dev_nintrs = 2;
3964                 neqs_per_lif = 0;
3965                 nnqs_per_lif = 0;
3966                 ntxqs_per_lif = 1;
3967                 nrxqs_per_lif = 1;
3968         }
3969
3970         /* reserve last queue id for hardware timestamping */
3971         if (lc->features & cpu_to_le64(IONIC_ETH_HW_TIMESTAMP)) {
3972                 if (ntxqs_per_lif <= 1 || nrxqs_per_lif <= 1) {
3973                         lc->features &= cpu_to_le64(~IONIC_ETH_HW_TIMESTAMP);
3974                 } else {
3975                         ntxqs_per_lif -= 1;
3976                         nrxqs_per_lif -= 1;
3977                 }
3978         }
3979
3980         nxqs = min(ntxqs_per_lif, nrxqs_per_lif);
3981         nxqs = min(nxqs, num_online_cpus());
3982         neqs = min(neqs_per_lif, num_online_cpus());
3983
3984 try_again:
3985         /* interrupt usage:
3986          *    1 for master lif adminq/notifyq
3987          *    1 for each CPU for master lif TxRx queue pairs
3988          *    whatever's left is for RDMA queues
3989          */
3990         nintrs = 1 + nxqs + neqs;
3991         min_intrs = 2;  /* adminq + 1 TxRx queue pair */
3992
3993         if (nintrs > dev_nintrs)
3994                 goto try_fewer;
3995
3996         err = ionic_bus_alloc_irq_vectors(ionic, nintrs);
3997         if (err < 0 && err != -ENOSPC) {
3998                 dev_err(ionic->dev, "Can't get intrs from OS: %d\n", err);
3999                 return err;
4000         }
4001         if (err == -ENOSPC)
4002                 goto try_fewer;
4003
4004         if (err != nintrs) {
4005                 ionic_bus_free_irq_vectors(ionic);
4006                 goto try_fewer;
4007         }
4008
4009         ionic->nnqs_per_lif = nnqs_per_lif;
4010         ionic->neqs_per_lif = neqs;
4011         ionic->ntxqs_per_lif = nxqs;
4012         ionic->nrxqs_per_lif = nxqs;
4013         ionic->nintrs = nintrs;
4014
4015         ionic_debugfs_add_sizes(ionic);
4016
4017         return 0;
4018
4019 try_fewer:
4020         if (nnqs_per_lif > 1) {
4021                 nnqs_per_lif >>= 1;
4022                 goto try_again;
4023         }
4024         if (neqs > 1) {
4025                 neqs >>= 1;
4026                 goto try_again;
4027         }
4028         if (nxqs > 1) {
4029                 nxqs >>= 1;
4030                 goto try_again;
4031         }
4032         dev_err(ionic->dev, "Can't get minimum %d intrs from OS\n", min_intrs);
4033         return -ENOSPC;
4034 }
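
/* Worked example of the retry loop (hypothetical numbers): with
 * dev_nintrs = 64, no RDMA event queues and 96 online CPUs, the first
 * pass asks for 1 + 96 + 0 = 97 vectors, more than the device offers;
 * try_fewer halves the notifyq count (which doesn't change nintrs) down
 * to 1 over a few passes, then halves nxqs to 48, giving 1 + 48 + 0 = 49,
 * which fits and is handed to ionic_bus_alloc_irq_vectors().  Only when
 * everything is down to 1 and the request still doesn't fit does the
 * function give up with -ENOSPC.
 */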