/* Broadcom NetXtreme-C/E network driver.
 *
 * Copyright (c) 2014-2016 Broadcom Corporation
 * Copyright (c) 2016-2018 Broadcom Limited
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation.
 */

#include <linux/module.h>

#include <linux/stringify.h>
#include <linux/kernel.h>
#include <linux/timer.h>
#include <linux/errno.h>
#include <linux/ioport.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/interrupt.h>
#include <linux/pci.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/dma-mapping.h>
#include <linux/bitops.h>
#include <linux/io.h>
#include <linux/irq.h>
#include <linux/delay.h>
#include <asm/byteorder.h>
#include <asm/page.h>
#include <linux/time.h>
#include <linux/mii.h>
#include <linux/if.h>
#include <linux/if_vlan.h>
#include <linux/if_bridge.h>
#include <linux/rtc.h>
#include <linux/bpf.h>
#include <net/ip.h>
#include <net/tcp.h>
#include <net/udp.h>
#include <net/checksum.h>
#include <net/ip6_checksum.h>
#include <net/udp_tunnel.h>
#include <linux/workqueue.h>
#include <linux/prefetch.h>
#include <linux/cache.h>
#include <linux/log2.h>
#include <linux/aer.h>
#include <linux/bitmap.h>
#include <linux/cpu_rmap.h>
#include <linux/cpumask.h>
#include <net/pkt_cls.h>
#include <linux/hwmon.h>
#include <linux/hwmon-sysfs.h>

#include "bnxt_hsi.h"
#include "bnxt.h"
#include "bnxt_ulp.h"
#include "bnxt_sriov.h"
#include "bnxt_ethtool.h"
#include "bnxt_dcb.h"
#include "bnxt_xdp.h"
#include "bnxt_vfr.h"
#include "bnxt_tc.h"
#include "bnxt_devlink.h"
#include "bnxt_debugfs.h"

#define BNXT_TX_TIMEOUT		(5 * HZ)

static const char version[] =
	"Broadcom NetXtreme-C/E driver " DRV_MODULE_NAME " v" DRV_MODULE_VERSION "\n";

MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Broadcom BCM573xx network driver");
MODULE_VERSION(DRV_MODULE_VERSION);

#define BNXT_RX_OFFSET (NET_SKB_PAD + NET_IP_ALIGN)
#define BNXT_RX_DMA_OFFSET NET_SKB_PAD
#define BNXT_RX_COPY_THRESH 256

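/* Packets up to this size may be written straight into the doorbell BAR
 * on an otherwise empty ring ("TX push"), avoiding a DMA read for small,
 * latency-sensitive frames.  See the push path in bnxt_start_xmit().
 */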
#define BNXT_TX_PUSH_THRESH 164

enum board_idx {
	BCM57301,
	BCM57302,
	BCM57304,
	BCM57417_NPAR,
	BCM58700,
	BCM57311,
	BCM57312,
	BCM57402,
	BCM57404,
	BCM57406,
	BCM57402_NPAR,
	BCM57407,
	BCM57412,
	BCM57414,
	BCM57416,
	BCM57417,
	BCM57412_NPAR,
	BCM57314,
	BCM57417_SFP,
	BCM57416_SFP,
	BCM57404_NPAR,
	BCM57406_NPAR,
	BCM57407_SFP,
	BCM57407_NPAR,
	BCM57414_NPAR,
	BCM57416_NPAR,
	BCM57452,
	BCM57454,
	BCM5745x_NPAR,
	BCM57508,
	BCM58802,
	BCM58804,
	BCM58808,
	NETXTREME_E_VF,
	NETXTREME_C_VF,
	NETXTREME_S_VF,
};

/* indexed by enum above */
static const struct {
	char *name;
} board_info[] = {
	[BCM57301] = { "Broadcom BCM57301 NetXtreme-C 10Gb Ethernet" },
	[BCM57302] = { "Broadcom BCM57302 NetXtreme-C 10Gb/25Gb Ethernet" },
	[BCM57304] = { "Broadcom BCM57304 NetXtreme-C 10Gb/25Gb/40Gb/50Gb Ethernet" },
	[BCM57417_NPAR] = { "Broadcom BCM57417 NetXtreme-E Ethernet Partition" },
	[BCM58700] = { "Broadcom BCM58700 Nitro 1Gb/2.5Gb/10Gb Ethernet" },
	[BCM57311] = { "Broadcom BCM57311 NetXtreme-C 10Gb Ethernet" },
	[BCM57312] = { "Broadcom BCM57312 NetXtreme-C 10Gb/25Gb Ethernet" },
	[BCM57402] = { "Broadcom BCM57402 NetXtreme-E 10Gb Ethernet" },
	[BCM57404] = { "Broadcom BCM57404 NetXtreme-E 10Gb/25Gb Ethernet" },
	[BCM57406] = { "Broadcom BCM57406 NetXtreme-E 10GBase-T Ethernet" },
	[BCM57402_NPAR] = { "Broadcom BCM57402 NetXtreme-E Ethernet Partition" },
	[BCM57407] = { "Broadcom BCM57407 NetXtreme-E 10GBase-T Ethernet" },
	[BCM57412] = { "Broadcom BCM57412 NetXtreme-E 10Gb Ethernet" },
	[BCM57414] = { "Broadcom BCM57414 NetXtreme-E 10Gb/25Gb Ethernet" },
	[BCM57416] = { "Broadcom BCM57416 NetXtreme-E 10GBase-T Ethernet" },
	[BCM57417] = { "Broadcom BCM57417 NetXtreme-E 10GBase-T Ethernet" },
	[BCM57412_NPAR] = { "Broadcom BCM57412 NetXtreme-E Ethernet Partition" },
	[BCM57314] = { "Broadcom BCM57314 NetXtreme-C 10Gb/25Gb/40Gb/50Gb Ethernet" },
	[BCM57417_SFP] = { "Broadcom BCM57417 NetXtreme-E 10Gb/25Gb Ethernet" },
	[BCM57416_SFP] = { "Broadcom BCM57416 NetXtreme-E 10Gb Ethernet" },
	[BCM57404_NPAR] = { "Broadcom BCM57404 NetXtreme-E Ethernet Partition" },
	[BCM57406_NPAR] = { "Broadcom BCM57406 NetXtreme-E Ethernet Partition" },
	[BCM57407_SFP] = { "Broadcom BCM57407 NetXtreme-E 25Gb Ethernet" },
	[BCM57407_NPAR] = { "Broadcom BCM57407 NetXtreme-E Ethernet Partition" },
	[BCM57414_NPAR] = { "Broadcom BCM57414 NetXtreme-E Ethernet Partition" },
	[BCM57416_NPAR] = { "Broadcom BCM57416 NetXtreme-E Ethernet Partition" },
	[BCM57452] = { "Broadcom BCM57452 NetXtreme-E 10Gb/25Gb/40Gb/50Gb Ethernet" },
	[BCM57454] = { "Broadcom BCM57454 NetXtreme-E 10Gb/25Gb/40Gb/50Gb/100Gb Ethernet" },
	[BCM5745x_NPAR] = { "Broadcom BCM5745x NetXtreme-E Ethernet Partition" },
	[BCM57508] = { "Broadcom BCM57508 NetXtreme-E 10Gb/25Gb/50Gb/100Gb/200Gb Ethernet" },
	[BCM58802] = { "Broadcom BCM58802 NetXtreme-S 10Gb/25Gb/40Gb/50Gb Ethernet" },
	[BCM58804] = { "Broadcom BCM58804 NetXtreme-S 10Gb/25Gb/40Gb/50Gb/100Gb Ethernet" },
	[BCM58808] = { "Broadcom BCM58808 NetXtreme-S 10Gb/25Gb/40Gb/50Gb/100Gb Ethernet" },
	[NETXTREME_E_VF] = { "Broadcom NetXtreme-E Ethernet Virtual Function" },
	[NETXTREME_C_VF] = { "Broadcom NetXtreme-C Ethernet Virtual Function" },
	[NETXTREME_S_VF] = { "Broadcom NetXtreme-S Ethernet Virtual Function" },
};

static const struct pci_device_id bnxt_pci_tbl[] = {
	{ PCI_VDEVICE(BROADCOM, 0x1604), .driver_data = BCM5745x_NPAR },
	{ PCI_VDEVICE(BROADCOM, 0x1605), .driver_data = BCM5745x_NPAR },
	{ PCI_VDEVICE(BROADCOM, 0x1614), .driver_data = BCM57454 },
	{ PCI_VDEVICE(BROADCOM, 0x16c0), .driver_data = BCM57417_NPAR },
	{ PCI_VDEVICE(BROADCOM, 0x16c8), .driver_data = BCM57301 },
	{ PCI_VDEVICE(BROADCOM, 0x16c9), .driver_data = BCM57302 },
	{ PCI_VDEVICE(BROADCOM, 0x16ca), .driver_data = BCM57304 },
	{ PCI_VDEVICE(BROADCOM, 0x16cc), .driver_data = BCM57417_NPAR },
	{ PCI_VDEVICE(BROADCOM, 0x16cd), .driver_data = BCM58700 },
	{ PCI_VDEVICE(BROADCOM, 0x16ce), .driver_data = BCM57311 },
	{ PCI_VDEVICE(BROADCOM, 0x16cf), .driver_data = BCM57312 },
	{ PCI_VDEVICE(BROADCOM, 0x16d0), .driver_data = BCM57402 },
	{ PCI_VDEVICE(BROADCOM, 0x16d1), .driver_data = BCM57404 },
	{ PCI_VDEVICE(BROADCOM, 0x16d2), .driver_data = BCM57406 },
	{ PCI_VDEVICE(BROADCOM, 0x16d4), .driver_data = BCM57402_NPAR },
	{ PCI_VDEVICE(BROADCOM, 0x16d5), .driver_data = BCM57407 },
	{ PCI_VDEVICE(BROADCOM, 0x16d6), .driver_data = BCM57412 },
	{ PCI_VDEVICE(BROADCOM, 0x16d7), .driver_data = BCM57414 },
	{ PCI_VDEVICE(BROADCOM, 0x16d8), .driver_data = BCM57416 },
	{ PCI_VDEVICE(BROADCOM, 0x16d9), .driver_data = BCM57417 },
	{ PCI_VDEVICE(BROADCOM, 0x16de), .driver_data = BCM57412_NPAR },
	{ PCI_VDEVICE(BROADCOM, 0x16df), .driver_data = BCM57314 },
	{ PCI_VDEVICE(BROADCOM, 0x16e2), .driver_data = BCM57417_SFP },
	{ PCI_VDEVICE(BROADCOM, 0x16e3), .driver_data = BCM57416_SFP },
	{ PCI_VDEVICE(BROADCOM, 0x16e7), .driver_data = BCM57404_NPAR },
	{ PCI_VDEVICE(BROADCOM, 0x16e8), .driver_data = BCM57406_NPAR },
	{ PCI_VDEVICE(BROADCOM, 0x16e9), .driver_data = BCM57407_SFP },
	{ PCI_VDEVICE(BROADCOM, 0x16ea), .driver_data = BCM57407_NPAR },
	{ PCI_VDEVICE(BROADCOM, 0x16eb), .driver_data = BCM57412_NPAR },
	{ PCI_VDEVICE(BROADCOM, 0x16ec), .driver_data = BCM57414_NPAR },
	{ PCI_VDEVICE(BROADCOM, 0x16ed), .driver_data = BCM57414_NPAR },
	{ PCI_VDEVICE(BROADCOM, 0x16ee), .driver_data = BCM57416_NPAR },
	{ PCI_VDEVICE(BROADCOM, 0x16ef), .driver_data = BCM57416_NPAR },
	{ PCI_VDEVICE(BROADCOM, 0x16f0), .driver_data = BCM58808 },
	{ PCI_VDEVICE(BROADCOM, 0x16f1), .driver_data = BCM57452 },
	{ PCI_VDEVICE(BROADCOM, 0x1750), .driver_data = BCM57508 },
	{ PCI_VDEVICE(BROADCOM, 0xd802), .driver_data = BCM58802 },
	{ PCI_VDEVICE(BROADCOM, 0xd804), .driver_data = BCM58804 },
#ifdef CONFIG_BNXT_SRIOV
	{ PCI_VDEVICE(BROADCOM, 0x1606), .driver_data = NETXTREME_E_VF },
	{ PCI_VDEVICE(BROADCOM, 0x1609), .driver_data = NETXTREME_E_VF },
	{ PCI_VDEVICE(BROADCOM, 0x16c1), .driver_data = NETXTREME_E_VF },
	{ PCI_VDEVICE(BROADCOM, 0x16cb), .driver_data = NETXTREME_C_VF },
	{ PCI_VDEVICE(BROADCOM, 0x16d3), .driver_data = NETXTREME_E_VF },
	{ PCI_VDEVICE(BROADCOM, 0x16dc), .driver_data = NETXTREME_E_VF },
	{ PCI_VDEVICE(BROADCOM, 0x16e1), .driver_data = NETXTREME_C_VF },
	{ PCI_VDEVICE(BROADCOM, 0x16e5), .driver_data = NETXTREME_C_VF },
	{ PCI_VDEVICE(BROADCOM, 0xd800), .driver_data = NETXTREME_S_VF },
#endif
	{ 0 }
};

MODULE_DEVICE_TABLE(pci, bnxt_pci_tbl);

static const u16 bnxt_vf_req_snif[] = {
	HWRM_FUNC_CFG,
	HWRM_FUNC_VF_CFG,
	HWRM_PORT_PHY_QCFG,
	HWRM_CFA_L2_FILTER_ALLOC,
};

static const u16 bnxt_async_events_arr[] = {
	ASYNC_EVENT_CMPL_EVENT_ID_LINK_STATUS_CHANGE,
	ASYNC_EVENT_CMPL_EVENT_ID_PF_DRVR_UNLOAD,
	ASYNC_EVENT_CMPL_EVENT_ID_PORT_CONN_NOT_ALLOWED,
	ASYNC_EVENT_CMPL_EVENT_ID_VF_CFG_CHANGE,
	ASYNC_EVENT_CMPL_EVENT_ID_LINK_SPEED_CFG_CHANGE,
};

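/* Dedicated workqueue for PF slow-path tasks; VFs fall back to the system
 * workqueue (see bnxt_queue_sp_work() below).
 */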
static struct workqueue_struct *bnxt_pf_wq;

static bool bnxt_vf_pciid(enum board_idx idx)
{
	return (idx == NETXTREME_C_VF || idx == NETXTREME_E_VF ||
		idx == NETXTREME_S_VF);
}

#define DB_CP_REARM_FLAGS	(DB_KEY_CP | DB_IDX_VALID)
#define DB_CP_FLAGS		(DB_KEY_CP | DB_IDX_VALID | DB_IRQ_DIS)
#define DB_CP_IRQ_DIS_FLAGS	(DB_KEY_CP | DB_IRQ_DIS)

#define BNXT_CP_DB_IRQ_DIS(db)						\
	writel(DB_CP_IRQ_DIS_FLAGS, db)

#define BNXT_DB_CQ(db, idx)						\
	writel(DB_CP_FLAGS | RING_CMP(idx), (db)->doorbell)

#define BNXT_DB_NQ_P5(db, idx)						\
	writeq((db)->db_key64 | DBR_TYPE_NQ | RING_CMP(idx), (db)->doorbell)

#define BNXT_DB_CQ_ARM(db, idx)						\
	writel(DB_CP_REARM_FLAGS | RING_CMP(idx), (db)->doorbell)

#define BNXT_DB_NQ_ARM_P5(db, idx)					\
	writeq((db)->db_key64 | DBR_TYPE_NQ_ARM | RING_CMP(idx), (db)->doorbell)

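/* P5 chips (e.g. BCM57508) use 64-bit notification-queue doorbells keyed
 * by db_key64; older chips use the 32-bit completion-ring doorbell format.
 * The helpers below pick the right flavor based on BNXT_FLAG_CHIP_P5.
 */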
static void bnxt_db_nq(struct bnxt *bp, struct bnxt_db_info *db, u32 idx)
{
	if (bp->flags & BNXT_FLAG_CHIP_P5)
		BNXT_DB_NQ_P5(db, idx);
	else
		BNXT_DB_CQ(db, idx);
}

static void bnxt_db_nq_arm(struct bnxt *bp, struct bnxt_db_info *db, u32 idx)
{
	if (bp->flags & BNXT_FLAG_CHIP_P5)
		BNXT_DB_NQ_ARM_P5(db, idx);
	else
		BNXT_DB_CQ_ARM(db, idx);
}

static void bnxt_db_cq(struct bnxt *bp, struct bnxt_db_info *db, u32 idx)
{
	if (bp->flags & BNXT_FLAG_CHIP_P5)
		writeq(db->db_key64 | DBR_TYPE_CQ_ARMALL | RING_CMP(idx),
		       db->doorbell);
	else
		BNXT_DB_CQ(db, idx);
}

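/* TX length hint table, indexed by packet length in 512-byte buckets
 * (see "length >>= 9" in bnxt_start_xmit()).
 */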
const u16 bnxt_lhint_arr[] = {
	TX_BD_FLAGS_LHINT_512_AND_SMALLER,
	TX_BD_FLAGS_LHINT_512_TO_1023,
	TX_BD_FLAGS_LHINT_1024_TO_2047,
	TX_BD_FLAGS_LHINT_1024_TO_2047,
	TX_BD_FLAGS_LHINT_2048_AND_LARGER,
	TX_BD_FLAGS_LHINT_2048_AND_LARGER,
	TX_BD_FLAGS_LHINT_2048_AND_LARGER,
	TX_BD_FLAGS_LHINT_2048_AND_LARGER,
	TX_BD_FLAGS_LHINT_2048_AND_LARGER,
	TX_BD_FLAGS_LHINT_2048_AND_LARGER,
	TX_BD_FLAGS_LHINT_2048_AND_LARGER,
	TX_BD_FLAGS_LHINT_2048_AND_LARGER,
	TX_BD_FLAGS_LHINT_2048_AND_LARGER,
	TX_BD_FLAGS_LHINT_2048_AND_LARGER,
	TX_BD_FLAGS_LHINT_2048_AND_LARGER,
	TX_BD_FLAGS_LHINT_2048_AND_LARGER,
	TX_BD_FLAGS_LHINT_2048_AND_LARGER,
	TX_BD_FLAGS_LHINT_2048_AND_LARGER,
	TX_BD_FLAGS_LHINT_2048_AND_LARGER,
};

static u16 bnxt_xmit_get_cfa_action(struct sk_buff *skb)
{
	struct metadata_dst *md_dst = skb_metadata_dst(skb);

	if (!md_dst || md_dst->type != METADATA_HW_PORT_MUX)
		return 0;

	return md_dst->u.port_info.port_id;
}

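/* Main TX entry point.  Small packets on an empty ring take the inline
 * "push" path below; everything else is DMA-mapped, one BD per segment.
 */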
static netdev_tx_t bnxt_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct bnxt *bp = netdev_priv(dev);
	struct tx_bd *txbd;
	struct tx_bd_ext *txbd1;
	struct netdev_queue *txq;
	int i;
	dma_addr_t mapping;
	unsigned int length, pad = 0;
	u32 len, free_size, vlan_tag_flags, cfa_action, flags;
	u16 prod, last_frag;
	struct pci_dev *pdev = bp->pdev;
	struct bnxt_tx_ring_info *txr;
	struct bnxt_sw_tx_bd *tx_buf;

	i = skb_get_queue_mapping(skb);
	if (unlikely(i >= bp->tx_nr_rings)) {
		dev_kfree_skb_any(skb);
		return NETDEV_TX_OK;
	}

	txq = netdev_get_tx_queue(dev, i);
	txr = &bp->tx_ring[bp->tx_ring_map[i]];
	prod = txr->tx_prod;

	free_size = bnxt_tx_avail(bp, txr);
	if (unlikely(free_size < skb_shinfo(skb)->nr_frags + 2)) {
		netif_tx_stop_queue(txq);
		return NETDEV_TX_BUSY;
	}

	length = skb->len;
	len = skb_headlen(skb);
	last_frag = skb_shinfo(skb)->nr_frags;

	txbd = &txr->tx_desc_ring[TX_RING(prod)][TX_IDX(prod)];

	txbd->tx_bd_opaque = prod;

	tx_buf = &txr->tx_buf_ring[prod];
	tx_buf->skb = skb;
	tx_buf->nr_frags = last_frag;

	vlan_tag_flags = 0;
	cfa_action = bnxt_xmit_get_cfa_action(skb);
	if (skb_vlan_tag_present(skb)) {
		vlan_tag_flags = TX_BD_CFA_META_KEY_VLAN |
				 skb_vlan_tag_get(skb);
		/* Currently supports 8021Q, 8021AD vlan offloads
		 * QINQ1, QINQ2, QINQ3 vlan headers are deprecated
		 */
		if (skb->vlan_proto == htons(ETH_P_8021Q))
			vlan_tag_flags |= 1 << TX_BD_CFA_META_TPID_SHIFT;
	}

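	/* Push path: the whole packet (BDs + data) is copied into the
	 * doorbell buffer and written straight to the device, so no DMA
	 * mapping is needed.
	 */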
	if (free_size == bp->tx_ring_size && length <= bp->tx_push_thresh) {
		struct tx_push_buffer *tx_push_buf = txr->tx_push;
		struct tx_push_bd *tx_push = &tx_push_buf->push_bd;
		struct tx_bd_ext *tx_push1 = &tx_push->txbd2;
		void __iomem *db = txr->tx_db.doorbell;
		void *pdata = tx_push_buf->data;
		u64 *end;
		int j, push_len;

		/* Set COAL_NOW to be ready quickly for the next push */
		tx_push->tx_bd_len_flags_type =
			cpu_to_le32((length << TX_BD_LEN_SHIFT) |
				    TX_BD_TYPE_LONG_TX_BD |
				    TX_BD_FLAGS_LHINT_512_AND_SMALLER |
				    TX_BD_FLAGS_COAL_NOW |
				    TX_BD_FLAGS_PACKET_END |
				    (2 << TX_BD_FLAGS_BD_CNT_SHIFT));

		if (skb->ip_summed == CHECKSUM_PARTIAL)
			tx_push1->tx_bd_hsize_lflags =
				cpu_to_le32(TX_BD_FLAGS_TCP_UDP_CHKSUM);
		else
			tx_push1->tx_bd_hsize_lflags = 0;

		tx_push1->tx_bd_cfa_meta = cpu_to_le32(vlan_tag_flags);
		tx_push1->tx_bd_cfa_action =
			cpu_to_le32(cfa_action << TX_BD_CFA_ACTION_SHIFT);

		end = pdata + length;
		end = PTR_ALIGN(end, 8) - 1;
		*end = 0;

		skb_copy_from_linear_data(skb, pdata, len);
		pdata += len;
		for (j = 0; j < last_frag; j++) {
			skb_frag_t *frag = &skb_shinfo(skb)->frags[j];
			void *fptr;

			fptr = skb_frag_address_safe(frag);
			if (!fptr)
				goto normal_tx;

			memcpy(pdata, fptr, skb_frag_size(frag));
			pdata += skb_frag_size(frag);
		}

		txbd->tx_bd_len_flags_type = tx_push->tx_bd_len_flags_type;
		txbd->tx_bd_haddr = txr->data_mapping;
		prod = NEXT_TX(prod);
		txbd = &txr->tx_desc_ring[TX_RING(prod)][TX_IDX(prod)];
		memcpy(txbd, tx_push1, sizeof(*txbd));
		prod = NEXT_TX(prod);
		tx_push->doorbell =
			cpu_to_le32(DB_KEY_TX_PUSH | DB_LONG_TX_PUSH | prod);
		txr->tx_prod = prod;

		tx_buf->is_push = 1;
		netdev_tx_sent_queue(txq, skb->len);
		wmb();	/* Sync is_push and byte queue before pushing data */

		push_len = (length + sizeof(*tx_push) + 7) / 8;
		if (push_len > 16) {
			__iowrite64_copy(db, tx_push_buf, 16);
			__iowrite32_copy(db + 4, tx_push_buf + 1,
					 (push_len - 16) << 1);
		} else {
			__iowrite64_copy(db, tx_push_buf, push_len);
		}

		goto tx_done;
	}

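	/* Normal path: DMA-map the linear head and each fragment, one BD
	 * apiece, then notify the hardware through the doorbell.
	 */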
normal_tx:
	if (length < BNXT_MIN_PKT_SIZE) {
		pad = BNXT_MIN_PKT_SIZE - length;
		if (skb_pad(skb, pad)) {
			/* SKB already freed. */
			tx_buf->skb = NULL;
			return NETDEV_TX_OK;
		}
		length = BNXT_MIN_PKT_SIZE;
	}

	mapping = dma_map_single(&pdev->dev, skb->data, len, DMA_TO_DEVICE);

	if (unlikely(dma_mapping_error(&pdev->dev, mapping))) {
		dev_kfree_skb_any(skb);
		tx_buf->skb = NULL;
		return NETDEV_TX_OK;
	}

	dma_unmap_addr_set(tx_buf, mapping, mapping);
	flags = (len << TX_BD_LEN_SHIFT) | TX_BD_TYPE_LONG_TX_BD |
		((last_frag + 2) << TX_BD_FLAGS_BD_CNT_SHIFT);

	txbd->tx_bd_haddr = cpu_to_le64(mapping);

	prod = NEXT_TX(prod);
	txbd1 = (struct tx_bd_ext *)
		&txr->tx_desc_ring[TX_RING(prod)][TX_IDX(prod)];

	txbd1->tx_bd_hsize_lflags = 0;
	if (skb_is_gso(skb)) {
		u32 hdr_len;

		if (skb->encapsulation)
			hdr_len = skb_inner_network_offset(skb) +
				skb_inner_network_header_len(skb) +
				inner_tcp_hdrlen(skb);
		else
			hdr_len = skb_transport_offset(skb) +
				tcp_hdrlen(skb);

		txbd1->tx_bd_hsize_lflags = cpu_to_le32(TX_BD_FLAGS_LSO |
					TX_BD_FLAGS_T_IPID |
					(hdr_len << (TX_BD_HSIZE_SHIFT - 1)));
		length = skb_shinfo(skb)->gso_size;
		txbd1->tx_bd_mss = cpu_to_le32(length);
		length += hdr_len;
	} else if (skb->ip_summed == CHECKSUM_PARTIAL) {
		txbd1->tx_bd_hsize_lflags =
			cpu_to_le32(TX_BD_FLAGS_TCP_UDP_CHKSUM);
		txbd1->tx_bd_mss = 0;
	}

	length >>= 9;
	flags |= bnxt_lhint_arr[length];
	txbd->tx_bd_len_flags_type = cpu_to_le32(flags);

	txbd1->tx_bd_cfa_meta = cpu_to_le32(vlan_tag_flags);
	txbd1->tx_bd_cfa_action =
		cpu_to_le32(cfa_action << TX_BD_CFA_ACTION_SHIFT);
	for (i = 0; i < last_frag; i++) {
		skb_frag_t *frag = &skb_shinfo(skb)->frags[i];

		prod = NEXT_TX(prod);
		txbd = &txr->tx_desc_ring[TX_RING(prod)][TX_IDX(prod)];

		len = skb_frag_size(frag);
		mapping = skb_frag_dma_map(&pdev->dev, frag, 0, len,
					   DMA_TO_DEVICE);

		if (unlikely(dma_mapping_error(&pdev->dev, mapping)))
			goto tx_dma_error;

		tx_buf = &txr->tx_buf_ring[prod];
		dma_unmap_addr_set(tx_buf, mapping, mapping);

		txbd->tx_bd_haddr = cpu_to_le64(mapping);

		flags = len << TX_BD_LEN_SHIFT;
		txbd->tx_bd_len_flags_type = cpu_to_le32(flags);
	}

	flags &= ~TX_BD_LEN;
	txbd->tx_bd_len_flags_type =
		cpu_to_le32(((len + pad) << TX_BD_LEN_SHIFT) | flags |
			    TX_BD_FLAGS_PACKET_END);

	netdev_tx_sent_queue(txq, skb->len);

	/* Sync BD data before updating doorbell */
	wmb();

	prod = NEXT_TX(prod);
	txr->tx_prod = prod;

	if (!skb->xmit_more || netif_xmit_stopped(txq))
		bnxt_db_write(bp, &txr->tx_db, prod);

tx_done:

	mmiowb();

	if (unlikely(bnxt_tx_avail(bp, txr) <= MAX_SKB_FRAGS + 1)) {
		if (skb->xmit_more && !tx_buf->is_push)
			bnxt_db_write(bp, &txr->tx_db, prod);

		netif_tx_stop_queue(txq);

		/* netif_tx_stop_queue() must be done before checking
		 * tx index in bnxt_tx_avail() below, because in
		 * bnxt_tx_int(), we update tx index before checking for
		 * netif_tx_queue_stopped().
		 */
		smp_mb();
		if (bnxt_tx_avail(bp, txr) > bp->tx_wake_thresh)
			netif_tx_wake_queue(txq);
	}
	return NETDEV_TX_OK;

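	/* A fragment failed to map: walk forward from the head BD and unmap
	 * everything mapped so far, then drop the skb.
	 */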
tx_dma_error:
	last_frag = i;

	/* start back at beginning and unmap skb */
	prod = txr->tx_prod;
	tx_buf = &txr->tx_buf_ring[prod];
	tx_buf->skb = NULL;
	dma_unmap_single(&pdev->dev, dma_unmap_addr(tx_buf, mapping),
			 skb_headlen(skb), PCI_DMA_TODEVICE);
	prod = NEXT_TX(prod);

	/* unmap remaining mapped pages */
	for (i = 0; i < last_frag; i++) {
		prod = NEXT_TX(prod);
		tx_buf = &txr->tx_buf_ring[prod];
		dma_unmap_page(&pdev->dev, dma_unmap_addr(tx_buf, mapping),
			       skb_frag_size(&skb_shinfo(skb)->frags[i]),
			       PCI_DMA_TODEVICE);
	}

	dev_kfree_skb_any(skb);
	return NETDEV_TX_OK;
}

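/* TX completion: unmap and free up to nr_pkts completed skbs, then wake
 * the queue if it was stopped and enough descriptors have been freed.
 */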
static void bnxt_tx_int(struct bnxt *bp, struct bnxt_napi *bnapi, int nr_pkts)
{
	struct bnxt_tx_ring_info *txr = bnapi->tx_ring;
	struct netdev_queue *txq = netdev_get_tx_queue(bp->dev, txr->txq_index);
	u16 cons = txr->tx_cons;
	struct pci_dev *pdev = bp->pdev;
	int i;
	unsigned int tx_bytes = 0;

	for (i = 0; i < nr_pkts; i++) {
		struct bnxt_sw_tx_bd *tx_buf;
		struct sk_buff *skb;
		int j, last;

		tx_buf = &txr->tx_buf_ring[cons];
		cons = NEXT_TX(cons);
		skb = tx_buf->skb;
		tx_buf->skb = NULL;

		if (tx_buf->is_push) {
			tx_buf->is_push = 0;
			goto next_tx_int;
		}

		dma_unmap_single(&pdev->dev, dma_unmap_addr(tx_buf, mapping),
				 skb_headlen(skb), PCI_DMA_TODEVICE);
		last = tx_buf->nr_frags;

		for (j = 0; j < last; j++) {
			cons = NEXT_TX(cons);
			tx_buf = &txr->tx_buf_ring[cons];
			dma_unmap_page(
				&pdev->dev,
				dma_unmap_addr(tx_buf, mapping),
				skb_frag_size(&skb_shinfo(skb)->frags[j]),
				PCI_DMA_TODEVICE);
		}

next_tx_int:
		cons = NEXT_TX(cons);

		tx_bytes += skb->len;
		dev_kfree_skb_any(skb);
	}

	netdev_tx_completed_queue(txq, nr_pkts, tx_bytes);
	txr->tx_cons = cons;

	/* Need to make the tx_cons update visible to bnxt_start_xmit()
	 * before checking for netif_tx_queue_stopped().  Without the
	 * memory barrier, there is a small possibility that bnxt_start_xmit()
	 * will miss it and cause the queue to be stopped forever.
	 */
	smp_mb();

	if (unlikely(netif_tx_queue_stopped(txq)) &&
	    (bnxt_tx_avail(bp, txr) > bp->tx_wake_thresh)) {
		__netif_tx_lock(txq, smp_processor_id());
		if (netif_tx_queue_stopped(txq) &&
		    bnxt_tx_avail(bp, txr) > bp->tx_wake_thresh &&
		    txr->dev_state != BNXT_DEV_STATE_CLOSING)
			netif_tx_wake_queue(txq);
		__netif_tx_unlock(txq);
	}
}

static struct page *__bnxt_alloc_rx_page(struct bnxt *bp, dma_addr_t *mapping,
					 gfp_t gfp)
{
	struct device *dev = &bp->pdev->dev;
	struct page *page;

	page = alloc_page(gfp);
	if (!page)
		return NULL;

	*mapping = dma_map_page_attrs(dev, page, 0, PAGE_SIZE, bp->rx_dir,
				      DMA_ATTR_WEAK_ORDERING);
	if (dma_mapping_error(dev, *mapping)) {
		__free_page(page);
		return NULL;
	}
	*mapping += bp->rx_dma_offset;
	return page;
}

static inline u8 *__bnxt_alloc_rx_data(struct bnxt *bp, dma_addr_t *mapping,
				       gfp_t gfp)
{
	u8 *data;
	struct pci_dev *pdev = bp->pdev;

	data = kmalloc(bp->rx_buf_size, gfp);
	if (!data)
		return NULL;

	*mapping = dma_map_single_attrs(&pdev->dev, data + bp->rx_dma_offset,
					bp->rx_buf_use_size, bp->rx_dir,
					DMA_ATTR_WEAK_ORDERING);

	if (dma_mapping_error(&pdev->dev, *mapping)) {
		kfree(data);
		data = NULL;
	}
	return data;
}

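/* Fill one RX BD.  In page mode (used with XDP) each buffer is a full
 * page; otherwise buffers are kmalloc'd and later wrapped with
 * build_skb().
 */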
int bnxt_alloc_rx_data(struct bnxt *bp, struct bnxt_rx_ring_info *rxr,
		       u16 prod, gfp_t gfp)
{
	struct rx_bd *rxbd = &rxr->rx_desc_ring[RX_RING(prod)][RX_IDX(prod)];
	struct bnxt_sw_rx_bd *rx_buf = &rxr->rx_buf_ring[prod];
	dma_addr_t mapping;

	if (BNXT_RX_PAGE_MODE(bp)) {
		struct page *page = __bnxt_alloc_rx_page(bp, &mapping, gfp);

		if (!page)
			return -ENOMEM;

		rx_buf->data = page;
		rx_buf->data_ptr = page_address(page) + bp->rx_offset;
	} else {
		u8 *data = __bnxt_alloc_rx_data(bp, &mapping, gfp);

		if (!data)
			return -ENOMEM;

		rx_buf->data = data;
		rx_buf->data_ptr = data + bp->rx_offset;
	}
	rx_buf->mapping = mapping;

	rxbd->rx_bd_haddr = cpu_to_le64(mapping);
	return 0;
}

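/* Recycle an unconsumed buffer from the cons slot back to the current
 * prod slot so the hardware can use it again without a new allocation.
 */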
void bnxt_reuse_rx_data(struct bnxt_rx_ring_info *rxr, u16 cons, void *data)
{
	u16 prod = rxr->rx_prod;
	struct bnxt_sw_rx_bd *cons_rx_buf, *prod_rx_buf;
	struct rx_bd *cons_bd, *prod_bd;

	prod_rx_buf = &rxr->rx_buf_ring[prod];
	cons_rx_buf = &rxr->rx_buf_ring[cons];

	prod_rx_buf->data = data;
	prod_rx_buf->data_ptr = cons_rx_buf->data_ptr;

	prod_rx_buf->mapping = cons_rx_buf->mapping;

	prod_bd = &rxr->rx_desc_ring[RX_RING(prod)][RX_IDX(prod)];
	cons_bd = &rxr->rx_desc_ring[RX_RING(cons)][RX_IDX(cons)];

	prod_bd->rx_bd_haddr = cons_bd->rx_bd_haddr;
}

static inline u16 bnxt_find_next_agg_idx(struct bnxt_rx_ring_info *rxr, u16 idx)
{
	u16 next, max = rxr->rx_agg_bmap_size;

	next = find_next_zero_bit(rxr->rx_agg_bmap, max, idx);
	if (next >= max)
		next = find_first_zero_bit(rxr->rx_agg_bmap, max);
	return next;
}

static inline int bnxt_alloc_rx_page(struct bnxt *bp,
				     struct bnxt_rx_ring_info *rxr,
				     u16 prod, gfp_t gfp)
{
	struct rx_bd *rxbd =
		&rxr->rx_agg_desc_ring[RX_RING(prod)][RX_IDX(prod)];
	struct bnxt_sw_rx_agg_bd *rx_agg_buf;
	struct pci_dev *pdev = bp->pdev;
	struct page *page;
	dma_addr_t mapping;
	u16 sw_prod = rxr->rx_sw_agg_prod;
	unsigned int offset = 0;

	if (PAGE_SIZE > BNXT_RX_PAGE_SIZE) {
		page = rxr->rx_page;
		if (!page) {
			page = alloc_page(gfp);
			if (!page)
				return -ENOMEM;
			rxr->rx_page = page;
			rxr->rx_page_offset = 0;
		}
		offset = rxr->rx_page_offset;
		rxr->rx_page_offset += BNXT_RX_PAGE_SIZE;
		if (rxr->rx_page_offset == PAGE_SIZE)
			rxr->rx_page = NULL;
		else
			get_page(page);
	} else {
		page = alloc_page(gfp);
		if (!page)
			return -ENOMEM;
	}

	mapping = dma_map_page_attrs(&pdev->dev, page, offset,
				     BNXT_RX_PAGE_SIZE, PCI_DMA_FROMDEVICE,
				     DMA_ATTR_WEAK_ORDERING);
	if (dma_mapping_error(&pdev->dev, mapping)) {
		__free_page(page);
		return -EIO;
	}

	if (unlikely(test_bit(sw_prod, rxr->rx_agg_bmap)))
		sw_prod = bnxt_find_next_agg_idx(rxr, sw_prod);

	__set_bit(sw_prod, rxr->rx_agg_bmap);
	rx_agg_buf = &rxr->rx_agg_ring[sw_prod];
	rxr->rx_sw_agg_prod = NEXT_RX_AGG(sw_prod);

	rx_agg_buf->page = page;
	rx_agg_buf->offset = offset;
	rx_agg_buf->mapping = mapping;
	rxbd->rx_bd_haddr = cpu_to_le64(mapping);
	rxbd->rx_bd_opaque = sw_prod;
	return 0;
}

static void bnxt_reuse_rx_agg_bufs(struct bnxt_cp_ring_info *cpr, u16 cp_cons,
				   u32 agg_bufs)
{
	struct bnxt_napi *bnapi = cpr->bnapi;
	struct bnxt *bp = bnapi->bp;
	struct bnxt_rx_ring_info *rxr = bnapi->rx_ring;
	u16 prod = rxr->rx_agg_prod;
	u16 sw_prod = rxr->rx_sw_agg_prod;
	u32 i;

	for (i = 0; i < agg_bufs; i++) {
		u16 cons;
		struct rx_agg_cmp *agg;
		struct bnxt_sw_rx_agg_bd *cons_rx_buf, *prod_rx_buf;
		struct rx_bd *prod_bd;
		struct page *page;

		agg = (struct rx_agg_cmp *)
			&cpr->cp_desc_ring[CP_RING(cp_cons)][CP_IDX(cp_cons)];
		cons = agg->rx_agg_cmp_opaque;
		__clear_bit(cons, rxr->rx_agg_bmap);

		if (unlikely(test_bit(sw_prod, rxr->rx_agg_bmap)))
			sw_prod = bnxt_find_next_agg_idx(rxr, sw_prod);

		__set_bit(sw_prod, rxr->rx_agg_bmap);
		prod_rx_buf = &rxr->rx_agg_ring[sw_prod];
		cons_rx_buf = &rxr->rx_agg_ring[cons];

		/* It is possible for sw_prod to be equal to cons, so
		 * set cons_rx_buf->page to NULL first.
		 */
		page = cons_rx_buf->page;
		cons_rx_buf->page = NULL;
		prod_rx_buf->page = page;
		prod_rx_buf->offset = cons_rx_buf->offset;

		prod_rx_buf->mapping = cons_rx_buf->mapping;

		prod_bd = &rxr->rx_agg_desc_ring[RX_RING(prod)][RX_IDX(prod)];

		prod_bd->rx_bd_haddr = cpu_to_le64(cons_rx_buf->mapping);
		prod_bd->rx_bd_opaque = sw_prod;

		prod = NEXT_RX_AGG(prod);
		sw_prod = NEXT_RX_AGG(sw_prod);
		cp_cons = NEXT_CMP(cp_cons);
	}
	rxr->rx_agg_prod = prod;
	rxr->rx_sw_agg_prod = sw_prod;
}

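/* Page-mode RX: copy the header portion (reported by hardware via the
 * payload offset, or computed with eth_get_headlen()) into a fresh skb
 * and attach the rest of the page as a frag.
 */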
static struct sk_buff *bnxt_rx_page_skb(struct bnxt *bp,
					struct bnxt_rx_ring_info *rxr,
					u16 cons, void *data, u8 *data_ptr,
					dma_addr_t dma_addr,
					unsigned int offset_and_len)
{
	unsigned int payload = offset_and_len >> 16;
	unsigned int len = offset_and_len & 0xffff;
	struct skb_frag_struct *frag;
	struct page *page = data;
	u16 prod = rxr->rx_prod;
	struct sk_buff *skb;
	int off, err;

	err = bnxt_alloc_rx_data(bp, rxr, prod, GFP_ATOMIC);
	if (unlikely(err)) {
		bnxt_reuse_rx_data(rxr, cons, data);
		return NULL;
	}
	dma_addr -= bp->rx_dma_offset;
	dma_unmap_page_attrs(&bp->pdev->dev, dma_addr, PAGE_SIZE, bp->rx_dir,
			     DMA_ATTR_WEAK_ORDERING);

	if (unlikely(!payload))
		payload = eth_get_headlen(data_ptr, len);

	skb = napi_alloc_skb(&rxr->bnapi->napi, payload);
	if (!skb) {
		__free_page(page);
		return NULL;
	}

	off = (void *)data_ptr - page_address(page);
	skb_add_rx_frag(skb, 0, page, off, len, PAGE_SIZE);
	memcpy(skb->data - NET_IP_ALIGN, data_ptr - NET_IP_ALIGN,
	       payload + NET_IP_ALIGN);

	frag = &skb_shinfo(skb)->frags[0];
	skb_frag_size_sub(frag, payload);
	frag->page_offset += payload;
	skb->data_len -= payload;
	skb->tail += payload;

	return skb;
}

static struct sk_buff *bnxt_rx_skb(struct bnxt *bp,
				   struct bnxt_rx_ring_info *rxr, u16 cons,
				   void *data, u8 *data_ptr,
				   dma_addr_t dma_addr,
				   unsigned int offset_and_len)
{
	u16 prod = rxr->rx_prod;
	struct sk_buff *skb;
	int err;

	err = bnxt_alloc_rx_data(bp, rxr, prod, GFP_ATOMIC);
	if (unlikely(err)) {
		bnxt_reuse_rx_data(rxr, cons, data);
		return NULL;
	}

	skb = build_skb(data, 0);
	dma_unmap_single_attrs(&bp->pdev->dev, dma_addr, bp->rx_buf_use_size,
			       bp->rx_dir, DMA_ATTR_WEAK_ORDERING);
	if (!skb) {
		kfree(data);
		return NULL;
	}

	skb_reserve(skb, bp->rx_offset);
	skb_put(skb, offset_and_len & 0xffff);
	return skb;
}

static struct sk_buff *bnxt_rx_pages(struct bnxt *bp,
				     struct bnxt_cp_ring_info *cpr,
				     struct sk_buff *skb, u16 cp_cons,
				     u32 agg_bufs)
{
	struct bnxt_napi *bnapi = cpr->bnapi;
	struct pci_dev *pdev = bp->pdev;
	struct bnxt_rx_ring_info *rxr = bnapi->rx_ring;
	u16 prod = rxr->rx_agg_prod;
	u32 i;

	for (i = 0; i < agg_bufs; i++) {
		u16 cons, frag_len;
		struct rx_agg_cmp *agg;
		struct bnxt_sw_rx_agg_bd *cons_rx_buf;
		struct page *page;
		dma_addr_t mapping;

		agg = (struct rx_agg_cmp *)
			&cpr->cp_desc_ring[CP_RING(cp_cons)][CP_IDX(cp_cons)];
		cons = agg->rx_agg_cmp_opaque;
		frag_len = (le32_to_cpu(agg->rx_agg_cmp_len_flags_type) &
			    RX_AGG_CMP_LEN) >> RX_AGG_CMP_LEN_SHIFT;

		cons_rx_buf = &rxr->rx_agg_ring[cons];
		skb_fill_page_desc(skb, i, cons_rx_buf->page,
				   cons_rx_buf->offset, frag_len);
		__clear_bit(cons, rxr->rx_agg_bmap);

		/* It is possible for bnxt_alloc_rx_page() to allocate
		 * a sw_prod index that equals the cons index, so we
		 * need to clear the cons entry now.
		 */
		mapping = cons_rx_buf->mapping;
		page = cons_rx_buf->page;
		cons_rx_buf->page = NULL;

		if (bnxt_alloc_rx_page(bp, rxr, prod, GFP_ATOMIC) != 0) {
			struct skb_shared_info *shinfo;
			unsigned int nr_frags;

			shinfo = skb_shinfo(skb);
			nr_frags = --shinfo->nr_frags;
			__skb_frag_set_page(&shinfo->frags[nr_frags], NULL);

			dev_kfree_skb(skb);

			cons_rx_buf->page = page;

			/* Update prod since possibly some pages have been
			 * allocated already.
			 */
			rxr->rx_agg_prod = prod;
			bnxt_reuse_rx_agg_bufs(cpr, cp_cons, agg_bufs - i);
			return NULL;
		}

		dma_unmap_page_attrs(&pdev->dev, mapping, BNXT_RX_PAGE_SIZE,
				     PCI_DMA_FROMDEVICE,
				     DMA_ATTR_WEAK_ORDERING);

		skb->data_len += frag_len;
		skb->len += frag_len;
		skb->truesize += PAGE_SIZE;

		prod = NEXT_RX_AGG(prod);
		cp_cons = NEXT_CMP(cp_cons);
	}
	rxr->rx_agg_prod = prod;
	return skb;
}

static int bnxt_agg_bufs_valid(struct bnxt *bp, struct bnxt_cp_ring_info *cpr,
			       u8 agg_bufs, u32 *raw_cons)
{
	u16 last;
	struct rx_agg_cmp *agg;

	*raw_cons = ADV_RAW_CMP(*raw_cons, agg_bufs);
	last = RING_CMP(*raw_cons);
	agg = (struct rx_agg_cmp *)
		&cpr->cp_desc_ring[CP_RING(last)][CP_IDX(last)];
	return RX_AGG_CMP_VALID(agg, *raw_cons);
}

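/* For packets under the copy threshold, memcpy into a small new skb so
 * the original DMA buffer can be handed straight back to the ring.
 */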
static inline struct sk_buff *bnxt_copy_skb(struct bnxt_napi *bnapi, u8 *data,
					    unsigned int len,
					    dma_addr_t mapping)
{
	struct bnxt *bp = bnapi->bp;
	struct pci_dev *pdev = bp->pdev;
	struct sk_buff *skb;

	skb = napi_alloc_skb(&bnapi->napi, len);
	if (!skb)
		return NULL;

	dma_sync_single_for_cpu(&pdev->dev, mapping, bp->rx_copy_thresh,
				bp->rx_dir);

	memcpy(skb->data - NET_IP_ALIGN, data - NET_IP_ALIGN,
	       len + NET_IP_ALIGN);

	dma_sync_single_for_device(&pdev->dev, mapping, bp->rx_copy_thresh,
				   bp->rx_dir);

	skb_put(skb, len);
	return skb;
}

static int bnxt_discard_rx(struct bnxt *bp, struct bnxt_cp_ring_info *cpr,
			   u32 *raw_cons, void *cmp)
{
	struct rx_cmp *rxcmp = cmp;
	u32 tmp_raw_cons = *raw_cons;
	u8 cmp_type, agg_bufs = 0;

	cmp_type = RX_CMP_TYPE(rxcmp);

	if (cmp_type == CMP_TYPE_RX_L2_CMP) {
		agg_bufs = (le32_to_cpu(rxcmp->rx_cmp_misc_v1) &
			    RX_CMP_AGG_BUFS) >>
			   RX_CMP_AGG_BUFS_SHIFT;
	} else if (cmp_type == CMP_TYPE_RX_L2_TPA_END_CMP) {
		struct rx_tpa_end_cmp *tpa_end = cmp;

		agg_bufs = (le32_to_cpu(tpa_end->rx_tpa_end_cmp_misc_v1) &
			    RX_TPA_END_CMP_AGG_BUFS) >>
			   RX_TPA_END_CMP_AGG_BUFS_SHIFT;
	}

	if (agg_bufs) {
		if (!bnxt_agg_bufs_valid(bp, cpr, agg_bufs, &tmp_raw_cons))
			return -EBUSY;
	}
	*raw_cons = tmp_raw_cons;
	return 0;
}

static void bnxt_queue_sp_work(struct bnxt *bp)
{
	if (BNXT_PF(bp))
		queue_work(bnxt_pf_wq, &bp->sp_task);
	else
		schedule_work(&bp->sp_task);
}

static void bnxt_cancel_sp_work(struct bnxt *bp)
{
	if (BNXT_PF(bp))
		flush_workqueue(bnxt_pf_wq);
	else
		cancel_work_sync(&bp->sp_task);
}

static void bnxt_sched_reset(struct bnxt *bp, struct bnxt_rx_ring_info *rxr)
{
	if (!rxr->bnapi->in_reset) {
		rxr->bnapi->in_reset = true;
		set_bit(BNXT_RESET_TASK_SP_EVENT, &bp->sp_event);
		bnxt_queue_sp_work(bp);
	}
	rxr->rx_next_cons = 0xffff;
}

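/* TPA (hardware receive aggregation) start: the current RX buffer becomes
 * the head of an aggregation, so park it in tpa_info until the matching
 * TPA_END completion arrives.
 */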
static void bnxt_tpa_start(struct bnxt *bp, struct bnxt_rx_ring_info *rxr,
			   struct rx_tpa_start_cmp *tpa_start,
			   struct rx_tpa_start_cmp_ext *tpa_start1)
{
	u8 agg_id = TPA_START_AGG_ID(tpa_start);
	u16 cons, prod;
	struct bnxt_tpa_info *tpa_info;
	struct bnxt_sw_rx_bd *cons_rx_buf, *prod_rx_buf;
	struct rx_bd *prod_bd;
	dma_addr_t mapping;

	cons = tpa_start->rx_tpa_start_cmp_opaque;
	prod = rxr->rx_prod;
	cons_rx_buf = &rxr->rx_buf_ring[cons];
	prod_rx_buf = &rxr->rx_buf_ring[prod];
	tpa_info = &rxr->rx_tpa[agg_id];

	if (unlikely(cons != rxr->rx_next_cons)) {
		bnxt_sched_reset(bp, rxr);
		return;
	}
	/* Store cfa_code in tpa_info to use in tpa_end
	 * completion processing.
	 */
	tpa_info->cfa_code = TPA_START_CFA_CODE(tpa_start1);
	prod_rx_buf->data = tpa_info->data;
	prod_rx_buf->data_ptr = tpa_info->data_ptr;

	mapping = tpa_info->mapping;
	prod_rx_buf->mapping = mapping;

	prod_bd = &rxr->rx_desc_ring[RX_RING(prod)][RX_IDX(prod)];

	prod_bd->rx_bd_haddr = cpu_to_le64(mapping);

	tpa_info->data = cons_rx_buf->data;
	tpa_info->data_ptr = cons_rx_buf->data_ptr;
	cons_rx_buf->data = NULL;
	tpa_info->mapping = cons_rx_buf->mapping;

	tpa_info->len =
		le32_to_cpu(tpa_start->rx_tpa_start_cmp_len_flags_type) >>
				RX_TPA_START_CMP_LEN_SHIFT;
	if (likely(TPA_START_HASH_VALID(tpa_start))) {
		u32 hash_type = TPA_START_HASH_TYPE(tpa_start);

		tpa_info->hash_type = PKT_HASH_TYPE_L4;
		tpa_info->gso_type = SKB_GSO_TCPV4;
		/* RSS profiles 1 and 3 with extract code 0 for inner 4-tuple */
		if (hash_type == 3 || TPA_START_IS_IPV6(tpa_start1))
			tpa_info->gso_type = SKB_GSO_TCPV6;
		tpa_info->rss_hash =
			le32_to_cpu(tpa_start->rx_tpa_start_cmp_rss_hash);
	} else {
		tpa_info->hash_type = PKT_HASH_TYPE_NONE;
		tpa_info->gso_type = 0;
		if (netif_msg_rx_err(bp))
			netdev_warn(bp->dev, "TPA packet without valid hash\n");
	}
	tpa_info->flags2 = le32_to_cpu(tpa_start1->rx_tpa_start_cmp_flags2);
	tpa_info->metadata = le32_to_cpu(tpa_start1->rx_tpa_start_cmp_metadata);
	tpa_info->hdr_info = le32_to_cpu(tpa_start1->rx_tpa_start_cmp_hdr_info);

	rxr->rx_prod = NEXT_RX(prod);
	cons = NEXT_RX(cons);
	rxr->rx_next_cons = NEXT_RX(cons);
	cons_rx_buf = &rxr->rx_buf_ring[cons];

	bnxt_reuse_rx_data(rxr, cons, cons_rx_buf->data);
	rxr->rx_prod = NEXT_RX(rxr->rx_prod);
	cons_rx_buf->data = NULL;
}

static void bnxt_abort_tpa(struct bnxt_cp_ring_info *cpr, u16 cp_cons,
			   u32 agg_bufs)
{
	if (agg_bufs)
		bnxt_reuse_rx_agg_bufs(cpr, cp_cons, agg_bufs);
}

static struct sk_buff *bnxt_gro_func_5731x(struct bnxt_tpa_info *tpa_info,
					   int payload_off, int tcp_ts,
					   struct sk_buff *skb)
{
#ifdef CONFIG_INET
	struct tcphdr *th;
	int len, nw_off;
	u16 outer_ip_off, inner_ip_off, inner_mac_off;
	u32 hdr_info = tpa_info->hdr_info;
	bool loopback = false;

	inner_ip_off = BNXT_TPA_INNER_L3_OFF(hdr_info);
	inner_mac_off = BNXT_TPA_INNER_L2_OFF(hdr_info);
	outer_ip_off = BNXT_TPA_OUTER_L3_OFF(hdr_info);

	/* If the packet is an internal loopback packet, the offsets will
	 * have an extra 4 bytes.
	 */
	if (inner_mac_off == 4) {
		loopback = true;
	} else if (inner_mac_off > 4) {
		__be16 proto = *((__be16 *)(skb->data + inner_ip_off -
					    ETH_HLEN - 2));

		/* We only support inner IPv4/IPv6.  If we don't see the
		 * correct protocol ID, it must be a loopback packet where
		 * the offsets are off by 4.
		 */
		if (proto != htons(ETH_P_IP) && proto != htons(ETH_P_IPV6))
			loopback = true;
	}
	if (loopback) {
		/* internal loopback packet, subtract all offsets by 4 */
		inner_ip_off -= 4;
		inner_mac_off -= 4;
		outer_ip_off -= 4;
	}

	nw_off = inner_ip_off - ETH_HLEN;
	skb_set_network_header(skb, nw_off);
	if (tpa_info->flags2 & RX_TPA_START_CMP_FLAGS2_IP_TYPE) {
		struct ipv6hdr *iph = ipv6_hdr(skb);

		skb_set_transport_header(skb, nw_off + sizeof(struct ipv6hdr));
		len = skb->len - skb_transport_offset(skb);
		th = tcp_hdr(skb);
		th->check = ~tcp_v6_check(len, &iph->saddr, &iph->daddr, 0);
	} else {
		struct iphdr *iph = ip_hdr(skb);

		skb_set_transport_header(skb, nw_off + sizeof(struct iphdr));
		len = skb->len - skb_transport_offset(skb);
		th = tcp_hdr(skb);
		th->check = ~tcp_v4_check(len, iph->saddr, iph->daddr, 0);
	}

	if (inner_mac_off) { /* tunnel */
		struct udphdr *uh = NULL;
		__be16 proto = *((__be16 *)(skb->data + outer_ip_off -
					    ETH_HLEN - 2));

		if (proto == htons(ETH_P_IP)) {
			struct iphdr *iph = (struct iphdr *)skb->data;

			if (iph->protocol == IPPROTO_UDP)
				uh = (struct udphdr *)(iph + 1);
		} else {
			struct ipv6hdr *iph = (struct ipv6hdr *)skb->data;

			if (iph->nexthdr == IPPROTO_UDP)
				uh = (struct udphdr *)(iph + 1);
		}
		if (uh) {
			if (uh->check)
				skb_shinfo(skb)->gso_type |=
					SKB_GSO_UDP_TUNNEL_CSUM;
			else
				skb_shinfo(skb)->gso_type |= SKB_GSO_UDP_TUNNEL;
		}
	}
#endif
	return skb;
}

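/* The 5730x variant below has no hdr_info word in the TPA completion, so
 * the header offsets are derived from payload_off and the recorded
 * gso_type instead.
 */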
#define BNXT_IPV4_HDR_SIZE	(sizeof(struct iphdr) + sizeof(struct tcphdr))
#define BNXT_IPV6_HDR_SIZE	(sizeof(struct ipv6hdr) + sizeof(struct tcphdr))

static struct sk_buff *bnxt_gro_func_5730x(struct bnxt_tpa_info *tpa_info,
					   int payload_off, int tcp_ts,
					   struct sk_buff *skb)
{
#ifdef CONFIG_INET
	struct tcphdr *th;
	int len, nw_off, tcp_opt_len = 0;

	if (tcp_ts)
		tcp_opt_len = 12;

	if (tpa_info->gso_type == SKB_GSO_TCPV4) {
		struct iphdr *iph;

		nw_off = payload_off - BNXT_IPV4_HDR_SIZE - tcp_opt_len -
			 ETH_HLEN;
		skb_set_network_header(skb, nw_off);
		iph = ip_hdr(skb);
		skb_set_transport_header(skb, nw_off + sizeof(struct iphdr));
		len = skb->len - skb_transport_offset(skb);
		th = tcp_hdr(skb);
		th->check = ~tcp_v4_check(len, iph->saddr, iph->daddr, 0);
	} else if (tpa_info->gso_type == SKB_GSO_TCPV6) {
		struct ipv6hdr *iph;

		nw_off = payload_off - BNXT_IPV6_HDR_SIZE - tcp_opt_len -
			 ETH_HLEN;
		skb_set_network_header(skb, nw_off);
		iph = ipv6_hdr(skb);
		skb_set_transport_header(skb, nw_off + sizeof(struct ipv6hdr));
		len = skb->len - skb_transport_offset(skb);
		th = tcp_hdr(skb);
		th->check = ~tcp_v6_check(len, &iph->saddr, &iph->daddr, 0);
	} else {
		dev_kfree_skb_any(skb);
		return NULL;
	}

	if (nw_off) { /* tunnel */
		struct udphdr *uh = NULL;

		if (skb->protocol == htons(ETH_P_IP)) {
			struct iphdr *iph = (struct iphdr *)skb->data;

			if (iph->protocol == IPPROTO_UDP)
				uh = (struct udphdr *)(iph + 1);
		} else {
			struct ipv6hdr *iph = (struct ipv6hdr *)skb->data;

			if (iph->nexthdr == IPPROTO_UDP)
				uh = (struct udphdr *)(iph + 1);
		}
		if (uh) {
			if (uh->check)
				skb_shinfo(skb)->gso_type |=
					SKB_GSO_UDP_TUNNEL_CSUM;
			else
				skb_shinfo(skb)->gso_type |= SKB_GSO_UDP_TUNNEL;
		}
	}
#endif
	return skb;
}

static inline struct sk_buff *bnxt_gro_skb(struct bnxt *bp,
					   struct bnxt_tpa_info *tpa_info,
					   struct rx_tpa_end_cmp *tpa_end,
					   struct rx_tpa_end_cmp_ext *tpa_end1,
					   struct sk_buff *skb)
{
#ifdef CONFIG_INET
	int payload_off;
	u16 segs;

	segs = TPA_END_TPA_SEGS(tpa_end);
	if (segs == 1)
		return skb;

	NAPI_GRO_CB(skb)->count = segs;
	skb_shinfo(skb)->gso_size =
		le32_to_cpu(tpa_end1->rx_tpa_end_cmp_seg_len);
	skb_shinfo(skb)->gso_type = tpa_info->gso_type;
	payload_off = (le32_to_cpu(tpa_end->rx_tpa_end_cmp_misc_v1) &
		       RX_TPA_END_CMP_PAYLOAD_OFFSET) >>
		      RX_TPA_END_CMP_PAYLOAD_OFFSET_SHIFT;
	skb = bp->gro_func(tpa_info, payload_off, TPA_END_GRO_TS(tpa_end), skb);
	if (likely(skb))
		tcp_gro_complete(skb);
#endif
	return skb;
}

/* Given the cfa_code of a received packet, determine which
 * netdev (vf-rep or PF) the packet is destined to.
 */
static struct net_device *bnxt_get_pkt_dev(struct bnxt *bp, u16 cfa_code)
{
	struct net_device *dev = bnxt_get_vf_rep(bp, cfa_code);

	/* if vf-rep dev is NULL, the packet must belong to the PF */
	return dev ? dev : bp->dev;
}

static inline struct sk_buff *bnxt_tpa_end(struct bnxt *bp,
					   struct bnxt_cp_ring_info *cpr,
					   u32 *raw_cons,
					   struct rx_tpa_end_cmp *tpa_end,
					   struct rx_tpa_end_cmp_ext *tpa_end1,
					   u8 *event)
{
	struct bnxt_napi *bnapi = cpr->bnapi;
	struct bnxt_rx_ring_info *rxr = bnapi->rx_ring;
	u8 agg_id = TPA_END_AGG_ID(tpa_end);
	u8 *data_ptr, agg_bufs;
	u16 cp_cons = RING_CMP(*raw_cons);
	unsigned int len;
	struct bnxt_tpa_info *tpa_info;
	dma_addr_t mapping;
	struct sk_buff *skb;
	void *data;

	if (unlikely(bnapi->in_reset)) {
		int rc = bnxt_discard_rx(bp, cpr, raw_cons, tpa_end);

		if (rc < 0)
			return ERR_PTR(-EBUSY);
		return NULL;
	}

	tpa_info = &rxr->rx_tpa[agg_id];
	data = tpa_info->data;
	data_ptr = tpa_info->data_ptr;
	prefetch(data_ptr);
	len = tpa_info->len;
	mapping = tpa_info->mapping;

	agg_bufs = (le32_to_cpu(tpa_end->rx_tpa_end_cmp_misc_v1) &
		    RX_TPA_END_CMP_AGG_BUFS) >> RX_TPA_END_CMP_AGG_BUFS_SHIFT;

	if (agg_bufs) {
		if (!bnxt_agg_bufs_valid(bp, cpr, agg_bufs, raw_cons))
			return ERR_PTR(-EBUSY);

		*event |= BNXT_AGG_EVENT;
		cp_cons = NEXT_CMP(cp_cons);
	}

	if (unlikely(agg_bufs > MAX_SKB_FRAGS || TPA_END_ERRORS(tpa_end1))) {
		bnxt_abort_tpa(cpr, cp_cons, agg_bufs);
		if (agg_bufs > MAX_SKB_FRAGS)
			netdev_warn(bp->dev, "TPA frags %d exceeded MAX_SKB_FRAGS %d\n",
				    agg_bufs, (int)MAX_SKB_FRAGS);
		return NULL;
	}

	if (len <= bp->rx_copy_thresh) {
		skb = bnxt_copy_skb(bnapi, data_ptr, len, mapping);
		if (!skb) {
			bnxt_abort_tpa(cpr, cp_cons, agg_bufs);
			return NULL;
		}
	} else {
		u8 *new_data;
		dma_addr_t new_mapping;

		new_data = __bnxt_alloc_rx_data(bp, &new_mapping, GFP_ATOMIC);
		if (!new_data) {
			bnxt_abort_tpa(cpr, cp_cons, agg_bufs);
			return NULL;
		}

		tpa_info->data = new_data;
		tpa_info->data_ptr = new_data + bp->rx_offset;
		tpa_info->mapping = new_mapping;

		skb = build_skb(data, 0);
		dma_unmap_single_attrs(&bp->pdev->dev, mapping,
				       bp->rx_buf_use_size, bp->rx_dir,
				       DMA_ATTR_WEAK_ORDERING);

		if (!skb) {
			kfree(data);
			bnxt_abort_tpa(cpr, cp_cons, agg_bufs);
			return NULL;
		}
		skb_reserve(skb, bp->rx_offset);
		skb_put(skb, len);
	}

	if (agg_bufs) {
		skb = bnxt_rx_pages(bp, cpr, skb, cp_cons, agg_bufs);
		if (!skb) {
			/* Page reuse already handled by bnxt_rx_pages(). */
			return NULL;
		}
	}

	skb->protocol =
		eth_type_trans(skb, bnxt_get_pkt_dev(bp, tpa_info->cfa_code));

	if (tpa_info->hash_type != PKT_HASH_TYPE_NONE)
		skb_set_hash(skb, tpa_info->rss_hash, tpa_info->hash_type);

	if ((tpa_info->flags2 & RX_CMP_FLAGS2_META_FORMAT_VLAN) &&
	    (skb->dev->features & NETIF_F_HW_VLAN_CTAG_RX)) {
		u16 vlan_proto = tpa_info->metadata >>
			RX_CMP_FLAGS2_METADATA_TPID_SFT;
		u16 vtag = tpa_info->metadata & RX_CMP_FLAGS2_METADATA_TCI_MASK;

		__vlan_hwaccel_put_tag(skb, htons(vlan_proto), vtag);
	}

	skb_checksum_none_assert(skb);
	if (likely(tpa_info->flags2 & RX_TPA_START_CMP_FLAGS2_L4_CS_CALC)) {
		skb->ip_summed = CHECKSUM_UNNECESSARY;
		skb->csum_level =
			(tpa_info->flags2 & RX_CMP_FLAGS2_T_L4_CS_CALC) >> 3;
	}

	if (TPA_END_GRO(tpa_end))
		skb = bnxt_gro_skb(bp, tpa_info, tpa_end, tpa_end1, skb);

	return skb;
}

static void bnxt_deliver_skb(struct bnxt *bp, struct bnxt_napi *bnapi,
			     struct sk_buff *skb)
{
	if (skb->dev != bp->dev) {
		/* this packet belongs to a vf-rep */
		bnxt_vf_rep_rx(bp, skb);
		return;
	}
	skb_record_rx_queue(skb, bnapi->index);
	napi_gro_receive(&bnapi->napi, skb);
}

/* returns the following:
 * 1       - 1 packet successfully received
 * 0       - successful TPA_START, packet not completed yet
 * -EBUSY  - completion ring does not have all the agg buffers yet
 * -ENOMEM - packet aborted due to out of memory
 * -EIO    - packet aborted due to hw error indicated in BD
 */
static int bnxt_rx_pkt(struct bnxt *bp, struct bnxt_cp_ring_info *cpr,
		       u32 *raw_cons, u8 *event)
{
	struct bnxt_napi *bnapi = cpr->bnapi;
	struct bnxt_rx_ring_info *rxr = bnapi->rx_ring;
	struct net_device *dev = bp->dev;
	struct rx_cmp *rxcmp;
	struct rx_cmp_ext *rxcmp1;
	u32 tmp_raw_cons = *raw_cons;
	u16 cfa_code, cons, prod, cp_cons = RING_CMP(tmp_raw_cons);
	struct bnxt_sw_rx_bd *rx_buf;
	unsigned int len;
	u8 *data_ptr, agg_bufs, cmp_type;
	dma_addr_t dma_addr;
	struct sk_buff *skb;
	void *data;
	int rc = 0;
	u32 misc;

	rxcmp = (struct rx_cmp *)
		&cpr->cp_desc_ring[CP_RING(cp_cons)][CP_IDX(cp_cons)];

	tmp_raw_cons = NEXT_RAW_CMP(tmp_raw_cons);
	cp_cons = RING_CMP(tmp_raw_cons);
	rxcmp1 = (struct rx_cmp_ext *)
		&cpr->cp_desc_ring[CP_RING(cp_cons)][CP_IDX(cp_cons)];

	if (!RX_CMP_VALID(rxcmp1, tmp_raw_cons))
		return -EBUSY;

	cmp_type = RX_CMP_TYPE(rxcmp);

	prod = rxr->rx_prod;

	if (cmp_type == CMP_TYPE_RX_L2_TPA_START_CMP) {
		bnxt_tpa_start(bp, rxr, (struct rx_tpa_start_cmp *)rxcmp,
			       (struct rx_tpa_start_cmp_ext *)rxcmp1);

		*event |= BNXT_RX_EVENT;
		goto next_rx_no_prod_no_len;

	} else if (cmp_type == CMP_TYPE_RX_L2_TPA_END_CMP) {
		skb = bnxt_tpa_end(bp, cpr, &tmp_raw_cons,
				   (struct rx_tpa_end_cmp *)rxcmp,
				   (struct rx_tpa_end_cmp_ext *)rxcmp1, event);

		if (IS_ERR(skb))
			return -EBUSY;

		rc = -ENOMEM;
		if (likely(skb)) {
			bnxt_deliver_skb(bp, bnapi, skb);
			rc = 1;
		}
		*event |= BNXT_RX_EVENT;
		goto next_rx_no_prod_no_len;
	}

	cons = rxcmp->rx_cmp_opaque;
	rx_buf = &rxr->rx_buf_ring[cons];
	data = rx_buf->data;
	data_ptr = rx_buf->data_ptr;
	if (unlikely(cons != rxr->rx_next_cons)) {
		int rc1 = bnxt_discard_rx(bp, cpr, raw_cons, rxcmp);

		bnxt_sched_reset(bp, rxr);
		return rc1;
	}
	prefetch(data_ptr);

	misc = le32_to_cpu(rxcmp->rx_cmp_misc_v1);
	agg_bufs = (misc & RX_CMP_AGG_BUFS) >> RX_CMP_AGG_BUFS_SHIFT;

	if (agg_bufs) {
		if (!bnxt_agg_bufs_valid(bp, cpr, agg_bufs, &tmp_raw_cons))
			return -EBUSY;

		cp_cons = NEXT_CMP(cp_cons);
		*event |= BNXT_AGG_EVENT;
	}
	*event |= BNXT_RX_EVENT;

	rx_buf->data = NULL;
	if (rxcmp1->rx_cmp_cfa_code_errors_v2 & RX_CMP_L2_ERRORS) {
		bnxt_reuse_rx_data(rxr, cons, data);
		if (agg_bufs)
			bnxt_reuse_rx_agg_bufs(cpr, cp_cons, agg_bufs);

		rc = -EIO;
		goto next_rx;
	}

	len = le32_to_cpu(rxcmp->rx_cmp_len_flags_type) >> RX_CMP_LEN_SHIFT;
	dma_addr = rx_buf->mapping;

	if (bnxt_rx_xdp(bp, rxr, cons, data, &data_ptr, &len, event)) {
		rc = 1;
		goto next_rx;
	}

	if (len <= bp->rx_copy_thresh) {
		skb = bnxt_copy_skb(bnapi, data_ptr, len, dma_addr);
		bnxt_reuse_rx_data(rxr, cons, data);
		if (!skb) {
			rc = -ENOMEM;
			goto next_rx;
		}
	} else {
		u32 payload;

		if (rx_buf->data_ptr == data_ptr)
			payload = misc & RX_CMP_PAYLOAD_OFFSET;
		else
			payload = 0;
		skb = bp->rx_skb_func(bp, rxr, cons, data, data_ptr, dma_addr,
				      payload | len);
		if (!skb) {
			rc = -ENOMEM;
			goto next_rx;
		}
	}

	if (agg_bufs) {
		skb = bnxt_rx_pages(bp, cpr, skb, cp_cons, agg_bufs);
		if (!skb) {
			rc = -ENOMEM;
			goto next_rx;
		}
	}

	if (RX_CMP_HASH_VALID(rxcmp)) {
		u32 hash_type = RX_CMP_HASH_TYPE(rxcmp);
		enum pkt_hash_types type = PKT_HASH_TYPE_L4;

		/* RSS profiles 1 and 3 with extract code 0 for inner 4-tuple */
		if (hash_type != 1 && hash_type != 3)
			type = PKT_HASH_TYPE_L3;
		skb_set_hash(skb, le32_to_cpu(rxcmp->rx_cmp_rss_hash), type);
	}

	cfa_code = RX_CMP_CFA_CODE(rxcmp1);
	skb->protocol = eth_type_trans(skb, bnxt_get_pkt_dev(bp, cfa_code));

	if ((rxcmp1->rx_cmp_flags2 &
	     cpu_to_le32(RX_CMP_FLAGS2_META_FORMAT_VLAN)) &&
	    (skb->dev->features & NETIF_F_HW_VLAN_CTAG_RX)) {
		u32 meta_data = le32_to_cpu(rxcmp1->rx_cmp_meta_data);
		u16 vtag = meta_data & RX_CMP_FLAGS2_METADATA_TCI_MASK;
		u16 vlan_proto = meta_data >> RX_CMP_FLAGS2_METADATA_TPID_SFT;

		__vlan_hwaccel_put_tag(skb, htons(vlan_proto), vtag);
	}

	skb_checksum_none_assert(skb);
	if (RX_CMP_L4_CS_OK(rxcmp1)) {
		if (dev->features & NETIF_F_RXCSUM) {
			skb->ip_summed = CHECKSUM_UNNECESSARY;
			skb->csum_level = RX_CMP_ENCAP(rxcmp1);
		}
	} else {
		if (rxcmp1->rx_cmp_cfa_code_errors_v2 & RX_CMP_L4_CS_ERR_BITS) {
			if (dev->features & NETIF_F_RXCSUM)
				bnapi->cp_ring.rx_l4_csum_errors++;
		}
	}

	bnxt_deliver_skb(bp, bnapi, skb);
	rc = 1;

next_rx:
	rxr->rx_prod = NEXT_RX(prod);
	rxr->rx_next_cons = NEXT_RX(cons);

	cpr->rx_packets += 1;
	cpr->rx_bytes += len;

next_rx_no_prod_no_len:
	*raw_cons = tmp_raw_cons;

	return rc;
}

/* In netpoll mode, if we are using a combined completion ring, we need to
 * discard the rx packets and recycle the buffers.
 */
static int bnxt_force_rx_discard(struct bnxt *bp,
				 struct bnxt_cp_ring_info *cpr,
				 u32 *raw_cons, u8 *event)
{
	u32 tmp_raw_cons = *raw_cons;
	struct rx_cmp_ext *rxcmp1;
	struct rx_cmp *rxcmp;
	u16 cp_cons;
	u8 cmp_type;

	cp_cons = RING_CMP(tmp_raw_cons);
	rxcmp = (struct rx_cmp *)
		&cpr->cp_desc_ring[CP_RING(cp_cons)][CP_IDX(cp_cons)];

	tmp_raw_cons = NEXT_RAW_CMP(tmp_raw_cons);
	cp_cons = RING_CMP(tmp_raw_cons);
	rxcmp1 = (struct rx_cmp_ext *)
		&cpr->cp_desc_ring[CP_RING(cp_cons)][CP_IDX(cp_cons)];

	if (!RX_CMP_VALID(rxcmp1, tmp_raw_cons))
		return -EBUSY;

	cmp_type = RX_CMP_TYPE(rxcmp);
	if (cmp_type == CMP_TYPE_RX_L2_CMP) {
		rxcmp1->rx_cmp_cfa_code_errors_v2 |=
			cpu_to_le32(RX_CMPL_ERRORS_CRC_ERROR);
	} else if (cmp_type == CMP_TYPE_RX_L2_TPA_END_CMP) {
		struct rx_tpa_end_cmp_ext *tpa_end1;

		tpa_end1 = (struct rx_tpa_end_cmp_ext *)rxcmp1;
		tpa_end1->rx_tpa_end_cmp_errors_v2 |=
			cpu_to_le32(RX_TPA_END_CMP_ERRORS);
	}
	return bnxt_rx_pkt(bp, cpr, raw_cons, event);
}

4bb13abf 1737#define BNXT_GET_EVENT_PORT(data) \
87c374de
MC
1738 ((data) & \
1739 ASYNC_EVENT_CMPL_PORT_CONN_NOT_ALLOWED_EVENT_DATA1_PORT_ID_MASK)
4bb13abf 1740
c0c050c5
MC
1741static int bnxt_async_event_process(struct bnxt *bp,
1742 struct hwrm_async_event_cmpl *cmpl)
1743{
1744 u16 event_id = le16_to_cpu(cmpl->event_id);
1745
1746 /* TODO CHIMP_FW: Define event id's for link change, error etc */
1747 switch (event_id) {
87c374de 1748 case ASYNC_EVENT_CMPL_EVENT_ID_LINK_SPEED_CFG_CHANGE: {
8cbde117
MC
1749 u32 data1 = le32_to_cpu(cmpl->event_data1);
1750 struct bnxt_link_info *link_info = &bp->link_info;
1751
1752 if (BNXT_VF(bp))
1753 goto async_event_process_exit;
a8168b6c
MC
1754
1755 /* print unsupported speed warning in forced speed mode only */
1756 if (!(link_info->autoneg & BNXT_AUTONEG_SPEED) &&
1757 (data1 & 0x20000)) {
8cbde117
MC
1758 u16 fw_speed = link_info->force_link_speed;
1759 u32 speed = bnxt_fw_to_ethtool_speed(fw_speed);
1760
a8168b6c
MC
1761 if (speed != SPEED_UNKNOWN)
1762 netdev_warn(bp->dev, "Link speed %d no longer supported\n",
1763 speed);
8cbde117 1764 }
286ef9d6 1765 set_bit(BNXT_LINK_SPEED_CHNG_SP_EVENT, &bp->sp_event);
8cbde117 1766 }
bc171e87 1767 /* fall through */
87c374de 1768 case ASYNC_EVENT_CMPL_EVENT_ID_LINK_STATUS_CHANGE:
c0c050c5 1769 set_bit(BNXT_LINK_CHNG_SP_EVENT, &bp->sp_event);
19241368 1770 break;
87c374de 1771 case ASYNC_EVENT_CMPL_EVENT_ID_PF_DRVR_UNLOAD:
19241368 1772 set_bit(BNXT_HWRM_PF_UNLOAD_SP_EVENT, &bp->sp_event);
c0c050c5 1773 break;
87c374de 1774 case ASYNC_EVENT_CMPL_EVENT_ID_PORT_CONN_NOT_ALLOWED: {
4bb13abf
MC
1775 u32 data1 = le32_to_cpu(cmpl->event_data1);
1776 u16 port_id = BNXT_GET_EVENT_PORT(data1);
1777
1778 if (BNXT_VF(bp))
1779 break;
1780
1781 if (bp->pf.port_id != port_id)
1782 break;
1783
4bb13abf
MC
1784 set_bit(BNXT_HWRM_PORT_MODULE_SP_EVENT, &bp->sp_event);
1785 break;
1786 }
87c374de 1787 case ASYNC_EVENT_CMPL_EVENT_ID_VF_CFG_CHANGE:
fc0f1929
MC
1788 if (BNXT_PF(bp))
1789 goto async_event_process_exit;
1790 set_bit(BNXT_RESET_TASK_SILENT_SP_EVENT, &bp->sp_event);
1791 break;
c0c050c5 1792 default:
19241368 1793 goto async_event_process_exit;
c0c050c5 1794 }
c213eae8 1795 bnxt_queue_sp_work(bp);
19241368 1796async_event_process_exit:
a588e458 1797 bnxt_ulp_async_events(bp, cmpl);
c0c050c5
MC
1798 return 0;
1799}
1800
1801static int bnxt_hwrm_handler(struct bnxt *bp, struct tx_cmp *txcmp)
1802{
1803 u16 cmpl_type = TX_CMP_TYPE(txcmp), vf_id, seq_id;
1804 struct hwrm_cmpl *h_cmpl = (struct hwrm_cmpl *)txcmp;
1805 struct hwrm_fwd_req_cmpl *fwd_req_cmpl =
1806 (struct hwrm_fwd_req_cmpl *)txcmp;
1807
1808 switch (cmpl_type) {
1809 case CMPL_BASE_TYPE_HWRM_DONE:
1810 seq_id = le16_to_cpu(h_cmpl->sequence_id);
1811 if (seq_id == bp->hwrm_intr_seq_id)
1812 bp->hwrm_intr_seq_id = HWRM_SEQ_ID_INVALID;
1813 else
1814 netdev_err(bp->dev, "Invalid hwrm seq id %d\n", seq_id);
1815 break;
1816
1817 case CMPL_BASE_TYPE_HWRM_FWD_REQ:
1818 vf_id = le16_to_cpu(fwd_req_cmpl->source_id);
1819
1820 if ((vf_id < bp->pf.first_vf_id) ||
1821 (vf_id >= bp->pf.first_vf_id + bp->pf.active_vfs)) {
1822 netdev_err(bp->dev, "Msg contains invalid VF id %x\n",
1823 vf_id);
1824 return -EINVAL;
1825 }
1826
1827 set_bit(vf_id - bp->pf.first_vf_id, bp->pf.vf_event_bmap);
1828 set_bit(BNXT_HWRM_EXEC_FWD_REQ_SP_EVENT, &bp->sp_event);
c213eae8 1829 bnxt_queue_sp_work(bp);
c0c050c5
MC
1830 break;
1831
1832 case CMPL_BASE_TYPE_HWRM_ASYNC_EVENT:
1833 bnxt_async_event_process(bp,
1834 (struct hwrm_async_event_cmpl *)txcmp);
1835
1836 default:
1837 break;
1838 }
1839
1840 return 0;
1841}
1842
1843static irqreturn_t bnxt_msix(int irq, void *dev_instance)
1844{
1845 struct bnxt_napi *bnapi = dev_instance;
1846 struct bnxt *bp = bnapi->bp;
1847 struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
1848 u32 cons = RING_CMP(cpr->cp_raw_cons);
1849
6a8788f2 1850 cpr->event_ctr++;
c0c050c5
MC
1851 prefetch(&cpr->cp_desc_ring[CP_RING(cons)][CP_IDX(cons)]);
1852 napi_schedule(&bnapi->napi);
1853 return IRQ_HANDLED;
1854}
1855
1856static inline int bnxt_has_work(struct bnxt *bp, struct bnxt_cp_ring_info *cpr)
1857{
1858 u32 raw_cons = cpr->cp_raw_cons;
1859 u16 cons = RING_CMP(raw_cons);
1860 struct tx_cmp *txcmp;
1861
1862 txcmp = &cpr->cp_desc_ring[CP_RING(cons)][CP_IDX(cons)];
1863
1864 return TX_CMP_VALID(txcmp, raw_cons);
1865}
1866
c0c050c5
MC
1867static irqreturn_t bnxt_inta(int irq, void *dev_instance)
1868{
1869 struct bnxt_napi *bnapi = dev_instance;
1870 struct bnxt *bp = bnapi->bp;
1871 struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
1872 u32 cons = RING_CMP(cpr->cp_raw_cons);
1873 u32 int_status;
1874
1875 prefetch(&cpr->cp_desc_ring[CP_RING(cons)][CP_IDX(cons)]);
1876
1877 if (!bnxt_has_work(bp, cpr)) {
11809490 1878 int_status = readl(bp->bar0 + BNXT_CAG_REG_LEGACY_INT_STATUS);
c0c050c5
MC
1879 /* return if erroneous interrupt */
1880 if (!(int_status & (0x10000 << cpr->cp_ring_struct.fw_ring_id)))
1881 return IRQ_NONE;
1882 }
1883
1884 /* disable ring IRQ */
697197e5 1885 BNXT_CP_DB_IRQ_DIS(cpr->cp_db.doorbell);
c0c050c5
MC
1886
1887 /* Return here if interrupt is shared and is disabled. */
1888 if (unlikely(atomic_read(&bp->intr_sem) != 0))
1889 return IRQ_HANDLED;
1890
1891 napi_schedule(&bnapi->napi);
1892 return IRQ_HANDLED;
1893}
1894
3675b92f
MC
1895static int __bnxt_poll_work(struct bnxt *bp, struct bnxt_cp_ring_info *cpr,
1896 int budget)
c0c050c5 1897{
e44758b7 1898 struct bnxt_napi *bnapi = cpr->bnapi;
c0c050c5
MC
1899 u32 raw_cons = cpr->cp_raw_cons;
1900 u32 cons;
1901 int tx_pkts = 0;
1902 int rx_pkts = 0;
4e5dbbda 1903 u8 event = 0;
c0c050c5
MC
1904 struct tx_cmp *txcmp;
1905
0fcec985 1906 cpr->has_more_work = 0;
c0c050c5
MC
1907 while (1) {
1908 int rc;
1909
1910 cons = RING_CMP(raw_cons);
1911 txcmp = &cpr->cp_desc_ring[CP_RING(cons)][CP_IDX(cons)];
1912
1913 if (!TX_CMP_VALID(txcmp, raw_cons))
1914 break;
1915
67a95e20
MC
1916 /* The valid test of the entry must be done first before
1917 * reading any further.
1918 */
b67daab0 1919 dma_rmb();
3675b92f 1920 cpr->had_work_done = 1;
c0c050c5
MC
1921 if (TX_CMP_TYPE(txcmp) == CMP_TYPE_TX_L2_CMP) {
1922 tx_pkts++;
1923 /* return full budget so NAPI will complete. */
73f21c65 1924 if (unlikely(tx_pkts > bp->tx_wake_thresh)) {
c0c050c5 1925 rx_pkts = budget;
73f21c65 1926 raw_cons = NEXT_RAW_CMP(raw_cons);
0fcec985
MC
1927 if (budget)
1928 cpr->has_more_work = 1;
73f21c65
MC
1929 break;
1930 }
c0c050c5 1931 } else if ((TX_CMP_TYPE(txcmp) & 0x30) == 0x10) {
2270bc5d 1932 if (likely(budget))
e44758b7 1933 rc = bnxt_rx_pkt(bp, cpr, &raw_cons, &event);
2270bc5d 1934 else
e44758b7 1935 rc = bnxt_force_rx_discard(bp, cpr, &raw_cons,
2270bc5d 1936 &event);
c0c050c5
MC
1937 if (likely(rc >= 0))
1938 rx_pkts += rc;
903649e7
MC
1939 /* Increment rx_pkts when rc is -ENOMEM to count towards
1940 * the NAPI budget. Otherwise, we may potentially loop
1941 * here forever if we consistently cannot allocate
1942 * buffers.
1943 */
2edbdb31 1944 else if (rc == -ENOMEM && budget)
903649e7 1945 rx_pkts++;
c0c050c5
MC
1946 else if (rc == -EBUSY) /* partial completion */
1947 break;
c0c050c5
MC
1948 } else if (unlikely((TX_CMP_TYPE(txcmp) ==
1949 CMPL_BASE_TYPE_HWRM_DONE) ||
1950 (TX_CMP_TYPE(txcmp) ==
1951 CMPL_BASE_TYPE_HWRM_FWD_REQ) ||
1952 (TX_CMP_TYPE(txcmp) ==
1953 CMPL_BASE_TYPE_HWRM_ASYNC_EVENT))) {
1954 bnxt_hwrm_handler(bp, txcmp);
1955 }
1956 raw_cons = NEXT_RAW_CMP(raw_cons);
1957
0fcec985
MC
1958 if (rx_pkts && rx_pkts == budget) {
1959 cpr->has_more_work = 1;
c0c050c5 1960 break;
0fcec985 1961 }
c0c050c5
MC
1962 }
1963
38413406
MC
1964 if (event & BNXT_TX_EVENT) {
1965 struct bnxt_tx_ring_info *txr = bnapi->tx_ring;
38413406
MC
1966 u16 prod = txr->tx_prod;
1967
1968 /* Sync BD data before updating doorbell */
1969 wmb();
1970
697197e5 1971 bnxt_db_write_relaxed(bp, &txr->tx_db, prod);
38413406
MC
1972 }
1973
c0c050c5 1974 cpr->cp_raw_cons = raw_cons;
3675b92f
MC
1975 bnapi->tx_pkts += tx_pkts;
1976 bnapi->events |= event;
1977 return rx_pkts;
1978}
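
/* A zero budget indicates a netpoll-style call: rx completions are consumed
 * through bnxt_force_rx_discard() above so the buffers are simply recycled,
 * and has_more_work is deliberately left clear so the caller does not
 * reschedule.  With a non-zero budget, has_more_work is set whenever the
 * ring is left with unprocessed completions.
 */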

static void __bnxt_poll_work_done(struct bnxt *bp, struct bnxt_napi *bnapi)
{
	if (bnapi->tx_pkts) {
		bnapi->tx_int(bp, bnapi, bnapi->tx_pkts);
		bnapi->tx_pkts = 0;
	}

	if (bnapi->events & BNXT_RX_EVENT) {
		struct bnxt_rx_ring_info *rxr = bnapi->rx_ring;

		bnxt_db_write(bp, &rxr->rx_db, rxr->rx_prod);
		if (bnapi->events & BNXT_AGG_EVENT)
			bnxt_db_write(bp, &rxr->rx_agg_db, rxr->rx_agg_prod);
	}
	bnapi->events = 0;
}

static int bnxt_poll_work(struct bnxt *bp, struct bnxt_cp_ring_info *cpr,
			  int budget)
{
	struct bnxt_napi *bnapi = cpr->bnapi;
	int rx_pkts;

	rx_pkts = __bnxt_poll_work(bp, cpr, budget);

	/* ACK completion ring before freeing tx ring and producing new
	 * buffers in rx/agg rings to prevent overflowing the completion
	 * ring.
	 */
	bnxt_db_cq(bp, &cpr->cp_db, cpr->cp_raw_cons);

	__bnxt_poll_work_done(bp, bnapi);
	return rx_pkts;
}

static int bnxt_poll_nitroa0(struct napi_struct *napi, int budget)
{
	struct bnxt_napi *bnapi = container_of(napi, struct bnxt_napi, napi);
	struct bnxt *bp = bnapi->bp;
	struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
	struct bnxt_rx_ring_info *rxr = bnapi->rx_ring;
	struct tx_cmp *txcmp;
	struct rx_cmp_ext *rxcmp1;
	u32 cp_cons, tmp_raw_cons;
	u32 raw_cons = cpr->cp_raw_cons;
	u32 rx_pkts = 0;
	u8 event = 0;

	while (1) {
		int rc;

		cp_cons = RING_CMP(raw_cons);
		txcmp = &cpr->cp_desc_ring[CP_RING(cp_cons)][CP_IDX(cp_cons)];

		if (!TX_CMP_VALID(txcmp, raw_cons))
			break;

		if ((TX_CMP_TYPE(txcmp) & 0x30) == 0x10) {
			tmp_raw_cons = NEXT_RAW_CMP(raw_cons);
			cp_cons = RING_CMP(tmp_raw_cons);
			rxcmp1 = (struct rx_cmp_ext *)
			  &cpr->cp_desc_ring[CP_RING(cp_cons)][CP_IDX(cp_cons)];

			if (!RX_CMP_VALID(rxcmp1, tmp_raw_cons))
				break;

			/* force an error to recycle the buffer */
			rxcmp1->rx_cmp_cfa_code_errors_v2 |=
				cpu_to_le32(RX_CMPL_ERRORS_CRC_ERROR);

			rc = bnxt_rx_pkt(bp, cpr, &raw_cons, &event);
			if (likely(rc == -EIO) && budget)
				rx_pkts++;
			else if (rc == -EBUSY)	/* partial completion */
				break;
		} else if (unlikely(TX_CMP_TYPE(txcmp) ==
				    CMPL_BASE_TYPE_HWRM_DONE)) {
			bnxt_hwrm_handler(bp, txcmp);
		} else {
			netdev_err(bp->dev,
				   "Invalid completion received on special ring\n");
		}
		raw_cons = NEXT_RAW_CMP(raw_cons);

		if (rx_pkts == budget)
			break;
	}

	cpr->cp_raw_cons = raw_cons;
	BNXT_DB_CQ(&cpr->cp_db, cpr->cp_raw_cons);
	bnxt_db_write(bp, &rxr->rx_db, rxr->rx_prod);

	if (event & BNXT_AGG_EVENT)
		bnxt_db_write(bp, &rxr->rx_agg_db, rxr->rx_agg_prod);

	if (!bnxt_has_work(bp, cpr) && rx_pkts < budget) {
		napi_complete_done(napi, rx_pkts);
		BNXT_DB_CQ_ARM(&cpr->cp_db, cpr->cp_raw_cons);
	}
	return rx_pkts;
}

static int bnxt_poll(struct napi_struct *napi, int budget)
{
	struct bnxt_napi *bnapi = container_of(napi, struct bnxt_napi, napi);
	struct bnxt *bp = bnapi->bp;
	struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
	int work_done = 0;

	while (1) {
		work_done += bnxt_poll_work(bp, cpr, budget - work_done);

		if (work_done >= budget) {
			if (!budget)
				BNXT_DB_CQ_ARM(&cpr->cp_db, cpr->cp_raw_cons);
			break;
		}

		if (!bnxt_has_work(bp, cpr)) {
			if (napi_complete_done(napi, work_done))
				BNXT_DB_CQ_ARM(&cpr->cp_db, cpr->cp_raw_cons);
			break;
		}
	}
	if (bp->flags & BNXT_FLAG_DIM) {
		struct net_dim_sample dim_sample;

		net_dim_sample(cpr->event_ctr,
			       cpr->rx_packets,
			       cpr->rx_bytes,
			       &dim_sample);
		net_dim(&cpr->dim, dim_sample);
	}
	mmiowb();
	return work_done;
}

static int __bnxt_poll_cqs(struct bnxt *bp, struct bnxt_napi *bnapi, int budget)
{
	struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
	int i, work_done = 0;

	for (i = 0; i < 2; i++) {
		struct bnxt_cp_ring_info *cpr2 = cpr->cp_ring_arr[i];

		if (cpr2) {
			work_done += __bnxt_poll_work(bp, cpr2,
						      budget - work_done);
			cpr->has_more_work |= cpr2->has_more_work;
		}
	}
	return work_done;
}

static void __bnxt_poll_cqs_done(struct bnxt *bp, struct bnxt_napi *bnapi,
				 u64 dbr_type, bool all)
{
	struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
	int i;

	for (i = 0; i < 2; i++) {
		struct bnxt_cp_ring_info *cpr2 = cpr->cp_ring_arr[i];
		struct bnxt_db_info *db;

		if (cpr2 && (all || cpr2->had_work_done)) {
			db = &cpr2->cp_db;
			writeq(db->db_key64 | dbr_type |
			       RING_CMP(cpr2->cp_raw_cons), db->doorbell);
			cpr2->had_work_done = 0;
		}
	}
	__bnxt_poll_work_done(bp, bnapi);
}

static int bnxt_poll_p5(struct napi_struct *napi, int budget)
{
	struct bnxt_napi *bnapi = container_of(napi, struct bnxt_napi, napi);
	struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
	u32 raw_cons = cpr->cp_raw_cons;
	struct bnxt *bp = bnapi->bp;
	struct nqe_cn *nqcmp;
	int work_done = 0;
	u32 cons;

	if (cpr->has_more_work) {
		cpr->has_more_work = 0;
		work_done = __bnxt_poll_cqs(bp, bnapi, budget);
		if (cpr->has_more_work) {
			__bnxt_poll_cqs_done(bp, bnapi, DBR_TYPE_CQ, false);
			return work_done;
		}
		__bnxt_poll_cqs_done(bp, bnapi, DBR_TYPE_CQ_ARMALL, true);
		if (napi_complete_done(napi, work_done))
			BNXT_DB_NQ_ARM_P5(&cpr->cp_db, cpr->cp_raw_cons);
		return work_done;
	}
	while (1) {
		cons = RING_CMP(raw_cons);
		nqcmp = &cpr->nq_desc_ring[CP_RING(cons)][CP_IDX(cons)];

		if (!NQ_CMP_VALID(nqcmp, raw_cons)) {
			__bnxt_poll_cqs_done(bp, bnapi, DBR_TYPE_CQ_ARMALL,
					     false);
			cpr->cp_raw_cons = raw_cons;
			if (napi_complete_done(napi, work_done))
				BNXT_DB_NQ_ARM_P5(&cpr->cp_db,
						  cpr->cp_raw_cons);
			return work_done;
		}

		/* The valid test of the entry must be done first before
		 * reading any further.
		 */
		dma_rmb();

		if (nqcmp->type == cpu_to_le16(NQ_CN_TYPE_CQ_NOTIFICATION)) {
			u32 idx = le32_to_cpu(nqcmp->cq_handle_low);
			struct bnxt_cp_ring_info *cpr2;

			cpr2 = cpr->cp_ring_arr[idx];
			work_done += __bnxt_poll_work(bp, cpr2,
						      budget - work_done);
			cpr->has_more_work = cpr2->has_more_work;
		} else {
			bnxt_hwrm_handler(bp, (struct tx_cmp *)nqcmp);
		}
		raw_cons = NEXT_RAW_CMP(raw_cons);
		if (cpr->has_more_work)
			break;
	}
	__bnxt_poll_cqs_done(bp, bnapi, DBR_TYPE_CQ, true);
	cpr->cp_raw_cons = raw_cons;
	return work_done;
}
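
/* On P5 chips the interrupt lands on a notification queue (NQ) rather than
 * directly on a completion ring: bnxt_poll_p5() walks the NQ entries and,
 * for each CQ_NOTIFICATION, polls the rx or tx completion ring indexed by
 * the notification's cq_handle_low in cp_ring_arr[], then re-arms with
 * DBR_TYPE_CQ_ARMALL once all rings are drained.
 */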

static void bnxt_free_tx_skbs(struct bnxt *bp)
{
	int i, max_idx;
	struct pci_dev *pdev = bp->pdev;

	if (!bp->tx_ring)
		return;

	max_idx = bp->tx_nr_pages * TX_DESC_CNT;
	for (i = 0; i < bp->tx_nr_rings; i++) {
		struct bnxt_tx_ring_info *txr = &bp->tx_ring[i];
		int j;

		for (j = 0; j < max_idx;) {
			struct bnxt_sw_tx_bd *tx_buf = &txr->tx_buf_ring[j];
			struct sk_buff *skb = tx_buf->skb;
			int k, last;

			if (!skb) {
				j++;
				continue;
			}

			tx_buf->skb = NULL;

			if (tx_buf->is_push) {
				dev_kfree_skb(skb);
				j += 2;
				continue;
			}

			dma_unmap_single(&pdev->dev,
					 dma_unmap_addr(tx_buf, mapping),
					 skb_headlen(skb),
					 PCI_DMA_TODEVICE);

			last = tx_buf->nr_frags;
			j += 2;
			for (k = 0; k < last; k++, j++) {
				int ring_idx = j & bp->tx_ring_mask;
				skb_frag_t *frag = &skb_shinfo(skb)->frags[k];

				tx_buf = &txr->tx_buf_ring[ring_idx];
				dma_unmap_page(
					&pdev->dev,
					dma_unmap_addr(tx_buf, mapping),
					skb_frag_size(frag), PCI_DMA_TODEVICE);
			}
			dev_kfree_skb(skb);
		}
		netdev_tx_reset_queue(netdev_get_tx_queue(bp->dev, i));
	}
}

static void bnxt_free_rx_skbs(struct bnxt *bp)
{
	int i, max_idx, max_agg_idx;
	struct pci_dev *pdev = bp->pdev;

	if (!bp->rx_ring)
		return;

	max_idx = bp->rx_nr_pages * RX_DESC_CNT;
	max_agg_idx = bp->rx_agg_nr_pages * RX_DESC_CNT;
	for (i = 0; i < bp->rx_nr_rings; i++) {
		struct bnxt_rx_ring_info *rxr = &bp->rx_ring[i];
		int j;

		if (rxr->rx_tpa) {
			for (j = 0; j < MAX_TPA; j++) {
				struct bnxt_tpa_info *tpa_info =
							&rxr->rx_tpa[j];
				u8 *data = tpa_info->data;

				if (!data)
					continue;

				dma_unmap_single_attrs(&pdev->dev,
						       tpa_info->mapping,
						       bp->rx_buf_use_size,
						       bp->rx_dir,
						       DMA_ATTR_WEAK_ORDERING);

				tpa_info->data = NULL;

				kfree(data);
			}
		}

		for (j = 0; j < max_idx; j++) {
			struct bnxt_sw_rx_bd *rx_buf = &rxr->rx_buf_ring[j];
			dma_addr_t mapping = rx_buf->mapping;
			void *data = rx_buf->data;

			if (!data)
				continue;

			rx_buf->data = NULL;

			if (BNXT_RX_PAGE_MODE(bp)) {
				mapping -= bp->rx_dma_offset;
				dma_unmap_page_attrs(&pdev->dev, mapping,
						     PAGE_SIZE, bp->rx_dir,
						     DMA_ATTR_WEAK_ORDERING);
				__free_page(data);
			} else {
				dma_unmap_single_attrs(&pdev->dev, mapping,
						       bp->rx_buf_use_size,
						       bp->rx_dir,
						       DMA_ATTR_WEAK_ORDERING);
				kfree(data);
			}
		}

		for (j = 0; j < max_agg_idx; j++) {
			struct bnxt_sw_rx_agg_bd *rx_agg_buf =
						&rxr->rx_agg_ring[j];
			struct page *page = rx_agg_buf->page;

			if (!page)
				continue;

			dma_unmap_page_attrs(&pdev->dev, rx_agg_buf->mapping,
					     BNXT_RX_PAGE_SIZE,
					     PCI_DMA_FROMDEVICE,
					     DMA_ATTR_WEAK_ORDERING);

			rx_agg_buf->page = NULL;
			__clear_bit(j, rxr->rx_agg_bmap);

			__free_page(page);
		}
		if (rxr->rx_page) {
			__free_page(rxr->rx_page);
			rxr->rx_page = NULL;
		}
	}
}

static void bnxt_free_skbs(struct bnxt *bp)
{
	bnxt_free_tx_skbs(bp);
	bnxt_free_rx_skbs(bp);
}

static void bnxt_free_ring(struct bnxt *bp, struct bnxt_ring_mem_info *rmem)
{
	struct pci_dev *pdev = bp->pdev;
	int i;

	for (i = 0; i < rmem->nr_pages; i++) {
		if (!rmem->pg_arr[i])
			continue;

		dma_free_coherent(&pdev->dev, rmem->page_size,
				  rmem->pg_arr[i], rmem->dma_arr[i]);

		rmem->pg_arr[i] = NULL;
	}
	if (rmem->pg_tbl) {
		dma_free_coherent(&pdev->dev, rmem->nr_pages * 8,
				  rmem->pg_tbl, rmem->pg_tbl_map);
		rmem->pg_tbl = NULL;
	}
	if (rmem->vmem_size && *rmem->vmem) {
		vfree(*rmem->vmem);
		*rmem->vmem = NULL;
	}
}

static int bnxt_alloc_ring(struct bnxt *bp, struct bnxt_ring_mem_info *rmem)
{
	struct pci_dev *pdev = bp->pdev;
	u64 valid_bit = 0;
	int i;

	if (rmem->flags & (BNXT_RMEM_VALID_PTE_FLAG | BNXT_RMEM_RING_PTE_FLAG))
		valid_bit = PTU_PTE_VALID;
	if (rmem->nr_pages > 1) {
		rmem->pg_tbl = dma_alloc_coherent(&pdev->dev,
						  rmem->nr_pages * 8,
						  &rmem->pg_tbl_map,
						  GFP_KERNEL);
		if (!rmem->pg_tbl)
			return -ENOMEM;
	}

	for (i = 0; i < rmem->nr_pages; i++) {
		u64 extra_bits = valid_bit;

		rmem->pg_arr[i] = dma_alloc_coherent(&pdev->dev,
						     rmem->page_size,
						     &rmem->dma_arr[i],
						     GFP_KERNEL);
		if (!rmem->pg_arr[i])
			return -ENOMEM;

		if (rmem->nr_pages > 1) {
			if (i == rmem->nr_pages - 2 &&
			    (rmem->flags & BNXT_RMEM_RING_PTE_FLAG))
				extra_bits |= PTU_PTE_NEXT_TO_LAST;
			else if (i == rmem->nr_pages - 1 &&
				 (rmem->flags & BNXT_RMEM_RING_PTE_FLAG))
				extra_bits |= PTU_PTE_LAST;
			rmem->pg_tbl[i] =
				cpu_to_le64(rmem->dma_arr[i] | extra_bits);
		}
	}

	if (rmem->vmem_size) {
		*rmem->vmem = vzalloc(rmem->vmem_size);
		if (!(*rmem->vmem))
			return -ENOMEM;
	}
	return 0;
}
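
/* For rings described to the chip through a page table
 * (BNXT_RMEM_RING_PTE_FLAG), each page-table entry above carries
 * PTU_PTE_VALID, and the last two entries are additionally tagged
 * PTU_PTE_NEXT_TO_LAST and PTU_PTE_LAST so the hardware can detect the end
 * of the ring.
 */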

static void bnxt_free_rx_rings(struct bnxt *bp)
{
	int i;

	if (!bp->rx_ring)
		return;

	for (i = 0; i < bp->rx_nr_rings; i++) {
		struct bnxt_rx_ring_info *rxr = &bp->rx_ring[i];
		struct bnxt_ring_struct *ring;

		if (rxr->xdp_prog)
			bpf_prog_put(rxr->xdp_prog);

		if (xdp_rxq_info_is_reg(&rxr->xdp_rxq))
			xdp_rxq_info_unreg(&rxr->xdp_rxq);

		kfree(rxr->rx_tpa);
		rxr->rx_tpa = NULL;

		kfree(rxr->rx_agg_bmap);
		rxr->rx_agg_bmap = NULL;

		ring = &rxr->rx_ring_struct;
		bnxt_free_ring(bp, &ring->ring_mem);

		ring = &rxr->rx_agg_ring_struct;
		bnxt_free_ring(bp, &ring->ring_mem);
	}
}

static int bnxt_alloc_rx_rings(struct bnxt *bp)
{
	int i, rc, agg_rings = 0, tpa_rings = 0;

	if (!bp->rx_ring)
		return -ENOMEM;

	if (bp->flags & BNXT_FLAG_AGG_RINGS)
		agg_rings = 1;

	if (bp->flags & BNXT_FLAG_TPA)
		tpa_rings = 1;

	for (i = 0; i < bp->rx_nr_rings; i++) {
		struct bnxt_rx_ring_info *rxr = &bp->rx_ring[i];
		struct bnxt_ring_struct *ring;

		ring = &rxr->rx_ring_struct;

		rc = xdp_rxq_info_reg(&rxr->xdp_rxq, bp->dev, i);
		if (rc < 0)
			return rc;

		rc = bnxt_alloc_ring(bp, &ring->ring_mem);
		if (rc)
			return rc;

		ring->grp_idx = i;
		if (agg_rings) {
			u16 mem_size;

			ring = &rxr->rx_agg_ring_struct;
			rc = bnxt_alloc_ring(bp, &ring->ring_mem);
			if (rc)
				return rc;

			ring->grp_idx = i;
			rxr->rx_agg_bmap_size = bp->rx_agg_ring_mask + 1;
			mem_size = rxr->rx_agg_bmap_size / 8;
			rxr->rx_agg_bmap = kzalloc(mem_size, GFP_KERNEL);
			if (!rxr->rx_agg_bmap)
				return -ENOMEM;

			if (tpa_rings) {
				rxr->rx_tpa = kcalloc(MAX_TPA,
						sizeof(struct bnxt_tpa_info),
						GFP_KERNEL);
				if (!rxr->rx_tpa)
					return -ENOMEM;
			}
		}
	}
	return 0;
}

static void bnxt_free_tx_rings(struct bnxt *bp)
{
	int i;
	struct pci_dev *pdev = bp->pdev;

	if (!bp->tx_ring)
		return;

	for (i = 0; i < bp->tx_nr_rings; i++) {
		struct bnxt_tx_ring_info *txr = &bp->tx_ring[i];
		struct bnxt_ring_struct *ring;

		if (txr->tx_push) {
			dma_free_coherent(&pdev->dev, bp->tx_push_size,
					  txr->tx_push, txr->tx_push_mapping);
			txr->tx_push = NULL;
		}

		ring = &txr->tx_ring_struct;

		bnxt_free_ring(bp, &ring->ring_mem);
	}
}

static int bnxt_alloc_tx_rings(struct bnxt *bp)
{
	int i, j, rc;
	struct pci_dev *pdev = bp->pdev;

	bp->tx_push_size = 0;
	if (bp->tx_push_thresh) {
		int push_size;

		push_size  = L1_CACHE_ALIGN(sizeof(struct tx_push_bd) +
					bp->tx_push_thresh);

		if (push_size > 256) {
			push_size = 0;
			bp->tx_push_thresh = 0;
		}

		bp->tx_push_size = push_size;
	}

	for (i = 0, j = 0; i < bp->tx_nr_rings; i++) {
		struct bnxt_tx_ring_info *txr = &bp->tx_ring[i];
		struct bnxt_ring_struct *ring;
		u8 qidx;

		ring = &txr->tx_ring_struct;

		rc = bnxt_alloc_ring(bp, &ring->ring_mem);
		if (rc)
			return rc;

		ring->grp_idx = txr->bnapi->index;
		if (bp->tx_push_size) {
			dma_addr_t mapping;

			/* One pre-allocated DMA buffer to backup
			 * TX push operation
			 */
			txr->tx_push = dma_alloc_coherent(&pdev->dev,
						bp->tx_push_size,
						&txr->tx_push_mapping,
						GFP_KERNEL);

			if (!txr->tx_push)
				return -ENOMEM;

			mapping = txr->tx_push_mapping +
				sizeof(struct tx_push_bd);
			txr->data_mapping = cpu_to_le64(mapping);

			memset(txr->tx_push, 0, sizeof(struct tx_push_bd));
		}
		qidx = bp->tc_to_qidx[j];
		ring->queue_id = bp->q_info[qidx].queue_id;
		if (i < bp->tx_nr_rings_xdp)
			continue;
		if (i % bp->tx_nr_rings_per_tc == (bp->tx_nr_rings_per_tc - 1))
			j++;
	}
	return 0;
}

static void bnxt_free_cp_rings(struct bnxt *bp)
{
	int i;

	if (!bp->bnapi)
		return;

	for (i = 0; i < bp->cp_nr_rings; i++) {
		struct bnxt_napi *bnapi = bp->bnapi[i];
		struct bnxt_cp_ring_info *cpr;
		struct bnxt_ring_struct *ring;
		int j;

		if (!bnapi)
			continue;

		cpr = &bnapi->cp_ring;
		ring = &cpr->cp_ring_struct;

		bnxt_free_ring(bp, &ring->ring_mem);

		for (j = 0; j < 2; j++) {
			struct bnxt_cp_ring_info *cpr2 = cpr->cp_ring_arr[j];

			if (cpr2) {
				ring = &cpr2->cp_ring_struct;
				bnxt_free_ring(bp, &ring->ring_mem);
				kfree(cpr2);
				cpr->cp_ring_arr[j] = NULL;
			}
		}
	}
}

static struct bnxt_cp_ring_info *bnxt_alloc_cp_sub_ring(struct bnxt *bp)
{
	struct bnxt_ring_mem_info *rmem;
	struct bnxt_ring_struct *ring;
	struct bnxt_cp_ring_info *cpr;
	int rc;

	cpr = kzalloc(sizeof(*cpr), GFP_KERNEL);
	if (!cpr)
		return NULL;

	ring = &cpr->cp_ring_struct;
	rmem = &ring->ring_mem;
	rmem->nr_pages = bp->cp_nr_pages;
	rmem->page_size = HW_CMPD_RING_SIZE;
	rmem->pg_arr = (void **)cpr->cp_desc_ring;
	rmem->dma_arr = cpr->cp_desc_mapping;
	rmem->flags = BNXT_RMEM_RING_PTE_FLAG;
	rc = bnxt_alloc_ring(bp, rmem);
	if (rc) {
		bnxt_free_ring(bp, rmem);
		kfree(cpr);
		cpr = NULL;
	}
	return cpr;
}

static int bnxt_alloc_cp_rings(struct bnxt *bp)
{
	bool sh = !!(bp->flags & BNXT_FLAG_SHARED_RINGS);
	int i, rc, ulp_base_vec, ulp_msix;

	ulp_msix = bnxt_get_ulp_msix_num(bp);
	ulp_base_vec = bnxt_get_ulp_msix_base(bp);
	for (i = 0; i < bp->cp_nr_rings; i++) {
		struct bnxt_napi *bnapi = bp->bnapi[i];
		struct bnxt_cp_ring_info *cpr;
		struct bnxt_ring_struct *ring;

		if (!bnapi)
			continue;

		cpr = &bnapi->cp_ring;
		cpr->bnapi = bnapi;
		ring = &cpr->cp_ring_struct;

		rc = bnxt_alloc_ring(bp, &ring->ring_mem);
		if (rc)
			return rc;

		if (ulp_msix && i >= ulp_base_vec)
			ring->map_idx = i + ulp_msix;
		else
			ring->map_idx = i;

		if (!(bp->flags & BNXT_FLAG_CHIP_P5))
			continue;

		if (i < bp->rx_nr_rings) {
			struct bnxt_cp_ring_info *cpr2 =
				bnxt_alloc_cp_sub_ring(bp);

			cpr->cp_ring_arr[BNXT_RX_HDL] = cpr2;
			if (!cpr2)
				return -ENOMEM;
			cpr2->bnapi = bnapi;
		}
		if ((sh && i < bp->tx_nr_rings) ||
		    (!sh && i >= bp->rx_nr_rings)) {
			struct bnxt_cp_ring_info *cpr2 =
				bnxt_alloc_cp_sub_ring(bp);

			cpr->cp_ring_arr[BNXT_TX_HDL] = cpr2;
			if (!cpr2)
				return -ENOMEM;
			cpr2->bnapi = bnapi;
		}
	}
	return 0;
}

static void bnxt_init_ring_struct(struct bnxt *bp)
{
	int i;

	for (i = 0; i < bp->cp_nr_rings; i++) {
		struct bnxt_napi *bnapi = bp->bnapi[i];
		struct bnxt_ring_mem_info *rmem;
		struct bnxt_cp_ring_info *cpr;
		struct bnxt_rx_ring_info *rxr;
		struct bnxt_tx_ring_info *txr;
		struct bnxt_ring_struct *ring;

		if (!bnapi)
			continue;

		cpr = &bnapi->cp_ring;
		ring = &cpr->cp_ring_struct;
		rmem = &ring->ring_mem;
		rmem->nr_pages = bp->cp_nr_pages;
		rmem->page_size = HW_CMPD_RING_SIZE;
		rmem->pg_arr = (void **)cpr->cp_desc_ring;
		rmem->dma_arr = cpr->cp_desc_mapping;
		rmem->vmem_size = 0;

		rxr = bnapi->rx_ring;
		if (!rxr)
			goto skip_rx;

		ring = &rxr->rx_ring_struct;
		rmem = &ring->ring_mem;
		rmem->nr_pages = bp->rx_nr_pages;
		rmem->page_size = HW_RXBD_RING_SIZE;
		rmem->pg_arr = (void **)rxr->rx_desc_ring;
		rmem->dma_arr = rxr->rx_desc_mapping;
		rmem->vmem_size = SW_RXBD_RING_SIZE * bp->rx_nr_pages;
		rmem->vmem = (void **)&rxr->rx_buf_ring;

		ring = &rxr->rx_agg_ring_struct;
		rmem = &ring->ring_mem;
		rmem->nr_pages = bp->rx_agg_nr_pages;
		rmem->page_size = HW_RXBD_RING_SIZE;
		rmem->pg_arr = (void **)rxr->rx_agg_desc_ring;
		rmem->dma_arr = rxr->rx_agg_desc_mapping;
		rmem->vmem_size = SW_RXBD_AGG_RING_SIZE * bp->rx_agg_nr_pages;
		rmem->vmem = (void **)&rxr->rx_agg_ring;

skip_rx:
		txr = bnapi->tx_ring;
		if (!txr)
			continue;

		ring = &txr->tx_ring_struct;
		rmem = &ring->ring_mem;
		rmem->nr_pages = bp->tx_nr_pages;
		rmem->page_size = HW_RXBD_RING_SIZE;
		rmem->pg_arr = (void **)txr->tx_desc_ring;
		rmem->dma_arr = txr->tx_desc_mapping;
		rmem->vmem_size = SW_TXBD_RING_SIZE * bp->tx_nr_pages;
		rmem->vmem = (void **)&txr->tx_buf_ring;
	}
}

static void bnxt_init_rxbd_pages(struct bnxt_ring_struct *ring, u32 type)
{
	int i;
	u32 prod;
	struct rx_bd **rx_buf_ring;

	rx_buf_ring = (struct rx_bd **)ring->ring_mem.pg_arr;
	for (i = 0, prod = 0; i < ring->ring_mem.nr_pages; i++) {
		int j;
		struct rx_bd *rxbd;

		rxbd = rx_buf_ring[i];
		if (!rxbd)
			continue;

		for (j = 0; j < RX_DESC_CNT; j++, rxbd++, prod++) {
			rxbd->rx_bd_len_flags_type = cpu_to_le32(type);
			rxbd->rx_bd_opaque = prod;
		}
	}
}

static int bnxt_init_one_rx_ring(struct bnxt *bp, int ring_nr)
{
	struct net_device *dev = bp->dev;
	struct bnxt_rx_ring_info *rxr;
	struct bnxt_ring_struct *ring;
	u32 prod, type;
	int i;

	type = (bp->rx_buf_use_size << RX_BD_LEN_SHIFT) |
		RX_BD_TYPE_RX_PACKET_BD | RX_BD_FLAGS_EOP;

	if (NET_IP_ALIGN == 2)
		type |= RX_BD_FLAGS_SOP;

	rxr = &bp->rx_ring[ring_nr];
	ring = &rxr->rx_ring_struct;
	bnxt_init_rxbd_pages(ring, type);

	if (BNXT_RX_PAGE_MODE(bp) && bp->xdp_prog) {
		rxr->xdp_prog = bpf_prog_add(bp->xdp_prog, 1);
		if (IS_ERR(rxr->xdp_prog)) {
			int rc = PTR_ERR(rxr->xdp_prog);

			rxr->xdp_prog = NULL;
			return rc;
		}
	}
	prod = rxr->rx_prod;
	for (i = 0; i < bp->rx_ring_size; i++) {
		if (bnxt_alloc_rx_data(bp, rxr, prod, GFP_KERNEL) != 0) {
			netdev_warn(dev, "init'ed rx ring %d with %d/%d skbs only\n",
				    ring_nr, i, bp->rx_ring_size);
			break;
		}
		prod = NEXT_RX(prod);
	}
	rxr->rx_prod = prod;
	ring->fw_ring_id = INVALID_HW_RING_ID;

	ring = &rxr->rx_agg_ring_struct;
	ring->fw_ring_id = INVALID_HW_RING_ID;

	if (!(bp->flags & BNXT_FLAG_AGG_RINGS))
		return 0;

	type = ((u32)BNXT_RX_PAGE_SIZE << RX_BD_LEN_SHIFT) |
		RX_BD_TYPE_RX_AGG_BD | RX_BD_FLAGS_SOP;

	bnxt_init_rxbd_pages(ring, type);

	prod = rxr->rx_agg_prod;
	for (i = 0; i < bp->rx_agg_ring_size; i++) {
		if (bnxt_alloc_rx_page(bp, rxr, prod, GFP_KERNEL) != 0) {
			netdev_warn(dev, "init'ed rx ring %d with %d/%d pages only\n",
				    ring_nr, i, bp->rx_ring_size);
			break;
		}
		prod = NEXT_RX_AGG(prod);
	}
	rxr->rx_agg_prod = prod;

	if (bp->flags & BNXT_FLAG_TPA) {
		if (rxr->rx_tpa) {
			u8 *data;
			dma_addr_t mapping;

			for (i = 0; i < MAX_TPA; i++) {
				data = __bnxt_alloc_rx_data(bp, &mapping,
							    GFP_KERNEL);
				if (!data)
					return -ENOMEM;

				rxr->rx_tpa[i].data = data;
				rxr->rx_tpa[i].data_ptr = data + bp->rx_offset;
				rxr->rx_tpa[i].mapping = mapping;
			}
		} else {
			netdev_err(bp->dev, "No resource allocated for LRO/GRO\n");
			return -ENOMEM;
		}
	}

	return 0;
}

static void bnxt_init_cp_rings(struct bnxt *bp)
{
	int i, j;

	for (i = 0; i < bp->cp_nr_rings; i++) {
		struct bnxt_cp_ring_info *cpr = &bp->bnapi[i]->cp_ring;
		struct bnxt_ring_struct *ring = &cpr->cp_ring_struct;

		ring->fw_ring_id = INVALID_HW_RING_ID;
		cpr->rx_ring_coal.coal_ticks = bp->rx_coal.coal_ticks;
		cpr->rx_ring_coal.coal_bufs = bp->rx_coal.coal_bufs;
		for (j = 0; j < 2; j++) {
			struct bnxt_cp_ring_info *cpr2 = cpr->cp_ring_arr[j];

			if (!cpr2)
				continue;

			ring = &cpr2->cp_ring_struct;
			ring->fw_ring_id = INVALID_HW_RING_ID;
			cpr2->rx_ring_coal.coal_ticks = bp->rx_coal.coal_ticks;
			cpr2->rx_ring_coal.coal_bufs = bp->rx_coal.coal_bufs;
		}
	}
}

static int bnxt_init_rx_rings(struct bnxt *bp)
{
	int i, rc = 0;

	if (BNXT_RX_PAGE_MODE(bp)) {
		bp->rx_offset = NET_IP_ALIGN + XDP_PACKET_HEADROOM;
		bp->rx_dma_offset = XDP_PACKET_HEADROOM;
	} else {
		bp->rx_offset = BNXT_RX_OFFSET;
		bp->rx_dma_offset = BNXT_RX_DMA_OFFSET;
	}

	for (i = 0; i < bp->rx_nr_rings; i++) {
		rc = bnxt_init_one_rx_ring(bp, i);
		if (rc)
			break;
	}

	return rc;
}

static int bnxt_init_tx_rings(struct bnxt *bp)
{
	u16 i;

	bp->tx_wake_thresh = max_t(int, bp->tx_ring_size / 2,
				   MAX_SKB_FRAGS + 1);

	for (i = 0; i < bp->tx_nr_rings; i++) {
		struct bnxt_tx_ring_info *txr = &bp->tx_ring[i];
		struct bnxt_ring_struct *ring = &txr->tx_ring_struct;

		ring->fw_ring_id = INVALID_HW_RING_ID;
	}

	return 0;
}

static void bnxt_free_ring_grps(struct bnxt *bp)
{
	kfree(bp->grp_info);
	bp->grp_info = NULL;
}

static int bnxt_init_ring_grps(struct bnxt *bp, bool irq_re_init)
{
	int i;

	if (irq_re_init) {
		bp->grp_info = kcalloc(bp->cp_nr_rings,
				       sizeof(struct bnxt_ring_grp_info),
				       GFP_KERNEL);
		if (!bp->grp_info)
			return -ENOMEM;
	}
	for (i = 0; i < bp->cp_nr_rings; i++) {
		if (irq_re_init)
			bp->grp_info[i].fw_stats_ctx = INVALID_HW_RING_ID;
		bp->grp_info[i].fw_grp_id = INVALID_HW_RING_ID;
		bp->grp_info[i].rx_fw_ring_id = INVALID_HW_RING_ID;
		bp->grp_info[i].agg_fw_ring_id = INVALID_HW_RING_ID;
		bp->grp_info[i].cp_fw_ring_id = INVALID_HW_RING_ID;
	}
	return 0;
}

static void bnxt_free_vnics(struct bnxt *bp)
{
	kfree(bp->vnic_info);
	bp->vnic_info = NULL;
	bp->nr_vnics = 0;
}

static int bnxt_alloc_vnics(struct bnxt *bp)
{
	int num_vnics = 1;

#ifdef CONFIG_RFS_ACCEL
	if (bp->flags & BNXT_FLAG_RFS)
		num_vnics += bp->rx_nr_rings;
#endif

	if (BNXT_CHIP_TYPE_NITRO_A0(bp))
		num_vnics++;

	bp->vnic_info = kcalloc(num_vnics, sizeof(struct bnxt_vnic_info),
				GFP_KERNEL);
	if (!bp->vnic_info)
		return -ENOMEM;

	bp->nr_vnics = num_vnics;
	return 0;
}

static void bnxt_init_vnics(struct bnxt *bp)
{
	int i;

	for (i = 0; i < bp->nr_vnics; i++) {
		struct bnxt_vnic_info *vnic = &bp->vnic_info[i];
		int j;

		vnic->fw_vnic_id = INVALID_HW_RING_ID;
		for (j = 0; j < BNXT_MAX_CTX_PER_VNIC; j++)
			vnic->fw_rss_cos_lb_ctx[j] = INVALID_HW_RING_ID;

		vnic->fw_l2_ctx_id = INVALID_HW_RING_ID;

		if (bp->vnic_info[i].rss_hash_key) {
			if (i == 0)
				prandom_bytes(vnic->rss_hash_key,
					      HW_HASH_KEY_SIZE);
			else
				memcpy(vnic->rss_hash_key,
				       bp->vnic_info[0].rss_hash_key,
				       HW_HASH_KEY_SIZE);
		}
	}
}

static int bnxt_calc_nr_ring_pages(u32 ring_size, int desc_per_pg)
{
	int pages;

	pages = ring_size / desc_per_pg;

	if (!pages)
		return 1;

	pages++;

	while (pages & (pages - 1))
		pages++;

	return pages;
}
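
/* bnxt_calc_nr_ring_pages() rounds up to a power-of-two page count and
 * reserves one extra descriptor page beyond the exact quotient.  For
 * example, with 256 descriptors per page, a ring size of 600 gives
 * 600 / 256 = 2 pages, bumped to 3, then rounded up to 4.
 */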

void bnxt_set_tpa_flags(struct bnxt *bp)
{
	bp->flags &= ~BNXT_FLAG_TPA;
	if (bp->flags & BNXT_FLAG_NO_AGG_RINGS)
		return;
	if (bp->dev->features & NETIF_F_LRO)
		bp->flags |= BNXT_FLAG_LRO;
	else if (bp->dev->features & NETIF_F_GRO_HW)
		bp->flags |= BNXT_FLAG_GRO;
}

/* bp->rx_ring_size, bp->tx_ring_size, dev->mtu, BNXT_FLAG_{G|L}RO flags must
 * be set on entry.
 */
void bnxt_set_ring_params(struct bnxt *bp)
{
	u32 ring_size, rx_size, rx_space;
	u32 agg_factor = 0, agg_ring_size = 0;

	/* 8 for CRC and VLAN */
	rx_size = SKB_DATA_ALIGN(bp->dev->mtu + ETH_HLEN + NET_IP_ALIGN + 8);

	rx_space = rx_size + NET_SKB_PAD +
		SKB_DATA_ALIGN(sizeof(struct skb_shared_info));

	bp->rx_copy_thresh = BNXT_RX_COPY_THRESH;
	ring_size = bp->rx_ring_size;
	bp->rx_agg_ring_size = 0;
	bp->rx_agg_nr_pages = 0;

	if (bp->flags & BNXT_FLAG_TPA)
		agg_factor = min_t(u32, 4, 65536 / BNXT_RX_PAGE_SIZE);

	bp->flags &= ~BNXT_FLAG_JUMBO;
	if (rx_space > PAGE_SIZE && !(bp->flags & BNXT_FLAG_NO_AGG_RINGS)) {
		u32 jumbo_factor;

		bp->flags |= BNXT_FLAG_JUMBO;
		jumbo_factor = PAGE_ALIGN(bp->dev->mtu - 40) >> PAGE_SHIFT;
		if (jumbo_factor > agg_factor)
			agg_factor = jumbo_factor;
	}
	agg_ring_size = ring_size * agg_factor;

	if (agg_ring_size) {
		bp->rx_agg_nr_pages = bnxt_calc_nr_ring_pages(agg_ring_size,
							RX_DESC_CNT);
		if (bp->rx_agg_nr_pages > MAX_RX_AGG_PAGES) {
			u32 tmp = agg_ring_size;

			bp->rx_agg_nr_pages = MAX_RX_AGG_PAGES;
			agg_ring_size = MAX_RX_AGG_PAGES * RX_DESC_CNT - 1;
			netdev_warn(bp->dev, "rx agg ring size %d reduced to %d.\n",
				    tmp, agg_ring_size);
		}
		bp->rx_agg_ring_size = agg_ring_size;
		bp->rx_agg_ring_mask = (bp->rx_agg_nr_pages * RX_DESC_CNT) - 1;
		rx_size = SKB_DATA_ALIGN(BNXT_RX_COPY_THRESH + NET_IP_ALIGN);
		rx_space = rx_size + NET_SKB_PAD +
			SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
	}

	bp->rx_buf_use_size = rx_size;
	bp->rx_buf_size = rx_space;

	bp->rx_nr_pages = bnxt_calc_nr_ring_pages(ring_size, RX_DESC_CNT);
	bp->rx_ring_mask = (bp->rx_nr_pages * RX_DESC_CNT) - 1;

	ring_size = bp->tx_ring_size;
	bp->tx_nr_pages = bnxt_calc_nr_ring_pages(ring_size, TX_DESC_CNT);
	bp->tx_ring_mask = (bp->tx_nr_pages * TX_DESC_CNT) - 1;

	ring_size = bp->rx_ring_size * (2 + agg_factor) + bp->tx_ring_size;
	bp->cp_ring_size = ring_size;

	bp->cp_nr_pages = bnxt_calc_nr_ring_pages(ring_size, CP_DESC_CNT);
	if (bp->cp_nr_pages > MAX_CP_PAGES) {
		bp->cp_nr_pages = MAX_CP_PAGES;
		bp->cp_ring_size = MAX_CP_PAGES * CP_DESC_CNT - 1;
		netdev_warn(bp->dev, "completion ring size %d reduced to %d.\n",
			    ring_size, bp->cp_ring_size);
	}
	bp->cp_bit = bp->cp_nr_pages * CP_DESC_CNT;
	bp->cp_ring_mask = bp->cp_bit - 1;
}
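
/* Example sizing, assuming a 1500-byte MTU, NET_IP_ALIGN == 2 and 64-byte
 * cache lines: rx_size = SKB_DATA_ALIGN(1500 + 14 + 2 + 8) = 1536 bytes,
 * and rx_space adds NET_SKB_PAD plus the cache-aligned skb_shared_info on
 * top of that.
 */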

/* Changing allocation mode of RX rings.
 * TODO: Update when extending xdp_rxq_info to support allocation modes.
 */
int bnxt_set_rx_skb_mode(struct bnxt *bp, bool page_mode)
{
	if (page_mode) {
		if (bp->dev->mtu > BNXT_MAX_PAGE_MODE_MTU)
			return -EOPNOTSUPP;
		bp->dev->max_mtu =
			min_t(u16, bp->max_mtu, BNXT_MAX_PAGE_MODE_MTU);
		bp->flags &= ~BNXT_FLAG_AGG_RINGS;
		bp->flags |= BNXT_FLAG_NO_AGG_RINGS | BNXT_FLAG_RX_PAGE_MODE;
		bp->rx_dir = DMA_BIDIRECTIONAL;
		bp->rx_skb_func = bnxt_rx_page_skb;
		/* Disable LRO or GRO_HW */
		netdev_update_features(bp->dev);
	} else {
		bp->dev->max_mtu = bp->max_mtu;
		bp->flags &= ~BNXT_FLAG_RX_PAGE_MODE;
		bp->rx_dir = DMA_FROM_DEVICE;
		bp->rx_skb_func = bnxt_rx_skb;
	}
	return 0;
}
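
/* Page mode is used with XDP: each rx buffer becomes a full page mapped
 * DMA_BIDIRECTIONAL so a program may rewrite the packet in place,
 * aggregation rings are disabled (hence no jumbo frames, LRO or GRO_HW),
 * and the MTU is capped at BNXT_MAX_PAGE_MODE_MTU.
 */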

static void bnxt_free_vnic_attributes(struct bnxt *bp)
{
	int i;
	struct bnxt_vnic_info *vnic;
	struct pci_dev *pdev = bp->pdev;

	if (!bp->vnic_info)
		return;

	for (i = 0; i < bp->nr_vnics; i++) {
		vnic = &bp->vnic_info[i];

		kfree(vnic->fw_grp_ids);
		vnic->fw_grp_ids = NULL;

		kfree(vnic->uc_list);
		vnic->uc_list = NULL;

		if (vnic->mc_list) {
			dma_free_coherent(&pdev->dev, vnic->mc_list_size,
					  vnic->mc_list, vnic->mc_list_mapping);
			vnic->mc_list = NULL;
		}

		if (vnic->rss_table) {
			dma_free_coherent(&pdev->dev, PAGE_SIZE,
					  vnic->rss_table,
					  vnic->rss_table_dma_addr);
			vnic->rss_table = NULL;
		}

		vnic->rss_hash_key = NULL;
		vnic->flags = 0;
	}
}

static int bnxt_alloc_vnic_attributes(struct bnxt *bp)
{
	int i, rc = 0, size;
	struct bnxt_vnic_info *vnic;
	struct pci_dev *pdev = bp->pdev;
	int max_rings;

	for (i = 0; i < bp->nr_vnics; i++) {
		vnic = &bp->vnic_info[i];

		if (vnic->flags & BNXT_VNIC_UCAST_FLAG) {
			int mem_size = (BNXT_MAX_UC_ADDRS - 1) * ETH_ALEN;

			if (mem_size > 0) {
				vnic->uc_list = kmalloc(mem_size, GFP_KERNEL);
				if (!vnic->uc_list) {
					rc = -ENOMEM;
					goto out;
				}
			}
		}

		if (vnic->flags & BNXT_VNIC_MCAST_FLAG) {
			vnic->mc_list_size = BNXT_MAX_MC_ADDRS * ETH_ALEN;
			vnic->mc_list =
				dma_alloc_coherent(&pdev->dev,
						   vnic->mc_list_size,
						   &vnic->mc_list_mapping,
						   GFP_KERNEL);
			if (!vnic->mc_list) {
				rc = -ENOMEM;
				goto out;
			}
		}

		if (bp->flags & BNXT_FLAG_CHIP_P5)
			goto vnic_skip_grps;

		if (vnic->flags & BNXT_VNIC_RSS_FLAG)
			max_rings = bp->rx_nr_rings;
		else
			max_rings = 1;

		vnic->fw_grp_ids = kcalloc(max_rings, sizeof(u16), GFP_KERNEL);
		if (!vnic->fw_grp_ids) {
			rc = -ENOMEM;
			goto out;
		}
vnic_skip_grps:
		if ((bp->flags & BNXT_FLAG_NEW_RSS_CAP) &&
		    !(vnic->flags & BNXT_VNIC_RSS_FLAG))
			continue;

		/* Allocate rss table and hash key */
		vnic->rss_table = dma_alloc_coherent(&pdev->dev, PAGE_SIZE,
						     &vnic->rss_table_dma_addr,
						     GFP_KERNEL);
		if (!vnic->rss_table) {
			rc = -ENOMEM;
			goto out;
		}

		size = L1_CACHE_ALIGN(HW_HASH_INDEX_SIZE * sizeof(u16));

		vnic->rss_hash_key = ((void *)vnic->rss_table) + size;
		vnic->rss_hash_key_dma_addr = vnic->rss_table_dma_addr + size;
	}
	return 0;

out:
	return rc;
}

static void bnxt_free_hwrm_resources(struct bnxt *bp)
{
	struct pci_dev *pdev = bp->pdev;

	if (bp->hwrm_cmd_resp_addr) {
		dma_free_coherent(&pdev->dev, PAGE_SIZE, bp->hwrm_cmd_resp_addr,
				  bp->hwrm_cmd_resp_dma_addr);
		bp->hwrm_cmd_resp_addr = NULL;
	}
}

static int bnxt_alloc_hwrm_resources(struct bnxt *bp)
{
	struct pci_dev *pdev = bp->pdev;

	bp->hwrm_cmd_resp_addr = dma_alloc_coherent(&pdev->dev, PAGE_SIZE,
						    &bp->hwrm_cmd_resp_dma_addr,
						    GFP_KERNEL);
	if (!bp->hwrm_cmd_resp_addr)
		return -ENOMEM;

	return 0;
}

static void bnxt_free_hwrm_short_cmd_req(struct bnxt *bp)
{
	if (bp->hwrm_short_cmd_req_addr) {
		struct pci_dev *pdev = bp->pdev;

		dma_free_coherent(&pdev->dev, bp->hwrm_max_ext_req_len,
				  bp->hwrm_short_cmd_req_addr,
				  bp->hwrm_short_cmd_req_dma_addr);
		bp->hwrm_short_cmd_req_addr = NULL;
	}
}

static int bnxt_alloc_hwrm_short_cmd_req(struct bnxt *bp)
{
	struct pci_dev *pdev = bp->pdev;

	bp->hwrm_short_cmd_req_addr =
		dma_alloc_coherent(&pdev->dev, bp->hwrm_max_ext_req_len,
				   &bp->hwrm_short_cmd_req_dma_addr,
				   GFP_KERNEL);
	if (!bp->hwrm_short_cmd_req_addr)
		return -ENOMEM;

	return 0;
}

static void bnxt_free_stats(struct bnxt *bp)
{
	u32 size, i;
	struct pci_dev *pdev = bp->pdev;

	bp->flags &= ~BNXT_FLAG_PORT_STATS;
	bp->flags &= ~BNXT_FLAG_PORT_STATS_EXT;

	if (bp->hw_rx_port_stats) {
		dma_free_coherent(&pdev->dev, bp->hw_port_stats_size,
				  bp->hw_rx_port_stats,
				  bp->hw_rx_port_stats_map);
		bp->hw_rx_port_stats = NULL;
	}

	if (bp->hw_tx_port_stats_ext) {
		dma_free_coherent(&pdev->dev, sizeof(struct tx_port_stats_ext),
				  bp->hw_tx_port_stats_ext,
				  bp->hw_tx_port_stats_ext_map);
		bp->hw_tx_port_stats_ext = NULL;
	}

	if (bp->hw_rx_port_stats_ext) {
		dma_free_coherent(&pdev->dev, sizeof(struct rx_port_stats_ext),
				  bp->hw_rx_port_stats_ext,
				  bp->hw_rx_port_stats_ext_map);
		bp->hw_rx_port_stats_ext = NULL;
	}

	if (!bp->bnapi)
		return;

	size = sizeof(struct ctx_hw_stats);

	for (i = 0; i < bp->cp_nr_rings; i++) {
		struct bnxt_napi *bnapi = bp->bnapi[i];
		struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;

		if (cpr->hw_stats) {
			dma_free_coherent(&pdev->dev, size, cpr->hw_stats,
					  cpr->hw_stats_map);
			cpr->hw_stats = NULL;
		}
	}
}

static int bnxt_alloc_stats(struct bnxt *bp)
{
	u32 size, i;
	struct pci_dev *pdev = bp->pdev;

	size = sizeof(struct ctx_hw_stats);

	for (i = 0; i < bp->cp_nr_rings; i++) {
		struct bnxt_napi *bnapi = bp->bnapi[i];
		struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;

		cpr->hw_stats = dma_alloc_coherent(&pdev->dev, size,
						   &cpr->hw_stats_map,
						   GFP_KERNEL);
		if (!cpr->hw_stats)
			return -ENOMEM;

		cpr->hw_stats_ctx_id = INVALID_STATS_CTX_ID;
	}

	if (BNXT_PF(bp) && bp->chip_num != CHIP_NUM_58700) {
		bp->hw_port_stats_size = sizeof(struct rx_port_stats) +
					 sizeof(struct tx_port_stats) + 1024;

		bp->hw_rx_port_stats =
			dma_alloc_coherent(&pdev->dev, bp->hw_port_stats_size,
					   &bp->hw_rx_port_stats_map,
					   GFP_KERNEL);
		if (!bp->hw_rx_port_stats)
			return -ENOMEM;

		bp->hw_tx_port_stats = (void *)(bp->hw_rx_port_stats + 1) +
				       512;
		bp->hw_tx_port_stats_map = bp->hw_rx_port_stats_map +
					   sizeof(struct rx_port_stats) + 512;
		bp->flags |= BNXT_FLAG_PORT_STATS;

		/* Display extended statistics only if FW supports it */
		if (bp->hwrm_spec_code < 0x10804 ||
		    bp->hwrm_spec_code == 0x10900)
			return 0;

		bp->hw_rx_port_stats_ext =
			dma_zalloc_coherent(&pdev->dev,
					    sizeof(struct rx_port_stats_ext),
					    &bp->hw_rx_port_stats_ext_map,
					    GFP_KERNEL);
		if (!bp->hw_rx_port_stats_ext)
			return 0;

		if (bp->hwrm_spec_code >= 0x10902) {
			bp->hw_tx_port_stats_ext =
				dma_zalloc_coherent(&pdev->dev,
					    sizeof(struct tx_port_stats_ext),
					    &bp->hw_tx_port_stats_ext_map,
					    GFP_KERNEL);
		}
		bp->flags |= BNXT_FLAG_PORT_STATS_EXT;
	}
	return 0;
}

static void bnxt_clear_ring_indices(struct bnxt *bp)
{
	int i;

	if (!bp->bnapi)
		return;

	for (i = 0; i < bp->cp_nr_rings; i++) {
		struct bnxt_napi *bnapi = bp->bnapi[i];
		struct bnxt_cp_ring_info *cpr;
		struct bnxt_rx_ring_info *rxr;
		struct bnxt_tx_ring_info *txr;

		if (!bnapi)
			continue;

		cpr = &bnapi->cp_ring;
		cpr->cp_raw_cons = 0;

		txr = bnapi->tx_ring;
		if (txr) {
			txr->tx_prod = 0;
			txr->tx_cons = 0;
		}

		rxr = bnapi->rx_ring;
		if (rxr) {
			rxr->rx_prod = 0;
			rxr->rx_agg_prod = 0;
			rxr->rx_sw_agg_prod = 0;
			rxr->rx_next_cons = 0;
		}
	}
}

static void bnxt_free_ntp_fltrs(struct bnxt *bp, bool irq_reinit)
{
#ifdef CONFIG_RFS_ACCEL
	int i;

	/* Under rtnl_lock and all our NAPIs have been disabled.  It's
	 * safe to delete the hash table.
	 */
	for (i = 0; i < BNXT_NTP_FLTR_HASH_SIZE; i++) {
		struct hlist_head *head;
		struct hlist_node *tmp;
		struct bnxt_ntuple_filter *fltr;

		head = &bp->ntp_fltr_hash_tbl[i];
		hlist_for_each_entry_safe(fltr, tmp, head, hash) {
			hlist_del(&fltr->hash);
			kfree(fltr);
		}
	}
	if (irq_reinit) {
		kfree(bp->ntp_fltr_bmap);
		bp->ntp_fltr_bmap = NULL;
	}
	bp->ntp_fltr_count = 0;
#endif
}

static int bnxt_alloc_ntp_fltrs(struct bnxt *bp)
{
#ifdef CONFIG_RFS_ACCEL
	int i, rc = 0;

	if (!(bp->flags & BNXT_FLAG_RFS))
		return 0;

	for (i = 0; i < BNXT_NTP_FLTR_HASH_SIZE; i++)
		INIT_HLIST_HEAD(&bp->ntp_fltr_hash_tbl[i]);

	bp->ntp_fltr_count = 0;
	bp->ntp_fltr_bmap = kcalloc(BITS_TO_LONGS(BNXT_NTP_FLTR_MAX_FLTR),
				    sizeof(long),
				    GFP_KERNEL);

	if (!bp->ntp_fltr_bmap)
		rc = -ENOMEM;

	return rc;
#else
	return 0;
#endif
}

static void bnxt_free_mem(struct bnxt *bp, bool irq_re_init)
{
	bnxt_free_vnic_attributes(bp);
	bnxt_free_tx_rings(bp);
	bnxt_free_rx_rings(bp);
	bnxt_free_cp_rings(bp);
	bnxt_free_ntp_fltrs(bp, irq_re_init);
	if (irq_re_init) {
		bnxt_free_stats(bp);
		bnxt_free_ring_grps(bp);
		bnxt_free_vnics(bp);
		kfree(bp->tx_ring_map);
		bp->tx_ring_map = NULL;
		kfree(bp->tx_ring);
		bp->tx_ring = NULL;
		kfree(bp->rx_ring);
		bp->rx_ring = NULL;
		kfree(bp->bnapi);
		bp->bnapi = NULL;
	} else {
		bnxt_clear_ring_indices(bp);
	}
}

static int bnxt_alloc_mem(struct bnxt *bp, bool irq_re_init)
{
	int i, j, rc, size, arr_size;
	void *bnapi;

	if (irq_re_init) {
		/* Allocate bnapi mem pointer array and mem block for
		 * all queues
		 */
		arr_size = L1_CACHE_ALIGN(sizeof(struct bnxt_napi *) *
				bp->cp_nr_rings);
		size = L1_CACHE_ALIGN(sizeof(struct bnxt_napi));
		bnapi = kzalloc(arr_size + size * bp->cp_nr_rings, GFP_KERNEL);
		if (!bnapi)
			return -ENOMEM;

		bp->bnapi = bnapi;
		bnapi += arr_size;
		for (i = 0; i < bp->cp_nr_rings; i++, bnapi += size) {
			bp->bnapi[i] = bnapi;
			bp->bnapi[i]->index = i;
			bp->bnapi[i]->bp = bp;
			if (bp->flags & BNXT_FLAG_CHIP_P5) {
				struct bnxt_cp_ring_info *cpr =
					&bp->bnapi[i]->cp_ring;

				cpr->cp_ring_struct.ring_mem.flags =
					BNXT_RMEM_RING_PTE_FLAG;
			}
		}

		bp->rx_ring = kcalloc(bp->rx_nr_rings,
				      sizeof(struct bnxt_rx_ring_info),
				      GFP_KERNEL);
		if (!bp->rx_ring)
			return -ENOMEM;

		for (i = 0; i < bp->rx_nr_rings; i++) {
			struct bnxt_rx_ring_info *rxr = &bp->rx_ring[i];

			if (bp->flags & BNXT_FLAG_CHIP_P5) {
				rxr->rx_ring_struct.ring_mem.flags =
					BNXT_RMEM_RING_PTE_FLAG;
				rxr->rx_agg_ring_struct.ring_mem.flags =
					BNXT_RMEM_RING_PTE_FLAG;
			}
			rxr->bnapi = bp->bnapi[i];
			bp->bnapi[i]->rx_ring = &bp->rx_ring[i];
		}

		bp->tx_ring = kcalloc(bp->tx_nr_rings,
				      sizeof(struct bnxt_tx_ring_info),
				      GFP_KERNEL);
		if (!bp->tx_ring)
			return -ENOMEM;

		bp->tx_ring_map = kcalloc(bp->tx_nr_rings, sizeof(u16),
					  GFP_KERNEL);

		if (!bp->tx_ring_map)
			return -ENOMEM;

		if (bp->flags & BNXT_FLAG_SHARED_RINGS)
			j = 0;
		else
			j = bp->rx_nr_rings;

		for (i = 0; i < bp->tx_nr_rings; i++, j++) {
			struct bnxt_tx_ring_info *txr = &bp->tx_ring[i];

			if (bp->flags & BNXT_FLAG_CHIP_P5)
				txr->tx_ring_struct.ring_mem.flags =
					BNXT_RMEM_RING_PTE_FLAG;
			txr->bnapi = bp->bnapi[j];
			bp->bnapi[j]->tx_ring = txr;
			bp->tx_ring_map[i] = bp->tx_nr_rings_xdp + i;
			if (i >= bp->tx_nr_rings_xdp) {
				txr->txq_index = i - bp->tx_nr_rings_xdp;
				bp->bnapi[j]->tx_int = bnxt_tx_int;
			} else {
				bp->bnapi[j]->flags |= BNXT_NAPI_FLAG_XDP;
				bp->bnapi[j]->tx_int = bnxt_tx_int_xdp;
			}
		}

		rc = bnxt_alloc_stats(bp);
		if (rc)
			goto alloc_mem_err;

		rc = bnxt_alloc_ntp_fltrs(bp);
		if (rc)
			goto alloc_mem_err;

		rc = bnxt_alloc_vnics(bp);
		if (rc)
			goto alloc_mem_err;
	}

	bnxt_init_ring_struct(bp);

	rc = bnxt_alloc_rx_rings(bp);
	if (rc)
		goto alloc_mem_err;

	rc = bnxt_alloc_tx_rings(bp);
	if (rc)
		goto alloc_mem_err;

	rc = bnxt_alloc_cp_rings(bp);
	if (rc)
		goto alloc_mem_err;

	bp->vnic_info[0].flags |= BNXT_VNIC_RSS_FLAG | BNXT_VNIC_MCAST_FLAG |
				  BNXT_VNIC_UCAST_FLAG;
	rc = bnxt_alloc_vnic_attributes(bp);
	if (rc)
		goto alloc_mem_err;
	return 0;

alloc_mem_err:
	bnxt_free_mem(bp, true);
	return rc;
}
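
/* Ring-to-NAPI mapping above: rx ring i always pairs with bnapi[i].  With
 * shared completion rings, tx ring i shares bnapi[i] as well; otherwise tx
 * rings start at bnapi[rx_nr_rings].  Tx rings with index below
 * tx_nr_rings_xdp are XDP rings and use bnxt_tx_int_xdp as their
 * completion handler.
 */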
3662
static void bnxt_disable_int(struct bnxt *bp)
{
	int i;

	if (!bp->bnapi)
		return;

	for (i = 0; i < bp->cp_nr_rings; i++) {
		struct bnxt_napi *bnapi = bp->bnapi[i];
		struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
		struct bnxt_ring_struct *ring = &cpr->cp_ring_struct;

		if (ring->fw_ring_id != INVALID_HW_RING_ID)
			bnxt_db_nq(bp, &cpr->cp_db, cpr->cp_raw_cons);
	}
}

static int bnxt_cp_num_to_irq_num(struct bnxt *bp, int n)
{
	struct bnxt_napi *bnapi = bp->bnapi[n];
	struct bnxt_cp_ring_info *cpr;

	cpr = &bnapi->cp_ring;
	return cpr->cp_ring_struct.map_idx;
}

static void bnxt_disable_int_sync(struct bnxt *bp)
{
	int i;

	atomic_inc(&bp->intr_sem);

	bnxt_disable_int(bp);
	for (i = 0; i < bp->cp_nr_rings; i++) {
		int map_idx = bnxt_cp_num_to_irq_num(bp, i);

		synchronize_irq(bp->irq_tbl[map_idx].vector);
	}
}

static void bnxt_enable_int(struct bnxt *bp)
{
	int i;

	atomic_set(&bp->intr_sem, 0);
	for (i = 0; i < bp->cp_nr_rings; i++) {
		struct bnxt_napi *bnapi = bp->bnapi[i];
		struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;

		bnxt_db_nq_arm(bp, &cpr->cp_db, cpr->cp_raw_cons);
	}
}

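/* HWRM (Hardware Resource Manager) is the firmware command interface used
 * throughout this driver.  Every request begins with a common "struct input"
 * header: the request type, an optional completion ring, the target function
 * ID, and the DMA address where firmware writes the response.
 * bnxt_hwrm_cmd_hdr_init() fills in that common header; the sequence number
 * is assigned later, when the message is actually sent.
 */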
void bnxt_hwrm_cmd_hdr_init(struct bnxt *bp, void *request, u16 req_type,
			    u16 cmpl_ring, u16 target_id)
{
	struct input *req = request;

	req->req_type = cpu_to_le16(req_type);
	req->cmpl_ring = cpu_to_le16(cmpl_ring);
	req->target_id = cpu_to_le16(target_id);
	req->resp_addr = cpu_to_le64(bp->hwrm_cmd_resp_dma_addr);
}

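/* Send one HWRM request and wait for the response.  The request is copied
 * into the BAR0 communication channel (using the short-command format with
 * a DMA buffer when the firmware requires it or when the request exceeds
 * BNXT_HWRM_MAX_REQ_LEN), the rest of the channel is zeroed, and the channel
 * doorbell at BAR0 offset 0x100 is rung.  Completion is detected either via
 * the completion ring interrupt (if cmpl_ring was set) or by polling the
 * response buffer until its length field and trailing valid byte have been
 * updated by firmware DMA.  Only one request may be outstanding at a time,
 * which is why all callers serialize on bp->hwrm_cmd_lock.
 */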
static int bnxt_hwrm_do_send_msg(struct bnxt *bp, void *msg, u32 msg_len,
				 int timeout, bool silent)
{
	int i, intr_process, rc, tmo_count;
	struct input *req = msg;
	u32 *data = msg;
	__le32 *resp_len;
	u8 *valid;
	u16 cp_ring_id, len = 0;
	struct hwrm_err_output *resp = bp->hwrm_cmd_resp_addr;
	u16 max_req_len = BNXT_HWRM_MAX_REQ_LEN;
	struct hwrm_short_input short_input = {0};

	req->seq_id = cpu_to_le16(bp->hwrm_cmd_seq++);
	memset(resp, 0, PAGE_SIZE);
	cp_ring_id = le16_to_cpu(req->cmpl_ring);
	intr_process = (cp_ring_id == INVALID_HW_RING_ID) ? 0 : 1;

	if (msg_len > BNXT_HWRM_MAX_REQ_LEN) {
		if (msg_len > bp->hwrm_max_ext_req_len ||
		    !bp->hwrm_short_cmd_req_addr)
			return -EINVAL;
	}

	if ((bp->fw_cap & BNXT_FW_CAP_SHORT_CMD) ||
	    msg_len > BNXT_HWRM_MAX_REQ_LEN) {
		void *short_cmd_req = bp->hwrm_short_cmd_req_addr;
		u16 max_msg_len;

		/* Set boundary for maximum extended request length for short
		 * cmd format. If passed up from device use the max supported
		 * internal req length.
		 */
		max_msg_len = bp->hwrm_max_ext_req_len;

		memcpy(short_cmd_req, req, msg_len);
		if (msg_len < max_msg_len)
			memset(short_cmd_req + msg_len, 0,
			       max_msg_len - msg_len);

		short_input.req_type = req->req_type;
		short_input.signature =
				cpu_to_le16(SHORT_REQ_SIGNATURE_SHORT_CMD);
		short_input.size = cpu_to_le16(msg_len);
		short_input.req_addr =
			cpu_to_le64(bp->hwrm_short_cmd_req_dma_addr);

		data = (u32 *)&short_input;
		msg_len = sizeof(short_input);

		/* Sync memory write before updating doorbell */
		wmb();

		max_req_len = BNXT_HWRM_SHORT_REQ_LEN;
	}

	/* Write request msg to hwrm channel */
	__iowrite32_copy(bp->bar0, data, msg_len / 4);

	for (i = msg_len; i < max_req_len; i += 4)
		writel(0, bp->bar0 + i);

	/* currently supports only one outstanding message */
	if (intr_process)
		bp->hwrm_intr_seq_id = le16_to_cpu(req->seq_id);

	/* Ring channel doorbell */
	writel(1, bp->bar0 + 0x100);

	if (!timeout)
		timeout = DFLT_HWRM_CMD_TIMEOUT;
	/* convert timeout to usec */
	timeout *= 1000;

	i = 0;
	/* Short timeout for the first few iterations:
	 * number of loops = number of loops for short timeout +
	 * number of loops for standard timeout.
	 */
	tmo_count = HWRM_SHORT_TIMEOUT_COUNTER;
	timeout = timeout - HWRM_SHORT_MIN_TIMEOUT * HWRM_SHORT_TIMEOUT_COUNTER;
	tmo_count += DIV_ROUND_UP(timeout, HWRM_MIN_TIMEOUT);
	resp_len = bp->hwrm_cmd_resp_addr + HWRM_RESP_LEN_OFFSET;
	if (intr_process) {
		/* Wait until hwrm response cmpl interrupt is processed */
		while (bp->hwrm_intr_seq_id != HWRM_SEQ_ID_INVALID &&
		       i++ < tmo_count) {
			/* on first few passes, just barely sleep */
			if (i < HWRM_SHORT_TIMEOUT_COUNTER)
				usleep_range(HWRM_SHORT_MIN_TIMEOUT,
					     HWRM_SHORT_MAX_TIMEOUT);
			else
				usleep_range(HWRM_MIN_TIMEOUT,
					     HWRM_MAX_TIMEOUT);
		}

		if (bp->hwrm_intr_seq_id != HWRM_SEQ_ID_INVALID) {
			netdev_err(bp->dev, "Resp cmpl intr err msg: 0x%x\n",
				   le16_to_cpu(req->req_type));
			return -1;
		}
		len = (le32_to_cpu(*resp_len) & HWRM_RESP_LEN_MASK) >>
		      HWRM_RESP_LEN_SFT;
		valid = bp->hwrm_cmd_resp_addr + len - 1;
	} else {
		int j;

		/* Check if response len is updated */
		for (i = 0; i < tmo_count; i++) {
			len = (le32_to_cpu(*resp_len) & HWRM_RESP_LEN_MASK) >>
			      HWRM_RESP_LEN_SFT;
			if (len)
				break;
			/* on first few passes, just barely sleep */
			if (i < DFLT_HWRM_CMD_TIMEOUT)
				usleep_range(HWRM_SHORT_MIN_TIMEOUT,
					     HWRM_SHORT_MAX_TIMEOUT);
			else
				usleep_range(HWRM_MIN_TIMEOUT,
					     HWRM_MAX_TIMEOUT);
		}

		if (i >= tmo_count) {
			netdev_err(bp->dev, "Error (timeout: %d) msg {0x%x 0x%x} len:%d\n",
				   HWRM_TOTAL_TIMEOUT(i),
				   le16_to_cpu(req->req_type),
				   le16_to_cpu(req->seq_id), len);
			return -1;
		}

		/* Last byte of resp contains valid bit */
		valid = bp->hwrm_cmd_resp_addr + len - 1;
		for (j = 0; j < HWRM_VALID_BIT_DELAY_USEC; j++) {
			/* make sure we read from updated DMA memory */
			dma_rmb();
			if (*valid)
				break;
			udelay(1);
		}

		if (j >= HWRM_VALID_BIT_DELAY_USEC) {
			netdev_err(bp->dev, "Error (timeout: %d) msg {0x%x 0x%x} len:%d v:%d\n",
				   HWRM_TOTAL_TIMEOUT(i),
				   le16_to_cpu(req->req_type),
				   le16_to_cpu(req->seq_id), len, *valid);
			return -1;
		}
	}

	/* Zero valid bit for compatibility.  Valid bit in an older spec
	 * may become a new field in a newer spec.  We must make sure that
	 * a new field not implemented by old spec will read zero.
	 */
	*valid = 0;
	rc = le16_to_cpu(resp->error_code);
	if (rc && !silent)
		netdev_err(bp->dev, "hwrm req_type 0x%x seq id 0x%x error 0x%x\n",
			   le16_to_cpu(resp->req_type),
			   le16_to_cpu(resp->seq_id), rc);
	return rc;
}

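/* The _hwrm_send_message() variants assume the caller already holds
 * bp->hwrm_cmd_lock (needed when the caller reads the shared response buffer
 * before releasing the lock); the hwrm_send_message() variants take and drop
 * the lock themselves.  The *_silent versions only suppress the error
 * message logged on failure.
 */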
int _hwrm_send_message(struct bnxt *bp, void *msg, u32 msg_len, int timeout)
{
	return bnxt_hwrm_do_send_msg(bp, msg, msg_len, timeout, false);
}

int _hwrm_send_message_silent(struct bnxt *bp, void *msg, u32 msg_len,
			      int timeout)
{
	return bnxt_hwrm_do_send_msg(bp, msg, msg_len, timeout, true);
}

int hwrm_send_message(struct bnxt *bp, void *msg, u32 msg_len, int timeout)
{
	int rc;

	mutex_lock(&bp->hwrm_cmd_lock);
	rc = _hwrm_send_message(bp, msg, msg_len, timeout);
	mutex_unlock(&bp->hwrm_cmd_lock);
	return rc;
}

int hwrm_send_message_silent(struct bnxt *bp, void *msg, u32 msg_len,
			     int timeout)
{
	int rc;

	mutex_lock(&bp->hwrm_cmd_lock);
	rc = bnxt_hwrm_do_send_msg(bp, msg, msg_len, timeout, true);
	mutex_unlock(&bp->hwrm_cmd_lock);
	return rc;
}

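/* Tell firmware which async completion events this driver wants forwarded.
 * The 256-bit bitmap starts from the static bnxt_async_events_arr[] list,
 * and the caller may OR in extra event IDs via @bmap/@bmap_size.
 */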
int bnxt_hwrm_func_rgtr_async_events(struct bnxt *bp, unsigned long *bmap,
				     int bmap_size)
{
	struct hwrm_func_drv_rgtr_input req = {0};
	DECLARE_BITMAP(async_events_bmap, 256);
	u32 *events = (u32 *)async_events_bmap;
	int i;

	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_DRV_RGTR, -1, -1);

	req.enables =
		cpu_to_le32(FUNC_DRV_RGTR_REQ_ENABLES_ASYNC_EVENT_FWD);

	memset(async_events_bmap, 0, sizeof(async_events_bmap));
	for (i = 0; i < ARRAY_SIZE(bnxt_async_events_arr); i++)
		__set_bit(bnxt_async_events_arr[i], async_events_bmap);

	if (bmap && bmap_size) {
		for (i = 0; i < bmap_size; i++) {
			if (test_bit(i, bmap))
				__set_bit(i, async_events_bmap);
		}
	}

	for (i = 0; i < 8; i++)
		req.async_event_fwd[i] |= cpu_to_le32(events[i]);

	return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
}

static int bnxt_hwrm_func_drv_rgtr(struct bnxt *bp)
{
	struct hwrm_func_drv_rgtr_output *resp = bp->hwrm_cmd_resp_addr;
	struct hwrm_func_drv_rgtr_input req = {0};
	int rc;

	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_DRV_RGTR, -1, -1);

	req.enables =
		cpu_to_le32(FUNC_DRV_RGTR_REQ_ENABLES_OS_TYPE |
			    FUNC_DRV_RGTR_REQ_ENABLES_VER);

	req.os_type = cpu_to_le16(FUNC_DRV_RGTR_REQ_OS_TYPE_LINUX);
	req.flags = cpu_to_le32(FUNC_DRV_RGTR_REQ_FLAGS_16BIT_VER_MODE);
	req.ver_maj_8b = DRV_VER_MAJ;
	req.ver_min_8b = DRV_VER_MIN;
	req.ver_upd_8b = DRV_VER_UPD;
	req.ver_maj = cpu_to_le16(DRV_VER_MAJ);
	req.ver_min = cpu_to_le16(DRV_VER_MIN);
	req.ver_upd = cpu_to_le16(DRV_VER_UPD);

	if (BNXT_PF(bp)) {
		u32 data[8];
		int i;

		memset(data, 0, sizeof(data));
		for (i = 0; i < ARRAY_SIZE(bnxt_vf_req_snif); i++) {
			u16 cmd = bnxt_vf_req_snif[i];
			unsigned int bit, idx;

			idx = cmd / 32;
			bit = cmd % 32;
			data[idx] |= 1 << bit;
		}

		for (i = 0; i < 8; i++)
			req.vf_req_fwd[i] = cpu_to_le32(data[i]);

		req.enables |=
			cpu_to_le32(FUNC_DRV_RGTR_REQ_ENABLES_VF_REQ_FWD);
	}

	mutex_lock(&bp->hwrm_cmd_lock);
	rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
	if (rc)
		rc = -EIO;
	else if (resp->flags &
		 cpu_to_le32(FUNC_DRV_RGTR_RESP_FLAGS_IF_CHANGE_SUPPORTED))
		bp->fw_cap |= BNXT_FW_CAP_IF_CHANGE;
	mutex_unlock(&bp->hwrm_cmd_lock);
	return rc;
}

static int bnxt_hwrm_func_drv_unrgtr(struct bnxt *bp)
{
	struct hwrm_func_drv_unrgtr_input req = {0};

	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_DRV_UNRGTR, -1, -1);
	return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
}

static int bnxt_hwrm_tunnel_dst_port_free(struct bnxt *bp, u8 tunnel_type)
{
	u32 rc = 0;
	struct hwrm_tunnel_dst_port_free_input req = {0};

	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_TUNNEL_DST_PORT_FREE, -1, -1);
	req.tunnel_type = tunnel_type;

	switch (tunnel_type) {
	case TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_VXLAN:
		req.tunnel_dst_port_id = bp->vxlan_fw_dst_port_id;
		break;
	case TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_GENEVE:
		req.tunnel_dst_port_id = bp->nge_fw_dst_port_id;
		break;
	default:
		break;
	}

	rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
	if (rc)
		netdev_err(bp->dev, "hwrm_tunnel_dst_port_free failed. rc:%d\n",
			   rc);
	return rc;
}

static int bnxt_hwrm_tunnel_dst_port_alloc(struct bnxt *bp, __be16 port,
					   u8 tunnel_type)
{
	u32 rc = 0;
	struct hwrm_tunnel_dst_port_alloc_input req = {0};
	struct hwrm_tunnel_dst_port_alloc_output *resp = bp->hwrm_cmd_resp_addr;

	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_TUNNEL_DST_PORT_ALLOC, -1, -1);

	req.tunnel_type = tunnel_type;
	req.tunnel_dst_port_val = port;

	mutex_lock(&bp->hwrm_cmd_lock);
	rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
	if (rc) {
		netdev_err(bp->dev, "hwrm_tunnel_dst_port_alloc failed. rc:%d\n",
			   rc);
		goto err_out;
	}

	switch (tunnel_type) {
	case TUNNEL_DST_PORT_ALLOC_REQ_TUNNEL_TYPE_VXLAN:
		bp->vxlan_fw_dst_port_id = resp->tunnel_dst_port_id;
		break;
	case TUNNEL_DST_PORT_ALLOC_REQ_TUNNEL_TYPE_GENEVE:
		bp->nge_fw_dst_port_id = resp->tunnel_dst_port_id;
		break;
	default:
		break;
	}

err_out:
	mutex_unlock(&bp->hwrm_cmd_lock);
	return rc;
}

static int bnxt_hwrm_cfa_l2_set_rx_mask(struct bnxt *bp, u16 vnic_id)
{
	struct hwrm_cfa_l2_set_rx_mask_input req = {0};
	struct bnxt_vnic_info *vnic = &bp->vnic_info[vnic_id];

	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_CFA_L2_SET_RX_MASK, -1, -1);
	req.vnic_id = cpu_to_le32(vnic->fw_vnic_id);

	req.num_mc_entries = cpu_to_le32(vnic->mc_list_count);
	req.mc_tbl_addr = cpu_to_le64(vnic->mc_list_mapping);
	req.mask = cpu_to_le32(vnic->rx_mask);
	return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
}

#ifdef CONFIG_RFS_ACCEL
static int bnxt_hwrm_cfa_ntuple_filter_free(struct bnxt *bp,
					    struct bnxt_ntuple_filter *fltr)
{
	struct hwrm_cfa_ntuple_filter_free_input req = {0};

	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_CFA_NTUPLE_FILTER_FREE, -1, -1);
	req.ntuple_filter_id = fltr->filter_id;
	return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
}

#define BNXT_NTP_FLTR_FLAGS					\
	(CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_L2_FILTER_ID |	\
	 CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_ETHERTYPE |	\
	 CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_SRC_MACADDR |	\
	 CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_IPADDR_TYPE |	\
	 CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_SRC_IPADDR |	\
	 CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_SRC_IPADDR_MASK |	\
	 CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_DST_IPADDR |	\
	 CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_DST_IPADDR_MASK |	\
	 CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_IP_PROTOCOL |	\
	 CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_SRC_PORT |	\
	 CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_SRC_PORT_MASK |	\
	 CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_DST_PORT |	\
	 CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_DST_PORT_MASK |	\
	 CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_DST_ID)

#define BNXT_NTP_TUNNEL_FLTR_FLAG				\
		CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_TUNNEL_TYPE

static int bnxt_hwrm_cfa_ntuple_filter_alloc(struct bnxt *bp,
					     struct bnxt_ntuple_filter *fltr)
{
	int rc = 0;
	struct hwrm_cfa_ntuple_filter_alloc_input req = {0};
	struct hwrm_cfa_ntuple_filter_alloc_output *resp =
		bp->hwrm_cmd_resp_addr;
	struct flow_keys *keys = &fltr->fkeys;
	struct bnxt_vnic_info *vnic = &bp->vnic_info[fltr->rxq + 1];

	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_CFA_NTUPLE_FILTER_ALLOC, -1, -1);
	req.l2_filter_id = bp->vnic_info[0].fw_l2_filter_id[fltr->l2_fltr_idx];

	req.enables = cpu_to_le32(BNXT_NTP_FLTR_FLAGS);

	req.ethertype = htons(ETH_P_IP);
	memcpy(req.src_macaddr, fltr->src_mac_addr, ETH_ALEN);
	req.ip_addr_type = CFA_NTUPLE_FILTER_ALLOC_REQ_IP_ADDR_TYPE_IPV4;
	req.ip_protocol = keys->basic.ip_proto;

	if (keys->basic.n_proto == htons(ETH_P_IPV6)) {
		int i;

		req.ethertype = htons(ETH_P_IPV6);
		req.ip_addr_type =
			CFA_NTUPLE_FILTER_ALLOC_REQ_IP_ADDR_TYPE_IPV6;
		*(struct in6_addr *)&req.src_ipaddr[0] =
			keys->addrs.v6addrs.src;
		*(struct in6_addr *)&req.dst_ipaddr[0] =
			keys->addrs.v6addrs.dst;
		for (i = 0; i < 4; i++) {
			req.src_ipaddr_mask[i] = cpu_to_be32(0xffffffff);
			req.dst_ipaddr_mask[i] = cpu_to_be32(0xffffffff);
		}
	} else {
		req.src_ipaddr[0] = keys->addrs.v4addrs.src;
		req.src_ipaddr_mask[0] = cpu_to_be32(0xffffffff);
		req.dst_ipaddr[0] = keys->addrs.v4addrs.dst;
		req.dst_ipaddr_mask[0] = cpu_to_be32(0xffffffff);
	}
	if (keys->control.flags & FLOW_DIS_ENCAPSULATION) {
		req.enables |= cpu_to_le32(BNXT_NTP_TUNNEL_FLTR_FLAG);
		req.tunnel_type =
			CFA_NTUPLE_FILTER_ALLOC_REQ_TUNNEL_TYPE_ANYTUNNEL;
	}

	req.src_port = keys->ports.src;
	req.src_port_mask = cpu_to_be16(0xffff);
	req.dst_port = keys->ports.dst;
	req.dst_port_mask = cpu_to_be16(0xffff);

	req.dst_id = cpu_to_le16(vnic->fw_vnic_id);
	mutex_lock(&bp->hwrm_cmd_lock);
	rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
	if (!rc)
		fltr->filter_id = resp->ntuple_filter_id;
	mutex_unlock(&bp->hwrm_cmd_lock);
	return rc;
}
#endif

static int bnxt_hwrm_set_vnic_filter(struct bnxt *bp, u16 vnic_id, u16 idx,
				     u8 *mac_addr)
{
	u32 rc = 0;
	struct hwrm_cfa_l2_filter_alloc_input req = {0};
	struct hwrm_cfa_l2_filter_alloc_output *resp = bp->hwrm_cmd_resp_addr;

	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_CFA_L2_FILTER_ALLOC, -1, -1);
	req.flags = cpu_to_le32(CFA_L2_FILTER_ALLOC_REQ_FLAGS_PATH_RX);
	if (!BNXT_CHIP_TYPE_NITRO_A0(bp))
		req.flags |=
			cpu_to_le32(CFA_L2_FILTER_ALLOC_REQ_FLAGS_OUTERMOST);
	req.dst_id = cpu_to_le16(bp->vnic_info[vnic_id].fw_vnic_id);
	req.enables =
		cpu_to_le32(CFA_L2_FILTER_ALLOC_REQ_ENABLES_L2_ADDR |
			    CFA_L2_FILTER_ALLOC_REQ_ENABLES_DST_ID |
			    CFA_L2_FILTER_ALLOC_REQ_ENABLES_L2_ADDR_MASK);
	memcpy(req.l2_addr, mac_addr, ETH_ALEN);
	req.l2_addr_mask[0] = 0xff;
	req.l2_addr_mask[1] = 0xff;
	req.l2_addr_mask[2] = 0xff;
	req.l2_addr_mask[3] = 0xff;
	req.l2_addr_mask[4] = 0xff;
	req.l2_addr_mask[5] = 0xff;

	mutex_lock(&bp->hwrm_cmd_lock);
	rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
	if (!rc)
		bp->vnic_info[vnic_id].fw_l2_filter_id[idx] =
							resp->l2_filter_id;
	mutex_unlock(&bp->hwrm_cmd_lock);
	return rc;
}

static int bnxt_hwrm_clear_vnic_filter(struct bnxt *bp)
{
	u16 i, j, num_of_vnics = 1; /* only vnic 0 supported */
	int rc = 0;

	/* Any associated ntuple filters will also be cleared by firmware. */
	mutex_lock(&bp->hwrm_cmd_lock);
	for (i = 0; i < num_of_vnics; i++) {
		struct bnxt_vnic_info *vnic = &bp->vnic_info[i];

		for (j = 0; j < vnic->uc_filter_count; j++) {
			struct hwrm_cfa_l2_filter_free_input req = {0};

			bnxt_hwrm_cmd_hdr_init(bp, &req,
					       HWRM_CFA_L2_FILTER_FREE, -1, -1);

			req.l2_filter_id = vnic->fw_l2_filter_id[j];

			rc = _hwrm_send_message(bp, &req, sizeof(req),
						HWRM_CMD_TIMEOUT);
		}
		vnic->uc_filter_count = 0;
	}
	mutex_unlock(&bp->hwrm_cmd_lock);

	return rc;
}

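/* Configure TPA (hardware receive aggregation) on a VNIC.  max_agg_segs is
 * derived from the MTU: with an MSS no larger than one aggregation page,
 * several segments fit per page and up to (MAX_SKB_FRAGS - 1) pages can be
 * filled; with a larger MSS, each segment spans multiple pages.  Either way
 * the result is converted with ilog2() because the firmware field is in
 * log2 units.
 */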
static int bnxt_hwrm_vnic_set_tpa(struct bnxt *bp, u16 vnic_id, u32 tpa_flags)
{
	struct bnxt_vnic_info *vnic = &bp->vnic_info[vnic_id];
	struct hwrm_vnic_tpa_cfg_input req = {0};

	if (vnic->fw_vnic_id == INVALID_HW_RING_ID)
		return 0;

	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_VNIC_TPA_CFG, -1, -1);

	if (tpa_flags) {
		u16 mss = bp->dev->mtu - 40;
		u32 nsegs, n, segs = 0, flags;

		flags = VNIC_TPA_CFG_REQ_FLAGS_TPA |
			VNIC_TPA_CFG_REQ_FLAGS_ENCAP_TPA |
			VNIC_TPA_CFG_REQ_FLAGS_RSC_WND_UPDATE |
			VNIC_TPA_CFG_REQ_FLAGS_AGG_WITH_ECN |
			VNIC_TPA_CFG_REQ_FLAGS_AGG_WITH_SAME_GRE_SEQ;
		if (tpa_flags & BNXT_FLAG_GRO)
			flags |= VNIC_TPA_CFG_REQ_FLAGS_GRO;

		req.flags = cpu_to_le32(flags);

		req.enables =
			cpu_to_le32(VNIC_TPA_CFG_REQ_ENABLES_MAX_AGG_SEGS |
				    VNIC_TPA_CFG_REQ_ENABLES_MAX_AGGS |
				    VNIC_TPA_CFG_REQ_ENABLES_MIN_AGG_LEN);

		/* Number of segs is in log2 units, and the first packet is
		 * not included in these units.
		 */
		if (mss <= BNXT_RX_PAGE_SIZE) {
			n = BNXT_RX_PAGE_SIZE / mss;
			nsegs = (MAX_SKB_FRAGS - 1) * n;
		} else {
			n = mss / BNXT_RX_PAGE_SIZE;
			if (mss & (BNXT_RX_PAGE_SIZE - 1))
				n++;
			nsegs = (MAX_SKB_FRAGS - n) / n;
		}

		segs = ilog2(nsegs);
		req.max_agg_segs = cpu_to_le16(segs);
		req.max_aggs = cpu_to_le16(VNIC_TPA_CFG_REQ_MAX_AGGS_MAX);

		req.min_agg_len = cpu_to_le32(512);
	}
	req.vnic_id = cpu_to_le16(vnic->fw_vnic_id);

	return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
}

static u16 bnxt_cp_ring_from_grp(struct bnxt *bp, struct bnxt_ring_struct *ring)
{
	struct bnxt_ring_grp_info *grp_info;

	grp_info = &bp->grp_info[ring->grp_idx];
	return grp_info->cp_fw_ring_id;
}

static u16 bnxt_cp_ring_for_rx(struct bnxt *bp, struct bnxt_rx_ring_info *rxr)
{
	if (bp->flags & BNXT_FLAG_CHIP_P5) {
		struct bnxt_napi *bnapi = rxr->bnapi;
		struct bnxt_cp_ring_info *cpr;

		cpr = bnapi->cp_ring.cp_ring_arr[BNXT_RX_HDL];
		return cpr->cp_ring_struct.fw_ring_id;
	} else {
		return bnxt_cp_ring_from_grp(bp, &rxr->rx_ring_struct);
	}
}

static u16 bnxt_cp_ring_for_tx(struct bnxt *bp, struct bnxt_tx_ring_info *txr)
{
	if (bp->flags & BNXT_FLAG_CHIP_P5) {
		struct bnxt_napi *bnapi = txr->bnapi;
		struct bnxt_cp_ring_info *cpr;

		cpr = bnapi->cp_ring.cp_ring_arr[BNXT_TX_HDL];
		return cpr->cp_ring_struct.fw_ring_id;
	} else {
		return bnxt_cp_ring_from_grp(bp, &txr->tx_ring_struct);
	}
}

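/* Program the RSS hash key and indirection table for a VNIC on pre-P5
 * chips.  The HW_HASH_INDEX_SIZE indirection entries are filled by cycling
 * through the VNIC's ring group IDs, spreading flows round-robin over the
 * receive rings.
 */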
static int bnxt_hwrm_vnic_set_rss(struct bnxt *bp, u16 vnic_id, bool set_rss)
{
	u32 i, j, max_rings;
	struct bnxt_vnic_info *vnic = &bp->vnic_info[vnic_id];
	struct hwrm_vnic_rss_cfg_input req = {0};

	if ((bp->flags & BNXT_FLAG_CHIP_P5) ||
	    vnic->fw_rss_cos_lb_ctx[0] == INVALID_HW_RING_ID)
		return 0;

	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_VNIC_RSS_CFG, -1, -1);
	if (set_rss) {
		req.hash_type = cpu_to_le32(bp->rss_hash_cfg);
		req.hash_mode_flags = VNIC_RSS_CFG_REQ_HASH_MODE_FLAGS_DEFAULT;
		if (vnic->flags & BNXT_VNIC_RSS_FLAG) {
			if (BNXT_CHIP_TYPE_NITRO_A0(bp))
				max_rings = bp->rx_nr_rings - 1;
			else
				max_rings = bp->rx_nr_rings;
		} else {
			max_rings = 1;
		}

		/* Fill the RSS indirection table with ring group ids */
		for (i = 0, j = 0; i < HW_HASH_INDEX_SIZE; i++, j++) {
			if (j == max_rings)
				j = 0;
			vnic->rss_table[i] = cpu_to_le16(vnic->fw_grp_ids[j]);
		}

		req.ring_grp_tbl_addr = cpu_to_le64(vnic->rss_table_dma_addr);
		req.hash_key_tbl_addr =
			cpu_to_le64(vnic->rss_hash_key_dma_addr);
	}
	req.rss_ctx_idx = cpu_to_le16(vnic->fw_rss_cos_lb_ctx[0]);
	return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
}

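/* P5 chips take the RSS table as (rx ring, completion ring) ID pairs and
 * need one RSS context per 64 rings, so the table is programmed with one
 * HWRM_VNIC_RSS_CFG request per 64-entry context.
 */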
static int bnxt_hwrm_vnic_set_rss_p5(struct bnxt *bp, u16 vnic_id, bool set_rss)
{
	struct bnxt_vnic_info *vnic = &bp->vnic_info[vnic_id];
	u32 i, j, k, nr_ctxs, max_rings = bp->rx_nr_rings;
	struct bnxt_rx_ring_info *rxr = &bp->rx_ring[0];
	struct hwrm_vnic_rss_cfg_input req = {0};

	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_VNIC_RSS_CFG, -1, -1);
	req.vnic_id = cpu_to_le16(vnic->fw_vnic_id);
	if (!set_rss) {
		hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
		return 0;
	}
	req.hash_type = cpu_to_le32(bp->rss_hash_cfg);
	req.hash_mode_flags = VNIC_RSS_CFG_REQ_HASH_MODE_FLAGS_DEFAULT;
	req.ring_grp_tbl_addr = cpu_to_le64(vnic->rss_table_dma_addr);
	req.hash_key_tbl_addr = cpu_to_le64(vnic->rss_hash_key_dma_addr);
	nr_ctxs = DIV_ROUND_UP(bp->rx_nr_rings, 64);
	for (i = 0, k = 0; i < nr_ctxs; i++) {
		__le16 *ring_tbl = vnic->rss_table;
		int rc;

		req.ring_table_pair_index = i;
		req.rss_ctx_idx = cpu_to_le16(vnic->fw_rss_cos_lb_ctx[i]);
		for (j = 0; j < 64; j++) {
			u16 ring_id;

			ring_id = rxr->rx_ring_struct.fw_ring_id;
			*ring_tbl++ = cpu_to_le16(ring_id);
			ring_id = bnxt_cp_ring_for_rx(bp, rxr);
			*ring_tbl++ = cpu_to_le16(ring_id);
			rxr++;
			k++;
			if (k == max_rings) {
				k = 0;
				rxr = &bp->rx_ring[0];
			}
		}
		rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
		if (rc)
			return -EIO;
	}
	return 0;
}

static int bnxt_hwrm_vnic_set_hds(struct bnxt *bp, u16 vnic_id)
{
	struct bnxt_vnic_info *vnic = &bp->vnic_info[vnic_id];
	struct hwrm_vnic_plcmodes_cfg_input req = {0};

	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_VNIC_PLCMODES_CFG, -1, -1);
	req.flags = cpu_to_le32(VNIC_PLCMODES_CFG_REQ_FLAGS_JUMBO_PLACEMENT |
				VNIC_PLCMODES_CFG_REQ_FLAGS_HDS_IPV4 |
				VNIC_PLCMODES_CFG_REQ_FLAGS_HDS_IPV6);
	req.enables =
		cpu_to_le32(VNIC_PLCMODES_CFG_REQ_ENABLES_JUMBO_THRESH_VALID |
			    VNIC_PLCMODES_CFG_REQ_ENABLES_HDS_THRESHOLD_VALID);
	/* thresholds not implemented in firmware yet */
	req.jumbo_thresh = cpu_to_le16(bp->rx_copy_thresh);
	req.hds_threshold = cpu_to_le16(bp->rx_copy_thresh);
	req.vnic_id = cpu_to_le32(vnic->fw_vnic_id);
	return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
}

static void bnxt_hwrm_vnic_ctx_free_one(struct bnxt *bp, u16 vnic_id,
					u16 ctx_idx)
{
	struct hwrm_vnic_rss_cos_lb_ctx_free_input req = {0};

	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_VNIC_RSS_COS_LB_CTX_FREE, -1, -1);
	req.rss_cos_lb_ctx_id =
		cpu_to_le16(bp->vnic_info[vnic_id].fw_rss_cos_lb_ctx[ctx_idx]);

	hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
	bp->vnic_info[vnic_id].fw_rss_cos_lb_ctx[ctx_idx] = INVALID_HW_RING_ID;
}

static void bnxt_hwrm_vnic_ctx_free(struct bnxt *bp)
{
	int i, j;

	for (i = 0; i < bp->nr_vnics; i++) {
		struct bnxt_vnic_info *vnic = &bp->vnic_info[i];

		for (j = 0; j < BNXT_MAX_CTX_PER_VNIC; j++) {
			if (vnic->fw_rss_cos_lb_ctx[j] != INVALID_HW_RING_ID)
				bnxt_hwrm_vnic_ctx_free_one(bp, i, j);
		}
	}
	bp->rsscos_nr_ctxs = 0;
}

static int bnxt_hwrm_vnic_ctx_alloc(struct bnxt *bp, u16 vnic_id, u16 ctx_idx)
{
	int rc;
	struct hwrm_vnic_rss_cos_lb_ctx_alloc_input req = {0};
	struct hwrm_vnic_rss_cos_lb_ctx_alloc_output *resp =
						bp->hwrm_cmd_resp_addr;

	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_VNIC_RSS_COS_LB_CTX_ALLOC, -1,
			       -1);

	mutex_lock(&bp->hwrm_cmd_lock);
	rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
	if (!rc)
		bp->vnic_info[vnic_id].fw_rss_cos_lb_ctx[ctx_idx] =
			le16_to_cpu(resp->rss_cos_lb_ctx_id);
	mutex_unlock(&bp->hwrm_cmd_lock);

	return rc;
}

static u32 bnxt_get_roce_vnic_mode(struct bnxt *bp)
{
	if (bp->flags & BNXT_FLAG_ROCE_MIRROR_CAP)
		return VNIC_CFG_REQ_FLAGS_ROCE_MIRRORING_CAPABLE_VNIC_MODE;
	return VNIC_CFG_REQ_FLAGS_ROCE_DUAL_VNIC_MODE;
}

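/* Configure a VNIC.  On P5 chips the VNIC is bound directly to a default
 * rx/completion ring pair; on older chips it is bound to a default ring
 * group plus the RSS/COS/LB contexts allocated above.  The MRU accounts for
 * the Ethernet header, FCS and one VLAN tag on top of the MTU.
 */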
int bnxt_hwrm_vnic_cfg(struct bnxt *bp, u16 vnic_id)
{
	unsigned int ring = 0, grp_idx;
	struct bnxt_vnic_info *vnic = &bp->vnic_info[vnic_id];
	struct hwrm_vnic_cfg_input req = {0};
	u16 def_vlan = 0;

	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_VNIC_CFG, -1, -1);

	if (bp->flags & BNXT_FLAG_CHIP_P5) {
		struct bnxt_rx_ring_info *rxr = &bp->rx_ring[0];

		req.default_rx_ring_id =
			cpu_to_le16(rxr->rx_ring_struct.fw_ring_id);
		req.default_cmpl_ring_id =
			cpu_to_le16(bnxt_cp_ring_for_rx(bp, rxr));
		req.enables =
			cpu_to_le32(VNIC_CFG_REQ_ENABLES_DEFAULT_RX_RING_ID |
				    VNIC_CFG_REQ_ENABLES_DEFAULT_CMPL_RING_ID);
		goto vnic_mru;
	}
	req.enables = cpu_to_le32(VNIC_CFG_REQ_ENABLES_DFLT_RING_GRP);
	/* Only RSS support for now TBD: COS & LB */
	if (vnic->fw_rss_cos_lb_ctx[0] != INVALID_HW_RING_ID) {
		req.rss_rule = cpu_to_le16(vnic->fw_rss_cos_lb_ctx[0]);
		req.enables |= cpu_to_le32(VNIC_CFG_REQ_ENABLES_RSS_RULE |
					   VNIC_CFG_REQ_ENABLES_MRU);
	} else if (vnic->flags & BNXT_VNIC_RFS_NEW_RSS_FLAG) {
		req.rss_rule =
			cpu_to_le16(bp->vnic_info[0].fw_rss_cos_lb_ctx[0]);
		req.enables |= cpu_to_le32(VNIC_CFG_REQ_ENABLES_RSS_RULE |
					   VNIC_CFG_REQ_ENABLES_MRU);
		req.flags |= cpu_to_le32(VNIC_CFG_REQ_FLAGS_RSS_DFLT_CR_MODE);
	} else {
		req.rss_rule = cpu_to_le16(0xffff);
	}

	if (BNXT_CHIP_TYPE_NITRO_A0(bp) &&
	    (vnic->fw_rss_cos_lb_ctx[0] != INVALID_HW_RING_ID)) {
		req.cos_rule = cpu_to_le16(vnic->fw_rss_cos_lb_ctx[1]);
		req.enables |= cpu_to_le32(VNIC_CFG_REQ_ENABLES_COS_RULE);
	} else {
		req.cos_rule = cpu_to_le16(0xffff);
	}

	if (vnic->flags & BNXT_VNIC_RSS_FLAG)
		ring = 0;
	else if (vnic->flags & BNXT_VNIC_RFS_FLAG)
		ring = vnic_id - 1;
	else if ((vnic_id == 1) && BNXT_CHIP_TYPE_NITRO_A0(bp))
		ring = bp->rx_nr_rings - 1;

	grp_idx = bp->rx_ring[ring].bnapi->index;
	req.dflt_ring_grp = cpu_to_le16(bp->grp_info[grp_idx].fw_grp_id);
	req.lb_rule = cpu_to_le16(0xffff);
vnic_mru:
	req.mru = cpu_to_le16(bp->dev->mtu + ETH_HLEN + ETH_FCS_LEN +
			      VLAN_HLEN);

	req.vnic_id = cpu_to_le16(vnic->fw_vnic_id);
#ifdef CONFIG_BNXT_SRIOV
	if (BNXT_VF(bp))
		def_vlan = bp->vf.vlan;
#endif
	if ((bp->flags & BNXT_FLAG_STRIP_VLAN) || def_vlan)
		req.flags |= cpu_to_le32(VNIC_CFG_REQ_FLAGS_VLAN_STRIP_MODE);
	if (!vnic_id && bnxt_ulp_registered(bp->edev, BNXT_ROCE_ULP))
		req.flags |= cpu_to_le32(bnxt_get_roce_vnic_mode(bp));

	return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
}

static int bnxt_hwrm_vnic_free_one(struct bnxt *bp, u16 vnic_id)
{
	u32 rc = 0;

	if (bp->vnic_info[vnic_id].fw_vnic_id != INVALID_HW_RING_ID) {
		struct hwrm_vnic_free_input req = {0};

		bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_VNIC_FREE, -1, -1);
		req.vnic_id =
			cpu_to_le32(bp->vnic_info[vnic_id].fw_vnic_id);

		rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
		if (rc)
			return rc;
		bp->vnic_info[vnic_id].fw_vnic_id = INVALID_HW_RING_ID;
	}
	return rc;
}

static void bnxt_hwrm_vnic_free(struct bnxt *bp)
{
	u16 i;

	for (i = 0; i < bp->nr_vnics; i++)
		bnxt_hwrm_vnic_free_one(bp, i);
}

static int bnxt_hwrm_vnic_alloc(struct bnxt *bp, u16 vnic_id,
				unsigned int start_rx_ring_idx,
				unsigned int nr_rings)
{
	int rc = 0;
	unsigned int i, j, grp_idx, end_idx = start_rx_ring_idx + nr_rings;
	struct hwrm_vnic_alloc_input req = {0};
	struct hwrm_vnic_alloc_output *resp = bp->hwrm_cmd_resp_addr;
	struct bnxt_vnic_info *vnic = &bp->vnic_info[vnic_id];

	if (bp->flags & BNXT_FLAG_CHIP_P5)
		goto vnic_no_ring_grps;

	/* map ring groups to this vnic */
	for (i = start_rx_ring_idx, j = 0; i < end_idx; i++, j++) {
		grp_idx = bp->rx_ring[i].bnapi->index;
		if (bp->grp_info[grp_idx].fw_grp_id == INVALID_HW_RING_ID) {
			netdev_err(bp->dev, "Not enough ring groups avail:%x req:%x\n",
				   j, nr_rings);
			break;
		}
		vnic->fw_grp_ids[j] = bp->grp_info[grp_idx].fw_grp_id;
	}

vnic_no_ring_grps:
	for (i = 0; i < BNXT_MAX_CTX_PER_VNIC; i++)
		vnic->fw_rss_cos_lb_ctx[i] = INVALID_HW_RING_ID;
	if (vnic_id == 0)
		req.flags = cpu_to_le32(VNIC_ALLOC_REQ_FLAGS_DEFAULT);

	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_VNIC_ALLOC, -1, -1);

	mutex_lock(&bp->hwrm_cmd_lock);
	rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
	if (!rc)
		vnic->fw_vnic_id = le32_to_cpu(resp->vnic_id);
	mutex_unlock(&bp->hwrm_cmd_lock);
	return rc;
}

static int bnxt_hwrm_vnic_qcaps(struct bnxt *bp)
{
	struct hwrm_vnic_qcaps_output *resp = bp->hwrm_cmd_resp_addr;
	struct hwrm_vnic_qcaps_input req = {0};
	int rc;

	if (bp->hwrm_spec_code < 0x10600)
		return 0;

	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_VNIC_QCAPS, -1, -1);
	mutex_lock(&bp->hwrm_cmd_lock);
	rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
	if (!rc) {
		u32 flags = le32_to_cpu(resp->flags);

		if (!(bp->flags & BNXT_FLAG_CHIP_P5) &&
		    (flags & VNIC_QCAPS_RESP_FLAGS_RSS_DFLT_CR_CAP))
			bp->flags |= BNXT_FLAG_NEW_RSS_CAP;
		if (flags &
		    VNIC_QCAPS_RESP_FLAGS_ROCE_MIRRORING_CAPABLE_VNIC_CAP)
			bp->flags |= BNXT_FLAG_ROCE_MIRROR_CAP;
	}
	mutex_unlock(&bp->hwrm_cmd_lock);
	return rc;
}

static int bnxt_hwrm_ring_grp_alloc(struct bnxt *bp)
{
	u16 i;
	u32 rc = 0;

	if (bp->flags & BNXT_FLAG_CHIP_P5)
		return 0;

	mutex_lock(&bp->hwrm_cmd_lock);
	for (i = 0; i < bp->rx_nr_rings; i++) {
		struct hwrm_ring_grp_alloc_input req = {0};
		struct hwrm_ring_grp_alloc_output *resp =
					bp->hwrm_cmd_resp_addr;
		unsigned int grp_idx = bp->rx_ring[i].bnapi->index;

		bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_RING_GRP_ALLOC, -1, -1);

		req.cr = cpu_to_le16(bp->grp_info[grp_idx].cp_fw_ring_id);
		req.rr = cpu_to_le16(bp->grp_info[grp_idx].rx_fw_ring_id);
		req.ar = cpu_to_le16(bp->grp_info[grp_idx].agg_fw_ring_id);
		req.sc = cpu_to_le16(bp->grp_info[grp_idx].fw_stats_ctx);

		rc = _hwrm_send_message(bp, &req, sizeof(req),
					HWRM_CMD_TIMEOUT);
		if (rc)
			break;

		bp->grp_info[grp_idx].fw_grp_id =
			le32_to_cpu(resp->ring_group_id);
	}
	mutex_unlock(&bp->hwrm_cmd_lock);
	return rc;
}

static int bnxt_hwrm_ring_grp_free(struct bnxt *bp)
{
	u16 i;
	u32 rc = 0;
	struct hwrm_ring_grp_free_input req = {0};

	if (!bp->grp_info || (bp->flags & BNXT_FLAG_CHIP_P5))
		return 0;

	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_RING_GRP_FREE, -1, -1);

	mutex_lock(&bp->hwrm_cmd_lock);
	for (i = 0; i < bp->cp_nr_rings; i++) {
		if (bp->grp_info[i].fw_grp_id == INVALID_HW_RING_ID)
			continue;
		req.ring_group_id =
			cpu_to_le32(bp->grp_info[i].fw_grp_id);

		rc = _hwrm_send_message(bp, &req, sizeof(req),
					HWRM_CMD_TIMEOUT);
		if (rc)
			break;
		bp->grp_info[i].fw_grp_id = INVALID_HW_RING_ID;
	}
	mutex_unlock(&bp->hwrm_cmd_lock);
	return rc;
}

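/* Common helper to allocate one firmware ring of any type (TX, RX, RX agg,
 * completion or notification queue).  For rings whose descriptor memory
 * spans multiple pages, the request carries the page table address instead
 * of a single page address.  On success the firmware-assigned ring ID is
 * stored in ring->fw_ring_id.
 */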
static int hwrm_ring_alloc_send_msg(struct bnxt *bp,
				    struct bnxt_ring_struct *ring,
				    u32 ring_type, u32 map_index)
{
	int rc = 0, err = 0;
	struct hwrm_ring_alloc_input req = {0};
	struct hwrm_ring_alloc_output *resp = bp->hwrm_cmd_resp_addr;
	struct bnxt_ring_mem_info *rmem = &ring->ring_mem;
	struct bnxt_ring_grp_info *grp_info;
	u16 ring_id;

	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_RING_ALLOC, -1, -1);

	req.enables = 0;
	if (rmem->nr_pages > 1) {
		req.page_tbl_addr = cpu_to_le64(rmem->pg_tbl_map);
		/* Page size is in log2 units */
		req.page_size = BNXT_PAGE_SHIFT;
		req.page_tbl_depth = 1;
	} else {
		req.page_tbl_addr = cpu_to_le64(rmem->dma_arr[0]);
	}
	req.fbo = 0;
	/* Association of ring index with doorbell index and MSIX number */
	req.logical_id = cpu_to_le16(map_index);

	switch (ring_type) {
	case HWRM_RING_ALLOC_TX: {
		struct bnxt_tx_ring_info *txr;

		txr = container_of(ring, struct bnxt_tx_ring_info,
				   tx_ring_struct);
		req.ring_type = RING_ALLOC_REQ_RING_TYPE_TX;
		/* Association of transmit ring with completion ring */
		grp_info = &bp->grp_info[ring->grp_idx];
		req.cmpl_ring_id = cpu_to_le16(bnxt_cp_ring_for_tx(bp, txr));
		req.length = cpu_to_le32(bp->tx_ring_mask + 1);
		req.stat_ctx_id = cpu_to_le32(grp_info->fw_stats_ctx);
		req.queue_id = cpu_to_le16(ring->queue_id);
		break;
	}
	case HWRM_RING_ALLOC_RX:
		req.ring_type = RING_ALLOC_REQ_RING_TYPE_RX;
		req.length = cpu_to_le32(bp->rx_ring_mask + 1);
		if (bp->flags & BNXT_FLAG_CHIP_P5) {
			u16 flags = 0;

			/* Association of rx ring with stats context */
			grp_info = &bp->grp_info[ring->grp_idx];
			req.rx_buf_size = cpu_to_le16(bp->rx_buf_use_size);
			req.stat_ctx_id = cpu_to_le32(grp_info->fw_stats_ctx);
			req.enables |= cpu_to_le32(
				RING_ALLOC_REQ_ENABLES_RX_BUF_SIZE_VALID);
			if (NET_IP_ALIGN == 2)
				flags = RING_ALLOC_REQ_FLAGS_RX_SOP_PAD;
			req.flags = cpu_to_le16(flags);
		}
		break;
	case HWRM_RING_ALLOC_AGG:
		if (bp->flags & BNXT_FLAG_CHIP_P5) {
			req.ring_type = RING_ALLOC_REQ_RING_TYPE_RX_AGG;
			/* Association of agg ring with rx ring */
			grp_info = &bp->grp_info[ring->grp_idx];
			req.rx_ring_id = cpu_to_le16(grp_info->rx_fw_ring_id);
			req.rx_buf_size = cpu_to_le16(BNXT_RX_PAGE_SIZE);
			req.stat_ctx_id = cpu_to_le32(grp_info->fw_stats_ctx);
			req.enables |= cpu_to_le32(
				RING_ALLOC_REQ_ENABLES_RX_RING_ID_VALID |
				RING_ALLOC_REQ_ENABLES_RX_BUF_SIZE_VALID);
		} else {
			req.ring_type = RING_ALLOC_REQ_RING_TYPE_RX;
		}
		req.length = cpu_to_le32(bp->rx_agg_ring_mask + 1);
		break;
	case HWRM_RING_ALLOC_CMPL:
		req.ring_type = RING_ALLOC_REQ_RING_TYPE_L2_CMPL;
		req.length = cpu_to_le32(bp->cp_ring_mask + 1);
		if (bp->flags & BNXT_FLAG_CHIP_P5) {
			/* Association of cp ring with nq */
			grp_info = &bp->grp_info[map_index];
			req.nq_ring_id = cpu_to_le16(grp_info->cp_fw_ring_id);
			req.cq_handle = cpu_to_le64(ring->handle);
			req.enables |= cpu_to_le32(
				RING_ALLOC_REQ_ENABLES_NQ_RING_ID_VALID);
		} else if (bp->flags & BNXT_FLAG_USING_MSIX) {
			req.int_mode = RING_ALLOC_REQ_INT_MODE_MSIX;
		}
		break;
	case HWRM_RING_ALLOC_NQ:
		req.ring_type = RING_ALLOC_REQ_RING_TYPE_NQ;
		req.length = cpu_to_le32(bp->cp_ring_mask + 1);
		if (bp->flags & BNXT_FLAG_USING_MSIX)
			req.int_mode = RING_ALLOC_REQ_INT_MODE_MSIX;
		break;
	default:
		netdev_err(bp->dev, "hwrm alloc invalid ring type %d\n",
			   ring_type);
		return -1;
	}

	mutex_lock(&bp->hwrm_cmd_lock);
	rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
	err = le16_to_cpu(resp->error_code);
	ring_id = le16_to_cpu(resp->ring_id);
	mutex_unlock(&bp->hwrm_cmd_lock);

	if (rc || err) {
		netdev_err(bp->dev, "hwrm_ring_alloc type %d failed. rc:%x err:%x\n",
			   ring_type, rc, err);
		return -EIO;
	}
	ring->fw_ring_id = ring_id;
	return rc;
}

static int bnxt_hwrm_set_async_event_cr(struct bnxt *bp, int idx)
{
	int rc;

	if (BNXT_PF(bp)) {
		struct hwrm_func_cfg_input req = {0};

		bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_CFG, -1, -1);
		req.fid = cpu_to_le16(0xffff);
		req.enables = cpu_to_le32(FUNC_CFG_REQ_ENABLES_ASYNC_EVENT_CR);
		req.async_event_cr = cpu_to_le16(idx);
		rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
	} else {
		struct hwrm_func_vf_cfg_input req = {0};

		bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_VF_CFG, -1, -1);
		req.enables =
			cpu_to_le32(FUNC_VF_CFG_REQ_ENABLES_ASYNC_EVENT_CR);
		req.async_event_cr = cpu_to_le16(idx);
		rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
	}
	return rc;
}

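/* Set up the host view of a ring's doorbell.  P5 chips use one 64-bit
 * doorbell region (at BAR1 + 0x10000 on the PF, BAR1 + 0x4000 on a VF) with
 * the ring XID encoded in the doorbell key; older chips use a 32-bit
 * doorbell at a fixed 0x80 stride indexed by map_idx.
 */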
static void bnxt_set_db(struct bnxt *bp, struct bnxt_db_info *db, u32 ring_type,
			u32 map_idx, u32 xid)
{
	if (bp->flags & BNXT_FLAG_CHIP_P5) {
		if (BNXT_PF(bp))
			db->doorbell = bp->bar1 + 0x10000;
		else
			db->doorbell = bp->bar1 + 0x4000;
		switch (ring_type) {
		case HWRM_RING_ALLOC_TX:
			db->db_key64 = DBR_PATH_L2 | DBR_TYPE_SQ;
			break;
		case HWRM_RING_ALLOC_RX:
		case HWRM_RING_ALLOC_AGG:
			db->db_key64 = DBR_PATH_L2 | DBR_TYPE_SRQ;
			break;
		case HWRM_RING_ALLOC_CMPL:
			db->db_key64 = DBR_PATH_L2;
			break;
		case HWRM_RING_ALLOC_NQ:
			db->db_key64 = DBR_PATH_L2;
			break;
		}
		db->db_key64 |= (u64)xid << DBR_XID_SFT;
	} else {
		db->doorbell = bp->bar1 + map_idx * 0x80;
		switch (ring_type) {
		case HWRM_RING_ALLOC_TX:
			db->db_key32 = DB_KEY_TX;
			break;
		case HWRM_RING_ALLOC_RX:
		case HWRM_RING_ALLOC_AGG:
			db->db_key32 = DB_KEY_RX;
			break;
		case HWRM_RING_ALLOC_CMPL:
			db->db_key32 = DB_KEY_CP;
			break;
		}
	}
}

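/* Allocate all firmware rings in dependency order: the per-MSIX completion
 * or notification rings first (ring 0 doubling as the async event ring),
 * then TX, RX and RX aggregation rings.  On P5 chips each TX and RX ring
 * additionally gets its own child completion ring linked to the NQ.
 */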
static int bnxt_hwrm_ring_alloc(struct bnxt *bp)
{
	int i, rc = 0;
	u32 type;

	if (bp->flags & BNXT_FLAG_CHIP_P5)
		type = HWRM_RING_ALLOC_NQ;
	else
		type = HWRM_RING_ALLOC_CMPL;
	for (i = 0; i < bp->cp_nr_rings; i++) {
		struct bnxt_napi *bnapi = bp->bnapi[i];
		struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
		struct bnxt_ring_struct *ring = &cpr->cp_ring_struct;
		u32 map_idx = ring->map_idx;

		rc = hwrm_ring_alloc_send_msg(bp, ring, type, map_idx);
		if (rc)
			goto err_out;
		bnxt_set_db(bp, &cpr->cp_db, type, map_idx, ring->fw_ring_id);
		bnxt_db_nq(bp, &cpr->cp_db, cpr->cp_raw_cons);
		bp->grp_info[i].cp_fw_ring_id = ring->fw_ring_id;

		if (!i) {
			rc = bnxt_hwrm_set_async_event_cr(bp, ring->fw_ring_id);
			if (rc)
				netdev_warn(bp->dev, "Failed to set async event completion ring.\n");
		}
	}

	type = HWRM_RING_ALLOC_TX;
	for (i = 0; i < bp->tx_nr_rings; i++) {
		struct bnxt_tx_ring_info *txr = &bp->tx_ring[i];
		struct bnxt_ring_struct *ring;
		u32 map_idx;

		if (bp->flags & BNXT_FLAG_CHIP_P5) {
			struct bnxt_napi *bnapi = txr->bnapi;
			struct bnxt_cp_ring_info *cpr, *cpr2;
			u32 type2 = HWRM_RING_ALLOC_CMPL;

			cpr = &bnapi->cp_ring;
			cpr2 = cpr->cp_ring_arr[BNXT_TX_HDL];
			ring = &cpr2->cp_ring_struct;
			ring->handle = BNXT_TX_HDL;
			map_idx = bnapi->index;
			rc = hwrm_ring_alloc_send_msg(bp, ring, type2, map_idx);
			if (rc)
				goto err_out;
			bnxt_set_db(bp, &cpr2->cp_db, type2, map_idx,
				    ring->fw_ring_id);
			bnxt_db_cq(bp, &cpr2->cp_db, cpr2->cp_raw_cons);
		}
		ring = &txr->tx_ring_struct;
		map_idx = i;
		rc = hwrm_ring_alloc_send_msg(bp, ring, type, map_idx);
		if (rc)
			goto err_out;
		bnxt_set_db(bp, &txr->tx_db, type, map_idx, ring->fw_ring_id);
	}

	type = HWRM_RING_ALLOC_RX;
	for (i = 0; i < bp->rx_nr_rings; i++) {
		struct bnxt_rx_ring_info *rxr = &bp->rx_ring[i];
		struct bnxt_ring_struct *ring = &rxr->rx_ring_struct;
		struct bnxt_napi *bnapi = rxr->bnapi;
		u32 map_idx = bnapi->index;

		rc = hwrm_ring_alloc_send_msg(bp, ring, type, map_idx);
		if (rc)
			goto err_out;
		bnxt_set_db(bp, &rxr->rx_db, type, map_idx, ring->fw_ring_id);
		bnxt_db_write(bp, &rxr->rx_db, rxr->rx_prod);
		bp->grp_info[map_idx].rx_fw_ring_id = ring->fw_ring_id;
		if (bp->flags & BNXT_FLAG_CHIP_P5) {
			struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
			u32 type2 = HWRM_RING_ALLOC_CMPL;
			struct bnxt_cp_ring_info *cpr2;

			cpr2 = cpr->cp_ring_arr[BNXT_RX_HDL];
			ring = &cpr2->cp_ring_struct;
			ring->handle = BNXT_RX_HDL;
			rc = hwrm_ring_alloc_send_msg(bp, ring, type2, map_idx);
			if (rc)
				goto err_out;
			bnxt_set_db(bp, &cpr2->cp_db, type2, map_idx,
				    ring->fw_ring_id);
			bnxt_db_cq(bp, &cpr2->cp_db, cpr2->cp_raw_cons);
		}
	}

	if (bp->flags & BNXT_FLAG_AGG_RINGS) {
		type = HWRM_RING_ALLOC_AGG;
		for (i = 0; i < bp->rx_nr_rings; i++) {
			struct bnxt_rx_ring_info *rxr = &bp->rx_ring[i];
			struct bnxt_ring_struct *ring =
						&rxr->rx_agg_ring_struct;
			u32 grp_idx = ring->grp_idx;
			u32 map_idx = grp_idx + bp->rx_nr_rings;

			rc = hwrm_ring_alloc_send_msg(bp, ring, type, map_idx);
			if (rc)
				goto err_out;

			bnxt_set_db(bp, &rxr->rx_agg_db, type, map_idx,
				    ring->fw_ring_id);
			bnxt_db_write(bp, &rxr->rx_agg_db, rxr->rx_agg_prod);
			bp->grp_info[grp_idx].agg_fw_ring_id = ring->fw_ring_id;
		}
	}
err_out:
	return rc;
}

static int hwrm_ring_free_send_msg(struct bnxt *bp,
				   struct bnxt_ring_struct *ring,
				   u32 ring_type, int cmpl_ring_id)
{
	int rc;
	struct hwrm_ring_free_input req = {0};
	struct hwrm_ring_free_output *resp = bp->hwrm_cmd_resp_addr;
	u16 error_code;

	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_RING_FREE, cmpl_ring_id, -1);
	req.ring_type = ring_type;
	req.ring_id = cpu_to_le16(ring->fw_ring_id);

	mutex_lock(&bp->hwrm_cmd_lock);
	rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
	error_code = le16_to_cpu(resp->error_code);
	mutex_unlock(&bp->hwrm_cmd_lock);

	if (rc || error_code) {
		netdev_err(bp->dev, "hwrm_ring_free type %d failed. rc:%x err:%x\n",
			   ring_type, rc, error_code);
		return -EIO;
	}
	return 0;
}

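/* Free all firmware rings in the reverse order of allocation: TX, RX and
 * aggregation rings first (passing the completion ring ID so firmware can
 * flush pending completions when close_path is set), then interrupts are
 * quiesced before the completion/notification rings themselves are freed.
 */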
static void bnxt_hwrm_ring_free(struct bnxt *bp, bool close_path)
{
	u32 type;
	int i;

	if (!bp->bnapi)
		return;

	for (i = 0; i < bp->tx_nr_rings; i++) {
		struct bnxt_tx_ring_info *txr = &bp->tx_ring[i];
		struct bnxt_ring_struct *ring = &txr->tx_ring_struct;
		u32 cmpl_ring_id;

		cmpl_ring_id = bnxt_cp_ring_for_tx(bp, txr);
		if (ring->fw_ring_id != INVALID_HW_RING_ID) {
			hwrm_ring_free_send_msg(bp, ring,
						RING_FREE_REQ_RING_TYPE_TX,
						close_path ? cmpl_ring_id :
						INVALID_HW_RING_ID);
			ring->fw_ring_id = INVALID_HW_RING_ID;
		}
	}

	for (i = 0; i < bp->rx_nr_rings; i++) {
		struct bnxt_rx_ring_info *rxr = &bp->rx_ring[i];
		struct bnxt_ring_struct *ring = &rxr->rx_ring_struct;
		u32 grp_idx = rxr->bnapi->index;
		u32 cmpl_ring_id;

		cmpl_ring_id = bnxt_cp_ring_for_rx(bp, rxr);
		if (ring->fw_ring_id != INVALID_HW_RING_ID) {
			hwrm_ring_free_send_msg(bp, ring,
						RING_FREE_REQ_RING_TYPE_RX,
						close_path ? cmpl_ring_id :
						INVALID_HW_RING_ID);
			ring->fw_ring_id = INVALID_HW_RING_ID;
			bp->grp_info[grp_idx].rx_fw_ring_id =
				INVALID_HW_RING_ID;
		}
	}

	if (bp->flags & BNXT_FLAG_CHIP_P5)
		type = RING_FREE_REQ_RING_TYPE_RX_AGG;
	else
		type = RING_FREE_REQ_RING_TYPE_RX;
	for (i = 0; i < bp->rx_nr_rings; i++) {
		struct bnxt_rx_ring_info *rxr = &bp->rx_ring[i];
		struct bnxt_ring_struct *ring = &rxr->rx_agg_ring_struct;
		u32 grp_idx = rxr->bnapi->index;
		u32 cmpl_ring_id;

		cmpl_ring_id = bnxt_cp_ring_for_rx(bp, rxr);
		if (ring->fw_ring_id != INVALID_HW_RING_ID) {
			hwrm_ring_free_send_msg(bp, ring, type,
						close_path ? cmpl_ring_id :
						INVALID_HW_RING_ID);
			ring->fw_ring_id = INVALID_HW_RING_ID;
			bp->grp_info[grp_idx].agg_fw_ring_id =
				INVALID_HW_RING_ID;
		}
	}

	/* The completion rings are about to be freed.  After that the
	 * IRQ doorbell will not work anymore.  So we need to disable
	 * IRQ here.
	 */
	bnxt_disable_int_sync(bp);

	if (bp->flags & BNXT_FLAG_CHIP_P5)
		type = RING_FREE_REQ_RING_TYPE_NQ;
	else
		type = RING_FREE_REQ_RING_TYPE_L2_CMPL;
	for (i = 0; i < bp->cp_nr_rings; i++) {
		struct bnxt_napi *bnapi = bp->bnapi[i];
		struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
		struct bnxt_ring_struct *ring;
		int j;

		for (j = 0; j < 2; j++) {
			struct bnxt_cp_ring_info *cpr2 = cpr->cp_ring_arr[j];

			if (cpr2) {
				ring = &cpr2->cp_ring_struct;
				if (ring->fw_ring_id == INVALID_HW_RING_ID)
					continue;
				hwrm_ring_free_send_msg(bp, ring,
					RING_FREE_REQ_RING_TYPE_L2_CMPL,
					INVALID_HW_RING_ID);
				ring->fw_ring_id = INVALID_HW_RING_ID;
			}
		}
		ring = &cpr->cp_ring_struct;
		if (ring->fw_ring_id != INVALID_HW_RING_ID) {
			hwrm_ring_free_send_msg(bp, ring, type,
						INVALID_HW_RING_ID);
			ring->fw_ring_id = INVALID_HW_RING_ID;
			bp->grp_info[i].cp_fw_ring_id = INVALID_HW_RING_ID;
		}
	}
}

static int bnxt_trim_rings(struct bnxt *bp, int *rx, int *tx, int max,
			   bool shared);

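/* Query the ring resources that firmware has actually reserved for this
 * function.  With the new resource manager (BNXT_NEW_RM) the usable
 * completion ring budget is the smaller of the allocated completion rings
 * and stat contexts, and on P5 chips the RX/TX counts are trimmed to fit
 * that budget.
 */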
static int bnxt_hwrm_get_rings(struct bnxt *bp)
{
	struct hwrm_func_qcfg_output *resp = bp->hwrm_cmd_resp_addr;
	struct bnxt_hw_resc *hw_resc = &bp->hw_resc;
	struct hwrm_func_qcfg_input req = {0};
	int rc;

	if (bp->hwrm_spec_code < 0x10601)
		return 0;

	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_QCFG, -1, -1);
	req.fid = cpu_to_le16(0xffff);
	mutex_lock(&bp->hwrm_cmd_lock);
	rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
	if (rc) {
		mutex_unlock(&bp->hwrm_cmd_lock);
		return -EIO;
	}

	hw_resc->resv_tx_rings = le16_to_cpu(resp->alloc_tx_rings);
	if (BNXT_NEW_RM(bp)) {
		u16 cp, stats;

		hw_resc->resv_rx_rings = le16_to_cpu(resp->alloc_rx_rings);
		hw_resc->resv_hw_ring_grps =
			le32_to_cpu(resp->alloc_hw_ring_grps);
		hw_resc->resv_vnics = le16_to_cpu(resp->alloc_vnics);
		cp = le16_to_cpu(resp->alloc_cmpl_rings);
		stats = le16_to_cpu(resp->alloc_stat_ctx);
		cp = min_t(u16, cp, stats);
		if (bp->flags & BNXT_FLAG_CHIP_P5) {
			int rx = hw_resc->resv_rx_rings;
			int tx = hw_resc->resv_tx_rings;

			if (bp->flags & BNXT_FLAG_AGG_RINGS)
				rx >>= 1;
			if (cp < (rx + tx)) {
				bnxt_trim_rings(bp, &rx, &tx, cp, false);
				if (bp->flags & BNXT_FLAG_AGG_RINGS)
					rx <<= 1;
				hw_resc->resv_rx_rings = rx;
				hw_resc->resv_tx_rings = tx;
			}
			cp = le16_to_cpu(resp->alloc_msix);
			hw_resc->resv_hw_ring_grps = rx;
		}
		hw_resc->resv_cp_rings = cp;
	}
	mutex_unlock(&bp->hwrm_cmd_lock);
	return 0;
}

/* Caller must hold bp->hwrm_cmd_lock */
int __bnxt_hwrm_get_tx_rings(struct bnxt *bp, u16 fid, int *tx_rings)
{
	struct hwrm_func_qcfg_output *resp = bp->hwrm_cmd_resp_addr;
	struct hwrm_func_qcfg_input req = {0};
	int rc;

	if (bp->hwrm_spec_code < 0x10601)
		return 0;

	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_QCFG, -1, -1);
	req.fid = cpu_to_le16(fid);
	rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
	if (!rc)
		*tx_rings = le16_to_cpu(resp->alloc_tx_rings);

	return rc;
}

static bool bnxt_rfs_supported(struct bnxt *bp);

static void
__bnxt_hwrm_reserve_pf_rings(struct bnxt *bp, struct hwrm_func_cfg_input *req,
			     int tx_rings, int rx_rings, int ring_grps,
			     int cp_rings, int vnics)
{
	u32 enables = 0;

	bnxt_hwrm_cmd_hdr_init(bp, req, HWRM_FUNC_CFG, -1, -1);
	req->fid = cpu_to_le16(0xffff);
	enables |= tx_rings ? FUNC_CFG_REQ_ENABLES_NUM_TX_RINGS : 0;
	req->num_tx_rings = cpu_to_le16(tx_rings);
	if (BNXT_NEW_RM(bp)) {
		enables |= rx_rings ? FUNC_CFG_REQ_ENABLES_NUM_RX_RINGS : 0;
		if (bp->flags & BNXT_FLAG_CHIP_P5) {
			enables |= cp_rings ? FUNC_CFG_REQ_ENABLES_NUM_MSIX : 0;
			enables |= tx_rings + ring_grps ?
				   FUNC_CFG_REQ_ENABLES_NUM_CMPL_RINGS |
				   FUNC_CFG_REQ_ENABLES_NUM_STAT_CTXS : 0;
			enables |= rx_rings ?
				   FUNC_CFG_REQ_ENABLES_NUM_RSSCOS_CTXS : 0;
		} else {
			enables |= cp_rings ?
				   FUNC_CFG_REQ_ENABLES_NUM_CMPL_RINGS |
				   FUNC_CFG_REQ_ENABLES_NUM_STAT_CTXS : 0;
			enables |= ring_grps ?
				   FUNC_CFG_REQ_ENABLES_NUM_HW_RING_GRPS |
				   FUNC_CFG_REQ_ENABLES_NUM_RSSCOS_CTXS : 0;
		}
		enables |= vnics ? FUNC_CFG_REQ_ENABLES_NUM_VNICS : 0;

		req->num_rx_rings = cpu_to_le16(rx_rings);
		if (bp->flags & BNXT_FLAG_CHIP_P5) {
			req->num_cmpl_rings = cpu_to_le16(tx_rings + ring_grps);
			req->num_msix = cpu_to_le16(cp_rings);
			req->num_rsscos_ctxs =
				cpu_to_le16(DIV_ROUND_UP(ring_grps, 64));
		} else {
			req->num_cmpl_rings = cpu_to_le16(cp_rings);
			req->num_hw_ring_grps = cpu_to_le16(ring_grps);
			req->num_rsscos_ctxs = cpu_to_le16(1);
			if (!(bp->flags & BNXT_FLAG_NEW_RSS_CAP) &&
			    bnxt_rfs_supported(bp))
				req->num_rsscos_ctxs =
					cpu_to_le16(ring_grps + 1);
		}
		req->num_stat_ctxs = req->num_cmpl_rings;
		req->num_vnics = cpu_to_le16(vnics);
	}
	req->enables = cpu_to_le32(enables);
}

static void
__bnxt_hwrm_reserve_vf_rings(struct bnxt *bp,
			     struct hwrm_func_vf_cfg_input *req, int tx_rings,
			     int rx_rings, int ring_grps, int cp_rings,
			     int vnics)
{
	u32 enables = 0;

	bnxt_hwrm_cmd_hdr_init(bp, req, HWRM_FUNC_VF_CFG, -1, -1);
	enables |= tx_rings ? FUNC_VF_CFG_REQ_ENABLES_NUM_TX_RINGS : 0;
	enables |= rx_rings ? FUNC_VF_CFG_REQ_ENABLES_NUM_RX_RINGS |
			      FUNC_VF_CFG_REQ_ENABLES_NUM_RSSCOS_CTXS : 0;
	if (bp->flags & BNXT_FLAG_CHIP_P5) {
		enables |= tx_rings + ring_grps ?
			   FUNC_VF_CFG_REQ_ENABLES_NUM_CMPL_RINGS |
			   FUNC_VF_CFG_REQ_ENABLES_NUM_STAT_CTXS : 0;
	} else {
		enables |= cp_rings ?
			   FUNC_VF_CFG_REQ_ENABLES_NUM_CMPL_RINGS |
			   FUNC_VF_CFG_REQ_ENABLES_NUM_STAT_CTXS : 0;
		enables |= ring_grps ?
			   FUNC_VF_CFG_REQ_ENABLES_NUM_HW_RING_GRPS : 0;
	}
	enables |= vnics ? FUNC_VF_CFG_REQ_ENABLES_NUM_VNICS : 0;
	enables |= FUNC_VF_CFG_REQ_ENABLES_NUM_L2_CTXS;

	req->num_l2_ctxs = cpu_to_le16(BNXT_VF_MAX_L2_CTX);
	req->num_tx_rings = cpu_to_le16(tx_rings);
	req->num_rx_rings = cpu_to_le16(rx_rings);
	if (bp->flags & BNXT_FLAG_CHIP_P5) {
		req->num_cmpl_rings = cpu_to_le16(tx_rings + ring_grps);
		req->num_rsscos_ctxs = cpu_to_le16(DIV_ROUND_UP(ring_grps, 64));
	} else {
		req->num_cmpl_rings = cpu_to_le16(cp_rings);
		req->num_hw_ring_grps = cpu_to_le16(ring_grps);
		req->num_rsscos_ctxs = cpu_to_le16(BNXT_VF_MAX_RSS_CTX);
	}
	req->num_stat_ctxs = req->num_cmpl_rings;
	req->num_vnics = cpu_to_le16(vnics);

	req->enables = cpu_to_le32(enables);
}

static int
bnxt_hwrm_reserve_pf_rings(struct bnxt *bp, int tx_rings, int rx_rings,
			   int ring_grps, int cp_rings, int vnics)
{
	struct hwrm_func_cfg_input req = {0};
	int rc;

	__bnxt_hwrm_reserve_pf_rings(bp, &req, tx_rings, rx_rings, ring_grps,
				     cp_rings, vnics);
	if (!req.enables)
		return 0;

	rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
	if (rc)
		return -ENOMEM;

	if (bp->hwrm_spec_code < 0x10601)
		bp->hw_resc.resv_tx_rings = tx_rings;

	rc = bnxt_hwrm_get_rings(bp);
	return rc;
}

static int
bnxt_hwrm_reserve_vf_rings(struct bnxt *bp, int tx_rings, int rx_rings,
			   int ring_grps, int cp_rings, int vnics)
{
	struct hwrm_func_vf_cfg_input req = {0};
	int rc;

	if (!BNXT_NEW_RM(bp)) {
		bp->hw_resc.resv_tx_rings = tx_rings;
		return 0;
	}

	__bnxt_hwrm_reserve_vf_rings(bp, &req, tx_rings, rx_rings, ring_grps,
				     cp_rings, vnics);
	rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
	if (rc)
		return -ENOMEM;

	rc = bnxt_hwrm_get_rings(bp);
	return rc;
}

static int bnxt_hwrm_reserve_rings(struct bnxt *bp, int tx, int rx, int grp,
				   int cp, int vnic)
{
	if (BNXT_PF(bp))
		return bnxt_hwrm_reserve_pf_rings(bp, tx, rx, grp, cp, vnic);
	else
		return bnxt_hwrm_reserve_vf_rings(bp, tx, rx, grp, cp, vnic);
}

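/* Number of completion rings currently in use: the L2 rings plus any
 * MSI-X vectors claimed by the ULP, extended if the ULP vector range
 * ends past the L2 range.
 */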
static int bnxt_cp_rings_in_use(struct bnxt *bp)
{
	int cp = bp->cp_nr_rings;
	int ulp_msix, ulp_base;

	ulp_msix = bnxt_get_ulp_msix_num(bp);
	if (ulp_msix) {
		ulp_base = bnxt_get_ulp_msix_base(bp);
		cp += ulp_msix;
		if ((ulp_base + ulp_msix) > cp)
			cp = ulp_base + ulp_msix;
	}
	return cp;
}

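/* Return true if the rings currently reserved with firmware no longer
 * match what the driver needs, i.e. a fresh reservation is required.
 */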
static bool bnxt_need_reserve_rings(struct bnxt *bp)
{
	struct bnxt_hw_resc *hw_resc = &bp->hw_resc;
	int cp = bnxt_cp_rings_in_use(bp);
	int rx = bp->rx_nr_rings;
	int vnic = 1, grp = rx;

	if (bp->hwrm_spec_code < 0x10601)
		return false;

	if (hw_resc->resv_tx_rings != bp->tx_nr_rings)
		return true;

	if ((bp->flags & BNXT_FLAG_RFS) && !(bp->flags & BNXT_FLAG_CHIP_P5))
		vnic = rx + 1;
	if (bp->flags & BNXT_FLAG_AGG_RINGS)
		rx <<= 1;
	if (BNXT_NEW_RM(bp) &&
	    (hw_resc->resv_rx_rings != rx || hw_resc->resv_cp_rings != cp ||
	     hw_resc->resv_vnics != vnic ||
	     (hw_resc->resv_hw_ring_grps != grp &&
	      !(bp->flags & BNXT_FLAG_CHIP_P5))))
		return true;
	return false;
}

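/* Reserve rings with firmware and then trim the driver's ring counts
 * down to what was actually granted.  If too few RX rings are granted,
 * aggregation rings (and with them LRO) are disabled.
 */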
static int __bnxt_reserve_rings(struct bnxt *bp)
{
	struct bnxt_hw_resc *hw_resc = &bp->hw_resc;
	int cp = bnxt_cp_rings_in_use(bp);
	int tx = bp->tx_nr_rings;
	int rx = bp->rx_nr_rings;
	int grp, rx_rings, rc;
	bool sh = false;
	int vnic = 1;

	if (!bnxt_need_reserve_rings(bp))
		return 0;

	if (bp->flags & BNXT_FLAG_SHARED_RINGS)
		sh = true;
	if ((bp->flags & BNXT_FLAG_RFS) && !(bp->flags & BNXT_FLAG_CHIP_P5))
		vnic = rx + 1;
	if (bp->flags & BNXT_FLAG_AGG_RINGS)
		rx <<= 1;
	grp = bp->rx_nr_rings;

	rc = bnxt_hwrm_reserve_rings(bp, tx, rx, grp, cp, vnic);
	if (rc)
		return rc;

	tx = hw_resc->resv_tx_rings;
	if (BNXT_NEW_RM(bp)) {
		rx = hw_resc->resv_rx_rings;
		cp = hw_resc->resv_cp_rings;
		grp = hw_resc->resv_hw_ring_grps;
		vnic = hw_resc->resv_vnics;
	}

	rx_rings = rx;
	if (bp->flags & BNXT_FLAG_AGG_RINGS) {
		if (rx >= 2) {
			rx_rings = rx >> 1;
		} else {
			if (netif_running(bp->dev))
				return -ENOMEM;

			bp->flags &= ~BNXT_FLAG_AGG_RINGS;
			bp->flags |= BNXT_FLAG_NO_AGG_RINGS;
			bp->dev->hw_features &= ~NETIF_F_LRO;
			bp->dev->features &= ~NETIF_F_LRO;
			bnxt_set_ring_params(bp);
		}
	}
	rx_rings = min_t(int, rx_rings, grp);
	rc = bnxt_trim_rings(bp, &rx_rings, &tx, cp, sh);
	if (bp->flags & BNXT_FLAG_AGG_RINGS)
		rx = rx_rings << 1;
	cp = sh ? max_t(int, tx, rx_rings) : tx + rx_rings;
	bp->tx_nr_rings = tx;
	bp->rx_nr_rings = rx_rings;
	bp->cp_nr_rings = cp;

	if (!tx || !rx || !cp || !grp || !vnic)
		return -ENOMEM;

	return rc;
}

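/* Ask firmware whether the requested VF ring counts could be satisfied,
 * using the *_ASSETS_TEST flags so nothing is actually reserved.
 */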
static int bnxt_hwrm_check_vf_rings(struct bnxt *bp, int tx_rings, int rx_rings,
				    int ring_grps, int cp_rings, int vnics)
{
	struct hwrm_func_vf_cfg_input req = {0};
	u32 flags;
	int rc;

	if (!BNXT_NEW_RM(bp))
		return 0;

	__bnxt_hwrm_reserve_vf_rings(bp, &req, tx_rings, rx_rings, ring_grps,
				     cp_rings, vnics);
	flags = FUNC_VF_CFG_REQ_FLAGS_TX_ASSETS_TEST |
		FUNC_VF_CFG_REQ_FLAGS_RX_ASSETS_TEST |
		FUNC_VF_CFG_REQ_FLAGS_CMPL_ASSETS_TEST |
		FUNC_VF_CFG_REQ_FLAGS_STAT_CTX_ASSETS_TEST |
		FUNC_VF_CFG_REQ_FLAGS_VNIC_ASSETS_TEST |
		FUNC_VF_CFG_REQ_FLAGS_RSSCOS_CTX_ASSETS_TEST;
	if (!(bp->flags & BNXT_FLAG_CHIP_P5))
		flags |= FUNC_VF_CFG_REQ_FLAGS_RING_GRP_ASSETS_TEST;

	req.flags = cpu_to_le32(flags);
	rc = hwrm_send_message_silent(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
	if (rc)
		return -ENOMEM;
	return 0;
}

static int bnxt_hwrm_check_pf_rings(struct bnxt *bp, int tx_rings, int rx_rings,
				    int ring_grps, int cp_rings, int vnics)
{
	struct hwrm_func_cfg_input req = {0};
	u32 flags;
	int rc;

	__bnxt_hwrm_reserve_pf_rings(bp, &req, tx_rings, rx_rings, ring_grps,
				     cp_rings, vnics);
	flags = FUNC_CFG_REQ_FLAGS_TX_ASSETS_TEST;
	if (BNXT_NEW_RM(bp)) {
		flags |= FUNC_CFG_REQ_FLAGS_RX_ASSETS_TEST |
			 FUNC_CFG_REQ_FLAGS_CMPL_ASSETS_TEST |
			 FUNC_CFG_REQ_FLAGS_STAT_CTX_ASSETS_TEST |
			 FUNC_CFG_REQ_FLAGS_VNIC_ASSETS_TEST;
		if (bp->flags & BNXT_FLAG_CHIP_P5)
			flags |= FUNC_CFG_REQ_FLAGS_RSSCOS_CTX_ASSETS_TEST;
		else
			flags |= FUNC_CFG_REQ_FLAGS_RING_GRP_ASSETS_TEST;
	}

	req.flags = cpu_to_le32(flags);
	rc = hwrm_send_message_silent(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
	if (rc)
		return -ENOMEM;
	return 0;
}

static int bnxt_hwrm_check_rings(struct bnxt *bp, int tx_rings, int rx_rings,
				 int ring_grps, int cp_rings, int vnics)
{
	if (bp->hwrm_spec_code < 0x10801)
		return 0;

	if (BNXT_PF(bp))
		return bnxt_hwrm_check_pf_rings(bp, tx_rings, rx_rings,
						ring_grps, cp_rings, vnics);

	return bnxt_hwrm_check_vf_rings(bp, tx_rings, rx_rings, ring_grps,
					cp_rings, vnics);
}

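/* Query interrupt coalescing capabilities.  The legacy limits set up
 * front are kept when firmware is older than HWRM 1.9.2 or the
 * HWRM_RING_AGGINT_QCAPS request fails.
 */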
static void bnxt_hwrm_coal_params_qcaps(struct bnxt *bp)
{
	struct hwrm_ring_aggint_qcaps_output *resp = bp->hwrm_cmd_resp_addr;
	struct bnxt_coal_cap *coal_cap = &bp->coal_cap;
	struct hwrm_ring_aggint_qcaps_input req = {0};
	int rc;

	coal_cap->cmpl_params = BNXT_LEGACY_COAL_CMPL_PARAMS;
	coal_cap->num_cmpl_dma_aggr_max = 63;
	coal_cap->num_cmpl_dma_aggr_during_int_max = 63;
	coal_cap->cmpl_aggr_dma_tmr_max = 65535;
	coal_cap->cmpl_aggr_dma_tmr_during_int_max = 65535;
	coal_cap->int_lat_tmr_min_max = 65535;
	coal_cap->int_lat_tmr_max_max = 65535;
	coal_cap->num_cmpl_aggr_int_max = 65535;
	coal_cap->timer_units = 80;

	if (bp->hwrm_spec_code < 0x10902)
		return;

	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_RING_AGGINT_QCAPS, -1, -1);
	mutex_lock(&bp->hwrm_cmd_lock);
	rc = _hwrm_send_message_silent(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
	if (!rc) {
		coal_cap->cmpl_params = le32_to_cpu(resp->cmpl_params);
		coal_cap->nq_params = le32_to_cpu(resp->nq_params);
		coal_cap->num_cmpl_dma_aggr_max =
			le16_to_cpu(resp->num_cmpl_dma_aggr_max);
		coal_cap->num_cmpl_dma_aggr_during_int_max =
			le16_to_cpu(resp->num_cmpl_dma_aggr_during_int_max);
		coal_cap->cmpl_aggr_dma_tmr_max =
			le16_to_cpu(resp->cmpl_aggr_dma_tmr_max);
		coal_cap->cmpl_aggr_dma_tmr_during_int_max =
			le16_to_cpu(resp->cmpl_aggr_dma_tmr_during_int_max);
		coal_cap->int_lat_tmr_min_max =
			le16_to_cpu(resp->int_lat_tmr_min_max);
		coal_cap->int_lat_tmr_max_max =
			le16_to_cpu(resp->int_lat_tmr_max_max);
		coal_cap->num_cmpl_aggr_int_max =
			le16_to_cpu(resp->num_cmpl_aggr_int_max);
		coal_cap->timer_units = le16_to_cpu(resp->timer_units);
	}
	mutex_unlock(&bp->hwrm_cmd_lock);
}

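/* Convert microseconds to hardware coalescing timer ticks; firmware
 * reports timer_units in nanoseconds (80 ns if not reported).
 */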
static u16 bnxt_usec_to_coal_tmr(struct bnxt *bp, u16 usec)
{
	struct bnxt_coal_cap *coal_cap = &bp->coal_cap;

	return usec * 1000 / coal_cap->timer_units;
}

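/* Translate bnxt_coal settings into a cmpl ring aggint params request,
 * clamping every value to the limits discovered by
 * bnxt_hwrm_coal_params_qcaps().
 */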
static void bnxt_hwrm_set_coal_params(struct bnxt *bp,
				      struct bnxt_coal *hw_coal,
				      struct hwrm_ring_cmpl_ring_cfg_aggint_params_input *req)
{
	struct bnxt_coal_cap *coal_cap = &bp->coal_cap;
	u32 cmpl_params = coal_cap->cmpl_params;
	u16 val, tmr, max, flags = 0;

	max = hw_coal->bufs_per_record * 128;
	if (hw_coal->budget)
		max = hw_coal->bufs_per_record * hw_coal->budget;
	max = min_t(u16, max, coal_cap->num_cmpl_aggr_int_max);

	val = clamp_t(u16, hw_coal->coal_bufs, 1, max);
	req->num_cmpl_aggr_int = cpu_to_le16(val);

	val = min_t(u16, val, coal_cap->num_cmpl_dma_aggr_max);
	req->num_cmpl_dma_aggr = cpu_to_le16(val);

	val = clamp_t(u16, hw_coal->coal_bufs_irq, 1,
		      coal_cap->num_cmpl_dma_aggr_during_int_max);
	req->num_cmpl_dma_aggr_during_int = cpu_to_le16(val);

	tmr = bnxt_usec_to_coal_tmr(bp, hw_coal->coal_ticks);
	tmr = clamp_t(u16, tmr, 1, coal_cap->int_lat_tmr_max_max);
	req->int_lat_tmr_max = cpu_to_le16(tmr);

	/* min timer set to 1/2 of interrupt timer */
	if (cmpl_params & RING_AGGINT_QCAPS_RESP_CMPL_PARAMS_INT_LAT_TMR_MIN) {
		val = tmr / 2;
		val = clamp_t(u16, val, 1, coal_cap->int_lat_tmr_min_max);
		req->int_lat_tmr_min = cpu_to_le16(val);
		req->enables |= cpu_to_le16(BNXT_COAL_CMPL_MIN_TMR_ENABLE);
	}

	/* buf timer set to 1/4 of interrupt timer */
	val = clamp_t(u16, tmr / 4, 1, coal_cap->cmpl_aggr_dma_tmr_max);
	req->cmpl_aggr_dma_tmr = cpu_to_le16(val);

	if (cmpl_params &
	    RING_AGGINT_QCAPS_RESP_CMPL_PARAMS_NUM_CMPL_DMA_AGGR_DURING_INT) {
		tmr = bnxt_usec_to_coal_tmr(bp, hw_coal->coal_ticks_irq);
		val = clamp_t(u16, tmr, 1,
			      coal_cap->cmpl_aggr_dma_tmr_during_int_max);
		/* use the clamped value; the unclamped tmr would defeat
		 * the limit check above
		 */
		req->cmpl_aggr_dma_tmr_during_int = cpu_to_le16(val);
		req->enables |=
			cpu_to_le16(BNXT_COAL_CMPL_AGGR_TMR_DURING_INT_ENABLE);
	}

	if (cmpl_params & RING_AGGINT_QCAPS_RESP_CMPL_PARAMS_TIMER_RESET)
		flags |= RING_CMPL_RING_CFG_AGGINT_PARAMS_REQ_FLAGS_TIMER_RESET;
	if ((cmpl_params & RING_AGGINT_QCAPS_RESP_CMPL_PARAMS_RING_IDLE) &&
	    hw_coal->idle_thresh && hw_coal->coal_ticks < hw_coal->idle_thresh)
		flags |= RING_CMPL_RING_CFG_AGGINT_PARAMS_REQ_FLAGS_RING_IDLE;
	req->flags = cpu_to_le16(flags);
	req->enables |= cpu_to_le16(BNXT_COAL_CMPL_ENABLES);
}

/* Caller holds bp->hwrm_cmd_lock */
static int __bnxt_hwrm_set_coal_nq(struct bnxt *bp, struct bnxt_napi *bnapi,
				   struct bnxt_coal *hw_coal)
{
	struct hwrm_ring_cmpl_ring_cfg_aggint_params_input req = {0};
	struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
	struct bnxt_coal_cap *coal_cap = &bp->coal_cap;
	u32 nq_params = coal_cap->nq_params;
	u16 tmr;

	if (!(nq_params & RING_AGGINT_QCAPS_RESP_NQ_PARAMS_INT_LAT_TMR_MIN))
		return 0;

	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_RING_CMPL_RING_CFG_AGGINT_PARAMS,
			       -1, -1);
	req.ring_id = cpu_to_le16(cpr->cp_ring_struct.fw_ring_id);
	req.flags =
		cpu_to_le16(RING_CMPL_RING_CFG_AGGINT_PARAMS_REQ_FLAGS_IS_NQ);

	tmr = bnxt_usec_to_coal_tmr(bp, hw_coal->coal_ticks) / 2;
	tmr = clamp_t(u16, tmr, 1, coal_cap->int_lat_tmr_min_max);
	req.int_lat_tmr_min = cpu_to_le16(tmr);
	req.enables |= cpu_to_le16(BNXT_COAL_CMPL_MIN_TMR_ENABLE);
	return _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
}

int bnxt_hwrm_set_ring_coal(struct bnxt *bp, struct bnxt_napi *bnapi)
{
	struct hwrm_ring_cmpl_ring_cfg_aggint_params_input req_rx = {0};
	struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
	struct bnxt_coal coal;

	/* Tick values in microseconds.
	 * 1 coal_buf x bufs_per_record = 1 completion record.
	 */
	memcpy(&coal, &bp->rx_coal, sizeof(struct bnxt_coal));

	coal.coal_ticks = cpr->rx_ring_coal.coal_ticks;
	coal.coal_bufs = cpr->rx_ring_coal.coal_bufs;

	if (!bnapi->rx_ring)
		return -ENODEV;

	bnxt_hwrm_cmd_hdr_init(bp, &req_rx,
			       HWRM_RING_CMPL_RING_CFG_AGGINT_PARAMS, -1, -1);

	bnxt_hwrm_set_coal_params(bp, &coal, &req_rx);

	req_rx.ring_id = cpu_to_le16(bnxt_cp_ring_for_rx(bp, bnapi->rx_ring));

	return hwrm_send_message(bp, &req_rx, sizeof(req_rx),
				 HWRM_CMD_TIMEOUT);
}

int bnxt_hwrm_set_coal(struct bnxt *bp)
{
	int i, rc = 0;
	struct hwrm_ring_cmpl_ring_cfg_aggint_params_input req_rx = {0},
							   req_tx = {0}, *req;

	bnxt_hwrm_cmd_hdr_init(bp, &req_rx,
			       HWRM_RING_CMPL_RING_CFG_AGGINT_PARAMS, -1, -1);
	bnxt_hwrm_cmd_hdr_init(bp, &req_tx,
			       HWRM_RING_CMPL_RING_CFG_AGGINT_PARAMS, -1, -1);

	bnxt_hwrm_set_coal_params(bp, &bp->rx_coal, &req_rx);
	bnxt_hwrm_set_coal_params(bp, &bp->tx_coal, &req_tx);

	mutex_lock(&bp->hwrm_cmd_lock);
	for (i = 0; i < bp->cp_nr_rings; i++) {
		struct bnxt_napi *bnapi = bp->bnapi[i];
		struct bnxt_coal *hw_coal;
		u16 ring_id;

		req = &req_rx;
		if (!bnapi->rx_ring) {
			ring_id = bnxt_cp_ring_for_tx(bp, bnapi->tx_ring);
			req = &req_tx;
		} else {
			ring_id = bnxt_cp_ring_for_rx(bp, bnapi->rx_ring);
		}
		req->ring_id = cpu_to_le16(ring_id);

		rc = _hwrm_send_message(bp, req, sizeof(*req),
					HWRM_CMD_TIMEOUT);
		if (rc)
			break;

		if (!(bp->flags & BNXT_FLAG_CHIP_P5))
			continue;

		if (bnapi->rx_ring && bnapi->tx_ring) {
			req = &req_tx;
			ring_id = bnxt_cp_ring_for_tx(bp, bnapi->tx_ring);
			req->ring_id = cpu_to_le16(ring_id);
			rc = _hwrm_send_message(bp, req, sizeof(*req),
						HWRM_CMD_TIMEOUT);
			if (rc)
				break;
		}
		if (bnapi->rx_ring)
			hw_coal = &bp->rx_coal;
		else
			hw_coal = &bp->tx_coal;
		__bnxt_hwrm_set_coal_nq(bp, bnapi, hw_coal);
	}
	mutex_unlock(&bp->hwrm_cmd_lock);
	return rc;
}

static int bnxt_hwrm_stat_ctx_free(struct bnxt *bp)
{
	int rc = 0, i;
	struct hwrm_stat_ctx_free_input req = {0};

	if (!bp->bnapi)
		return 0;

	if (BNXT_CHIP_TYPE_NITRO_A0(bp))
		return 0;

	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_STAT_CTX_FREE, -1, -1);

	mutex_lock(&bp->hwrm_cmd_lock);
	for (i = 0; i < bp->cp_nr_rings; i++) {
		struct bnxt_napi *bnapi = bp->bnapi[i];
		struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;

		if (cpr->hw_stats_ctx_id != INVALID_STATS_CTX_ID) {
			req.stat_ctx_id = cpu_to_le32(cpr->hw_stats_ctx_id);

			rc = _hwrm_send_message(bp, &req, sizeof(req),
						HWRM_CMD_TIMEOUT);
			if (rc)
				break;

			cpr->hw_stats_ctx_id = INVALID_STATS_CTX_ID;
		}
	}
	mutex_unlock(&bp->hwrm_cmd_lock);
	return rc;
}

static int bnxt_hwrm_stat_ctx_alloc(struct bnxt *bp)
{
	int rc = 0, i;
	struct hwrm_stat_ctx_alloc_input req = {0};
	struct hwrm_stat_ctx_alloc_output *resp = bp->hwrm_cmd_resp_addr;

	if (BNXT_CHIP_TYPE_NITRO_A0(bp))
		return 0;

	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_STAT_CTX_ALLOC, -1, -1);

	req.update_period_ms = cpu_to_le32(bp->stats_coal_ticks / 1000);

	mutex_lock(&bp->hwrm_cmd_lock);
	for (i = 0; i < bp->cp_nr_rings; i++) {
		struct bnxt_napi *bnapi = bp->bnapi[i];
		struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;

		req.stats_dma_addr = cpu_to_le64(cpr->hw_stats_map);

		rc = _hwrm_send_message(bp, &req, sizeof(req),
					HWRM_CMD_TIMEOUT);
		if (rc)
			break;

		cpr->hw_stats_ctx_id = le32_to_cpu(resp->stat_ctx_id);

		bp->grp_info[i].fw_stats_ctx = cpr->hw_stats_ctx_id;
	}
	mutex_unlock(&bp->hwrm_cmd_lock);
	return rc;
}

static int bnxt_hwrm_func_qcfg(struct bnxt *bp)
{
	struct hwrm_func_qcfg_input req = {0};
	struct hwrm_func_qcfg_output *resp = bp->hwrm_cmd_resp_addr;
	u16 flags;
	int rc;

	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_QCFG, -1, -1);
	req.fid = cpu_to_le16(0xffff);
	mutex_lock(&bp->hwrm_cmd_lock);
	rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
	if (rc)
		goto func_qcfg_exit;

#ifdef CONFIG_BNXT_SRIOV
	if (BNXT_VF(bp)) {
		struct bnxt_vf_info *vf = &bp->vf;

		vf->vlan = le16_to_cpu(resp->vlan) & VLAN_VID_MASK;
	}
#endif
	flags = le16_to_cpu(resp->flags);
	if (flags & (FUNC_QCFG_RESP_FLAGS_FW_DCBX_AGENT_ENABLED |
		     FUNC_QCFG_RESP_FLAGS_FW_LLDP_AGENT_ENABLED)) {
		bp->fw_cap |= BNXT_FW_CAP_LLDP_AGENT;
		if (flags & FUNC_QCFG_RESP_FLAGS_FW_DCBX_AGENT_ENABLED)
			bp->fw_cap |= BNXT_FW_CAP_DCBX_AGENT;
	}
	if (BNXT_PF(bp) && (flags & FUNC_QCFG_RESP_FLAGS_MULTI_HOST))
		bp->flags |= BNXT_FLAG_MULTI_HOST;

	switch (resp->port_partition_type) {
	case FUNC_QCFG_RESP_PORT_PARTITION_TYPE_NPAR1_0:
	case FUNC_QCFG_RESP_PORT_PARTITION_TYPE_NPAR1_5:
	case FUNC_QCFG_RESP_PORT_PARTITION_TYPE_NPAR2_0:
		bp->port_partition_type = resp->port_partition_type;
		break;
	}
	if (bp->hwrm_spec_code < 0x10707 ||
	    resp->evb_mode == FUNC_QCFG_RESP_EVB_MODE_VEB)
		bp->br_mode = BRIDGE_MODE_VEB;
	else if (resp->evb_mode == FUNC_QCFG_RESP_EVB_MODE_VEPA)
		bp->br_mode = BRIDGE_MODE_VEPA;
	else
		bp->br_mode = BRIDGE_MODE_UNDEF;

	bp->max_mtu = le16_to_cpu(resp->max_mtu_configured);
	if (!bp->max_mtu)
		bp->max_mtu = BNXT_MAX_MTU;

func_qcfg_exit:
	mutex_unlock(&bp->hwrm_cmd_lock);
	return rc;
}

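/* On devices that need host-backed context memory, query how much
 * memory firmware wants for each context type and cache the answers
 * in bp->ctx.
 */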
static int bnxt_hwrm_func_backing_store_qcaps(struct bnxt *bp)
{
	struct hwrm_func_backing_store_qcaps_input req = {0};
	struct hwrm_func_backing_store_qcaps_output *resp =
		bp->hwrm_cmd_resp_addr;
	int rc;

	if (bp->hwrm_spec_code < 0x10902 || BNXT_VF(bp) || bp->ctx)
		return 0;

	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_BACKING_STORE_QCAPS, -1, -1);
	mutex_lock(&bp->hwrm_cmd_lock);
	rc = _hwrm_send_message_silent(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
	if (!rc) {
		struct bnxt_ctx_pg_info *ctx_pg;
		struct bnxt_ctx_mem_info *ctx;
		int i;

		ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
		if (!ctx) {
			rc = -ENOMEM;
			goto ctx_err;
		}
		ctx_pg = kzalloc(sizeof(*ctx_pg) * (bp->max_q + 1), GFP_KERNEL);
		if (!ctx_pg) {
			kfree(ctx);
			rc = -ENOMEM;
			goto ctx_err;
		}
		for (i = 0; i < bp->max_q + 1; i++, ctx_pg++)
			ctx->tqm_mem[i] = ctx_pg;

		bp->ctx = ctx;
		ctx->qp_max_entries = le32_to_cpu(resp->qp_max_entries);
		ctx->qp_min_qp1_entries = le16_to_cpu(resp->qp_min_qp1_entries);
		ctx->qp_max_l2_entries = le16_to_cpu(resp->qp_max_l2_entries);
		ctx->qp_entry_size = le16_to_cpu(resp->qp_entry_size);
		ctx->srq_max_l2_entries = le16_to_cpu(resp->srq_max_l2_entries);
		ctx->srq_max_entries = le32_to_cpu(resp->srq_max_entries);
		ctx->srq_entry_size = le16_to_cpu(resp->srq_entry_size);
		ctx->cq_max_l2_entries = le16_to_cpu(resp->cq_max_l2_entries);
		ctx->cq_max_entries = le32_to_cpu(resp->cq_max_entries);
		ctx->cq_entry_size = le16_to_cpu(resp->cq_entry_size);
		ctx->vnic_max_vnic_entries =
			le16_to_cpu(resp->vnic_max_vnic_entries);
		ctx->vnic_max_ring_table_entries =
			le16_to_cpu(resp->vnic_max_ring_table_entries);
		ctx->vnic_entry_size = le16_to_cpu(resp->vnic_entry_size);
		ctx->stat_max_entries = le32_to_cpu(resp->stat_max_entries);
		ctx->stat_entry_size = le16_to_cpu(resp->stat_entry_size);
		ctx->tqm_entry_size = le16_to_cpu(resp->tqm_entry_size);
		ctx->tqm_min_entries_per_ring =
			le32_to_cpu(resp->tqm_min_entries_per_ring);
		ctx->tqm_max_entries_per_ring =
			le32_to_cpu(resp->tqm_max_entries_per_ring);
		ctx->tqm_entries_multiple = resp->tqm_entries_multiple;
		if (!ctx->tqm_entries_multiple)
			ctx->tqm_entries_multiple = 1;
		ctx->mrav_max_entries = le32_to_cpu(resp->mrav_max_entries);
		ctx->mrav_entry_size = le16_to_cpu(resp->mrav_entry_size);
		ctx->tim_entry_size = le16_to_cpu(resp->tim_entry_size);
		ctx->tim_max_entries = le32_to_cpu(resp->tim_max_entries);
	} else {
		rc = 0;
	}
ctx_err:
	mutex_unlock(&bp->hwrm_cmd_lock);
	return rc;
}

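/* Encode the page size and indirection level of a ring_mem block into
 * the pg_attr/pg_dir fields of a backing store config request.
 */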
static void bnxt_hwrm_set_pg_attr(struct bnxt_ring_mem_info *rmem, u8 *pg_attr,
				  __le64 *pg_dir)
{
	u8 pg_size = 0;

	if (BNXT_PAGE_SHIFT == 13)		/* 8KB pages */
		pg_size = 1 << 4;
	else if (BNXT_PAGE_SHIFT == 16)		/* 64KB pages */
		pg_size = 2 << 4;

	*pg_attr = pg_size;
	if (rmem->nr_pages > 1) {
		*pg_attr |= 1;
		*pg_dir = cpu_to_le64(rmem->pg_tbl_map);
	} else {
		*pg_dir = cpu_to_le64(rmem->dma_arr[0]);
	}
}

#define FUNC_BACKING_STORE_CFG_REQ_DFLT_ENABLES			\
	(FUNC_BACKING_STORE_CFG_REQ_ENABLES_QP |		\
	 FUNC_BACKING_STORE_CFG_REQ_ENABLES_SRQ |		\
	 FUNC_BACKING_STORE_CFG_REQ_ENABLES_CQ |		\
	 FUNC_BACKING_STORE_CFG_REQ_ENABLES_VNIC |		\
	 FUNC_BACKING_STORE_CFG_REQ_ENABLES_STAT)

static int bnxt_hwrm_func_backing_store_cfg(struct bnxt *bp, u32 enables)
{
	struct hwrm_func_backing_store_cfg_input req = {0};
	struct bnxt_ctx_mem_info *ctx = bp->ctx;
	struct bnxt_ctx_pg_info *ctx_pg;
	__le32 *num_entries;
	__le64 *pg_dir;
	u8 *pg_attr;
	int i, rc;
	u32 ena;

	if (!ctx)
		return 0;

	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_BACKING_STORE_CFG, -1, -1);
	req.enables = cpu_to_le32(enables);

	if (enables & FUNC_BACKING_STORE_CFG_REQ_ENABLES_QP) {
		ctx_pg = &ctx->qp_mem;
		req.qp_num_entries = cpu_to_le32(ctx_pg->entries);
		req.qp_num_qp1_entries = cpu_to_le16(ctx->qp_min_qp1_entries);
		req.qp_num_l2_entries = cpu_to_le16(ctx->qp_max_l2_entries);
		req.qp_entry_size = cpu_to_le16(ctx->qp_entry_size);
		bnxt_hwrm_set_pg_attr(&ctx_pg->ring_mem,
				      &req.qpc_pg_size_qpc_lvl,
				      &req.qpc_page_dir);
	}
	if (enables & FUNC_BACKING_STORE_CFG_REQ_ENABLES_SRQ) {
		ctx_pg = &ctx->srq_mem;
		req.srq_num_entries = cpu_to_le32(ctx_pg->entries);
		req.srq_num_l2_entries = cpu_to_le16(ctx->srq_max_l2_entries);
		req.srq_entry_size = cpu_to_le16(ctx->srq_entry_size);
		bnxt_hwrm_set_pg_attr(&ctx_pg->ring_mem,
				      &req.srq_pg_size_srq_lvl,
				      &req.srq_page_dir);
	}
	if (enables & FUNC_BACKING_STORE_CFG_REQ_ENABLES_CQ) {
		ctx_pg = &ctx->cq_mem;
		req.cq_num_entries = cpu_to_le32(ctx_pg->entries);
		req.cq_num_l2_entries = cpu_to_le16(ctx->cq_max_l2_entries);
		req.cq_entry_size = cpu_to_le16(ctx->cq_entry_size);
		bnxt_hwrm_set_pg_attr(&ctx_pg->ring_mem, &req.cq_pg_size_cq_lvl,
				      &req.cq_page_dir);
	}
	if (enables & FUNC_BACKING_STORE_CFG_REQ_ENABLES_VNIC) {
		ctx_pg = &ctx->vnic_mem;
		req.vnic_num_vnic_entries =
			cpu_to_le16(ctx->vnic_max_vnic_entries);
		req.vnic_num_ring_table_entries =
			cpu_to_le16(ctx->vnic_max_ring_table_entries);
		req.vnic_entry_size = cpu_to_le16(ctx->vnic_entry_size);
		bnxt_hwrm_set_pg_attr(&ctx_pg->ring_mem,
				      &req.vnic_pg_size_vnic_lvl,
				      &req.vnic_page_dir);
	}
	if (enables & FUNC_BACKING_STORE_CFG_REQ_ENABLES_STAT) {
		ctx_pg = &ctx->stat_mem;
		req.stat_num_entries = cpu_to_le32(ctx->stat_max_entries);
		req.stat_entry_size = cpu_to_le16(ctx->stat_entry_size);
		bnxt_hwrm_set_pg_attr(&ctx_pg->ring_mem,
				      &req.stat_pg_size_stat_lvl,
				      &req.stat_page_dir);
	}
	for (i = 0, num_entries = &req.tqm_sp_num_entries,
	     pg_attr = &req.tqm_sp_pg_size_tqm_sp_lvl,
	     pg_dir = &req.tqm_sp_page_dir,
	     ena = FUNC_BACKING_STORE_CFG_REQ_ENABLES_TQM_SP;
	     i < 9; i++, num_entries++, pg_attr++, pg_dir++, ena <<= 1) {
		if (!(enables & ena))
			continue;

		req.tqm_entry_size = cpu_to_le16(ctx->tqm_entry_size);
		ctx_pg = ctx->tqm_mem[i];
		*num_entries = cpu_to_le32(ctx_pg->entries);
		bnxt_hwrm_set_pg_attr(&ctx_pg->ring_mem, pg_attr, pg_dir);
	}
	rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
	if (rc)
		rc = -EIO;
	return rc;
}

static int bnxt_alloc_ctx_mem_blk(struct bnxt *bp,
				  struct bnxt_ctx_pg_info *ctx_pg, u32 mem_size)
{
	struct bnxt_ring_mem_info *rmem = &ctx_pg->ring_mem;

	if (!mem_size)
		return 0;

	rmem->nr_pages = DIV_ROUND_UP(mem_size, BNXT_PAGE_SIZE);
	if (rmem->nr_pages > MAX_CTX_PAGES) {
		rmem->nr_pages = 0;
		return -EINVAL;
	}
	rmem->page_size = BNXT_PAGE_SIZE;
	rmem->pg_arr = ctx_pg->ctx_pg_arr;
	rmem->dma_arr = ctx_pg->ctx_dma_arr;
	rmem->flags = BNXT_RMEM_VALID_PTE_FLAG;
	return bnxt_alloc_ring(bp, rmem);
}

static void bnxt_free_ctx_mem(struct bnxt *bp)
{
	struct bnxt_ctx_mem_info *ctx = bp->ctx;
	int i;

	if (!ctx)
		return;

	if (ctx->tqm_mem[0]) {
		for (i = 0; i < bp->max_q + 1; i++)
			bnxt_free_ring(bp, &ctx->tqm_mem[i]->ring_mem);
		kfree(ctx->tqm_mem[0]);
		ctx->tqm_mem[0] = NULL;
	}

	bnxt_free_ring(bp, &ctx->stat_mem.ring_mem);
	bnxt_free_ring(bp, &ctx->vnic_mem.ring_mem);
	bnxt_free_ring(bp, &ctx->cq_mem.ring_mem);
	bnxt_free_ring(bp, &ctx->srq_mem.ring_mem);
	bnxt_free_ring(bp, &ctx->qp_mem.ring_mem);
	ctx->flags &= ~BNXT_CTX_FLAG_INITED;
}

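/* Size and allocate every context memory block advertised by firmware,
 * then hand the pages over with HWRM_FUNC_BACKING_STORE_CFG.  Does
 * nothing once BNXT_CTX_FLAG_INITED is set.
 */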
static int bnxt_alloc_ctx_mem(struct bnxt *bp)
{
	struct bnxt_ctx_pg_info *ctx_pg;
	struct bnxt_ctx_mem_info *ctx;
	u32 mem_size, ena, entries;
	int i, rc;

	rc = bnxt_hwrm_func_backing_store_qcaps(bp);
	if (rc) {
		netdev_err(bp->dev, "Failed querying context mem capability, rc = %d.\n",
			   rc);
		return rc;
	}
	ctx = bp->ctx;
	if (!ctx || (ctx->flags & BNXT_CTX_FLAG_INITED))
		return 0;

	ctx_pg = &ctx->qp_mem;
	ctx_pg->entries = ctx->qp_min_qp1_entries + ctx->qp_max_l2_entries;
	mem_size = ctx->qp_entry_size * ctx_pg->entries;
	rc = bnxt_alloc_ctx_mem_blk(bp, ctx_pg, mem_size);
	if (rc)
		return rc;

	ctx_pg = &ctx->srq_mem;
	ctx_pg->entries = ctx->srq_max_l2_entries;
	mem_size = ctx->srq_entry_size * ctx_pg->entries;
	rc = bnxt_alloc_ctx_mem_blk(bp, ctx_pg, mem_size);
	if (rc)
		return rc;

	ctx_pg = &ctx->cq_mem;
	ctx_pg->entries = ctx->cq_max_l2_entries;
	mem_size = ctx->cq_entry_size * ctx_pg->entries;
	rc = bnxt_alloc_ctx_mem_blk(bp, ctx_pg, mem_size);
	if (rc)
		return rc;

	ctx_pg = &ctx->vnic_mem;
	ctx_pg->entries = ctx->vnic_max_vnic_entries +
			  ctx->vnic_max_ring_table_entries;
	mem_size = ctx->vnic_entry_size * ctx_pg->entries;
	rc = bnxt_alloc_ctx_mem_blk(bp, ctx_pg, mem_size);
	if (rc)
		return rc;

	ctx_pg = &ctx->stat_mem;
	ctx_pg->entries = ctx->stat_max_entries;
	mem_size = ctx->stat_entry_size * ctx_pg->entries;
	rc = bnxt_alloc_ctx_mem_blk(bp, ctx_pg, mem_size);
	if (rc)
		return rc;

	entries = ctx->qp_max_l2_entries;
	entries = roundup(entries, ctx->tqm_entries_multiple);
	entries = clamp_t(u32, entries, ctx->tqm_min_entries_per_ring,
			  ctx->tqm_max_entries_per_ring);
	for (i = 0, ena = 0; i < bp->max_q + 1; i++) {
		ctx_pg = ctx->tqm_mem[i];
		ctx_pg->entries = entries;
		mem_size = ctx->tqm_entry_size * entries;
		rc = bnxt_alloc_ctx_mem_blk(bp, ctx_pg, mem_size);
		if (rc)
			return rc;
		ena |= FUNC_BACKING_STORE_CFG_REQ_ENABLES_TQM_SP << i;
	}
	ena |= FUNC_BACKING_STORE_CFG_REQ_DFLT_ENABLES;
	rc = bnxt_hwrm_func_backing_store_cfg(bp, ena);
	if (rc)
		netdev_err(bp->dev, "Failed configuring context mem, rc = %d.\n",
			   rc);
	else
		ctx->flags |= BNXT_CTX_FLAG_INITED;

	return 0;
}

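/* Query min/max resource limits (rings, ring groups, vnics, stat
 * contexts, etc.) under the new resource manager.  When @all is false
 * only max_tx_sch_inputs is refreshed.
 */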
int bnxt_hwrm_func_resc_qcaps(struct bnxt *bp, bool all)
{
	struct hwrm_func_resource_qcaps_output *resp = bp->hwrm_cmd_resp_addr;
	struct hwrm_func_resource_qcaps_input req = {0};
	struct bnxt_hw_resc *hw_resc = &bp->hw_resc;
	int rc;

	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_RESOURCE_QCAPS, -1, -1);
	req.fid = cpu_to_le16(0xffff);

	mutex_lock(&bp->hwrm_cmd_lock);
	rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
	if (rc) {
		rc = -EIO;
		goto hwrm_func_resc_qcaps_exit;
	}

	hw_resc->max_tx_sch_inputs = le16_to_cpu(resp->max_tx_scheduler_inputs);
	if (!all)
		goto hwrm_func_resc_qcaps_exit;

	hw_resc->min_rsscos_ctxs = le16_to_cpu(resp->min_rsscos_ctx);
	hw_resc->max_rsscos_ctxs = le16_to_cpu(resp->max_rsscos_ctx);
	hw_resc->min_cp_rings = le16_to_cpu(resp->min_cmpl_rings);
	hw_resc->max_cp_rings = le16_to_cpu(resp->max_cmpl_rings);
	hw_resc->min_tx_rings = le16_to_cpu(resp->min_tx_rings);
	hw_resc->max_tx_rings = le16_to_cpu(resp->max_tx_rings);
	hw_resc->min_rx_rings = le16_to_cpu(resp->min_rx_rings);
	hw_resc->max_rx_rings = le16_to_cpu(resp->max_rx_rings);
	hw_resc->min_hw_ring_grps = le16_to_cpu(resp->min_hw_ring_grps);
	hw_resc->max_hw_ring_grps = le16_to_cpu(resp->max_hw_ring_grps);
	hw_resc->min_l2_ctxs = le16_to_cpu(resp->min_l2_ctxs);
	hw_resc->max_l2_ctxs = le16_to_cpu(resp->max_l2_ctxs);
	hw_resc->min_vnics = le16_to_cpu(resp->min_vnics);
	hw_resc->max_vnics = le16_to_cpu(resp->max_vnics);
	hw_resc->min_stat_ctxs = le16_to_cpu(resp->min_stat_ctx);
	hw_resc->max_stat_ctxs = le16_to_cpu(resp->max_stat_ctx);

	if (bp->flags & BNXT_FLAG_CHIP_P5) {
		u16 max_msix = le16_to_cpu(resp->max_msix);

		hw_resc->max_irqs = min_t(u16, hw_resc->max_irqs, max_msix);
		hw_resc->max_hw_ring_grps = hw_resc->max_rx_rings;
	}

	if (BNXT_PF(bp)) {
		struct bnxt_pf_info *pf = &bp->pf;

		pf->vf_resv_strategy =
			le16_to_cpu(resp->vf_reservation_strategy);
		if (pf->vf_resv_strategy > BNXT_VF_RESV_STRATEGY_MINIMAL_STATIC)
			pf->vf_resv_strategy = BNXT_VF_RESV_STRATEGY_MAXIMAL;
	}
hwrm_func_resc_qcaps_exit:
	mutex_unlock(&bp->hwrm_cmd_lock);
	return rc;
}

static int __bnxt_hwrm_func_qcaps(struct bnxt *bp)
{
	int rc = 0;
	struct hwrm_func_qcaps_input req = {0};
	struct hwrm_func_qcaps_output *resp = bp->hwrm_cmd_resp_addr;
	struct bnxt_hw_resc *hw_resc = &bp->hw_resc;
	u32 flags;

	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_QCAPS, -1, -1);
	req.fid = cpu_to_le16(0xffff);

	mutex_lock(&bp->hwrm_cmd_lock);
	rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
	if (rc)
		goto hwrm_func_qcaps_exit;

	flags = le32_to_cpu(resp->flags);
	if (flags & FUNC_QCAPS_RESP_FLAGS_ROCE_V1_SUPPORTED)
		bp->flags |= BNXT_FLAG_ROCEV1_CAP;
	if (flags & FUNC_QCAPS_RESP_FLAGS_ROCE_V2_SUPPORTED)
		bp->flags |= BNXT_FLAG_ROCEV2_CAP;

	bp->tx_push_thresh = 0;
	if (flags & FUNC_QCAPS_RESP_FLAGS_PUSH_MODE_SUPPORTED)
		bp->tx_push_thresh = BNXT_TX_PUSH_THRESH;

	hw_resc->max_rsscos_ctxs = le16_to_cpu(resp->max_rsscos_ctx);
	hw_resc->max_cp_rings = le16_to_cpu(resp->max_cmpl_rings);
	hw_resc->max_tx_rings = le16_to_cpu(resp->max_tx_rings);
	hw_resc->max_rx_rings = le16_to_cpu(resp->max_rx_rings);
	hw_resc->max_hw_ring_grps = le32_to_cpu(resp->max_hw_ring_grps);
	if (!hw_resc->max_hw_ring_grps)
		hw_resc->max_hw_ring_grps = hw_resc->max_tx_rings;
	hw_resc->max_l2_ctxs = le16_to_cpu(resp->max_l2_ctxs);
	hw_resc->max_vnics = le16_to_cpu(resp->max_vnics);
	hw_resc->max_stat_ctxs = le16_to_cpu(resp->max_stat_ctx);

	if (BNXT_PF(bp)) {
		struct bnxt_pf_info *pf = &bp->pf;

		pf->fw_fid = le16_to_cpu(resp->fid);
		pf->port_id = le16_to_cpu(resp->port_id);
		bp->dev->dev_port = pf->port_id;
		memcpy(pf->mac_addr, resp->mac_address, ETH_ALEN);
		pf->first_vf_id = le16_to_cpu(resp->first_vf_id);
		pf->max_vfs = le16_to_cpu(resp->max_vfs);
		pf->max_encap_records = le32_to_cpu(resp->max_encap_records);
		pf->max_decap_records = le32_to_cpu(resp->max_decap_records);
		pf->max_tx_em_flows = le32_to_cpu(resp->max_tx_em_flows);
		pf->max_tx_wm_flows = le32_to_cpu(resp->max_tx_wm_flows);
		pf->max_rx_em_flows = le32_to_cpu(resp->max_rx_em_flows);
		pf->max_rx_wm_flows = le32_to_cpu(resp->max_rx_wm_flows);
		if (flags & FUNC_QCAPS_RESP_FLAGS_WOL_MAGICPKT_SUPPORTED)
			bp->flags |= BNXT_FLAG_WOL_CAP;
	} else {
#ifdef CONFIG_BNXT_SRIOV
		struct bnxt_vf_info *vf = &bp->vf;

		vf->fw_fid = le16_to_cpu(resp->fid);
		memcpy(vf->mac_addr, resp->mac_address, ETH_ALEN);
#endif
	}

hwrm_func_qcaps_exit:
	mutex_unlock(&bp->hwrm_cmd_lock);
	return rc;
}

static int bnxt_hwrm_func_qcaps(struct bnxt *bp)
{
	int rc;

	rc = __bnxt_hwrm_func_qcaps(bp);
	if (rc)
		return rc;
	if (bp->hwrm_spec_code >= 0x10803) {
		rc = bnxt_alloc_ctx_mem(bp);
		if (rc)
			return rc;
		rc = bnxt_hwrm_func_resc_qcaps(bp, true);
		if (!rc)
			bp->fw_cap |= BNXT_FW_CAP_NEW_RM;
	}
	return 0;
}

static int bnxt_hwrm_func_reset(struct bnxt *bp)
{
	struct hwrm_func_reset_input req = {0};

	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_RESET, -1, -1);
	req.enables = 0;

	return hwrm_send_message(bp, &req, sizeof(req), HWRM_RESET_TIMEOUT);
}

static int bnxt_hwrm_queue_qportcfg(struct bnxt *bp)
{
	int rc = 0;
	struct hwrm_queue_qportcfg_input req = {0};
	struct hwrm_queue_qportcfg_output *resp = bp->hwrm_cmd_resp_addr;
	u8 i, j, *qptr;
	bool no_rdma;

	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_QUEUE_QPORTCFG, -1, -1);

	mutex_lock(&bp->hwrm_cmd_lock);
	rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
	if (rc)
		goto qportcfg_exit;

	if (!resp->max_configurable_queues) {
		rc = -EINVAL;
		goto qportcfg_exit;
	}
	bp->max_tc = resp->max_configurable_queues;
	bp->max_lltc = resp->max_configurable_lossless_queues;
	if (bp->max_tc > BNXT_MAX_QUEUE)
		bp->max_tc = BNXT_MAX_QUEUE;

	no_rdma = !(bp->flags & BNXT_FLAG_ROCE_CAP);
	qptr = &resp->queue_id0;
	for (i = 0, j = 0; i < bp->max_tc; i++) {
		bp->q_info[j].queue_id = *qptr;
		bp->q_ids[i] = *qptr++;
		bp->q_info[j].queue_profile = *qptr++;
		bp->tc_to_qidx[j] = j;
		if (!BNXT_CNPQ(bp->q_info[j].queue_profile) ||
		    (no_rdma && BNXT_PF(bp)))
			j++;
	}
	bp->max_q = bp->max_tc;
	bp->max_tc = max_t(u8, j, 1);

	if (resp->queue_cfg_info & QUEUE_QPORTCFG_RESP_QUEUE_CFG_INFO_ASYM_CFG)
		bp->max_tc = 1;

	if (bp->max_lltc > bp->max_tc)
		bp->max_lltc = bp->max_tc;

qportcfg_exit:
	mutex_unlock(&bp->hwrm_cmd_lock);
	return rc;
}

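/* Negotiate the HWRM interface version with firmware and record items
 * such as the default command timeout, maximum request lengths, and
 * short command support.
 */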
static int bnxt_hwrm_ver_get(struct bnxt *bp)
{
	int rc;
	struct hwrm_ver_get_input req = {0};
	struct hwrm_ver_get_output *resp = bp->hwrm_cmd_resp_addr;
	u32 dev_caps_cfg;

	bp->hwrm_max_req_len = HWRM_MAX_REQ_LEN;
	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_VER_GET, -1, -1);
	req.hwrm_intf_maj = HWRM_VERSION_MAJOR;
	req.hwrm_intf_min = HWRM_VERSION_MINOR;
	req.hwrm_intf_upd = HWRM_VERSION_UPDATE;
	mutex_lock(&bp->hwrm_cmd_lock);
	rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
	if (rc)
		goto hwrm_ver_get_exit;

	memcpy(&bp->ver_resp, resp, sizeof(struct hwrm_ver_get_output));

	bp->hwrm_spec_code = resp->hwrm_intf_maj_8b << 16 |
			     resp->hwrm_intf_min_8b << 8 |
			     resp->hwrm_intf_upd_8b;
	if (resp->hwrm_intf_maj_8b < 1) {
		netdev_warn(bp->dev, "HWRM interface %d.%d.%d is older than 1.0.0.\n",
			    resp->hwrm_intf_maj_8b, resp->hwrm_intf_min_8b,
			    resp->hwrm_intf_upd_8b);
		netdev_warn(bp->dev, "Please update firmware with HWRM interface 1.0.0 or newer.\n");
	}
	snprintf(bp->fw_ver_str, BC_HWRM_STR_LEN, "%d.%d.%d.%d",
		 resp->hwrm_fw_maj_8b, resp->hwrm_fw_min_8b,
		 resp->hwrm_fw_bld_8b, resp->hwrm_fw_rsvd_8b);

	bp->hwrm_cmd_timeout = le16_to_cpu(resp->def_req_timeout);
	if (!bp->hwrm_cmd_timeout)
		bp->hwrm_cmd_timeout = DFLT_HWRM_CMD_TIMEOUT;

	if (resp->hwrm_intf_maj_8b >= 1) {
		bp->hwrm_max_req_len = le16_to_cpu(resp->max_req_win_len);
		bp->hwrm_max_ext_req_len = le16_to_cpu(resp->max_ext_req_len);
	}
	if (bp->hwrm_max_ext_req_len < HWRM_MAX_REQ_LEN)
		bp->hwrm_max_ext_req_len = HWRM_MAX_REQ_LEN;

	bp->chip_num = le16_to_cpu(resp->chip_num);
	if (bp->chip_num == CHIP_NUM_58700 && !resp->chip_rev &&
	    !resp->chip_metal)
		bp->flags |= BNXT_FLAG_CHIP_NITRO_A0;

	dev_caps_cfg = le32_to_cpu(resp->dev_caps_cfg);
	if ((dev_caps_cfg & VER_GET_RESP_DEV_CAPS_CFG_SHORT_CMD_SUPPORTED) &&
	    (dev_caps_cfg & VER_GET_RESP_DEV_CAPS_CFG_SHORT_CMD_REQUIRED))
		bp->fw_cap |= BNXT_FW_CAP_SHORT_CMD;

hwrm_ver_get_exit:
	mutex_unlock(&bp->hwrm_cmd_lock);
	return rc;
}

int bnxt_hwrm_fw_set_time(struct bnxt *bp)
{
	struct hwrm_fw_set_time_input req = {0};
	struct tm tm;
	time64_t now = ktime_get_real_seconds();

	if ((BNXT_VF(bp) && bp->hwrm_spec_code < 0x10901) ||
	    bp->hwrm_spec_code < 0x10400)
		return -EOPNOTSUPP;

	time64_to_tm(now, 0, &tm);
	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FW_SET_TIME, -1, -1);
	req.year = cpu_to_le16(1900 + tm.tm_year);
	req.month = 1 + tm.tm_mon;
	req.day = tm.tm_mday;
	req.hour = tm.tm_hour;
	req.minute = tm.tm_min;
	req.second = tm.tm_sec;
	return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
}

static int bnxt_hwrm_port_qstats(struct bnxt *bp)
{
	int rc;
	struct bnxt_pf_info *pf = &bp->pf;
	struct hwrm_port_qstats_input req = {0};

	if (!(bp->flags & BNXT_FLAG_PORT_STATS))
		return 0;

	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_PORT_QSTATS, -1, -1);
	req.port_id = cpu_to_le16(pf->port_id);
	req.tx_stat_host_addr = cpu_to_le64(bp->hw_tx_port_stats_map);
	req.rx_stat_host_addr = cpu_to_le64(bp->hw_rx_port_stats_map);
	rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
	return rc;
}

static int bnxt_hwrm_port_qstats_ext(struct bnxt *bp)
{
	struct hwrm_port_qstats_ext_output *resp = bp->hwrm_cmd_resp_addr;
	struct hwrm_port_qstats_ext_input req = {0};
	struct bnxt_pf_info *pf = &bp->pf;
	int rc;

	if (!(bp->flags & BNXT_FLAG_PORT_STATS_EXT))
		return 0;

	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_PORT_QSTATS_EXT, -1, -1);
	req.port_id = cpu_to_le16(pf->port_id);
	req.rx_stat_size = cpu_to_le16(sizeof(struct rx_port_stats_ext));
	req.rx_stat_host_addr = cpu_to_le64(bp->hw_rx_port_stats_ext_map);
	req.tx_stat_size = cpu_to_le16(sizeof(struct tx_port_stats_ext));
	req.tx_stat_host_addr = cpu_to_le64(bp->hw_tx_port_stats_ext_map);
	mutex_lock(&bp->hwrm_cmd_lock);
	rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
	if (!rc) {
		bp->fw_rx_stats_ext_size = le16_to_cpu(resp->rx_stat_size) / 8;
		bp->fw_tx_stats_ext_size = le16_to_cpu(resp->tx_stat_size) / 8;
	} else {
		bp->fw_rx_stats_ext_size = 0;
		bp->fw_tx_stats_ext_size = 0;
	}
	mutex_unlock(&bp->hwrm_cmd_lock);
	return rc;
}

static void bnxt_hwrm_free_tunnel_ports(struct bnxt *bp)
{
	if (bp->vxlan_port_cnt) {
		bnxt_hwrm_tunnel_dst_port_free(
			bp, TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_VXLAN);
	}
	bp->vxlan_port_cnt = 0;
	if (bp->nge_port_cnt) {
		bnxt_hwrm_tunnel_dst_port_free(
			bp, TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_GENEVE);
	}
	bp->nge_port_cnt = 0;
}

static int bnxt_set_tpa(struct bnxt *bp, bool set_tpa)
{
	int rc, i;
	u32 tpa_flags = 0;

	if (set_tpa)
		tpa_flags = bp->flags & BNXT_FLAG_TPA;
	for (i = 0; i < bp->nr_vnics; i++) {
		rc = bnxt_hwrm_vnic_set_tpa(bp, i, tpa_flags);
		if (rc) {
			netdev_err(bp->dev, "hwrm vnic set tpa failure rc for vnic %d: %x\n",
				   i, rc);
			return rc;
		}
	}
	return 0;
}

static void bnxt_hwrm_clear_vnic_rss(struct bnxt *bp)
{
	int i;

	for (i = 0; i < bp->nr_vnics; i++)
		bnxt_hwrm_vnic_set_rss(bp, i, false);
}

static void bnxt_hwrm_resource_free(struct bnxt *bp, bool close_path,
				    bool irq_re_init)
{
	if (bp->vnic_info) {
		bnxt_hwrm_clear_vnic_filter(bp);
		/* clear all RSS settings before freeing the vnic ctx */
		bnxt_hwrm_clear_vnic_rss(bp);
		bnxt_hwrm_vnic_ctx_free(bp);
		/* before freeing the vnic, undo the vnic tpa settings */
		if (bp->flags & BNXT_FLAG_TPA)
			bnxt_set_tpa(bp, false);
		bnxt_hwrm_vnic_free(bp);
	}
	bnxt_hwrm_ring_free(bp, close_path);
	bnxt_hwrm_ring_grp_free(bp);
	if (irq_re_init) {
		bnxt_hwrm_stat_ctx_free(bp);
		bnxt_hwrm_free_tunnel_ports(bp);
	}
}

static int bnxt_hwrm_set_br_mode(struct bnxt *bp, u16 br_mode)
{
	struct hwrm_func_cfg_input req = {0};
	int rc;

	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_CFG, -1, -1);
	req.fid = cpu_to_le16(0xffff);
	req.enables = cpu_to_le32(FUNC_CFG_REQ_ENABLES_EVB_MODE);
	if (br_mode == BRIDGE_MODE_VEB)
		req.evb_mode = FUNC_CFG_REQ_EVB_MODE_VEB;
	else if (br_mode == BRIDGE_MODE_VEPA)
		req.evb_mode = FUNC_CFG_REQ_EVB_MODE_VEPA;
	else
		return -EINVAL;
	rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
	if (rc)
		rc = -EIO;
	return rc;
}

static int bnxt_hwrm_set_cache_line_size(struct bnxt *bp, int size)
{
	struct hwrm_func_cfg_input req = {0};
	int rc;

	if (BNXT_VF(bp) || bp->hwrm_spec_code < 0x10803)
		return 0;

	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_CFG, -1, -1);
	req.fid = cpu_to_le16(0xffff);
	req.enables = cpu_to_le32(FUNC_CFG_REQ_ENABLES_CACHE_LINESIZE);
	req.options = FUNC_CFG_REQ_OPTIONS_CACHE_LINESIZE_SIZE_64;
	if (size == 128)
		req.options = FUNC_CFG_REQ_OPTIONS_CACHE_LINESIZE_SIZE_128;

	rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
	if (rc)
		rc = -EIO;
	return rc;
}

static int __bnxt_setup_vnic(struct bnxt *bp, u16 vnic_id)
{
	struct bnxt_vnic_info *vnic = &bp->vnic_info[vnic_id];
	int rc;

	if (vnic->flags & BNXT_VNIC_RFS_NEW_RSS_FLAG)
		goto skip_rss_ctx;

	/* allocate context for vnic */
	rc = bnxt_hwrm_vnic_ctx_alloc(bp, vnic_id, 0);
	if (rc) {
		netdev_err(bp->dev, "hwrm vnic %d alloc failure rc: %x\n",
			   vnic_id, rc);
		goto vnic_setup_err;
	}
	bp->rsscos_nr_ctxs++;

	if (BNXT_CHIP_TYPE_NITRO_A0(bp)) {
		rc = bnxt_hwrm_vnic_ctx_alloc(bp, vnic_id, 1);
		if (rc) {
			netdev_err(bp->dev, "hwrm vnic %d cos ctx alloc failure rc: %x\n",
				   vnic_id, rc);
			goto vnic_setup_err;
		}
		bp->rsscos_nr_ctxs++;
	}

skip_rss_ctx:
	/* configure default vnic, ring grp */
	rc = bnxt_hwrm_vnic_cfg(bp, vnic_id);
	if (rc) {
		netdev_err(bp->dev, "hwrm vnic %d cfg failure rc: %x\n",
			   vnic_id, rc);
		goto vnic_setup_err;
	}

	/* Enable RSS hashing on vnic */
	rc = bnxt_hwrm_vnic_set_rss(bp, vnic_id, true);
	if (rc) {
		netdev_err(bp->dev, "hwrm vnic %d set rss failure rc: %x\n",
			   vnic_id, rc);
		goto vnic_setup_err;
	}

	if (bp->flags & BNXT_FLAG_AGG_RINGS) {
		rc = bnxt_hwrm_vnic_set_hds(bp, vnic_id);
		if (rc) {
			netdev_err(bp->dev, "hwrm vnic %d set hds failure rc: %x\n",
				   vnic_id, rc);
		}
	}

vnic_setup_err:
	return rc;
}

static int __bnxt_setup_vnic_p5(struct bnxt *bp, u16 vnic_id)
{
	int rc, i, nr_ctxs;

	nr_ctxs = DIV_ROUND_UP(bp->rx_nr_rings, 64);
	for (i = 0; i < nr_ctxs; i++) {
		rc = bnxt_hwrm_vnic_ctx_alloc(bp, vnic_id, i);
		if (rc) {
			netdev_err(bp->dev, "hwrm vnic %d ctx %d alloc failure rc: %x\n",
				   vnic_id, i, rc);
			break;
		}
		bp->rsscos_nr_ctxs++;
	}
	if (i < nr_ctxs)
		return -ENOMEM;

	rc = bnxt_hwrm_vnic_set_rss_p5(bp, vnic_id, true);
	if (rc) {
		netdev_err(bp->dev, "hwrm vnic %d set rss failure rc: %d\n",
			   vnic_id, rc);
		return rc;
	}
	rc = bnxt_hwrm_vnic_cfg(bp, vnic_id);
	if (rc) {
		netdev_err(bp->dev, "hwrm vnic %d cfg failure rc: %x\n",
			   vnic_id, rc);
		return rc;
	}
	if (bp->flags & BNXT_FLAG_AGG_RINGS) {
		rc = bnxt_hwrm_vnic_set_hds(bp, vnic_id);
		if (rc) {
			netdev_err(bp->dev, "hwrm vnic %d set hds failure rc: %x\n",
				   vnic_id, rc);
		}
	}
	return rc;
}

static int bnxt_setup_vnic(struct bnxt *bp, u16 vnic_id)
{
	if (bp->flags & BNXT_FLAG_CHIP_P5)
		return __bnxt_setup_vnic_p5(bp, vnic_id);
	else
		return __bnxt_setup_vnic(bp, vnic_id);
}

static int bnxt_alloc_rfs_vnics(struct bnxt *bp)
{
#ifdef CONFIG_RFS_ACCEL
	int i, rc = 0;

	for (i = 0; i < bp->rx_nr_rings; i++) {
		struct bnxt_vnic_info *vnic;
		u16 vnic_id = i + 1;
		u16 ring_id = i;

		if (vnic_id >= bp->nr_vnics)
			break;

		vnic = &bp->vnic_info[vnic_id];
		vnic->flags |= BNXT_VNIC_RFS_FLAG;
		if (bp->flags & BNXT_FLAG_NEW_RSS_CAP)
			vnic->flags |= BNXT_VNIC_RFS_NEW_RSS_FLAG;
		rc = bnxt_hwrm_vnic_alloc(bp, vnic_id, ring_id, 1);
		if (rc) {
			netdev_err(bp->dev, "hwrm vnic %d alloc failure rc: %x\n",
				   vnic_id, rc);
			break;
		}
		rc = bnxt_setup_vnic(bp, vnic_id);
		if (rc)
			break;
	}
	return rc;
#else
	return 0;
#endif
}

/* Allow PF and VF with default VLAN to be in promiscuous mode */
static bool bnxt_promisc_ok(struct bnxt *bp)
{
#ifdef CONFIG_BNXT_SRIOV
	if (BNXT_VF(bp) && !bp->vf.vlan)
		return false;
#endif
	return true;
}

static int bnxt_setup_nitroa0_vnic(struct bnxt *bp)
{
	int rc;

	rc = bnxt_hwrm_vnic_alloc(bp, 1, bp->rx_nr_rings - 1, 1);
	if (rc) {
		netdev_err(bp->dev, "Cannot allocate special vnic for NS2 A0: %x\n",
			   rc);
		return rc;
	}

	rc = bnxt_hwrm_vnic_cfg(bp, 1);
	if (rc) {
		netdev_err(bp->dev, "Cannot allocate special vnic for NS2 A0: %x\n",
			   rc);
		return rc;
	}
	return rc;
}

static int bnxt_cfg_rx_mode(struct bnxt *);
static bool bnxt_mc_list_updated(struct bnxt *, u32 *);

static int bnxt_init_chip(struct bnxt *bp, bool irq_re_init)
{
	struct bnxt_vnic_info *vnic = &bp->vnic_info[0];
	int rc = 0;
	unsigned int rx_nr_rings = bp->rx_nr_rings;

	if (irq_re_init) {
		rc = bnxt_hwrm_stat_ctx_alloc(bp);
		if (rc) {
			netdev_err(bp->dev, "hwrm stat ctx alloc failure rc: %x\n",
				   rc);
			goto err_out;
		}
	}

	rc = bnxt_hwrm_ring_alloc(bp);
	if (rc) {
		netdev_err(bp->dev, "hwrm ring alloc failure rc: %x\n", rc);
		goto err_out;
	}

	rc = bnxt_hwrm_ring_grp_alloc(bp);
	if (rc) {
		netdev_err(bp->dev, "hwrm_ring_grp alloc failure: %x\n", rc);
		goto err_out;
	}

	if (BNXT_CHIP_TYPE_NITRO_A0(bp))
		rx_nr_rings--;

	/* default vnic 0 */
	rc = bnxt_hwrm_vnic_alloc(bp, 0, 0, rx_nr_rings);
	if (rc) {
		netdev_err(bp->dev, "hwrm vnic alloc failure rc: %x\n", rc);
		goto err_out;
	}

	rc = bnxt_setup_vnic(bp, 0);
	if (rc)
		goto err_out;

	if (bp->flags & BNXT_FLAG_RFS) {
		rc = bnxt_alloc_rfs_vnics(bp);
		if (rc)
			goto err_out;
	}

	if (bp->flags & BNXT_FLAG_TPA) {
		rc = bnxt_set_tpa(bp, true);
		if (rc)
			goto err_out;
	}

	if (BNXT_VF(bp))
		bnxt_update_vf_mac(bp);

	/* Filter for default vnic 0 */
	rc = bnxt_hwrm_set_vnic_filter(bp, 0, 0, bp->dev->dev_addr);
	if (rc) {
		netdev_err(bp->dev, "HWRM vnic filter failure rc: %x\n", rc);
		goto err_out;
	}
	vnic->uc_filter_count = 1;

	vnic->rx_mask = 0;
	if (bp->dev->flags & IFF_BROADCAST)
		vnic->rx_mask |= CFA_L2_SET_RX_MASK_REQ_MASK_BCAST;

	if ((bp->dev->flags & IFF_PROMISC) && bnxt_promisc_ok(bp))
		vnic->rx_mask |= CFA_L2_SET_RX_MASK_REQ_MASK_PROMISCUOUS;

	if (bp->dev->flags & IFF_ALLMULTI) {
		vnic->rx_mask |= CFA_L2_SET_RX_MASK_REQ_MASK_ALL_MCAST;
		vnic->mc_list_count = 0;
	} else {
		u32 mask = 0;

		bnxt_mc_list_updated(bp, &mask);
		vnic->rx_mask |= mask;
	}

	rc = bnxt_cfg_rx_mode(bp);
	if (rc)
		goto err_out;

	rc = bnxt_hwrm_set_coal(bp);
	if (rc)
		netdev_warn(bp->dev, "HWRM set coalescing failure rc: %x\n",
			    rc);

	if (BNXT_CHIP_TYPE_NITRO_A0(bp)) {
		rc = bnxt_setup_nitroa0_vnic(bp);
		if (rc)
			netdev_err(bp->dev, "Special vnic setup failure for NS2 A0 rc: %x\n",
				   rc);
	}

	if (BNXT_VF(bp)) {
		bnxt_hwrm_func_qcfg(bp);
		netdev_update_features(bp->dev);
	}

	return 0;

err_out:
	bnxt_hwrm_resource_free(bp, 0, true);

	return rc;
}

static int bnxt_shutdown_nic(struct bnxt *bp, bool irq_re_init)
{
	bnxt_hwrm_resource_free(bp, 1, irq_re_init);
	return 0;
}

static int bnxt_init_nic(struct bnxt *bp, bool irq_re_init)
{
	bnxt_init_cp_rings(bp);
	bnxt_init_rx_rings(bp);
	bnxt_init_tx_rings(bp);
	bnxt_init_ring_grps(bp, irq_re_init);
	bnxt_init_vnics(bp);

	return bnxt_init_chip(bp, irq_re_init);
}

static int bnxt_set_real_num_queues(struct bnxt *bp)
{
	int rc;
	struct net_device *dev = bp->dev;

	rc = netif_set_real_num_tx_queues(dev, bp->tx_nr_rings -
					  bp->tx_nr_rings_xdp);
	if (rc)
		return rc;

	rc = netif_set_real_num_rx_queues(dev, bp->rx_nr_rings);
	if (rc)
		return rc;

#ifdef CONFIG_RFS_ACCEL
	if (bp->flags & BNXT_FLAG_RFS)
		dev->rx_cpu_rmap = alloc_irq_cpu_rmap(bp->rx_nr_rings);
#endif

	return rc;
}

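/* Fit *rx and *tx into @max completion rings: cap both when rings are
 * shared, otherwise shrink the larger side until rx + tx fits.
 */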
static int bnxt_trim_rings(struct bnxt *bp, int *rx, int *tx, int max,
			   bool shared)
{
	int _rx = *rx, _tx = *tx;

	if (shared) {
		*rx = min_t(int, _rx, max);
		*tx = min_t(int, _tx, max);
	} else {
		if (max < 2)
			return -ENOMEM;

		while (_rx + _tx > max) {
			if (_rx > _tx && _rx > 1)
				_rx--;
			else if (_tx > 1)
				_tx--;
		}
		*rx = _rx;
		*tx = _tx;
	}
	return 0;
}

static void bnxt_setup_msix(struct bnxt *bp)
{
	const int len = sizeof(bp->irq_tbl[0].name);
	struct net_device *dev = bp->dev;
	int tcs, i;

	tcs = netdev_get_num_tc(dev);
	if (tcs > 1) {
		int i, off, count;

		for (i = 0; i < tcs; i++) {
			count = bp->tx_nr_rings_per_tc;
			off = i * count;
			netdev_set_tc_queue(dev, i, count, off);
		}
	}

	for (i = 0; i < bp->cp_nr_rings; i++) {
		int map_idx = bnxt_cp_num_to_irq_num(bp, i);
		char *attr;

		if (bp->flags & BNXT_FLAG_SHARED_RINGS)
			attr = "TxRx";
		else if (i < bp->rx_nr_rings)
			attr = "rx";
		else
			attr = "tx";

		snprintf(bp->irq_tbl[map_idx].name, len, "%s-%s-%d", dev->name,
			 attr, i);
		bp->irq_tbl[map_idx].handler = bnxt_msix;
	}
}

static void bnxt_setup_inta(struct bnxt *bp)
{
	const int len = sizeof(bp->irq_tbl[0].name);

	if (netdev_get_num_tc(bp->dev))
		netdev_reset_tc(bp->dev);

	snprintf(bp->irq_tbl[0].name, len, "%s-%s-%d", bp->dev->name, "TxRx",
		 0);
	bp->irq_tbl[0].handler = bnxt_inta;
}

static int bnxt_setup_int_mode(struct bnxt *bp)
{
	int rc;

	if (bp->flags & BNXT_FLAG_USING_MSIX)
		bnxt_setup_msix(bp);
	else
		bnxt_setup_inta(bp);

	rc = bnxt_set_real_num_queues(bp);
	return rc;
}

#ifdef CONFIG_RFS_ACCEL
static unsigned int bnxt_get_max_func_rss_ctxs(struct bnxt *bp)
{
	return bp->hw_resc.max_rsscos_ctxs;
}

static unsigned int bnxt_get_max_func_vnics(struct bnxt *bp)
{
	return bp->hw_resc.max_vnics;
}
#endif

unsigned int bnxt_get_max_func_stat_ctxs(struct bnxt *bp)
{
	return bp->hw_resc.max_stat_ctxs;
}

void bnxt_set_max_func_stat_ctxs(struct bnxt *bp, unsigned int max)
{
	bp->hw_resc.max_stat_ctxs = max;
}

unsigned int bnxt_get_max_func_cp_rings(struct bnxt *bp)
{
	return bp->hw_resc.max_cp_rings;
}

unsigned int bnxt_get_max_func_cp_rings_for_en(struct bnxt *bp)
{
	return bp->hw_resc.max_cp_rings - bnxt_get_ulp_msix_num(bp);
}

static unsigned int bnxt_get_max_func_irqs(struct bnxt *bp)
{
	struct bnxt_hw_resc *hw_resc = &bp->hw_resc;

	return min_t(unsigned int, hw_resc->max_irqs, hw_resc->max_cp_rings);
}

static void bnxt_set_max_func_irqs(struct bnxt *bp, unsigned int max_irqs)
{
	bp->hw_resc.max_irqs = max_irqs;
}

fbcfc8e4
MC
7044int bnxt_get_avail_msix(struct bnxt *bp, int num)
7045{
7046 int max_cp = bnxt_get_max_func_cp_rings(bp);
7047 int max_irq = bnxt_get_max_func_irqs(bp);
7048 int total_req = bp->cp_nr_rings + num;
7049 int max_idx, avail_msix;
7050
7051 max_idx = min_t(int, bp->total_irqs, max_cp);
7052 avail_msix = max_idx - bp->cp_nr_rings;
f1ca94de 7053 if (!BNXT_NEW_RM(bp) || avail_msix >= num)
fbcfc8e4
MC
7054 return avail_msix;
7055
7056 if (max_irq < total_req) {
7057 num = max_irq - bp->cp_nr_rings;
7058 if (num <= 0)
7059 return 0;
7060 }
7061 return num;
7062}
7063
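/* Example: bnxt_get_avail_msix() worked through with assumed numbers.
 * Suppose total_irqs = 16, max_cp = 16 and cp_nr_rings = 12, and a ULP
 * driver asks for num = 8 vectors:
 *
 *	max_idx    = min(16, 16) = 16
 *	avail_msix = 16 - 12     = 4
 *
 * Without the new resource manager the function returns 4 (whatever is
 * already spare).  With it, and max_irq = 16, total_req = 12 + 8 = 20
 * exceeds max_irq, so num is clipped to 16 - 12 = 4 before any extra
 * vectors are reserved.
 */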
08654eb2
MC
7064static int bnxt_get_num_msix(struct bnxt *bp)
7065{
f1ca94de 7066 if (!BNXT_NEW_RM(bp))
08654eb2
MC
7067 return bnxt_get_max_func_irqs(bp);
7068
7069 return bnxt_cp_rings_in_use(bp);
7070}
7071
7809592d 7072static int bnxt_init_msix(struct bnxt *bp)
c0c050c5 7073{
fbcfc8e4 7074 int i, total_vecs, max, rc = 0, min = 1, ulp_msix;
7809592d 7075 struct msix_entry *msix_ent;
c0c050c5 7076
08654eb2
MC
7077 total_vecs = bnxt_get_num_msix(bp);
7078 max = bnxt_get_max_func_irqs(bp);
7079 if (total_vecs > max)
7080 total_vecs = max;
7081
2773dfb2
MC
7082 if (!total_vecs)
7083 return 0;
7084
c0c050c5
MC
7085 msix_ent = kcalloc(total_vecs, sizeof(struct msix_entry), GFP_KERNEL);
7086 if (!msix_ent)
7087 return -ENOMEM;
7088
7089 for (i = 0; i < total_vecs; i++) {
7090 msix_ent[i].entry = i;
7091 msix_ent[i].vector = 0;
7092 }
7093
01657bcd
MC
7094 if (!(bp->flags & BNXT_FLAG_SHARED_RINGS))
7095 min = 2;
7096
7097 total_vecs = pci_enable_msix_range(bp->pdev, msix_ent, min, total_vecs);
fbcfc8e4
MC
7098 ulp_msix = bnxt_get_ulp_msix_num(bp);
7099 if (total_vecs < 0 || total_vecs < ulp_msix) {
c0c050c5
MC
7100 rc = -ENODEV;
7101 goto msix_setup_exit;
7102 }
7103
7104 bp->irq_tbl = kcalloc(total_vecs, sizeof(struct bnxt_irq), GFP_KERNEL);
7105 if (bp->irq_tbl) {
7809592d
MC
7106 for (i = 0; i < total_vecs; i++)
7107 bp->irq_tbl[i].vector = msix_ent[i].vector;
c0c050c5 7108
7809592d 7109 bp->total_irqs = total_vecs;
c0c050c5 7110 /* Trim rings based on the number of vectors allocated */
6e6c5a57 7111 rc = bnxt_trim_rings(bp, &bp->rx_nr_rings, &bp->tx_nr_rings,
fbcfc8e4 7112 total_vecs - ulp_msix, min == 1);
6e6c5a57
MC
7113 if (rc)
7114 goto msix_setup_exit;
7115
7809592d
MC
7116 bp->cp_nr_rings = (min == 1) ?
7117 max_t(int, bp->tx_nr_rings, bp->rx_nr_rings) :
7118 bp->tx_nr_rings + bp->rx_nr_rings;
c0c050c5 7119
c0c050c5
MC
7120 } else {
7121 rc = -ENOMEM;
7122 goto msix_setup_exit;
7123 }
7124 bp->flags |= BNXT_FLAG_USING_MSIX;
7125 kfree(msix_ent);
7126 return 0;
7127
7128msix_setup_exit:
7809592d
MC
7129 netdev_err(bp->dev, "bnxt_init_msix err: %x\n", rc);
7130 kfree(bp->irq_tbl);
7131 bp->irq_tbl = NULL;
c0c050c5
MC
7132 pci_disable_msix(bp->pdev);
7133 kfree(msix_ent);
7134 return rc;
7135}
7136
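/* Example: the pci_enable_msix_range() contract that bnxt_init_msix()
 * above relies on.  The kernel either enables between "min" and "max"
 * vectors and returns the count, or fails with a negative errno; it
 * never silently enables fewer than "min".  That guarantee is what
 * lets the driver size its ring sets directly from the return value.
 * demo_enable_msix() is a hypothetical sketch.
 */
static int demo_enable_msix(struct pci_dev *pdev,
			    struct msix_entry *ent, int want)
{
	int i, got;

	for (i = 0; i < want; i++)
		ent[i].entry = i;	/* request vectors 0 .. want-1 */

	/* Accept anything from 2 vectors (1 RX + 1 TX) up to "want". */
	got = pci_enable_msix_range(pdev, ent, 2, want);
	if (got < 0)
		return got;	/* not even 2 vectors available */
	/* 2 <= got <= want; ent[i].vector now holds each Linux IRQ. */
	return got;
}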
7809592d 7137static int bnxt_init_inta(struct bnxt *bp)
c0c050c5 7138{
c0c050c5 7139 bp->irq_tbl = kcalloc(1, sizeof(struct bnxt_irq), GFP_KERNEL);
7809592d
MC
7140 if (!bp->irq_tbl)
7141 return -ENOMEM;
7142
7143 bp->total_irqs = 1;
c0c050c5
MC
7144 bp->rx_nr_rings = 1;
7145 bp->tx_nr_rings = 1;
7146 bp->cp_nr_rings = 1;
01657bcd 7147 bp->flags |= BNXT_FLAG_SHARED_RINGS;
c0c050c5 7148 bp->irq_tbl[0].vector = bp->pdev->irq;
7809592d 7149 return 0;
c0c050c5
MC
7150}
7151
7809592d 7152static int bnxt_init_int_mode(struct bnxt *bp)
c0c050c5
MC
7153{
7154 int rc = 0;
7155
7156 if (bp->flags & BNXT_FLAG_MSIX_CAP)
7809592d 7157 rc = bnxt_init_msix(bp);
c0c050c5 7158
1fa72e29 7159 if (!(bp->flags & BNXT_FLAG_USING_MSIX) && BNXT_PF(bp)) {
c0c050c5 7160 /* fallback to INTA */
7809592d 7161 rc = bnxt_init_inta(bp);
c0c050c5
MC
7162 }
7163 return rc;
7164}
7165
7809592d
MC
7166static void bnxt_clear_int_mode(struct bnxt *bp)
7167{
7168 if (bp->flags & BNXT_FLAG_USING_MSIX)
7169 pci_disable_msix(bp->pdev);
7170
7171 kfree(bp->irq_tbl);
7172 bp->irq_tbl = NULL;
7173 bp->flags &= ~BNXT_FLAG_USING_MSIX;
7174}
7175
fbcfc8e4 7176int bnxt_reserve_rings(struct bnxt *bp)
674f50a5 7177{
674f50a5
MC
7178 int tcs = netdev_get_num_tc(bp->dev);
7179 int rc;
7180
7181 if (!bnxt_need_reserve_rings(bp))
7182 return 0;
7183
7184 rc = __bnxt_reserve_rings(bp);
7185 if (rc) {
7186 netdev_err(bp->dev, "ring reservation failure rc: %d\n", rc);
7187 return rc;
7188 }
f1ca94de 7189 if (BNXT_NEW_RM(bp) && (bnxt_get_num_msix(bp) != bp->total_irqs)) {
ec86f14e 7190 bnxt_ulp_irq_stop(bp);
674f50a5
MC
7191 bnxt_clear_int_mode(bp);
7192 rc = bnxt_init_int_mode(bp);
ec86f14e 7193 bnxt_ulp_irq_restart(bp, rc);
674f50a5
MC
7194 if (rc)
7195 return rc;
7196 }
7197 if (tcs && (bp->tx_nr_rings_per_tc * tcs != bp->tx_nr_rings)) {
7198 netdev_err(bp->dev, "tx ring reservation failure\n");
7199 netdev_reset_tc(bp->dev);
7200 bp->tx_nr_rings_per_tc = bp->tx_nr_rings;
7201 return -ENOMEM;
7202 }
7203 bp->num_stat_ctxs = bp->cp_nr_rings;
7204 return 0;
7205}
7206
c0c050c5
MC
7207static void bnxt_free_irq(struct bnxt *bp)
7208{
7209 struct bnxt_irq *irq;
7210 int i;
7211
7212#ifdef CONFIG_RFS_ACCEL
7213 free_irq_cpu_rmap(bp->dev->rx_cpu_rmap);
7214 bp->dev->rx_cpu_rmap = NULL;
7215#endif
cb98526b 7216 if (!bp->irq_tbl || !bp->bnapi)
c0c050c5
MC
7217 return;
7218
7219 for (i = 0; i < bp->cp_nr_rings; i++) {
e5811b8c
MC
7220 int map_idx = bnxt_cp_num_to_irq_num(bp, i);
7221
7222 irq = &bp->irq_tbl[map_idx];
56f0fd80
VV
7223 if (irq->requested) {
7224 if (irq->have_cpumask) {
7225 irq_set_affinity_hint(irq->vector, NULL);
7226 free_cpumask_var(irq->cpu_mask);
7227 irq->have_cpumask = 0;
7228 }
c0c050c5 7229 free_irq(irq->vector, bp->bnapi[i]);
56f0fd80
VV
7230 }
7231
c0c050c5
MC
7232 irq->requested = 0;
7233 }
c0c050c5
MC
7234}
7235
7236static int bnxt_request_irq(struct bnxt *bp)
7237{
b81a90d3 7238 int i, j, rc = 0;
c0c050c5
MC
7239 unsigned long flags = 0;
7240#ifdef CONFIG_RFS_ACCEL
e5811b8c 7241 struct cpu_rmap *rmap;
c0c050c5
MC
7242#endif
7243
e5811b8c
MC
7244 rc = bnxt_setup_int_mode(bp);
7245 if (rc) {
7246 netdev_err(bp->dev, "bnxt_setup_int_mode err: %x\n",
7247 rc);
7248 return rc;
7249 }
7250#ifdef CONFIG_RFS_ACCEL
7251 rmap = bp->dev->rx_cpu_rmap;
7252#endif
c0c050c5
MC
7253 if (!(bp->flags & BNXT_FLAG_USING_MSIX))
7254 flags = IRQF_SHARED;
7255
b81a90d3 7256 for (i = 0, j = 0; i < bp->cp_nr_rings; i++) {
e5811b8c
MC
7257 int map_idx = bnxt_cp_num_to_irq_num(bp, i);
7258 struct bnxt_irq *irq = &bp->irq_tbl[map_idx];
7259
c0c050c5 7260#ifdef CONFIG_RFS_ACCEL
b81a90d3 7261 if (rmap && bp->bnapi[i]->rx_ring) {
c0c050c5
MC
7262 rc = irq_cpu_rmap_add(rmap, irq->vector);
7263 if (rc)
7264 netdev_warn(bp->dev, "failed adding irq rmap for ring %d\n",
b81a90d3
MC
7265 j);
7266 j++;
c0c050c5
MC
7267 }
7268#endif
7269 rc = request_irq(irq->vector, irq->handler, flags, irq->name,
7270 bp->bnapi[i]);
7271 if (rc)
7272 break;
7273
7274 irq->requested = 1;
56f0fd80
VV
7275
7276 if (zalloc_cpumask_var(&irq->cpu_mask, GFP_KERNEL)) {
7277 int numa_node = dev_to_node(&bp->pdev->dev);
7278
7279 irq->have_cpumask = 1;
7280 cpumask_set_cpu(cpumask_local_spread(i, numa_node),
7281 irq->cpu_mask);
7282 rc = irq_set_affinity_hint(irq->vector, irq->cpu_mask);
7283 if (rc) {
7284 netdev_warn(bp->dev,
7285 "Set affinity failed, IRQ = %d\n",
7286 irq->vector);
7287 break;
7288 }
7289 }
c0c050c5
MC
7290 }
7291 return rc;
7292}
7293
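/* Example: the NUMA-aware IRQ affinity pattern from bnxt_request_irq()
 * above.  cpumask_local_spread(i, node) returns the i-th online CPU,
 * preferring CPUs local to "node", so successive rings land on
 * distinct node-local CPUs before spilling to remote ones.  The mask
 * must stay allocated until the hint is cleared again with
 * irq_set_affinity_hint(vector, NULL), which is why bnxt_free_irq()
 * owns the teardown.  demo_hint_ring() is a hypothetical sketch.
 */
static int demo_hint_ring(struct bnxt_irq *irq, int ring, int node)
{
	if (!zalloc_cpumask_var(&irq->cpu_mask, GFP_KERNEL))
		return -ENOMEM;

	cpumask_set_cpu(cpumask_local_spread(ring, node), irq->cpu_mask);
	irq->have_cpumask = 1;
	/* Advisory only: irqbalance may honor or ignore the hint. */
	return irq_set_affinity_hint(irq->vector, irq->cpu_mask);
}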
7294static void bnxt_del_napi(struct bnxt *bp)
7295{
7296 int i;
7297
7298 if (!bp->bnapi)
7299 return;
7300
7301 for (i = 0; i < bp->cp_nr_rings; i++) {
7302 struct bnxt_napi *bnapi = bp->bnapi[i];
7303
7304 napi_hash_del(&bnapi->napi);
7305 netif_napi_del(&bnapi->napi);
7306 }
e5f6f564
ED
7307 /* We called napi_hash_del() before netif_napi_del(), so we need
7308 * to respect an RCU grace period before freeing napi structures.
7309 */
7310 synchronize_net();
c0c050c5
MC
7311}
7312
7313static void bnxt_init_napi(struct bnxt *bp)
7314{
7315 int i;
10bbdaf5 7316 unsigned int cp_nr_rings = bp->cp_nr_rings;
c0c050c5
MC
7317 struct bnxt_napi *bnapi;
7318
7319 if (bp->flags & BNXT_FLAG_USING_MSIX) {
0fcec985
MC
7320 int (*poll_fn)(struct napi_struct *, int) = bnxt_poll;
7321
7322 if (bp->flags & BNXT_FLAG_CHIP_P5)
7323 poll_fn = bnxt_poll_p5;
7324 else if (BNXT_CHIP_TYPE_NITRO_A0(bp))
10bbdaf5
PS
7325 cp_nr_rings--;
7326 for (i = 0; i < cp_nr_rings; i++) {
c0c050c5 7327 bnapi = bp->bnapi[i];
0fcec985 7328 netif_napi_add(bp->dev, &bnapi->napi, poll_fn, 64);
c0c050c5 7329 }
10bbdaf5
PS
7330 if (BNXT_CHIP_TYPE_NITRO_A0(bp)) {
7331 bnapi = bp->bnapi[cp_nr_rings];
7332 netif_napi_add(bp->dev, &bnapi->napi,
7333 bnxt_poll_nitroa0, 64);
10bbdaf5 7334 }
c0c050c5
MC
7335 } else {
7336 bnapi = bp->bnapi[0];
7337 netif_napi_add(bp->dev, &bnapi->napi, bnxt_poll, 64);
c0c050c5
MC
7338 }
7339}
7340
7341static void bnxt_disable_napi(struct bnxt *bp)
7342{
7343 int i;
7344
7345 if (!bp->bnapi)
7346 return;
7347
0bc0b97f
AG
7348 for (i = 0; i < bp->cp_nr_rings; i++) {
7349 struct bnxt_cp_ring_info *cpr = &bp->bnapi[i]->cp_ring;
7350
7351 if (bp->bnapi[i]->rx_ring)
7352 cancel_work_sync(&cpr->dim.work);
7353
c0c050c5 7354 napi_disable(&bp->bnapi[i]->napi);
0bc0b97f 7355 }
c0c050c5
MC
7356}
7357
7358static void bnxt_enable_napi(struct bnxt *bp)
7359{
7360 int i;
7361
7362 for (i = 0; i < bp->cp_nr_rings; i++) {
6a8788f2 7363 struct bnxt_cp_ring_info *cpr = &bp->bnapi[i]->cp_ring;
fa7e2812 7364 bp->bnapi[i]->in_reset = false;
6a8788f2
AG
7365
7366 if (bp->bnapi[i]->rx_ring) {
7367 INIT_WORK(&cpr->dim.work, bnxt_dim_work);
7368 cpr->dim.mode = NET_DIM_CQ_PERIOD_MODE_START_FROM_EQE;
7369 }
c0c050c5
MC
7370 napi_enable(&bp->bnapi[i]->napi);
7371 }
7372}
7373
7df4ae9f 7374void bnxt_tx_disable(struct bnxt *bp)
c0c050c5
MC
7375{
7376 int i;
c0c050c5 7377 struct bnxt_tx_ring_info *txr;
c0c050c5 7378
b6ab4b01 7379 if (bp->tx_ring) {
c0c050c5 7380 for (i = 0; i < bp->tx_nr_rings; i++) {
b6ab4b01 7381 txr = &bp->tx_ring[i];
c0c050c5 7382 txr->dev_state = BNXT_DEV_STATE_CLOSING;
c0c050c5
MC
7383 }
7384 }
7385 /* Stop all TX queues */
7386 netif_tx_disable(bp->dev);
7387 netif_carrier_off(bp->dev);
7388}
7389
7df4ae9f 7390void bnxt_tx_enable(struct bnxt *bp)
c0c050c5
MC
7391{
7392 int i;
c0c050c5 7393 struct bnxt_tx_ring_info *txr;
c0c050c5
MC
7394
7395 for (i = 0; i < bp->tx_nr_rings; i++) {
b6ab4b01 7396 txr = &bp->tx_ring[i];
c0c050c5
MC
7397 txr->dev_state = 0;
7398 }
7399 netif_tx_wake_all_queues(bp->dev);
7400 if (bp->link_info.link_up)
7401 netif_carrier_on(bp->dev);
7402}
7403
7404static void bnxt_report_link(struct bnxt *bp)
7405{
7406 if (bp->link_info.link_up) {
7407 const char *duplex;
7408 const char *flow_ctrl;
38a21b34
DK
7409 u32 speed;
7410 u16 fec;
c0c050c5
MC
7411
7412 netif_carrier_on(bp->dev);
7413 if (bp->link_info.duplex == BNXT_LINK_DUPLEX_FULL)
7414 duplex = "full";
7415 else
7416 duplex = "half";
7417 if (bp->link_info.pause == BNXT_LINK_PAUSE_BOTH)
7418 flow_ctrl = "ON - receive & transmit";
7419 else if (bp->link_info.pause == BNXT_LINK_PAUSE_TX)
7420 flow_ctrl = "ON - transmit";
7421 else if (bp->link_info.pause == BNXT_LINK_PAUSE_RX)
7422 flow_ctrl = "ON - receive";
7423 else
7424 flow_ctrl = "none";
7425 speed = bnxt_fw_to_ethtool_speed(bp->link_info.link_speed);
38a21b34 7426 netdev_info(bp->dev, "NIC Link is Up, %u Mbps %s duplex, Flow control: %s\n",
c0c050c5 7427 speed, duplex, flow_ctrl);
170ce013
MC
7428 if (bp->flags & BNXT_FLAG_EEE_CAP)
7429 netdev_info(bp->dev, "EEE is %s\n",
7430 bp->eee.eee_active ? "active" :
7431 "not active");
e70c752f
MC
7432 fec = bp->link_info.fec_cfg;
7433 if (!(fec & PORT_PHY_QCFG_RESP_FEC_CFG_FEC_NONE_SUPPORTED))
7434 netdev_info(bp->dev, "FEC autoneg %s encodings: %s\n",
7435 (fec & BNXT_FEC_AUTONEG) ? "on" : "off",
7436 (fec & BNXT_FEC_ENC_BASE_R) ? "BaseR" :
7437 (fec & BNXT_FEC_ENC_RS) ? "RS" : "None");
c0c050c5
MC
7438 } else {
7439 netif_carrier_off(bp->dev);
7440 netdev_err(bp->dev, "NIC Link is Down\n");
7441 }
7442}
7443
170ce013
MC
7444static int bnxt_hwrm_phy_qcaps(struct bnxt *bp)
7445{
7446 int rc = 0;
7447 struct hwrm_port_phy_qcaps_input req = {0};
7448 struct hwrm_port_phy_qcaps_output *resp = bp->hwrm_cmd_resp_addr;
93ed8117 7449 struct bnxt_link_info *link_info = &bp->link_info;
170ce013
MC
7450
7451 if (bp->hwrm_spec_code < 0x10201)
7452 return 0;
7453
7454 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_PORT_PHY_QCAPS, -1, -1);
7455
7456 mutex_lock(&bp->hwrm_cmd_lock);
7457 rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
7458 if (rc)
7459 goto hwrm_phy_qcaps_exit;
7460
acb20054 7461 if (resp->flags & PORT_PHY_QCAPS_RESP_FLAGS_EEE_SUPPORTED) {
170ce013
MC
7462 struct ethtool_eee *eee = &bp->eee;
7463 u16 fw_speeds = le16_to_cpu(resp->supported_speeds_eee_mode);
7464
7465 bp->flags |= BNXT_FLAG_EEE_CAP;
7466 eee->supported = _bnxt_fw_to_ethtool_adv_spds(fw_speeds, 0);
7467 bp->lpi_tmr_lo = le32_to_cpu(resp->tx_lpi_timer_low) &
7468 PORT_PHY_QCAPS_RESP_TX_LPI_TIMER_LOW_MASK;
7469 bp->lpi_tmr_hi = le32_to_cpu(resp->valid_tx_lpi_timer_high) &
7470 PORT_PHY_QCAPS_RESP_TX_LPI_TIMER_HIGH_MASK;
7471 }
55fd0cf3
MC
7472 if (resp->flags & PORT_PHY_QCAPS_RESP_FLAGS_EXTERNAL_LPBK_SUPPORTED) {
7473 if (bp->test_info)
7474 bp->test_info->flags |= BNXT_TEST_FL_EXT_LPBK;
7475 }
520ad89a
MC
7476 if (resp->supported_speeds_auto_mode)
7477 link_info->support_auto_speeds =
7478 le16_to_cpu(resp->supported_speeds_auto_mode);
170ce013 7479
d5430d31
MC
7480 bp->port_count = resp->port_cnt;
7481
170ce013
MC
7482hwrm_phy_qcaps_exit:
7483 mutex_unlock(&bp->hwrm_cmd_lock);
7484 return rc;
7485}
7486
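/* Example: the HWRM firmware-call pattern used throughout this file,
 * reduced to its essentials.  The request struct is zeroed on the
 * stack and stamped by bnxt_hwrm_cmd_hdr_init(); hwrm_cmd_lock must be
 * held for as long as the shared response buffer
 * (bp->hwrm_cmd_resp_addr) is being read, so results are copied out
 * under the lock.  demo_query_port_cnt() is a hypothetical sketch over
 * the existing helpers, not a new API.
 */
static int demo_query_port_cnt(struct bnxt *bp, u8 *port_cnt)
{
	struct hwrm_port_phy_qcaps_output *resp = bp->hwrm_cmd_resp_addr;
	struct hwrm_port_phy_qcaps_input req = {0};
	int rc;

	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_PORT_PHY_QCAPS, -1, -1);
	mutex_lock(&bp->hwrm_cmd_lock);
	rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
	if (!rc)
		*port_cnt = resp->port_cnt;	/* copy out under the lock */
	mutex_unlock(&bp->hwrm_cmd_lock);
	return rc;
}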
c0c050c5
MC
7487static int bnxt_update_link(struct bnxt *bp, bool chng_link_state)
7488{
7489 int rc = 0;
7490 struct bnxt_link_info *link_info = &bp->link_info;
7491 struct hwrm_port_phy_qcfg_input req = {0};
7492 struct hwrm_port_phy_qcfg_output *resp = bp->hwrm_cmd_resp_addr;
7493 u8 link_up = link_info->link_up;
286ef9d6 7494 u16 diff;
c0c050c5
MC
7495
7496 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_PORT_PHY_QCFG, -1, -1);
7497
7498 mutex_lock(&bp->hwrm_cmd_lock);
7499 rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
7500 if (rc) {
7501 mutex_unlock(&bp->hwrm_cmd_lock);
7502 return rc;
7503 }
7504
7505 memcpy(&link_info->phy_qcfg_resp, resp, sizeof(*resp));
7506 link_info->phy_link_status = resp->link;
acb20054
MC
7507 link_info->duplex = resp->duplex_cfg;
7508 if (bp->hwrm_spec_code >= 0x10800)
7509 link_info->duplex = resp->duplex_state;
c0c050c5
MC
7510 link_info->pause = resp->pause;
7511 link_info->auto_mode = resp->auto_mode;
7512 link_info->auto_pause_setting = resp->auto_pause;
3277360e 7513 link_info->lp_pause = resp->link_partner_adv_pause;
c0c050c5 7514 link_info->force_pause_setting = resp->force_pause;
acb20054 7515 link_info->duplex_setting = resp->duplex_cfg;
c0c050c5
MC
7516 if (link_info->phy_link_status == BNXT_LINK_LINK)
7517 link_info->link_speed = le16_to_cpu(resp->link_speed);
7518 else
7519 link_info->link_speed = 0;
7520 link_info->force_link_speed = le16_to_cpu(resp->force_link_speed);
c0c050c5
MC
7521 link_info->support_speeds = le16_to_cpu(resp->support_speeds);
7522 link_info->auto_link_speeds = le16_to_cpu(resp->auto_link_speed_mask);
3277360e
MC
7523 link_info->lp_auto_link_speeds =
7524 le16_to_cpu(resp->link_partner_adv_speeds);
c0c050c5
MC
7525 link_info->preemphasis = le32_to_cpu(resp->preemphasis);
7526 link_info->phy_ver[0] = resp->phy_maj;
7527 link_info->phy_ver[1] = resp->phy_min;
7528 link_info->phy_ver[2] = resp->phy_bld;
7529 link_info->media_type = resp->media_type;
03efbec0 7530 link_info->phy_type = resp->phy_type;
11f15ed3 7531 link_info->transceiver = resp->xcvr_pkg_type;
170ce013
MC
7532 link_info->phy_addr = resp->eee_config_phy_addr &
7533 PORT_PHY_QCFG_RESP_PHY_ADDR_MASK;
42ee18fe 7534 link_info->module_status = resp->module_status;
170ce013
MC
7535
7536 if (bp->flags & BNXT_FLAG_EEE_CAP) {
7537 struct ethtool_eee *eee = &bp->eee;
7538 u16 fw_speeds;
7539
7540 eee->eee_active = 0;
7541 if (resp->eee_config_phy_addr &
7542 PORT_PHY_QCFG_RESP_EEE_CONFIG_EEE_ACTIVE) {
7543 eee->eee_active = 1;
7544 fw_speeds = le16_to_cpu(
7545 resp->link_partner_adv_eee_link_speed_mask);
7546 eee->lp_advertised =
7547 _bnxt_fw_to_ethtool_adv_spds(fw_speeds, 0);
7548 }
7549
7550 /* Pull initial EEE config */
7551 if (!chng_link_state) {
7552 if (resp->eee_config_phy_addr &
7553 PORT_PHY_QCFG_RESP_EEE_CONFIG_EEE_ENABLED)
7554 eee->eee_enabled = 1;
c0c050c5 7555
170ce013
MC
7556 fw_speeds = le16_to_cpu(resp->adv_eee_link_speed_mask);
7557 eee->advertised =
7558 _bnxt_fw_to_ethtool_adv_spds(fw_speeds, 0);
7559
7560 if (resp->eee_config_phy_addr &
7561 PORT_PHY_QCFG_RESP_EEE_CONFIG_EEE_TX_LPI) {
7562 __le32 tmr;
7563
7564 eee->tx_lpi_enabled = 1;
7565 tmr = resp->xcvr_identifier_type_tx_lpi_timer;
7566 eee->tx_lpi_timer = le32_to_cpu(tmr) &
7567 PORT_PHY_QCFG_RESP_TX_LPI_TIMER_MASK;
7568 }
7569 }
7570 }
e70c752f
MC
7571
7572 link_info->fec_cfg = PORT_PHY_QCFG_RESP_FEC_CFG_FEC_NONE_SUPPORTED;
7573 if (bp->hwrm_spec_code >= 0x10504)
7574 link_info->fec_cfg = le16_to_cpu(resp->fec_cfg);
7575
c0c050c5
MC
7576 /* TODO: need to add more logic to report VF link */
7577 if (chng_link_state) {
7578 if (link_info->phy_link_status == BNXT_LINK_LINK)
7579 link_info->link_up = 1;
7580 else
7581 link_info->link_up = 0;
7582 if (link_up != link_info->link_up)
7583 bnxt_report_link(bp);
7584 } else {
7585 /* always link down if not required to update link state */
7586 link_info->link_up = 0;
7587 }
7588 mutex_unlock(&bp->hwrm_cmd_lock);
286ef9d6 7589
dac04907
MC
7590 if (!BNXT_SINGLE_PF(bp))
7591 return 0;
7592
286ef9d6
MC
7593 diff = link_info->support_auto_speeds ^ link_info->advertising;
7594 if ((link_info->support_auto_speeds | diff) !=
7595 link_info->support_auto_speeds) {
7596 /* An advertised speed is no longer supported, so we need to
0eaa24b9
MC
7597 * update the advertisement settings. Caller holds RTNL
7598 * so we can modify link settings.
286ef9d6 7599 */
286ef9d6 7600 link_info->advertising = link_info->support_auto_speeds;
0eaa24b9 7601 if (link_info->autoneg & BNXT_AUTONEG_SPEED)
286ef9d6 7602 bnxt_hwrm_set_link_setting(bp, true, false);
286ef9d6 7603 }
c0c050c5
MC
7604 return 0;
7605}
7606
10289bec
MC
7607static void bnxt_get_port_module_status(struct bnxt *bp)
7608{
7609 struct bnxt_link_info *link_info = &bp->link_info;
7610 struct hwrm_port_phy_qcfg_output *resp = &link_info->phy_qcfg_resp;
7611 u8 module_status;
7612
7613 if (bnxt_update_link(bp, true))
7614 return;
7615
7616 module_status = link_info->module_status;
7617 switch (module_status) {
7618 case PORT_PHY_QCFG_RESP_MODULE_STATUS_DISABLETX:
7619 case PORT_PHY_QCFG_RESP_MODULE_STATUS_PWRDOWN:
7620 case PORT_PHY_QCFG_RESP_MODULE_STATUS_WARNINGMSG:
7621 netdev_warn(bp->dev, "Unqualified SFP+ module detected on port %d\n",
7622 bp->pf.port_id);
7623 if (bp->hwrm_spec_code >= 0x10201) {
7624 netdev_warn(bp->dev, "Module part number %s\n",
7625 resp->phy_vendor_partnumber);
7626 }
7627 if (module_status == PORT_PHY_QCFG_RESP_MODULE_STATUS_DISABLETX)
7628 netdev_warn(bp->dev, "TX is disabled\n");
7629 if (module_status == PORT_PHY_QCFG_RESP_MODULE_STATUS_PWRDOWN)
7630 netdev_warn(bp->dev, "SFP+ module is shutdown\n");
7631 }
7632}
7633
c0c050c5
MC
7634static void
7635bnxt_hwrm_set_pause_common(struct bnxt *bp, struct hwrm_port_phy_cfg_input *req)
7636{
7637 if (bp->link_info.autoneg & BNXT_AUTONEG_FLOW_CTRL) {
c9ee9516
MC
7638 if (bp->hwrm_spec_code >= 0x10201)
7639 req->auto_pause =
7640 PORT_PHY_CFG_REQ_AUTO_PAUSE_AUTONEG_PAUSE;
c0c050c5
MC
7641 if (bp->link_info.req_flow_ctrl & BNXT_LINK_PAUSE_RX)
7642 req->auto_pause |= PORT_PHY_CFG_REQ_AUTO_PAUSE_RX;
7643 if (bp->link_info.req_flow_ctrl & BNXT_LINK_PAUSE_TX)
49b5c7a1 7644 req->auto_pause |= PORT_PHY_CFG_REQ_AUTO_PAUSE_TX;
c0c050c5
MC
7645 req->enables |=
7646 cpu_to_le32(PORT_PHY_CFG_REQ_ENABLES_AUTO_PAUSE);
7647 } else {
7648 if (bp->link_info.req_flow_ctrl & BNXT_LINK_PAUSE_RX)
7649 req->force_pause |= PORT_PHY_CFG_REQ_FORCE_PAUSE_RX;
7650 if (bp->link_info.req_flow_ctrl & BNXT_LINK_PAUSE_TX)
7651 req->force_pause |= PORT_PHY_CFG_REQ_FORCE_PAUSE_TX;
7652 req->enables |=
7653 cpu_to_le32(PORT_PHY_CFG_REQ_ENABLES_FORCE_PAUSE);
c9ee9516
MC
7654 if (bp->hwrm_spec_code >= 0x10201) {
7655 req->auto_pause = req->force_pause;
7656 req->enables |= cpu_to_le32(
7657 PORT_PHY_CFG_REQ_ENABLES_AUTO_PAUSE);
7658 }
c0c050c5
MC
7659 }
7660}
7661
7662static void bnxt_hwrm_set_link_common(struct bnxt *bp,
7663 struct hwrm_port_phy_cfg_input *req)
7664{
7665 u8 autoneg = bp->link_info.autoneg;
7666 u16 fw_link_speed = bp->link_info.req_link_speed;
68515a18 7667 u16 advertising = bp->link_info.advertising;
c0c050c5
MC
7668
7669 if (autoneg & BNXT_AUTONEG_SPEED) {
7670 req->auto_mode |=
11f15ed3 7671 PORT_PHY_CFG_REQ_AUTO_MODE_SPEED_MASK;
c0c050c5
MC
7672
7673 req->enables |= cpu_to_le32(
7674 PORT_PHY_CFG_REQ_ENABLES_AUTO_LINK_SPEED_MASK);
7675 req->auto_link_speed_mask = cpu_to_le16(advertising);
7676
7677 req->enables |= cpu_to_le32(PORT_PHY_CFG_REQ_ENABLES_AUTO_MODE);
7678 req->flags |=
7679 cpu_to_le32(PORT_PHY_CFG_REQ_FLAGS_RESTART_AUTONEG);
7680 } else {
7681 req->force_link_speed = cpu_to_le16(fw_link_speed);
7682 req->flags |= cpu_to_le32(PORT_PHY_CFG_REQ_FLAGS_FORCE);
7683 }
7684
c0c050c5
MC
7685 /* tell chimp that the setting takes effect immediately */
7686 req->flags |= cpu_to_le32(PORT_PHY_CFG_REQ_FLAGS_RESET_PHY);
7687}
7688
7689int bnxt_hwrm_set_pause(struct bnxt *bp)
7690{
7691 struct hwrm_port_phy_cfg_input req = {0};
7692 int rc;
7693
7694 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_PORT_PHY_CFG, -1, -1);
7695 bnxt_hwrm_set_pause_common(bp, &req);
7696
7697 if ((bp->link_info.autoneg & BNXT_AUTONEG_FLOW_CTRL) ||
7698 bp->link_info.force_link_chng)
7699 bnxt_hwrm_set_link_common(bp, &req);
7700
7701 mutex_lock(&bp->hwrm_cmd_lock);
7702 rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
7703 if (!rc && !(bp->link_info.autoneg & BNXT_AUTONEG_FLOW_CTRL)) {
7704 /* since changing the pause setting doesn't trigger any link
7705 * change event, the driver needs to update the current pause
7706 * result upon successful return of the phy_cfg command
7707 */
7708 bp->link_info.pause =
7709 bp->link_info.force_pause_setting = bp->link_info.req_flow_ctrl;
7710 bp->link_info.auto_pause_setting = 0;
7711 if (!bp->link_info.force_link_chng)
7712 bnxt_report_link(bp);
7713 }
7714 bp->link_info.force_link_chng = false;
7715 mutex_unlock(&bp->hwrm_cmd_lock);
7716 return rc;
7717}
7718
939f7f0c
MC
7719static void bnxt_hwrm_set_eee(struct bnxt *bp,
7720 struct hwrm_port_phy_cfg_input *req)
7721{
7722 struct ethtool_eee *eee = &bp->eee;
7723
7724 if (eee->eee_enabled) {
7725 u16 eee_speeds;
7726 u32 flags = PORT_PHY_CFG_REQ_FLAGS_EEE_ENABLE;
7727
7728 if (eee->tx_lpi_enabled)
7729 flags |= PORT_PHY_CFG_REQ_FLAGS_EEE_TX_LPI_ENABLE;
7730 else
7731 flags |= PORT_PHY_CFG_REQ_FLAGS_EEE_TX_LPI_DISABLE;
7732
7733 req->flags |= cpu_to_le32(flags);
7734 eee_speeds = bnxt_get_fw_auto_link_speeds(eee->advertised);
7735 req->eee_link_speed_mask = cpu_to_le16(eee_speeds);
7736 req->tx_lpi_timer = cpu_to_le32(eee->tx_lpi_timer);
7737 } else {
7738 req->flags |= cpu_to_le32(PORT_PHY_CFG_REQ_FLAGS_EEE_DISABLE);
7739 }
7740}
7741
7742int bnxt_hwrm_set_link_setting(struct bnxt *bp, bool set_pause, bool set_eee)
c0c050c5
MC
7743{
7744 struct hwrm_port_phy_cfg_input req = {0};
7745
7746 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_PORT_PHY_CFG, -1, -1);
7747 if (set_pause)
7748 bnxt_hwrm_set_pause_common(bp, &req);
7749
7750 bnxt_hwrm_set_link_common(bp, &req);
939f7f0c
MC
7751
7752 if (set_eee)
7753 bnxt_hwrm_set_eee(bp, &req);
c0c050c5
MC
7754 return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
7755}
7756
33f7d55f
MC
7757static int bnxt_hwrm_shutdown_link(struct bnxt *bp)
7758{
7759 struct hwrm_port_phy_cfg_input req = {0};
7760
567b2abe 7761 if (!BNXT_SINGLE_PF(bp))
33f7d55f
MC
7762 return 0;
7763
7764 if (pci_num_vf(bp->pdev))
7765 return 0;
7766
7767 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_PORT_PHY_CFG, -1, -1);
16d663a6 7768 req.flags = cpu_to_le32(PORT_PHY_CFG_REQ_FLAGS_FORCE_LINK_DWN);
33f7d55f
MC
7769 return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
7770}
7771
25e1acd6
MC
7772static int bnxt_hwrm_if_change(struct bnxt *bp, bool up)
7773{
7774 struct hwrm_func_drv_if_change_output *resp = bp->hwrm_cmd_resp_addr;
7775 struct hwrm_func_drv_if_change_input req = {0};
7776 bool resc_reinit = false;
7777 int rc;
7778
7779 if (!(bp->fw_cap & BNXT_FW_CAP_IF_CHANGE))
7780 return 0;
7781
7782 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_DRV_IF_CHANGE, -1, -1);
7783 if (up)
7784 req.flags = cpu_to_le32(FUNC_DRV_IF_CHANGE_REQ_FLAGS_UP);
7785 mutex_lock(&bp->hwrm_cmd_lock);
7786 rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
7787 if (!rc && (resp->flags &
7788 cpu_to_le32(FUNC_DRV_IF_CHANGE_RESP_FLAGS_RESC_CHANGE)))
7789 resc_reinit = true;
7790 mutex_unlock(&bp->hwrm_cmd_lock);
7791
7792 if (up && resc_reinit && BNXT_NEW_RM(bp)) {
7793 struct bnxt_hw_resc *hw_resc = &bp->hw_resc;
7794
7795 rc = bnxt_hwrm_func_resc_qcaps(bp, true);
7796 hw_resc->resv_cp_rings = 0;
7797 hw_resc->resv_tx_rings = 0;
7798 hw_resc->resv_rx_rings = 0;
7799 hw_resc->resv_hw_ring_grps = 0;
7800 hw_resc->resv_vnics = 0;
6b95c3e9
MC
7801 bp->tx_nr_rings = 0;
7802 bp->rx_nr_rings = 0;
25e1acd6
MC
7803 }
7804 return rc;
7805}
7806
5ad2cbee
MC
7807static int bnxt_hwrm_port_led_qcaps(struct bnxt *bp)
7808{
7809 struct hwrm_port_led_qcaps_output *resp = bp->hwrm_cmd_resp_addr;
7810 struct hwrm_port_led_qcaps_input req = {0};
7811 struct bnxt_pf_info *pf = &bp->pf;
7812 int rc;
7813
7814 if (BNXT_VF(bp) || bp->hwrm_spec_code < 0x10601)
7815 return 0;
7816
7817 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_PORT_LED_QCAPS, -1, -1);
7818 req.port_id = cpu_to_le16(pf->port_id);
7819 mutex_lock(&bp->hwrm_cmd_lock);
7820 rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
7821 if (rc) {
7822 mutex_unlock(&bp->hwrm_cmd_lock);
7823 return rc;
7824 }
7825 if (resp->num_leds > 0 && resp->num_leds < BNXT_MAX_LED) {
7826 int i;
7827
7828 bp->num_leds = resp->num_leds;
7829 memcpy(bp->leds, &resp->led0_id, sizeof(bp->leds[0]) *
7830 bp->num_leds);
7831 for (i = 0; i < bp->num_leds; i++) {
7832 struct bnxt_led_info *led = &bp->leds[i];
7833 __le16 caps = led->led_state_caps;
7834
7835 if (!led->led_group_id ||
7836 !BNXT_LED_ALT_BLINK_CAP(caps)) {
7837 bp->num_leds = 0;
7838 break;
7839 }
7840 }
7841 }
7842 mutex_unlock(&bp->hwrm_cmd_lock);
7843 return 0;
7844}
7845
5282db6c
MC
7846int bnxt_hwrm_alloc_wol_fltr(struct bnxt *bp)
7847{
7848 struct hwrm_wol_filter_alloc_input req = {0};
7849 struct hwrm_wol_filter_alloc_output *resp = bp->hwrm_cmd_resp_addr;
7850 int rc;
7851
7852 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_WOL_FILTER_ALLOC, -1, -1);
7853 req.port_id = cpu_to_le16(bp->pf.port_id);
7854 req.wol_type = WOL_FILTER_ALLOC_REQ_WOL_TYPE_MAGICPKT;
7855 req.enables = cpu_to_le32(WOL_FILTER_ALLOC_REQ_ENABLES_MAC_ADDRESS);
7856 memcpy(req.mac_address, bp->dev->dev_addr, ETH_ALEN);
7857 mutex_lock(&bp->hwrm_cmd_lock);
7858 rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
7859 if (!rc)
7860 bp->wol_filter_id = resp->wol_filter_id;
7861 mutex_unlock(&bp->hwrm_cmd_lock);
7862 return rc;
7863}
7864
7865int bnxt_hwrm_free_wol_fltr(struct bnxt *bp)
7866{
7867 struct hwrm_wol_filter_free_input req = {0};
7868 int rc;
7869
7870 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_WOL_FILTER_FREE, -1, -1);
7871 req.port_id = cpu_to_le16(bp->pf.port_id);
7872 req.enables = cpu_to_le32(WOL_FILTER_FREE_REQ_ENABLES_WOL_FILTER_ID);
7873 req.wol_filter_id = bp->wol_filter_id;
7874 rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
7875 return rc;
7876}
7877
c1ef146a
MC
7878static u16 bnxt_hwrm_get_wol_fltrs(struct bnxt *bp, u16 handle)
7879{
7880 struct hwrm_wol_filter_qcfg_input req = {0};
7881 struct hwrm_wol_filter_qcfg_output *resp = bp->hwrm_cmd_resp_addr;
7882 u16 next_handle = 0;
7883 int rc;
7884
7885 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_WOL_FILTER_QCFG, -1, -1);
7886 req.port_id = cpu_to_le16(bp->pf.port_id);
7887 req.handle = cpu_to_le16(handle);
7888 mutex_lock(&bp->hwrm_cmd_lock);
7889 rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
7890 if (!rc) {
7891 next_handle = le16_to_cpu(resp->next_handle);
7892 if (next_handle != 0) {
7893 if (resp->wol_type ==
7894 WOL_FILTER_ALLOC_REQ_WOL_TYPE_MAGICPKT) {
7895 bp->wol = 1;
7896 bp->wol_filter_id = resp->wol_filter_id;
7897 }
7898 }
7899 }
7900 mutex_unlock(&bp->hwrm_cmd_lock);
7901 return next_handle;
7902}
7903
7904static void bnxt_get_wol_settings(struct bnxt *bp)
7905{
7906 u16 handle = 0;
7907
7908 if (!BNXT_PF(bp) || !(bp->flags & BNXT_FLAG_WOL_CAP))
7909 return;
7910
7911 do {
7912 handle = bnxt_hwrm_get_wol_fltrs(bp, handle);
7913 } while (handle && handle != 0xffff);
7914}
7915
cde49a42
VV
7916#ifdef CONFIG_BNXT_HWMON
7917static ssize_t bnxt_show_temp(struct device *dev,
7918 struct device_attribute *devattr, char *buf)
7919{
7920 struct hwrm_temp_monitor_query_input req = {0};
7921 struct hwrm_temp_monitor_query_output *resp;
7922 struct bnxt *bp = dev_get_drvdata(dev);
7923 u32 temp = 0;
7924
7925 resp = bp->hwrm_cmd_resp_addr;
7926 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_TEMP_MONITOR_QUERY, -1, -1);
7927 mutex_lock(&bp->hwrm_cmd_lock);
7928 if (!_hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT))
7929 temp = resp->temp * 1000; /* display millidegree */
7930 mutex_unlock(&bp->hwrm_cmd_lock);
7931
7932 return sprintf(buf, "%u\n", temp);
7933}
7934static SENSOR_DEVICE_ATTR(temp1_input, 0444, bnxt_show_temp, NULL, 0);
7935
7936static struct attribute *bnxt_attrs[] = {
7937 &sensor_dev_attr_temp1_input.dev_attr.attr,
7938 NULL
7939};
7940ATTRIBUTE_GROUPS(bnxt);
7941
7942static void bnxt_hwmon_close(struct bnxt *bp)
7943{
7944 if (bp->hwmon_dev) {
7945 hwmon_device_unregister(bp->hwmon_dev);
7946 bp->hwmon_dev = NULL;
7947 }
7948}
7949
7950static void bnxt_hwmon_open(struct bnxt *bp)
7951{
7952 struct pci_dev *pdev = bp->pdev;
7953
7954 bp->hwmon_dev = hwmon_device_register_with_groups(&pdev->dev,
7955 DRV_MODULE_NAME, bp,
7956 bnxt_groups);
7957 if (IS_ERR(bp->hwmon_dev)) {
7958 bp->hwmon_dev = NULL;
7959 dev_warn(&pdev->dev, "Cannot register hwmon device\n");
7960 }
7961}
7962#else
7963static void bnxt_hwmon_close(struct bnxt *bp)
7964{
7965}
7966
7967static void bnxt_hwmon_open(struct bnxt *bp)
7968{
7969}
7970#endif
7971
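/* Example: the temp1_input attribute registered above follows the
 * hwmon sysfs convention and reports millidegrees Celsius.  A minimal
 * standalone userspace reader (the hwmon0 index is illustrative; the
 * actual index is assigned at registration time):
 */
#include <stdio.h>

int main(void)
{
	unsigned int mdeg;
	FILE *f = fopen("/sys/class/hwmon/hwmon0/temp1_input", "r");

	if (!f)
		return 1;
	if (fscanf(f, "%u", &mdeg) != 1) {
		fclose(f);
		return 1;
	}
	fclose(f);
	printf("%u.%03u C\n", mdeg / 1000, mdeg % 1000);
	return 0;
}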
939f7f0c
MC
7972static bool bnxt_eee_config_ok(struct bnxt *bp)
7973{
7974 struct ethtool_eee *eee = &bp->eee;
7975 struct bnxt_link_info *link_info = &bp->link_info;
7976
7977 if (!(bp->flags & BNXT_FLAG_EEE_CAP))
7978 return true;
7979
7980 if (eee->eee_enabled) {
7981 u32 advertising =
7982 _bnxt_fw_to_ethtool_adv_spds(link_info->advertising, 0);
7983
7984 if (!(link_info->autoneg & BNXT_AUTONEG_SPEED)) {
7985 eee->eee_enabled = 0;
7986 return false;
7987 }
7988 if (eee->advertised & ~advertising) {
7989 eee->advertised = advertising & eee->supported;
7990 return false;
7991 }
7992 }
7993 return true;
7994}
7995
c0c050c5
MC
7996static int bnxt_update_phy_setting(struct bnxt *bp)
7997{
7998 int rc;
7999 bool update_link = false;
8000 bool update_pause = false;
939f7f0c 8001 bool update_eee = false;
c0c050c5
MC
8002 struct bnxt_link_info *link_info = &bp->link_info;
8003
8004 rc = bnxt_update_link(bp, true);
8005 if (rc) {
8006 netdev_err(bp->dev, "failed to update link (rc: %x)\n",
8007 rc);
8008 return rc;
8009 }
33dac24a
MC
8010 if (!BNXT_SINGLE_PF(bp))
8011 return 0;
8012
c0c050c5 8013 if ((link_info->autoneg & BNXT_AUTONEG_FLOW_CTRL) &&
c9ee9516
MC
8014 (link_info->auto_pause_setting & BNXT_LINK_PAUSE_BOTH) !=
8015 link_info->req_flow_ctrl)
c0c050c5
MC
8016 update_pause = true;
8017 if (!(link_info->autoneg & BNXT_AUTONEG_FLOW_CTRL) &&
8018 link_info->force_pause_setting != link_info->req_flow_ctrl)
8019 update_pause = true;
c0c050c5
MC
8020 if (!(link_info->autoneg & BNXT_AUTONEG_SPEED)) {
8021 if (BNXT_AUTO_MODE(link_info->auto_mode))
8022 update_link = true;
8023 if (link_info->req_link_speed != link_info->force_link_speed)
8024 update_link = true;
de73018f
MC
8025 if (link_info->req_duplex != link_info->duplex_setting)
8026 update_link = true;
c0c050c5
MC
8027 } else {
8028 if (link_info->auto_mode == BNXT_LINK_AUTO_NONE)
8029 update_link = true;
8030 if (link_info->advertising != link_info->auto_link_speeds)
8031 update_link = true;
c0c050c5
MC
8032 }
8033
16d663a6
MC
8034 /* The last close may have shut down the link, so we need to call
8035 * PHY_CFG to bring it back up.
8036 */
8037 if (!netif_carrier_ok(bp->dev))
8038 update_link = true;
8039
939f7f0c
MC
8040 if (!bnxt_eee_config_ok(bp))
8041 update_eee = true;
8042
c0c050c5 8043 if (update_link)
939f7f0c 8044 rc = bnxt_hwrm_set_link_setting(bp, update_pause, update_eee);
c0c050c5
MC
8045 else if (update_pause)
8046 rc = bnxt_hwrm_set_pause(bp);
8047 if (rc) {
8048 netdev_err(bp->dev, "failed to update phy setting (rc: %x)\n",
8049 rc);
8050 return rc;
8051 }
8052
8053 return rc;
8054}
8055
11809490
JH
8056/* Common routine to pre-map certain register block to different GRC window.
8057 * A PF has 16 4K windows and a VF has 4 4K windows. However, only 15 windows
8058 * in the PF and 3 windows in the VF can be customized to map in different
8059 * register blocks.
8060 */
8061static void bnxt_preset_reg_win(struct bnxt *bp)
8062{
8063 if (BNXT_PF(bp)) {
8064 /* CAG registers map to GRC window #4 */
8065 writel(BNXT_CAG_REG_BASE,
8066 bp->bar0 + BNXT_GRCPF_REG_WINDOW_BASE_OUT + 12);
8067 }
8068}
8069
47558acd
MC
8070static int bnxt_init_dflt_ring_mode(struct bnxt *bp);
8071
c0c050c5
MC
8072static int __bnxt_open_nic(struct bnxt *bp, bool irq_re_init, bool link_re_init)
8073{
8074 int rc = 0;
8075
11809490 8076 bnxt_preset_reg_win(bp);
c0c050c5
MC
8077 netif_carrier_off(bp->dev);
8078 if (irq_re_init) {
47558acd
MC
8079 /* Reserve rings now if none were reserved at driver probe. */
8080 rc = bnxt_init_dflt_ring_mode(bp);
8081 if (rc) {
8082 netdev_err(bp->dev, "Failed to reserve default rings at open\n");
8083 return rc;
8084 }
c0c050c5 8085 }
41e8d798
MC
8086 rc = bnxt_reserve_rings(bp);
8087 if (rc)
8088 return rc;
c0c050c5
MC
8089 if ((bp->flags & BNXT_FLAG_RFS) &&
8090 !(bp->flags & BNXT_FLAG_USING_MSIX)) {
8091 /* disable RFS if falling back to INTA */
8092 bp->dev->hw_features &= ~NETIF_F_NTUPLE;
8093 bp->flags &= ~BNXT_FLAG_RFS;
8094 }
8095
8096 rc = bnxt_alloc_mem(bp, irq_re_init);
8097 if (rc) {
8098 netdev_err(bp->dev, "bnxt_alloc_mem err: %x\n", rc);
8099 goto open_err_free_mem;
8100 }
8101
8102 if (irq_re_init) {
8103 bnxt_init_napi(bp);
8104 rc = bnxt_request_irq(bp);
8105 if (rc) {
8106 netdev_err(bp->dev, "bnxt_request_irq err: %x\n", rc);
c58387ab 8107 goto open_err_irq;
c0c050c5
MC
8108 }
8109 }
8110
8111 bnxt_enable_napi(bp);
cabfb09d 8112 bnxt_debug_dev_init(bp);
c0c050c5
MC
8113
8114 rc = bnxt_init_nic(bp, irq_re_init);
8115 if (rc) {
8116 netdev_err(bp->dev, "bnxt_init_nic err: %x\n", rc);
8117 goto open_err;
8118 }
8119
8120 if (link_re_init) {
e2dc9b6e 8121 mutex_lock(&bp->link_lock);
c0c050c5 8122 rc = bnxt_update_phy_setting(bp);
e2dc9b6e 8123 mutex_unlock(&bp->link_lock);
a1ef4a79 8124 if (rc) {
ba41d46f 8125 netdev_warn(bp->dev, "failed to update phy settings\n");
a1ef4a79
MC
8126 if (BNXT_SINGLE_PF(bp)) {
8127 bp->link_info.phy_retry = true;
8128 bp->link_info.phy_retry_expires =
8129 jiffies + 5 * HZ;
8130 }
8131 }
c0c050c5
MC
8132 }
8133
7cdd5fc3 8134 if (irq_re_init)
ad51b8e9 8135 udp_tunnel_get_rx_info(bp->dev);
c0c050c5 8136
caefe526 8137 set_bit(BNXT_STATE_OPEN, &bp->state);
c0c050c5
MC
8138 bnxt_enable_int(bp);
8139 /* Enable TX queues */
8140 bnxt_tx_enable(bp);
8141 mod_timer(&bp->timer, jiffies + bp->current_interval);
10289bec
MC
8142 /* Poll link status and check for SFP+ module status */
8143 bnxt_get_port_module_status(bp);
c0c050c5 8144
ee5c7fb3
SP
8145 /* VF-reps may need to be re-opened after the PF is re-opened */
8146 if (BNXT_PF(bp))
8147 bnxt_vf_reps_open(bp);
c0c050c5
MC
8148 return 0;
8149
8150open_err:
cabfb09d 8151 bnxt_debug_dev_exit(bp);
c0c050c5 8152 bnxt_disable_napi(bp);
c58387ab
VG
8153
8154open_err_irq:
c0c050c5
MC
8155 bnxt_del_napi(bp);
8156
8157open_err_free_mem:
8158 bnxt_free_skbs(bp);
8159 bnxt_free_irq(bp);
8160 bnxt_free_mem(bp, true);
8161 return rc;
8162}
8163
8164/* rtnl_lock held */
8165int bnxt_open_nic(struct bnxt *bp, bool irq_re_init, bool link_re_init)
8166{
8167 int rc = 0;
8168
8169 rc = __bnxt_open_nic(bp, irq_re_init, link_re_init);
8170 if (rc) {
8171 netdev_err(bp->dev, "nic open fail (rc: %x)\n", rc);
8172 dev_close(bp->dev);
8173 }
8174 return rc;
8175}
8176
f7dc1ea6
MC
8177/* rtnl_lock held, open the NIC halfway by allocating all resources, but
8178 * NAPI, IRQ, and TX are not enabled. This is mainly used for offline
8179 * self tests.
8180 */
8181int bnxt_half_open_nic(struct bnxt *bp)
8182{
8183 int rc = 0;
8184
8185 rc = bnxt_alloc_mem(bp, false);
8186 if (rc) {
8187 netdev_err(bp->dev, "bnxt_alloc_mem err: %x\n", rc);
8188 goto half_open_err;
8189 }
8190 rc = bnxt_init_nic(bp, false);
8191 if (rc) {
8192 netdev_err(bp->dev, "bnxt_init_nic err: %x\n", rc);
8193 goto half_open_err;
8194 }
8195 return 0;
8196
8197half_open_err:
8198 bnxt_free_skbs(bp);
8199 bnxt_free_mem(bp, false);
8200 dev_close(bp->dev);
8201 return rc;
8202}
8203
8204/* rtnl_lock held, this call can only be made after a previous successful
8205 * call to bnxt_half_open_nic().
8206 */
8207void bnxt_half_close_nic(struct bnxt *bp)
8208{
8209 bnxt_hwrm_resource_free(bp, false, false);
8210 bnxt_free_skbs(bp);
8211 bnxt_free_mem(bp, false);
8212}
8213
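/* Example: a hedged sketch of how the two half open/close helpers
 * above are meant to be paired by an offline self-test (flags and
 * ordering here are assumptions; see the real caller in
 * bnxt_ethtool.c).  The device is fully closed first, re-opened
 * halfway so rings and firmware state exist but no traffic flows,
 * tested, then restored.
 */
static int demo_offline_test(struct bnxt *bp)
{
	int rc;

	bnxt_close_nic(bp, true, false);	/* stop normal operation */
	rc = bnxt_half_open_nic(bp);		/* rings + FW, no NAPI/IRQ/TX */
	if (rc)
		return rc;
	/* ... run loopback tests against the quiesced rings here ... */
	bnxt_half_close_nic(bp);
	return bnxt_open_nic(bp, true, true);	/* back to normal */
}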
c0c050c5
MC
8214static int bnxt_open(struct net_device *dev)
8215{
8216 struct bnxt *bp = netdev_priv(dev);
25e1acd6 8217 int rc;
c0c050c5 8218
25e1acd6
MC
8219 bnxt_hwrm_if_change(bp, true);
8220 rc = __bnxt_open_nic(bp, true, true);
8221 if (rc)
8222 bnxt_hwrm_if_change(bp, false);
cde49a42
VV
8223
8224 bnxt_hwmon_open(bp);
8225
25e1acd6 8226 return rc;
c0c050c5
MC
8227}
8228
f9b76ebd
MC
8229static bool bnxt_drv_busy(struct bnxt *bp)
8230{
8231 return (test_bit(BNXT_STATE_IN_SP_TASK, &bp->state) ||
8232 test_bit(BNXT_STATE_READ_STATS, &bp->state));
8233}
8234
86e953db
MC
8235static void __bnxt_close_nic(struct bnxt *bp, bool irq_re_init,
8236 bool link_re_init)
c0c050c5 8237{
ee5c7fb3
SP
8238 /* Close the VF-reps before closing PF */
8239 if (BNXT_PF(bp))
8240 bnxt_vf_reps_close(bp);
86e953db 8241
c0c050c5
MC
8242 /* Change device state to avoid TX queue wake-ups */
8243 bnxt_tx_disable(bp);
8244
caefe526 8245 clear_bit(BNXT_STATE_OPEN, &bp->state);
4cebdcec 8246 smp_mb__after_atomic();
f9b76ebd 8247 while (bnxt_drv_busy(bp))
4cebdcec 8248 msleep(20);
c0c050c5 8249
9d8bc097 8250 /* Flush rings and disable interrupts */
c0c050c5
MC
8251 bnxt_shutdown_nic(bp, irq_re_init);
8252
8253 /* TODO CHIMP_FW: Link/PHY related cleanup if (link_re_init) */
8254
cabfb09d 8255 bnxt_debug_dev_exit(bp);
c0c050c5 8256 bnxt_disable_napi(bp);
c0c050c5
MC
8257 del_timer_sync(&bp->timer);
8258 bnxt_free_skbs(bp);
8259
8260 if (irq_re_init) {
8261 bnxt_free_irq(bp);
8262 bnxt_del_napi(bp);
8263 }
8264 bnxt_free_mem(bp, irq_re_init);
86e953db
MC
8265}
8266
8267int bnxt_close_nic(struct bnxt *bp, bool irq_re_init, bool link_re_init)
8268{
8269 int rc = 0;
8270
8271#ifdef CONFIG_BNXT_SRIOV
8272 if (bp->sriov_cfg) {
8273 rc = wait_event_interruptible_timeout(bp->sriov_cfg_wait,
8274 !bp->sriov_cfg,
8275 BNXT_SRIOV_CFG_WAIT_TMO);
8276 if (rc)
8277 netdev_warn(bp->dev, "timeout waiting for SRIOV config operation to complete!\n");
8278 }
8279#endif
8280 __bnxt_close_nic(bp, irq_re_init, link_re_init);
c0c050c5
MC
8281 return rc;
8282}
8283
8284static int bnxt_close(struct net_device *dev)
8285{
8286 struct bnxt *bp = netdev_priv(dev);
8287
cde49a42 8288 bnxt_hwmon_close(bp);
c0c050c5 8289 bnxt_close_nic(bp, true, true);
33f7d55f 8290 bnxt_hwrm_shutdown_link(bp);
25e1acd6 8291 bnxt_hwrm_if_change(bp, false);
c0c050c5
MC
8292 return 0;
8293}
8294
8295/* rtnl_lock held */
8296static int bnxt_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
8297{
8298 switch (cmd) {
8299 case SIOCGMIIPHY:
8300 /* fallthru */
8301 case SIOCGMIIREG: {
8302 if (!netif_running(dev))
8303 return -EAGAIN;
8304
8305 return 0;
8306 }
8307
8308 case SIOCSMIIREG:
8309 if (!netif_running(dev))
8310 return -EAGAIN;
8311
8312 return 0;
8313
8314 default:
8315 /* do nothing */
8316 break;
8317 }
8318 return -EOPNOTSUPP;
8319}
8320
bc1f4470 8321static void
c0c050c5
MC
8322bnxt_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *stats)
8323{
8324 u32 i;
8325 struct bnxt *bp = netdev_priv(dev);
8326
f9b76ebd
MC
8327 set_bit(BNXT_STATE_READ_STATS, &bp->state);
8328 /* Make sure bnxt_close_nic() sees that we are reading stats before
8329 * we check the BNXT_STATE_OPEN flag.
8330 */
8331 smp_mb__after_atomic();
8332 if (!test_bit(BNXT_STATE_OPEN, &bp->state)) {
8333 clear_bit(BNXT_STATE_READ_STATS, &bp->state);
bc1f4470 8334 return;
f9b76ebd 8335 }
c0c050c5
MC
8336
8337 /* TODO check if we need to synchronize with bnxt_close path */
8338 for (i = 0; i < bp->cp_nr_rings; i++) {
8339 struct bnxt_napi *bnapi = bp->bnapi[i];
8340 struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
8341 struct ctx_hw_stats *hw_stats = cpr->hw_stats;
8342
8343 stats->rx_packets += le64_to_cpu(hw_stats->rx_ucast_pkts);
8344 stats->rx_packets += le64_to_cpu(hw_stats->rx_mcast_pkts);
8345 stats->rx_packets += le64_to_cpu(hw_stats->rx_bcast_pkts);
8346
8347 stats->tx_packets += le64_to_cpu(hw_stats->tx_ucast_pkts);
8348 stats->tx_packets += le64_to_cpu(hw_stats->tx_mcast_pkts);
8349 stats->tx_packets += le64_to_cpu(hw_stats->tx_bcast_pkts);
8350
8351 stats->rx_bytes += le64_to_cpu(hw_stats->rx_ucast_bytes);
8352 stats->rx_bytes += le64_to_cpu(hw_stats->rx_mcast_bytes);
8353 stats->rx_bytes += le64_to_cpu(hw_stats->rx_bcast_bytes);
8354
8355 stats->tx_bytes += le64_to_cpu(hw_stats->tx_ucast_bytes);
8356 stats->tx_bytes += le64_to_cpu(hw_stats->tx_mcast_bytes);
8357 stats->tx_bytes += le64_to_cpu(hw_stats->tx_bcast_bytes);
8358
8359 stats->rx_missed_errors +=
8360 le64_to_cpu(hw_stats->rx_discard_pkts);
8361
8362 stats->multicast += le64_to_cpu(hw_stats->rx_mcast_pkts);
8363
c0c050c5
MC
8364 stats->tx_dropped += le64_to_cpu(hw_stats->tx_drop_pkts);
8365 }
8366
9947f83f
MC
8367 if (bp->flags & BNXT_FLAG_PORT_STATS) {
8368 struct rx_port_stats *rx = bp->hw_rx_port_stats;
8369 struct tx_port_stats *tx = bp->hw_tx_port_stats;
8370
8371 stats->rx_crc_errors = le64_to_cpu(rx->rx_fcs_err_frames);
8372 stats->rx_frame_errors = le64_to_cpu(rx->rx_align_err_frames);
8373 stats->rx_length_errors = le64_to_cpu(rx->rx_undrsz_frames) +
8374 le64_to_cpu(rx->rx_ovrsz_frames) +
8375 le64_to_cpu(rx->rx_runt_frames);
8376 stats->rx_errors = le64_to_cpu(rx->rx_false_carrier_frames) +
8377 le64_to_cpu(rx->rx_jbr_frames);
8378 stats->collisions = le64_to_cpu(tx->tx_total_collisions);
8379 stats->tx_fifo_errors = le64_to_cpu(tx->tx_fifo_underruns);
8380 stats->tx_errors = le64_to_cpu(tx->tx_err);
8381 }
f9b76ebd 8382 clear_bit(BNXT_STATE_READ_STATS, &bp->state);
c0c050c5
MC
8383}
8384
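/* Example: the two-flag handshake bnxt_get_stats64() uses above,
 * isolated.  The reader publishes BNXT_STATE_READ_STATS before testing
 * BNXT_STATE_OPEN; __bnxt_close_nic() clears BNXT_STATE_OPEN and then
 * waits in bnxt_drv_busy() until no reader flag is set.  The paired
 * smp_mb__after_atomic() barriers ensure at least one side observes
 * the other, so stats are never read from freed rings.
 * demo_stats_begin() is a sketch of the existing logic, not new code.
 */
static bool demo_stats_begin(struct bnxt *bp)
{
	set_bit(BNXT_STATE_READ_STATS, &bp->state);
	smp_mb__after_atomic();	/* pairs with barrier in __bnxt_close_nic() */
	if (!test_bit(BNXT_STATE_OPEN, &bp->state)) {
		clear_bit(BNXT_STATE_READ_STATS, &bp->state);
		return false;	/* closing; caller must not touch rings */
	}
	return true;		/* caller clears READ_STATS when done */
}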
8385static bool bnxt_mc_list_updated(struct bnxt *bp, u32 *rx_mask)
8386{
8387 struct net_device *dev = bp->dev;
8388 struct bnxt_vnic_info *vnic = &bp->vnic_info[0];
8389 struct netdev_hw_addr *ha;
8390 u8 *haddr;
8391 int mc_count = 0;
8392 bool update = false;
8393 int off = 0;
8394
8395 netdev_for_each_mc_addr(ha, dev) {
8396 if (mc_count >= BNXT_MAX_MC_ADDRS) {
8397 *rx_mask |= CFA_L2_SET_RX_MASK_REQ_MASK_ALL_MCAST;
8398 vnic->mc_list_count = 0;
8399 return false;
8400 }
8401 haddr = ha->addr;
8402 if (!ether_addr_equal(haddr, vnic->mc_list + off)) {
8403 memcpy(vnic->mc_list + off, haddr, ETH_ALEN);
8404 update = true;
8405 }
8406 off += ETH_ALEN;
8407 mc_count++;
8408 }
8409 if (mc_count)
8410 *rx_mask |= CFA_L2_SET_RX_MASK_REQ_MASK_MCAST;
8411
8412 if (mc_count != vnic->mc_list_count) {
8413 vnic->mc_list_count = mc_count;
8414 update = true;
8415 }
8416 return update;
8417}
8418
8419static bool bnxt_uc_list_updated(struct bnxt *bp)
8420{
8421 struct net_device *dev = bp->dev;
8422 struct bnxt_vnic_info *vnic = &bp->vnic_info[0];
8423 struct netdev_hw_addr *ha;
8424 int off = 0;
8425
8426 if (netdev_uc_count(dev) != (vnic->uc_filter_count - 1))
8427 return true;
8428
8429 netdev_for_each_uc_addr(ha, dev) {
8430 if (!ether_addr_equal(ha->addr, vnic->uc_list + off))
8431 return true;
8432
8433 off += ETH_ALEN;
8434 }
8435 return false;
8436}
8437
8438static void bnxt_set_rx_mode(struct net_device *dev)
8439{
8440 struct bnxt *bp = netdev_priv(dev);
8441 struct bnxt_vnic_info *vnic = &bp->vnic_info[0];
8442 u32 mask = vnic->rx_mask;
8443 bool mc_update = false;
8444 bool uc_update;
8445
8446 if (!netif_running(dev))
8447 return;
8448
8449 mask &= ~(CFA_L2_SET_RX_MASK_REQ_MASK_PROMISCUOUS |
8450 CFA_L2_SET_RX_MASK_REQ_MASK_MCAST |
30e33848
MC
8451 CFA_L2_SET_RX_MASK_REQ_MASK_ALL_MCAST |
8452 CFA_L2_SET_RX_MASK_REQ_MASK_BCAST);
c0c050c5 8453
17c71ac3 8454 if ((dev->flags & IFF_PROMISC) && bnxt_promisc_ok(bp))
c0c050c5
MC
8455 mask |= CFA_L2_SET_RX_MASK_REQ_MASK_PROMISCUOUS;
8456
8457 uc_update = bnxt_uc_list_updated(bp);
8458
30e33848
MC
8459 if (dev->flags & IFF_BROADCAST)
8460 mask |= CFA_L2_SET_RX_MASK_REQ_MASK_BCAST;
c0c050c5
MC
8461 if (dev->flags & IFF_ALLMULTI) {
8462 mask |= CFA_L2_SET_RX_MASK_REQ_MASK_ALL_MCAST;
8463 vnic->mc_list_count = 0;
8464 } else {
8465 mc_update = bnxt_mc_list_updated(bp, &mask);
8466 }
8467
8468 if (mask != vnic->rx_mask || uc_update || mc_update) {
8469 vnic->rx_mask = mask;
8470
8471 set_bit(BNXT_RX_MASK_SP_EVENT, &bp->sp_event);
c213eae8 8472 bnxt_queue_sp_work(bp);
c0c050c5
MC
8473 }
8474}
8475
b664f008 8476static int bnxt_cfg_rx_mode(struct bnxt *bp)
c0c050c5
MC
8477{
8478 struct net_device *dev = bp->dev;
8479 struct bnxt_vnic_info *vnic = &bp->vnic_info[0];
8480 struct netdev_hw_addr *ha;
8481 int i, off = 0, rc;
8482 bool uc_update;
8483
8484 netif_addr_lock_bh(dev);
8485 uc_update = bnxt_uc_list_updated(bp);
8486 netif_addr_unlock_bh(dev);
8487
8488 if (!uc_update)
8489 goto skip_uc;
8490
8491 mutex_lock(&bp->hwrm_cmd_lock);
8492 for (i = 1; i < vnic->uc_filter_count; i++) {
8493 struct hwrm_cfa_l2_filter_free_input req = {0};
8494
8495 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_CFA_L2_FILTER_FREE, -1,
8496 -1);
8497
8498 req.l2_filter_id = vnic->fw_l2_filter_id[i];
8499
8500 rc = _hwrm_send_message(bp, &req, sizeof(req),
8501 HWRM_CMD_TIMEOUT);
8502 }
8503 mutex_unlock(&bp->hwrm_cmd_lock);
8504
8505 vnic->uc_filter_count = 1;
8506
8507 netif_addr_lock_bh(dev);
8508 if (netdev_uc_count(dev) > (BNXT_MAX_UC_ADDRS - 1)) {
8509 vnic->rx_mask |= CFA_L2_SET_RX_MASK_REQ_MASK_PROMISCUOUS;
8510 } else {
8511 netdev_for_each_uc_addr(ha, dev) {
8512 memcpy(vnic->uc_list + off, ha->addr, ETH_ALEN);
8513 off += ETH_ALEN;
8514 vnic->uc_filter_count++;
8515 }
8516 }
8517 netif_addr_unlock_bh(dev);
8518
8519 for (i = 1, off = 0; i < vnic->uc_filter_count; i++, off += ETH_ALEN) {
8520 rc = bnxt_hwrm_set_vnic_filter(bp, 0, i, vnic->uc_list + off);
8521 if (rc) {
8522 netdev_err(bp->dev, "HWRM vnic filter failure rc: %x\n",
8523 rc);
8524 vnic->uc_filter_count = i;
b664f008 8525 return rc;
c0c050c5
MC
8526 }
8527 }
8528
8529skip_uc:
8530 rc = bnxt_hwrm_cfa_l2_set_rx_mask(bp, 0);
8531 if (rc)
8532 netdev_err(bp->dev, "HWRM cfa l2 rx mask failure rc: %x\n",
8533 rc);
b664f008
MC
8534
8535 return rc;
c0c050c5
MC
8536}
8537
2773dfb2
MC
8538static bool bnxt_can_reserve_rings(struct bnxt *bp)
8539{
8540#ifdef CONFIG_BNXT_SRIOV
f1ca94de 8541 if (BNXT_NEW_RM(bp) && BNXT_VF(bp)) {
2773dfb2
MC
8542 struct bnxt_hw_resc *hw_resc = &bp->hw_resc;
8543
8544 /* No minimum rings were provisioned by the PF. Don't
8545 * reserve rings by default when device is down.
8546 */
8547 if (hw_resc->min_tx_rings || hw_resc->resv_tx_rings)
8548 return true;
8549
8550 if (!netif_running(bp->dev))
8551 return false;
8552 }
8553#endif
8554 return true;
8555}
8556
8079e8f1
MC
8557/* If the chip and firmware support RFS */
8558static bool bnxt_rfs_supported(struct bnxt *bp)
8559{
41e8d798
MC
8560 if (bp->flags & BNXT_FLAG_CHIP_P5)
8561 return false;
8079e8f1
MC
8562 if (BNXT_PF(bp) && !BNXT_CHIP_TYPE_NITRO_A0(bp))
8563 return true;
ae10ae74
MC
8564 if (bp->flags & BNXT_FLAG_NEW_RSS_CAP)
8565 return true;
8079e8f1
MC
8566 return false;
8567}
8568
8569/* If runtime conditions support RFS */
2bcfa6f6
MC
8570static bool bnxt_rfs_capable(struct bnxt *bp)
8571{
8572#ifdef CONFIG_RFS_ACCEL
8079e8f1 8573 int vnics, max_vnics, max_rss_ctxs;
2bcfa6f6 8574
41e8d798
MC
8575 if (bp->flags & BNXT_FLAG_CHIP_P5)
8576 return false;
2773dfb2 8577 if (!(bp->flags & BNXT_FLAG_MSIX_CAP) || !bnxt_can_reserve_rings(bp))
2bcfa6f6
MC
8578 return false;
8579
8580 vnics = 1 + bp->rx_nr_rings;
8079e8f1
MC
8581 max_vnics = bnxt_get_max_func_vnics(bp);
8582 max_rss_ctxs = bnxt_get_max_func_rss_ctxs(bp);
ae10ae74
MC
8583
8584 /* RSS contexts not a limiting factor */
8585 if (bp->flags & BNXT_FLAG_NEW_RSS_CAP)
8586 max_rss_ctxs = max_vnics;
8079e8f1 8587 if (vnics > max_vnics || vnics > max_rss_ctxs) {
6a1eef5b
MC
8588 if (bp->rx_nr_rings > 1)
8589 netdev_warn(bp->dev,
8590 "Not enough resources to support NTUPLE filters, enough resources for up to %d rx rings\n",
8591 min(max_rss_ctxs - 1, max_vnics - 1));
2bcfa6f6 8592 return false;
a2304909 8593 }
2bcfa6f6 8594
f1ca94de 8595 if (!BNXT_NEW_RM(bp))
6a1eef5b
MC
8596 return true;
8597
8598 if (vnics == bp->hw_resc.resv_vnics)
8599 return true;
8600
8601 bnxt_hwrm_reserve_rings(bp, 0, 0, 0, 0, vnics);
8602 if (vnics <= bp->hw_resc.resv_vnics)
8603 return true;
8604
8605 netdev_warn(bp->dev, "Unable to reserve resources to support NTUPLE filters.\n");
8606 bnxt_hwrm_reserve_rings(bp, 0, 0, 0, 0, 1);
8607 return false;
2bcfa6f6
MC
8608#else
8609 return false;
8610#endif
8611}
8612
c0c050c5
MC
8613static netdev_features_t bnxt_fix_features(struct net_device *dev,
8614 netdev_features_t features)
8615{
2bcfa6f6
MC
8616 struct bnxt *bp = netdev_priv(dev);
8617
a2304909 8618 if ((features & NETIF_F_NTUPLE) && !bnxt_rfs_capable(bp))
2bcfa6f6 8619 features &= ~NETIF_F_NTUPLE;
5a9f6b23 8620
1054aee8
MC
8621 if (bp->flags & BNXT_FLAG_NO_AGG_RINGS)
8622 features &= ~(NETIF_F_LRO | NETIF_F_GRO_HW);
8623
8624 if (!(features & NETIF_F_GRO))
8625 features &= ~NETIF_F_GRO_HW;
8626
8627 if (features & NETIF_F_GRO_HW)
8628 features &= ~NETIF_F_LRO;
8629
5a9f6b23
MC
8630 /* Both CTAG and STAG VLAN acceleration on the RX side have to be
8631 * turned on or off together.
8632 */
8633 if ((features & (NETIF_F_HW_VLAN_CTAG_RX | NETIF_F_HW_VLAN_STAG_RX)) !=
8634 (NETIF_F_HW_VLAN_CTAG_RX | NETIF_F_HW_VLAN_STAG_RX)) {
8635 if (dev->features & NETIF_F_HW_VLAN_CTAG_RX)
8636 features &= ~(NETIF_F_HW_VLAN_CTAG_RX |
8637 NETIF_F_HW_VLAN_STAG_RX);
8638 else
8639 features |= NETIF_F_HW_VLAN_CTAG_RX |
8640 NETIF_F_HW_VLAN_STAG_RX;
8641 }
cf6645f8
MC
8642#ifdef CONFIG_BNXT_SRIOV
8643 if (BNXT_VF(bp)) {
8644 if (bp->vf.vlan) {
8645 features &= ~(NETIF_F_HW_VLAN_CTAG_RX |
8646 NETIF_F_HW_VLAN_STAG_RX);
8647 }
8648 }
8649#endif
c0c050c5
MC
8650 return features;
8651}
8652
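/* Example: the feature-dependency rules bnxt_fix_features() enforces
 * above, restated as a standalone userspace illustration over plain
 * bit flags (the F_* names are hypothetical stand-ins for NETIF_F_*).
 * Hardware GRO depends on software GRO being requested, and once
 * hardware GRO is on, LRO is dropped.
 */
#include <stdio.h>

#define F_GRO	 0x1u
#define F_GRO_HW 0x2u
#define F_LRO	 0x4u

static unsigned int fix_features_demo(unsigned int f)
{
	if (!(f & F_GRO))
		f &= ~F_GRO_HW;		/* GRO_HW requires GRO */
	if (f & F_GRO_HW)
		f &= ~F_LRO;		/* GRO_HW supersedes LRO */
	return f;
}

int main(void)
{
	/* GRO_HW without GRO: GRO_HW is stripped, LRO survives -> 0x4 */
	printf("%#x\n", fix_features_demo(F_GRO_HW | F_LRO));
	/* All three: GRO_HW wins over LRO -> 0x3 (GRO | GRO_HW) */
	printf("%#x\n", fix_features_demo(F_GRO | F_GRO_HW | F_LRO));
	return 0;
}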
8653static int bnxt_set_features(struct net_device *dev, netdev_features_t features)
8654{
8655 struct bnxt *bp = netdev_priv(dev);
8656 u32 flags = bp->flags;
8657 u32 changes;
8658 int rc = 0;
8659 bool re_init = false;
8660 bool update_tpa = false;
8661
8662 flags &= ~BNXT_FLAG_ALL_CONFIG_FEATS;
1054aee8 8663 if (features & NETIF_F_GRO_HW)
c0c050c5 8664 flags |= BNXT_FLAG_GRO;
1054aee8 8665 else if (features & NETIF_F_LRO)
c0c050c5
MC
8666 flags |= BNXT_FLAG_LRO;
8667
bdbd1eb5
MC
8668 if (bp->flags & BNXT_FLAG_NO_AGG_RINGS)
8669 flags &= ~BNXT_FLAG_TPA;
8670
c0c050c5
MC
8671 if (features & NETIF_F_HW_VLAN_CTAG_RX)
8672 flags |= BNXT_FLAG_STRIP_VLAN;
8673
8674 if (features & NETIF_F_NTUPLE)
8675 flags |= BNXT_FLAG_RFS;
8676
8677 changes = flags ^ bp->flags;
8678 if (changes & BNXT_FLAG_TPA) {
8679 update_tpa = true;
8680 if ((bp->flags & BNXT_FLAG_TPA) == 0 ||
8681 (flags & BNXT_FLAG_TPA) == 0)
8682 re_init = true;
8683 }
8684
8685 if (changes & ~BNXT_FLAG_TPA)
8686 re_init = true;
8687
8688 if (flags != bp->flags) {
8689 u32 old_flags = bp->flags;
8690
8691 bp->flags = flags;
8692
2bcfa6f6 8693 if (!test_bit(BNXT_STATE_OPEN, &bp->state)) {
c0c050c5
MC
8694 if (update_tpa)
8695 bnxt_set_ring_params(bp);
8696 return rc;
8697 }
8698
8699 if (re_init) {
8700 bnxt_close_nic(bp, false, false);
8701 if (update_tpa)
8702 bnxt_set_ring_params(bp);
8703
8704 return bnxt_open_nic(bp, false, false);
8705 }
8706 if (update_tpa) {
8707 rc = bnxt_set_tpa(bp,
8708 (flags & BNXT_FLAG_TPA) ?
8709 true : false);
8710 if (rc)
8711 bp->flags = old_flags;
8712 }
8713 }
8714 return rc;
8715}
8716
ffd77621
MC
8717static int bnxt_dbg_hwrm_ring_info_get(struct bnxt *bp, u8 ring_type,
8718 u32 ring_id, u32 *prod, u32 *cons)
8719{
8720 struct hwrm_dbg_ring_info_get_output *resp = bp->hwrm_cmd_resp_addr;
8721 struct hwrm_dbg_ring_info_get_input req = {0};
8722 int rc;
8723
8724 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_DBG_RING_INFO_GET, -1, -1);
8725 req.ring_type = ring_type;
8726 req.fw_ring_id = cpu_to_le32(ring_id);
8727 mutex_lock(&bp->hwrm_cmd_lock);
8728 rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
8729 if (!rc) {
8730 *prod = le32_to_cpu(resp->producer_index);
8731 *cons = le32_to_cpu(resp->consumer_index);
8732 }
8733 mutex_unlock(&bp->hwrm_cmd_lock);
8734 return rc;
8735}
8736
9f554590
MC
8737static void bnxt_dump_tx_sw_state(struct bnxt_napi *bnapi)
8738{
b6ab4b01 8739 struct bnxt_tx_ring_info *txr = bnapi->tx_ring;
9f554590
MC
8740 int i = bnapi->index;
8741
3b2b7d9d
MC
8742 if (!txr)
8743 return;
8744
9f554590
MC
8745 netdev_info(bnapi->bp->dev, "[%d]: tx{fw_ring: %d prod: %x cons: %x}\n",
8746 i, txr->tx_ring_struct.fw_ring_id, txr->tx_prod,
8747 txr->tx_cons);
8748}
8749
8750static void bnxt_dump_rx_sw_state(struct bnxt_napi *bnapi)
8751{
b6ab4b01 8752 struct bnxt_rx_ring_info *rxr = bnapi->rx_ring;
9f554590
MC
8753 int i = bnapi->index;
8754
3b2b7d9d
MC
8755 if (!rxr)
8756 return;
8757
9f554590
MC
8758 netdev_info(bnapi->bp->dev, "[%d]: rx{fw_ring: %d prod: %x} rx_agg{fw_ring: %d agg_prod: %x sw_agg_prod: %x}\n",
8759 i, rxr->rx_ring_struct.fw_ring_id, rxr->rx_prod,
8760 rxr->rx_agg_ring_struct.fw_ring_id, rxr->rx_agg_prod,
8761 rxr->rx_sw_agg_prod);
8762}
8763
8764static void bnxt_dump_cp_sw_state(struct bnxt_napi *bnapi)
8765{
8766 struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
8767 int i = bnapi->index;
8768
8769 netdev_info(bnapi->bp->dev, "[%d]: cp{fw_ring: %d raw_cons: %x}\n",
8770 i, cpr->cp_ring_struct.fw_ring_id, cpr->cp_raw_cons);
8771}
8772
c0c050c5
MC
8773static void bnxt_dbg_dump_states(struct bnxt *bp)
8774{
8775 int i;
8776 struct bnxt_napi *bnapi;
c0c050c5
MC
8777
8778 for (i = 0; i < bp->cp_nr_rings; i++) {
8779 bnapi = bp->bnapi[i];
c0c050c5 8780 if (netif_msg_drv(bp)) {
9f554590
MC
8781 bnxt_dump_tx_sw_state(bnapi);
8782 bnxt_dump_rx_sw_state(bnapi);
8783 bnxt_dump_cp_sw_state(bnapi);
c0c050c5
MC
8784 }
8785 }
8786}
8787
6988bd92 8788static void bnxt_reset_task(struct bnxt *bp, bool silent)
c0c050c5 8789{
6988bd92
MC
8790 if (!silent)
8791 bnxt_dbg_dump_states(bp);
028de140 8792 if (netif_running(bp->dev)) {
b386cd36
MC
8793 int rc;
8794
8795 if (!silent)
8796 bnxt_ulp_stop(bp);
028de140 8797 bnxt_close_nic(bp, false, false);
b386cd36
MC
8798 rc = bnxt_open_nic(bp, false, false);
8799 if (!silent && !rc)
8800 bnxt_ulp_start(bp);
028de140 8801 }
c0c050c5
MC
8802}
8803
8804static void bnxt_tx_timeout(struct net_device *dev)
8805{
8806 struct bnxt *bp = netdev_priv(dev);
8807
8808 netdev_err(bp->dev, "TX timeout detected, starting reset task!\n");
8809 set_bit(BNXT_RESET_TASK_SP_EVENT, &bp->sp_event);
c213eae8 8810 bnxt_queue_sp_work(bp);
c0c050c5
MC
8811}

static void bnxt_timer(struct timer_list *t)
{
	struct bnxt *bp = from_timer(bp, t, timer);
	struct net_device *dev = bp->dev;

	if (!netif_running(dev))
		return;

	if (atomic_read(&bp->intr_sem) != 0)
		goto bnxt_restart_timer;

	if (bp->link_info.link_up && (bp->flags & BNXT_FLAG_PORT_STATS) &&
	    bp->stats_coal_ticks) {
		set_bit(BNXT_PERIODIC_STATS_SP_EVENT, &bp->sp_event);
		bnxt_queue_sp_work(bp);
	}

	if (bnxt_tc_flower_enabled(bp)) {
		set_bit(BNXT_FLOW_STATS_SP_EVENT, &bp->sp_event);
		bnxt_queue_sp_work(bp);
	}

	if (bp->link_info.phy_retry) {
		if (time_after(jiffies, bp->link_info.phy_retry_expires)) {
			bp->link_info.phy_retry = 0;
			netdev_warn(bp->dev, "failed to update phy settings after maximum retries.\n");
		} else {
			set_bit(BNXT_UPDATE_PHY_SP_EVENT, &bp->sp_event);
			bnxt_queue_sp_work(bp);
		}
	}

	if ((bp->flags & BNXT_FLAG_CHIP_P5) && netif_carrier_ok(dev)) {
		set_bit(BNXT_RING_COAL_NOW_SP_EVENT, &bp->sp_event);
		bnxt_queue_sp_work(bp);
	}
bnxt_restart_timer:
	mod_timer(&bp->timer, jiffies + bp->current_interval);
}
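
/* Illustrative sketch (not part of the upstream driver): every deferred
 * action in this driver follows the same two-step pattern that bnxt_timer()
 * uses above -- flag the event, then kick the slow-path work item, which
 * bnxt_sp_task() later drains with test_and_clear_bit():
 *
 *	set_bit(BNXT_PERIODIC_STATS_SP_EVENT, &bp->sp_event);
 *	bnxt_queue_sp_work(bp);
 */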

static void bnxt_rtnl_lock_sp(struct bnxt *bp)
{
	/* We are called from bnxt_sp_task which has BNXT_STATE_IN_SP_TASK
	 * set.  If the device is being closed, bnxt_close() may be holding
	 * rtnl_lock() and waiting for BNXT_STATE_IN_SP_TASK to clear.  So we
	 * must clear BNXT_STATE_IN_SP_TASK before taking rtnl_lock().
	 */
	clear_bit(BNXT_STATE_IN_SP_TASK, &bp->state);
	rtnl_lock();
}

static void bnxt_rtnl_unlock_sp(struct bnxt *bp)
{
	set_bit(BNXT_STATE_IN_SP_TASK, &bp->state);
	rtnl_unlock();
}
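
/* Illustrative note (not part of the upstream driver): these helpers are
 * meant to be used as a bracketing pair from within bnxt_sp_task(), as
 * bnxt_reset() does below:
 *
 *	bnxt_rtnl_lock_sp(bp);
 *	... work that needs the rtnl lock, without deadlocking bnxt_close() ...
 *	bnxt_rtnl_unlock_sp(bp);
 */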

/* Only called from bnxt_sp_task() */
static void bnxt_reset(struct bnxt *bp, bool silent)
{
	bnxt_rtnl_lock_sp(bp);
	if (test_bit(BNXT_STATE_OPEN, &bp->state))
		bnxt_reset_task(bp, silent);
	bnxt_rtnl_unlock_sp(bp);
}

static void bnxt_chk_missed_irq(struct bnxt *bp)
{
	int i;

	if (!(bp->flags & BNXT_FLAG_CHIP_P5))
		return;

	for (i = 0; i < bp->cp_nr_rings; i++) {
		struct bnxt_napi *bnapi = bp->bnapi[i];
		struct bnxt_cp_ring_info *cpr;
		u32 fw_ring_id;
		int j;

		if (!bnapi)
			continue;

		cpr = &bnapi->cp_ring;
		for (j = 0; j < 2; j++) {
			struct bnxt_cp_ring_info *cpr2 = cpr->cp_ring_arr[j];
			u32 val[2];

			if (!cpr2 || cpr2->has_more_work ||
			    !bnxt_has_work(bp, cpr2))
				continue;

			if (cpr2->cp_raw_cons != cpr2->last_cp_raw_cons) {
				cpr2->last_cp_raw_cons = cpr2->cp_raw_cons;
				continue;
			}
			fw_ring_id = cpr2->cp_ring_struct.fw_ring_id;
			bnxt_dbg_hwrm_ring_info_get(bp,
				DBG_RING_INFO_GET_REQ_RING_TYPE_L2_CMPL,
				fw_ring_id, &val[0], &val[1]);
			cpr->missed_irqs++;
		}
	}
}

static void bnxt_cfg_ntp_filters(struct bnxt *);

static void bnxt_sp_task(struct work_struct *work)
{
	struct bnxt *bp = container_of(work, struct bnxt, sp_task);

	set_bit(BNXT_STATE_IN_SP_TASK, &bp->state);
	smp_mb__after_atomic();
	if (!test_bit(BNXT_STATE_OPEN, &bp->state)) {
		clear_bit(BNXT_STATE_IN_SP_TASK, &bp->state);
		return;
	}

	if (test_and_clear_bit(BNXT_RX_MASK_SP_EVENT, &bp->sp_event))
		bnxt_cfg_rx_mode(bp);

	if (test_and_clear_bit(BNXT_RX_NTP_FLTR_SP_EVENT, &bp->sp_event))
		bnxt_cfg_ntp_filters(bp);
	if (test_and_clear_bit(BNXT_HWRM_EXEC_FWD_REQ_SP_EVENT, &bp->sp_event))
		bnxt_hwrm_exec_fwd_req(bp);
	if (test_and_clear_bit(BNXT_VXLAN_ADD_PORT_SP_EVENT, &bp->sp_event)) {
		bnxt_hwrm_tunnel_dst_port_alloc(
			bp, bp->vxlan_port,
			TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_VXLAN);
	}
	if (test_and_clear_bit(BNXT_VXLAN_DEL_PORT_SP_EVENT, &bp->sp_event)) {
		bnxt_hwrm_tunnel_dst_port_free(
			bp, TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_VXLAN);
	}
	if (test_and_clear_bit(BNXT_GENEVE_ADD_PORT_SP_EVENT, &bp->sp_event)) {
		bnxt_hwrm_tunnel_dst_port_alloc(
			bp, bp->nge_port,
			TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_GENEVE);
	}
	if (test_and_clear_bit(BNXT_GENEVE_DEL_PORT_SP_EVENT, &bp->sp_event)) {
		bnxt_hwrm_tunnel_dst_port_free(
			bp, TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_GENEVE);
	}
	if (test_and_clear_bit(BNXT_PERIODIC_STATS_SP_EVENT, &bp->sp_event)) {
		bnxt_hwrm_port_qstats(bp);
		bnxt_hwrm_port_qstats_ext(bp);
	}

	if (test_and_clear_bit(BNXT_LINK_CHNG_SP_EVENT, &bp->sp_event)) {
		int rc;

		mutex_lock(&bp->link_lock);
		if (test_and_clear_bit(BNXT_LINK_SPEED_CHNG_SP_EVENT,
				       &bp->sp_event))
			bnxt_hwrm_phy_qcaps(bp);

		rc = bnxt_update_link(bp, true);
		mutex_unlock(&bp->link_lock);
		if (rc)
			netdev_err(bp->dev, "SP task can't update link (rc: %x)\n",
				   rc);
	}
	if (test_and_clear_bit(BNXT_UPDATE_PHY_SP_EVENT, &bp->sp_event)) {
		int rc;

		mutex_lock(&bp->link_lock);
		rc = bnxt_update_phy_setting(bp);
		mutex_unlock(&bp->link_lock);
		if (rc) {
			netdev_warn(bp->dev, "update phy settings retry failed\n");
		} else {
			bp->link_info.phy_retry = false;
			netdev_info(bp->dev, "update phy settings retry succeeded\n");
		}
	}
	if (test_and_clear_bit(BNXT_HWRM_PORT_MODULE_SP_EVENT, &bp->sp_event)) {
		mutex_lock(&bp->link_lock);
		bnxt_get_port_module_status(bp);
		mutex_unlock(&bp->link_lock);
	}

	if (test_and_clear_bit(BNXT_FLOW_STATS_SP_EVENT, &bp->sp_event))
		bnxt_tc_flow_stats_work(bp);

	if (test_and_clear_bit(BNXT_RING_COAL_NOW_SP_EVENT, &bp->sp_event))
		bnxt_chk_missed_irq(bp);

	/* The functions below will clear BNXT_STATE_IN_SP_TASK.  They
	 * must be the last functions to be called before exiting.
	 */
	if (test_and_clear_bit(BNXT_RESET_TASK_SP_EVENT, &bp->sp_event))
		bnxt_reset(bp, false);

	if (test_and_clear_bit(BNXT_RESET_TASK_SILENT_SP_EVENT, &bp->sp_event))
		bnxt_reset(bp, true);

	smp_mb__before_atomic();
	clear_bit(BNXT_STATE_IN_SP_TASK, &bp->state);
}

/* Under rtnl_lock */
int bnxt_check_rings(struct bnxt *bp, int tx, int rx, bool sh, int tcs,
		     int tx_xdp)
{
	int max_rx, max_tx, tx_sets = 1;
	int tx_rings_needed;
	int rx_rings = rx;
	int cp, vnics, rc;

	if (tcs)
		tx_sets = tcs;

	rc = bnxt_get_max_rings(bp, &max_rx, &max_tx, sh);
	if (rc)
		return rc;

	if (max_rx < rx)
		return -ENOMEM;

	tx_rings_needed = tx * tx_sets + tx_xdp;
	if (max_tx < tx_rings_needed)
		return -ENOMEM;

	vnics = 1;
	if (bp->flags & BNXT_FLAG_RFS)
		vnics += rx_rings;

	if (bp->flags & BNXT_FLAG_AGG_RINGS)
		rx_rings <<= 1;
	cp = sh ? max_t(int, tx_rings_needed, rx) : tx_rings_needed + rx;
	if (BNXT_NEW_RM(bp))
		cp += bnxt_get_ulp_msix_num(bp);
	return bnxt_hwrm_check_rings(bp, tx_rings_needed, rx_rings, rx, cp,
				     vnics);
}
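
/* Illustrative worked example (not part of the upstream driver): with
 * tx = 4, rx = 4, tcs = 2, tx_xdp = 0, shared rings, RFS and aggregation
 * rings enabled, the accounting above works out to:
 *
 *	tx_rings_needed = 4 * 2 + 0 = 8
 *	vnics           = 1 + 4     = 5	(one VNIC per RX ring for RFS)
 *	rx_rings        = 4 * 2     = 8	(doubled for aggregation rings)
 *	cp              = max(8, 4) = 8	(shared completion rings)
 *
 * and bnxt_hwrm_check_rings() then asks firmware whether that fits.
 */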

static void bnxt_unmap_bars(struct bnxt *bp, struct pci_dev *pdev)
{
	if (bp->bar2) {
		pci_iounmap(pdev, bp->bar2);
		bp->bar2 = NULL;
	}

	if (bp->bar1) {
		pci_iounmap(pdev, bp->bar1);
		bp->bar1 = NULL;
	}

	if (bp->bar0) {
		pci_iounmap(pdev, bp->bar0);
		bp->bar0 = NULL;
	}
}

static void bnxt_cleanup_pci(struct bnxt *bp)
{
	bnxt_unmap_bars(bp, bp->pdev);
	pci_release_regions(bp->pdev);
	pci_disable_device(bp->pdev);
}

static void bnxt_init_dflt_coal(struct bnxt *bp)
{
	struct bnxt_coal *coal;

	/* Tick values in microseconds.
	 * 1 coal_buf x bufs_per_record = 1 completion record.
	 */
	coal = &bp->rx_coal;
	coal->coal_ticks = 14;
	coal->coal_bufs = 30;
	coal->coal_ticks_irq = 1;
	coal->coal_bufs_irq = 2;
	coal->idle_thresh = 50;
	coal->bufs_per_record = 2;
	coal->budget = 64;		/* NAPI budget */

	coal = &bp->tx_coal;
	coal->coal_ticks = 28;
	coal->coal_bufs = 30;
	coal->coal_ticks_irq = 2;
	coal->coal_bufs_irq = 2;
	coal->bufs_per_record = 1;

	bp->stats_coal_ticks = BNXT_DEF_STATS_COAL_TICKS;
}
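
/* Illustrative reading of the defaults above (not part of the upstream
 * driver): for RX, an interrupt is coalesced until either 14 usecs pass or
 * 30 coalesced buffers accumulate; with bufs_per_record = 2, those 30
 * buffers correspond to roughly 15 completion records.  The *_irq values
 * are the tighter limits applied while interrupt processing is active.
 */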

static int bnxt_init_board(struct pci_dev *pdev, struct net_device *dev)
{
	int rc;
	struct bnxt *bp = netdev_priv(dev);

	SET_NETDEV_DEV(dev, &pdev->dev);

	/* enable device (incl. PCI PM wakeup), and bus-mastering */
	rc = pci_enable_device(pdev);
	if (rc) {
		dev_err(&pdev->dev, "Cannot enable PCI device, aborting\n");
		goto init_err;
	}

	if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
		dev_err(&pdev->dev,
			"Cannot find PCI device base address, aborting\n");
		rc = -ENODEV;
		goto init_err_disable;
	}

	rc = pci_request_regions(pdev, DRV_MODULE_NAME);
	if (rc) {
		dev_err(&pdev->dev, "Cannot obtain PCI resources, aborting\n");
		goto init_err_disable;
	}

	if (dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64)) != 0 &&
	    dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32)) != 0) {
		dev_err(&pdev->dev, "System does not support DMA, aborting\n");
		rc = -EIO;	/* original left rc == 0 here, silently succeeding */
		goto init_err_disable;
	}

	pci_set_master(pdev);

	bp->dev = dev;
	bp->pdev = pdev;

	bp->bar0 = pci_ioremap_bar(pdev, 0);
	if (!bp->bar0) {
		dev_err(&pdev->dev, "Cannot map device registers, aborting\n");
		rc = -ENOMEM;
		goto init_err_release;
	}

	bp->bar1 = pci_ioremap_bar(pdev, 2);
	if (!bp->bar1) {
		dev_err(&pdev->dev, "Cannot map doorbell registers, aborting\n");
		rc = -ENOMEM;
		goto init_err_release;
	}

	bp->bar2 = pci_ioremap_bar(pdev, 4);
	if (!bp->bar2) {
		dev_err(&pdev->dev, "Cannot map bar4 registers, aborting\n");
		rc = -ENOMEM;
		goto init_err_release;
	}

	pci_enable_pcie_error_reporting(pdev);

	INIT_WORK(&bp->sp_task, bnxt_sp_task);

	spin_lock_init(&bp->ntp_fltr_lock);
#if BITS_PER_LONG == 32
	spin_lock_init(&bp->db_lock);
#endif

	bp->rx_ring_size = BNXT_DEFAULT_RX_RING_SIZE;
	bp->tx_ring_size = BNXT_DEFAULT_TX_RING_SIZE;

	bnxt_init_dflt_coal(bp);

	timer_setup(&bp->timer, bnxt_timer, 0);
	bp->current_interval = BNXT_TIMER_INTERVAL;

	clear_bit(BNXT_STATE_OPEN, &bp->state);
	return 0;

init_err_release:
	bnxt_unmap_bars(bp, pdev);
	pci_release_regions(pdev);

init_err_disable:
	pci_disable_device(pdev);

init_err:
	return rc;
}

/* rtnl_lock held */
static int bnxt_change_mac_addr(struct net_device *dev, void *p)
{
	struct sockaddr *addr = p;
	struct bnxt *bp = netdev_priv(dev);
	int rc = 0;

	if (!is_valid_ether_addr(addr->sa_data))
		return -EADDRNOTAVAIL;

	if (ether_addr_equal(addr->sa_data, dev->dev_addr))
		return 0;

	rc = bnxt_approve_mac(bp, addr->sa_data, true);
	if (rc)
		return rc;

	memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
	if (netif_running(dev)) {
		bnxt_close_nic(bp, false, false);
		rc = bnxt_open_nic(bp, false, false);
	}

	return rc;
}

/* rtnl_lock held */
static int bnxt_change_mtu(struct net_device *dev, int new_mtu)
{
	struct bnxt *bp = netdev_priv(dev);

	if (netif_running(dev))
		bnxt_close_nic(bp, false, false);

	dev->mtu = new_mtu;
	bnxt_set_ring_params(bp);

	if (netif_running(dev))
		return bnxt_open_nic(bp, false, false);

	return 0;
}

int bnxt_setup_mq_tc(struct net_device *dev, u8 tc)
{
	struct bnxt *bp = netdev_priv(dev);
	bool sh = false;
	int rc;

	if (tc > bp->max_tc) {
		netdev_err(dev, "Too many traffic classes requested: %d. Max supported is %d.\n",
			   tc, bp->max_tc);
		return -EINVAL;
	}

	if (netdev_get_num_tc(dev) == tc)
		return 0;

	if (bp->flags & BNXT_FLAG_SHARED_RINGS)
		sh = true;

	rc = bnxt_check_rings(bp, bp->tx_nr_rings_per_tc, bp->rx_nr_rings,
			      sh, tc, bp->tx_nr_rings_xdp);
	if (rc)
		return rc;

	/* Need to close the device and redo hw resource allocation */
	if (netif_running(bp->dev))
		bnxt_close_nic(bp, true, false);

	if (tc) {
		bp->tx_nr_rings = bp->tx_nr_rings_per_tc * tc;
		netdev_set_num_tc(dev, tc);
	} else {
		bp->tx_nr_rings = bp->tx_nr_rings_per_tc;
		netdev_reset_tc(dev);
	}
	bp->tx_nr_rings += bp->tx_nr_rings_xdp;
	bp->cp_nr_rings = sh ? max_t(int, bp->tx_nr_rings, bp->rx_nr_rings) :
			       bp->tx_nr_rings + bp->rx_nr_rings;
	bp->num_stat_ctxs = bp->cp_nr_rings;

	if (netif_running(bp->dev))
		return bnxt_open_nic(bp, true, false);

	return 0;
}
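
/* Illustrative worked example (not part of the upstream driver): with
 * tx_nr_rings_per_tc = 4, tx_nr_rings_xdp = 0 and shared rings, a request
 * for tc = 2 yields tx_nr_rings = 4 * 2 = 8 and cp_nr_rings =
 * max(tx_nr_rings, rx_nr_rings).  Setting tc = 0 reverses this and drops
 * back to a single TX ring set.
 */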

static int bnxt_setup_tc_block_cb(enum tc_setup_type type, void *type_data,
				  void *cb_priv)
{
	struct bnxt *bp = cb_priv;

	if (!bnxt_tc_flower_enabled(bp) ||
	    !tc_cls_can_offload_and_chain0(bp->dev, type_data))
		return -EOPNOTSUPP;

	switch (type) {
	case TC_SETUP_CLSFLOWER:
		return bnxt_tc_setup_flower(bp, bp->pf.fw_fid, type_data);
	default:
		return -EOPNOTSUPP;
	}
}

static int bnxt_setup_tc_block(struct net_device *dev,
			       struct tc_block_offload *f)
{
	struct bnxt *bp = netdev_priv(dev);

	if (f->binder_type != TCF_BLOCK_BINDER_TYPE_CLSACT_INGRESS)
		return -EOPNOTSUPP;

	switch (f->command) {
	case TC_BLOCK_BIND:
		return tcf_block_cb_register(f->block, bnxt_setup_tc_block_cb,
					     bp, bp, f->extack);
	case TC_BLOCK_UNBIND:
		tcf_block_cb_unregister(f->block, bnxt_setup_tc_block_cb, bp);
		return 0;
	default:
		return -EOPNOTSUPP;
	}
}

static int bnxt_setup_tc(struct net_device *dev, enum tc_setup_type type,
			 void *type_data)
{
	switch (type) {
	case TC_SETUP_BLOCK:
		return bnxt_setup_tc_block(dev, type_data);
	case TC_SETUP_QDISC_MQPRIO: {
		struct tc_mqprio_qopt *mqprio = type_data;

		mqprio->hw = TC_MQPRIO_HW_OFFLOAD_TCS;

		return bnxt_setup_mq_tc(dev, mqprio->num_tc);
	}
	default:
		return -EOPNOTSUPP;
	}
}

#ifdef CONFIG_RFS_ACCEL
static bool bnxt_fltr_match(struct bnxt_ntuple_filter *f1,
			    struct bnxt_ntuple_filter *f2)
{
	struct flow_keys *keys1 = &f1->fkeys;
	struct flow_keys *keys2 = &f2->fkeys;

	if (keys1->addrs.v4addrs.src == keys2->addrs.v4addrs.src &&
	    keys1->addrs.v4addrs.dst == keys2->addrs.v4addrs.dst &&
	    keys1->ports.ports == keys2->ports.ports &&
	    keys1->basic.ip_proto == keys2->basic.ip_proto &&
	    keys1->basic.n_proto == keys2->basic.n_proto &&
	    keys1->control.flags == keys2->control.flags &&
	    ether_addr_equal(f1->src_mac_addr, f2->src_mac_addr) &&
	    ether_addr_equal(f1->dst_mac_addr, f2->dst_mac_addr))
		return true;

	return false;
}

static int bnxt_rx_flow_steer(struct net_device *dev, const struct sk_buff *skb,
			      u16 rxq_index, u32 flow_id)
{
	struct bnxt *bp = netdev_priv(dev);
	struct bnxt_ntuple_filter *fltr, *new_fltr;
	struct flow_keys *fkeys;
	struct ethhdr *eth = (struct ethhdr *)skb_mac_header(skb);
	int rc = 0, idx, bit_id, l2_idx = 0;
	struct hlist_head *head;

	if (!ether_addr_equal(dev->dev_addr, eth->h_dest)) {
		struct bnxt_vnic_info *vnic = &bp->vnic_info[0];
		int off = 0, j;

		netif_addr_lock_bh(dev);
		for (j = 0; j < vnic->uc_filter_count; j++, off += ETH_ALEN) {
			if (ether_addr_equal(eth->h_dest,
					     vnic->uc_list + off)) {
				l2_idx = j + 1;
				break;
			}
		}
		netif_addr_unlock_bh(dev);
		if (!l2_idx)
			return -EINVAL;
	}
	new_fltr = kzalloc(sizeof(*new_fltr), GFP_ATOMIC);
	if (!new_fltr)
		return -ENOMEM;

	fkeys = &new_fltr->fkeys;
	if (!skb_flow_dissect_flow_keys(skb, fkeys, 0)) {
		rc = -EPROTONOSUPPORT;
		goto err_free;
	}

	if ((fkeys->basic.n_proto != htons(ETH_P_IP) &&
	     fkeys->basic.n_proto != htons(ETH_P_IPV6)) ||
	    ((fkeys->basic.ip_proto != IPPROTO_TCP) &&
	     (fkeys->basic.ip_proto != IPPROTO_UDP))) {
		rc = -EPROTONOSUPPORT;
		goto err_free;
	}
	if (fkeys->basic.n_proto == htons(ETH_P_IPV6) &&
	    bp->hwrm_spec_code < 0x10601) {
		rc = -EPROTONOSUPPORT;
		goto err_free;
	}
	if ((fkeys->control.flags & FLOW_DIS_ENCAPSULATION) &&
	    bp->hwrm_spec_code < 0x10601) {
		rc = -EPROTONOSUPPORT;
		goto err_free;
	}

	memcpy(new_fltr->dst_mac_addr, eth->h_dest, ETH_ALEN);
	memcpy(new_fltr->src_mac_addr, eth->h_source, ETH_ALEN);

	idx = skb_get_hash_raw(skb) & BNXT_NTP_FLTR_HASH_MASK;
	head = &bp->ntp_fltr_hash_tbl[idx];
	rcu_read_lock();
	hlist_for_each_entry_rcu(fltr, head, hash) {
		if (bnxt_fltr_match(fltr, new_fltr)) {
			rcu_read_unlock();
			rc = 0;
			goto err_free;
		}
	}
	rcu_read_unlock();

	spin_lock_bh(&bp->ntp_fltr_lock);
	bit_id = bitmap_find_free_region(bp->ntp_fltr_bmap,
					 BNXT_NTP_FLTR_MAX_FLTR, 0);
	if (bit_id < 0) {
		spin_unlock_bh(&bp->ntp_fltr_lock);
		rc = -ENOMEM;
		goto err_free;
	}

	new_fltr->sw_id = (u16)bit_id;
	new_fltr->flow_id = flow_id;
	new_fltr->l2_fltr_idx = l2_idx;
	new_fltr->rxq = rxq_index;
	hlist_add_head_rcu(&new_fltr->hash, head);
	bp->ntp_fltr_count++;
	spin_unlock_bh(&bp->ntp_fltr_lock);

	set_bit(BNXT_RX_NTP_FLTR_SP_EVENT, &bp->sp_event);
	bnxt_queue_sp_work(bp);

	return new_fltr->sw_id;

err_free:
	kfree(new_fltr);
	return rc;
}
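
/* Illustrative outline (not part of the upstream driver) of how an aRFS
 * filter comes to life: the stack calls .ndo_rx_flow_steer above on a
 * steering miss; the new filter is parked in ntp_fltr_hash_tbl and a
 * slow-path event is queued; bnxt_cfg_ntp_filters() below then pushes it
 * to firmware with bnxt_hwrm_cfa_ntuple_filter_alloc() and later retires
 * it once rps_may_expire_flow() says the flow has gone idle.
 */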

static void bnxt_cfg_ntp_filters(struct bnxt *bp)
{
	int i;

	for (i = 0; i < BNXT_NTP_FLTR_HASH_SIZE; i++) {
		struct hlist_head *head;
		struct hlist_node *tmp;
		struct bnxt_ntuple_filter *fltr;
		int rc;

		head = &bp->ntp_fltr_hash_tbl[i];
		hlist_for_each_entry_safe(fltr, tmp, head, hash) {
			bool del = false;

			if (test_bit(BNXT_FLTR_VALID, &fltr->state)) {
				if (rps_may_expire_flow(bp->dev, fltr->rxq,
							fltr->flow_id,
							fltr->sw_id)) {
					bnxt_hwrm_cfa_ntuple_filter_free(bp,
									 fltr);
					del = true;
				}
			} else {
				rc = bnxt_hwrm_cfa_ntuple_filter_alloc(bp,
								       fltr);
				if (rc)
					del = true;
				else
					set_bit(BNXT_FLTR_VALID, &fltr->state);
			}

			if (del) {
				spin_lock_bh(&bp->ntp_fltr_lock);
				hlist_del_rcu(&fltr->hash);
				bp->ntp_fltr_count--;
				spin_unlock_bh(&bp->ntp_fltr_lock);
				synchronize_rcu();
				clear_bit(fltr->sw_id, bp->ntp_fltr_bmap);
				kfree(fltr);
			}
		}
	}
	if (test_and_clear_bit(BNXT_HWRM_PF_UNLOAD_SP_EVENT, &bp->sp_event))
		netdev_info(bp->dev, "Received PF driver unload event!\n");
}

#else

static void bnxt_cfg_ntp_filters(struct bnxt *bp)
{
}

#endif /* CONFIG_RFS_ACCEL */

static void bnxt_udp_tunnel_add(struct net_device *dev,
				struct udp_tunnel_info *ti)
{
	struct bnxt *bp = netdev_priv(dev);

	if (ti->sa_family != AF_INET6 && ti->sa_family != AF_INET)
		return;

	if (!netif_running(dev))
		return;

	switch (ti->type) {
	case UDP_TUNNEL_TYPE_VXLAN:
		if (bp->vxlan_port_cnt && bp->vxlan_port != ti->port)
			return;

		bp->vxlan_port_cnt++;
		if (bp->vxlan_port_cnt == 1) {
			bp->vxlan_port = ti->port;
			set_bit(BNXT_VXLAN_ADD_PORT_SP_EVENT, &bp->sp_event);
			bnxt_queue_sp_work(bp);
		}
		break;
	case UDP_TUNNEL_TYPE_GENEVE:
		if (bp->nge_port_cnt && bp->nge_port != ti->port)
			return;

		bp->nge_port_cnt++;
		if (bp->nge_port_cnt == 1) {
			bp->nge_port = ti->port;
			set_bit(BNXT_GENEVE_ADD_PORT_SP_EVENT, &bp->sp_event);
		}
		break;
	default:
		return;
	}

	bnxt_queue_sp_work(bp);
}

static void bnxt_udp_tunnel_del(struct net_device *dev,
				struct udp_tunnel_info *ti)
{
	struct bnxt *bp = netdev_priv(dev);

	if (ti->sa_family != AF_INET6 && ti->sa_family != AF_INET)
		return;

	if (!netif_running(dev))
		return;

	switch (ti->type) {
	case UDP_TUNNEL_TYPE_VXLAN:
		if (!bp->vxlan_port_cnt || bp->vxlan_port != ti->port)
			return;
		bp->vxlan_port_cnt--;

		if (bp->vxlan_port_cnt != 0)
			return;

		set_bit(BNXT_VXLAN_DEL_PORT_SP_EVENT, &bp->sp_event);
		break;
	case UDP_TUNNEL_TYPE_GENEVE:
		if (!bp->nge_port_cnt || bp->nge_port != ti->port)
			return;
		bp->nge_port_cnt--;

		if (bp->nge_port_cnt != 0)
			return;

		set_bit(BNXT_GENEVE_DEL_PORT_SP_EVENT, &bp->sp_event);
		break;
	default:
		return;
	}

	bnxt_queue_sp_work(bp);
}
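
/* Illustrative note (not part of the upstream driver): the two handlers
 * above reference-count a single offloaded port per tunnel type.  Only the
 * first add and the last delete reach firmware; intermediate calls for the
 * same port just adjust vxlan_port_cnt/nge_port_cnt, and adds for a
 * different port while one is already programmed are silently ignored.
 */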

static int bnxt_bridge_getlink(struct sk_buff *skb, u32 pid, u32 seq,
			       struct net_device *dev, u32 filter_mask,
			       int nlflags)
{
	struct bnxt *bp = netdev_priv(dev);

	return ndo_dflt_bridge_getlink(skb, pid, seq, dev, bp->br_mode, 0, 0,
				       nlflags, filter_mask, NULL);
}

static int bnxt_bridge_setlink(struct net_device *dev, struct nlmsghdr *nlh,
			       u16 flags)
{
	struct bnxt *bp = netdev_priv(dev);
	struct nlattr *attr, *br_spec;
	int rem, rc = 0;

	if (bp->hwrm_spec_code < 0x10708 || !BNXT_SINGLE_PF(bp))
		return -EOPNOTSUPP;

	br_spec = nlmsg_find_attr(nlh, sizeof(struct ifinfomsg), IFLA_AF_SPEC);
	if (!br_spec)
		return -EINVAL;

	nla_for_each_nested(attr, br_spec, rem) {
		u16 mode;

		if (nla_type(attr) != IFLA_BRIDGE_MODE)
			continue;

		if (nla_len(attr) < sizeof(mode))
			return -EINVAL;

		mode = nla_get_u16(attr);
		if (mode == bp->br_mode)
			break;

		rc = bnxt_hwrm_set_br_mode(bp, mode);
		if (!rc)
			bp->br_mode = mode;
		break;
	}
	return rc;
}

static int bnxt_get_phys_port_name(struct net_device *dev, char *buf,
				   size_t len)
{
	struct bnxt *bp = netdev_priv(dev);
	int rc;

	/* The PF and its VF-reps only support the switchdev framework */
	if (!BNXT_PF(bp))
		return -EOPNOTSUPP;

	rc = snprintf(buf, len, "p%d", bp->pf.port_id);

	if (rc >= len)
		return -EOPNOTSUPP;
	return 0;
}
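
/* Illustrative note (not part of the upstream driver): for a PF on
 * physical port 0 this reports "p0", which user space (e.g. via
 * /sys/class/net/<ifname>/phys_port_name) can use to tell the uplink
 * apart from its VF representors.
 */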

int bnxt_port_attr_get(struct bnxt *bp, struct switchdev_attr *attr)
{
	if (bp->eswitch_mode != DEVLINK_ESWITCH_MODE_SWITCHDEV)
		return -EOPNOTSUPP;

	/* The PF and its VF-reps only support the switchdev framework */
	if (!BNXT_PF(bp))
		return -EOPNOTSUPP;

	switch (attr->id) {
	case SWITCHDEV_ATTR_ID_PORT_PARENT_ID:
		attr->u.ppid.id_len = sizeof(bp->switch_id);
		memcpy(attr->u.ppid.id, bp->switch_id, attr->u.ppid.id_len);
		break;
	default:
		return -EOPNOTSUPP;
	}
	return 0;
}

static int bnxt_swdev_port_attr_get(struct net_device *dev,
				    struct switchdev_attr *attr)
{
	return bnxt_port_attr_get(netdev_priv(dev), attr);
}

static const struct switchdev_ops bnxt_switchdev_ops = {
	.switchdev_port_attr_get	= bnxt_swdev_port_attr_get
};

static const struct net_device_ops bnxt_netdev_ops = {
	.ndo_open		= bnxt_open,
	.ndo_start_xmit		= bnxt_start_xmit,
	.ndo_stop		= bnxt_close,
	.ndo_get_stats64	= bnxt_get_stats64,
	.ndo_set_rx_mode	= bnxt_set_rx_mode,
	.ndo_do_ioctl		= bnxt_ioctl,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_set_mac_address	= bnxt_change_mac_addr,
	.ndo_change_mtu		= bnxt_change_mtu,
	.ndo_fix_features	= bnxt_fix_features,
	.ndo_set_features	= bnxt_set_features,
	.ndo_tx_timeout		= bnxt_tx_timeout,
#ifdef CONFIG_BNXT_SRIOV
	.ndo_get_vf_config	= bnxt_get_vf_config,
	.ndo_set_vf_mac		= bnxt_set_vf_mac,
	.ndo_set_vf_vlan	= bnxt_set_vf_vlan,
	.ndo_set_vf_rate	= bnxt_set_vf_bw,
	.ndo_set_vf_link_state	= bnxt_set_vf_link_state,
	.ndo_set_vf_spoofchk	= bnxt_set_vf_spoofchk,
	.ndo_set_vf_trust	= bnxt_set_vf_trust,
#endif
	.ndo_setup_tc		= bnxt_setup_tc,
#ifdef CONFIG_RFS_ACCEL
	.ndo_rx_flow_steer	= bnxt_rx_flow_steer,
#endif
	.ndo_udp_tunnel_add	= bnxt_udp_tunnel_add,
	.ndo_udp_tunnel_del	= bnxt_udp_tunnel_del,
	.ndo_bpf		= bnxt_xdp,
	.ndo_bridge_getlink	= bnxt_bridge_getlink,
	.ndo_bridge_setlink	= bnxt_bridge_setlink,
	.ndo_get_phys_port_name = bnxt_get_phys_port_name
};

static void bnxt_remove_one(struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct bnxt *bp = netdev_priv(dev);

	if (BNXT_PF(bp)) {
		bnxt_sriov_disable(bp);
		bnxt_dl_unregister(bp);
	}

	pci_disable_pcie_error_reporting(pdev);
	unregister_netdev(dev);
	bnxt_shutdown_tc(bp);
	bnxt_cancel_sp_work(bp);
	bp->sp_event = 0;

	bnxt_clear_int_mode(bp);
	bnxt_hwrm_func_drv_unrgtr(bp);
	bnxt_free_hwrm_resources(bp);
	bnxt_free_hwrm_short_cmd_req(bp);
	bnxt_ethtool_free(bp);
	bnxt_dcb_free(bp);
	kfree(bp->edev);
	bp->edev = NULL;
	bnxt_free_ctx_mem(bp);
	kfree(bp->ctx);
	bp->ctx = NULL;
	bnxt_cleanup_pci(bp);
	free_netdev(dev);
}

static int bnxt_probe_phy(struct bnxt *bp)
{
	int rc = 0;
	struct bnxt_link_info *link_info = &bp->link_info;

	rc = bnxt_hwrm_phy_qcaps(bp);
	if (rc) {
		netdev_err(bp->dev, "Probe phy can't get phy capabilities (rc: %x)\n",
			   rc);
		return rc;
	}
	mutex_init(&bp->link_lock);

	rc = bnxt_update_link(bp, false);
	if (rc) {
		netdev_err(bp->dev, "Probe phy can't update link (rc: %x)\n",
			   rc);
		return rc;
	}

	/* Older firmware does not have supported_auto_speeds, so assume
	 * that all supported speeds can be autonegotiated.
	 */
	if (link_info->auto_link_speeds && !link_info->support_auto_speeds)
		link_info->support_auto_speeds = link_info->support_speeds;

	/* Initialize the ethtool settings copy with NVM settings */
	if (BNXT_AUTO_MODE(link_info->auto_mode)) {
		link_info->autoneg = BNXT_AUTONEG_SPEED;
		if (bp->hwrm_spec_code >= 0x10201) {
			if (link_info->auto_pause_setting &
			    PORT_PHY_CFG_REQ_AUTO_PAUSE_AUTONEG_PAUSE)
				link_info->autoneg |= BNXT_AUTONEG_FLOW_CTRL;
		} else {
			link_info->autoneg |= BNXT_AUTONEG_FLOW_CTRL;
		}
		link_info->advertising = link_info->auto_link_speeds;
	} else {
		link_info->req_link_speed = link_info->force_link_speed;
		link_info->req_duplex = link_info->duplex_setting;
	}
	if (link_info->autoneg & BNXT_AUTONEG_FLOW_CTRL)
		link_info->req_flow_ctrl =
			link_info->auto_pause_setting & BNXT_LINK_PAUSE_BOTH;
	else
		link_info->req_flow_ctrl = link_info->force_pause_setting;
	return rc;
}

static int bnxt_get_max_irq(struct pci_dev *pdev)
{
	u16 ctrl;

	if (!pdev->msix_cap)
		return 1;

	pci_read_config_word(pdev, pdev->msix_cap + PCI_MSIX_FLAGS, &ctrl);
	return (ctrl & PCI_MSIX_FLAGS_QSIZE) + 1;
}

static void _bnxt_get_max_rings(struct bnxt *bp, int *max_rx, int *max_tx,
				int *max_cp)
{
	struct bnxt_hw_resc *hw_resc = &bp->hw_resc;
	int max_ring_grps = 0;

	*max_tx = hw_resc->max_tx_rings;
	*max_rx = hw_resc->max_rx_rings;
	*max_cp = min_t(int, bnxt_get_max_func_cp_rings_for_en(bp),
			hw_resc->max_irqs - bnxt_get_ulp_msix_num(bp));
	*max_cp = min_t(int, *max_cp, hw_resc->max_stat_ctxs);
	max_ring_grps = hw_resc->max_hw_ring_grps;
	if (BNXT_CHIP_TYPE_NITRO_A0(bp) && BNXT_PF(bp)) {
		*max_cp -= 1;
		*max_rx -= 2;
	}
	if (bp->flags & BNXT_FLAG_AGG_RINGS)
		*max_rx >>= 1;
	*max_rx = min_t(int, *max_rx, max_ring_grps);
}

int bnxt_get_max_rings(struct bnxt *bp, int *max_rx, int *max_tx, bool shared)
{
	int rx, tx, cp;

	_bnxt_get_max_rings(bp, &rx, &tx, &cp);
	*max_rx = rx;
	*max_tx = tx;
	if (!rx || !tx || !cp)
		return -ENOMEM;

	return bnxt_trim_rings(bp, max_rx, max_tx, cp, shared);
}

static int bnxt_get_dflt_rings(struct bnxt *bp, int *max_rx, int *max_tx,
			       bool shared)
{
	int rc;

	rc = bnxt_get_max_rings(bp, max_rx, max_tx, shared);
	if (rc && (bp->flags & BNXT_FLAG_AGG_RINGS)) {
		/* Not enough rings, try disabling agg rings. */
		bp->flags &= ~BNXT_FLAG_AGG_RINGS;
		rc = bnxt_get_max_rings(bp, max_rx, max_tx, shared);
		if (rc) {
			/* set BNXT_FLAG_AGG_RINGS back for consistency */
			bp->flags |= BNXT_FLAG_AGG_RINGS;
			return rc;
		}
		bp->flags |= BNXT_FLAG_NO_AGG_RINGS;
		bp->dev->hw_features &= ~(NETIF_F_LRO | NETIF_F_GRO_HW);
		bp->dev->features &= ~(NETIF_F_LRO | NETIF_F_GRO_HW);
		bnxt_set_ring_params(bp);
	}

	if (bp->flags & BNXT_FLAG_ROCE_CAP) {
		int max_cp, max_stat, max_irq;

		/* Reserve minimum resources for RoCE */
		max_cp = bnxt_get_max_func_cp_rings(bp);
		max_stat = bnxt_get_max_func_stat_ctxs(bp);
		max_irq = bnxt_get_max_func_irqs(bp);
		if (max_cp <= BNXT_MIN_ROCE_CP_RINGS ||
		    max_irq <= BNXT_MIN_ROCE_CP_RINGS ||
		    max_stat <= BNXT_MIN_ROCE_STAT_CTXS)
			return 0;

		max_cp -= BNXT_MIN_ROCE_CP_RINGS;
		max_irq -= BNXT_MIN_ROCE_CP_RINGS;
		max_stat -= BNXT_MIN_ROCE_STAT_CTXS;
		max_cp = min_t(int, max_cp, max_irq);
		max_cp = min_t(int, max_cp, max_stat);
		rc = bnxt_trim_rings(bp, max_rx, max_tx, max_cp, shared);
		if (rc)
			rc = 0;
	}
	return rc;
}

/* In the initial default shared ring setting, each shared ring must have an
 * RX/TX ring pair.
 */
static void bnxt_trim_dflt_sh_rings(struct bnxt *bp)
{
	bp->cp_nr_rings = min_t(int, bp->tx_nr_rings_per_tc, bp->rx_nr_rings);
	bp->rx_nr_rings = bp->cp_nr_rings;
	bp->tx_nr_rings_per_tc = bp->cp_nr_rings;
	bp->tx_nr_rings = bp->tx_nr_rings_per_tc;
}

static int bnxt_set_dflt_rings(struct bnxt *bp, bool sh)
{
	int dflt_rings, max_rx_rings, max_tx_rings, rc;

	if (!bnxt_can_reserve_rings(bp))
		return 0;

	if (sh)
		bp->flags |= BNXT_FLAG_SHARED_RINGS;
	dflt_rings = netif_get_num_default_rss_queues();
	/* Reduce default rings on multi-port cards so that total default
	 * rings do not exceed CPU count.
	 */
	if (bp->port_count > 1) {
		int max_rings =
			max_t(int, num_online_cpus() / bp->port_count, 1);

		dflt_rings = min_t(int, dflt_rings, max_rings);
	}
	rc = bnxt_get_dflt_rings(bp, &max_rx_rings, &max_tx_rings, sh);
	if (rc)
		return rc;
	bp->rx_nr_rings = min_t(int, dflt_rings, max_rx_rings);
	bp->tx_nr_rings_per_tc = min_t(int, dflt_rings, max_tx_rings);
	if (sh)
		bnxt_trim_dflt_sh_rings(bp);
	else
		bp->cp_nr_rings = bp->tx_nr_rings_per_tc + bp->rx_nr_rings;
	bp->tx_nr_rings = bp->tx_nr_rings_per_tc;

	rc = __bnxt_reserve_rings(bp);
	if (rc)
		netdev_warn(bp->dev, "Unable to reserve tx rings\n");
	bp->tx_nr_rings_per_tc = bp->tx_nr_rings;
	if (sh)
		bnxt_trim_dflt_sh_rings(bp);

	/* Rings may have been trimmed, re-reserve the trimmed rings. */
	if (bnxt_need_reserve_rings(bp)) {
		rc = __bnxt_reserve_rings(bp);
		if (rc)
			netdev_warn(bp->dev, "2nd rings reservation failed.\n");
		bp->tx_nr_rings_per_tc = bp->tx_nr_rings;
	}
	bp->num_stat_ctxs = bp->cp_nr_rings;
	if (BNXT_CHIP_TYPE_NITRO_A0(bp)) {
		bp->rx_nr_rings++;
		bp->cp_nr_rings++;
	}
	return rc;
}
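
/* Illustrative worked example (not part of the upstream driver): on a
 * 16-CPU host with a 2-port card, netif_get_num_default_rss_queues()
 * typically caps the default at 8, and the multi-port clamp above limits
 * each port to 16 / 2 = 8 rings; with shared rings the port then ends up
 * with up to 8 RX/TX pairs, subject to what firmware can actually reserve.
 */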

static int bnxt_init_dflt_ring_mode(struct bnxt *bp)
{
	int rc;

	if (bp->tx_nr_rings)
		return 0;

	bnxt_ulp_irq_stop(bp);
	bnxt_clear_int_mode(bp);
	rc = bnxt_set_dflt_rings(bp, true);
	if (rc) {
		netdev_err(bp->dev, "Not enough rings available.\n");
		goto init_dflt_ring_err;
	}
	rc = bnxt_init_int_mode(bp);
	if (rc)
		goto init_dflt_ring_err;

	bp->tx_nr_rings_per_tc = bp->tx_nr_rings;
	if (bnxt_rfs_supported(bp) && bnxt_rfs_capable(bp)) {
		bp->flags |= BNXT_FLAG_RFS;
		bp->dev->features |= NETIF_F_NTUPLE;
	}
init_dflt_ring_err:
	bnxt_ulp_irq_restart(bp, rc);
	return rc;
}

int bnxt_restore_pf_fw_resources(struct bnxt *bp)
{
	int rc;

	ASSERT_RTNL();
	bnxt_hwrm_func_qcaps(bp);

	if (netif_running(bp->dev))
		__bnxt_close_nic(bp, true, false);

	bnxt_ulp_irq_stop(bp);
	bnxt_clear_int_mode(bp);
	rc = bnxt_init_int_mode(bp);
	bnxt_ulp_irq_restart(bp, rc);

	if (netif_running(bp->dev)) {
		if (rc)
			dev_close(bp->dev);
		else
			rc = bnxt_open_nic(bp, true, false);
	}

	return rc;
}

static int bnxt_init_mac_addr(struct bnxt *bp)
{
	int rc = 0;

	if (BNXT_PF(bp)) {
		memcpy(bp->dev->dev_addr, bp->pf.mac_addr, ETH_ALEN);
	} else {
#ifdef CONFIG_BNXT_SRIOV
		struct bnxt_vf_info *vf = &bp->vf;
		bool strict_approval = true;

		if (is_valid_ether_addr(vf->mac_addr)) {
			/* overwrite netdev dev_addr with admin VF MAC */
			memcpy(bp->dev->dev_addr, vf->mac_addr, ETH_ALEN);
			/* Older PF driver or firmware may not approve this
			 * correctly.
			 */
			strict_approval = false;
		} else {
			eth_hw_addr_random(bp->dev);
		}
		rc = bnxt_approve_mac(bp, bp->dev->dev_addr, strict_approval);
#endif
	}
	return rc;
}

static int bnxt_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
{
	static int version_printed;
	struct net_device *dev;
	struct bnxt *bp;
	int rc, max_irqs;

	if (pci_is_bridge(pdev))
		return -ENODEV;

	if (version_printed++ == 0)
		pr_info("%s", version);

	max_irqs = bnxt_get_max_irq(pdev);
	dev = alloc_etherdev_mq(sizeof(*bp), max_irqs);
	if (!dev)
		return -ENOMEM;

	bp = netdev_priv(dev);
	bnxt_set_max_func_irqs(bp, max_irqs);

	if (bnxt_vf_pciid(ent->driver_data))
		bp->flags |= BNXT_FLAG_VF;

	if (pdev->msix_cap)
		bp->flags |= BNXT_FLAG_MSIX_CAP;

	rc = bnxt_init_board(pdev, dev);
	if (rc < 0)
		goto init_err_free;

	dev->netdev_ops = &bnxt_netdev_ops;
	dev->watchdog_timeo = BNXT_TX_TIMEOUT;
	dev->ethtool_ops = &bnxt_ethtool_ops;
	SWITCHDEV_SET_OPS(dev, &bnxt_switchdev_ops);
	pci_set_drvdata(pdev, dev);

	rc = bnxt_alloc_hwrm_resources(bp);
	if (rc)
		goto init_err_pci_clean;

	mutex_init(&bp->hwrm_cmd_lock);
	rc = bnxt_hwrm_ver_get(bp);
	if (rc)
		goto init_err_pci_clean;

	if ((bp->fw_cap & BNXT_FW_CAP_SHORT_CMD) ||
	    bp->hwrm_max_ext_req_len > BNXT_HWRM_MAX_REQ_LEN) {
		rc = bnxt_alloc_hwrm_short_cmd_req(bp);
		if (rc)
			goto init_err_pci_clean;
	}

	if (BNXT_CHIP_P5(bp))
		bp->flags |= BNXT_FLAG_CHIP_P5;

	rc = bnxt_hwrm_func_reset(bp);
	if (rc)
		goto init_err_pci_clean;

	bnxt_hwrm_fw_set_time(bp);

	dev->hw_features = NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | NETIF_F_SG |
			   NETIF_F_TSO | NETIF_F_TSO6 |
			   NETIF_F_GSO_UDP_TUNNEL | NETIF_F_GSO_GRE |
			   NETIF_F_GSO_IPXIP4 |
			   NETIF_F_GSO_UDP_TUNNEL_CSUM | NETIF_F_GSO_GRE_CSUM |
			   NETIF_F_GSO_PARTIAL | NETIF_F_RXHASH |
			   NETIF_F_RXCSUM | NETIF_F_GRO;

	if (BNXT_SUPPORTS_TPA(bp))
		dev->hw_features |= NETIF_F_LRO;

	dev->hw_enc_features =
			NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | NETIF_F_SG |
			NETIF_F_TSO | NETIF_F_TSO6 |
			NETIF_F_GSO_UDP_TUNNEL | NETIF_F_GSO_GRE |
			NETIF_F_GSO_UDP_TUNNEL_CSUM | NETIF_F_GSO_GRE_CSUM |
			NETIF_F_GSO_IPXIP4 | NETIF_F_GSO_PARTIAL;
	dev->gso_partial_features = NETIF_F_GSO_UDP_TUNNEL_CSUM |
				    NETIF_F_GSO_GRE_CSUM;
	dev->vlan_features = dev->hw_features | NETIF_F_HIGHDMA;
	dev->hw_features |= NETIF_F_HW_VLAN_CTAG_RX | NETIF_F_HW_VLAN_CTAG_TX |
			    NETIF_F_HW_VLAN_STAG_RX | NETIF_F_HW_VLAN_STAG_TX;
	if (BNXT_SUPPORTS_TPA(bp))
		dev->hw_features |= NETIF_F_GRO_HW;
	dev->features |= dev->hw_features | NETIF_F_HIGHDMA;
	if (dev->features & NETIF_F_GRO_HW)
		dev->features &= ~NETIF_F_LRO;
	dev->priv_flags |= IFF_UNICAST_FLT;

#ifdef CONFIG_BNXT_SRIOV
	init_waitqueue_head(&bp->sriov_cfg_wait);
	mutex_init(&bp->sriov_lock);
#endif
	if (BNXT_SUPPORTS_TPA(bp)) {
		bp->gro_func = bnxt_gro_func_5730x;
		if (BNXT_CHIP_P4(bp))
			bp->gro_func = bnxt_gro_func_5731x;
	}
	if (!BNXT_CHIP_P4_PLUS(bp))
		bp->flags |= BNXT_FLAG_DOUBLE_DB;

	rc = bnxt_hwrm_func_drv_rgtr(bp);
	if (rc)
		goto init_err_pci_clean;

	rc = bnxt_hwrm_func_rgtr_async_events(bp, NULL, 0);
	if (rc)
		goto init_err_pci_clean;

	bp->ulp_probe = bnxt_ulp_probe;

	rc = bnxt_hwrm_queue_qportcfg(bp);
	if (rc) {
		netdev_err(bp->dev, "hwrm query qportcfg failure rc: %x\n",
			   rc);
		rc = -1;
		goto init_err_pci_clean;
	}
	/* Get the MAX capabilities for this function */
	rc = bnxt_hwrm_func_qcaps(bp);
	if (rc) {
		netdev_err(bp->dev, "hwrm query capability failure rc: %x\n",
			   rc);
		rc = -1;
		goto init_err_pci_clean;
	}
	rc = bnxt_init_mac_addr(bp);
	if (rc) {
		dev_err(&pdev->dev, "Unable to initialize mac address.\n");
		rc = -EADDRNOTAVAIL;
		goto init_err_pci_clean;
	}

	bnxt_hwrm_func_qcfg(bp);
	bnxt_hwrm_vnic_qcaps(bp);
	bnxt_hwrm_port_led_qcaps(bp);
	bnxt_ethtool_init(bp);
	bnxt_dcb_init(bp);

	/* MTU range: 60 - FW defined max */
	dev->min_mtu = ETH_ZLEN;
	dev->max_mtu = bp->max_mtu;

	rc = bnxt_probe_phy(bp);
	if (rc)
		goto init_err_pci_clean;

	bnxt_set_rx_skb_mode(bp, false);
	bnxt_set_tpa_flags(bp);
	bnxt_set_ring_params(bp);
	rc = bnxt_set_dflt_rings(bp, true);
	if (rc) {
		netdev_err(bp->dev, "Not enough rings available.\n");
		rc = -ENOMEM;
		goto init_err_pci_clean;
	}

	/* Default RSS hash cfg. */
	bp->rss_hash_cfg = VNIC_RSS_CFG_REQ_HASH_TYPE_IPV4 |
			   VNIC_RSS_CFG_REQ_HASH_TYPE_TCP_IPV4 |
			   VNIC_RSS_CFG_REQ_HASH_TYPE_IPV6 |
			   VNIC_RSS_CFG_REQ_HASH_TYPE_TCP_IPV6;
	if (BNXT_CHIP_P4(bp) && bp->hwrm_spec_code >= 0x10501) {
		bp->flags |= BNXT_FLAG_UDP_RSS_CAP;
		bp->rss_hash_cfg |= VNIC_RSS_CFG_REQ_HASH_TYPE_UDP_IPV4 |
				    VNIC_RSS_CFG_REQ_HASH_TYPE_UDP_IPV6;
	}

	if (bnxt_rfs_supported(bp)) {
		dev->hw_features |= NETIF_F_NTUPLE;
		if (bnxt_rfs_capable(bp)) {
			bp->flags |= BNXT_FLAG_RFS;
			dev->features |= NETIF_F_NTUPLE;
		}
	}

	if (dev->hw_features & NETIF_F_HW_VLAN_CTAG_RX)
		bp->flags |= BNXT_FLAG_STRIP_VLAN;

	rc = bnxt_init_int_mode(bp);
	if (rc)
		goto init_err_pci_clean;

	/* No TC has been set yet and rings may have been trimmed due to
	 * limited MSIX, so we re-initialize the TX rings per TC.
	 */
	bp->tx_nr_rings_per_tc = bp->tx_nr_rings;

	bnxt_get_wol_settings(bp);
	if (bp->flags & BNXT_FLAG_WOL_CAP)
		device_set_wakeup_enable(&pdev->dev, bp->wol);
	else
		device_set_wakeup_capable(&pdev->dev, false);

	bnxt_hwrm_set_cache_line_size(bp, cache_line_size());

	bnxt_hwrm_coal_params_qcaps(bp);

	if (BNXT_PF(bp)) {
		if (!bnxt_pf_wq) {
			bnxt_pf_wq =
				create_singlethread_workqueue("bnxt_pf_wq");
			if (!bnxt_pf_wq) {
				dev_err(&pdev->dev, "Unable to create workqueue.\n");
				goto init_err_pci_clean;
			}
		}
		bnxt_init_tc(bp);
	}

	rc = register_netdev(dev);
	if (rc)
		goto init_err_cleanup_tc;

	if (BNXT_PF(bp))
		bnxt_dl_register(bp);

	netdev_info(dev, "%s found at mem %lx, node addr %pM\n",
		    board_info[ent->driver_data].name,
		    (long)pci_resource_start(pdev, 0), dev->dev_addr);
	pcie_print_link_status(pdev);

	return 0;

init_err_cleanup_tc:
	bnxt_shutdown_tc(bp);
	bnxt_clear_int_mode(bp);

init_err_pci_clean:
	bnxt_free_hwrm_resources(bp);
	bnxt_free_ctx_mem(bp);
	kfree(bp->ctx);
	bp->ctx = NULL;
	bnxt_cleanup_pci(bp);

init_err_free:
	free_netdev(dev);
	return rc;
}

static void bnxt_shutdown(struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct bnxt *bp;

	if (!dev)
		return;

	rtnl_lock();
	bp = netdev_priv(dev);
	if (!bp)
		goto shutdown_exit;

	if (netif_running(dev))
		dev_close(dev);

	bnxt_ulp_shutdown(bp);

	if (system_state == SYSTEM_POWER_OFF) {
		bnxt_clear_int_mode(bp);
		pci_wake_from_d3(pdev, bp->wol);
		pci_set_power_state(pdev, PCI_D3hot);
	}

shutdown_exit:
	rtnl_unlock();
}

#ifdef CONFIG_PM_SLEEP
static int bnxt_suspend(struct device *device)
{
	struct pci_dev *pdev = to_pci_dev(device);
	struct net_device *dev = pci_get_drvdata(pdev);
	struct bnxt *bp = netdev_priv(dev);
	int rc = 0;

	rtnl_lock();
	if (netif_running(dev)) {
		netif_device_detach(dev);
		rc = bnxt_close(dev);
	}
	bnxt_hwrm_func_drv_unrgtr(bp);
	rtnl_unlock();
	return rc;
}

static int bnxt_resume(struct device *device)
{
	struct pci_dev *pdev = to_pci_dev(device);
	struct net_device *dev = pci_get_drvdata(pdev);
	struct bnxt *bp = netdev_priv(dev);
	int rc = 0;

	rtnl_lock();
	if (bnxt_hwrm_ver_get(bp) || bnxt_hwrm_func_drv_rgtr(bp)) {
		rc = -ENODEV;
		goto resume_exit;
	}
	rc = bnxt_hwrm_func_reset(bp);
	if (rc) {
		rc = -EBUSY;
		goto resume_exit;
	}
	bnxt_get_wol_settings(bp);
	if (netif_running(dev)) {
		rc = bnxt_open(dev);
		if (!rc)
			netif_device_attach(dev);
	}

resume_exit:
	rtnl_unlock();
	return rc;
}

static SIMPLE_DEV_PM_OPS(bnxt_pm_ops, bnxt_suspend, bnxt_resume);
#define BNXT_PM_OPS (&bnxt_pm_ops)

#else

#define BNXT_PM_OPS NULL

#endif /* CONFIG_PM_SLEEP */

/**
 * bnxt_io_error_detected - called when PCI error is detected
 * @pdev: Pointer to PCI device
 * @state: The current pci connection state
 *
 * This function is called after a PCI bus error affecting
 * this device has been detected.
 */
static pci_ers_result_t bnxt_io_error_detected(struct pci_dev *pdev,
					       pci_channel_state_t state)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct bnxt *bp = netdev_priv(netdev);

	netdev_info(netdev, "PCI I/O error detected\n");

	rtnl_lock();
	netif_device_detach(netdev);

	bnxt_ulp_stop(bp);

	if (state == pci_channel_io_perm_failure) {
		rtnl_unlock();
		return PCI_ERS_RESULT_DISCONNECT;
	}

	if (netif_running(netdev))
		bnxt_close(netdev);

	pci_disable_device(pdev);
	rtnl_unlock();

	/* Request a slot reset. */
	return PCI_ERS_RESULT_NEED_RESET;
}

/**
 * bnxt_io_slot_reset - called after the pci bus has been reset.
 * @pdev: Pointer to PCI device
 *
 * Restart the card from scratch, as if from a cold-boot.
 * At this point, the card has experienced a hard reset,
 * followed by fixups by BIOS, and has its config space
 * set up identically to what it was at cold boot.
 */
static pci_ers_result_t bnxt_io_slot_reset(struct pci_dev *pdev)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct bnxt *bp = netdev_priv(netdev);
	int err = 0;
	pci_ers_result_t result = PCI_ERS_RESULT_DISCONNECT;

	netdev_info(bp->dev, "PCI Slot Reset\n");

	rtnl_lock();

	if (pci_enable_device(pdev)) {
		dev_err(&pdev->dev,
			"Cannot re-enable PCI device after reset.\n");
	} else {
		pci_set_master(pdev);

		err = bnxt_hwrm_func_reset(bp);
		if (!err && netif_running(netdev))
			err = bnxt_open(netdev);

		if (!err) {
			result = PCI_ERS_RESULT_RECOVERED;
			bnxt_ulp_start(bp);
		}
	}

	if (result != PCI_ERS_RESULT_RECOVERED && netif_running(netdev))
		dev_close(netdev);

	rtnl_unlock();

	return PCI_ERS_RESULT_RECOVERED;
}

/**
 * bnxt_io_resume - called when traffic can start flowing again.
 * @pdev: Pointer to PCI device
 *
 * This callback is called when the error recovery driver tells
 * us that it's OK to resume normal operation.
 */
static void bnxt_io_resume(struct pci_dev *pdev)
{
	struct net_device *netdev = pci_get_drvdata(pdev);

	rtnl_lock();

	netif_device_attach(netdev);

	rtnl_unlock();
}

static const struct pci_error_handlers bnxt_err_handler = {
	.error_detected	= bnxt_io_error_detected,
	.slot_reset	= bnxt_io_slot_reset,
	.resume		= bnxt_io_resume
};
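
/* Illustrative note (not part of the upstream driver): the PCI core drives
 * these callbacks in order during AER recovery -- .error_detected() (which
 * may return PCI_ERS_RESULT_NEED_RESET), then .slot_reset() after the link
 * has been reset, then .resume() once traffic may flow again.
 */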

static struct pci_driver bnxt_pci_driver = {
	.name		= DRV_MODULE_NAME,
	.id_table	= bnxt_pci_tbl,
	.probe		= bnxt_init_one,
	.remove		= bnxt_remove_one,
	.shutdown	= bnxt_shutdown,
	.driver.pm	= BNXT_PM_OPS,
	.err_handler	= &bnxt_err_handler,
#if defined(CONFIG_BNXT_SRIOV)
	.sriov_configure = bnxt_sriov_configure,
#endif
};

static int __init bnxt_init(void)
{
	bnxt_debug_init();
	return pci_register_driver(&bnxt_pci_driver);
}

static void __exit bnxt_exit(void)
{
	pci_unregister_driver(&bnxt_pci_driver);
	if (bnxt_pf_wq)
		destroy_workqueue(bnxt_pf_wq);
	bnxt_debug_exit();
}

module_init(bnxt_init);
module_exit(bnxt_exit);