[linux.git] drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c
1 /* Broadcom NetXtreme-C/E network driver.
2  *
3  * Copyright (c) 2014-2016 Broadcom Corporation
4  * Copyright (c) 2016-2017 Broadcom Limited
5  *
6  * This program is free software; you can redistribute it and/or modify
7  * it under the terms of the GNU General Public License as published by
8  * the Free Software Foundation.
9  */
10
11 #include <linux/ctype.h>
12 #include <linux/stringify.h>
13 #include <linux/ethtool.h>
14 #include <linux/interrupt.h>
15 #include <linux/pci.h>
16 #include <linux/etherdevice.h>
17 #include <linux/crc32.h>
18 #include <linux/firmware.h>
19 #include <linux/utsname.h>
20 #include <linux/time.h>
21 #include "bnxt_hsi.h"
22 #include "bnxt.h"
23 #include "bnxt_xdp.h"
24 #include "bnxt_ethtool.h"
25 #include "bnxt_nvm_defs.h"      /* NVRAM content constant and structure defs */
26 #include "bnxt_fw_hdr.h"        /* Firmware hdr constant and structure defs */
27 #include "bnxt_coredump.h"
28 #define FLASH_NVRAM_TIMEOUT     ((HWRM_CMD_TIMEOUT) * 100)
29 #define FLASH_PACKAGE_TIMEOUT   ((HWRM_CMD_TIMEOUT) * 200)
30 #define INSTALL_PACKAGE_TIMEOUT ((HWRM_CMD_TIMEOUT) * 200)
31
32 static u32 bnxt_get_msglevel(struct net_device *dev)
33 {
34         struct bnxt *bp = netdev_priv(dev);
35
36         return bp->msg_enable;
37 }
38
39 static void bnxt_set_msglevel(struct net_device *dev, u32 value)
40 {
41         struct bnxt *bp = netdev_priv(dev);
42
43         bp->msg_enable = value;
44 }
45
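/* Report the current interrupt coalescing settings to ethtool (e.g.
 * "ethtool -c eth0").  Buffer counts are stored internally scaled by
 * bufs_per_record, so they are divided back down here; adaptive RX
 * maps to the BNXT_FLAG_DIM (dynamic interrupt moderation) flag.
 */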
46 static int bnxt_get_coalesce(struct net_device *dev,
47                              struct ethtool_coalesce *coal)
48 {
49         struct bnxt *bp = netdev_priv(dev);
50         struct bnxt_coal *hw_coal;
51         u16 mult;
52
53         memset(coal, 0, sizeof(*coal));
54
55         coal->use_adaptive_rx_coalesce = bp->flags & BNXT_FLAG_DIM;
56
57         hw_coal = &bp->rx_coal;
58         mult = hw_coal->bufs_per_record;
59         coal->rx_coalesce_usecs = hw_coal->coal_ticks;
60         coal->rx_max_coalesced_frames = hw_coal->coal_bufs / mult;
61         coal->rx_coalesce_usecs_irq = hw_coal->coal_ticks_irq;
62         coal->rx_max_coalesced_frames_irq = hw_coal->coal_bufs_irq / mult;
63
64         hw_coal = &bp->tx_coal;
65         mult = hw_coal->bufs_per_record;
66         coal->tx_coalesce_usecs = hw_coal->coal_ticks;
67         coal->tx_max_coalesced_frames = hw_coal->coal_bufs / mult;
68         coal->tx_coalesce_usecs_irq = hw_coal->coal_ticks_irq;
69         coal->tx_max_coalesced_frames_irq = hw_coal->coal_bufs_irq / mult;
70
71         coal->stats_block_coalesce_usecs = bp->stats_coal_ticks;
72
73         return 0;
74 }
75
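/* Apply new coalescing settings (e.g. "ethtool -C eth0 rx-usecs 50
 * rx-frames 32 adaptive-rx on").  Most parameters only need a
 * bnxt_hwrm_set_coal() call, but changing the statistics block
 * interval requires a full close/open of the NIC to take effect.
 */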
76 static int bnxt_set_coalesce(struct net_device *dev,
77                              struct ethtool_coalesce *coal)
78 {
79         struct bnxt *bp = netdev_priv(dev);
80         bool update_stats = false;
81         struct bnxt_coal *hw_coal;
82         int rc = 0;
83         u16 mult;
84
85         if (coal->use_adaptive_rx_coalesce) {
86                 bp->flags |= BNXT_FLAG_DIM;
87         } else {
88                 if (bp->flags & BNXT_FLAG_DIM) {
89                         bp->flags &= ~(BNXT_FLAG_DIM);
90                         goto reset_coalesce;
91                 }
92         }
93
94         hw_coal = &bp->rx_coal;
95         mult = hw_coal->bufs_per_record;
96         hw_coal->coal_ticks = coal->rx_coalesce_usecs;
97         hw_coal->coal_bufs = coal->rx_max_coalesced_frames * mult;
98         hw_coal->coal_ticks_irq = coal->rx_coalesce_usecs_irq;
99         hw_coal->coal_bufs_irq = coal->rx_max_coalesced_frames_irq * mult;
100
101         hw_coal = &bp->tx_coal;
102         mult = hw_coal->bufs_per_record;
103         hw_coal->coal_ticks = coal->tx_coalesce_usecs;
104         hw_coal->coal_bufs = coal->tx_max_coalesced_frames * mult;
105         hw_coal->coal_ticks_irq = coal->tx_coalesce_usecs_irq;
106         hw_coal->coal_bufs_irq = coal->tx_max_coalesced_frames_irq * mult;
107
108         if (bp->stats_coal_ticks != coal->stats_block_coalesce_usecs) {
109                 u32 stats_ticks = coal->stats_block_coalesce_usecs;
110
111                 /* Allow 0, which means disable. */
112                 if (stats_ticks)
113                         stats_ticks = clamp_t(u32, stats_ticks,
114                                               BNXT_MIN_STATS_COAL_TICKS,
115                                               BNXT_MAX_STATS_COAL_TICKS);
116                 stats_ticks = rounddown(stats_ticks, BNXT_MIN_STATS_COAL_TICKS);
117                 bp->stats_coal_ticks = stats_ticks;
118                 if (bp->stats_coal_ticks)
119                         bp->current_interval =
120                                 bp->stats_coal_ticks * HZ / 1000000;
121                 else
122                         bp->current_interval = BNXT_TIMER_INTERVAL;
123                 update_stats = true;
124         }
125
126 reset_coalesce:
127         if (netif_running(dev)) {
128                 if (update_stats) {
129                         rc = bnxt_close_nic(bp, true, false);
130                         if (!rc)
131                                 rc = bnxt_open_nic(bp, true, false);
132                 } else {
133                         rc = bnxt_hwrm_set_coal(bp);
134                 }
135         }
136
137         return rc;
138 }
139
140 #define BNXT_NUM_STATS  21
141
142 #define BNXT_RX_STATS_ENTRY(counter)    \
143         { BNXT_RX_STATS_OFFSET(counter), __stringify(counter) }
144
145 #define BNXT_TX_STATS_ENTRY(counter)    \
146         { BNXT_TX_STATS_OFFSET(counter), __stringify(counter) }
147
148 #define BNXT_RX_STATS_EXT_ENTRY(counter)        \
149         { BNXT_RX_STATS_EXT_OFFSET(counter), __stringify(counter) }
150
151 #define BNXT_TX_STATS_EXT_ENTRY(counter)        \
152         { BNXT_TX_STATS_EXT_OFFSET(counter), __stringify(counter) }
153
154 #define BNXT_RX_STATS_EXT_PFC_ENTRY(n)                          \
155         BNXT_RX_STATS_EXT_ENTRY(pfc_pri##n##_rx_duration_us),   \
156         BNXT_RX_STATS_EXT_ENTRY(pfc_pri##n##_rx_transitions)
157
158 #define BNXT_TX_STATS_EXT_PFC_ENTRY(n)                          \
159         BNXT_TX_STATS_EXT_ENTRY(pfc_pri##n##_tx_duration_us),   \
160         BNXT_TX_STATS_EXT_ENTRY(pfc_pri##n##_tx_transitions)
161
162 #define BNXT_RX_STATS_EXT_PFC_ENTRIES                           \
163         BNXT_RX_STATS_EXT_PFC_ENTRY(0),                         \
164         BNXT_RX_STATS_EXT_PFC_ENTRY(1),                         \
165         BNXT_RX_STATS_EXT_PFC_ENTRY(2),                         \
166         BNXT_RX_STATS_EXT_PFC_ENTRY(3),                         \
167         BNXT_RX_STATS_EXT_PFC_ENTRY(4),                         \
168         BNXT_RX_STATS_EXT_PFC_ENTRY(5),                         \
169         BNXT_RX_STATS_EXT_PFC_ENTRY(6),                         \
170         BNXT_RX_STATS_EXT_PFC_ENTRY(7)
171
172 #define BNXT_TX_STATS_EXT_PFC_ENTRIES                           \
173         BNXT_TX_STATS_EXT_PFC_ENTRY(0),                         \
174         BNXT_TX_STATS_EXT_PFC_ENTRY(1),                         \
175         BNXT_TX_STATS_EXT_PFC_ENTRY(2),                         \
176         BNXT_TX_STATS_EXT_PFC_ENTRY(3),                         \
177         BNXT_TX_STATS_EXT_PFC_ENTRY(4),                         \
178         BNXT_TX_STATS_EXT_PFC_ENTRY(5),                         \
179         BNXT_TX_STATS_EXT_PFC_ENTRY(6),                         \
180         BNXT_TX_STATS_EXT_PFC_ENTRY(7)
181
182 #define BNXT_RX_STATS_EXT_COS_ENTRY(n)                          \
183         BNXT_RX_STATS_EXT_ENTRY(rx_bytes_cos##n),               \
184         BNXT_RX_STATS_EXT_ENTRY(rx_packets_cos##n)
185
186 #define BNXT_TX_STATS_EXT_COS_ENTRY(n)                          \
187         BNXT_TX_STATS_EXT_ENTRY(tx_bytes_cos##n),               \
188         BNXT_TX_STATS_EXT_ENTRY(tx_packets_cos##n)
189
190 #define BNXT_RX_STATS_EXT_COS_ENTRIES                           \
191         BNXT_RX_STATS_EXT_COS_ENTRY(0),                         \
192         BNXT_RX_STATS_EXT_COS_ENTRY(1),                         \
193         BNXT_RX_STATS_EXT_COS_ENTRY(2),                         \
194         BNXT_RX_STATS_EXT_COS_ENTRY(3),                         \
195         BNXT_RX_STATS_EXT_COS_ENTRY(4),                         \
196         BNXT_RX_STATS_EXT_COS_ENTRY(5),                         \
197         BNXT_RX_STATS_EXT_COS_ENTRY(6),                         \
198         BNXT_RX_STATS_EXT_COS_ENTRY(7)                          \
199
200 #define BNXT_TX_STATS_EXT_COS_ENTRIES                           \
201         BNXT_TX_STATS_EXT_COS_ENTRY(0),                         \
202         BNXT_TX_STATS_EXT_COS_ENTRY(1),                         \
203         BNXT_TX_STATS_EXT_COS_ENTRY(2),                         \
204         BNXT_TX_STATS_EXT_COS_ENTRY(3),                         \
205         BNXT_TX_STATS_EXT_COS_ENTRY(4),                         \
206         BNXT_TX_STATS_EXT_COS_ENTRY(5),                         \
207         BNXT_TX_STATS_EXT_COS_ENTRY(6),                         \
208         BNXT_TX_STATS_EXT_COS_ENTRY(7)                          \
209
210 enum {
211         RX_TOTAL_DISCARDS,
212         TX_TOTAL_DISCARDS,
213 };
214
215 static struct {
216         u64                     counter;
217         char                    string[ETH_GSTRING_LEN];
218 } bnxt_sw_func_stats[] = {
219         {0, "rx_total_discard_pkts"},
220         {0, "tx_total_discard_pkts"},
221 };
222
223 static const struct {
224         long offset;
225         char string[ETH_GSTRING_LEN];
226 } bnxt_port_stats_arr[] = {
227         BNXT_RX_STATS_ENTRY(rx_64b_frames),
228         BNXT_RX_STATS_ENTRY(rx_65b_127b_frames),
229         BNXT_RX_STATS_ENTRY(rx_128b_255b_frames),
230         BNXT_RX_STATS_ENTRY(rx_256b_511b_frames),
231         BNXT_RX_STATS_ENTRY(rx_512b_1023b_frames),
232         BNXT_RX_STATS_ENTRY(rx_1024b_1518b_frames),
233         BNXT_RX_STATS_ENTRY(rx_good_vlan_frames),
234         BNXT_RX_STATS_ENTRY(rx_1519b_2047b_frames),
235         BNXT_RX_STATS_ENTRY(rx_2048b_4095b_frames),
236         BNXT_RX_STATS_ENTRY(rx_4096b_9216b_frames),
237         BNXT_RX_STATS_ENTRY(rx_9217b_16383b_frames),
238         BNXT_RX_STATS_ENTRY(rx_total_frames),
239         BNXT_RX_STATS_ENTRY(rx_ucast_frames),
240         BNXT_RX_STATS_ENTRY(rx_mcast_frames),
241         BNXT_RX_STATS_ENTRY(rx_bcast_frames),
242         BNXT_RX_STATS_ENTRY(rx_fcs_err_frames),
243         BNXT_RX_STATS_ENTRY(rx_ctrl_frames),
244         BNXT_RX_STATS_ENTRY(rx_pause_frames),
245         BNXT_RX_STATS_ENTRY(rx_pfc_frames),
246         BNXT_RX_STATS_ENTRY(rx_align_err_frames),
247         BNXT_RX_STATS_ENTRY(rx_ovrsz_frames),
248         BNXT_RX_STATS_ENTRY(rx_jbr_frames),
249         BNXT_RX_STATS_ENTRY(rx_mtu_err_frames),
250         BNXT_RX_STATS_ENTRY(rx_tagged_frames),
251         BNXT_RX_STATS_ENTRY(rx_double_tagged_frames),
252         BNXT_RX_STATS_ENTRY(rx_good_frames),
253         BNXT_RX_STATS_ENTRY(rx_pfc_ena_frames_pri0),
254         BNXT_RX_STATS_ENTRY(rx_pfc_ena_frames_pri1),
255         BNXT_RX_STATS_ENTRY(rx_pfc_ena_frames_pri2),
256         BNXT_RX_STATS_ENTRY(rx_pfc_ena_frames_pri3),
257         BNXT_RX_STATS_ENTRY(rx_pfc_ena_frames_pri4),
258         BNXT_RX_STATS_ENTRY(rx_pfc_ena_frames_pri5),
259         BNXT_RX_STATS_ENTRY(rx_pfc_ena_frames_pri6),
260         BNXT_RX_STATS_ENTRY(rx_pfc_ena_frames_pri7),
261         BNXT_RX_STATS_ENTRY(rx_undrsz_frames),
262         BNXT_RX_STATS_ENTRY(rx_eee_lpi_events),
263         BNXT_RX_STATS_ENTRY(rx_eee_lpi_duration),
264         BNXT_RX_STATS_ENTRY(rx_bytes),
265         BNXT_RX_STATS_ENTRY(rx_runt_bytes),
266         BNXT_RX_STATS_ENTRY(rx_runt_frames),
267         BNXT_RX_STATS_ENTRY(rx_stat_discard),
268         BNXT_RX_STATS_ENTRY(rx_stat_err),
269
270         BNXT_TX_STATS_ENTRY(tx_64b_frames),
271         BNXT_TX_STATS_ENTRY(tx_65b_127b_frames),
272         BNXT_TX_STATS_ENTRY(tx_128b_255b_frames),
273         BNXT_TX_STATS_ENTRY(tx_256b_511b_frames),
274         BNXT_TX_STATS_ENTRY(tx_512b_1023b_frames),
275         BNXT_TX_STATS_ENTRY(tx_1024b_1518b_frames),
276         BNXT_TX_STATS_ENTRY(tx_good_vlan_frames),
277         BNXT_TX_STATS_ENTRY(tx_1519b_2047b_frames),
278         BNXT_TX_STATS_ENTRY(tx_2048b_4095b_frames),
279         BNXT_TX_STATS_ENTRY(tx_4096b_9216b_frames),
280         BNXT_TX_STATS_ENTRY(tx_9217b_16383b_frames),
281         BNXT_TX_STATS_ENTRY(tx_good_frames),
282         BNXT_TX_STATS_ENTRY(tx_total_frames),
283         BNXT_TX_STATS_ENTRY(tx_ucast_frames),
284         BNXT_TX_STATS_ENTRY(tx_mcast_frames),
285         BNXT_TX_STATS_ENTRY(tx_bcast_frames),
286         BNXT_TX_STATS_ENTRY(tx_pause_frames),
287         BNXT_TX_STATS_ENTRY(tx_pfc_frames),
288         BNXT_TX_STATS_ENTRY(tx_jabber_frames),
289         BNXT_TX_STATS_ENTRY(tx_fcs_err_frames),
290         BNXT_TX_STATS_ENTRY(tx_err),
291         BNXT_TX_STATS_ENTRY(tx_fifo_underruns),
292         BNXT_TX_STATS_ENTRY(tx_pfc_ena_frames_pri0),
293         BNXT_TX_STATS_ENTRY(tx_pfc_ena_frames_pri1),
294         BNXT_TX_STATS_ENTRY(tx_pfc_ena_frames_pri2),
295         BNXT_TX_STATS_ENTRY(tx_pfc_ena_frames_pri3),
296         BNXT_TX_STATS_ENTRY(tx_pfc_ena_frames_pri4),
297         BNXT_TX_STATS_ENTRY(tx_pfc_ena_frames_pri5),
298         BNXT_TX_STATS_ENTRY(tx_pfc_ena_frames_pri6),
299         BNXT_TX_STATS_ENTRY(tx_pfc_ena_frames_pri7),
300         BNXT_TX_STATS_ENTRY(tx_eee_lpi_events),
301         BNXT_TX_STATS_ENTRY(tx_eee_lpi_duration),
302         BNXT_TX_STATS_ENTRY(tx_total_collisions),
303         BNXT_TX_STATS_ENTRY(tx_bytes),
304         BNXT_TX_STATS_ENTRY(tx_xthol_frames),
305         BNXT_TX_STATS_ENTRY(tx_stat_discard),
306         BNXT_TX_STATS_ENTRY(tx_stat_error),
307 };
308
309 static const struct {
310         long offset;
311         char string[ETH_GSTRING_LEN];
312 } bnxt_port_stats_ext_arr[] = {
313         BNXT_RX_STATS_EXT_ENTRY(link_down_events),
314         BNXT_RX_STATS_EXT_ENTRY(continuous_pause_events),
315         BNXT_RX_STATS_EXT_ENTRY(resume_pause_events),
316         BNXT_RX_STATS_EXT_ENTRY(continuous_roce_pause_events),
317         BNXT_RX_STATS_EXT_ENTRY(resume_roce_pause_events),
318         BNXT_RX_STATS_EXT_COS_ENTRIES,
319         BNXT_RX_STATS_EXT_PFC_ENTRIES,
320 };
321
322 static const struct {
323         long offset;
324         char string[ETH_GSTRING_LEN];
325 } bnxt_tx_port_stats_ext_arr[] = {
326         BNXT_TX_STATS_EXT_COS_ENTRIES,
327         BNXT_TX_STATS_EXT_PFC_ENTRIES,
328 };
329
330 #define BNXT_NUM_SW_FUNC_STATS  ARRAY_SIZE(bnxt_sw_func_stats)
331 #define BNXT_NUM_PORT_STATS ARRAY_SIZE(bnxt_port_stats_arr)
332
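/* Total number of ethtool stats: the fixed per-ring set for every
 * completion ring, plus the software discard counters, plus the port
 * and extended port statistics when the firmware supports them.
 */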
333 static int bnxt_get_num_stats(struct bnxt *bp)
334 {
335         int num_stats = BNXT_NUM_STATS * bp->cp_nr_rings;
336
337         num_stats += BNXT_NUM_SW_FUNC_STATS;
338
339         if (bp->flags & BNXT_FLAG_PORT_STATS)
340                 num_stats += BNXT_NUM_PORT_STATS;
341
342         if (bp->flags & BNXT_FLAG_PORT_STATS_EXT)
343                 num_stats += bp->fw_rx_stats_ext_size +
344                              bp->fw_tx_stats_ext_size;
345
346         return num_stats;
347 }
348
349 static int bnxt_get_sset_count(struct net_device *dev, int sset)
350 {
351         struct bnxt *bp = netdev_priv(dev);
352
353         switch (sset) {
354         case ETH_SS_STATS:
355                 return bnxt_get_num_stats(bp);
356         case ETH_SS_TEST:
357                 if (!bp->num_tests)
358                         return -EOPNOTSUPP;
359                 return bp->num_tests;
360         default:
361                 return -EOPNOTSUPP;
362         }
363 }
364
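/* Fill the ethtool stats buffer.  The ordering here must stay in sync
 * with the strings emitted by bnxt_get_strings(): per-ring hardware
 * counters and rx_l4_csum_errors first, then the software discard
 * totals, then the (extended) port statistics if present.
 */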
365 static void bnxt_get_ethtool_stats(struct net_device *dev,
366                                    struct ethtool_stats *stats, u64 *buf)
367 {
368         u32 i, j = 0;
369         struct bnxt *bp = netdev_priv(dev);
370         u32 stat_fields = sizeof(struct ctx_hw_stats) / 8;
371
372         if (!bp->bnapi)
373                 return;
374
375         for (i = 0; i < BNXT_NUM_SW_FUNC_STATS; i++)
376                 bnxt_sw_func_stats[i].counter = 0;
377
378         for (i = 0; i < bp->cp_nr_rings; i++) {
379                 struct bnxt_napi *bnapi = bp->bnapi[i];
380                 struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
381                 __le64 *hw_stats = (__le64 *)cpr->hw_stats;
382                 int k;
383
384                 for (k = 0; k < stat_fields; j++, k++)
385                         buf[j] = le64_to_cpu(hw_stats[k]);
386                 buf[j++] = cpr->rx_l4_csum_errors;
387
388                 bnxt_sw_func_stats[RX_TOTAL_DISCARDS].counter +=
389                         le64_to_cpu(cpr->hw_stats->rx_discard_pkts);
390                 bnxt_sw_func_stats[TX_TOTAL_DISCARDS].counter +=
391                         le64_to_cpu(cpr->hw_stats->tx_discard_pkts);
392         }
393
394         for (i = 0; i < BNXT_NUM_SW_FUNC_STATS; i++, j++)
395                 buf[j] = bnxt_sw_func_stats[i].counter;
396
397         if (bp->flags & BNXT_FLAG_PORT_STATS) {
398                 __le64 *port_stats = (__le64 *)bp->hw_rx_port_stats;
399
400                 for (i = 0; i < BNXT_NUM_PORT_STATS; i++, j++) {
401                         buf[j] = le64_to_cpu(*(port_stats +
402                                                bnxt_port_stats_arr[i].offset));
403                 }
404         }
405         if (bp->flags & BNXT_FLAG_PORT_STATS_EXT) {
406                 __le64 *rx_port_stats_ext = (__le64 *)bp->hw_rx_port_stats_ext;
407                 __le64 *tx_port_stats_ext = (__le64 *)bp->hw_tx_port_stats_ext;
408
409                 for (i = 0; i < bp->fw_rx_stats_ext_size; i++, j++) {
410                         buf[j] = le64_to_cpu(*(rx_port_stats_ext +
411                                             bnxt_port_stats_ext_arr[i].offset));
412                 }
413                 for (i = 0; i < bp->fw_tx_stats_ext_size; i++, j++) {
414                         buf[j] = le64_to_cpu(*(tx_port_stats_ext +
415                                         bnxt_tx_port_stats_ext_arr[i].offset));
416                 }
417         }
418 }
419
420 static void bnxt_get_strings(struct net_device *dev, u32 stringset, u8 *buf)
421 {
422         struct bnxt *bp = netdev_priv(dev);
423         u32 i;
424
425         switch (stringset) {
426         /* The number of strings must match BNXT_NUM_STATS defined above. */
427         case ETH_SS_STATS:
428                 for (i = 0; i < bp->cp_nr_rings; i++) {
429                         sprintf(buf, "[%d]: rx_ucast_packets", i);
430                         buf += ETH_GSTRING_LEN;
431                         sprintf(buf, "[%d]: rx_mcast_packets", i);
432                         buf += ETH_GSTRING_LEN;
433                         sprintf(buf, "[%d]: rx_bcast_packets", i);
434                         buf += ETH_GSTRING_LEN;
435                         sprintf(buf, "[%d]: rx_discards", i);
436                         buf += ETH_GSTRING_LEN;
437                         sprintf(buf, "[%d]: rx_drops", i);
438                         buf += ETH_GSTRING_LEN;
439                         sprintf(buf, "[%d]: rx_ucast_bytes", i);
440                         buf += ETH_GSTRING_LEN;
441                         sprintf(buf, "[%d]: rx_mcast_bytes", i);
442                         buf += ETH_GSTRING_LEN;
443                         sprintf(buf, "[%d]: rx_bcast_bytes", i);
444                         buf += ETH_GSTRING_LEN;
445                         sprintf(buf, "[%d]: tx_ucast_packets", i);
446                         buf += ETH_GSTRING_LEN;
447                         sprintf(buf, "[%d]: tx_mcast_packets", i);
448                         buf += ETH_GSTRING_LEN;
449                         sprintf(buf, "[%d]: tx_bcast_packets", i);
450                         buf += ETH_GSTRING_LEN;
451                         sprintf(buf, "[%d]: tx_discards", i);
452                         buf += ETH_GSTRING_LEN;
453                         sprintf(buf, "[%d]: tx_drops", i);
454                         buf += ETH_GSTRING_LEN;
455                         sprintf(buf, "[%d]: tx_ucast_bytes", i);
456                         buf += ETH_GSTRING_LEN;
457                         sprintf(buf, "[%d]: tx_mcast_bytes", i);
458                         buf += ETH_GSTRING_LEN;
459                         sprintf(buf, "[%d]: tx_bcast_bytes", i);
460                         buf += ETH_GSTRING_LEN;
461                         sprintf(buf, "[%d]: tpa_packets", i);
462                         buf += ETH_GSTRING_LEN;
463                         sprintf(buf, "[%d]: tpa_bytes", i);
464                         buf += ETH_GSTRING_LEN;
465                         sprintf(buf, "[%d]: tpa_events", i);
466                         buf += ETH_GSTRING_LEN;
467                         sprintf(buf, "[%d]: tpa_aborts", i);
468                         buf += ETH_GSTRING_LEN;
469                         sprintf(buf, "[%d]: rx_l4_csum_errors", i);
470                         buf += ETH_GSTRING_LEN;
471                 }
472                 for (i = 0; i < BNXT_NUM_SW_FUNC_STATS; i++) {
473                         strcpy(buf, bnxt_sw_func_stats[i].string);
474                         buf += ETH_GSTRING_LEN;
475                 }
476
477                 if (bp->flags & BNXT_FLAG_PORT_STATS) {
478                         for (i = 0; i < BNXT_NUM_PORT_STATS; i++) {
479                                 strcpy(buf, bnxt_port_stats_arr[i].string);
480                                 buf += ETH_GSTRING_LEN;
481                         }
482                 }
483                 if (bp->flags & BNXT_FLAG_PORT_STATS_EXT) {
484                         for (i = 0; i < bp->fw_rx_stats_ext_size; i++) {
485                                 strcpy(buf, bnxt_port_stats_ext_arr[i].string);
486                                 buf += ETH_GSTRING_LEN;
487                         }
488                         for (i = 0; i < bp->fw_tx_stats_ext_size; i++) {
489                                 strcpy(buf,
490                                        bnxt_tx_port_stats_ext_arr[i].string);
491                                 buf += ETH_GSTRING_LEN;
492                         }
493                 }
494                 break;
495         case ETH_SS_TEST:
496                 if (bp->num_tests)
497                         memcpy(buf, bp->test_info->string,
498                                bp->num_tests * ETH_GSTRING_LEN);
499                 break;
500         default:
501                 netdev_err(bp->dev, "bnxt_get_strings invalid request %x\n",
502                            stringset);
503                 break;
504         }
505 }
506
507 static void bnxt_get_ringparam(struct net_device *dev,
508                                struct ethtool_ringparam *ering)
509 {
510         struct bnxt *bp = netdev_priv(dev);
511
512         ering->rx_max_pending = BNXT_MAX_RX_DESC_CNT;
513         ering->rx_jumbo_max_pending = BNXT_MAX_RX_JUM_DESC_CNT;
514         ering->tx_max_pending = BNXT_MAX_TX_DESC_CNT;
515
516         ering->rx_pending = bp->rx_ring_size;
517         ering->rx_jumbo_pending = bp->rx_agg_ring_size;
518         ering->tx_pending = bp->tx_ring_size;
519 }
520
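/* Resize the rx/tx rings (e.g. "ethtool -G eth0 rx 2048 tx 1024").
 * The tx ring must be larger than MAX_SKB_FRAGS so a maximally
 * fragmented skb can always be queued; if the interface is running,
 * the NIC is bounced (close/open) for the new sizes to take effect.
 */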
521 static int bnxt_set_ringparam(struct net_device *dev,
522                               struct ethtool_ringparam *ering)
523 {
524         struct bnxt *bp = netdev_priv(dev);
525
526         if ((ering->rx_pending > BNXT_MAX_RX_DESC_CNT) ||
527             (ering->tx_pending > BNXT_MAX_TX_DESC_CNT) ||
528             (ering->tx_pending <= MAX_SKB_FRAGS))
529                 return -EINVAL;
530
531         if (netif_running(dev))
532                 bnxt_close_nic(bp, false, false);
533
534         bp->rx_ring_size = ering->rx_pending;
535         bp->tx_ring_size = ering->tx_pending;
536         bnxt_set_ring_params(bp);
537
538         if (netif_running(dev))
539                 return bnxt_open_nic(bp, false, false);
540
541         return 0;
542 }
543
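/* Report ring/channel limits and current counts (e.g. "ethtool -l
 * eth0").  With shared completion rings the configuration is exposed
 * as "combined" channels; otherwise separate rx/tx counts are shown.
 */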
544 static void bnxt_get_channels(struct net_device *dev,
545                               struct ethtool_channels *channel)
546 {
547         struct bnxt *bp = netdev_priv(dev);
548         struct bnxt_hw_resc *hw_resc = &bp->hw_resc;
549         int max_rx_rings, max_tx_rings, tcs;
550         int max_tx_sch_inputs;
551
552         /* Get the most up-to-date max_tx_sch_inputs. */
553         if (BNXT_NEW_RM(bp))
554                 bnxt_hwrm_func_resc_qcaps(bp, false);
555         max_tx_sch_inputs = hw_resc->max_tx_sch_inputs;
556
557         bnxt_get_max_rings(bp, &max_rx_rings, &max_tx_rings, true);
558         if (max_tx_sch_inputs)
559                 max_tx_rings = min_t(int, max_tx_rings, max_tx_sch_inputs);
560         channel->max_combined = min_t(int, max_rx_rings, max_tx_rings);
561
562         if (bnxt_get_max_rings(bp, &max_rx_rings, &max_tx_rings, false)) {
563                 max_rx_rings = 0;
564                 max_tx_rings = 0;
565         }
566         if (max_tx_sch_inputs)
567                 max_tx_rings = min_t(int, max_tx_rings, max_tx_sch_inputs);
568
569         tcs = netdev_get_num_tc(dev);
570         if (tcs > 1)
571                 max_tx_rings /= tcs;
572
573         channel->max_rx = max_rx_rings;
574         channel->max_tx = max_tx_rings;
575         channel->max_other = 0;
576         if (bp->flags & BNXT_FLAG_SHARED_RINGS) {
577                 channel->combined_count = bp->rx_nr_rings;
578                 if (BNXT_CHIP_TYPE_NITRO_A0(bp))
579                         channel->combined_count--;
580         } else {
581                 if (!BNXT_CHIP_TYPE_NITRO_A0(bp)) {
582                         channel->rx_count = bp->rx_nr_rings;
583                         channel->tx_count = bp->tx_nr_rings_per_tc;
584                 }
585         }
586 }
587
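/* Reconfigure the number of channels (e.g. "ethtool -L eth0 combined 8").
 * Combined and separate rx/tx requests are mutually exclusive, and XDP
 * currently requires combined (shared-ring) mode.
 */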
588 static int bnxt_set_channels(struct net_device *dev,
589                              struct ethtool_channels *channel)
590 {
591         struct bnxt *bp = netdev_priv(dev);
592         int req_tx_rings, req_rx_rings, tcs;
593         bool sh = false;
594         int tx_xdp = 0;
595         int rc = 0;
596
597         if (channel->other_count)
598                 return -EINVAL;
599
600         if (!channel->combined_count &&
601             (!channel->rx_count || !channel->tx_count))
602                 return -EINVAL;
603
604         if (channel->combined_count &&
605             (channel->rx_count || channel->tx_count))
606                 return -EINVAL;
607
608         if (BNXT_CHIP_TYPE_NITRO_A0(bp) && (channel->rx_count ||
609                                             channel->tx_count))
610                 return -EINVAL;
611
612         if (channel->combined_count)
613                 sh = true;
614
615         tcs = netdev_get_num_tc(dev);
616
617         req_tx_rings = sh ? channel->combined_count : channel->tx_count;
618         req_rx_rings = sh ? channel->combined_count : channel->rx_count;
619         if (bp->tx_nr_rings_xdp) {
620                 if (!sh) {
621                         netdev_err(dev, "Only combined mode supported when XDP is enabled.\n");
622                         return -EINVAL;
623                 }
624                 tx_xdp = req_rx_rings;
625         }
626         rc = bnxt_check_rings(bp, req_tx_rings, req_rx_rings, sh, tcs, tx_xdp);
627         if (rc) {
628                 netdev_warn(dev, "Unable to allocate the requested rings\n");
629                 return rc;
630         }
631
632         if (netif_running(dev)) {
633                 if (BNXT_PF(bp)) {
634                         /* TODO CHIMP_FW: Send message to all VFs
635                          * before PF unload
636                          */
637                 }
638                 rc = bnxt_close_nic(bp, true, false);
639                 if (rc) {
640                         netdev_err(bp->dev, "Set channel failure rc: %x\n",
641                                    rc);
642                         return rc;
643                 }
644         }
645
646         if (sh) {
647                 bp->flags |= BNXT_FLAG_SHARED_RINGS;
648                 bp->rx_nr_rings = channel->combined_count;
649                 bp->tx_nr_rings_per_tc = channel->combined_count;
650         } else {
651                 bp->flags &= ~BNXT_FLAG_SHARED_RINGS;
652                 bp->rx_nr_rings = channel->rx_count;
653                 bp->tx_nr_rings_per_tc = channel->tx_count;
654         }
655         bp->tx_nr_rings_xdp = tx_xdp;
656         bp->tx_nr_rings = bp->tx_nr_rings_per_tc + tx_xdp;
657         if (tcs > 1)
658                 bp->tx_nr_rings = bp->tx_nr_rings_per_tc * tcs + tx_xdp;
659
660         bp->cp_nr_rings = sh ? max_t(int, bp->tx_nr_rings, bp->rx_nr_rings) :
661                                bp->tx_nr_rings + bp->rx_nr_rings;
662
663         bp->num_stat_ctxs = bp->cp_nr_rings;
664
665         /* After changing number of rx channels, update NTUPLE feature. */
666         netdev_update_features(dev);
667         if (netif_running(dev)) {
668                 rc = bnxt_open_nic(bp, true, false);
669                 if ((!rc) && BNXT_PF(bp)) {
670                         /* TODO CHIMP_FW: Send message to all VFs
671                          * to re-enable
672                          */
673                 }
674         } else {
675                 rc = bnxt_reserve_rings(bp);
676         }
677
678         return rc;
679 }
680
681 #ifdef CONFIG_RFS_ACCEL
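/* Walk the ntuple filter hash table under RCU and return the sw_id of
 * each active filter, up to the rule count requested by ethtool.
 */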
682 static int bnxt_grxclsrlall(struct bnxt *bp, struct ethtool_rxnfc *cmd,
683                             u32 *rule_locs)
684 {
685         int i, j = 0;
686
687         cmd->data = bp->ntp_fltr_count;
688         for (i = 0; i < BNXT_NTP_FLTR_HASH_SIZE; i++) {
689                 struct hlist_head *head;
690                 struct bnxt_ntuple_filter *fltr;
691
692                 head = &bp->ntp_fltr_hash_tbl[i];
693                 rcu_read_lock();
694                 hlist_for_each_entry_rcu(fltr, head, hash) {
695                         if (j == cmd->rule_cnt)
696                                 break;
697                         rule_locs[j++] = fltr->sw_id;
698                 }
699                 rcu_read_unlock();
700                 if (j == cmd->rule_cnt)
701                         break;
702         }
703         cmd->rule_cnt = j;
704         return 0;
705 }
706
707 static int bnxt_grxclsrule(struct bnxt *bp, struct ethtool_rxnfc *cmd)
708 {
709         struct ethtool_rx_flow_spec *fs =
710                 (struct ethtool_rx_flow_spec *)&cmd->fs;
711         struct bnxt_ntuple_filter *fltr;
712         struct flow_keys *fkeys;
713         int i, rc = -EINVAL;
714
715         if (fs->location >= BNXT_NTP_FLTR_MAX_FLTR)
716                 return rc;
717
718         for (i = 0; i < BNXT_NTP_FLTR_HASH_SIZE; i++) {
719                 struct hlist_head *head;
720
721                 head = &bp->ntp_fltr_hash_tbl[i];
722                 rcu_read_lock();
723                 hlist_for_each_entry_rcu(fltr, head, hash) {
724                         if (fltr->sw_id == fs->location)
725                                 goto fltr_found;
726                 }
727                 rcu_read_unlock();
728         }
729         return rc;
730
731 fltr_found:
732         fkeys = &fltr->fkeys;
733         if (fkeys->basic.n_proto == htons(ETH_P_IP)) {
734                 if (fkeys->basic.ip_proto == IPPROTO_TCP)
735                         fs->flow_type = TCP_V4_FLOW;
736                 else if (fkeys->basic.ip_proto == IPPROTO_UDP)
737                         fs->flow_type = UDP_V4_FLOW;
738                 else
739                         goto fltr_err;
740
741                 fs->h_u.tcp_ip4_spec.ip4src = fkeys->addrs.v4addrs.src;
742                 fs->m_u.tcp_ip4_spec.ip4src = cpu_to_be32(~0);
743
744                 fs->h_u.tcp_ip4_spec.ip4dst = fkeys->addrs.v4addrs.dst;
745                 fs->m_u.tcp_ip4_spec.ip4dst = cpu_to_be32(~0);
746
747                 fs->h_u.tcp_ip4_spec.psrc = fkeys->ports.src;
748                 fs->m_u.tcp_ip4_spec.psrc = cpu_to_be16(~0);
749
750                 fs->h_u.tcp_ip4_spec.pdst = fkeys->ports.dst;
751                 fs->m_u.tcp_ip4_spec.pdst = cpu_to_be16(~0);
752         } else {
753                 int i;
754
755                 if (fkeys->basic.ip_proto == IPPROTO_TCP)
756                         fs->flow_type = TCP_V6_FLOW;
757                 else if (fkeys->basic.ip_proto == IPPROTO_UDP)
758                         fs->flow_type = UDP_V6_FLOW;
759                 else
760                         goto fltr_err;
761
762                 *(struct in6_addr *)&fs->h_u.tcp_ip6_spec.ip6src[0] =
763                         fkeys->addrs.v6addrs.src;
764                 *(struct in6_addr *)&fs->h_u.tcp_ip6_spec.ip6dst[0] =
765                         fkeys->addrs.v6addrs.dst;
766                 for (i = 0; i < 4; i++) {
767                         fs->m_u.tcp_ip6_spec.ip6src[i] = cpu_to_be32(~0);
768                         fs->m_u.tcp_ip6_spec.ip6dst[i] = cpu_to_be32(~0);
769                 }
770                 fs->h_u.tcp_ip6_spec.psrc = fkeys->ports.src;
771                 fs->m_u.tcp_ip6_spec.psrc = cpu_to_be16(~0);
772
773                 fs->h_u.tcp_ip6_spec.pdst = fkeys->ports.dst;
774                 fs->m_u.tcp_ip6_spec.pdst = cpu_to_be16(~0);
775         }
776
777         fs->ring_cookie = fltr->rxq;
778         rc = 0;
779
780 fltr_err:
781         rcu_read_unlock();
782
783         return rc;
784 }
785 #endif
786
787 static u64 get_ethtool_ipv4_rss(struct bnxt *bp)
788 {
789         if (bp->rss_hash_cfg & VNIC_RSS_CFG_REQ_HASH_TYPE_IPV4)
790                 return RXH_IP_SRC | RXH_IP_DST;
791         return 0;
792 }
793
794 static u64 get_ethtool_ipv6_rss(struct bnxt *bp)
795 {
796         if (bp->rss_hash_cfg & VNIC_RSS_CFG_REQ_HASH_TYPE_IPV6)
797                 return RXH_IP_SRC | RXH_IP_DST;
798         return 0;
799 }
800
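/* Report which packet fields feed the RSS hash for the given flow
 * type (ETHTOOL_GRXFH), derived from the current rss_hash_cfg bits.
 */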
801 static int bnxt_grxfh(struct bnxt *bp, struct ethtool_rxnfc *cmd)
802 {
803         cmd->data = 0;
804         switch (cmd->flow_type) {
805         case TCP_V4_FLOW:
806                 if (bp->rss_hash_cfg & VNIC_RSS_CFG_REQ_HASH_TYPE_TCP_IPV4)
807                         cmd->data |= RXH_IP_SRC | RXH_IP_DST |
808                                      RXH_L4_B_0_1 | RXH_L4_B_2_3;
809                 cmd->data |= get_ethtool_ipv4_rss(bp);
810                 break;
811         case UDP_V4_FLOW:
812                 if (bp->rss_hash_cfg & VNIC_RSS_CFG_REQ_HASH_TYPE_UDP_IPV4)
813                         cmd->data |= RXH_IP_SRC | RXH_IP_DST |
814                                      RXH_L4_B_0_1 | RXH_L4_B_2_3;
815                 /* fall through */
816         case SCTP_V4_FLOW:
817         case AH_ESP_V4_FLOW:
818         case AH_V4_FLOW:
819         case ESP_V4_FLOW:
820         case IPV4_FLOW:
821                 cmd->data |= get_ethtool_ipv4_rss(bp);
822                 break;
823
824         case TCP_V6_FLOW:
825                 if (bp->rss_hash_cfg & VNIC_RSS_CFG_REQ_HASH_TYPE_TCP_IPV6)
826                         cmd->data |= RXH_IP_SRC | RXH_IP_DST |
827                                      RXH_L4_B_0_1 | RXH_L4_B_2_3;
828                 cmd->data |= get_ethtool_ipv6_rss(bp);
829                 break;
830         case UDP_V6_FLOW:
831                 if (bp->rss_hash_cfg & VNIC_RSS_CFG_REQ_HASH_TYPE_UDP_IPV6)
832                         cmd->data |= RXH_IP_SRC | RXH_IP_DST |
833                                      RXH_L4_B_0_1 | RXH_L4_B_2_3;
834                 /* fall through */
835         case SCTP_V6_FLOW:
836         case AH_ESP_V6_FLOW:
837         case AH_V6_FLOW:
838         case ESP_V6_FLOW:
839         case IPV6_FLOW:
840                 cmd->data |= get_ethtool_ipv6_rss(bp);
841                 break;
842         }
843         return 0;
844 }
845
846 #define RXH_4TUPLE (RXH_IP_SRC | RXH_IP_DST | RXH_L4_B_0_1 | RXH_L4_B_2_3)
847 #define RXH_2TUPLE (RXH_IP_SRC | RXH_IP_DST)
848
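/* Set the RSS hash fields for a flow type (e.g. "ethtool -N eth0
 * rx-flow-hash tcp4 sdfn").  Only no hashing, 2-tuple (IP src/dst) or
 * full 4-tuple (IP + L4 ports) selections are accepted, and 4-tuple
 * UDP hashing additionally requires the BNXT_FLAG_UDP_RSS_CAP
 * capability.
 */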
849 static int bnxt_srxfh(struct bnxt *bp, struct ethtool_rxnfc *cmd)
850 {
851         u32 rss_hash_cfg = bp->rss_hash_cfg;
852         int tuple, rc = 0;
853
854         if (cmd->data == RXH_4TUPLE)
855                 tuple = 4;
856         else if (cmd->data == RXH_2TUPLE)
857                 tuple = 2;
858         else if (!cmd->data)
859                 tuple = 0;
860         else
861                 return -EINVAL;
862
863         if (cmd->flow_type == TCP_V4_FLOW) {
864                 rss_hash_cfg &= ~VNIC_RSS_CFG_REQ_HASH_TYPE_TCP_IPV4;
865                 if (tuple == 4)
866                         rss_hash_cfg |= VNIC_RSS_CFG_REQ_HASH_TYPE_TCP_IPV4;
867         } else if (cmd->flow_type == UDP_V4_FLOW) {
868                 if (tuple == 4 && !(bp->flags & BNXT_FLAG_UDP_RSS_CAP))
869                         return -EINVAL;
870                 rss_hash_cfg &= ~VNIC_RSS_CFG_REQ_HASH_TYPE_UDP_IPV4;
871                 if (tuple == 4)
872                         rss_hash_cfg |= VNIC_RSS_CFG_REQ_HASH_TYPE_UDP_IPV4;
873         } else if (cmd->flow_type == TCP_V6_FLOW) {
874                 rss_hash_cfg &= ~VNIC_RSS_CFG_REQ_HASH_TYPE_TCP_IPV6;
875                 if (tuple == 4)
876                         rss_hash_cfg |= VNIC_RSS_CFG_REQ_HASH_TYPE_TCP_IPV6;
877         } else if (cmd->flow_type == UDP_V6_FLOW) {
878                 if (tuple == 4 && !(bp->flags & BNXT_FLAG_UDP_RSS_CAP))
879                         return -EINVAL;
880                 rss_hash_cfg &= ~VNIC_RSS_CFG_REQ_HASH_TYPE_UDP_IPV6;
881                 if (tuple == 4)
882                         rss_hash_cfg |= VNIC_RSS_CFG_REQ_HASH_TYPE_UDP_IPV6;
883         } else if (tuple == 4) {
884                 return -EINVAL;
885         }
886
887         switch (cmd->flow_type) {
888         case TCP_V4_FLOW:
889         case UDP_V4_FLOW:
890         case SCTP_V4_FLOW:
891         case AH_ESP_V4_FLOW:
892         case AH_V4_FLOW:
893         case ESP_V4_FLOW:
894         case IPV4_FLOW:
895                 if (tuple == 2)
896                         rss_hash_cfg |= VNIC_RSS_CFG_REQ_HASH_TYPE_IPV4;
897                 else if (!tuple)
898                         rss_hash_cfg &= ~VNIC_RSS_CFG_REQ_HASH_TYPE_IPV4;
899                 break;
900
901         case TCP_V6_FLOW:
902         case UDP_V6_FLOW:
903         case SCTP_V6_FLOW:
904         case AH_ESP_V6_FLOW:
905         case AH_V6_FLOW:
906         case ESP_V6_FLOW:
907         case IPV6_FLOW:
908                 if (tuple == 2)
909                         rss_hash_cfg |= VNIC_RSS_CFG_REQ_HASH_TYPE_IPV6;
910                 else if (!tuple)
911                         rss_hash_cfg &= ~VNIC_RSS_CFG_REQ_HASH_TYPE_IPV6;
912                 break;
913         }
914
915         if (bp->rss_hash_cfg == rss_hash_cfg)
916                 return 0;
917
918         bp->rss_hash_cfg = rss_hash_cfg;
919         if (netif_running(bp->dev)) {
920                 bnxt_close_nic(bp, false, false);
921                 rc = bnxt_open_nic(bp, false, false);
922         }
923         return rc;
924 }
925
926 static int bnxt_get_rxnfc(struct net_device *dev, struct ethtool_rxnfc *cmd,
927                           u32 *rule_locs)
928 {
929         struct bnxt *bp = netdev_priv(dev);
930         int rc = 0;
931
932         switch (cmd->cmd) {
933 #ifdef CONFIG_RFS_ACCEL
934         case ETHTOOL_GRXRINGS:
935                 cmd->data = bp->rx_nr_rings;
936                 break;
937
938         case ETHTOOL_GRXCLSRLCNT:
939                 cmd->rule_cnt = bp->ntp_fltr_count;
940                 cmd->data = BNXT_NTP_FLTR_MAX_FLTR;
941                 break;
942
943         case ETHTOOL_GRXCLSRLALL:
944                 rc = bnxt_grxclsrlall(bp, cmd, (u32 *)rule_locs);
945                 break;
946
947         case ETHTOOL_GRXCLSRULE:
948                 rc = bnxt_grxclsrule(bp, cmd);
949                 break;
950 #endif
951
952         case ETHTOOL_GRXFH:
953                 rc = bnxt_grxfh(bp, cmd);
954                 break;
955
956         default:
957                 rc = -EOPNOTSUPP;
958                 break;
959         }
960
961         return rc;
962 }
963
964 static int bnxt_set_rxnfc(struct net_device *dev, struct ethtool_rxnfc *cmd)
965 {
966         struct bnxt *bp = netdev_priv(dev);
967         int rc;
968
969         switch (cmd->cmd) {
970         case ETHTOOL_SRXFH:
971                 rc = bnxt_srxfh(bp, cmd);
972                 break;
973
974         default:
975                 rc = -EOPNOTSUPP;
976                 break;
977         }
978         return rc;
979 }
980
981 static u32 bnxt_get_rxfh_indir_size(struct net_device *dev)
982 {
983         return HW_HASH_INDEX_SIZE;
984 }
985
986 static u32 bnxt_get_rxfh_key_size(struct net_device *dev)
987 {
988         return HW_HASH_KEY_SIZE;
989 }
990
991 static int bnxt_get_rxfh(struct net_device *dev, u32 *indir, u8 *key,
992                          u8 *hfunc)
993 {
994         struct bnxt *bp = netdev_priv(dev);
995         struct bnxt_vnic_info *vnic;
996         int i = 0;
997
998         if (hfunc)
999                 *hfunc = ETH_RSS_HASH_TOP;
1000
1001         if (!bp->vnic_info)
1002                 return 0;
1003
1004         vnic = &bp->vnic_info[0];
1005         if (indir && vnic->rss_table) {
1006                 for (i = 0; i < HW_HASH_INDEX_SIZE; i++)
1007                         indir[i] = le16_to_cpu(vnic->rss_table[i]);
1008         }
1009
1010         if (key && vnic->rss_hash_key)
1011                 memcpy(key, vnic->rss_hash_key, HW_HASH_KEY_SIZE);
1012
1013         return 0;
1014 }
1015
1016 static void bnxt_get_drvinfo(struct net_device *dev,
1017                              struct ethtool_drvinfo *info)
1018 {
1019         struct bnxt *bp = netdev_priv(dev);
1020
1021         strlcpy(info->driver, DRV_MODULE_NAME, sizeof(info->driver));
1022         strlcpy(info->version, DRV_MODULE_VERSION, sizeof(info->version));
1023         strlcpy(info->fw_version, bp->fw_ver_str, sizeof(info->fw_version));
1024         strlcpy(info->bus_info, pci_name(bp->pdev), sizeof(info->bus_info));
1025         info->n_stats = bnxt_get_num_stats(bp);
1026         info->testinfo_len = bp->num_tests;
1027         /* TODO CHIMP_FW: eeprom dump details */
1028         info->eedump_len = 0;
1029         /* TODO CHIMP FW: reg dump details */
1030         info->regdump_len = 0;
1031 }
1032
1033 static void bnxt_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
1034 {
1035         struct bnxt *bp = netdev_priv(dev);
1036
1037         wol->supported = 0;
1038         wol->wolopts = 0;
1039         memset(&wol->sopass, 0, sizeof(wol->sopass));
1040         if (bp->flags & BNXT_FLAG_WOL_CAP) {
1041                 wol->supported = WAKE_MAGIC;
1042                 if (bp->wol)
1043                         wol->wolopts = WAKE_MAGIC;
1044         }
1045 }
1046
1047 static int bnxt_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
1048 {
1049         struct bnxt *bp = netdev_priv(dev);
1050
1051         if (wol->wolopts & ~WAKE_MAGIC)
1052                 return -EINVAL;
1053
1054         if (wol->wolopts & WAKE_MAGIC) {
1055                 if (!(bp->flags & BNXT_FLAG_WOL_CAP))
1056                         return -EINVAL;
1057                 if (!bp->wol) {
1058                         if (bnxt_hwrm_alloc_wol_fltr(bp))
1059                                 return -EBUSY;
1060                         bp->wol = 1;
1061                 }
1062         } else {
1063                 if (bp->wol) {
1064                         if (bnxt_hwrm_free_wol_fltr(bp))
1065                                 return -EBUSY;
1066                         bp->wol = 0;
1067                 }
1068         }
1069         return 0;
1070 }
1071
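/* Translate a firmware link-speed mask and pause bits into the legacy
 * ethtool ADVERTISED_* bitmap (used where the old u32 advertising
 * format is still required, e.g. for EEE reporting).
 */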
1072 u32 _bnxt_fw_to_ethtool_adv_spds(u16 fw_speeds, u8 fw_pause)
1073 {
1074         u32 speed_mask = 0;
1075
1076         /* TODO: support 25GB, 40GB, 50GB with different cable type */
1077         /* set the advertised speeds */
1078         if (fw_speeds & BNXT_LINK_SPEED_MSK_100MB)
1079                 speed_mask |= ADVERTISED_100baseT_Full;
1080         if (fw_speeds & BNXT_LINK_SPEED_MSK_1GB)
1081                 speed_mask |= ADVERTISED_1000baseT_Full;
1082         if (fw_speeds & BNXT_LINK_SPEED_MSK_2_5GB)
1083                 speed_mask |= ADVERTISED_2500baseX_Full;
1084         if (fw_speeds & BNXT_LINK_SPEED_MSK_10GB)
1085                 speed_mask |= ADVERTISED_10000baseT_Full;
1086         if (fw_speeds & BNXT_LINK_SPEED_MSK_40GB)
1087                 speed_mask |= ADVERTISED_40000baseCR4_Full;
1088
1089         if ((fw_pause & BNXT_LINK_PAUSE_BOTH) == BNXT_LINK_PAUSE_BOTH)
1090                 speed_mask |= ADVERTISED_Pause;
1091         else if (fw_pause & BNXT_LINK_PAUSE_TX)
1092                 speed_mask |= ADVERTISED_Asym_Pause;
1093         else if (fw_pause & BNXT_LINK_PAUSE_RX)
1094                 speed_mask |= ADVERTISED_Pause | ADVERTISED_Asym_Pause;
1095
1096         return speed_mask;
1097 }
1098
1099 #define BNXT_FW_TO_ETHTOOL_SPDS(fw_speeds, fw_pause, lk_ksettings, name)\
1100 {                                                                       \
1101         if ((fw_speeds) & BNXT_LINK_SPEED_MSK_100MB)                    \
1102                 ethtool_link_ksettings_add_link_mode(lk_ksettings, name,\
1103                                                      100baseT_Full);    \
1104         if ((fw_speeds) & BNXT_LINK_SPEED_MSK_1GB)                      \
1105                 ethtool_link_ksettings_add_link_mode(lk_ksettings, name,\
1106                                                      1000baseT_Full);   \
1107         if ((fw_speeds) & BNXT_LINK_SPEED_MSK_10GB)                     \
1108                 ethtool_link_ksettings_add_link_mode(lk_ksettings, name,\
1109                                                      10000baseT_Full);  \
1110         if ((fw_speeds) & BNXT_LINK_SPEED_MSK_25GB)                     \
1111                 ethtool_link_ksettings_add_link_mode(lk_ksettings, name,\
1112                                                      25000baseCR_Full); \
1113         if ((fw_speeds) & BNXT_LINK_SPEED_MSK_40GB)                     \
1114                 ethtool_link_ksettings_add_link_mode(lk_ksettings, name,\
1115                                                      40000baseCR4_Full);\
1116         if ((fw_speeds) & BNXT_LINK_SPEED_MSK_50GB)                     \
1117                 ethtool_link_ksettings_add_link_mode(lk_ksettings, name,\
1118                                                      50000baseCR2_Full);\
1119         if ((fw_speeds) & BNXT_LINK_SPEED_MSK_100GB)                    \
1120                 ethtool_link_ksettings_add_link_mode(lk_ksettings, name,\
1121                                                      100000baseCR4_Full);\
1122         if ((fw_pause) & BNXT_LINK_PAUSE_RX) {                          \
1123                 ethtool_link_ksettings_add_link_mode(lk_ksettings, name,\
1124                                                      Pause);            \
1125                 if (!((fw_pause) & BNXT_LINK_PAUSE_TX))                 \
1126                         ethtool_link_ksettings_add_link_mode(           \
1127                                         lk_ksettings, name, Asym_Pause);\
1128         } else if ((fw_pause) & BNXT_LINK_PAUSE_TX) {                   \
1129                 ethtool_link_ksettings_add_link_mode(lk_ksettings, name,\
1130                                                      Asym_Pause);       \
1131         }                                                               \
1132 }
1133
1134 #define BNXT_ETHTOOL_TO_FW_SPDS(fw_speeds, lk_ksettings, name)          \
1135 {                                                                       \
1136         if (ethtool_link_ksettings_test_link_mode(lk_ksettings, name,   \
1137                                                   100baseT_Full) ||     \
1138             ethtool_link_ksettings_test_link_mode(lk_ksettings, name,   \
1139                                                   100baseT_Half))       \
1140                 (fw_speeds) |= BNXT_LINK_SPEED_MSK_100MB;               \
1141         if (ethtool_link_ksettings_test_link_mode(lk_ksettings, name,   \
1142                                                   1000baseT_Full) ||    \
1143             ethtool_link_ksettings_test_link_mode(lk_ksettings, name,   \
1144                                                   1000baseT_Half))      \
1145                 (fw_speeds) |= BNXT_LINK_SPEED_MSK_1GB;                 \
1146         if (ethtool_link_ksettings_test_link_mode(lk_ksettings, name,   \
1147                                                   10000baseT_Full))     \
1148                 (fw_speeds) |= BNXT_LINK_SPEED_MSK_10GB;                \
1149         if (ethtool_link_ksettings_test_link_mode(lk_ksettings, name,   \
1150                                                   25000baseCR_Full))    \
1151                 (fw_speeds) |= BNXT_LINK_SPEED_MSK_25GB;                \
1152         if (ethtool_link_ksettings_test_link_mode(lk_ksettings, name,   \
1153                                                   40000baseCR4_Full))   \
1154                 (fw_speeds) |= BNXT_LINK_SPEED_MSK_40GB;                \
1155         if (ethtool_link_ksettings_test_link_mode(lk_ksettings, name,   \
1156                                                   50000baseCR2_Full))   \
1157                 (fw_speeds) |= BNXT_LINK_SPEED_MSK_50GB;                \
1158         if (ethtool_link_ksettings_test_link_mode(lk_ksettings, name,   \
1159                                                   100000baseCR4_Full))  \
1160                 (fw_speeds) |= BNXT_LINK_SPEED_MSK_100GB;               \
1161 }
1162
1163 static void bnxt_fw_to_ethtool_advertised_spds(struct bnxt_link_info *link_info,
1164                                 struct ethtool_link_ksettings *lk_ksettings)
1165 {
1166         u16 fw_speeds = link_info->advertising;
1167         u8 fw_pause = 0;
1168
1169         if (link_info->autoneg & BNXT_AUTONEG_FLOW_CTRL)
1170                 fw_pause = link_info->auto_pause_setting;
1171
1172         BNXT_FW_TO_ETHTOOL_SPDS(fw_speeds, fw_pause, lk_ksettings, advertising);
1173 }
1174
1175 static void bnxt_fw_to_ethtool_lp_adv(struct bnxt_link_info *link_info,
1176                                 struct ethtool_link_ksettings *lk_ksettings)
1177 {
1178         u16 fw_speeds = link_info->lp_auto_link_speeds;
1179         u8 fw_pause = 0;
1180
1181         if (link_info->autoneg & BNXT_AUTONEG_FLOW_CTRL)
1182                 fw_pause = link_info->lp_pause;
1183
1184         BNXT_FW_TO_ETHTOOL_SPDS(fw_speeds, fw_pause, lk_ksettings,
1185                                 lp_advertising);
1186 }
1187
1188 static void bnxt_fw_to_ethtool_support_spds(struct bnxt_link_info *link_info,
1189                                 struct ethtool_link_ksettings *lk_ksettings)
1190 {
1191         u16 fw_speeds = link_info->support_speeds;
1192
1193         BNXT_FW_TO_ETHTOOL_SPDS(fw_speeds, 0, lk_ksettings, supported);
1194
1195         ethtool_link_ksettings_add_link_mode(lk_ksettings, supported, Pause);
1196         ethtool_link_ksettings_add_link_mode(lk_ksettings, supported,
1197                                              Asym_Pause);
1198
1199         if (link_info->support_auto_speeds)
1200                 ethtool_link_ksettings_add_link_mode(lk_ksettings, supported,
1201                                                      Autoneg);
1202 }
1203
1204 u32 bnxt_fw_to_ethtool_speed(u16 fw_link_speed)
1205 {
1206         switch (fw_link_speed) {
1207         case BNXT_LINK_SPEED_100MB:
1208                 return SPEED_100;
1209         case BNXT_LINK_SPEED_1GB:
1210                 return SPEED_1000;
1211         case BNXT_LINK_SPEED_2_5GB:
1212                 return SPEED_2500;
1213         case BNXT_LINK_SPEED_10GB:
1214                 return SPEED_10000;
1215         case BNXT_LINK_SPEED_20GB:
1216                 return SPEED_20000;
1217         case BNXT_LINK_SPEED_25GB:
1218                 return SPEED_25000;
1219         case BNXT_LINK_SPEED_40GB:
1220                 return SPEED_40000;
1221         case BNXT_LINK_SPEED_50GB:
1222                 return SPEED_50000;
1223         case BNXT_LINK_SPEED_100GB:
1224                 return SPEED_100000;
1225         default:
1226                 return SPEED_UNKNOWN;
1227         }
1228 }
1229
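/* ethtool get_link_ksettings handler: report supported/advertised
 * modes, autoneg state, speed, duplex and port type under link_lock
 * so the values stay consistent with concurrent link updates.
 */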
1230 static int bnxt_get_link_ksettings(struct net_device *dev,
1231                                    struct ethtool_link_ksettings *lk_ksettings)
1232 {
1233         struct bnxt *bp = netdev_priv(dev);
1234         struct bnxt_link_info *link_info = &bp->link_info;
1235         struct ethtool_link_settings *base = &lk_ksettings->base;
1236         u32 ethtool_speed;
1237
1238         ethtool_link_ksettings_zero_link_mode(lk_ksettings, supported);
1239         mutex_lock(&bp->link_lock);
1240         bnxt_fw_to_ethtool_support_spds(link_info, lk_ksettings);
1241
1242         ethtool_link_ksettings_zero_link_mode(lk_ksettings, advertising);
1243         if (link_info->autoneg) {
1244                 bnxt_fw_to_ethtool_advertised_spds(link_info, lk_ksettings);
1245                 ethtool_link_ksettings_add_link_mode(lk_ksettings,
1246                                                      advertising, Autoneg);
1247                 base->autoneg = AUTONEG_ENABLE;
1248                 if (link_info->phy_link_status == BNXT_LINK_LINK)
1249                         bnxt_fw_to_ethtool_lp_adv(link_info, lk_ksettings);
1250                 ethtool_speed = bnxt_fw_to_ethtool_speed(link_info->link_speed);
1251                 if (!netif_carrier_ok(dev))
1252                         base->duplex = DUPLEX_UNKNOWN;
1253                 else if (link_info->duplex & BNXT_LINK_DUPLEX_FULL)
1254                         base->duplex = DUPLEX_FULL;
1255                 else
1256                         base->duplex = DUPLEX_HALF;
1257         } else {
1258                 base->autoneg = AUTONEG_DISABLE;
1259                 ethtool_speed =
1260                         bnxt_fw_to_ethtool_speed(link_info->req_link_speed);
1261                 base->duplex = DUPLEX_HALF;
1262                 if (link_info->req_duplex == BNXT_LINK_DUPLEX_FULL)
1263                         base->duplex = DUPLEX_FULL;
1264         }
1265         base->speed = ethtool_speed;
1266
1267         base->port = PORT_NONE;
1268         if (link_info->media_type == PORT_PHY_QCFG_RESP_MEDIA_TYPE_TP) {
1269                 base->port = PORT_TP;
1270                 ethtool_link_ksettings_add_link_mode(lk_ksettings, supported,
1271                                                      TP);
1272                 ethtool_link_ksettings_add_link_mode(lk_ksettings, advertising,
1273                                                      TP);
1274         } else {
1275                 ethtool_link_ksettings_add_link_mode(lk_ksettings, supported,
1276                                                      FIBRE);
1277                 ethtool_link_ksettings_add_link_mode(lk_ksettings, advertising,
1278                                                      FIBRE);
1279
1280                 if (link_info->media_type == PORT_PHY_QCFG_RESP_MEDIA_TYPE_DAC)
1281                         base->port = PORT_DA;
1282                 else if (link_info->media_type ==
1283                          PORT_PHY_QCFG_RESP_MEDIA_TYPE_FIBRE)
1284                         base->port = PORT_FIBRE;
1285         }
1286         base->phy_address = link_info->phy_addr;
1287         mutex_unlock(&bp->link_lock);
1288
1289         return 0;
1290 }
1291
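/* Map a forced ethtool speed value to the corresponding firmware
 * PORT_PHY_CFG link-speed constant, but only if the PHY actually
 * reports that speed in its supported mask; returns 0 otherwise.
 */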
1292 static u32 bnxt_get_fw_speed(struct net_device *dev, u32 ethtool_speed)
1293 {
1294         struct bnxt *bp = netdev_priv(dev);
1295         struct bnxt_link_info *link_info = &bp->link_info;
1296         u16 support_spds = link_info->support_speeds;
1297         u32 fw_speed = 0;
1298
1299         switch (ethtool_speed) {
1300         case SPEED_100:
1301                 if (support_spds & BNXT_LINK_SPEED_MSK_100MB)
1302                         fw_speed = PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_100MB;
1303                 break;
1304         case SPEED_1000:
1305                 if (support_spds & BNXT_LINK_SPEED_MSK_1GB)
1306                         fw_speed = PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_1GB;
1307                 break;
1308         case SPEED_2500:
1309                 if (support_spds & BNXT_LINK_SPEED_MSK_2_5GB)
1310                         fw_speed = PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_2_5GB;
1311                 break;
1312         case SPEED_10000:
1313                 if (support_spds & BNXT_LINK_SPEED_MSK_10GB)
1314                         fw_speed = PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_10GB;
1315                 break;
1316         case SPEED_20000:
1317                 if (support_spds & BNXT_LINK_SPEED_MSK_20GB)
1318                         fw_speed = PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_20GB;
1319                 break;
1320         case SPEED_25000:
1321                 if (support_spds & BNXT_LINK_SPEED_MSK_25GB)
1322                         fw_speed = PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_25GB;
1323                 break;
1324         case SPEED_40000:
1325                 if (support_spds & BNXT_LINK_SPEED_MSK_40GB)
1326                         fw_speed = PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_40GB;
1327                 break;
1328         case SPEED_50000:
1329                 if (support_spds & BNXT_LINK_SPEED_MSK_50GB)
1330                         fw_speed = PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_50GB;
1331                 break;
1332         case SPEED_100000:
1333                 if (support_spds & BNXT_LINK_SPEED_MSK_100GB)
1334                         fw_speed = PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_100GB;
1335                 break;
1336         default:
1337                 netdev_err(dev, "unsupported speed!\n");
1338                 break;
1339         }
1340         return fw_speed;
1341 }
1342
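/* Convert legacy ethtool ADVERTISED_* autoneg bits into the firmware's
 * BNXT_LINK_SPEED_MSK_* advertisement mask.
 */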
1343 u16 bnxt_get_fw_auto_link_speeds(u32 advertising)
1344 {
1345         u16 fw_speed_mask = 0;
1346
1347         /* only autoneg at speeds 100, 1000, 10000 and 40000 is supported */
1348         if (advertising & (ADVERTISED_100baseT_Full |
1349                            ADVERTISED_100baseT_Half)) {
1350                 fw_speed_mask |= BNXT_LINK_SPEED_MSK_100MB;
1351         }
1352         if (advertising & (ADVERTISED_1000baseT_Full |
1353                            ADVERTISED_1000baseT_Half)) {
1354                 fw_speed_mask |= BNXT_LINK_SPEED_MSK_1GB;
1355         }
1356         if (advertising & ADVERTISED_10000baseT_Full)
1357                 fw_speed_mask |= BNXT_LINK_SPEED_MSK_10GB;
1358
1359         if (advertising & ADVERTISED_40000baseCR4_Full)
1360                 fw_speed_mask |= BNXT_LINK_SPEED_MSK_40GB;
1361
1362         return fw_speed_mask;
1363 }
1364
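/* ethtool -s handler.  With autoneg enabled, the requested advertisement
 * mask is programmed, falling back to all supported auto speeds when the
 * user supplies none.  Otherwise a single full-duplex forced speed is
 * validated against the supported-speed mask.  The new settings are only
 * sent to firmware while the interface is running.
 */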
1365 static int bnxt_set_link_ksettings(struct net_device *dev,
1366                            const struct ethtool_link_ksettings *lk_ksettings)
1367 {
1368         struct bnxt *bp = netdev_priv(dev);
1369         struct bnxt_link_info *link_info = &bp->link_info;
1370         const struct ethtool_link_settings *base = &lk_ksettings->base;
1371         bool set_pause = false;
1372         u16 fw_advertising = 0;
1373         u32 speed;
1374         int rc = 0;
1375
1376         if (!BNXT_SINGLE_PF(bp))
1377                 return -EOPNOTSUPP;
1378
1379         mutex_lock(&bp->link_lock);
1380         if (base->autoneg == AUTONEG_ENABLE) {
1381                 BNXT_ETHTOOL_TO_FW_SPDS(fw_advertising, lk_ksettings,
1382                                         advertising);
1383                 link_info->autoneg |= BNXT_AUTONEG_SPEED;
1384                 if (!fw_advertising)
1385                         link_info->advertising = link_info->support_auto_speeds;
1386                 else
1387                         link_info->advertising = fw_advertising;
1388                 /* any change to autoneg will cause link change, therefore the
1389                  * driver should put back the original pause setting in autoneg
1390                  */
1391                 set_pause = true;
1392         } else {
1393                 u16 fw_speed;
1394                 u8 phy_type = link_info->phy_type;
1395
1396                 if (phy_type == PORT_PHY_QCFG_RESP_PHY_TYPE_BASET  ||
1397                     phy_type == PORT_PHY_QCFG_RESP_PHY_TYPE_BASETE ||
1398                     link_info->media_type == PORT_PHY_QCFG_RESP_MEDIA_TYPE_TP) {
1399                         netdev_err(dev, "10GBase-T devices must autoneg\n");
1400                         rc = -EINVAL;
1401                         goto set_setting_exit;
1402                 }
1403                 if (base->duplex == DUPLEX_HALF) {
1404                         netdev_err(dev, "HALF DUPLEX is not supported!\n");
1405                         rc = -EINVAL;
1406                         goto set_setting_exit;
1407                 }
1408                 speed = base->speed;
1409                 fw_speed = bnxt_get_fw_speed(dev, speed);
1410                 if (!fw_speed) {
1411                         rc = -EINVAL;
1412                         goto set_setting_exit;
1413                 }
1414                 link_info->req_link_speed = fw_speed;
1415                 link_info->req_duplex = BNXT_LINK_DUPLEX_FULL;
1416                 link_info->autoneg = 0;
1417                 link_info->advertising = 0;
1418         }
1419
1420         if (netif_running(dev))
1421                 rc = bnxt_hwrm_set_link_setting(bp, set_pause, false);
1422
1423 set_setting_exit:
1424         mutex_unlock(&bp->link_lock);
1425         return rc;
1426 }
1427
1428 static void bnxt_get_pauseparam(struct net_device *dev,
1429                                 struct ethtool_pauseparam *epause)
1430 {
1431         struct bnxt *bp = netdev_priv(dev);
1432         struct bnxt_link_info *link_info = &bp->link_info;
1433
1434         if (BNXT_VF(bp))
1435                 return;
1436         epause->autoneg = !!(link_info->autoneg & BNXT_AUTONEG_FLOW_CTRL);
1437         epause->rx_pause = !!(link_info->req_flow_ctrl & BNXT_LINK_PAUSE_RX);
1438         epause->tx_pause = !!(link_info->req_flow_ctrl & BNXT_LINK_PAUSE_TX);
1439 }
1440
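/* ethtool -A handler.  Pause autoneg requires speed autoneg to be
 * enabled already; switching from autonegotiated to forced pause marks
 * the link for a forced change so the new setting takes effect.
 */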
1441 static int bnxt_set_pauseparam(struct net_device *dev,
1442                                struct ethtool_pauseparam *epause)
1443 {
1444         int rc = 0;
1445         struct bnxt *bp = netdev_priv(dev);
1446         struct bnxt_link_info *link_info = &bp->link_info;
1447
1448         if (!BNXT_SINGLE_PF(bp))
1449                 return -EOPNOTSUPP;
1450
1451         if (epause->autoneg) {
1452                 if (!(link_info->autoneg & BNXT_AUTONEG_SPEED))
1453                         return -EINVAL;
1454
1455                 link_info->autoneg |= BNXT_AUTONEG_FLOW_CTRL;
1456                 if (bp->hwrm_spec_code >= 0x10201)
1457                         link_info->req_flow_ctrl =
1458                                 PORT_PHY_CFG_REQ_AUTO_PAUSE_AUTONEG_PAUSE;
1459         } else {
1460                 /* when transition from auto pause to force pause,
1461                  * force a link change
1462                  */
1463                 if (link_info->autoneg & BNXT_AUTONEG_FLOW_CTRL)
1464                         link_info->force_link_chng = true;
1465                 link_info->autoneg &= ~BNXT_AUTONEG_FLOW_CTRL;
1466                 link_info->req_flow_ctrl = 0;
1467         }
1468         if (epause->rx_pause)
1469                 link_info->req_flow_ctrl |= BNXT_LINK_PAUSE_RX;
1470
1471         if (epause->tx_pause)
1472                 link_info->req_flow_ctrl |= BNXT_LINK_PAUSE_TX;
1473
1474         if (netif_running(dev))
1475                 rc = bnxt_hwrm_set_pause(bp);
1476         return rc;
1477 }
1478
1479 static u32 bnxt_get_link(struct net_device *dev)
1480 {
1481         struct bnxt *bp = netdev_priv(dev);
1482
1483         /* TODO: handle MF, VF, driver close case */
1484         return bp->link_info.link_up;
1485 }
1486
1487 static int bnxt_find_nvram_item(struct net_device *dev, u16 type, u16 ordinal,
1488                                 u16 ext, u16 *index, u32 *item_length,
1489                                 u32 *data_length);
1490
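/* Write one NVM directory entry via HWRM_NVM_WRITE.  The payload is
 * staged in a DMA-coherent bounce buffer that firmware reads directly.
 */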
1491 static int bnxt_flash_nvram(struct net_device *dev,
1492                             u16 dir_type,
1493                             u16 dir_ordinal,
1494                             u16 dir_ext,
1495                             u16 dir_attr,
1496                             const u8 *data,
1497                             size_t data_len)
1498 {
1499         struct bnxt *bp = netdev_priv(dev);
1500         int rc;
1501         struct hwrm_nvm_write_input req = {0};
1502         dma_addr_t dma_handle;
1503         u8 *kmem;
1504
1505         bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_NVM_WRITE, -1, -1);
1506
1507         req.dir_type = cpu_to_le16(dir_type);
1508         req.dir_ordinal = cpu_to_le16(dir_ordinal);
1509         req.dir_ext = cpu_to_le16(dir_ext);
1510         req.dir_attr = cpu_to_le16(dir_attr);
1511         req.dir_data_length = cpu_to_le32(data_len);
1512
1513         kmem = dma_alloc_coherent(&bp->pdev->dev, data_len, &dma_handle,
1514                                   GFP_KERNEL);
1515         if (!kmem) {
1516                 netdev_err(dev, "dma_alloc_coherent failure, length = %u\n",
1517                            (unsigned int)data_len);
1518                 return -ENOMEM;
1519         }
1520         memcpy(kmem, data, data_len);
1521         req.host_src_addr = cpu_to_le64(dma_handle);
1522
1523         rc = hwrm_send_message(bp, &req, sizeof(req), FLASH_NVRAM_TIMEOUT);
1524         dma_free_coherent(&bp->pdev->dev, data_len, kmem, dma_handle);
1525
1526         return rc;
1527 }
1528
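/* Ask firmware to reset the embedded processor associated with the given
 * directory entry type (boot code, APE, KONG, BONO), or reset the whole
 * chip / application processor for the BNXT_FW_RESET_* pseudo types.
 */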
1529 static int bnxt_firmware_reset(struct net_device *dev,
1530                                u16 dir_type)
1531 {
1532         struct bnxt *bp = netdev_priv(dev);
1533         struct hwrm_fw_reset_input req = {0};
1534
1535         bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FW_RESET, -1, -1);
1536
1537         /* TODO: Address self-reset of APE/KONG/BONO/TANG or ungraceful reset */
1538         /*       (e.g. when firmware isn't already running) */
1539         switch (dir_type) {
1540         case BNX_DIR_TYPE_CHIMP_PATCH:
1541         case BNX_DIR_TYPE_BOOTCODE:
1542         case BNX_DIR_TYPE_BOOTCODE_2:
1543                 req.embedded_proc_type = FW_RESET_REQ_EMBEDDED_PROC_TYPE_BOOT;
1544                 /* Self-reset ChiMP upon next PCIe reset: */
1545                 req.selfrst_status = FW_RESET_REQ_SELFRST_STATUS_SELFRSTPCIERST;
1546                 break;
1547         case BNX_DIR_TYPE_APE_FW:
1548         case BNX_DIR_TYPE_APE_PATCH:
1549                 req.embedded_proc_type = FW_RESET_REQ_EMBEDDED_PROC_TYPE_MGMT;
1550                 /* Self-reset APE upon next PCIe reset: */
1551                 req.selfrst_status = FW_RESET_REQ_SELFRST_STATUS_SELFRSTPCIERST;
1552                 break;
1553         case BNX_DIR_TYPE_KONG_FW:
1554         case BNX_DIR_TYPE_KONG_PATCH:
1555                 req.embedded_proc_type =
1556                         FW_RESET_REQ_EMBEDDED_PROC_TYPE_NETCTRL;
1557                 break;
1558         case BNX_DIR_TYPE_BONO_FW:
1559         case BNX_DIR_TYPE_BONO_PATCH:
1560                 req.embedded_proc_type = FW_RESET_REQ_EMBEDDED_PROC_TYPE_ROCE;
1561                 break;
1562         case BNXT_FW_RESET_CHIP:
1563                 req.embedded_proc_type = FW_RESET_REQ_EMBEDDED_PROC_TYPE_CHIP;
1564                 req.selfrst_status = FW_RESET_REQ_SELFRST_STATUS_SELFRSTASAP;
1565                 break;
1566         case BNXT_FW_RESET_AP:
1567                 req.embedded_proc_type = FW_RESET_REQ_EMBEDDED_PROC_TYPE_AP;
1568                 break;
1569         default:
1570                 return -EINVAL;
1571         }
1572
1573         return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
1574 }
1575
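/* Validate a firmware image (header signature, code type, device family
 * and trailing CRC32) before writing it to NVM and triggering the reset
 * of the processor it belongs to.
 */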
1576 static int bnxt_flash_firmware(struct net_device *dev,
1577                                u16 dir_type,
1578                                const u8 *fw_data,
1579                                size_t fw_size)
1580 {
1581         int     rc = 0;
1582         u16     code_type;
1583         u32     stored_crc;
1584         u32     calculated_crc;
1585         struct bnxt_fw_header *header = (struct bnxt_fw_header *)fw_data;
1586
1587         switch (dir_type) {
1588         case BNX_DIR_TYPE_BOOTCODE:
1589         case BNX_DIR_TYPE_BOOTCODE_2:
1590                 code_type = CODE_BOOT;
1591                 break;
1592         case BNX_DIR_TYPE_CHIMP_PATCH:
1593                 code_type = CODE_CHIMP_PATCH;
1594                 break;
1595         case BNX_DIR_TYPE_APE_FW:
1596                 code_type = CODE_MCTP_PASSTHRU;
1597                 break;
1598         case BNX_DIR_TYPE_APE_PATCH:
1599                 code_type = CODE_APE_PATCH;
1600                 break;
1601         case BNX_DIR_TYPE_KONG_FW:
1602                 code_type = CODE_KONG_FW;
1603                 break;
1604         case BNX_DIR_TYPE_KONG_PATCH:
1605                 code_type = CODE_KONG_PATCH;
1606                 break;
1607         case BNX_DIR_TYPE_BONO_FW:
1608                 code_type = CODE_BONO_FW;
1609                 break;
1610         case BNX_DIR_TYPE_BONO_PATCH:
1611                 code_type = CODE_BONO_PATCH;
1612                 break;
1613         default:
1614                 netdev_err(dev, "Unsupported directory entry type: %u\n",
1615                            dir_type);
1616                 return -EINVAL;
1617         }
1618         if (fw_size < sizeof(struct bnxt_fw_header)) {
1619                 netdev_err(dev, "Invalid firmware file size: %u\n",
1620                            (unsigned int)fw_size);
1621                 return -EINVAL;
1622         }
1623         if (header->signature != cpu_to_le32(BNXT_FIRMWARE_BIN_SIGNATURE)) {
1624                 netdev_err(dev, "Invalid firmware signature: %08X\n",
1625                            le32_to_cpu(header->signature));
1626                 return -EINVAL;
1627         }
1628         if (header->code_type != code_type) {
1629                 netdev_err(dev, "Expected firmware type: %d, read: %d\n",
1630                            code_type, header->code_type);
1631                 return -EINVAL;
1632         }
1633         if (header->device != DEVICE_CUMULUS_FAMILY) {
1634                 netdev_err(dev, "Expected firmware device family %d, read: %d\n",
1635                            DEVICE_CUMULUS_FAMILY, header->device);
1636                 return -EINVAL;
1637         }
1638         /* Confirm the CRC32 checksum of the file: */
1639         stored_crc = le32_to_cpu(*(__le32 *)(fw_data + fw_size -
1640                                              sizeof(stored_crc)));
1641         calculated_crc = ~crc32(~0, fw_data, fw_size - sizeof(stored_crc));
1642         if (calculated_crc != stored_crc) {
1643                 netdev_err(dev, "Firmware file CRC32 checksum (%08lX) does not match calculated checksum (%08lX)\n",
1644                            (unsigned long)stored_crc,
1645                            (unsigned long)calculated_crc);
1646                 return -EINVAL;
1647         }
1648         rc = bnxt_flash_nvram(dev, dir_type, BNX_DIR_ORDINAL_FIRST,
1649                               0, 0, fw_data, fw_size);
1650         if (rc == 0)    /* Firmware update successful */
1651                 rc = bnxt_firmware_reset(dev, dir_type);
1652
1653         return rc;
1654 }
1655
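/* Validate a microcode image using the trailer appended to the file
 * (signature, directory type, trailer length and trailing CRC32) and
 * flash it.  Unlike bnxt_flash_firmware(), no processor reset follows.
 */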
1656 static int bnxt_flash_microcode(struct net_device *dev,
1657                                 u16 dir_type,
1658                                 const u8 *fw_data,
1659                                 size_t fw_size)
1660 {
1661         struct bnxt_ucode_trailer *trailer;
1662         u32 calculated_crc;
1663         u32 stored_crc;
1664         int rc = 0;
1665
1666         if (fw_size < sizeof(struct bnxt_ucode_trailer)) {
1667                 netdev_err(dev, "Invalid microcode file size: %u\n",
1668                            (unsigned int)fw_size);
1669                 return -EINVAL;
1670         }
1671         trailer = (struct bnxt_ucode_trailer *)(fw_data + (fw_size -
1672                                                 sizeof(*trailer)));
1673         if (trailer->sig != cpu_to_le32(BNXT_UCODE_TRAILER_SIGNATURE)) {
1674                 netdev_err(dev, "Invalid microcode trailer signature: %08X\n",
1675                            le32_to_cpu(trailer->sig));
1676                 return -EINVAL;
1677         }
1678         if (le16_to_cpu(trailer->dir_type) != dir_type) {
1679                 netdev_err(dev, "Expected microcode type: %d, read: %d\n",
1680                            dir_type, le16_to_cpu(trailer->dir_type));
1681                 return -EINVAL;
1682         }
1683         if (le16_to_cpu(trailer->trailer_length) <
1684                 sizeof(struct bnxt_ucode_trailer)) {
1685                 netdev_err(dev, "Invalid microcode trailer length: %d\n",
1686                            le16_to_cpu(trailer->trailer_length));
1687                 return -EINVAL;
1688         }
1689
1690         /* Confirm the CRC32 checksum of the file: */
1691         stored_crc = le32_to_cpu(*(__le32 *)(fw_data + fw_size -
1692                                              sizeof(stored_crc)));
1693         calculated_crc = ~crc32(~0, fw_data, fw_size - sizeof(stored_crc));
1694         if (calculated_crc != stored_crc) {
1695                 netdev_err(dev,
1696                            "CRC32 (%08lX) does not match calculated: %08lX\n",
1697                            (unsigned long)stored_crc,
1698                            (unsigned long)calculated_crc);
1699                 return -EINVAL;
1700         }
1701         rc = bnxt_flash_nvram(dev, dir_type, BNX_DIR_ORDINAL_FIRST,
1702                               0, 0, fw_data, fw_size);
1703
1704         return rc;
1705 }
1706
1707 static bool bnxt_dir_type_is_ape_bin_format(u16 dir_type)
1708 {
1709         switch (dir_type) {
1710         case BNX_DIR_TYPE_CHIMP_PATCH:
1711         case BNX_DIR_TYPE_BOOTCODE:
1712         case BNX_DIR_TYPE_BOOTCODE_2:
1713         case BNX_DIR_TYPE_APE_FW:
1714         case BNX_DIR_TYPE_APE_PATCH:
1715         case BNX_DIR_TYPE_KONG_FW:
1716         case BNX_DIR_TYPE_KONG_PATCH:
1717         case BNX_DIR_TYPE_BONO_FW:
1718         case BNX_DIR_TYPE_BONO_PATCH:
1719                 return true;
1720         }
1721
1722         return false;
1723 }
1724
1725 static bool bnxt_dir_type_is_other_exec_format(u16 dir_type)
1726 {
1727         switch (dir_type) {
1728         case BNX_DIR_TYPE_AVS:
1729         case BNX_DIR_TYPE_EXP_ROM_MBA:
1730         case BNX_DIR_TYPE_PCIE:
1731         case BNX_DIR_TYPE_TSCF_UCODE:
1732         case BNX_DIR_TYPE_EXT_PHY:
1733         case BNX_DIR_TYPE_CCM:
1734         case BNX_DIR_TYPE_ISCSI_BOOT:
1735         case BNX_DIR_TYPE_ISCSI_BOOT_IPV6:
1736         case BNX_DIR_TYPE_ISCSI_BOOT_IPV4N6:
1737                 return true;
1738         }
1739
1740         return false;
1741 }
1742
1743 static bool bnxt_dir_type_is_executable(u16 dir_type)
1744 {
1745         return bnxt_dir_type_is_ape_bin_format(dir_type) ||
1746                 bnxt_dir_type_is_other_exec_format(dir_type);
1747 }
1748
1749 static int bnxt_flash_firmware_from_file(struct net_device *dev,
1750                                          u16 dir_type,
1751                                          const char *filename)
1752 {
1753         const struct firmware  *fw;
1754         int                     rc;
1755
1756         rc = request_firmware(&fw, filename, &dev->dev);
1757         if (rc != 0) {
1758                 netdev_err(dev, "Error %d requesting firmware file: %s\n",
1759                            rc, filename);
1760                 return rc;
1761         }
1762         if (bnxt_dir_type_is_ape_bin_format(dir_type))
1763                 rc = bnxt_flash_firmware(dev, dir_type, fw->data, fw->size);
1764         else if (bnxt_dir_type_is_other_exec_format(dir_type))
1765                 rc = bnxt_flash_microcode(dev, dir_type, fw->data, fw->size);
1766         else
1767                 rc = bnxt_flash_nvram(dev, dir_type, BNX_DIR_ORDINAL_FIRST,
1768                                       0, 0, fw->data, fw->size);
1769         release_firmware(fw);
1770         return rc;
1771 }
1772
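/* Flash a complete firmware package: copy the file into the NVM update
 * area with HWRM_NVM_MODIFY, then ask firmware to install it with
 * HWRM_NVM_INSTALL_UPDATE, retrying once with defragmentation allowed if
 * the first attempt fails with a fragmentation error.
 */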
1773 static int bnxt_flash_package_from_file(struct net_device *dev,
1774                                         char *filename, u32 install_type)
1775 {
1776         struct bnxt *bp = netdev_priv(dev);
1777         struct hwrm_nvm_install_update_output *resp = bp->hwrm_cmd_resp_addr;
1778         struct hwrm_nvm_install_update_input install = {0};
1779         const struct firmware *fw;
1780         u32 item_len;
1781         u16 index;
1782         int rc;
1783
1784         bnxt_hwrm_fw_set_time(bp);
1785
1786         if (bnxt_find_nvram_item(dev, BNX_DIR_TYPE_UPDATE,
1787                                  BNX_DIR_ORDINAL_FIRST, BNX_DIR_EXT_NONE,
1788                                  &index, &item_len, NULL) != 0) {
1789                 netdev_err(dev, "PKG update area not created in nvram\n");
1790                 return -ENOBUFS;
1791         }
1792
1793         rc = request_firmware(&fw, filename, &dev->dev);
1794         if (rc != 0) {
1795                 netdev_err(dev, "PKG error %d requesting file: %s\n",
1796                            rc, filename);
1797                 return rc;
1798         }
1799
1800         if (fw->size > item_len) {
1801                 netdev_err(dev, "PKG insufficient update area in nvram: %lu\n",
1802                            (unsigned long)fw->size);
1803                 rc = -EFBIG;
1804         } else {
1805                 dma_addr_t dma_handle;
1806                 u8 *kmem;
1807                 struct hwrm_nvm_modify_input modify = {0};
1808
1809                 bnxt_hwrm_cmd_hdr_init(bp, &modify, HWRM_NVM_MODIFY, -1, -1);
1810
1811                 modify.dir_idx = cpu_to_le16(index);
1812                 modify.len = cpu_to_le32(fw->size);
1813
1814                 kmem = dma_alloc_coherent(&bp->pdev->dev, fw->size,
1815                                           &dma_handle, GFP_KERNEL);
1816                 if (!kmem) {
1817                         netdev_err(dev,
1818                                    "dma_alloc_coherent failure, length = %u\n",
1819                                    (unsigned int)fw->size);
1820                         rc = -ENOMEM;
1821                 } else {
1822                         memcpy(kmem, fw->data, fw->size);
1823                         modify.host_src_addr = cpu_to_le64(dma_handle);
1824
1825                         rc = hwrm_send_message(bp, &modify, sizeof(modify),
1826                                                FLASH_PACKAGE_TIMEOUT);
1827                         dma_free_coherent(&bp->pdev->dev, fw->size, kmem,
1828                                           dma_handle);
1829                 }
1830         }
1831         release_firmware(fw);
1832         if (rc)
1833                 return rc;
1834
1835         if ((install_type & 0xffff) == 0)
1836                 install_type >>= 16;
1837         bnxt_hwrm_cmd_hdr_init(bp, &install, HWRM_NVM_INSTALL_UPDATE, -1, -1);
1838         install.install_type = cpu_to_le32(install_type);
1839
1840         mutex_lock(&bp->hwrm_cmd_lock);
1841         rc = _hwrm_send_message(bp, &install, sizeof(install),
1842                                 INSTALL_PACKAGE_TIMEOUT);
1843         if (rc) {
1844                 rc = -EOPNOTSUPP;
1845                 goto flash_pkg_exit;
1846         }
1847
1848         if (resp->error_code) {
1849                 u8 error_code = ((struct hwrm_err_output *)resp)->cmd_err;
1850
1851                 if (error_code == NVM_INSTALL_UPDATE_CMD_ERR_CODE_FRAG_ERR) {
1852                         install.flags |= cpu_to_le16(
1853                                NVM_INSTALL_UPDATE_REQ_FLAGS_ALLOWED_TO_DEFRAG);
1854                         rc = _hwrm_send_message(bp, &install, sizeof(install),
1855                                                 INSTALL_PACKAGE_TIMEOUT);
1856                         if (rc) {
1857                                 rc = -EOPNOTSUPP;
1858                                 goto flash_pkg_exit;
1859                         }
1860                 }
1861         }
1862
1863         if (resp->result) {
1864                 netdev_err(dev, "PKG install error = %d, problem_item = %d\n",
1865                            (s8)resp->result, (int)resp->problem_item);
1866                 rc = -ENOPKG;
1867         }
1868 flash_pkg_exit:
1869         mutex_unlock(&bp->hwrm_cmd_lock);
1870         return rc;
1871 }
1872
1873 static int bnxt_flash_device(struct net_device *dev,
1874                              struct ethtool_flash *flash)
1875 {
1876         if (!BNXT_PF((struct bnxt *)netdev_priv(dev))) {
1877                 netdev_err(dev, "flashdev not supported from a virtual function\n");
1878                 return -EINVAL;
1879         }
1880
1881         if (flash->region == ETHTOOL_FLASH_ALL_REGIONS ||
1882             flash->region > 0xffff)
1883                 return bnxt_flash_package_from_file(dev, flash->data,
1884                                                     flash->region);
1885
1886         return bnxt_flash_firmware_from_file(dev, flash->region, flash->data);
1887 }
1888
1889 static int nvm_get_dir_info(struct net_device *dev, u32 *entries, u32 *length)
1890 {
1891         struct bnxt *bp = netdev_priv(dev);
1892         int rc;
1893         struct hwrm_nvm_get_dir_info_input req = {0};
1894         struct hwrm_nvm_get_dir_info_output *output = bp->hwrm_cmd_resp_addr;
1895
1896         bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_NVM_GET_DIR_INFO, -1, -1);
1897
1898         mutex_lock(&bp->hwrm_cmd_lock);
1899         rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
1900         if (!rc) {
1901                 *entries = le32_to_cpu(output->entries);
1902                 *length = le32_to_cpu(output->entry_length);
1903         }
1904         mutex_unlock(&bp->hwrm_cmd_lock);
1905         return rc;
1906 }
1907
1908 static int bnxt_get_eeprom_len(struct net_device *dev)
1909 {
1910         struct bnxt *bp = netdev_priv(dev);
1911
1912         if (BNXT_VF(bp))
1913                 return 0;
1914
1915         /* The -1 return value allows the entire 32-bit range of offsets to be
1916          * passed via the ethtool command-line utility.
1917          */
1918         return -1;
1919 }
1920
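/* Return the NVM directory through the ethtool EEPROM interface: the
 * first two bytes carry the entry count and entry size, followed by the
 * raw directory entries read via HWRM_NVM_GET_DIR_ENTRIES.
 */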
1921 static int bnxt_get_nvram_directory(struct net_device *dev, u32 len, u8 *data)
1922 {
1923         struct bnxt *bp = netdev_priv(dev);
1924         int rc;
1925         u32 dir_entries;
1926         u32 entry_length;
1927         u8 *buf;
1928         size_t buflen;
1929         dma_addr_t dma_handle;
1930         struct hwrm_nvm_get_dir_entries_input req = {0};
1931
1932         rc = nvm_get_dir_info(dev, &dir_entries, &entry_length);
1933         if (rc != 0)
1934                 return rc;
1935
1936         /* Insert 2 bytes of directory info (count and size of entries) */
1937         if (len < 2)
1938                 return -EINVAL;
1939
1940         *data++ = dir_entries;
1941         *data++ = entry_length;
1942         len -= 2;
1943         memset(data, 0xff, len);
1944
1945         buflen = dir_entries * entry_length;
1946         buf = dma_alloc_coherent(&bp->pdev->dev, buflen, &dma_handle,
1947                                  GFP_KERNEL);
1948         if (!buf) {
1949                 netdev_err(dev, "dma_alloc_coherent failure, length = %u\n",
1950                            (unsigned int)buflen);
1951                 return -ENOMEM;
1952         }
1953         bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_NVM_GET_DIR_ENTRIES, -1, -1);
1954         req.host_dest_addr = cpu_to_le64(dma_handle);
1955         rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
1956         if (rc == 0)
1957                 memcpy(data, buf, len > buflen ? buflen : len);
1958         dma_free_coherent(&bp->pdev->dev, buflen, buf, dma_handle);
1959         return rc;
1960 }
1961
1962 static int bnxt_get_nvram_item(struct net_device *dev, u32 index, u32 offset,
1963                                u32 length, u8 *data)
1964 {
1965         struct bnxt *bp = netdev_priv(dev);
1966         int rc;
1967         u8 *buf;
1968         dma_addr_t dma_handle;
1969         struct hwrm_nvm_read_input req = {0};
1970
1971         if (!length)
1972                 return -EINVAL;
1973
1974         buf = dma_alloc_coherent(&bp->pdev->dev, length, &dma_handle,
1975                                  GFP_KERNEL);
1976         if (!buf) {
1977                 netdev_err(dev, "dma_alloc_coherent failure, length = %u\n",
1978                            (unsigned int)length);
1979                 return -ENOMEM;
1980         }
1981         bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_NVM_READ, -1, -1);
1982         req.host_dest_addr = cpu_to_le64(dma_handle);
1983         req.dir_idx = cpu_to_le16(index);
1984         req.offset = cpu_to_le32(offset);
1985         req.len = cpu_to_le32(length);
1986
1987         rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
1988         if (rc == 0)
1989                 memcpy(data, buf, length);
1990         dma_free_coherent(&bp->pdev->dev, length, buf, dma_handle);
1991         return rc;
1992 }
1993
1994 static int bnxt_find_nvram_item(struct net_device *dev, u16 type, u16 ordinal,
1995                                 u16 ext, u16 *index, u32 *item_length,
1996                                 u32 *data_length)
1997 {
1998         struct bnxt *bp = netdev_priv(dev);
1999         int rc;
2000         struct hwrm_nvm_find_dir_entry_input req = {0};
2001         struct hwrm_nvm_find_dir_entry_output *output = bp->hwrm_cmd_resp_addr;
2002
2003         bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_NVM_FIND_DIR_ENTRY, -1, -1);
2004         req.enables = 0;
2005         req.dir_idx = 0;
2006         req.dir_type = cpu_to_le16(type);
2007         req.dir_ordinal = cpu_to_le16(ordinal);
2008         req.dir_ext = cpu_to_le16(ext);
2009         req.opt_ordinal = NVM_FIND_DIR_ENTRY_REQ_OPT_ORDINAL_EQ;
2010         mutex_lock(&bp->hwrm_cmd_lock);
2011         rc = _hwrm_send_message_silent(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
2012         if (rc == 0) {
2013                 if (index)
2014                         *index = le16_to_cpu(output->dir_idx);
2015                 if (item_length)
2016                         *item_length = le32_to_cpu(output->dir_item_length);
2017                 if (data_length)
2018                         *data_length = le32_to_cpu(output->dir_data_length);
2019         }
2020         mutex_unlock(&bp->hwrm_cmd_lock);
2021         return rc;
2022 }
2023
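/* Walk the tab-separated package log and return the requested field from
 * the last log line, NUL-terminating tokens in place.  Returns NULL if
 * the field is not present.
 */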
2024 static char *bnxt_parse_pkglog(int desired_field, u8 *data, size_t datalen)
2025 {
2026         char    *retval = NULL;
2027         char    *p;
2028         char    *value;
2029         int     field = 0;
2030
2031         if (datalen < 1)
2032                 return NULL;
2033         /* null-terminate the log data (removing last '\n'): */
2034         data[datalen - 1] = 0;
2035         for (p = data; *p != 0; p++) {
2036                 field = 0;
2037                 retval = NULL;
2038                 while (*p != 0 && *p != '\n') {
2039                         value = p;
2040                         while (*p != 0 && *p != '\t' && *p != '\n')
2041                                 p++;
2042                         if (field == desired_field)
2043                                 retval = value;
2044                         if (*p != '\t')
2045                                 break;
2046                         *p = 0;
2047                         field++;
2048                         p++;
2049                 }
2050                 if (*p == 0)
2051                         break;
2052                 *p = 0;
2053         }
2054         return retval;
2055 }
2056
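/* Read the package log from NVM and, when it contains a numeric package
 * version, append "/pkg <version>" to the firmware version string
 * reported through ethtool.
 */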
2057 static void bnxt_get_pkgver(struct net_device *dev)
2058 {
2059         struct bnxt *bp = netdev_priv(dev);
2060         u16 index = 0;
2061         char *pkgver;
2062         u32 pkglen;
2063         u8 *pkgbuf;
2064         int len;
2065
2066         if (bnxt_find_nvram_item(dev, BNX_DIR_TYPE_PKG_LOG,
2067                                  BNX_DIR_ORDINAL_FIRST, BNX_DIR_EXT_NONE,
2068                                  &index, NULL, &pkglen) != 0)
2069                 return;
2070
2071         pkgbuf = kzalloc(pkglen, GFP_KERNEL);
2072         if (!pkgbuf) {
2073                 dev_err(&bp->pdev->dev, "Unable to allocate memory for pkg version, length = %u\n",
2074                         pkglen);
2075                 return;
2076         }
2077
2078         if (bnxt_get_nvram_item(dev, index, 0, pkglen, pkgbuf))
2079                 goto err;
2080
2081         pkgver = bnxt_parse_pkglog(BNX_PKG_LOG_FIELD_IDX_PKG_VERSION, pkgbuf,
2082                                    pkglen);
2083         if (pkgver && *pkgver != 0 && isdigit(*pkgver)) {
2084                 len = strlen(bp->fw_ver_str);
2085                 snprintf(bp->fw_ver_str + len, FW_VER_STR_LEN - len - 1,
2086                          "/pkg %s", pkgver);
2087         }
2088 err:
2089         kfree(pkgbuf);
2090 }
2091
2092 static int bnxt_get_eeprom(struct net_device *dev,
2093                            struct ethtool_eeprom *eeprom,
2094                            u8 *data)
2095 {
2096         u32 index;
2097         u32 offset;
2098
2099         if (eeprom->offset == 0) /* special offset value to get directory */
2100                 return bnxt_get_nvram_directory(dev, eeprom->len, data);
2101
2102         index = eeprom->offset >> 24;
2103         offset = eeprom->offset & 0xffffff;
2104
2105         if (index == 0) {
2106                 netdev_err(dev, "unsupported index value: %d\n", index);
2107                 return -EINVAL;
2108         }
2109
2110         return bnxt_get_nvram_item(dev, index - 1, offset, eeprom->len, data);
2111 }
2112
2113 static int bnxt_erase_nvram_directory(struct net_device *dev, u8 index)
2114 {
2115         struct bnxt *bp = netdev_priv(dev);
2116         struct hwrm_nvm_erase_dir_entry_input req = {0};
2117
2118         bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_NVM_ERASE_DIR_ENTRY, -1, -1);
2119         req.dir_idx = cpu_to_le16(index);
2120         return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
2121 }
2122
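/* ethtool -E handler.  A magic with 0xffff in the upper 16 bits encodes
 * a directory operation (byte 1, currently only 0x0e == erase) and a
 * 1-based entry index (byte 0).  Any other magic carries the directory
 * type and extension, and the offset carries the ordinal and attributes
 * for (re)writing a non-executable NVM item.
 */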
2123 static int bnxt_set_eeprom(struct net_device *dev,
2124                            struct ethtool_eeprom *eeprom,
2125                            u8 *data)
2126 {
2127         struct bnxt *bp = netdev_priv(dev);
2128         u8 index, dir_op;
2129         u16 type, ext, ordinal, attr;
2130
2131         if (!BNXT_PF(bp)) {
2132                 netdev_err(dev, "NVM write not supported from a virtual function\n");
2133                 return -EINVAL;
2134         }
2135
2136         type = eeprom->magic >> 16;
2137
2138         if (type == 0xffff) { /* special value for directory operations */
2139                 index = eeprom->magic & 0xff;
2140                 dir_op = eeprom->magic >> 8;
2141                 if (index == 0)
2142                         return -EINVAL;
2143                 switch (dir_op) {
2144                 case 0x0e: /* erase */
2145                         if (eeprom->offset != ~eeprom->magic)
2146                                 return -EINVAL;
2147                         return bnxt_erase_nvram_directory(dev, index - 1);
2148                 default:
2149                         return -EINVAL;
2150                 }
2151         }
2152
2153         /* Create or re-write an NVM item: */
2154         if (bnxt_dir_type_is_executable(type))
2155                 return -EOPNOTSUPP;
2156         ext = eeprom->magic & 0xffff;
2157         ordinal = eeprom->offset >> 16;
2158         attr = eeprom->offset & 0xffff;
2159
2160         return bnxt_flash_nvram(dev, type, ordinal, ext, attr, data,
2161                                 eeprom->len);
2162 }
2163
2164 static int bnxt_set_eee(struct net_device *dev, struct ethtool_eee *edata)
2165 {
2166         struct bnxt *bp = netdev_priv(dev);
2167         struct ethtool_eee *eee = &bp->eee;
2168         struct bnxt_link_info *link_info = &bp->link_info;
2169         u32 advertising =
2170                  _bnxt_fw_to_ethtool_adv_spds(link_info->advertising, 0);
2171         int rc = 0;
2172
2173         if (!BNXT_SINGLE_PF(bp))
2174                 return -EOPNOTSUPP;
2175
2176         if (!(bp->flags & BNXT_FLAG_EEE_CAP))
2177                 return -EOPNOTSUPP;
2178
2179         if (!edata->eee_enabled)
2180                 goto eee_ok;
2181
2182         if (!(link_info->autoneg & BNXT_AUTONEG_SPEED)) {
2183                 netdev_warn(dev, "EEE requires autoneg\n");
2184                 return -EINVAL;
2185         }
2186         if (edata->tx_lpi_enabled) {
2187                 if (bp->lpi_tmr_hi && (edata->tx_lpi_timer > bp->lpi_tmr_hi ||
2188                                        edata->tx_lpi_timer < bp->lpi_tmr_lo)) {
2189                         netdev_warn(dev, "Valid LPI timer range is %d to %d microsecs\n",
2190                                     bp->lpi_tmr_lo, bp->lpi_tmr_hi);
2191                         return -EINVAL;
2192                 } else if (!bp->lpi_tmr_hi) {
2193                         edata->tx_lpi_timer = eee->tx_lpi_timer;
2194                 }
2195         }
2196         if (!edata->advertised) {
2197                 edata->advertised = advertising & eee->supported;
2198         } else if (edata->advertised & ~advertising) {
2199                 netdev_warn(dev, "EEE advertised %x must be a subset of autoneg advertised speeds %x\n",
2200                             edata->advertised, advertising);
2201                 return -EINVAL;
2202         }
2203
2204         eee->advertised = edata->advertised;
2205         eee->tx_lpi_enabled = edata->tx_lpi_enabled;
2206         eee->tx_lpi_timer = edata->tx_lpi_timer;
2207 eee_ok:
2208         eee->eee_enabled = edata->eee_enabled;
2209
2210         if (netif_running(dev))
2211                 rc = bnxt_hwrm_set_link_setting(bp, false, true);
2212
2213         return rc;
2214 }
2215
2216 static int bnxt_get_eee(struct net_device *dev, struct ethtool_eee *edata)
2217 {
2218         struct bnxt *bp = netdev_priv(dev);
2219
2220         if (!(bp->flags & BNXT_FLAG_EEE_CAP))
2221                 return -EOPNOTSUPP;
2222
2223         *edata = bp->eee;
2224         if (!bp->eee.eee_enabled) {
2225                 /* Preserve tx_lpi_timer so that the last value will be used
2226                  * by default when it is re-enabled.
2227                  */
2228                 edata->advertised = 0;
2229                 edata->tx_lpi_enabled = 0;
2230         }
2231
2232         if (!bp->eee.eee_active)
2233                 edata->lp_advertised = 0;
2234
2235         return 0;
2236 }
2237
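/* Read the SFP/QSFP module EEPROM over I2C through firmware, splitting
 * the transfer into chunks of at most BNXT_MAX_PHY_I2C_RESP_SIZE bytes
 * per HWRM_PORT_PHY_I2C_READ command.
 */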
2238 static int bnxt_read_sfp_module_eeprom_info(struct bnxt *bp, u16 i2c_addr,
2239                                             u16 page_number, u16 start_addr,
2240                                             u16 data_length, u8 *buf)
2241 {
2242         struct hwrm_port_phy_i2c_read_input req = {0};
2243         struct hwrm_port_phy_i2c_read_output *output = bp->hwrm_cmd_resp_addr;
2244         int rc, byte_offset = 0;
2245
2246         bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_PORT_PHY_I2C_READ, -1, -1);
2247         req.i2c_slave_addr = i2c_addr;
2248         req.page_number = cpu_to_le16(page_number);
2249         req.port_id = cpu_to_le16(bp->pf.port_id);
2250         do {
2251                 u16 xfer_size;
2252
2253                 xfer_size = min_t(u16, data_length, BNXT_MAX_PHY_I2C_RESP_SIZE);
2254                 data_length -= xfer_size;
2255                 req.page_offset = cpu_to_le16(start_addr + byte_offset);
2256                 req.data_length = xfer_size;
2257                 req.enables = cpu_to_le32(start_addr + byte_offset ?
2258                                  PORT_PHY_I2C_READ_REQ_ENABLES_PAGE_OFFSET : 0);
2259                 mutex_lock(&bp->hwrm_cmd_lock);
2260                 rc = _hwrm_send_message(bp, &req, sizeof(req),
2261                                         HWRM_CMD_TIMEOUT);
2262                 if (!rc)
2263                         memcpy(buf + byte_offset, output->data, xfer_size);
2264                 mutex_unlock(&bp->hwrm_cmd_lock);
2265                 byte_offset += xfer_size;
2266         } while (!rc && data_length > 0);
2267
2268         return rc;
2269 }
2270
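/* Identify the plugged module (SFP/QSFP/QSFP+/QSFP28) from byte 0 of the
 * A0 page and report the matching SFF EEPROM layout and length.
 */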
2271 static int bnxt_get_module_info(struct net_device *dev,
2272                                 struct ethtool_modinfo *modinfo)
2273 {
2274         u8 data[SFF_DIAG_SUPPORT_OFFSET + 1];
2275         struct bnxt *bp = netdev_priv(dev);
2276         int rc;
2277
2278         /* No point in going further if the PHY status indicates that
2279          * the module is not inserted, is powered down, or is a
2280          * 10GBase-T part.
2281          */
2282         if (bp->link_info.module_status >
2283                 PORT_PHY_QCFG_RESP_MODULE_STATUS_WARNINGMSG)
2284                 return -EOPNOTSUPP;
2285
2286         /* This feature is not supported in older firmware versions */
2287         if (bp->hwrm_spec_code < 0x10202)
2288                 return -EOPNOTSUPP;
2289
2290         rc = bnxt_read_sfp_module_eeprom_info(bp, I2C_DEV_ADDR_A0, 0, 0,
2291                                               SFF_DIAG_SUPPORT_OFFSET + 1,
2292                                               data);
2293         if (!rc) {
2294                 u8 module_id = data[0];
2295                 u8 diag_supported = data[SFF_DIAG_SUPPORT_OFFSET];
2296
2297                 switch (module_id) {
2298                 case SFF_MODULE_ID_SFP:
2299                         modinfo->type = ETH_MODULE_SFF_8472;
2300                         modinfo->eeprom_len = ETH_MODULE_SFF_8472_LEN;
2301                         if (!diag_supported)
2302                                 modinfo->eeprom_len = ETH_MODULE_SFF_8436_LEN;
2303                         break;
2304                 case SFF_MODULE_ID_QSFP:
2305                 case SFF_MODULE_ID_QSFP_PLUS:
2306                         modinfo->type = ETH_MODULE_SFF_8436;
2307                         modinfo->eeprom_len = ETH_MODULE_SFF_8436_LEN;
2308                         break;
2309                 case SFF_MODULE_ID_QSFP28:
2310                         modinfo->type = ETH_MODULE_SFF_8636;
2311                         modinfo->eeprom_len = ETH_MODULE_SFF_8636_LEN;
2312                         break;
2313                 default:
2314                         rc = -EOPNOTSUPP;
2315                         break;
2316                 }
2317         }
2318         return rc;
2319 }
2320
2321 static int bnxt_get_module_eeprom(struct net_device *dev,
2322                                   struct ethtool_eeprom *eeprom,
2323                                   u8 *data)
2324 {
2325         struct bnxt *bp = netdev_priv(dev);
2326         u16  start = eeprom->offset, length = eeprom->len;
2327         int rc = 0;
2328
2329         memset(data, 0, eeprom->len);
2330
2331         /* Read A0 portion of the EEPROM */
2332         if (start < ETH_MODULE_SFF_8436_LEN) {
2333                 if (start + eeprom->len > ETH_MODULE_SFF_8436_LEN)
2334                         length = ETH_MODULE_SFF_8436_LEN - start;
2335                 rc = bnxt_read_sfp_module_eeprom_info(bp, I2C_DEV_ADDR_A0, 0,
2336                                                       start, length, data);
2337                 if (rc)
2338                         return rc;
2339                 start += length;
2340                 data += length;
2341                 length = eeprom->len - length;
2342         }
2343
2344         /* Read A2 portion of the EEPROM */
2345         if (length) {
2346                 start -= ETH_MODULE_SFF_8436_LEN;
2347                 rc = bnxt_read_sfp_module_eeprom_info(bp, I2C_DEV_ADDR_A2, 1,
2348                                                       start, length, data);
2349         }
2350         return rc;
2351 }
2352
2353 static int bnxt_nway_reset(struct net_device *dev)
2354 {
2355         int rc = 0;
2356
2357         struct bnxt *bp = netdev_priv(dev);
2358         struct bnxt_link_info *link_info = &bp->link_info;
2359
2360         if (!BNXT_SINGLE_PF(bp))
2361                 return -EOPNOTSUPP;
2362
2363         if (!(link_info->autoneg & BNXT_AUTONEG_SPEED))
2364                 return -EINVAL;
2365
2366         if (netif_running(dev))
2367                 rc = bnxt_hwrm_set_link_setting(bp, true, false);
2368
2369         return rc;
2370 }
2371
2372 static int bnxt_set_phys_id(struct net_device *dev,
2373                             enum ethtool_phys_id_state state)
2374 {
2375         struct hwrm_port_led_cfg_input req = {0};
2376         struct bnxt *bp = netdev_priv(dev);
2377         struct bnxt_pf_info *pf = &bp->pf;
2378         struct bnxt_led_cfg *led_cfg;
2379         u8 led_state;
2380         __le16 duration;
2381         int i, rc;
2382
2383         if (!bp->num_leds || BNXT_VF(bp))
2384                 return -EOPNOTSUPP;
2385
2386         if (state == ETHTOOL_ID_ACTIVE) {
2387                 led_state = PORT_LED_CFG_REQ_LED0_STATE_BLINKALT;
2388                 duration = cpu_to_le16(500);
2389         } else if (state == ETHTOOL_ID_INACTIVE) {
2390                 led_state = PORT_LED_CFG_REQ_LED1_STATE_DEFAULT;
2391                 duration = cpu_to_le16(0);
2392         } else {
2393                 return -EINVAL;
2394         }
2395         bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_PORT_LED_CFG, -1, -1);
2396         req.port_id = cpu_to_le16(pf->port_id);
2397         req.num_leds = bp->num_leds;
2398         led_cfg = (struct bnxt_led_cfg *)&req.led0_id;
2399         for (i = 0; i < bp->num_leds; i++, led_cfg++) {
2400                 req.enables |= BNXT_LED_DFLT_ENABLES(i);
2401                 led_cfg->led_id = bp->leds[i].led_id;
2402                 led_cfg->led_state = led_state;
2403                 led_cfg->led_blink_on = duration;
2404                 led_cfg->led_blink_off = duration;
2405                 led_cfg->led_group_id = bp->leds[i].led_group_id;
2406         }
2407         rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
2408         if (rc)
2409                 rc = -EIO;
2410         return rc;
2411 }
2412
2413 static int bnxt_hwrm_selftest_irq(struct bnxt *bp, u16 cmpl_ring)
2414 {
2415         struct hwrm_selftest_irq_input req = {0};
2416
2417         bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_SELFTEST_IRQ, cmpl_ring, -1);
2418         return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
2419 }
2420
2421 static int bnxt_test_irq(struct bnxt *bp)
2422 {
2423         int i;
2424
2425         for (i = 0; i < bp->cp_nr_rings; i++) {
2426                 u16 cmpl_ring = bp->grp_info[i].cp_fw_ring_id;
2427                 int rc;
2428
2429                 rc = bnxt_hwrm_selftest_irq(bp, cmpl_ring);
2430                 if (rc)
2431                         return rc;
2432         }
2433         return 0;
2434 }
2435
2436 static int bnxt_hwrm_mac_loopback(struct bnxt *bp, bool enable)
2437 {
2438         struct hwrm_port_mac_cfg_input req = {0};
2439
2440         bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_PORT_MAC_CFG, -1, -1);
2441
2442         req.enables = cpu_to_le32(PORT_MAC_CFG_REQ_ENABLES_LPBK);
2443         if (enable)
2444                 req.lpbk = PORT_MAC_CFG_REQ_LPBK_LOCAL;
2445         else
2446                 req.lpbk = PORT_MAC_CFG_REQ_LPBK_NONE;
2447         return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
2448 }
2449
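/* Loopback needs a stable forced speed.  If autoneg is enabled, force
 * the current link speed (or, with the link down, one of the advertised
 * speeds, defaulting to 1 Gbps) together with a PHY reset before the
 * loopback mode is configured.
 */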
2450 static int bnxt_disable_an_for_lpbk(struct bnxt *bp,
2451                                     struct hwrm_port_phy_cfg_input *req)
2452 {
2453         struct bnxt_link_info *link_info = &bp->link_info;
2454         u16 fw_advertising = link_info->advertising;
2455         u16 fw_speed;
2456         int rc;
2457
2458         if (!link_info->autoneg)
2459                 return 0;
2460
2461         fw_speed = PORT_PHY_CFG_REQ_FORCE_LINK_SPEED_1GB;
2462         if (netif_carrier_ok(bp->dev))
2463                 fw_speed = bp->link_info.link_speed;
2464         else if (fw_advertising & BNXT_LINK_SPEED_MSK_10GB)
2465                 fw_speed = PORT_PHY_CFG_REQ_FORCE_LINK_SPEED_10GB;
2466         else if (fw_advertising & BNXT_LINK_SPEED_MSK_25GB)
2467                 fw_speed = PORT_PHY_CFG_REQ_FORCE_LINK_SPEED_25GB;
2468         else if (fw_advertising & BNXT_LINK_SPEED_MSK_40GB)
2469                 fw_speed = PORT_PHY_CFG_REQ_FORCE_LINK_SPEED_40GB;
2470         else if (fw_advertising & BNXT_LINK_SPEED_MSK_50GB)
2471                 fw_speed = PORT_PHY_CFG_REQ_FORCE_LINK_SPEED_50GB;
2472
2473         req->force_link_speed = cpu_to_le16(fw_speed);
2474         req->flags |= cpu_to_le32(PORT_PHY_CFG_REQ_FLAGS_FORCE |
2475                                   PORT_PHY_CFG_REQ_FLAGS_RESET_PHY);
2476         rc = hwrm_send_message(bp, req, sizeof(*req), HWRM_CMD_TIMEOUT);
2477         req->flags = 0;
2478         req->force_link_speed = cpu_to_le16(0);
2479         return rc;
2480 }
2481
2482 static int bnxt_hwrm_phy_loopback(struct bnxt *bp, bool enable, bool ext)
2483 {
2484         struct hwrm_port_phy_cfg_input req = {0};
2485
2486         bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_PORT_PHY_CFG, -1, -1);
2487
2488         if (enable) {
2489                 bnxt_disable_an_for_lpbk(bp, &req);
2490                 if (ext)
2491                         req.lpbk = PORT_PHY_CFG_REQ_LPBK_EXTERNAL;
2492                 else
2493                         req.lpbk = PORT_PHY_CFG_REQ_LPBK_LOCAL;
2494         } else {
2495                 req.lpbk = PORT_PHY_CFG_REQ_LPBK_NONE;
2496         }
2497         req.enables = cpu_to_le32(PORT_PHY_CFG_REQ_ENABLES_LPBK);
2498         return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
2499 }
2500
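/* Validate a looped-back frame: check its length, the source MAC address
 * and the incrementing byte pattern written by bnxt_run_loopback().
 */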
2501 static int bnxt_rx_loopback(struct bnxt *bp, struct bnxt_cp_ring_info *cpr,
2502                             u32 raw_cons, int pkt_size)
2503 {
2504         struct bnxt_napi *bnapi = cpr->bnapi;
2505         struct bnxt_rx_ring_info *rxr;
2506         struct bnxt_sw_rx_bd *rx_buf;
2507         struct rx_cmp *rxcmp;
2508         u16 cp_cons, cons;
2509         u8 *data;
2510         u32 len;
2511         int i;
2512
2513         rxr = bnapi->rx_ring;
2514         cp_cons = RING_CMP(raw_cons);
2515         rxcmp = (struct rx_cmp *)
2516                 &cpr->cp_desc_ring[CP_RING(cp_cons)][CP_IDX(cp_cons)];
2517         cons = rxcmp->rx_cmp_opaque;
2518         rx_buf = &rxr->rx_buf_ring[cons];
2519         data = rx_buf->data_ptr;
2520         len = le32_to_cpu(rxcmp->rx_cmp_len_flags_type) >> RX_CMP_LEN_SHIFT;
2521         if (len != pkt_size)
2522                 return -EIO;
2523         i = ETH_ALEN;
2524         if (!ether_addr_equal(data + i, bnapi->bp->dev->dev_addr))
2525                 return -EIO;
2526         i += ETH_ALEN;
2527         for ( ; i < pkt_size; i++) {
2528                 if (data[i] != (u8)(i & 0xff))
2529                         return -EIO;
2530         }
2531         return 0;
2532 }
2533
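/* Poll the completion ring (roughly 1 ms worth of 5 us delays) for the
 * RX completion of the loopback frame and validate it.
 */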
2534 static int bnxt_poll_loopback(struct bnxt *bp, struct bnxt_cp_ring_info *cpr,
2535                               int pkt_size)
2536 {
2537         struct tx_cmp *txcmp;
2538         int rc = -EIO;
2539         u32 raw_cons;
2540         u32 cons;
2541         int i;
2542
2543         raw_cons = cpr->cp_raw_cons;
2544         for (i = 0; i < 200; i++) {
2545                 cons = RING_CMP(raw_cons);
2546                 txcmp = &cpr->cp_desc_ring[CP_RING(cons)][CP_IDX(cons)];
2547
2548                 if (!TX_CMP_VALID(txcmp, raw_cons)) {
2549                         udelay(5);
2550                         continue;
2551                 }
2552
2553                 /* The valid test of the entry must be done first before
2554                  * reading any further.
2555                  */
2556                 dma_rmb();
2557                 if (TX_CMP_TYPE(txcmp) == CMP_TYPE_RX_L2_CMP) {
2558                         rc = bnxt_rx_loopback(bp, cpr, raw_cons, pkt_size);
2559                         raw_cons = NEXT_RAW_CMP(raw_cons);
2560                         raw_cons = NEXT_RAW_CMP(raw_cons);
2561                         break;
2562                 }
2563                 raw_cons = NEXT_RAW_CMP(raw_cons);
2564         }
2565         cpr->cp_raw_cons = raw_cons;
2566         return rc;
2567 }
2568
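/* Build a small test frame (broadcast destination, own source MAC,
 * incrementing payload), transmit it on TX ring 0 via the XDP transmit
 * helper and wait for it to loop back on the associated RX ring.
 */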
2569 static int bnxt_run_loopback(struct bnxt *bp)
2570 {
2571         struct bnxt_tx_ring_info *txr = &bp->tx_ring[0];
2572         struct bnxt_cp_ring_info *cpr;
2573         int pkt_size, i = 0;
2574         struct sk_buff *skb;
2575         dma_addr_t map;
2576         u8 *data;
2577         int rc;
2578
2579         cpr = &txr->bnapi->cp_ring;
2580         pkt_size = min(bp->dev->mtu + ETH_HLEN, bp->rx_copy_thresh);
2581         skb = netdev_alloc_skb(bp->dev, pkt_size);
2582         if (!skb)
2583                 return -ENOMEM;
2584         data = skb_put(skb, pkt_size);
2585         eth_broadcast_addr(data);
2586         i += ETH_ALEN;
2587         ether_addr_copy(&data[i], bp->dev->dev_addr);
2588         i += ETH_ALEN;
2589         for ( ; i < pkt_size; i++)
2590                 data[i] = (u8)(i & 0xff);
2591
2592         map = dma_map_single(&bp->pdev->dev, skb->data, pkt_size,
2593                              PCI_DMA_TODEVICE);
2594         if (dma_mapping_error(&bp->pdev->dev, map)) {
2595                 dev_kfree_skb(skb);
2596                 return -EIO;
2597         }
2598         bnxt_xmit_xdp(bp, txr, map, pkt_size, 0);
2599
2600         /* Sync BD data before updating doorbell */
2601         wmb();
2602
2603         bnxt_db_write(bp, &txr->tx_db, txr->tx_prod);
2604         rc = bnxt_poll_loopback(bp, cpr, pkt_size);
2605
2606         dma_unmap_single(&bp->pdev->dev, map, pkt_size, PCI_DMA_TODEVICE);
2607         dev_kfree_skb(skb);
2608         return rc;
2609 }
2610
2611 static int bnxt_run_fw_tests(struct bnxt *bp, u8 test_mask, u8 *test_results)
2612 {
2613         struct hwrm_selftest_exec_output *resp = bp->hwrm_cmd_resp_addr;
2614         struct hwrm_selftest_exec_input req = {0};
2615         int rc;
2616
2617         bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_SELFTEST_EXEC, -1, -1);
2618         mutex_lock(&bp->hwrm_cmd_lock);
2619         resp->test_success = 0;
2620         req.flags = test_mask;
2621         rc = _hwrm_send_message(bp, &req, sizeof(req), bp->test_info->timeout);
2622         *test_results = resp->test_success;
2623         mutex_unlock(&bp->hwrm_cmd_lock);
2624         return rc;
2625 }
2626
2627 #define BNXT_DRV_TESTS                  4
2628 #define BNXT_MACLPBK_TEST_IDX           (bp->num_tests - BNXT_DRV_TESTS)
2629 #define BNXT_PHYLPBK_TEST_IDX           (BNXT_MACLPBK_TEST_IDX + 1)
2630 #define BNXT_EXTLPBK_TEST_IDX           (BNXT_MACLPBK_TEST_IDX + 2)
2631 #define BNXT_IRQ_TEST_IDX               (BNXT_MACLPBK_TEST_IDX + 3)
2632
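/* ethtool -t handler.  Firmware tests flagged as online run in any
 * state; offline firmware tests plus the MAC, PHY and optional external
 * loopback tests require the NIC to be closed and half-opened around
 * them.  The IRQ test always runs last.  A failed test sets the
 * corresponding entry in buf and ETH_TEST_FL_FAILED.
 */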
2633 static void bnxt_self_test(struct net_device *dev, struct ethtool_test *etest,
2634                            u64 *buf)
2635 {
2636         struct bnxt *bp = netdev_priv(dev);
2637         bool do_ext_lpbk = false;
2638         bool offline = false;
2639         u8 test_results = 0;
2640         u8 test_mask = 0;
2641         int rc, i;
2642
2643         if (!bp->num_tests || !BNXT_SINGLE_PF(bp))
2644                 return;
2645         memset(buf, 0, sizeof(u64) * bp->num_tests);
2646         if (!netif_running(dev)) {
2647                 etest->flags |= ETH_TEST_FL_FAILED;
2648                 return;
2649         }
2650
2651         if ((etest->flags & ETH_TEST_FL_EXTERNAL_LB) &&
2652             (bp->test_info->flags & BNXT_TEST_FL_EXT_LPBK))
2653                 do_ext_lpbk = true;
2654
2655         if (etest->flags & ETH_TEST_FL_OFFLINE) {
2656                 if (bp->pf.active_vfs) {
2657                         etest->flags |= ETH_TEST_FL_FAILED;
2658                         netdev_warn(dev, "Offline tests cannot be run with active VFs\n");
2659                         return;
2660                 }
2661                 offline = true;
2662         }
2663
2664         for (i = 0; i < bp->num_tests - BNXT_DRV_TESTS; i++) {
2665                 u8 bit_val = 1 << i;
2666
2667                 if (!(bp->test_info->offline_mask & bit_val))
2668                         test_mask |= bit_val;
2669                 else if (offline)
2670                         test_mask |= bit_val;
2671         }
2672         if (!offline) {
2673                 bnxt_run_fw_tests(bp, test_mask, &test_results);
2674         } else {
2675                 rc = bnxt_close_nic(bp, false, false);
2676                 if (rc)
2677                         return;
2678                 bnxt_run_fw_tests(bp, test_mask, &test_results);
2679
2680                 buf[BNXT_MACLPBK_TEST_IDX] = 1;
2681                 bnxt_hwrm_mac_loopback(bp, true);
2682                 msleep(250);
2683                 rc = bnxt_half_open_nic(bp);
2684                 if (rc) {
2685                         bnxt_hwrm_mac_loopback(bp, false);
2686                         etest->flags |= ETH_TEST_FL_FAILED;
2687                         return;
2688                 }
2689                 if (bnxt_run_loopback(bp))
2690                         etest->flags |= ETH_TEST_FL_FAILED;
2691                 else
2692                         buf[BNXT_MACLPBK_TEST_IDX] = 0;
2693
2694                 bnxt_hwrm_mac_loopback(bp, false);
2695                 bnxt_hwrm_phy_loopback(bp, true, false);
2696                 msleep(1000);
2697                 if (bnxt_run_loopback(bp)) {
2698                         buf[BNXT_PHYLPBK_TEST_IDX] = 1;
2699                         etest->flags |= ETH_TEST_FL_FAILED;
2700                 }
2701                 if (do_ext_lpbk) {
2702                         etest->flags |= ETH_TEST_FL_EXTERNAL_LB_DONE;
2703                         bnxt_hwrm_phy_loopback(bp, true, true);
2704                         msleep(1000);
2705                         if (bnxt_run_loopback(bp)) {
2706                                 buf[BNXT_EXTLPBK_TEST_IDX] = 1;
2707                                 etest->flags |= ETH_TEST_FL_FAILED;
2708                         }
2709                 }
2710                 bnxt_hwrm_phy_loopback(bp, false, false);
2711                 bnxt_half_close_nic(bp);
2712                 bnxt_open_nic(bp, false, true);
2713         }
2714         if (bnxt_test_irq(bp)) {
2715                 buf[BNXT_IRQ_TEST_IDX] = 1;
2716                 etest->flags |= ETH_TEST_FL_FAILED;
2717         }
2718         for (i = 0; i < bp->num_tests - BNXT_DRV_TESTS; i++) {
2719                 u8 bit_val = 1 << i;
2720
2721                 if ((test_mask & bit_val) && !(test_results & bit_val)) {
2722                         buf[i] = 1;
2723                         etest->flags |= ETH_TEST_FL_FAILED;
2724                 }
2725         }
2726 }
2727
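/* ethtool reset handler.  Only the PF may reset, and only when no VFs are
 * assigned to VMs.  ETH_RESET_ALL requests a full chip reset (completed by
 * reloading the driver) and ETH_RESET_AP resets the application processor;
 * both require HWRM spec 1.8.3 or newer.
 */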
2728 static int bnxt_reset(struct net_device *dev, u32 *flags)
2729 {
2730         struct bnxt *bp = netdev_priv(dev);
2731         int rc = 0;
2732
2733         if (!BNXT_PF(bp)) {
2734                 netdev_err(dev, "Reset is not supported from a VF\n");
2735                 return -EOPNOTSUPP;
2736         }
2737
2738         if (pci_vfs_assigned(bp->pdev)) {
2739                 netdev_err(dev,
2740                            "Reset not allowed when VFs are assigned to VMs\n");
2741                 return -EBUSY;
2742         }
2743
2744         if (*flags == ETH_RESET_ALL) {
2745                 /* This feature is not supported in older firmware versions */
2746                 if (bp->hwrm_spec_code < 0x10803)
2747                         return -EOPNOTSUPP;
2748
2749                 rc = bnxt_firmware_reset(dev, BNXT_FW_RESET_CHIP);
2750                 if (!rc) {
2751                         netdev_info(dev, "Reset request successful. Reload driver to complete reset\n");
2752                         *flags = 0;
2753                 }
2754         } else if (*flags == ETH_RESET_AP) {
2755                 /* This feature is not supported in older firmware versions */
2756                 if (bp->hwrm_spec_code < 0x10803)
2757                         return -EOPNOTSUPP;
2758
2759                 rc = bnxt_firmware_reset(dev, BNXT_FW_RESET_AP);
2760                 if (!rc) {
2761                         netdev_info(dev, "Reset Application Processor request successful.\n");
2762                         *flags = 0;
2763                 }
2764         } else {
2765                 rc = -EINVAL;
2766         }
2767
2768         return rc;
2769 }
2770
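/* Run a DBG_COREDUMP_LIST/RETRIEVE style command as a sequence of chunks:
 * firmware DMAs each chunk into dma_buf and the chunks are accumulated in
 * info->dest_buf until the response no longer has HWRM_DBG_CMN_FLAGS_MORE
 * set.
 */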
2771 static int bnxt_hwrm_dbg_dma_data(struct bnxt *bp, void *msg, int msg_len,
2772                                   struct bnxt_hwrm_dbg_dma_info *info)
2773 {
2774         struct hwrm_dbg_cmn_output *cmn_resp = bp->hwrm_cmd_resp_addr;
2775         struct hwrm_dbg_cmn_input *cmn_req = msg;
2776         __le16 *seq_ptr = msg + info->seq_off;
2777         u16 seq = 0, len, segs_off;
2778         void *resp = cmn_resp;
2779         dma_addr_t dma_handle;
2780         int rc, off = 0;
2781         void *dma_buf;
2782
2783         dma_buf = dma_alloc_coherent(&bp->pdev->dev, info->dma_len, &dma_handle,
2784                                      GFP_KERNEL);
2785         if (!dma_buf)
2786                 return -ENOMEM;
2787
2788         segs_off = offsetof(struct hwrm_dbg_coredump_list_output,
2789                             total_segments);
2790         cmn_req->host_dest_addr = cpu_to_le64(dma_handle);
2791         cmn_req->host_buf_len = cpu_to_le32(info->dma_len);
2792         mutex_lock(&bp->hwrm_cmd_lock);
2793         while (1) {
2794                 *seq_ptr = cpu_to_le16(seq);
2795                 rc = _hwrm_send_message(bp, msg, msg_len, HWRM_CMD_TIMEOUT);
2796                 if (rc)
2797                         break;
2798
2799                 len = le16_to_cpu(*((__le16 *)(resp + info->data_len_off)));
2800                 if (!seq &&
2801                     cmn_req->req_type == cpu_to_le16(HWRM_DBG_COREDUMP_LIST)) {
2802                         info->segs = le16_to_cpu(*((__le16 *)(resp +
2803                                                               segs_off)));
2804                         if (!info->segs) {
2805                                 rc = -EIO;
2806                                 break;
2807                         }
2808
2809                         info->dest_buf_size = info->segs *
2810                                         sizeof(struct coredump_segment_record);
2811                         info->dest_buf = kmalloc(info->dest_buf_size,
2812                                                  GFP_KERNEL);
2813                         if (!info->dest_buf) {
2814                                 rc = -ENOMEM;
2815                                 break;
2816                         }
2817                 }
2818
2819                 if (info->dest_buf)
2820                         memcpy(info->dest_buf + off, dma_buf, len);
2821
2822                 if (cmn_req->req_type ==
2823                                 cpu_to_le16(HWRM_DBG_COREDUMP_RETRIEVE))
2824                         info->dest_buf_size += len;
2825
2826                 if (!(cmn_resp->flags & HWRM_DBG_CMN_FLAGS_MORE))
2827                         break;
2828
2829                 seq++;
2830                 off += len;
2831         }
2832         mutex_unlock(&bp->hwrm_cmd_lock);
2833         dma_free_coherent(&bp->pdev->dev, info->dma_len, dma_buf, dma_handle);
2834         return rc;
2835 }
2836
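/* Fetch the table of coredump segment records advertised by firmware. */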
2837 static int bnxt_hwrm_dbg_coredump_list(struct bnxt *bp,
2838                                        struct bnxt_coredump *coredump)
2839 {
2840         struct hwrm_dbg_coredump_list_input req = {0};
2841         struct bnxt_hwrm_dbg_dma_info info = {NULL};
2842         int rc;
2843
2844         bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_DBG_COREDUMP_LIST, -1, -1);
2845
2846         info.dma_len = COREDUMP_LIST_BUF_LEN;
2847         info.seq_off = offsetof(struct hwrm_dbg_coredump_list_input, seq_no);
2848         info.data_len_off = offsetof(struct hwrm_dbg_coredump_list_output,
2849                                      data_len);
2850
2851         rc = bnxt_hwrm_dbg_dma_data(bp, &req, sizeof(req), &info);
2852         if (!rc) {
2853                 coredump->data = info.dest_buf;
2854                 coredump->data_size = info.dest_buf_size;
2855                 coredump->total_segs = info.segs;
2856         }
2857         return rc;
2858 }
2859
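/* Ask firmware to start collecting the given coredump segment. */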
2860 static int bnxt_hwrm_dbg_coredump_initiate(struct bnxt *bp, u16 component_id,
2861                                            u16 segment_id)
2862 {
2863         struct hwrm_dbg_coredump_initiate_input req = {0};
2864
2865         bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_DBG_COREDUMP_INITIATE, -1, -1);
2866         req.component_id = cpu_to_le16(component_id);
2867         req.segment_id = cpu_to_le16(segment_id);
2868
2869         return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
2870 }
2871
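/* Copy one coredump segment into buf + offset (if buf is non-NULL) and
 * report its length through *seg_len.
 */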
2872 static int bnxt_hwrm_dbg_coredump_retrieve(struct bnxt *bp, u16 component_id,
2873                                            u16 segment_id, u32 *seg_len,
2874                                            void *buf, u32 offset)
2875 {
2876         struct hwrm_dbg_coredump_retrieve_input req = {0};
2877         struct bnxt_hwrm_dbg_dma_info info = {NULL};
2878         int rc;
2879
2880         bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_DBG_COREDUMP_RETRIEVE, -1, -1);
2881         req.component_id = cpu_to_le16(component_id);
2882         req.segment_id = cpu_to_le16(segment_id);
2883
2884         info.dma_len = COREDUMP_RETRIEVE_BUF_LEN;
2885         info.seq_off = offsetof(struct hwrm_dbg_coredump_retrieve_input,
2886                                 seq_no);
2887         info.data_len_off = offsetof(struct hwrm_dbg_coredump_retrieve_output,
2888                                      data_len);
2889         if (buf)
2890                 info.dest_buf = buf + offset;
2891
2892         rc = bnxt_hwrm_dbg_dma_data(bp, &req, sizeof(req), &info);
2893         if (!rc)
2894                 *seg_len = info.dest_buf_size;
2895
2896         return rc;
2897 }
2898
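/* Fill in the "sEgM" header that precedes each segment in the coredump
 * image.
 */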
2899 static void
2900 bnxt_fill_coredump_seg_hdr(struct bnxt *bp,
2901                            struct bnxt_coredump_segment_hdr *seg_hdr,
2902                            struct coredump_segment_record *seg_rec, u32 seg_len,
2903                            int status, u32 duration, u32 instance)
2904 {
2905         memset(seg_hdr, 0, sizeof(*seg_hdr));
2906         memcpy(seg_hdr->signature, "sEgM", 4);
2907         if (seg_rec) {
2908                 seg_hdr->component_id = (__force __le32)seg_rec->component_id;
2909                 seg_hdr->segment_id = (__force __le32)seg_rec->segment_id;
2910                 seg_hdr->low_version = seg_rec->version_low;
2911                 seg_hdr->high_version = seg_rec->version_hi;
2912         } else {
2913                 /* For hwrm_ver_get response Component id = 2
2914                  * and Segment id = 0
2915                  */
2916                 seg_hdr->component_id = cpu_to_le32(2);
2917                 seg_hdr->segment_id = 0;
2918         }
2919         seg_hdr->function_id = cpu_to_le16(bp->pdev->devfn);
2920         seg_hdr->length = cpu_to_le32(seg_len);
2921         seg_hdr->status = cpu_to_le32(status);
2922         seg_hdr->duration = cpu_to_le32(duration);
2923         seg_hdr->data_offset = cpu_to_le32(sizeof(*seg_hdr));
2924         seg_hdr->instance = cpu_to_le32(instance);
2925 }
2926
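/* Fill in the trailing "cOrE" record describing the system, the capture
 * start/end times and the ASIC revision.
 */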
2927 static void
2928 bnxt_fill_coredump_record(struct bnxt *bp, struct bnxt_coredump_record *record,
2929                           time64_t start, s16 start_utc, u16 total_segs,
2930                           int status)
2931 {
2932         time64_t end = ktime_get_real_seconds();
2933         u32 os_ver_major = 0, os_ver_minor = 0;
2934         struct tm tm;
2935
2936         time64_to_tm(start, 0, &tm);
2937         memset(record, 0, sizeof(*record));
2938         memcpy(record->signature, "cOrE", 4);
2939         record->flags = 0;
2940         record->low_version = 0;
2941         record->high_version = 1;
2942         record->asic_state = 0;
2943         strlcpy(record->system_name, utsname()->nodename,
2944                 sizeof(record->system_name));
2945         record->year = cpu_to_le16(tm.tm_year + 1900);
2946         record->month = cpu_to_le16(tm.tm_mon + 1);
2947         record->day = cpu_to_le16(tm.tm_mday);
2948         record->hour = cpu_to_le16(tm.tm_hour);
2949         record->minute = cpu_to_le16(tm.tm_min);
2950         record->second = cpu_to_le16(tm.tm_sec);
2951         record->utc_bias = cpu_to_le16(start_utc);
2952         strcpy(record->commandline, "ethtool -w");
2953         record->total_segments = cpu_to_le32(total_segs);
2954
2955         sscanf(utsname()->release, "%u.%u", &os_ver_major, &os_ver_minor);
2956         record->os_ver_major = cpu_to_le32(os_ver_major);
2957         record->os_ver_minor = cpu_to_le32(os_ver_minor);
2958
2959         strlcpy(record->os_name, utsname()->sysname, 32);
2960         time64_to_tm(end, 0, &tm);
2961         record->end_year = cpu_to_le16(tm.tm_year + 1900);
2962         record->end_month = cpu_to_le16(tm.tm_mon + 1);
2963         record->end_day = cpu_to_le16(tm.tm_mday);
2964         record->end_hour = cpu_to_le16(tm.tm_hour);
2965         record->end_minute = cpu_to_le16(tm.tm_min);
2966         record->end_second = cpu_to_le16(tm.tm_sec);
2967         record->end_utc_bias = cpu_to_le16(sys_tz.tz_minuteswest * 60);
2968         record->asic_id1 = cpu_to_le32(bp->chip_num << 16 |
2969                                        bp->ver_resp.chip_rev << 8 |
2970                                        bp->ver_resp.chip_metal);
2971         record->asic_id2 = 0;
2972         record->coredump_status = cpu_to_le32(status);
2973         record->ioctl_low_version = 0;
2974         record->ioctl_high_version = 0;
2975 }
2976
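/* Build the complete coredump: the VER_GET response, one header plus data
 * per firmware segment, and a trailing coredump record.  When buf is NULL
 * the segments are still walked so that only the total length is returned
 * in *dump_len.
 */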
2977 static int bnxt_get_coredump(struct bnxt *bp, void *buf, u32 *dump_len)
2978 {
2979         u32 ver_get_resp_len = sizeof(struct hwrm_ver_get_output);
2980         struct coredump_segment_record *seg_record = NULL;
2981         u32 offset = 0, seg_hdr_len, seg_record_len;
2982         struct bnxt_coredump_segment_hdr seg_hdr;
2983         struct bnxt_coredump coredump = {NULL};
2984         time64_t start_time;
2985         u16 start_utc;
2986         int rc = 0, i;
2987
2988         start_time = ktime_get_real_seconds();
2989         start_utc = sys_tz.tz_minuteswest * 60;
2990         seg_hdr_len = sizeof(seg_hdr);
2991
2992         /* First segment should be hwrm_ver_get response */
2993         *dump_len = seg_hdr_len + ver_get_resp_len;
2994         if (buf) {
2995                 bnxt_fill_coredump_seg_hdr(bp, &seg_hdr, NULL, ver_get_resp_len,
2996                                            0, 0, 0);
2997                 memcpy(buf + offset, &seg_hdr, seg_hdr_len);
2998                 offset += seg_hdr_len;
2999                 memcpy(buf + offset, &bp->ver_resp, ver_get_resp_len);
3000                 offset += ver_get_resp_len;
3001         }
3002
3003         rc = bnxt_hwrm_dbg_coredump_list(bp, &coredump);
3004         if (rc) {
3005                 netdev_err(bp->dev, "Failed to get coredump segment list\n");
3006                 goto err;
3007         }
3008
3009         *dump_len += seg_hdr_len * coredump.total_segs;
3010
3011         seg_record = (struct coredump_segment_record *)coredump.data;
3012         seg_record_len = sizeof(*seg_record);
3013
3014         for (i = 0; i < coredump.total_segs; i++) {
3015                 u16 comp_id = le16_to_cpu(seg_record->component_id);
3016                 u16 seg_id = le16_to_cpu(seg_record->segment_id);
3017                 u32 duration = 0, seg_len = 0;
3018                 unsigned long start, end;
3019
3020                 start = jiffies;
3021
3022                 rc = bnxt_hwrm_dbg_coredump_initiate(bp, comp_id, seg_id);
3023                 if (rc) {
3024                         netdev_err(bp->dev,
3025                                    "Failed to initiate coredump for seg = %d\n",
3026                                    seg_id);
3027                         goto next_seg;
3028                 }
3029
3030                 /* Write segment data into the buffer */
3031                 rc = bnxt_hwrm_dbg_coredump_retrieve(bp, comp_id, seg_id,
3032                                                      &seg_len, buf,
3033                                                      offset + seg_hdr_len);
3034                 if (rc)
3035                         netdev_err(bp->dev,
3036                                    "Failed to retrieve coredump for seg = %d\n",
3037                                    seg_id);
3038
3039 next_seg:
3040                 end = jiffies;
3041                 duration = jiffies_to_msecs(end - start);
3042                 bnxt_fill_coredump_seg_hdr(bp, &seg_hdr, seg_record, seg_len,
3043                                            rc, duration, 0);
3044
3045                 if (buf) {
3046                         /* Write segment header into the buffer */
3047                         memcpy(buf + offset, &seg_hdr, seg_hdr_len);
3048                         offset += seg_hdr_len + seg_len;
3049                 }
3050
3051                 *dump_len += seg_len;
3052                 seg_record =
3053                         (struct coredump_segment_record *)((u8 *)seg_record +
3054                                                            seg_record_len);
3055         }
3056
3057 err:
3058         if (buf)
3059                 bnxt_fill_coredump_record(bp, buf + offset, start_time,
3060                                           start_utc, coredump.total_segs + 1,
3061                                           rc);
3062         kfree(coredump.data);
3063         *dump_len += sizeof(struct bnxt_coredump_record);
3064
3065         return rc;
3066 }
3067
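/* ethtool get_dump_flag handler: report the firmware version and the
 * buffer size needed for a coredump.
 */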
3068 static int bnxt_get_dump_flag(struct net_device *dev, struct ethtool_dump *dump)
3069 {
3070         struct bnxt *bp = netdev_priv(dev);
3071
3072         if (bp->hwrm_spec_code < 0x10801)
3073                 return -EOPNOTSUPP;
3074
3075         dump->version = bp->ver_resp.hwrm_fw_maj_8b << 24 |
3076                         bp->ver_resp.hwrm_fw_min_8b << 16 |
3077                         bp->ver_resp.hwrm_fw_bld_8b << 8 |
3078                         bp->ver_resp.hwrm_fw_rsvd_8b;
3079
3080         return bnxt_get_coredump(bp, NULL, &dump->len);
3081 }
3082
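/* ethtool get_dump_data handler: write the coredump into the caller's
 * buffer.
 */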
3083 static int bnxt_get_dump_data(struct net_device *dev, struct ethtool_dump *dump,
3084                               void *buf)
3085 {
3086         struct bnxt *bp = netdev_priv(dev);
3087
3088         if (bp->hwrm_spec_code < 0x10801)
3089                 return -EOPNOTSUPP;
3090
3091         memset(buf, 0, dump->len);
3092
3093         return bnxt_get_coredump(bp, buf, &dump->len);
3094 }
3095
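/* Read the NVM package version, then query the firmware self-test list
 * (HWRM_SELFTEST_QLIST) and build the test name strings, appending the
 * driver's loopback and IRQ tests.
 */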
3096 void bnxt_ethtool_init(struct bnxt *bp)
3097 {
3098         struct hwrm_selftest_qlist_output *resp = bp->hwrm_cmd_resp_addr;
3099         struct hwrm_selftest_qlist_input req = {0};
3100         struct bnxt_test_info *test_info;
3101         struct net_device *dev = bp->dev;
3102         int i, rc;
3103
3104         bnxt_get_pkgver(dev);
3105
3106         if (bp->hwrm_spec_code < 0x10704 || !BNXT_SINGLE_PF(bp))
3107                 return;
3108
3109         bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_SELFTEST_QLIST, -1, -1);
3110         mutex_lock(&bp->hwrm_cmd_lock);
3111         rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
3112         if (rc)
3113                 goto ethtool_init_exit;
3114
3115         test_info = kzalloc(sizeof(*bp->test_info), GFP_KERNEL);
3116         if (!test_info)
3117                 goto ethtool_init_exit;
3118
3119         bp->test_info = test_info;
3120         bp->num_tests = resp->num_tests + BNXT_DRV_TESTS;
3121         if (bp->num_tests > BNXT_MAX_TEST)
3122                 bp->num_tests = BNXT_MAX_TEST;
3123
3124         test_info->offline_mask = resp->offline_tests;
3125         test_info->timeout = le16_to_cpu(resp->test_timeout);
3126         if (!test_info->timeout)
3127                 test_info->timeout = HWRM_CMD_TIMEOUT;
3128         for (i = 0; i < bp->num_tests; i++) {
3129                 char *str = test_info->string[i];
3130                 char *fw_str = resp->test0_name + i * 32;
3131
3132                 if (i == BNXT_MACLPBK_TEST_IDX) {
3133                         strcpy(str, "Mac loopback test (offline)");
3134                 } else if (i == BNXT_PHYLPBK_TEST_IDX) {
3135                         strcpy(str, "Phy loopback test (offline)");
3136                 } else if (i == BNXT_EXTLPBK_TEST_IDX) {
3137                         strcpy(str, "Ext loopback test (offline)");
3138                 } else if (i == BNXT_IRQ_TEST_IDX) {
3139                         strcpy(str, "Interrupt_test (offline)");
3140                 } else {
3141                         strlcpy(str, fw_str, ETH_GSTRING_LEN);
3142                         strncat(str, " test", ETH_GSTRING_LEN - strlen(str));
3143                         if (test_info->offline_mask & (1 << i))
3144                                 strncat(str, " (offline)",
3145                                         ETH_GSTRING_LEN - strlen(str));
3146                         else
3147                                 strncat(str, " (online)",
3148                                         ETH_GSTRING_LEN - strlen(str));
3149                 }
3150         }
3151
3152 ethtool_init_exit:
3153         mutex_unlock(&bp->hwrm_cmd_lock);
3154 }
3155
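/* Free the self-test info allocated in bnxt_ethtool_init(). */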
3156 void bnxt_ethtool_free(struct bnxt *bp)
3157 {
3158         kfree(bp->test_info);
3159         bp->test_info = NULL;
3160 }
3161
3162 const struct ethtool_ops bnxt_ethtool_ops = {
3163         .get_link_ksettings     = bnxt_get_link_ksettings,
3164         .set_link_ksettings     = bnxt_set_link_ksettings,
3165         .get_pauseparam         = bnxt_get_pauseparam,
3166         .set_pauseparam         = bnxt_set_pauseparam,
3167         .get_drvinfo            = bnxt_get_drvinfo,
3168         .get_wol                = bnxt_get_wol,
3169         .set_wol                = bnxt_set_wol,
3170         .get_coalesce           = bnxt_get_coalesce,
3171         .set_coalesce           = bnxt_set_coalesce,
3172         .get_msglevel           = bnxt_get_msglevel,
3173         .set_msglevel           = bnxt_set_msglevel,
3174         .get_sset_count         = bnxt_get_sset_count,
3175         .get_strings            = bnxt_get_strings,
3176         .get_ethtool_stats      = bnxt_get_ethtool_stats,
3177         .set_ringparam          = bnxt_set_ringparam,
3178         .get_ringparam          = bnxt_get_ringparam,
3179         .get_channels           = bnxt_get_channels,
3180         .set_channels           = bnxt_set_channels,
3181         .get_rxnfc              = bnxt_get_rxnfc,
3182         .set_rxnfc              = bnxt_set_rxnfc,
3183         .get_rxfh_indir_size    = bnxt_get_rxfh_indir_size,
3184         .get_rxfh_key_size      = bnxt_get_rxfh_key_size,
3185         .get_rxfh               = bnxt_get_rxfh,
3186         .flash_device           = bnxt_flash_device,
3187         .get_eeprom_len         = bnxt_get_eeprom_len,
3188         .get_eeprom             = bnxt_get_eeprom,
3189         .set_eeprom             = bnxt_set_eeprom,
3190         .get_link               = bnxt_get_link,
3191         .get_eee                = bnxt_get_eee,
3192         .set_eee                = bnxt_set_eee,
3193         .get_module_info        = bnxt_get_module_info,
3194         .get_module_eeprom      = bnxt_get_module_eeprom,
3195         .nway_reset             = bnxt_nway_reset,
3196         .set_phys_id            = bnxt_set_phys_id,
3197         .self_test              = bnxt_self_test,
3198         .reset                  = bnxt_reset,
3199         .get_dump_flag          = bnxt_get_dump_flag,
3200         .get_dump_data          = bnxt_get_dump_data,
3201 };