// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2005 - 2016 Broadcom
 * All rights reserved.
 *
 * Contact Information:
 * linux-drivers@emulex.com
 *
 * Emulex
 * 3333 Susan Street
 * Costa Mesa, CA 92626
 */
#include <linux/prefetch.h>
#include <linux/module.h>
#include "be.h"
#include "be_cmds.h"
#include <asm/div64.h>
#include <linux/aer.h>
#include <linux/if_bridge.h>
#include <net/busy_poll.h>
#include <net/vxlan.h>
MODULE_DESCRIPTION(DRV_DESC);
MODULE_AUTHOR("Emulex Corporation");
MODULE_LICENSE("GPL");
/* num_vfs module param is obsolete.
 * Use sysfs method to enable/disable VFs.
 */
static unsigned int num_vfs;
module_param(num_vfs, uint, 0444);
MODULE_PARM_DESC(num_vfs, "Number of PCI VFs to initialize");
static ushort rx_frag_size = 2048;
module_param(rx_frag_size, ushort, 0444);
MODULE_PARM_DESC(rx_frag_size, "Size of a fragment that holds rcvd data.");
/* Per-module error detection/recovery workq shared across all functions.
 * Each function schedules its own work request on this shared workq.
 */
static struct workqueue_struct *be_err_recovery_workq;
static const struct pci_device_id be_dev_ids[] = {
#ifdef CONFIG_BE2NET_BE2
	{ PCI_DEVICE(BE_VENDOR_ID, BE_DEVICE_ID1) },
	{ PCI_DEVICE(BE_VENDOR_ID, OC_DEVICE_ID1) },
#endif /* CONFIG_BE2NET_BE2 */
#ifdef CONFIG_BE2NET_BE3
	{ PCI_DEVICE(BE_VENDOR_ID, BE_DEVICE_ID2) },
	{ PCI_DEVICE(BE_VENDOR_ID, OC_DEVICE_ID2) },
#endif /* CONFIG_BE2NET_BE3 */
#ifdef CONFIG_BE2NET_LANCER
	{ PCI_DEVICE(EMULEX_VENDOR_ID, OC_DEVICE_ID3)},
	{ PCI_DEVICE(EMULEX_VENDOR_ID, OC_DEVICE_ID4)},
#endif /* CONFIG_BE2NET_LANCER */
#ifdef CONFIG_BE2NET_SKYHAWK
	{ PCI_DEVICE(EMULEX_VENDOR_ID, OC_DEVICE_ID5)},
	{ PCI_DEVICE(EMULEX_VENDOR_ID, OC_DEVICE_ID6)},
#endif /* CONFIG_BE2NET_SKYHAWK */
	{ 0 }
};
MODULE_DEVICE_TABLE(pci, be_dev_ids);
/* Workqueue used by all functions for deferring cmd calls to the adapter */
static struct workqueue_struct *be_wq;
/* UE Status Low CSR */
static const char * const ue_status_low_desc[] = {
	/* one short description string per UE status bit (entries elided) */
};

/* UE Status High CSR */
static const char * const ue_status_hi_desc[] = {
	/* one short description string per UE status bit (entries elided) */
};
#define BE_VF_IF_EN_FLAGS	(BE_IF_FLAGS_UNTAGGED | \
				 BE_IF_FLAGS_BROADCAST | \
				 BE_IF_FLAGS_MULTICAST | \
				 BE_IF_FLAGS_PASS_L3L4_ERRORS)
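/* Ring (queue) memory helpers: each be_queue_info is backed by a single
 * DMA-coherent buffer of len * entry_size bytes.
 */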
static void be_queue_free(struct be_adapter *adapter, struct be_queue_info *q)
{
	struct be_dma_mem *mem = &q->dma_mem;

	if (mem->va) {
		dma_free_coherent(&adapter->pdev->dev, mem->size, mem->va,
				  mem->dma);
		mem->va = NULL;
	}
}
static int be_queue_alloc(struct be_adapter *adapter, struct be_queue_info *q,
			  u16 len, u16 entry_size)
{
	struct be_dma_mem *mem = &q->dma_mem;

	memset(q, 0, sizeof(*q));
	q->len = len;
	q->entry_size = entry_size;
	mem->size = len * entry_size;
	mem->va = dma_alloc_coherent(&adapter->pdev->dev, mem->size,
				     &mem->dma, GFP_KERNEL);
	if (!mem->va)
		return -ENOMEM;
	return 0;
}
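/* Toggle the per-function host interrupt enable bit in the MEMBAR control
 * register, accessed through PCI config space. The current value is read
 * back first so the register is written only when the enable state
 * actually changes.
 */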
static void be_reg_intr_set(struct be_adapter *adapter, bool enable)
{
	u32 reg, enabled;

	pci_read_config_dword(adapter->pdev, PCICFG_MEMBAR_CTRL_INT_CTRL_OFFSET,
			      &reg);
	enabled = reg & MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;

	if (!enabled && enable)
		reg |= MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;
	else if (enabled && !enable)
		reg &= ~MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;
	else
		return;

	pci_write_config_dword(adapter->pdev,
			       PCICFG_MEMBAR_CTRL_INT_CTRL_OFFSET, reg);
}
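/* Prefer the FW cmd to enable/disable interrupts; fall back to the
 * config-space register write above if the cmd fails.
 */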
static void be_intr_set(struct be_adapter *adapter, bool enable)
{
	int status = 0;

	/* On lancer interrupts can't be controlled via this register */
	if (lancer_chip(adapter))
		return;

	if (be_check_error(adapter, BE_ERROR_EEH))
		return;

	status = be_cmd_intr_set(adapter, enable);
	if (status)
		be_reg_intr_set(adapter, enable);
}
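/* Doorbell helpers: the RQ/TXQ/EQ/CQ doorbells below encode the ring id and
 * the number of entries posted (or events popped) into a single 32-bit
 * write at the ring's offset within the doorbell BAR.
 */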
static void be_rxq_notify(struct be_adapter *adapter, u16 qid, u16 posted)
{
	u32 val = 0;

	if (be_check_error(adapter, BE_ERROR_HW))
		return;

	val |= qid & DB_RQ_RING_ID_MASK;
	val |= posted << DB_RQ_NUM_POSTED_SHIFT;

	wmb();
	iowrite32(val, adapter->db + DB_RQ_OFFSET);
}
static void be_txq_notify(struct be_adapter *adapter, struct be_tx_obj *txo,
			  u16 posted)
{
	u32 val = 0;

	if (be_check_error(adapter, BE_ERROR_HW))
		return;

	val |= txo->q.id & DB_TXULP_RING_ID_MASK;
	val |= (posted & DB_TXULP_NUM_POSTED_MASK) << DB_TXULP_NUM_POSTED_SHIFT;

	wmb();
	iowrite32(val, adapter->db + txo->db_offset);
}
static void be_eq_notify(struct be_adapter *adapter, u16 qid,
			 bool arm, bool clear_int, u16 num_popped,
			 u32 eq_delay_mult_enc)
{
	u32 val = 0;

	val |= qid & DB_EQ_RING_ID_MASK;
	val |= ((qid & DB_EQ_RING_ID_EXT_MASK) << DB_EQ_RING_ID_EXT_MASK_SHIFT);

	if (be_check_error(adapter, BE_ERROR_HW))
		return;

	if (arm)
		val |= 1 << DB_EQ_REARM_SHIFT;
	if (clear_int)
		val |= 1 << DB_EQ_CLR_SHIFT;
	val |= 1 << DB_EQ_EVNT_SHIFT;
	val |= num_popped << DB_EQ_NUM_POPPED_SHIFT;
	val |= eq_delay_mult_enc << DB_EQ_R2I_DLY_SHIFT;
	iowrite32(val, adapter->db + DB_EQ_OFFSET);
}
void be_cq_notify(struct be_adapter *adapter, u16 qid, bool arm, u16 num_popped)
{
	u32 val = 0;

	val |= qid & DB_CQ_RING_ID_MASK;
	val |= ((qid & DB_CQ_RING_ID_EXT_MASK) <<
		DB_CQ_RING_ID_EXT_MASK_SHIFT);

	if (be_check_error(adapter, BE_ERROR_HW))
		return;

	if (arm)
		val |= 1 << DB_CQ_REARM_SHIFT;
	val |= num_popped << DB_CQ_NUM_POPPED_SHIFT;
	iowrite32(val, adapter->db + DB_CQ_OFFSET);
}
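/* pmac_id[0] tracks the MAC currently programmed as the primary (dev) MAC;
 * pmac_id[1..uc_macs] track the MACs programmed from the uc-list. The two
 * helpers below keep that bookkeeping consistent when the primary MAC
 * overlaps an entry already present in the uc-list.
 */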
static int be_dev_mac_add(struct be_adapter *adapter, u8 *mac)
{
	int i;

	/* Check if mac has already been added as part of uc-list */
	for (i = 0; i < adapter->uc_macs; i++) {
		if (ether_addr_equal(adapter->uc_list[i].mac, mac)) {
			/* mac already added, skip addition */
			adapter->pmac_id[0] = adapter->pmac_id[i + 1];
			return 0;
		}
	}

	return be_cmd_pmac_add(adapter, mac, adapter->if_handle,
			       &adapter->pmac_id[0], 0);
}
static void be_dev_mac_del(struct be_adapter *adapter, int pmac_id)
{
	int i;

	/* Skip deletion if the programmed mac is
	 * being used in uc-list
	 */
	for (i = 0; i < adapter->uc_macs; i++) {
		if (adapter->pmac_id[i + 1] == pmac_id)
			return;
	}
	be_cmd_pmac_del(adapter, adapter->if_handle, pmac_id, 0);
}
static int be_mac_addr_set(struct net_device *netdev, void *p)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct device *dev = &adapter->pdev->dev;
	struct sockaddr *addr = p;
	int status;
	u8 mac[ETH_ALEN];
	u32 old_pmac_id = adapter->pmac_id[0];

	if (!is_valid_ether_addr(addr->sa_data))
		return -EADDRNOTAVAIL;

	/* Proceed further only if the user-provided MAC is different
	 * from the active MAC
	 */
	if (ether_addr_equal(addr->sa_data, adapter->dev_mac))
		return 0;

	/* BE3 VFs without FILTMGMT privilege are not allowed to set their MAC
	 * address
	 */
	if (BEx_chip(adapter) && be_virtfn(adapter) &&
	    !check_privilege(adapter, BE_PRIV_FILTMGMT))
		return -EPERM;

	/* if device is not running, copy MAC to netdev->dev_addr */
	if (!netif_running(netdev))
		goto done;

	/* The PMAC_ADD cmd may fail if the VF doesn't have FILTMGMT
	 * privilege or if PF did not provision the new MAC address.
	 * On BE3, this cmd will always fail if the VF doesn't have the
	 * FILTMGMT privilege. This failure is OK, only if the PF programmed
	 * the MAC for the VF.
	 */
	mutex_lock(&adapter->rx_filter_lock);
	status = be_dev_mac_add(adapter, (u8 *)addr->sa_data);
	if (!status) {

		/* Delete the old programmed MAC. This call may fail if the
		 * old MAC was already deleted by the PF driver.
		 */
		if (adapter->pmac_id[0] != old_pmac_id)
			be_dev_mac_del(adapter, old_pmac_id);
	}

	mutex_unlock(&adapter->rx_filter_lock);
	/* Decide if the new MAC is successfully activated only after
	 * querying the FW
	 */
	status = be_cmd_get_active_mac(adapter, adapter->pmac_id[0], mac,
				       adapter->if_handle, true, 0);
	if (status)
		goto err;

	/* The MAC change did not happen, either due to lack of privilege
	 * or PF didn't pre-provision.
	 */
	if (!ether_addr_equal(addr->sa_data, mac)) {
		status = -EPERM;
		goto err;
	}

	/* Remember currently programmed MAC */
	ether_addr_copy(adapter->dev_mac, addr->sa_data);
done:
	ether_addr_copy(netdev->dev_addr, addr->sa_data);
	dev_info(dev, "MAC address changed to %pM\n", addr->sa_data);
	return 0;
err:
	dev_warn(dev, "MAC address change to %pM failed\n", addr->sa_data);
	return status;
}
/* BE2 supports only v0 cmd */
static void *hw_stats_from_cmd(struct be_adapter *adapter)
{
	if (BE2_chip(adapter)) {
		struct be_cmd_resp_get_stats_v0 *cmd = adapter->stats_cmd.va;

		return &cmd->hw_stats;
	} else if (BE3_chip(adapter)) {
		struct be_cmd_resp_get_stats_v1 *cmd = adapter->stats_cmd.va;

		return &cmd->hw_stats;
	} else {
		struct be_cmd_resp_get_stats_v2 *cmd = adapter->stats_cmd.va;

		return &cmd->hw_stats;
	}
}
/* BE2 supports only v0 cmd */
static void *be_erx_stats_from_cmd(struct be_adapter *adapter)
{
	if (BE2_chip(adapter)) {
		struct be_hw_stats_v0 *hw_stats = hw_stats_from_cmd(adapter);

		return &hw_stats->erx;
	} else if (BE3_chip(adapter)) {
		struct be_hw_stats_v1 *hw_stats = hw_stats_from_cmd(adapter);

		return &hw_stats->erx;
	} else {
		struct be_hw_stats_v2 *hw_stats = hw_stats_from_cmd(adapter);

		return &hw_stats->erx;
	}
}
static void populate_be_v0_stats(struct be_adapter *adapter)
{
	struct be_hw_stats_v0 *hw_stats = hw_stats_from_cmd(adapter);
	struct be_pmem_stats *pmem_sts = &hw_stats->pmem;
	struct be_rxf_stats_v0 *rxf_stats = &hw_stats->rxf;
	struct be_port_rxf_stats_v0 *port_stats =
		&rxf_stats->port[adapter->port_num];
	struct be_drv_stats *drvs = &adapter->drv_stats;

	be_dws_le_to_cpu(hw_stats, sizeof(*hw_stats));
	drvs->rx_pause_frames = port_stats->rx_pause_frames;
	drvs->rx_crc_errors = port_stats->rx_crc_errors;
	drvs->rx_control_frames = port_stats->rx_control_frames;
	drvs->rx_in_range_errors = port_stats->rx_in_range_errors;
	drvs->rx_frame_too_long = port_stats->rx_frame_too_long;
	drvs->rx_dropped_runt = port_stats->rx_dropped_runt;
	drvs->rx_ip_checksum_errs = port_stats->rx_ip_checksum_errs;
	drvs->rx_tcp_checksum_errs = port_stats->rx_tcp_checksum_errs;
	drvs->rx_udp_checksum_errs = port_stats->rx_udp_checksum_errs;
	drvs->rxpp_fifo_overflow_drop = port_stats->rx_fifo_overflow;
	drvs->rx_dropped_tcp_length = port_stats->rx_dropped_tcp_length;
	drvs->rx_dropped_too_small = port_stats->rx_dropped_too_small;
	drvs->rx_dropped_too_short = port_stats->rx_dropped_too_short;
	drvs->rx_out_range_errors = port_stats->rx_out_range_errors;
	drvs->rx_input_fifo_overflow_drop = port_stats->rx_input_fifo_overflow;
	drvs->rx_dropped_header_too_small =
		port_stats->rx_dropped_header_too_small;
	drvs->rx_address_filtered =
		port_stats->rx_address_filtered +
		port_stats->rx_vlan_filtered;
	drvs->rx_alignment_symbol_errors =
		port_stats->rx_alignment_symbol_errors;

	drvs->tx_pauseframes = port_stats->tx_pauseframes;
	drvs->tx_controlframes = port_stats->tx_controlframes;

	if (adapter->port_num)
		drvs->jabber_events = rxf_stats->port1_jabber_events;
	else
		drvs->jabber_events = rxf_stats->port0_jabber_events;
	drvs->rx_drops_no_pbuf = rxf_stats->rx_drops_no_pbuf;
	drvs->rx_drops_no_erx_descr = rxf_stats->rx_drops_no_erx_descr;
	drvs->forwarded_packets = rxf_stats->forwarded_packets;
	drvs->rx_drops_mtu = rxf_stats->rx_drops_mtu;
	drvs->rx_drops_no_tpre_descr = rxf_stats->rx_drops_no_tpre_descr;
	drvs->rx_drops_too_many_frags = rxf_stats->rx_drops_too_many_frags;
	adapter->drv_stats.eth_red_drops = pmem_sts->eth_red_drops;
}
static void populate_be_v1_stats(struct be_adapter *adapter)
{
	struct be_hw_stats_v1 *hw_stats = hw_stats_from_cmd(adapter);
	struct be_pmem_stats *pmem_sts = &hw_stats->pmem;
	struct be_rxf_stats_v1 *rxf_stats = &hw_stats->rxf;
	struct be_port_rxf_stats_v1 *port_stats =
		&rxf_stats->port[adapter->port_num];
	struct be_drv_stats *drvs = &adapter->drv_stats;

	be_dws_le_to_cpu(hw_stats, sizeof(*hw_stats));
	drvs->pmem_fifo_overflow_drop = port_stats->pmem_fifo_overflow_drop;
	drvs->rx_priority_pause_frames = port_stats->rx_priority_pause_frames;
	drvs->rx_pause_frames = port_stats->rx_pause_frames;
	drvs->rx_crc_errors = port_stats->rx_crc_errors;
	drvs->rx_control_frames = port_stats->rx_control_frames;
	drvs->rx_in_range_errors = port_stats->rx_in_range_errors;
	drvs->rx_frame_too_long = port_stats->rx_frame_too_long;
	drvs->rx_dropped_runt = port_stats->rx_dropped_runt;
	drvs->rx_ip_checksum_errs = port_stats->rx_ip_checksum_errs;
	drvs->rx_tcp_checksum_errs = port_stats->rx_tcp_checksum_errs;
	drvs->rx_udp_checksum_errs = port_stats->rx_udp_checksum_errs;
	drvs->rx_dropped_tcp_length = port_stats->rx_dropped_tcp_length;
	drvs->rx_dropped_too_small = port_stats->rx_dropped_too_small;
	drvs->rx_dropped_too_short = port_stats->rx_dropped_too_short;
	drvs->rx_out_range_errors = port_stats->rx_out_range_errors;
	drvs->rx_dropped_header_too_small =
		port_stats->rx_dropped_header_too_small;
	drvs->rx_input_fifo_overflow_drop =
		port_stats->rx_input_fifo_overflow_drop;
	drvs->rx_address_filtered = port_stats->rx_address_filtered;
	drvs->rx_alignment_symbol_errors =
		port_stats->rx_alignment_symbol_errors;
	drvs->rxpp_fifo_overflow_drop = port_stats->rxpp_fifo_overflow_drop;
	drvs->tx_pauseframes = port_stats->tx_pauseframes;
	drvs->tx_controlframes = port_stats->tx_controlframes;
	drvs->tx_priority_pauseframes = port_stats->tx_priority_pauseframes;
	drvs->jabber_events = port_stats->jabber_events;
	drvs->rx_drops_no_pbuf = rxf_stats->rx_drops_no_pbuf;
	drvs->rx_drops_no_erx_descr = rxf_stats->rx_drops_no_erx_descr;
	drvs->forwarded_packets = rxf_stats->forwarded_packets;
	drvs->rx_drops_mtu = rxf_stats->rx_drops_mtu;
	drvs->rx_drops_no_tpre_descr = rxf_stats->rx_drops_no_tpre_descr;
	drvs->rx_drops_too_many_frags = rxf_stats->rx_drops_too_many_frags;
	adapter->drv_stats.eth_red_drops = pmem_sts->eth_red_drops;
}
static void populate_be_v2_stats(struct be_adapter *adapter)
{
	struct be_hw_stats_v2 *hw_stats = hw_stats_from_cmd(adapter);
	struct be_pmem_stats *pmem_sts = &hw_stats->pmem;
	struct be_rxf_stats_v2 *rxf_stats = &hw_stats->rxf;
	struct be_port_rxf_stats_v2 *port_stats =
		&rxf_stats->port[adapter->port_num];
	struct be_drv_stats *drvs = &adapter->drv_stats;

	be_dws_le_to_cpu(hw_stats, sizeof(*hw_stats));
	drvs->pmem_fifo_overflow_drop = port_stats->pmem_fifo_overflow_drop;
	drvs->rx_priority_pause_frames = port_stats->rx_priority_pause_frames;
	drvs->rx_pause_frames = port_stats->rx_pause_frames;
	drvs->rx_crc_errors = port_stats->rx_crc_errors;
	drvs->rx_control_frames = port_stats->rx_control_frames;
	drvs->rx_in_range_errors = port_stats->rx_in_range_errors;
	drvs->rx_frame_too_long = port_stats->rx_frame_too_long;
	drvs->rx_dropped_runt = port_stats->rx_dropped_runt;
	drvs->rx_ip_checksum_errs = port_stats->rx_ip_checksum_errs;
	drvs->rx_tcp_checksum_errs = port_stats->rx_tcp_checksum_errs;
	drvs->rx_udp_checksum_errs = port_stats->rx_udp_checksum_errs;
	drvs->rx_dropped_tcp_length = port_stats->rx_dropped_tcp_length;
	drvs->rx_dropped_too_small = port_stats->rx_dropped_too_small;
	drvs->rx_dropped_too_short = port_stats->rx_dropped_too_short;
	drvs->rx_out_range_errors = port_stats->rx_out_range_errors;
	drvs->rx_dropped_header_too_small =
		port_stats->rx_dropped_header_too_small;
	drvs->rx_input_fifo_overflow_drop =
		port_stats->rx_input_fifo_overflow_drop;
	drvs->rx_address_filtered = port_stats->rx_address_filtered;
	drvs->rx_alignment_symbol_errors =
		port_stats->rx_alignment_symbol_errors;
	drvs->rxpp_fifo_overflow_drop = port_stats->rxpp_fifo_overflow_drop;
	drvs->tx_pauseframes = port_stats->tx_pauseframes;
	drvs->tx_controlframes = port_stats->tx_controlframes;
	drvs->tx_priority_pauseframes = port_stats->tx_priority_pauseframes;
	drvs->jabber_events = port_stats->jabber_events;
	drvs->rx_drops_no_pbuf = rxf_stats->rx_drops_no_pbuf;
	drvs->rx_drops_no_erx_descr = rxf_stats->rx_drops_no_erx_descr;
	drvs->forwarded_packets = rxf_stats->forwarded_packets;
	drvs->rx_drops_mtu = rxf_stats->rx_drops_mtu;
	drvs->rx_drops_no_tpre_descr = rxf_stats->rx_drops_no_tpre_descr;
	drvs->rx_drops_too_many_frags = rxf_stats->rx_drops_too_many_frags;
	adapter->drv_stats.eth_red_drops = pmem_sts->eth_red_drops;
	if (be_roce_supported(adapter)) {
		drvs->rx_roce_bytes_lsd = port_stats->roce_bytes_received_lsd;
		drvs->rx_roce_bytes_msd = port_stats->roce_bytes_received_msd;
		drvs->rx_roce_frames = port_stats->roce_frames_received;
		drvs->roce_drops_crc = port_stats->roce_drops_crc;
		drvs->roce_drops_payload_len =
			port_stats->roce_drops_payload_len;
	}
}
static void populate_lancer_stats(struct be_adapter *adapter)
{
	struct be_drv_stats *drvs = &adapter->drv_stats;
	struct lancer_pport_stats *pport_stats = pport_stats_from_cmd(adapter);

	be_dws_le_to_cpu(pport_stats, sizeof(*pport_stats));
	drvs->rx_pause_frames = pport_stats->rx_pause_frames_lo;
	drvs->rx_crc_errors = pport_stats->rx_crc_errors_lo;
	drvs->rx_control_frames = pport_stats->rx_control_frames_lo;
	drvs->rx_in_range_errors = pport_stats->rx_in_range_errors;
	drvs->rx_frame_too_long = pport_stats->rx_frames_too_long_lo;
	drvs->rx_dropped_runt = pport_stats->rx_dropped_runt;
	drvs->rx_ip_checksum_errs = pport_stats->rx_ip_checksum_errors;
	drvs->rx_tcp_checksum_errs = pport_stats->rx_tcp_checksum_errors;
	drvs->rx_udp_checksum_errs = pport_stats->rx_udp_checksum_errors;
	drvs->rx_dropped_tcp_length =
		pport_stats->rx_dropped_invalid_tcp_length;
	drvs->rx_dropped_too_small = pport_stats->rx_dropped_too_small;
	drvs->rx_dropped_too_short = pport_stats->rx_dropped_too_short;
	drvs->rx_out_range_errors = pport_stats->rx_out_of_range_errors;
	drvs->rx_dropped_header_too_small =
		pport_stats->rx_dropped_header_too_small;
	drvs->rx_input_fifo_overflow_drop = pport_stats->rx_fifo_overflow;
	drvs->rx_address_filtered =
		pport_stats->rx_address_filtered +
		pport_stats->rx_vlan_filtered;
	drvs->rx_alignment_symbol_errors = pport_stats->rx_symbol_errors_lo;
	drvs->rxpp_fifo_overflow_drop = pport_stats->rx_fifo_overflow;
	drvs->tx_pauseframes = pport_stats->tx_pause_frames_lo;
	drvs->tx_controlframes = pport_stats->tx_control_frames_lo;
	drvs->jabber_events = pport_stats->rx_jabbers;
	drvs->forwarded_packets = pport_stats->num_forwards_lo;
	drvs->rx_drops_mtu = pport_stats->rx_drops_mtu_lo;
	drvs->rx_drops_too_many_frags =
		pport_stats->rx_drops_too_many_frags_lo;
}
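/* Fold a 16-bit HW counter into a 32-bit accumulator: the low 16 bits
 * mirror the HW counter, the high 16 bits count the number of wraps.
 * E.g. *acc = 0x0001FFF0 and val = 0x0005 means the HW counter wrapped,
 * so the new value becomes 0x00010005 + 65536 = 0x00020005.
 */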
static void accumulate_16bit_val(u32 *acc, u16 val)
{
#define lo(x)			(x & 0xFFFF)
#define hi(x)			(x & 0xFFFF0000)
	bool wrapped = val < lo(*acc);
	u32 newacc = hi(*acc) + val;

	if (wrapped)
		newacc += 65536;
	WRITE_ONCE(*acc, newacc);
}
static void populate_erx_stats(struct be_adapter *adapter,
			       struct be_rx_obj *rxo, u32 erx_stat)
{
	if (!BEx_chip(adapter))
		rx_stats(rxo)->rx_drops_no_frags = erx_stat;
	else
		/* below erx HW counter can actually wrap around after
		 * 65535. Driver accumulates a 32-bit value
		 */
		accumulate_16bit_val(&rx_stats(rxo)->rx_drops_no_frags,
				     (u16)erx_stat);
}
void be_parse_stats(struct be_adapter *adapter)
{
	struct be_erx_stats_v2 *erx = be_erx_stats_from_cmd(adapter);
	struct be_rx_obj *rxo;
	u32 erx_stat;
	int i;

	if (lancer_chip(adapter)) {
		populate_lancer_stats(adapter);
	} else {
		if (BE2_chip(adapter))
			populate_be_v0_stats(adapter);
		else if (BE3_chip(adapter))
			/* for BE3 */
			populate_be_v1_stats(adapter);
		else
			populate_be_v2_stats(adapter);

		/* erx_v2 is longer than v0, v1. use v2 for v0, v1 access */
		for_all_rx_queues(adapter, rxo, i) {
			erx_stat = erx->rx_drops_no_fragments[rxo->q.id];
			populate_erx_stats(adapter, rxo, erx_stat);
		}
	}
}
static void be_get_stats64(struct net_device *netdev,
			   struct rtnl_link_stats64 *stats)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct be_drv_stats *drvs = &adapter->drv_stats;
	struct be_rx_obj *rxo;
	struct be_tx_obj *txo;
	unsigned int start;
	u64 pkts, bytes;
	int i;

	for_all_rx_queues(adapter, rxo, i) {
		const struct be_rx_stats *rx_stats = rx_stats(rxo);

		do {
			start = u64_stats_fetch_begin_irq(&rx_stats->sync);
			pkts = rx_stats(rxo)->rx_pkts;
			bytes = rx_stats(rxo)->rx_bytes;
		} while (u64_stats_fetch_retry_irq(&rx_stats->sync, start));
		stats->rx_packets += pkts;
		stats->rx_bytes += bytes;
		stats->multicast += rx_stats(rxo)->rx_mcast_pkts;
		stats->rx_dropped += rx_stats(rxo)->rx_drops_no_skbs +
					rx_stats(rxo)->rx_drops_no_frags;
	}

	for_all_tx_queues(adapter, txo, i) {
		const struct be_tx_stats *tx_stats = tx_stats(txo);

		do {
			start = u64_stats_fetch_begin_irq(&tx_stats->sync);
			pkts = tx_stats(txo)->tx_pkts;
			bytes = tx_stats(txo)->tx_bytes;
		} while (u64_stats_fetch_retry_irq(&tx_stats->sync, start));
		stats->tx_packets += pkts;
		stats->tx_bytes += bytes;
	}

	/* bad pkts received */
	stats->rx_errors = drvs->rx_crc_errors +
		drvs->rx_alignment_symbol_errors +
		drvs->rx_in_range_errors +
		drvs->rx_out_range_errors +
		drvs->rx_frame_too_long +
		drvs->rx_dropped_too_small +
		drvs->rx_dropped_too_short +
		drvs->rx_dropped_header_too_small +
		drvs->rx_dropped_tcp_length +
		drvs->rx_dropped_runt;

	/* detailed rx errors */
	stats->rx_length_errors = drvs->rx_in_range_errors +
		drvs->rx_out_range_errors +
		drvs->rx_frame_too_long;

	stats->rx_crc_errors = drvs->rx_crc_errors;

	/* frame alignment errors */
	stats->rx_frame_errors = drvs->rx_alignment_symbol_errors;

	/* receiver fifo overrun */
	/* drops_no_pbuf is not per i/f, it's per BE card */
	stats->rx_fifo_errors = drvs->rxpp_fifo_overflow_drop +
		drvs->rx_input_fifo_overflow_drop +
		drvs->rx_drops_no_pbuf;
}
void be_link_status_update(struct be_adapter *adapter, u8 link_status)
{
	struct net_device *netdev = adapter->netdev;

	if (!(adapter->flags & BE_FLAGS_LINK_STATUS_INIT)) {
		netif_carrier_off(netdev);
		adapter->flags |= BE_FLAGS_LINK_STATUS_INIT;
	}

	if (link_status)
		netif_carrier_on(netdev);
	else
		netif_carrier_off(netdev);

	netdev_info(netdev, "Link is %s\n", link_status ? "Up" : "Down");
}
static int be_gso_hdr_len(struct sk_buff *skb)
{
	if (skb->encapsulation)
		return skb_inner_transport_offset(skb) +
		       inner_tcp_hdrlen(skb);
	return skb_transport_offset(skb) + tcp_hdrlen(skb);
}
static void be_tx_stats_update(struct be_tx_obj *txo, struct sk_buff *skb)
{
	struct be_tx_stats *stats = tx_stats(txo);
	u32 tx_pkts = skb_shinfo(skb)->gso_segs ? : 1;
	/* Account for headers which get duplicated in TSO pkt */
	u32 dup_hdr_len = tx_pkts > 1 ? be_gso_hdr_len(skb) * (tx_pkts - 1) : 0;

	u64_stats_update_begin(&stats->sync);
	stats->tx_reqs++;
	stats->tx_bytes += skb->len + dup_hdr_len;
	stats->tx_pkts += tx_pkts;
	if (skb->encapsulation && skb->ip_summed == CHECKSUM_PARTIAL)
		stats->tx_vxlan_offload_pkts += tx_pkts;
	u64_stats_update_end(&stats->sync);
}
/* Returns number of WRBs needed for the skb */
static u32 skb_wrb_cnt(struct sk_buff *skb)
{
	/* +1 for the header wrb */
	return 1 + (skb_headlen(skb) ? 1 : 0) + skb_shinfo(skb)->nr_frags;
}
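/* A WRB (work request block) describes one buffer fragment: the 64-bit DMA
 * address is split into hi/lo halves and stored little-endian along with
 * the fragment length.
 */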
static inline void wrb_fill(struct be_eth_wrb *wrb, u64 addr, int len)
{
	wrb->frag_pa_hi = cpu_to_le32(upper_32_bits(addr));
	wrb->frag_pa_lo = cpu_to_le32(lower_32_bits(addr));
	wrb->frag_len = cpu_to_le32(len & ETH_WRB_FRAG_LEN_MASK);
	wrb->rsvd0 = 0;
}

/* A dummy wrb is just all zeros. Using a separate routine for dummy-wrb
 * to avoid the swap and shift/mask operations in wrb_fill().
 */
static inline void wrb_fill_dummy(struct be_eth_wrb *wrb)
{
	wrb->frag_pa_hi = 0;
	wrb->frag_pa_lo = 0;
	wrb->frag_len = 0;
	wrb->rsvd0 = 0;
}
static inline u16 be_get_tx_vlan_tag(struct be_adapter *adapter,
				     struct sk_buff *skb)
{
	u8 vlan_prio;
	u16 vlan_tag;

	vlan_tag = skb_vlan_tag_get(skb);
	vlan_prio = skb_vlan_tag_get_prio(skb);
	/* If vlan priority provided by OS is NOT in available bmap */
	if (!(adapter->vlan_prio_bmap & (1 << vlan_prio)))
		vlan_tag = (vlan_tag & ~VLAN_PRIO_MASK) |
			   adapter->recommended_prio_bits;

	return vlan_tag;
}
/* Used only for IP tunnel packets */
static u16 skb_inner_ip_proto(struct sk_buff *skb)
{
	return (inner_ip_hdr(skb)->version == 4) ?
		inner_ip_hdr(skb)->protocol : inner_ipv6_hdr(skb)->nexthdr;
}

static u16 skb_ip_proto(struct sk_buff *skb)
{
	return (ip_hdr(skb)->version == 4) ?
		ip_hdr(skb)->protocol : ipv6_hdr(skb)->nexthdr;
}
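/* TXQ occupancy checks: a queue is treated as full when it cannot take
 * another maximally-fragmented skb, and is only woken once it drains to
 * half its length, to avoid stop/wake thrash.
 */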
static inline bool be_is_txq_full(struct be_tx_obj *txo)
{
	return atomic_read(&txo->q.used) + BE_MAX_TX_FRAG_COUNT >= txo->q.len;
}

static inline bool be_can_txq_wake(struct be_tx_obj *txo)
{
	return atomic_read(&txo->q.used) < txo->q.len / 2;
}

static inline bool be_is_tx_compl_pending(struct be_tx_obj *txo)
{
	return atomic_read(&txo->q.used) > txo->pend_wrb_cnt;
}
static void be_get_wrb_params_from_skb(struct be_adapter *adapter,
				       struct sk_buff *skb,
				       struct be_wrb_params *wrb_params)
{
	u16 proto;

	if (skb_is_gso(skb)) {
		BE_WRB_F_SET(wrb_params->features, LSO, 1);
		wrb_params->lso_mss = skb_shinfo(skb)->gso_size;
		if (skb_is_gso_v6(skb) && !lancer_chip(adapter))
			BE_WRB_F_SET(wrb_params->features, LSO6, 1);
	} else if (skb->ip_summed == CHECKSUM_PARTIAL) {
		if (skb->encapsulation) {
			BE_WRB_F_SET(wrb_params->features, IPCS, 1);
			proto = skb_inner_ip_proto(skb);
		} else {
			proto = skb_ip_proto(skb);
		}
		if (proto == IPPROTO_TCP)
			BE_WRB_F_SET(wrb_params->features, TCPCS, 1);
		else if (proto == IPPROTO_UDP)
			BE_WRB_F_SET(wrb_params->features, UDPCS, 1);
	}

	if (skb_vlan_tag_present(skb)) {
		BE_WRB_F_SET(wrb_params->features, VLAN, 1);
		wrb_params->vlan_tag = be_get_tx_vlan_tag(adapter, skb);
	}

	BE_WRB_F_SET(wrb_params->features, CRC, 1);
}
static void wrb_fill_hdr(struct be_adapter *adapter,
			 struct be_eth_hdr_wrb *hdr,
			 struct be_wrb_params *wrb_params,
			 struct sk_buff *skb)
{
	memset(hdr, 0, sizeof(*hdr));

	SET_TX_WRB_HDR_BITS(crc, hdr,
			    BE_WRB_F_GET(wrb_params->features, CRC));
	SET_TX_WRB_HDR_BITS(ipcs, hdr,
			    BE_WRB_F_GET(wrb_params->features, IPCS));
	SET_TX_WRB_HDR_BITS(tcpcs, hdr,
			    BE_WRB_F_GET(wrb_params->features, TCPCS));
	SET_TX_WRB_HDR_BITS(udpcs, hdr,
			    BE_WRB_F_GET(wrb_params->features, UDPCS));

	SET_TX_WRB_HDR_BITS(lso, hdr,
			    BE_WRB_F_GET(wrb_params->features, LSO));
	SET_TX_WRB_HDR_BITS(lso6, hdr,
			    BE_WRB_F_GET(wrb_params->features, LSO6));
	SET_TX_WRB_HDR_BITS(lso_mss, hdr, wrb_params->lso_mss);

	/* Hack to skip HW VLAN tagging needs evt = 1, compl = 0. When this
	 * hack is not needed, the evt bit is set while ringing DB.
	 */
	SET_TX_WRB_HDR_BITS(event, hdr,
			    BE_WRB_F_GET(wrb_params->features, VLAN_SKIP_HW));
	SET_TX_WRB_HDR_BITS(vlan, hdr,
			    BE_WRB_F_GET(wrb_params->features, VLAN));
	SET_TX_WRB_HDR_BITS(vlan_tag, hdr, wrb_params->vlan_tag);

	SET_TX_WRB_HDR_BITS(num_wrb, hdr, skb_wrb_cnt(skb));
	SET_TX_WRB_HDR_BITS(len, hdr, skb->len);
	SET_TX_WRB_HDR_BITS(mgmt, hdr,
			    BE_WRB_F_GET(wrb_params->features, OS2BMC));
}
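/* Recover the DMA address and length from a previously filled WRB so the
 * mapping can be released; the first fragment of an skb is mapped with
 * dma_map_single(), the rest are page fragments.
 */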
static void unmap_tx_frag(struct device *dev, struct be_eth_wrb *wrb,
			  bool unmap_single)
{
	u64 dma;
	u32 frag_len = le32_to_cpu(wrb->frag_len);

	dma = (u64)le32_to_cpu(wrb->frag_pa_hi) << 32 |
		(u64)le32_to_cpu(wrb->frag_pa_lo);
	if (frag_len) {
		if (unmap_single)
			dma_unmap_single(dev, dma, frag_len, DMA_TO_DEVICE);
		else
			dma_unmap_page(dev, dma, frag_len, DMA_TO_DEVICE);
	}
}
/* Grab a WRB header for xmit */
static u32 be_tx_get_wrb_hdr(struct be_tx_obj *txo)
{
	u32 head = txo->q.head;

	queue_head_inc(&txo->q);
	return head;
}
/* Set up the WRB header for xmit */
static void be_tx_setup_wrb_hdr(struct be_adapter *adapter,
				struct be_tx_obj *txo,
				struct be_wrb_params *wrb_params,
				struct sk_buff *skb, u16 head)
{
	u32 num_frags = skb_wrb_cnt(skb);
	struct be_queue_info *txq = &txo->q;
	struct be_eth_hdr_wrb *hdr = queue_index_node(txq, head);

	wrb_fill_hdr(adapter, hdr, wrb_params, skb);
	be_dws_cpu_to_le(hdr, sizeof(*hdr));

	BUG_ON(txo->sent_skb_list[head]);
	txo->sent_skb_list[head] = skb;
	txo->last_req_hdr = head;
	atomic_add(num_frags, &txq->used);
	txo->last_req_wrb_cnt = num_frags;
	txo->pend_wrb_cnt += num_frags;
}
/* Setup a WRB fragment (buffer descriptor) for xmit */
static void be_tx_setup_wrb_frag(struct be_tx_obj *txo, dma_addr_t busaddr,
				 int len)
{
	struct be_eth_wrb *wrb;
	struct be_queue_info *txq = &txo->q;

	wrb = queue_head_node(txq);
	wrb_fill(wrb, busaddr, len);
	queue_head_inc(txq);
}
/* Bring the queue back to the state it was in before be_xmit_enqueue() routine
 * was invoked. The producer index is restored to the previous packet and the
 * WRBs of the current packet are unmapped. Invoked to handle tx setup errors.
 */
static void be_xmit_restore(struct be_adapter *adapter,
			    struct be_tx_obj *txo, u32 head, bool map_single,
			    u32 copied)
{
	struct device *dev;
	struct be_eth_wrb *wrb;
	struct be_queue_info *txq = &txo->q;

	dev = &adapter->pdev->dev;
	txq->head = head;

	/* skip the first wrb (hdr); it's not mapped */
	queue_head_inc(txq);
	while (copied) {
		wrb = queue_head_node(txq);
		unmap_tx_frag(dev, wrb, map_single);
		map_single = false;
		copied -= le32_to_cpu(wrb->frag_len);
		queue_head_inc(txq);
	}

	txq->head = head;
}
/* Enqueue the given packet for transmit. This routine allocates WRBs for the
 * packet, dma maps the packet buffers and sets up the WRBs. Returns the number
 * of WRBs used up by the packet.
 */
static u32 be_xmit_enqueue(struct be_adapter *adapter, struct be_tx_obj *txo,
			   struct sk_buff *skb,
			   struct be_wrb_params *wrb_params)
{
	u32 i, copied = 0, wrb_cnt = skb_wrb_cnt(skb);
	struct device *dev = &adapter->pdev->dev;
	bool map_single = false;
	dma_addr_t busaddr;
	u32 head;
	int len;

	head = be_tx_get_wrb_hdr(txo);

	if (skb->len > skb->data_len) {
		len = skb_headlen(skb);

		busaddr = dma_map_single(dev, skb->data, len, DMA_TO_DEVICE);
		if (dma_mapping_error(dev, busaddr))
			goto dma_err;
		map_single = true;
		be_tx_setup_wrb_frag(txo, busaddr, len);
		copied += len;
	}

	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
		const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];

		len = skb_frag_size(frag);

		busaddr = skb_frag_dma_map(dev, frag, 0, len, DMA_TO_DEVICE);
		if (dma_mapping_error(dev, busaddr))
			goto dma_err;
		be_tx_setup_wrb_frag(txo, busaddr, len);
		copied += len;
	}

	be_tx_setup_wrb_hdr(adapter, txo, wrb_params, skb, head);

	be_tx_stats_update(txo, skb);
	return wrb_cnt;

dma_err:
	adapter->drv_stats.dma_map_errors++;
	be_xmit_restore(adapter, txo, head, map_single, copied);
	return 0;
}
static inline int qnq_async_evt_rcvd(struct be_adapter *adapter)
{
	return adapter->flags & BE_FLAGS_QNQ_ASYNC_EVT_RCVD;
}
static struct sk_buff *be_insert_vlan_in_pkt(struct be_adapter *adapter,
					     struct sk_buff *skb,
					     struct be_wrb_params
					     *wrb_params)
{
	bool insert_vlan = false;
	u16 vlan_tag = 0;

	skb = skb_share_check(skb, GFP_ATOMIC);
	if (unlikely(!skb))
		return skb;

	if (skb_vlan_tag_present(skb)) {
		vlan_tag = be_get_tx_vlan_tag(adapter, skb);
		insert_vlan = true;
	}

	if (qnq_async_evt_rcvd(adapter) && adapter->pvid) {
		if (!insert_vlan) {
			vlan_tag = adapter->pvid;
			insert_vlan = true;
		}
		/* f/w workaround to set skip_hw_vlan = 1, informs the F/W to
		 * skip VLAN insertion
		 */
		BE_WRB_F_SET(wrb_params->features, VLAN_SKIP_HW, 1);
	}

	if (insert_vlan) {
		skb = vlan_insert_tag_set_proto(skb, htons(ETH_P_8021Q),
						vlan_tag);
		if (unlikely(!skb))
			return skb;
		__vlan_hwaccel_clear_tag(skb);
	}

	/* Insert the outer VLAN, if any */
	if (adapter->qnq_vid) {
		vlan_tag = adapter->qnq_vid;
		skb = vlan_insert_tag_set_proto(skb, htons(ETH_P_8021Q),
						vlan_tag);
		if (unlikely(!skb))
			return skb;
		BE_WRB_F_SET(wrb_params->features, VLAN_SKIP_HW, 1);
	}

	return skb;
}
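/* Returns true for IPv6 packets whose next header is neither TCP nor UDP
 * and whose first extension header has hdrlen 0xff -- the pattern that
 * trips the BE3 HW VLAN tagging lockup handled in the TX workarounds
 * below.
 */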
static bool be_ipv6_exthdr_check(struct sk_buff *skb)
{
	struct ethhdr *eh = (struct ethhdr *)skb->data;
	u16 offset = ETH_HLEN;

	if (eh->h_proto == htons(ETH_P_IPV6)) {
		struct ipv6hdr *ip6h = (struct ipv6hdr *)(skb->data + offset);

		offset += sizeof(struct ipv6hdr);
		if (ip6h->nexthdr != NEXTHDR_TCP &&
		    ip6h->nexthdr != NEXTHDR_UDP) {
			struct ipv6_opt_hdr *ehdr =
				(struct ipv6_opt_hdr *)(skb->data + offset);

			/* offending pkt: 2nd byte following IPv6 hdr is 0xff */
			if (ehdr->hdrlen == 0xff)
				return true;
		}
	}
	return false;
}
static int be_vlan_tag_tx_chk(struct be_adapter *adapter, struct sk_buff *skb)
{
	return skb_vlan_tag_present(skb) || adapter->pvid || adapter->qnq_vid;
}

static int be_ipv6_tx_stall_chk(struct be_adapter *adapter, struct sk_buff *skb)
{
	return BE3_chip(adapter) && be_ipv6_exthdr_check(skb);
}
static struct sk_buff *be_lancer_xmit_workarounds(struct be_adapter *adapter,
						  struct sk_buff *skb,
						  struct be_wrb_params
						  *wrb_params)
{
	struct vlan_ethhdr *veh = (struct vlan_ethhdr *)skb->data;
	unsigned int eth_hdr_len;
	struct iphdr *ip;

	/* For padded packets, BE HW modifies tot_len field in IP header
	 * incorrectly when VLAN tag is inserted by HW.
	 * For padded packets, Lancer computes incorrect checksum.
	 */
	eth_hdr_len = ntohs(skb->protocol) == ETH_P_8021Q ?
						VLAN_ETH_HLEN : ETH_HLEN;
	if (skb->len <= 60 &&
	    (lancer_chip(adapter) || skb_vlan_tag_present(skb)) &&
	    is_ipv4_pkt(skb)) {
		ip = (struct iphdr *)ip_hdr(skb);
		pskb_trim(skb, eth_hdr_len + ntohs(ip->tot_len));
	}

	/* If vlan tag is already inlined in the packet, skip HW VLAN
	 * tagging in pvid-tagging mode
	 */
	if (be_pvid_tagging_enabled(adapter) &&
	    veh->h_vlan_proto == htons(ETH_P_8021Q))
		BE_WRB_F_SET(wrb_params->features, VLAN_SKIP_HW, 1);

	/* HW has a bug wherein it will calculate CSUM for VLAN
	 * pkts even though it is disabled.
	 * Manually insert VLAN in pkt.
	 */
	if (skb->ip_summed != CHECKSUM_PARTIAL &&
	    skb_vlan_tag_present(skb)) {
		skb = be_insert_vlan_in_pkt(adapter, skb, wrb_params);
		if (unlikely(!skb))
			goto err;
	}

	/* HW may lockup when VLAN HW tagging is requested on
	 * certain ipv6 packets. Drop such pkts if the HW workaround to
	 * skip HW tagging is not enabled by FW.
	 */
	if (unlikely(be_ipv6_tx_stall_chk(adapter, skb) &&
		     (adapter->pvid || adapter->qnq_vid) &&
		     !qnq_async_evt_rcvd(adapter)))
		goto tx_drop;

	/* Manual VLAN tag insertion to prevent:
	 * ASIC lockup when the ASIC inserts VLAN tag into
	 * certain ipv6 packets. Insert VLAN tags in driver,
	 * and set event, completion, vlan bits accordingly
	 * in the Tx WRB.
	 */
	if (be_ipv6_tx_stall_chk(adapter, skb) &&
	    be_vlan_tag_tx_chk(adapter, skb)) {
		skb = be_insert_vlan_in_pkt(adapter, skb, wrb_params);
		if (unlikely(!skb))
			goto err;
	}

	return skb;
tx_drop:
	dev_kfree_skb_any(skb);
err:
	return NULL;
}
static struct sk_buff *be_xmit_workarounds(struct be_adapter *adapter,
					   struct sk_buff *skb,
					   struct be_wrb_params *wrb_params)
{
	int err;

	/* Lancer, SH and BE3 in SRIOV mode have a bug wherein
	 * packets that are 32b or less may cause a transmit stall
	 * on that port. The workaround is to pad such packets
	 * (len <= 32 bytes) to a minimum length of 36b.
	 */
	if (skb->len <= 32) {
		if (skb_put_padto(skb, 36))
			return NULL;
	}

	if (BEx_chip(adapter) || lancer_chip(adapter)) {
		skb = be_lancer_xmit_workarounds(adapter, skb, wrb_params);
		if (!skb)
			return NULL;
	}

	/* The stack can send us skbs with length greater than
	 * what the HW can handle. Trim the extra bytes.
	 */
	WARN_ON_ONCE(skb->len > BE_MAX_GSO_SIZE);
	err = pskb_trim(skb, BE_MAX_GSO_SIZE);
	WARN_ON(err);

	return skb;
}
static void be_xmit_flush(struct be_adapter *adapter, struct be_tx_obj *txo)
{
	struct be_queue_info *txq = &txo->q;
	struct be_eth_hdr_wrb *hdr = queue_index_node(txq, txo->last_req_hdr);

	/* Mark the last request eventable if it hasn't been marked already */
	if (!(hdr->dw[2] & cpu_to_le32(TX_HDR_WRB_EVT)))
		hdr->dw[2] |= cpu_to_le32(TX_HDR_WRB_EVT | TX_HDR_WRB_COMPL);

	/* compose a dummy wrb if there are odd set of wrbs to notify */
	if (!lancer_chip(adapter) && (txo->pend_wrb_cnt & 1)) {
		wrb_fill_dummy(queue_head_node(txq));
		queue_head_inc(txq);
		atomic_inc(&txq->used);
		txo->pend_wrb_cnt++;
		hdr->dw[2] &= ~cpu_to_le32(TX_HDR_WRB_NUM_MASK <<
					   TX_HDR_WRB_NUM_SHIFT);
		hdr->dw[2] |= cpu_to_le32((txo->last_req_wrb_cnt + 1) <<
					  TX_HDR_WRB_NUM_SHIFT);
	}
	be_txq_notify(adapter, txo, txo->pend_wrb_cnt);
	txo->pend_wrb_cnt = 0;
}
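/* OS2BMC: when enabled, selected host TX packets (matched by the filters
 * below) are enqueued a second time with the mgmt bit set so a copy is
 * forwarded to the on-board BMC.
 */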
/* OS2BMC related */

#define DHCP_CLIENT_PORT	68
#define DHCP_SERVER_PORT	67
#define NET_BIOS_PORT1		137
#define NET_BIOS_PORT2		138
#define DHCPV6_RAS_PORT		547

#define is_mc_allowed_on_bmc(adapter, eh)	\
	(!is_multicast_filt_enabled(adapter) &&	\
	 is_multicast_ether_addr(eh->h_dest) &&	\
	 !is_broadcast_ether_addr(eh->h_dest))

#define is_bc_allowed_on_bmc(adapter, eh)	\
	(!is_broadcast_filt_enabled(adapter) &&	\
	 is_broadcast_ether_addr(eh->h_dest))

#define is_arp_allowed_on_bmc(adapter, skb)	\
	(is_arp(skb) && is_arp_filt_enabled(adapter))

#define is_arp(skb)	(skb->protocol == htons(ETH_P_ARP))

#define is_arp_filt_enabled(adapter)	\
	(adapter->bmc_filt_mask & (BMC_FILT_BROADCAST_ARP))

#define is_dhcp_client_filt_enabled(adapter)	\
	(adapter->bmc_filt_mask & BMC_FILT_BROADCAST_DHCP_CLIENT)

#define is_dhcp_srvr_filt_enabled(adapter)	\
	(adapter->bmc_filt_mask & BMC_FILT_BROADCAST_DHCP_SERVER)

#define is_nbios_filt_enabled(adapter)	\
	(adapter->bmc_filt_mask & BMC_FILT_BROADCAST_NET_BIOS)

#define is_ipv6_na_filt_enabled(adapter)	\
	(adapter->bmc_filt_mask &	\
	 BMC_FILT_MULTICAST_IPV6_NEIGH_ADVER)

#define is_ipv6_ra_filt_enabled(adapter)	\
	(adapter->bmc_filt_mask & BMC_FILT_MULTICAST_IPV6_RA)

#define is_ipv6_ras_filt_enabled(adapter)	\
	(adapter->bmc_filt_mask & BMC_FILT_MULTICAST_IPV6_RAS)

#define is_broadcast_filt_enabled(adapter)	\
	(adapter->bmc_filt_mask & BMC_FILT_BROADCAST)

#define is_multicast_filt_enabled(adapter)	\
	(adapter->bmc_filt_mask & BMC_FILT_MULTICAST)
static bool be_send_pkt_to_bmc(struct be_adapter *adapter,
			       struct sk_buff **skb)
{
	struct ethhdr *eh = (struct ethhdr *)(*skb)->data;
	bool os2bmc = false;

	if (!be_is_os2bmc_enabled(adapter))
		goto done;

	if (!is_multicast_ether_addr(eh->h_dest))
		goto done;

	if (is_mc_allowed_on_bmc(adapter, eh) ||
	    is_bc_allowed_on_bmc(adapter, eh) ||
	    is_arp_allowed_on_bmc(adapter, (*skb))) {
		os2bmc = true;
		goto done;
	}

	if ((*skb)->protocol == htons(ETH_P_IPV6)) {
		struct ipv6hdr *hdr = ipv6_hdr((*skb));
		u8 nexthdr = hdr->nexthdr;

		if (nexthdr == IPPROTO_ICMPV6) {
			struct icmp6hdr *icmp6 = icmp6_hdr((*skb));

			switch (icmp6->icmp6_type) {
			case NDISC_ROUTER_ADVERTISEMENT:
				os2bmc = is_ipv6_ra_filt_enabled(adapter);
				goto done;
			case NDISC_NEIGHBOUR_ADVERTISEMENT:
				os2bmc = is_ipv6_na_filt_enabled(adapter);
				goto done;
			default:
				break;
			}
		}
	}

	if (is_udp_pkt((*skb))) {
		struct udphdr *udp = udp_hdr((*skb));

		switch (ntohs(udp->dest)) {
		case DHCP_CLIENT_PORT:
			os2bmc = is_dhcp_client_filt_enabled(adapter);
			goto done;
		case DHCP_SERVER_PORT:
			os2bmc = is_dhcp_srvr_filt_enabled(adapter);
			goto done;
		case NET_BIOS_PORT1:
		case NET_BIOS_PORT2:
			os2bmc = is_nbios_filt_enabled(adapter);
			goto done;
		case DHCPV6_RAS_PORT:
			os2bmc = is_ipv6_ras_filt_enabled(adapter);
			goto done;
		default:
			break;
		}
	}
done:
	/* For packets over a vlan, which are destined
	 * to BMC, asic expects the vlan to be inline in the packet.
	 */
	if (os2bmc)
		*skb = be_insert_vlan_in_pkt(adapter, *skb, NULL);

	return os2bmc;
}
static netdev_tx_t be_xmit(struct sk_buff *skb, struct net_device *netdev)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	u16 q_idx = skb_get_queue_mapping(skb);
	struct be_tx_obj *txo = &adapter->tx_obj[q_idx];
	struct be_wrb_params wrb_params = { 0 };
	bool flush = !netdev_xmit_more();
	u16 wrb_cnt;

	skb = be_xmit_workarounds(adapter, skb, &wrb_params);
	if (unlikely(!skb))
		goto drop;

	be_get_wrb_params_from_skb(adapter, skb, &wrb_params);

	wrb_cnt = be_xmit_enqueue(adapter, txo, skb, &wrb_params);
	if (unlikely(!wrb_cnt)) {
		dev_kfree_skb_any(skb);
		goto drop;
	}

	/* if os2bmc is enabled and if the pkt is destined to bmc,
	 * enqueue the pkt a 2nd time with mgmt bit set.
	 */
	if (be_send_pkt_to_bmc(adapter, &skb)) {
		BE_WRB_F_SET(wrb_params.features, OS2BMC, 1);
		wrb_cnt = be_xmit_enqueue(adapter, txo, skb, &wrb_params);
		if (unlikely(!wrb_cnt))
			goto drop;
		else
			skb_get(skb);
	}

	if (be_is_txq_full(txo)) {
		netif_stop_subqueue(netdev, q_idx);
		tx_stats(txo)->tx_stops++;
	}

	if (flush || __netif_subqueue_stopped(netdev, q_idx))
		be_xmit_flush(adapter, txo);

	return NETDEV_TX_OK;
drop:
	tx_stats(txo)->tx_drv_drops++;
	/* Flush the already enqueued tx requests */
	if (flush && txo->pend_wrb_cnt)
		be_xmit_flush(adapter, txo);

	return NETDEV_TX_OK;
}
static void be_tx_timeout(struct net_device *netdev, unsigned int txqueue)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct device *dev = &adapter->pdev->dev;
	struct be_tx_obj *txo;
	struct sk_buff *skb;
	struct tcphdr *tcphdr;
	struct udphdr *udphdr;
	u32 *entry;
	int status;
	int i, j;

	for_all_tx_queues(adapter, txo, i) {
		dev_info(dev, "TXQ Dump: %d H: %d T: %d used: %d, qid: 0x%x\n",
			 i, txo->q.head, txo->q.tail,
			 atomic_read(&txo->q.used), txo->q.id);

		entry = txo->q.dma_mem.va;
		for (j = 0; j < TX_Q_LEN * 4; j += 4) {
			if (entry[j] != 0 || entry[j + 1] != 0 ||
			    entry[j + 2] != 0 || entry[j + 3] != 0) {
				dev_info(dev, "Entry %d 0x%x 0x%x 0x%x 0x%x\n",
					 j, entry[j], entry[j + 1],
					 entry[j + 2], entry[j + 3]);
			}
		}

		entry = txo->cq.dma_mem.va;
		dev_info(dev, "TXCQ Dump: %d H: %d T: %d used: %d\n",
			 i, txo->cq.head, txo->cq.tail,
			 atomic_read(&txo->cq.used));
		for (j = 0; j < TX_CQ_LEN * 4; j += 4) {
			if (entry[j] != 0 || entry[j + 1] != 0 ||
			    entry[j + 2] != 0 || entry[j + 3] != 0) {
				dev_info(dev, "Entry %d 0x%x 0x%x 0x%x 0x%x\n",
					 j, entry[j], entry[j + 1],
					 entry[j + 2], entry[j + 3]);
			}
		}

		for (j = 0; j < TX_Q_LEN; j++) {
			if (txo->sent_skb_list[j]) {
				skb = txo->sent_skb_list[j];
				if (ip_hdr(skb)->protocol == IPPROTO_TCP) {
					tcphdr = tcp_hdr(skb);
					dev_info(dev, "TCP source port %d\n",
						 ntohs(tcphdr->source));
					dev_info(dev, "TCP dest port %d\n",
						 ntohs(tcphdr->dest));
					dev_info(dev, "TCP sequence num %d\n",
						 ntohs(tcphdr->seq));
					dev_info(dev, "TCP ack_seq %d\n",
						 ntohs(tcphdr->ack_seq));
				} else if (ip_hdr(skb)->protocol ==
					   IPPROTO_UDP) {
					udphdr = udp_hdr(skb);
					dev_info(dev, "UDP source port %d\n",
						 ntohs(udphdr->source));
					dev_info(dev, "UDP dest port %d\n",
						 ntohs(udphdr->dest));
				}
				dev_info(dev, "skb[%d] %p len %d proto 0x%x\n",
					 j, skb, skb->len, skb->protocol);
			}
		}
	}

	if (lancer_chip(adapter)) {
		dev_info(dev, "Initiating reset due to tx timeout\n");
		dev_info(dev, "Resetting adapter\n");
		status = lancer_physdev_ctrl(adapter,
					     PHYSDEV_CONTROL_FW_RESET_MASK);
		if (status)
			dev_err(dev, "Reset failed .. Reboot server\n");
	}
}
static inline bool be_in_all_promisc(struct be_adapter *adapter)
{
	return (adapter->if_flags & BE_IF_FLAGS_ALL_PROMISCUOUS) ==
			BE_IF_FLAGS_ALL_PROMISCUOUS;
}
static int be_set_vlan_promisc(struct be_adapter *adapter)
{
	struct device *dev = &adapter->pdev->dev;
	int status;

	if (adapter->if_flags & BE_IF_FLAGS_VLAN_PROMISCUOUS)
		return 0;

	status = be_cmd_rx_filter(adapter, BE_IF_FLAGS_VLAN_PROMISCUOUS, ON);
	if (!status) {
		dev_info(dev, "Enabled VLAN promiscuous mode\n");
		adapter->if_flags |= BE_IF_FLAGS_VLAN_PROMISCUOUS;
	} else
		dev_err(dev, "Failed to enable VLAN promiscuous mode\n");

	return status;
}
static int be_clear_vlan_promisc(struct be_adapter *adapter)
{
	struct device *dev = &adapter->pdev->dev;
	int status;

	status = be_cmd_rx_filter(adapter, BE_IF_FLAGS_VLAN_PROMISCUOUS, OFF);
	if (!status) {
		dev_info(dev, "Disabling VLAN promiscuous mode\n");
		adapter->if_flags &= ~BE_IF_FLAGS_VLAN_PROMISCUOUS;
	}
	return status;
}
/*
 * A max of 64 (BE_NUM_VLANS_SUPPORTED) vlans can be configured in BE.
 * If the user configures more, place BE in vlan promiscuous mode.
 */
static int be_vid_config(struct be_adapter *adapter)
{
	struct device *dev = &adapter->pdev->dev;
	u16 vids[BE_NUM_VLANS_SUPPORTED];
	u16 num = 0, i = 0;
	int status = 0;

	/* No need to change the VLAN state if the I/F is in promiscuous */
	if (adapter->netdev->flags & IFF_PROMISC)
		return 0;

	if (adapter->vlans_added > be_max_vlans(adapter))
		return be_set_vlan_promisc(adapter);

	if (adapter->if_flags & BE_IF_FLAGS_VLAN_PROMISCUOUS) {
		status = be_clear_vlan_promisc(adapter);
		if (status)
			return status;
	}
	/* Construct VLAN Table to give to HW */
	for_each_set_bit(i, adapter->vids, VLAN_N_VID)
		vids[num++] = cpu_to_le16(i);

	status = be_cmd_vlan_config(adapter, adapter->if_handle, vids, num, 0);
	if (status) {
		dev_err(dev, "Setting HW VLAN filtering failed\n");
		/* Set to VLAN promisc mode as setting VLAN filter failed */
		if (addl_status(status) == MCC_ADDL_STATUS_INSUFFICIENT_VLANS ||
		    addl_status(status) ==
				MCC_ADDL_STATUS_INSUFFICIENT_RESOURCES)
			return be_set_vlan_promisc(adapter);
	}
	return status;
}
static int be_vlan_add_vid(struct net_device *netdev, __be16 proto, u16 vid)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	int status = 0;

	mutex_lock(&adapter->rx_filter_lock);

	/* Packets with VID 0 are always received by Lancer by default */
	if (lancer_chip(adapter) && vid == 0)
		goto done;

	if (test_bit(vid, adapter->vids))
		goto done;

	set_bit(vid, adapter->vids);
	adapter->vlans_added++;

	status = be_vid_config(adapter);
done:
	mutex_unlock(&adapter->rx_filter_lock);
	return status;
}
static int be_vlan_rem_vid(struct net_device *netdev, __be16 proto, u16 vid)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	int status = 0;

	mutex_lock(&adapter->rx_filter_lock);

	/* Packets with VID 0 are always received by Lancer by default */
	if (lancer_chip(adapter) && vid == 0)
		goto done;

	if (!test_bit(vid, adapter->vids))
		goto done;

	clear_bit(vid, adapter->vids);
	adapter->vlans_added--;

	status = be_vid_config(adapter);
done:
	mutex_unlock(&adapter->rx_filter_lock);
	return status;
}
static void be_set_all_promisc(struct be_adapter *adapter)
{
	be_cmd_rx_filter(adapter, BE_IF_FLAGS_ALL_PROMISCUOUS, ON);
	adapter->if_flags |= BE_IF_FLAGS_ALL_PROMISCUOUS;
}

static void be_set_mc_promisc(struct be_adapter *adapter)
{
	int status;

	if (adapter->if_flags & BE_IF_FLAGS_MCAST_PROMISCUOUS)
		return;

	status = be_cmd_rx_filter(adapter, BE_IF_FLAGS_MCAST_PROMISCUOUS, ON);
	if (!status)
		adapter->if_flags |= BE_IF_FLAGS_MCAST_PROMISCUOUS;
}

static void be_set_uc_promisc(struct be_adapter *adapter)
{
	int status;

	if (adapter->if_flags & BE_IF_FLAGS_PROMISCUOUS)
		return;

	status = be_cmd_rx_filter(adapter, BE_IF_FLAGS_PROMISCUOUS, ON);
	if (!status)
		adapter->if_flags |= BE_IF_FLAGS_PROMISCUOUS;
}

static void be_clear_uc_promisc(struct be_adapter *adapter)
{
	int status;

	if (!(adapter->if_flags & BE_IF_FLAGS_PROMISCUOUS))
		return;

	status = be_cmd_rx_filter(adapter, BE_IF_FLAGS_PROMISCUOUS, OFF);
	if (!status)
		adapter->if_flags &= ~BE_IF_FLAGS_PROMISCUOUS;
}
/* The below 2 functions are the callback args for __dev_mc_sync/dev_uc_sync().
 * We use a single callback function for both sync and unsync. We really don't
 * add/remove addresses through this callback. But, we use it to detect changes
 * to the uc/mc lists. The entire uc/mc list is programmed in be_set_rx_mode().
 */
static int be_uc_list_update(struct net_device *netdev,
			     const unsigned char *addr)
{
	struct be_adapter *adapter = netdev_priv(netdev);

	adapter->update_uc_list = true;
	return 0;
}

static int be_mc_list_update(struct net_device *netdev,
			     const unsigned char *addr)
{
	struct be_adapter *adapter = netdev_priv(netdev);

	adapter->update_mc_list = true;
	return 0;
}
static void be_set_mc_list(struct be_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	struct netdev_hw_addr *ha;
	bool mc_promisc = false;
	int status;

	netif_addr_lock_bh(netdev);
	__dev_mc_sync(netdev, be_mc_list_update, be_mc_list_update);

	if (netdev->flags & IFF_PROMISC) {
		adapter->update_mc_list = false;
	} else if (netdev->flags & IFF_ALLMULTI ||
		   netdev_mc_count(netdev) > be_max_mc(adapter)) {
		/* Enable multicast promisc if num configured exceeds
		 * what we support
		 */
		mc_promisc = true;
		adapter->update_mc_list = false;
	} else if (adapter->if_flags & BE_IF_FLAGS_MCAST_PROMISCUOUS) {
		/* Update mc-list unconditionally if the iface was previously
		 * in mc-promisc mode and now is out of that mode.
		 */
		adapter->update_mc_list = true;
	}

	if (adapter->update_mc_list) {
		int i = 0;

		/* cache the mc-list in adapter */
		netdev_for_each_mc_addr(ha, netdev) {
			ether_addr_copy(adapter->mc_list[i].mac, ha->addr);
			i++;
		}
		adapter->mc_count = netdev_mc_count(netdev);
	}
	netif_addr_unlock_bh(netdev);

	if (mc_promisc) {
		be_set_mc_promisc(adapter);
	} else if (adapter->update_mc_list) {
		status = be_cmd_rx_filter(adapter, BE_IF_FLAGS_MULTICAST, ON);
		if (!status)
			adapter->if_flags &= ~BE_IF_FLAGS_MCAST_PROMISCUOUS;
		else
			be_set_mc_promisc(adapter);

		adapter->update_mc_list = false;
	}
}

static void be_clear_mc_list(struct be_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;

	__dev_mc_unsync(netdev, NULL);
	be_cmd_rx_filter(adapter, BE_IF_FLAGS_MULTICAST, OFF);
	adapter->mc_count = 0;
}
static int be_uc_mac_add(struct be_adapter *adapter, int uc_idx)
{
	if (ether_addr_equal(adapter->uc_list[uc_idx].mac, adapter->dev_mac)) {
		adapter->pmac_id[uc_idx + 1] = adapter->pmac_id[0];
		return 0;
	}

	return be_cmd_pmac_add(adapter, adapter->uc_list[uc_idx].mac,
			       adapter->if_handle,
			       &adapter->pmac_id[uc_idx + 1], 0);
}

static void be_uc_mac_del(struct be_adapter *adapter, int pmac_id)
{
	if (pmac_id == adapter->pmac_id[0])
		return;

	be_cmd_pmac_del(adapter, adapter->if_handle, pmac_id, 0);
}
static void be_set_uc_list(struct be_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	struct netdev_hw_addr *ha;
	bool uc_promisc = false;
	int curr_uc_macs = 0, i;

	netif_addr_lock_bh(netdev);
	__dev_uc_sync(netdev, be_uc_list_update, be_uc_list_update);

	if (netdev->flags & IFF_PROMISC) {
		adapter->update_uc_list = false;
	} else if (netdev_uc_count(netdev) > (be_max_uc(adapter) - 1)) {
		uc_promisc = true;
		adapter->update_uc_list = false;
	} else if (adapter->if_flags & BE_IF_FLAGS_PROMISCUOUS) {
		/* Update uc-list unconditionally if the iface was previously
		 * in uc-promisc mode and now is out of that mode.
		 */
		adapter->update_uc_list = true;
	}

	if (adapter->update_uc_list) {
		/* cache the uc-list in adapter array */
		i = 0;
		netdev_for_each_uc_addr(ha, netdev) {
			ether_addr_copy(adapter->uc_list[i].mac, ha->addr);
			i++;
		}
		curr_uc_macs = netdev_uc_count(netdev);
	}
	netif_addr_unlock_bh(netdev);

	if (uc_promisc) {
		be_set_uc_promisc(adapter);
	} else if (adapter->update_uc_list) {
		be_clear_uc_promisc(adapter);

		for (i = 0; i < adapter->uc_macs; i++)
			be_uc_mac_del(adapter, adapter->pmac_id[i + 1]);

		for (i = 0; i < curr_uc_macs; i++)
			be_uc_mac_add(adapter, i);
		adapter->uc_macs = curr_uc_macs;
		adapter->update_uc_list = false;
	}
}

static void be_clear_uc_list(struct be_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	int i;

	__dev_uc_unsync(netdev, NULL);
	for (i = 0; i < adapter->uc_macs; i++)
		be_uc_mac_del(adapter, adapter->pmac_id[i + 1]);

	adapter->uc_macs = 0;
}
static void __be_set_rx_mode(struct be_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;

	mutex_lock(&adapter->rx_filter_lock);

	if (netdev->flags & IFF_PROMISC) {
		if (!be_in_all_promisc(adapter))
			be_set_all_promisc(adapter);
	} else if (be_in_all_promisc(adapter)) {
		/* We need to re-program the vlan-list or clear
		 * vlan-promisc mode (if needed) when the interface
		 * comes out of promisc mode.
		 */
		be_vid_config(adapter);
	}

	be_set_uc_list(adapter);
	be_set_mc_list(adapter);

	mutex_unlock(&adapter->rx_filter_lock);
}

static void be_work_set_rx_mode(struct work_struct *work)
{
	struct be_cmd_work *cmd_work =
				container_of(work, struct be_cmd_work, work);

	__be_set_rx_mode(cmd_work->adapter);
	kfree(cmd_work);
}
static int be_set_vf_mac(struct net_device *netdev, int vf, u8 *mac)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct be_vf_cfg *vf_cfg = &adapter->vf_cfg[vf];
	int status;

	if (!sriov_enabled(adapter))
		return -EPERM;

	if (!is_valid_ether_addr(mac) || vf >= adapter->num_vfs)
		return -EINVAL;

	/* Proceed further only if the user-provided MAC is different
	 * from the active MAC
	 */
	if (ether_addr_equal(mac, vf_cfg->mac_addr))
		return 0;

	if (BEx_chip(adapter)) {
		be_cmd_pmac_del(adapter, vf_cfg->if_handle, vf_cfg->pmac_id,
				vf + 1);

		status = be_cmd_pmac_add(adapter, mac, vf_cfg->if_handle,
					 &vf_cfg->pmac_id, vf + 1);
	} else {
		status = be_cmd_set_mac(adapter, mac, vf_cfg->if_handle,
					vf + 1);
	}

	if (status) {
		dev_err(&adapter->pdev->dev, "MAC %pM set on VF %d Failed: %#x",
			mac, vf, status);
		return be_cmd_status(status);
	}

	ether_addr_copy(vf_cfg->mac_addr, mac);

	return 0;
}
static int be_get_vf_config(struct net_device *netdev, int vf,
			    struct ifla_vf_info *vi)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct be_vf_cfg *vf_cfg = &adapter->vf_cfg[vf];

	if (!sriov_enabled(adapter))
		return -EPERM;

	if (vf >= adapter->num_vfs)
		return -EINVAL;

	vi->vf = vf;
	vi->max_tx_rate = vf_cfg->tx_rate;
	vi->min_tx_rate = 0;
	vi->vlan = vf_cfg->vlan_tag & VLAN_VID_MASK;
	vi->qos = vf_cfg->vlan_tag >> VLAN_PRIO_SHIFT;
	memcpy(&vi->mac, vf_cfg->mac_addr, ETH_ALEN);
	vi->linkstate = adapter->vf_cfg[vf].plink_tracking;
	vi->spoofchk = adapter->vf_cfg[vf].spoofchk;

	return 0;
}
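/* TVT (Transparent VLAN Tagging): the PF forces a VLAN tag onto all of a
 * VF's traffic. While TVT is on, the VF is also stripped of the FILTMGMT
 * privilege so it cannot program VLAN filters of its own;
 * be_clear_vf_tvt() restores that privilege.
 */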
static int be_set_vf_tvt(struct be_adapter *adapter, int vf, u16 vlan)
{
	struct be_vf_cfg *vf_cfg = &adapter->vf_cfg[vf];
	u16 vids[BE_NUM_VLANS_SUPPORTED];
	int vf_if_id = vf_cfg->if_handle;
	int status;

	/* Enable Transparent VLAN Tagging */
	status = be_cmd_set_hsw_config(adapter, vlan, vf + 1, vf_if_id, 0, 0);
	if (status)
		return status;

	/* Clear pre-programmed VLAN filters on VF if any, if TVT is enabled */
	vids[0] = 0;
	status = be_cmd_vlan_config(adapter, vf_if_id, vids, 1, vf + 1);
	if (!status)
		dev_info(&adapter->pdev->dev,
			 "Cleared guest VLANs on VF%d", vf);

	/* After TVT is enabled, disallow VFs to program VLAN filters */
	if (vf_cfg->privileges & BE_PRIV_FILTMGMT) {
		status = be_cmd_set_fn_privileges(adapter, vf_cfg->privileges &
						  ~BE_PRIV_FILTMGMT, vf + 1);
		if (!status)
			vf_cfg->privileges &= ~BE_PRIV_FILTMGMT;
	}
	return 0;
}
static int be_clear_vf_tvt(struct be_adapter *adapter, int vf)
{
	struct be_vf_cfg *vf_cfg = &adapter->vf_cfg[vf];
	struct device *dev = &adapter->pdev->dev;
	int status;

	/* Reset Transparent VLAN Tagging. */
	status = be_cmd_set_hsw_config(adapter, BE_RESET_VLAN_TAG_ID, vf + 1,
				       vf_cfg->if_handle, 0, 0);
	if (status)
		return status;

	/* Allow VFs to program VLAN filtering */
	if (!(vf_cfg->privileges & BE_PRIV_FILTMGMT)) {
		status = be_cmd_set_fn_privileges(adapter, vf_cfg->privileges |
						  BE_PRIV_FILTMGMT, vf + 1);
		if (!status) {
			vf_cfg->privileges |= BE_PRIV_FILTMGMT;
			dev_info(dev, "VF%d: FILTMGMT priv enabled", vf);
		}
	}

	dev_info(dev,
		 "Disable/re-enable i/f in VM to clear Transparent VLAN tag");
	return 0;
}
static int be_set_vf_vlan(struct net_device *netdev, int vf, u16 vlan, u8 qos,
			  __be16 vlan_proto)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct be_vf_cfg *vf_cfg = &adapter->vf_cfg[vf];
	int status;

	if (!sriov_enabled(adapter))
		return -EPERM;

	if (vf >= adapter->num_vfs || vlan > 4095 || qos > 7)
		return -EINVAL;

	if (vlan_proto != htons(ETH_P_8021Q))
		return -EPROTONOSUPPORT;

	if (vlan || qos) {
		vlan |= qos << VLAN_PRIO_SHIFT;
		status = be_set_vf_tvt(adapter, vf, vlan);
	} else {
		status = be_clear_vf_tvt(adapter, vf);
	}

	if (status) {
		dev_err(&adapter->pdev->dev,
			"VLAN %d config on VF %d failed : %#x\n", vlan, vf,
			status);
		return be_cmd_status(status);
	}

	vf_cfg->vlan_tag = vlan;
	return 0;
}
static int be_set_vf_tx_rate(struct net_device *netdev, int vf,
			     int min_tx_rate, int max_tx_rate)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct device *dev = &adapter->pdev->dev;
	int percent_rate, status = 0;
	u16 link_speed = 0;
	u8 link_status;

	if (!sriov_enabled(adapter))
		return -EPERM;

	if (vf >= adapter->num_vfs)
		return -EINVAL;

	if (min_tx_rate)
		return -EINVAL;

	if (!max_tx_rate)
		goto config_qos;

	status = be_cmd_link_status_query(adapter, &link_speed,
					  &link_status, 0);
	if (status)
		goto err;

	if (!link_status) {
		dev_err(dev, "TX-rate setting not allowed when link is down\n");
		status = -ENETDOWN;
		goto err;
	}

	if (max_tx_rate < 100 || max_tx_rate > link_speed) {
		dev_err(dev, "TX-rate must be between 100 and %d Mbps\n",
			link_speed);
		status = -EINVAL;
		goto err;
	}

	/* On Skyhawk the QOS setting must be done only as a % value */
	percent_rate = link_speed / 100;
	if (skyhawk_chip(adapter) && (max_tx_rate % percent_rate)) {
		dev_err(dev, "TX-rate must be a multiple of %d Mbps\n",
			percent_rate);
		status = -EINVAL;
		goto err;
	}

config_qos:
	status = be_cmd_config_qos(adapter, max_tx_rate, link_speed, vf + 1);
	if (status)
		goto err;

	adapter->vf_cfg[vf].tx_rate = max_tx_rate;
	return 0;

err:
	dev_err(dev, "TX-rate setting of %dMbps on VF%d failed\n",
		max_tx_rate, vf);
	return be_cmd_status(status);
}
static int be_set_vf_link_state(struct net_device *netdev, int vf,
				int link_state)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	int status;

	if (!sriov_enabled(adapter))
		return -EPERM;

	if (vf >= adapter->num_vfs)
		return -EINVAL;

	status = be_cmd_set_logical_link_config(adapter, link_state, vf+1);
	if (status) {
		dev_err(&adapter->pdev->dev,
			"Link state change on VF %d failed: %#x\n", vf, status);
		return be_cmd_status(status);
	}

	adapter->vf_cfg[vf].plink_tracking = link_state;

	return 0;
}
static int be_set_vf_spoofchk(struct net_device *netdev, int vf, bool enable)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct be_vf_cfg *vf_cfg = &adapter->vf_cfg[vf];
	u8 spoofchk;
	int status;

	if (!sriov_enabled(adapter))
		return -EPERM;

	if (vf >= adapter->num_vfs)
		return -EINVAL;

	if (BEx_chip(adapter))
		return -EOPNOTSUPP;

	if (enable == vf_cfg->spoofchk)
		return 0;

	spoofchk = enable ? ENABLE_MAC_SPOOFCHK : DISABLE_MAC_SPOOFCHK;

	status = be_cmd_set_hsw_config(adapter, 0, vf + 1, vf_cfg->if_handle,
				       0, spoofchk);
	if (status) {
		dev_err(&adapter->pdev->dev,
			"Spoofchk change on VF %d failed: %#x\n", vf, status);
		return be_cmd_status(status);
	}

	vf_cfg->spoofchk = enable;
	return 0;
}
static void be_aic_update(struct be_aic_obj *aic, u64 rx_pkts, u64 tx_pkts,
			  ulong now)
{
	aic->rx_pkts_prev = rx_pkts;
	aic->tx_reqs_prev = tx_pkts;
	aic->jiffies = now;
}
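/* Adaptive interrupt coalescing: derive a new event-queue delay from the
 * combined RX+TX packet rate observed since the last sample, so interrupt
 * latency shrinks under light load and batching grows under heavy load.
 * The result is clamped to the per-EQ min/max configured via ethtool.
 */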
2136 static int be_get_new_eqd(struct be_eq_obj *eqo)
2138 struct be_adapter *adapter = eqo->adapter;
2140 struct be_aic_obj *aic;
2141 struct be_rx_obj *rxo;
2142 struct be_tx_obj *txo;
2143 u64 rx_pkts = 0, tx_pkts = 0;
2148 aic = &adapter->aic_obj[eqo->idx];
2149 if (!adapter->aic_enabled) {
2156 for_all_rx_queues_on_eq(adapter, eqo, rxo, i) {
2158 start = u64_stats_fetch_begin_irq(&rxo->stats.sync);
2159 rx_pkts += rxo->stats.rx_pkts;
2160 } while (u64_stats_fetch_retry_irq(&rxo->stats.sync, start));
2163 for_all_tx_queues_on_eq(adapter, eqo, txo, i) {
2165 start = u64_stats_fetch_begin_irq(&txo->stats.sync);
2166 tx_pkts += txo->stats.tx_reqs;
2167 } while (u64_stats_fetch_retry_irq(&txo->stats.sync, start));
2170 /* Skip, if wrapped around or first calculation */
2172 if (!aic->jiffies || time_before(now, aic->jiffies) ||
2173 rx_pkts < aic->rx_pkts_prev ||
2174 tx_pkts < aic->tx_reqs_prev) {
2175 be_aic_update(aic, rx_pkts, tx_pkts, now);
2176 return aic->prev_eqd;
2179 delta = jiffies_to_msecs(now - aic->jiffies);
2181 return aic->prev_eqd;
2183 pps = (((u32)(rx_pkts - aic->rx_pkts_prev) * 1000) / delta) +
2184 (((u32)(tx_pkts - aic->tx_reqs_prev) * 1000) / delta);
2185 eqd = (pps / 15000) << 2;
2189 eqd = min_t(u32, eqd, aic->max_eqd);
2190 eqd = max_t(u32, eqd, aic->min_eqd);
2192 be_aic_update(aic, rx_pkts, tx_pkts, now);
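/*
 * Illustration of the scaling above (not driver logic; numbers are
 * made up): a combined RX+TX rate of 150,000 pkts/sec over the last
 * interval gives
 *
 *	pps = 150000;
 *	eqd = (pps / 15000) << 2;	// (10) << 2 = 40
 *
 * which is then clamped to the [aic->min_eqd, aic->max_eqd] range
 * before being used as the new EQ delay.
 */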
2197 /* For Skyhawk-R only */
2198 static u32 be_get_eq_delay_mult_enc(struct be_eq_obj *eqo)
2200 struct be_adapter *adapter = eqo->adapter;
2201 struct be_aic_obj *aic = &adapter->aic_obj[eqo->idx];
2202 ulong now = jiffies;
2206 if (!adapter->aic_enabled)
2209 if (jiffies_to_msecs(now - aic->jiffies) < 1)
2210 eqd = aic->prev_eqd;
2212 eqd = be_get_new_eqd(eqo);
2215 mult_enc = R2I_DLY_ENC_1;
2217 mult_enc = R2I_DLY_ENC_2;
2219 mult_enc = R2I_DLY_ENC_3;
2221 mult_enc = R2I_DLY_ENC_0;
2223 aic->prev_eqd = eqd;
2228 void be_eqd_update(struct be_adapter *adapter, bool force_update)
2230 struct be_set_eqd set_eqd[MAX_EVT_QS];
2231 struct be_aic_obj *aic;
2232 struct be_eq_obj *eqo;
2233 int i, num = 0, eqd;
2235 for_all_evt_queues(adapter, eqo, i) {
2236 aic = &adapter->aic_obj[eqo->idx];
2237 eqd = be_get_new_eqd(eqo);
2238 if (force_update || eqd != aic->prev_eqd) {
2239 set_eqd[num].delay_multiplier = (eqd * 65)/100;
2240 set_eqd[num].eq_id = eqo->q.id;
2241 aic->prev_eqd = eqd;
2247 be_cmd_modify_eqd(adapter, set_eqd, num);
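/*
 * Reader's note: the value handed to be_cmd_modify_eqd() is not the
 * raw eqd but the scaled delay_multiplier = (eqd * 65) / 100 computed
 * above; e.g. a new eqd of 100 is programmed as a multiplier of 65.
 */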
2250 static void be_rx_stats_update(struct be_rx_obj *rxo,
2251 struct be_rx_compl_info *rxcp)
2253 struct be_rx_stats *stats = rx_stats(rxo);
2255 u64_stats_update_begin(&stats->sync);
2257 stats->rx_bytes += rxcp->pkt_size;
2260 stats->rx_vxlan_offload_pkts++;
2261 if (rxcp->pkt_type == BE_MULTICAST_PACKET)
2262 stats->rx_mcast_pkts++;
2264 stats->rx_compl_err++;
2265 u64_stats_update_end(&stats->sync);
2268 static inline bool csum_passed(struct be_rx_compl_info *rxcp)
2270 /* L4 checksum is not reliable for non-TCP/UDP packets.
2271 * Also ignore ipcksm for ipv6 pkts
2273 return (rxcp->tcpf || rxcp->udpf) && rxcp->l4_csum &&
2274 (rxcp->ip_csum || rxcp->ipv6) && !rxcp->err;
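/*
 * Examples of the rule above (illustrative only):
 * - TCP/IPv4, l4_csum and ip_csum good, no error -> true
 * - UDP/IPv6, l4_csum good (ipcksm ignored for v6) -> true
 * - non-TCP/UDP (e.g. ICMP) -> false, so the stack verifies the
 *   checksum in software.
 */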
2277 static struct be_rx_page_info *get_rx_page_info(struct be_rx_obj *rxo)
2279 struct be_adapter *adapter = rxo->adapter;
2280 struct be_rx_page_info *rx_page_info;
2281 struct be_queue_info *rxq = &rxo->q;
2282 u32 frag_idx = rxq->tail;
2284 rx_page_info = &rxo->page_info_tbl[frag_idx];
2285 BUG_ON(!rx_page_info->page);
2287 if (rx_page_info->last_frag) {
2288 dma_unmap_page(&adapter->pdev->dev,
2289 dma_unmap_addr(rx_page_info, bus),
2290 adapter->big_page_size, DMA_FROM_DEVICE);
2291 rx_page_info->last_frag = false;
2293 dma_sync_single_for_cpu(&adapter->pdev->dev,
2294 dma_unmap_addr(rx_page_info, bus),
2295 rx_frag_size, DMA_FROM_DEVICE);
2298 queue_tail_inc(rxq);
2299 atomic_dec(&rxq->used);
2300 return rx_page_info;
2303 /* Throw away the data in the Rx completion */
2304 static void be_rx_compl_discard(struct be_rx_obj *rxo,
2305 struct be_rx_compl_info *rxcp)
2307 struct be_rx_page_info *page_info;
2308 u16 i, num_rcvd = rxcp->num_rcvd;
2310 for (i = 0; i < num_rcvd; i++) {
2311 page_info = get_rx_page_info(rxo);
2312 put_page(page_info->page);
2313 memset(page_info, 0, sizeof(*page_info));
2318 * skb_fill_rx_data forms a complete skb for an ether frame
2319 * indicated by rxcp.
2321 static void skb_fill_rx_data(struct be_rx_obj *rxo, struct sk_buff *skb,
2322 struct be_rx_compl_info *rxcp)
2324 struct be_rx_page_info *page_info;
2326 u16 hdr_len, curr_frag_len, remaining;
2329 page_info = get_rx_page_info(rxo);
2330 start = page_address(page_info->page) + page_info->page_offset;
2333 /* Copy data in the first descriptor of this completion */
2334 curr_frag_len = min(rxcp->pkt_size, rx_frag_size);
2336 skb->len = curr_frag_len;
2337 if (curr_frag_len <= BE_HDR_LEN) { /* tiny packet */
2338 memcpy(skb->data, start, curr_frag_len);
2339 /* Complete packet has now been moved to data */
2340 put_page(page_info->page);
2342 skb->tail += curr_frag_len;
2345 memcpy(skb->data, start, hdr_len);
2346 skb_shinfo(skb)->nr_frags = 1;
2347 skb_frag_set_page(skb, 0, page_info->page);
2348 skb_frag_off_set(&skb_shinfo(skb)->frags[0],
2349 page_info->page_offset + hdr_len);
2350 skb_frag_size_set(&skb_shinfo(skb)->frags[0],
2351 curr_frag_len - hdr_len);
2352 skb->data_len = curr_frag_len - hdr_len;
2353 skb->truesize += rx_frag_size;
2354 skb->tail += hdr_len;
2356 page_info->page = NULL;
2358 if (rxcp->pkt_size <= rx_frag_size) {
2359 BUG_ON(rxcp->num_rcvd != 1);
2363 /* More frags present for this completion */
2364 remaining = rxcp->pkt_size - curr_frag_len;
2365 for (i = 1, j = 0; i < rxcp->num_rcvd; i++) {
2366 page_info = get_rx_page_info(rxo);
2367 curr_frag_len = min(remaining, rx_frag_size);
2369 /* Coalesce all frags from the same physical page in one slot */
2370 if (page_info->page_offset == 0) {
2373 skb_frag_set_page(skb, j, page_info->page);
2374 skb_frag_off_set(&skb_shinfo(skb)->frags[j],
2375 page_info->page_offset);
2376 skb_frag_size_set(&skb_shinfo(skb)->frags[j], 0);
2377 skb_shinfo(skb)->nr_frags++;
2379 put_page(page_info->page);
2382 skb_frag_size_add(&skb_shinfo(skb)->frags[j], curr_frag_len);
2383 skb->len += curr_frag_len;
2384 skb->data_len += curr_frag_len;
2385 skb->truesize += rx_frag_size;
2386 remaining -= curr_frag_len;
2387 page_info->page = NULL;
2389 BUG_ON(j > MAX_SKB_FRAGS);
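/*
 * Worked example (illustrative, assuming the default rx_frag_size of
 * 2048): a 5000-byte frame arriving in three fragments is assembled
 * above as
 * - frag 0: hdr_len bytes copied into skb->data (linear part), the
 *   remaining 2048 - hdr_len bytes left as page frag 0
 * - frag 1: 2048 bytes appended (coalesced into the same frag slot
 *   when it comes from the same physical page)
 * - frag 2: the trailing 904 bytes
 * leaving skb->len == 5000 and skb->data_len == 5000 - hdr_len.
 */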
2392 /* Process the RX completion indicated by rxcp when GRO is disabled */
2393 static void be_rx_compl_process(struct be_rx_obj *rxo, struct napi_struct *napi,
2394 struct be_rx_compl_info *rxcp)
2396 struct be_adapter *adapter = rxo->adapter;
2397 struct net_device *netdev = adapter->netdev;
2398 struct sk_buff *skb;
2400 skb = netdev_alloc_skb_ip_align(netdev, BE_RX_SKB_ALLOC_SIZE);
2401 if (unlikely(!skb)) {
2402 rx_stats(rxo)->rx_drops_no_skbs++;
2403 be_rx_compl_discard(rxo, rxcp);
2407 skb_fill_rx_data(rxo, skb, rxcp);
2409 if (likely((netdev->features & NETIF_F_RXCSUM) && csum_passed(rxcp)))
2410 skb->ip_summed = CHECKSUM_UNNECESSARY;
2412 skb_checksum_none_assert(skb);
2414 skb->protocol = eth_type_trans(skb, netdev);
2415 skb_record_rx_queue(skb, rxo - &adapter->rx_obj[0]);
2416 if (netdev->features & NETIF_F_RXHASH)
2417 skb_set_hash(skb, rxcp->rss_hash, PKT_HASH_TYPE_L3);
2419 skb->csum_level = rxcp->tunneled;
2420 skb_mark_napi_id(skb, napi);
2423 __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), rxcp->vlan_tag);
2425 netif_receive_skb(skb);
2428 /* Process the RX completion indicated by rxcp when GRO is enabled */
2429 static void be_rx_compl_process_gro(struct be_rx_obj *rxo,
2430 struct napi_struct *napi,
2431 struct be_rx_compl_info *rxcp)
2433 struct be_adapter *adapter = rxo->adapter;
2434 struct be_rx_page_info *page_info;
2435 struct sk_buff *skb = NULL;
2436 u16 remaining, curr_frag_len;
2439 skb = napi_get_frags(napi);
2441 be_rx_compl_discard(rxo, rxcp);
2445 remaining = rxcp->pkt_size;
2446 for (i = 0, j = -1; i < rxcp->num_rcvd; i++) {
2447 page_info = get_rx_page_info(rxo);
2449 curr_frag_len = min(remaining, rx_frag_size);
2451 /* Coalesce all frags from the same physical page in one slot */
2452 if (i == 0 || page_info->page_offset == 0) {
2453 /* First frag or Fresh page */
2455 skb_frag_set_page(skb, j, page_info->page);
2456 skb_frag_off_set(&skb_shinfo(skb)->frags[j],
2457 page_info->page_offset);
2458 skb_frag_size_set(&skb_shinfo(skb)->frags[j], 0);
2460 put_page(page_info->page);
2462 skb_frag_size_add(&skb_shinfo(skb)->frags[j], curr_frag_len);
2463 skb->truesize += rx_frag_size;
2464 remaining -= curr_frag_len;
2465 memset(page_info, 0, sizeof(*page_info));
2467 BUG_ON(j > MAX_SKB_FRAGS);
2469 skb_shinfo(skb)->nr_frags = j + 1;
2470 skb->len = rxcp->pkt_size;
2471 skb->data_len = rxcp->pkt_size;
2472 skb->ip_summed = CHECKSUM_UNNECESSARY;
2473 skb_record_rx_queue(skb, rxo - &adapter->rx_obj[0]);
2474 if (adapter->netdev->features & NETIF_F_RXHASH)
2475 skb_set_hash(skb, rxcp->rss_hash, PKT_HASH_TYPE_L3);
2477 skb->csum_level = rxcp->tunneled;
2480 __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), rxcp->vlan_tag);
2482 napi_gro_frags(napi);
2485 static void be_parse_rx_compl_v1(struct be_eth_rx_compl *compl,
2486 struct be_rx_compl_info *rxcp)
2488 rxcp->pkt_size = GET_RX_COMPL_V1_BITS(pktsize, compl);
2489 rxcp->vlanf = GET_RX_COMPL_V1_BITS(vtp, compl);
2490 rxcp->err = GET_RX_COMPL_V1_BITS(err, compl);
2491 rxcp->tcpf = GET_RX_COMPL_V1_BITS(tcpf, compl);
2492 rxcp->udpf = GET_RX_COMPL_V1_BITS(udpf, compl);
2493 rxcp->ip_csum = GET_RX_COMPL_V1_BITS(ipcksm, compl);
2494 rxcp->l4_csum = GET_RX_COMPL_V1_BITS(l4_cksm, compl);
2495 rxcp->ipv6 = GET_RX_COMPL_V1_BITS(ip_version, compl);
2496 rxcp->num_rcvd = GET_RX_COMPL_V1_BITS(numfrags, compl);
2497 rxcp->pkt_type = GET_RX_COMPL_V1_BITS(cast_enc, compl);
2498 rxcp->rss_hash = GET_RX_COMPL_V1_BITS(rsshash, compl);
2500 rxcp->qnq = GET_RX_COMPL_V1_BITS(qnq, compl);
2501 rxcp->vlan_tag = GET_RX_COMPL_V1_BITS(vlan_tag, compl);
2503 rxcp->port = GET_RX_COMPL_V1_BITS(port, compl);
2505 rxcp->tunneled = GET_RX_COMPL_V1_BITS(tunneled, compl);
2508 static void be_parse_rx_compl_v0(struct be_eth_rx_compl *compl,
2509 struct be_rx_compl_info *rxcp)
2511 rxcp->pkt_size = GET_RX_COMPL_V0_BITS(pktsize, compl);
2512 rxcp->vlanf = GET_RX_COMPL_V0_BITS(vtp, compl);
2513 rxcp->err = GET_RX_COMPL_V0_BITS(err, compl);
2514 rxcp->tcpf = GET_RX_COMPL_V0_BITS(tcpf, compl);
2515 rxcp->udpf = GET_RX_COMPL_V0_BITS(udpf, compl);
2516 rxcp->ip_csum = GET_RX_COMPL_V0_BITS(ipcksm, compl);
2517 rxcp->l4_csum = GET_RX_COMPL_V0_BITS(l4_cksm, compl);
2518 rxcp->ipv6 = GET_RX_COMPL_V0_BITS(ip_version, compl);
2519 rxcp->num_rcvd = GET_RX_COMPL_V0_BITS(numfrags, compl);
2520 rxcp->pkt_type = GET_RX_COMPL_V0_BITS(cast_enc, compl);
2521 rxcp->rss_hash = GET_RX_COMPL_V0_BITS(rsshash, compl);
2523 rxcp->qnq = GET_RX_COMPL_V0_BITS(qnq, compl);
2524 rxcp->vlan_tag = GET_RX_COMPL_V0_BITS(vlan_tag, compl);
2526 rxcp->port = GET_RX_COMPL_V0_BITS(port, compl);
2527 rxcp->ip_frag = GET_RX_COMPL_V0_BITS(ip_frag, compl);
2530 static struct be_rx_compl_info *be_rx_compl_get(struct be_rx_obj *rxo)
2532 struct be_eth_rx_compl *compl = queue_tail_node(&rxo->cq);
2533 struct be_rx_compl_info *rxcp = &rxo->rxcp;
2534 struct be_adapter *adapter = rxo->adapter;
2536 /* For checking the valid bit it is Ok to use either definition as the
2537 * valid bit is at the same position in both v0 and v1 Rx compl */
2538 if (compl->dw[offsetof(struct amap_eth_rx_compl_v1, valid) / 32] == 0)
2542 be_dws_le_to_cpu(compl, sizeof(*compl));
2544 if (adapter->be3_native)
2545 be_parse_rx_compl_v1(compl, rxcp);
2547 be_parse_rx_compl_v0(compl, rxcp);
2553 /* In QNQ modes, if qnq bit is not set, then the packet was
2554 * tagged only with the transparent outer vlan-tag and must
2555 * not be treated as a vlan packet by the host
2557 if (be_is_qnq_mode(adapter) && !rxcp->qnq)
2560 if (!lancer_chip(adapter))
2561 rxcp->vlan_tag = swab16(rxcp->vlan_tag);
2563 if (adapter->pvid == (rxcp->vlan_tag & VLAN_VID_MASK) &&
2564 !test_bit(rxcp->vlan_tag, adapter->vids))
2568 /* As the compl has been parsed, reset it; we won't touch it again */
2569 compl->dw[offsetof(struct amap_eth_rx_compl_v1, valid) / 32] = 0;
2571 queue_tail_inc(&rxo->cq);
2575 static inline struct page *be_alloc_pages(u32 size, gfp_t gfp)
2577 u32 order = get_order(size);
2581 return alloc_pages(gfp, order);
2585 * Allocate a page, split it into fragments of size rx_frag_size and post as
2586 * receive buffers to BE
2588 static void be_post_rx_frags(struct be_rx_obj *rxo, gfp_t gfp, u32 frags_needed)
2590 struct be_adapter *adapter = rxo->adapter;
2591 struct be_rx_page_info *page_info = NULL, *prev_page_info = NULL;
2592 struct be_queue_info *rxq = &rxo->q;
2593 struct page *pagep = NULL;
2594 struct device *dev = &adapter->pdev->dev;
2595 struct be_eth_rx_d *rxd;
2596 u64 page_dmaaddr = 0, frag_dmaaddr;
2597 u32 posted, page_offset = 0, notify = 0;
2599 page_info = &rxo->page_info_tbl[rxq->head];
2600 for (posted = 0; posted < frags_needed && !page_info->page; posted++) {
2602 pagep = be_alloc_pages(adapter->big_page_size, gfp);
2603 if (unlikely(!pagep)) {
2604 rx_stats(rxo)->rx_post_fail++;
2607 page_dmaaddr = dma_map_page(dev, pagep, 0,
2608 adapter->big_page_size,
2610 if (dma_mapping_error(dev, page_dmaaddr)) {
2613 adapter->drv_stats.dma_map_errors++;
2619 page_offset += rx_frag_size;
2621 page_info->page_offset = page_offset;
2622 page_info->page = pagep;
2624 rxd = queue_head_node(rxq);
2625 frag_dmaaddr = page_dmaaddr + page_info->page_offset;
2626 rxd->fragpa_lo = cpu_to_le32(frag_dmaaddr & 0xFFFFFFFF);
2627 rxd->fragpa_hi = cpu_to_le32(upper_32_bits(frag_dmaaddr));
2629 /* Any space left in the current big page for another frag? */
2630 if ((page_offset + rx_frag_size + rx_frag_size) >
2631 adapter->big_page_size) {
2633 page_info->last_frag = true;
2634 dma_unmap_addr_set(page_info, bus, page_dmaaddr);
2636 dma_unmap_addr_set(page_info, bus, frag_dmaaddr);
2639 prev_page_info = page_info;
2640 queue_head_inc(rxq);
2641 page_info = &rxo->page_info_tbl[rxq->head];
2644 /* Mark the last frag of a page when we break out of the above loop
2645 * with no more slots available in the RXQ
2648 prev_page_info->last_frag = true;
2649 dma_unmap_addr_set(prev_page_info, bus, page_dmaaddr);
2653 atomic_add(posted, &rxq->used);
2654 if (rxo->rx_post_starved)
2655 rxo->rx_post_starved = false;
2657 notify = min(MAX_NUM_POST_ERX_DB, posted);
2658 be_rxq_notify(adapter, rxq->id, notify);
2661 } else if (atomic_read(&rxq->used) == 0) {
2662 /* Let be_worker replenish when memory is available */
2663 rxo->rx_post_starved = true;
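/*
 * Sizing note (illustrative): with 4K pages and the default
 * rx_frag_size of 2048, get_order(2048) == 0 so big_page_size is a
 * single 4K page, carved into two 2048-byte RX fragments. Only the
 * fragment flagged last_frag carries the DMA mapping of the whole
 * page, so get_rx_page_info() unmaps the page exactly once, when its
 * final fragment is consumed.
 */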
2667 static inline void be_update_tx_err(struct be_tx_obj *txo, u8 status)
2670 case BE_TX_COMP_HDR_PARSE_ERR:
2671 tx_stats(txo)->tx_hdr_parse_err++;
2673 case BE_TX_COMP_NDMA_ERR:
2674 tx_stats(txo)->tx_dma_err++;
2676 case BE_TX_COMP_ACL_ERR:
2677 tx_stats(txo)->tx_spoof_check_err++;
2682 static inline void lancer_update_tx_err(struct be_tx_obj *txo, u8 status)
2685 case LANCER_TX_COMP_LSO_ERR:
2686 tx_stats(txo)->tx_tso_err++;
2688 case LANCER_TX_COMP_HSW_DROP_MAC_ERR:
2689 case LANCER_TX_COMP_HSW_DROP_VLAN_ERR:
2690 tx_stats(txo)->tx_spoof_check_err++;
2692 case LANCER_TX_COMP_QINQ_ERR:
2693 tx_stats(txo)->tx_qinq_err++;
2695 case LANCER_TX_COMP_PARITY_ERR:
2696 tx_stats(txo)->tx_internal_parity_err++;
2698 case LANCER_TX_COMP_DMA_ERR:
2699 tx_stats(txo)->tx_dma_err++;
2701 case LANCER_TX_COMP_SGE_ERR:
2702 tx_stats(txo)->tx_sge_err++;
2707 static struct be_tx_compl_info *be_tx_compl_get(struct be_adapter *adapter,
2708 struct be_tx_obj *txo)
2710 struct be_queue_info *tx_cq = &txo->cq;
2711 struct be_tx_compl_info *txcp = &txo->txcp;
2712 struct be_eth_tx_compl *compl = queue_tail_node(tx_cq);
2714 if (compl->dw[offsetof(struct amap_eth_tx_compl, valid) / 32] == 0)
2717 /* Ensure load ordering of valid bit dword and other dwords below */
2719 be_dws_le_to_cpu(compl, sizeof(*compl));
2721 txcp->status = GET_TX_COMPL_BITS(status, compl);
2722 txcp->end_index = GET_TX_COMPL_BITS(wrb_index, compl);
2725 if (lancer_chip(adapter)) {
2726 lancer_update_tx_err(txo, txcp->status);
2727 /* Reset the adapter in case of TSO,
2728 * SGE or parity error
2730 if (txcp->status == LANCER_TX_COMP_LSO_ERR ||
2731 txcp->status == LANCER_TX_COMP_PARITY_ERR ||
2732 txcp->status == LANCER_TX_COMP_SGE_ERR)
2733 be_set_error(adapter, BE_ERROR_TX);
2735 be_update_tx_err(txo, txcp->status);
2739 if (be_check_error(adapter, BE_ERROR_TX))
2742 compl->dw[offsetof(struct amap_eth_tx_compl, valid) / 32] = 0;
2743 queue_tail_inc(tx_cq);
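/*
 * Sketch of the completion-ring handshake implemented above: poll the
 * 'valid' bit of the entry at the CQ tail; once it is set, order the
 * reads, byte-swap and parse the entry, clear the valid bit so the
 * slot reads as empty on the next wrap, and advance the tail.
 * be_rx_compl_get() follows the same pattern for the RX CQ.
 */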
2747 static u16 be_tx_compl_process(struct be_adapter *adapter,
2748 struct be_tx_obj *txo, u16 last_index)
2750 struct sk_buff **sent_skbs = txo->sent_skb_list;
2751 struct be_queue_info *txq = &txo->q;
2752 struct sk_buff *skb = NULL;
2753 bool unmap_skb_hdr = false;
2754 struct be_eth_wrb *wrb;
2759 if (sent_skbs[txq->tail]) {
2760 /* Free skb from prev req */
2761 if (skb)
2762 dev_consume_skb_any(skb);
2763 skb = sent_skbs[txq->tail];
2764 sent_skbs[txq->tail] = NULL;
2765 queue_tail_inc(txq); /* skip hdr wrb */
2767 unmap_skb_hdr = true;
2769 wrb = queue_tail_node(txq);
2770 frag_index = txq->tail;
2771 unmap_tx_frag(&adapter->pdev->dev, wrb,
2772 (unmap_skb_hdr && skb_headlen(skb)));
2773 unmap_skb_hdr = false;
2774 queue_tail_inc(txq);
2776 } while (frag_index != last_index);
2777 dev_consume_skb_any(skb);
2782 /* Return the number of events in the event queue */
2783 static inline int events_get(struct be_eq_obj *eqo)
2785 struct be_eq_entry *eqe;
2789 eqe = queue_tail_node(&eqo->q);
2796 queue_tail_inc(&eqo->q);
2802 /* Leaves the EQ in disarmed state */
2803 static void be_eq_clean(struct be_eq_obj *eqo)
2805 int num = events_get(eqo);
2807 be_eq_notify(eqo->adapter, eqo->q.id, false, true, num, 0);
2810 /* Free posted rx buffers that were not used */
2811 static void be_rxq_clean(struct be_rx_obj *rxo)
2813 struct be_queue_info *rxq = &rxo->q;
2814 struct be_rx_page_info *page_info;
2816 while (atomic_read(&rxq->used) > 0) {
2817 page_info = get_rx_page_info(rxo);
2818 put_page(page_info->page);
2819 memset(page_info, 0, sizeof(*page_info));
2821 BUG_ON(atomic_read(&rxq->used));
2826 static void be_rx_cq_clean(struct be_rx_obj *rxo)
2828 struct be_queue_info *rx_cq = &rxo->cq;
2829 struct be_rx_compl_info *rxcp;
2830 struct be_adapter *adapter = rxo->adapter;
2833 /* Consume pending rx completions.
2834 * Wait for the flush completion (identified by zero num_rcvd)
2835 * to arrive. Notify CQ even when there are no more CQ entries
2836 * for HW to flush partially coalesced CQ entries.
2837 * In Lancer, there is no need to wait for flush compl.
2840 rxcp = be_rx_compl_get(rxo);
2842 if (lancer_chip(adapter))
2845 if (flush_wait++ > 50 ||
2846 be_check_error(adapter,
2848 dev_warn(&adapter->pdev->dev,
2849 "did not receive flush compl\n");
2852 be_cq_notify(adapter, rx_cq->id, true, 0);
2855 be_rx_compl_discard(rxo, rxcp);
2856 be_cq_notify(adapter, rx_cq->id, false, 1);
2857 if (rxcp->num_rcvd == 0)
2862 /* After cleanup, leave the CQ in unarmed state */
2863 be_cq_notify(adapter, rx_cq->id, false, 0);
2866 static void be_tx_compl_clean(struct be_adapter *adapter)
2868 struct device *dev = &adapter->pdev->dev;
2869 u16 cmpl = 0, timeo = 0, num_wrbs = 0;
2870 struct be_tx_compl_info *txcp;
2871 struct be_queue_info *txq;
2872 u32 end_idx, notified_idx;
2873 struct be_tx_obj *txo;
2874 int i, pending_txqs;
2876 /* Stop polling for compls when HW has been silent for 10ms */
2878 pending_txqs = adapter->num_tx_qs;
2880 for_all_tx_queues(adapter, txo, i) {
2884 while ((txcp = be_tx_compl_get(adapter, txo))) {
2886 be_tx_compl_process(adapter, txo,
2891 be_cq_notify(adapter, txo->cq.id, false, cmpl);
2892 atomic_sub(num_wrbs, &txq->used);
2895 if (!be_is_tx_compl_pending(txo))
2899 if (pending_txqs == 0 || ++timeo > 10 ||
2900 be_check_error(adapter, BE_ERROR_HW))
2906 /* Free enqueued TX that was never notified to HW */
2907 for_all_tx_queues(adapter, txo, i) {
2910 if (atomic_read(&txq->used)) {
2911 dev_info(dev, "txq%d: cleaning %d pending tx-wrbs\n",
2912 i, atomic_read(&txq->used));
2913 notified_idx = txq->tail;
2914 end_idx = txq->tail;
2915 index_adv(&end_idx, atomic_read(&txq->used) - 1,
2917 /* Use the tx-compl process logic to handle requests
2918 * that were not sent to the HW.
2920 num_wrbs = be_tx_compl_process(adapter, txo, end_idx);
2921 atomic_sub(num_wrbs, &txq->used);
2922 BUG_ON(atomic_read(&txq->used));
2923 txo->pend_wrb_cnt = 0;
2924 /* Since hw was never notified of these requests, reset TXQ indices */
2927 txq->head = notified_idx;
2928 txq->tail = notified_idx;
2933 static void be_evt_queues_destroy(struct be_adapter *adapter)
2935 struct be_eq_obj *eqo;
2938 for_all_evt_queues(adapter, eqo, i) {
2939 if (eqo->q.created) {
2941 be_cmd_q_destroy(adapter, &eqo->q, QTYPE_EQ);
2942 netif_napi_del(&eqo->napi);
2943 free_cpumask_var(eqo->affinity_mask);
2945 be_queue_free(adapter, &eqo->q);
2949 static int be_evt_queues_create(struct be_adapter *adapter)
2951 struct be_queue_info *eq;
2952 struct be_eq_obj *eqo;
2953 struct be_aic_obj *aic;
2956 /* need enough EQs to service both RX and TX queues */
2957 adapter->num_evt_qs = min_t(u16, num_irqs(adapter),
2958 max(adapter->cfg_num_rx_irqs,
2959 adapter->cfg_num_tx_irqs));
2961 adapter->aic_enabled = true;
2963 for_all_evt_queues(adapter, eqo, i) {
2964 int numa_node = dev_to_node(&adapter->pdev->dev);
2966 aic = &adapter->aic_obj[i];
2967 eqo->adapter = adapter;
2969 aic->max_eqd = BE_MAX_EQD;
2972 rc = be_queue_alloc(adapter, eq, EVNT_Q_LEN,
2973 sizeof(struct be_eq_entry));
2977 rc = be_cmd_eq_create(adapter, eqo);
2981 if (!zalloc_cpumask_var(&eqo->affinity_mask, GFP_KERNEL))
2983 cpumask_set_cpu(cpumask_local_spread(i, numa_node),
2984 eqo->affinity_mask);
2985 netif_napi_add(adapter->netdev, &eqo->napi, be_poll,
2991 static void be_mcc_queues_destroy(struct be_adapter *adapter)
2993 struct be_queue_info *q;
2995 q = &adapter->mcc_obj.q;
2997 be_cmd_q_destroy(adapter, q, QTYPE_MCCQ);
2998 be_queue_free(adapter, q);
3000 q = &adapter->mcc_obj.cq;
3002 be_cmd_q_destroy(adapter, q, QTYPE_CQ);
3003 be_queue_free(adapter, q);
3006 /* Must be called only after TX qs are created as MCC shares TX EQ */
3007 static int be_mcc_queues_create(struct be_adapter *adapter)
3009 struct be_queue_info *q, *cq;
3011 cq = &adapter->mcc_obj.cq;
3012 if (be_queue_alloc(adapter, cq, MCC_CQ_LEN,
3013 sizeof(struct be_mcc_compl)))
3016 /* Use the default EQ for MCC completions */
3017 if (be_cmd_cq_create(adapter, cq, &mcc_eqo(adapter)->q, true, 0))
3020 q = &adapter->mcc_obj.q;
3021 if (be_queue_alloc(adapter, q, MCC_Q_LEN, sizeof(struct be_mcc_wrb)))
3022 goto mcc_cq_destroy;
3024 if (be_cmd_mccq_create(adapter, q, cq))
3030 be_queue_free(adapter, q);
3032 be_cmd_q_destroy(adapter, cq, QTYPE_CQ);
3034 be_queue_free(adapter, cq);
3039 static void be_tx_queues_destroy(struct be_adapter *adapter)
3041 struct be_queue_info *q;
3042 struct be_tx_obj *txo;
3045 for_all_tx_queues(adapter, txo, i) {
3048 be_cmd_q_destroy(adapter, q, QTYPE_TXQ);
3049 be_queue_free(adapter, q);
3053 be_cmd_q_destroy(adapter, q, QTYPE_CQ);
3054 be_queue_free(adapter, q);
3058 static int be_tx_qs_create(struct be_adapter *adapter)
3060 struct be_queue_info *cq;
3061 struct be_tx_obj *txo;
3062 struct be_eq_obj *eqo;
3065 adapter->num_tx_qs = min(adapter->num_evt_qs, adapter->cfg_num_tx_irqs);
3067 for_all_tx_queues(adapter, txo, i) {
3069 status = be_queue_alloc(adapter, cq, TX_CQ_LEN,
3070 sizeof(struct be_eth_tx_compl));
3074 u64_stats_init(&txo->stats.sync);
3075 u64_stats_init(&txo->stats.sync_compl);
3077 /* If num_evt_qs is less than num_tx_qs, then more than
3078 * one txq shares an eq
3080 eqo = &adapter->eq_obj[i % adapter->num_evt_qs];
3081 status = be_cmd_cq_create(adapter, cq, &eqo->q, false, 3);
3085 status = be_queue_alloc(adapter, &txo->q, TX_Q_LEN,
3086 sizeof(struct be_eth_wrb));
3090 status = be_cmd_txq_create(adapter, txo);
3094 netif_set_xps_queue(adapter->netdev, eqo->affinity_mask,
3098 dev_info(&adapter->pdev->dev, "created %d TX queue(s)\n",
3099 adapter->num_tx_qs);
3103 static void be_rx_cqs_destroy(struct be_adapter *adapter)
3105 struct be_queue_info *q;
3106 struct be_rx_obj *rxo;
3109 for_all_rx_queues(adapter, rxo, i) {
3112 be_cmd_q_destroy(adapter, q, QTYPE_CQ);
3113 be_queue_free(adapter, q);
3117 static int be_rx_cqs_create(struct be_adapter *adapter)
3119 struct be_queue_info *eq, *cq;
3120 struct be_rx_obj *rxo;
3123 adapter->num_rss_qs =
3124 min(adapter->num_evt_qs, adapter->cfg_num_rx_irqs);
3126 /* We'll use RSS only if at least 2 RSS rings are supported. */
3127 if (adapter->num_rss_qs < 2)
3128 adapter->num_rss_qs = 0;
3130 adapter->num_rx_qs = adapter->num_rss_qs + adapter->need_def_rxq;
3132 /* When the interface is not capable of RSS rings (and there is no
3133 * need to create a default RXQ) we'll still need one RXQ
3135 if (adapter->num_rx_qs == 0)
3136 adapter->num_rx_qs = 1;
3138 adapter->big_page_size = (1 << get_order(rx_frag_size)) * PAGE_SIZE;
3139 for_all_rx_queues(adapter, rxo, i) {
3140 rxo->adapter = adapter;
3142 rc = be_queue_alloc(adapter, cq, RX_CQ_LEN,
3143 sizeof(struct be_eth_rx_compl));
3147 u64_stats_init(&rxo->stats.sync);
3148 eq = &adapter->eq_obj[i % adapter->num_evt_qs].q;
3149 rc = be_cmd_cq_create(adapter, cq, eq, false, 3);
3154 dev_info(&adapter->pdev->dev,
3155 "created %d RX queue(s)\n", adapter->num_rx_qs);
3159 static irqreturn_t be_intx(int irq, void *dev)
3161 struct be_eq_obj *eqo = dev;
3162 struct be_adapter *adapter = eqo->adapter;
3165 /* IRQ is not expected when NAPI is scheduled as the EQ
3166 * will not be armed.
3167 * But, this can happen on Lancer INTx where it takes
3168 * a while to de-assert INTx or in BE2 where occasionally
3169 * an interrupt may be raised even when the EQ is unarmed.
3170 * If NAPI is already scheduled, then counting & notifying
3171 * events will orphan them.
3173 if (napi_schedule_prep(&eqo->napi)) {
3174 num_evts = events_get(eqo);
3175 __napi_schedule(&eqo->napi);
3177 eqo->spurious_intr = 0;
3179 be_eq_notify(adapter, eqo->q.id, false, true, num_evts, 0);
3181 /* Return IRQ_HANDLED only for the first spurious intr
3182 * after a valid intr to stop the kernel from branding
3183 * this irq as a bad one!
3185 if (num_evts || eqo->spurious_intr++ == 0)
3191 static irqreturn_t be_msix(int irq, void *dev)
3193 struct be_eq_obj *eqo = dev;
3195 be_eq_notify(eqo->adapter, eqo->q.id, false, true, 0, 0);
3196 napi_schedule(&eqo->napi);
3200 static inline bool do_gro(struct be_rx_compl_info *rxcp)
3202 return (rxcp->tcpf && !rxcp->err && rxcp->l4_csum) ? true : false;
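/* GRO is attempted only for error-free TCP completions with a good L4
 * checksum; everything else takes the regular be_rx_compl_process()
 * path.
 */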
3205 static int be_process_rx(struct be_rx_obj *rxo, struct napi_struct *napi,
3208 struct be_adapter *adapter = rxo->adapter;
3209 struct be_queue_info *rx_cq = &rxo->cq;
3210 struct be_rx_compl_info *rxcp;
3212 u32 frags_consumed = 0;
3214 for (work_done = 0; work_done < budget; work_done++) {
3215 rxcp = be_rx_compl_get(rxo);
3219 /* Is it a flush compl that has no data */
3220 if (unlikely(rxcp->num_rcvd == 0))
3223 /* Discard compl with partial DMA Lancer B0 */
3224 if (unlikely(!rxcp->pkt_size)) {
3225 be_rx_compl_discard(rxo, rxcp);
3229 /* On BE drop pkts that arrive due to imperfect filtering in
3230 * promiscuous mode on some SKUs
3232 if (unlikely(rxcp->port != adapter->port_num &&
3233 !lancer_chip(adapter))) {
3234 be_rx_compl_discard(rxo, rxcp);
3239 be_rx_compl_process_gro(rxo, napi, rxcp);
3241 be_rx_compl_process(rxo, napi, rxcp);
3244 frags_consumed += rxcp->num_rcvd;
3245 be_rx_stats_update(rxo, rxcp);
3249 be_cq_notify(adapter, rx_cq->id, true, work_done);
3251 /* When an rx-obj gets into post_starved state, just
3252 * let be_worker do the posting.
3254 if (atomic_read(&rxo->q.used) < RX_FRAGS_REFILL_WM &&
3255 !rxo->rx_post_starved)
3256 be_post_rx_frags(rxo, GFP_ATOMIC,
3257 max_t(u32, MAX_RX_POST,
3265 static void be_process_tx(struct be_adapter *adapter, struct be_tx_obj *txo,
3268 int num_wrbs = 0, work_done = 0;
3269 struct be_tx_compl_info *txcp;
3271 while ((txcp = be_tx_compl_get(adapter, txo))) {
3272 num_wrbs += be_tx_compl_process(adapter, txo, txcp->end_index);
3277 be_cq_notify(adapter, txo->cq.id, true, work_done);
3278 atomic_sub(num_wrbs, &txo->q.used);
3280 /* As Tx wrbs have been freed up, wake up netdev queue
3281 * if it was stopped due to lack of tx wrbs. */
3282 if (__netif_subqueue_stopped(adapter->netdev, idx) &&
3283 be_can_txq_wake(txo)) {
3284 netif_wake_subqueue(adapter->netdev, idx);
3287 u64_stats_update_begin(&tx_stats(txo)->sync_compl);
3288 tx_stats(txo)->tx_compl += work_done;
3289 u64_stats_update_end(&tx_stats(txo)->sync_compl);
3293 int be_poll(struct napi_struct *napi, int budget)
3295 struct be_eq_obj *eqo = container_of(napi, struct be_eq_obj, napi);
3296 struct be_adapter *adapter = eqo->adapter;
3297 int max_work = 0, work, i, num_evts;
3298 struct be_rx_obj *rxo;
3299 struct be_tx_obj *txo;
3302 num_evts = events_get(eqo);
3304 for_all_tx_queues_on_eq(adapter, eqo, txo, i)
3305 be_process_tx(adapter, txo, i);
3307 /* This loop will iterate twice for EQ0 in which
3308 * completions of the last RXQ (default one) are also processed.
3309 * For other EQs the loop iterates only once
3311 for_all_rx_queues_on_eq(adapter, eqo, rxo, i) {
3312 work = be_process_rx(rxo, napi, budget);
3313 max_work = max(work, max_work);
3316 if (is_mcc_eqo(eqo))
3317 be_process_mcc(adapter);
3319 if (max_work < budget) {
3320 napi_complete_done(napi, max_work);
3322 /* Skyhawk EQ_DB has a provision to set the rearm to interrupt
3323 * delay via a delay multiplier encoding value
3325 if (skyhawk_chip(adapter))
3326 mult_enc = be_get_eq_delay_mult_enc(eqo);
3328 be_eq_notify(adapter, eqo->q.id, true, false, num_evts,
3331 /* As we'll continue in polling mode, count and clear events */
3332 be_eq_notify(adapter, eqo->q.id, false, false, num_evts, 0);
3337 void be_detect_error(struct be_adapter *adapter)
3339 u32 ue_lo = 0, ue_hi = 0, ue_lo_mask = 0, ue_hi_mask = 0;
3340 u32 sliport_status = 0, sliport_err1 = 0, sliport_err2 = 0;
3341 struct device *dev = &adapter->pdev->dev;
3345 if (be_check_error(adapter, BE_ERROR_HW))
3348 if (lancer_chip(adapter)) {
3349 sliport_status = ioread32(adapter->db + SLIPORT_STATUS_OFFSET);
3350 if (sliport_status & SLIPORT_STATUS_ERR_MASK) {
3351 be_set_error(adapter, BE_ERROR_UE);
3352 sliport_err1 = ioread32(adapter->db +
3353 SLIPORT_ERROR1_OFFSET);
3354 sliport_err2 = ioread32(adapter->db +
3355 SLIPORT_ERROR2_OFFSET);
3356 /* Do not log error messages if it's a FW reset */
3357 if (sliport_err1 == SLIPORT_ERROR_FW_RESET1 &&
3358 sliport_err2 == SLIPORT_ERROR_FW_RESET2) {
3359 dev_info(dev, "Reset is in progress\n");
3361 dev_err(dev, "Error detected in the card\n");
3362 dev_err(dev, "ERR: sliport status 0x%x\n",
3364 dev_err(dev, "ERR: sliport error1 0x%x\n",
3366 dev_err(dev, "ERR: sliport error2 0x%x\n",
3371 ue_lo = ioread32(adapter->pcicfg + PCICFG_UE_STATUS_LOW);
3372 ue_hi = ioread32(adapter->pcicfg + PCICFG_UE_STATUS_HIGH);
3373 ue_lo_mask = ioread32(adapter->pcicfg +
3374 PCICFG_UE_STATUS_LOW_MASK);
3375 ue_hi_mask = ioread32(adapter->pcicfg +
3376 PCICFG_UE_STATUS_HI_MASK);
3378 ue_lo = (ue_lo & ~ue_lo_mask);
3379 ue_hi = (ue_hi & ~ue_hi_mask);
3381 if (ue_lo || ue_hi) {
3382 /* On certain platforms BE3 hardware can indicate
3383 * spurious UEs. In case of a UE in the chip,
3384 * the POST register correctly reports either a
3385 * FAT_LOG_START state (FW is currently dumping
3386 * FAT log data) or an ARMFW_UE state. Check for the
3387 * above states to ascertain if the UE is valid or not.
3389 if (BE3_chip(adapter)) {
3390 val = be_POST_stage_get(adapter);
3391 if ((val & POST_STAGE_FAT_LOG_START)
3392 != POST_STAGE_FAT_LOG_START &&
3393 (val & POST_STAGE_ARMFW_UE)
3394 != POST_STAGE_ARMFW_UE &&
3395 (val & POST_STAGE_RECOVERABLE_ERR)
3396 != POST_STAGE_RECOVERABLE_ERR)
3400 dev_err(dev, "Error detected in the adapter");
3401 be_set_error(adapter, BE_ERROR_UE);
3403 for (i = 0; ue_lo; ue_lo >>= 1, i++) {
3405 dev_err(dev, "UE: %s bit set\n",
3406 ue_status_low_desc[i]);
3408 for (i = 0; ue_hi; ue_hi >>= 1, i++) {
3410 dev_err(dev, "UE: %s bit set\n",
3411 ue_status_hi_desc[i]);
3417 static void be_msix_disable(struct be_adapter *adapter)
3419 if (msix_enabled(adapter)) {
3420 pci_disable_msix(adapter->pdev);
3421 adapter->num_msix_vec = 0;
3422 adapter->num_msix_roce_vec = 0;
3426 static int be_msix_enable(struct be_adapter *adapter)
3428 unsigned int i, max_roce_eqs;
3429 struct device *dev = &adapter->pdev->dev;
3432 /* If RoCE is supported, program the max number of vectors that
3433 * could be used for NIC and RoCE, else, just program the number
3434 * we'll use initially.
3436 if (be_roce_supported(adapter)) {
3438 max_roce_eqs = be_max_func_eqs(adapter) - be_max_nic_eqs(adapter);
3439 max_roce_eqs = min(max_roce_eqs, num_online_cpus());
3440 num_vec = be_max_any_irqs(adapter) + max_roce_eqs;
3442 num_vec = max(adapter->cfg_num_rx_irqs,
3443 adapter->cfg_num_tx_irqs);
3446 for (i = 0; i < num_vec; i++)
3447 adapter->msix_entries[i].entry = i;
3449 num_vec = pci_enable_msix_range(adapter->pdev, adapter->msix_entries,
3450 MIN_MSIX_VECTORS, num_vec);
3454 if (be_roce_supported(adapter) && num_vec > MIN_MSIX_VECTORS) {
3455 adapter->num_msix_roce_vec = num_vec / 2;
3456 dev_info(dev, "enabled %d MSI-x vector(s) for RoCE\n",
3457 adapter->num_msix_roce_vec);
3460 adapter->num_msix_vec = num_vec - adapter->num_msix_roce_vec;
3462 dev_info(dev, "enabled %d MSI-x vector(s) for NIC\n",
3463 adapter->num_msix_vec);
3467 dev_warn(dev, "MSIx enable failed\n");
3469 /* INTx is not supported in VFs, so fail probe if enable_msix fails */
3470 if (be_virtfn(adapter))
3475 static inline int be_msix_vec_get(struct be_adapter *adapter,
3476 struct be_eq_obj *eqo)
3478 return adapter->msix_entries[eqo->msix_idx].vector;
3481 static int be_msix_register(struct be_adapter *adapter)
3483 struct net_device *netdev = adapter->netdev;
3484 struct be_eq_obj *eqo;
3487 for_all_evt_queues(adapter, eqo, i) {
3488 sprintf(eqo->desc, "%s-q%d", netdev->name, i);
3489 vec = be_msix_vec_get(adapter, eqo);
3490 status = request_irq(vec, be_msix, 0, eqo->desc, eqo);
3494 irq_set_affinity_hint(vec, eqo->affinity_mask);
3499 for (i--; i >= 0; i--) {
3500 eqo = &adapter->eq_obj[i];
3501 free_irq(be_msix_vec_get(adapter, eqo), eqo);
3503 dev_warn(&adapter->pdev->dev, "MSIX Request IRQ failed - err %d\n",
3505 be_msix_disable(adapter);
3509 static int be_irq_register(struct be_adapter *adapter)
3511 struct net_device *netdev = adapter->netdev;
3514 if (msix_enabled(adapter)) {
3515 status = be_msix_register(adapter);
3518 /* INTx is not supported for VF */
3519 if (be_virtfn(adapter))
3523 /* INTx: only the first EQ is used */
3524 netdev->irq = adapter->pdev->irq;
3525 status = request_irq(netdev->irq, be_intx, IRQF_SHARED, netdev->name,
3526 &adapter->eq_obj[0]);
3528 dev_err(&adapter->pdev->dev,
3529 "INTx request IRQ failed - err %d\n", status);
3533 adapter->isr_registered = true;
3537 static void be_irq_unregister(struct be_adapter *adapter)
3539 struct net_device *netdev = adapter->netdev;
3540 struct be_eq_obj *eqo;
3543 if (!adapter->isr_registered)
3547 if (!msix_enabled(adapter)) {
3548 free_irq(netdev->irq, &adapter->eq_obj[0]);
3553 for_all_evt_queues(adapter, eqo, i) {
3554 vec = be_msix_vec_get(adapter, eqo);
3555 irq_set_affinity_hint(vec, NULL);
3560 adapter->isr_registered = false;
3563 static void be_rx_qs_destroy(struct be_adapter *adapter)
3565 struct rss_info *rss = &adapter->rss_info;
3566 struct be_queue_info *q;
3567 struct be_rx_obj *rxo;
3570 for_all_rx_queues(adapter, rxo, i) {
3573 /* If RXQs are destroyed while in an "out of buffer"
3574 * state, there is a possibility of an HW stall on
3575 * Lancer. So, post 64 buffers to each queue to relieve
3576 * the "out of buffer" condition.
3577 * Make sure there's space in the RXQ before posting.
3579 if (lancer_chip(adapter)) {
3580 be_rx_cq_clean(rxo);
3581 if (atomic_read(&q->used) == 0)
3582 be_post_rx_frags(rxo, GFP_KERNEL,
3586 be_cmd_rxq_destroy(adapter, q);
3587 be_rx_cq_clean(rxo);
3590 be_queue_free(adapter, q);
3593 if (rss->rss_flags) {
3594 rss->rss_flags = RSS_ENABLE_NONE;
3595 be_cmd_rss_config(adapter, rss->rsstable, rss->rss_flags,
3596 128, rss->rss_hkey);
3600 static void be_disable_if_filters(struct be_adapter *adapter)
3602 /* Don't delete MAC on BE3 VFs without FILTMGMT privilege */
3603 if (!BEx_chip(adapter) || !be_virtfn(adapter) ||
3604 check_privilege(adapter, BE_PRIV_FILTMGMT)) {
3605 be_dev_mac_del(adapter, adapter->pmac_id[0]);
3606 eth_zero_addr(adapter->dev_mac);
3609 be_clear_uc_list(adapter);
3610 be_clear_mc_list(adapter);
3612 /* The IFACE flags are enabled in the open path and cleared
3613 * in the close path. When a VF gets detached from the host and
3614 * assigned to a VM the following happens:
3615 * - VF's IFACE flags get cleared in the detach path
3616 * - IFACE create is issued by the VF in the attach path
3617 * Due to a bug in the BE3/Skyhawk-R FW
3618 * (Lancer FW doesn't have the bug), the IFACE capability flags
3619 * specified along with the IFACE create cmd issued by a VF are not
3620 * honoured by FW. As a consequence, if a *new* driver
3621 * (that enables/disables IFACE flags in open/close)
3622 * is loaded in the host and an *old* driver is used by a VM/VF,
3623 * the IFACE gets created *without* the needed flags.
3624 * To avoid this, disable RX-filter flags only for Lancer.
3626 if (lancer_chip(adapter)) {
3627 be_cmd_rx_filter(adapter, BE_IF_ALL_FILT_FLAGS, OFF);
3628 adapter->if_flags &= ~BE_IF_ALL_FILT_FLAGS;
3632 static int be_close(struct net_device *netdev)
3634 struct be_adapter *adapter = netdev_priv(netdev);
3635 struct be_eq_obj *eqo;
3638 /* This protection is needed as be_close() may be called even when the
3639 * adapter is in cleared state (after eeh perm failure)
3641 if (!(adapter->flags & BE_FLAGS_SETUP_DONE))
3644 /* Before attempting cleanup ensure all the pending cmds in the
3645 * config_wq have finished execution
3647 flush_workqueue(be_wq);
3649 be_disable_if_filters(adapter);
3651 if (adapter->flags & BE_FLAGS_NAPI_ENABLED) {
3652 for_all_evt_queues(adapter, eqo, i) {
3653 napi_disable(&eqo->napi);
3655 adapter->flags &= ~BE_FLAGS_NAPI_ENABLED;
3658 be_async_mcc_disable(adapter);
3660 /* Wait for all pending tx completions to arrive so that
3661 * all tx skbs are freed.
3663 netif_tx_disable(netdev);
3664 be_tx_compl_clean(adapter);
3666 be_rx_qs_destroy(adapter);
3668 for_all_evt_queues(adapter, eqo, i) {
3669 if (msix_enabled(adapter))
3670 synchronize_irq(be_msix_vec_get(adapter, eqo));
3672 synchronize_irq(netdev->irq);
3676 be_irq_unregister(adapter);
3681 static int be_rx_qs_create(struct be_adapter *adapter)
3683 struct rss_info *rss = &adapter->rss_info;
3684 u8 rss_key[RSS_HASH_KEY_LEN];
3685 struct be_rx_obj *rxo;
3688 for_all_rx_queues(adapter, rxo, i) {
3689 rc = be_queue_alloc(adapter, &rxo->q, RX_Q_LEN,
3690 sizeof(struct be_eth_rx_d));
3695 if (adapter->need_def_rxq || !adapter->num_rss_qs) {
3696 rxo = default_rxo(adapter);
3697 rc = be_cmd_rxq_create(adapter, &rxo->q, rxo->cq.id,
3698 rx_frag_size, adapter->if_handle,
3699 false, &rxo->rss_id);
3704 for_all_rss_queues(adapter, rxo, i) {
3705 rc = be_cmd_rxq_create(adapter, &rxo->q, rxo->cq.id,
3706 rx_frag_size, adapter->if_handle,
3707 true, &rxo->rss_id);
3712 if (be_multi_rxq(adapter)) {
3713 for (j = 0; j < RSS_INDIR_TABLE_LEN; j += adapter->num_rss_qs) {
3714 for_all_rss_queues(adapter, rxo, i) {
3715 if ((j + i) >= RSS_INDIR_TABLE_LEN)
3717 rss->rsstable[j + i] = rxo->rss_id;
3718 rss->rss_queue[j + i] = i;
3721 rss->rss_flags = RSS_ENABLE_TCP_IPV4 | RSS_ENABLE_IPV4 |
3722 RSS_ENABLE_TCP_IPV6 | RSS_ENABLE_IPV6;
3724 if (!BEx_chip(adapter))
3725 rss->rss_flags |= RSS_ENABLE_UDP_IPV4 |
3726 RSS_ENABLE_UDP_IPV6;
3728 netdev_rss_key_fill(rss_key, RSS_HASH_KEY_LEN);
3729 rc = be_cmd_rss_config(adapter, rss->rsstable, rss->rss_flags,
3730 RSS_INDIR_TABLE_LEN, rss_key);
3732 rss->rss_flags = RSS_ENABLE_NONE;
3736 memcpy(rss->rss_hkey, rss_key, RSS_HASH_KEY_LEN);
3738 /* Disable RSS, if only default RX Q is created */
3739 rss->rss_flags = RSS_ENABLE_NONE;
3743 /* Post 1 less than RXQ-len to avoid head being equal to tail,
3744 * which is a queue empty condition
3746 for_all_rx_queues(adapter, rxo, i)
3747 be_post_rx_frags(rxo, GFP_KERNEL, RX_Q_LEN - 1);
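/*
 * Indirection-table example (illustrative): with 4 RSS queues and a
 * 128-entry table, the nested loops above fill rsstable[] round-robin
 * with the rss_ids of queues 0,1,2,3,0,1,2,3,... so each RSS queue
 * owns 32 of the 128 hash buckets.
 */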
3752 static int be_enable_if_filters(struct be_adapter *adapter)
3756 status = be_cmd_rx_filter(adapter, BE_IF_FILT_FLAGS_BASIC, ON);
3760 /* Normally this condition is true, as ->dev_mac is zeroed.
3761 * But on BE3 VFs the initial MAC is pre-programmed by PF and
3762 * subsequent be_dev_mac_add() can fail (after fresh boot)
3764 if (!ether_addr_equal(adapter->dev_mac, adapter->netdev->dev_addr)) {
3765 int old_pmac_id = -1;
3767 /* Remember old programmed MAC if any - can happen on BE3 VF */
3768 if (!is_zero_ether_addr(adapter->dev_mac))
3769 old_pmac_id = adapter->pmac_id[0];
3771 status = be_dev_mac_add(adapter, adapter->netdev->dev_addr);
3775 /* Delete the old programmed MAC as we successfully programmed the new MAC */
3778 if (old_pmac_id >= 0 && old_pmac_id != adapter->pmac_id[0])
3779 be_dev_mac_del(adapter, old_pmac_id);
3781 ether_addr_copy(adapter->dev_mac, adapter->netdev->dev_addr);
3784 if (adapter->vlans_added)
3785 be_vid_config(adapter);
3787 __be_set_rx_mode(adapter);
3792 static int be_open(struct net_device *netdev)
3794 struct be_adapter *adapter = netdev_priv(netdev);
3795 struct be_eq_obj *eqo;
3796 struct be_rx_obj *rxo;
3797 struct be_tx_obj *txo;
3801 status = be_rx_qs_create(adapter);
3805 status = be_enable_if_filters(adapter);
3809 status = be_irq_register(adapter);
3813 for_all_rx_queues(adapter, rxo, i)
3814 be_cq_notify(adapter, rxo->cq.id, true, 0);
3816 for_all_tx_queues(adapter, txo, i)
3817 be_cq_notify(adapter, txo->cq.id, true, 0);
3819 be_async_mcc_enable(adapter);
3821 for_all_evt_queues(adapter, eqo, i) {
3822 napi_enable(&eqo->napi);
3823 be_eq_notify(adapter, eqo->q.id, true, true, 0, 0);
3825 adapter->flags |= BE_FLAGS_NAPI_ENABLED;
3827 status = be_cmd_link_status_query(adapter, NULL, &link_status, 0);
3829 be_link_status_update(adapter, link_status);
3831 netif_tx_start_all_queues(netdev);
3833 udp_tunnel_nic_reset_ntf(netdev);
3837 be_close(adapter->netdev);
3841 static void be_vf_eth_addr_generate(struct be_adapter *adapter, u8 *mac)
3845 addr = jhash(adapter->netdev->dev_addr, ETH_ALEN, 0);
3847 mac[5] = (u8)(addr & 0xFF);
3848 mac[4] = (u8)((addr >> 8) & 0xFF);
3849 mac[3] = (u8)((addr >> 16) & 0xFF);
3850 /* Use the OUI from the current MAC address */
3851 memcpy(mac, adapter->netdev->dev_addr, 3);
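/*
 * Example (addresses made up): for a PF MAC of 00:00:c9:12:34:56 the
 * code above keeps the 3-byte OUI 00:00:c9 and replaces the low three
 * bytes with bytes of jhash(PF MAC), yielding e.g. 00:00:c9:ab:cd:ef.
 * Per the comment below, VF MACs are then assigned incrementally from
 * this seed.
 */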
3855 * Generate a seed MAC address from the PF MAC Address using jhash.
3856 * MAC addresses for VFs are assigned incrementally starting from the seed.
3857 * These addresses are programmed in the ASIC by the PF and the VF driver
3858 * queries for the MAC address during its probe.
3860 static int be_vf_eth_addr_config(struct be_adapter *adapter)
3865 struct be_vf_cfg *vf_cfg;
3867 be_vf_eth_addr_generate(adapter, mac);
3869 for_all_vfs(adapter, vf_cfg, vf) {
3870 if (BEx_chip(adapter))
3871 status = be_cmd_pmac_add(adapter, mac,
3873 &vf_cfg->pmac_id, vf + 1);
3875 status = be_cmd_set_mac(adapter, mac, vf_cfg->if_handle,
3879 dev_err(&adapter->pdev->dev,
3880 "Mac address assignment failed for VF %d\n",
3883 memcpy(vf_cfg->mac_addr, mac, ETH_ALEN);
3890 static int be_vfs_mac_query(struct be_adapter *adapter)
3894 struct be_vf_cfg *vf_cfg;
3896 for_all_vfs(adapter, vf_cfg, vf) {
3897 status = be_cmd_get_active_mac(adapter, vf_cfg->pmac_id,
3898 mac, vf_cfg->if_handle,
3902 memcpy(vf_cfg->mac_addr, mac, ETH_ALEN);
3907 static void be_vf_clear(struct be_adapter *adapter)
3909 struct be_vf_cfg *vf_cfg;
3912 if (pci_vfs_assigned(adapter->pdev)) {
3913 dev_warn(&adapter->pdev->dev,
3914 "VFs are assigned to VMs: not disabling VFs\n");
3918 pci_disable_sriov(adapter->pdev);
3920 for_all_vfs(adapter, vf_cfg, vf) {
3921 if (BEx_chip(adapter))
3922 be_cmd_pmac_del(adapter, vf_cfg->if_handle,
3923 vf_cfg->pmac_id, vf + 1);
3925 be_cmd_set_mac(adapter, NULL, vf_cfg->if_handle,
3928 be_cmd_if_destroy(adapter, vf_cfg->if_handle, vf + 1);
3931 if (BE3_chip(adapter))
3932 be_cmd_set_hsw_config(adapter, 0, 0,
3934 PORT_FWD_TYPE_PASSTHRU, 0);
3936 kfree(adapter->vf_cfg);
3937 adapter->num_vfs = 0;
3938 adapter->flags &= ~BE_FLAGS_SRIOV_ENABLED;
3941 static void be_clear_queues(struct be_adapter *adapter)
3943 be_mcc_queues_destroy(adapter);
3944 be_rx_cqs_destroy(adapter);
3945 be_tx_queues_destroy(adapter);
3946 be_evt_queues_destroy(adapter);
3949 static void be_cancel_worker(struct be_adapter *adapter)
3951 if (adapter->flags & BE_FLAGS_WORKER_SCHEDULED) {
3952 cancel_delayed_work_sync(&adapter->work);
3953 adapter->flags &= ~BE_FLAGS_WORKER_SCHEDULED;
3957 static void be_cancel_err_detection(struct be_adapter *adapter)
3959 struct be_error_recovery *err_rec = &adapter->error_recovery;
3961 if (!be_err_recovery_workq)
3964 if (adapter->flags & BE_FLAGS_ERR_DETECTION_SCHEDULED) {
3965 cancel_delayed_work_sync(&err_rec->err_detection_work);
3966 adapter->flags &= ~BE_FLAGS_ERR_DETECTION_SCHEDULED;
3970 /* VxLAN offload Notes:
3972 * The stack defines tunnel offload flags (hw_enc_features) for IP and doesn't
3973 * distinguish various types of transports (VxLAN, GRE, NVGRE ..). So, offload
3974 * is expected to work across all types of IP tunnels once exported. Skyhawk
3975 * supports offloads for either VxLAN or NVGRE, exclusively. So we export VxLAN
3976 * offloads in hw_enc_features only when a VxLAN port is added. If other (non
3977 * VxLAN) tunnels are configured while VxLAN offloads are enabled, offloads for
3978 * those other tunnels are unexported on the fly through ndo_features_check().
3980 static int be_vxlan_set_port(struct net_device *netdev, unsigned int table,
3981 unsigned int entry, struct udp_tunnel_info *ti)
3983 struct be_adapter *adapter = netdev_priv(netdev);
3984 struct device *dev = &adapter->pdev->dev;
3987 status = be_cmd_manage_iface(adapter, adapter->if_handle,
3988 OP_CONVERT_NORMAL_TO_TUNNEL);
3990 dev_warn(dev, "Failed to convert normal interface to tunnel\n");
3993 adapter->flags |= BE_FLAGS_VXLAN_OFFLOADS;
3995 status = be_cmd_set_vxlan_port(adapter, ti->port);
3997 dev_warn(dev, "Failed to add VxLAN port\n");
4000 adapter->vxlan_port = ti->port;
4002 netdev->hw_enc_features |= NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
4003 NETIF_F_TSO | NETIF_F_TSO6 |
4004 NETIF_F_GSO_UDP_TUNNEL;
4006 dev_info(dev, "Enabled VxLAN offloads for UDP port %d\n",
4007 be16_to_cpu(ti->port));
4011 static int be_vxlan_unset_port(struct net_device *netdev, unsigned int table,
4012 unsigned int entry, struct udp_tunnel_info *ti)
4014 struct be_adapter *adapter = netdev_priv(netdev);
4016 if (adapter->flags & BE_FLAGS_VXLAN_OFFLOADS)
4017 be_cmd_manage_iface(adapter, adapter->if_handle,
4018 OP_CONVERT_TUNNEL_TO_NORMAL);
4020 if (adapter->vxlan_port)
4021 be_cmd_set_vxlan_port(adapter, 0);
4023 adapter->flags &= ~BE_FLAGS_VXLAN_OFFLOADS;
4024 adapter->vxlan_port = 0;
4026 netdev->hw_enc_features = 0;
4030 static const struct udp_tunnel_nic_info be_udp_tunnels = {
4031 .set_port = be_vxlan_set_port,
4032 .unset_port = be_vxlan_unset_port,
4033 .flags = UDP_TUNNEL_NIC_INFO_MAY_SLEEP |
4034 UDP_TUNNEL_NIC_INFO_OPEN_ONLY,
4036 { .n_entries = 1, .tunnel_types = UDP_TUNNEL_TYPE_VXLAN, },
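/*
 * Note: the table above advertises a single entry, so the core
 * udp_tunnel infrastructure offers this device at most one VxLAN UDP
 * port at a time, consistent with the VxLAN offload notes above.
 */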
4040 static void be_calculate_vf_res(struct be_adapter *adapter, u16 num_vfs,
4041 struct be_resources *vft_res)
4043 struct be_resources res = adapter->pool_res;
4044 u32 vf_if_cap_flags = res.vf_if_cap_flags;
4045 struct be_resources res_mod = {0};
4048 /* Distribute the queue resources among the PF and its VFs */
4050 /* Divide the rx queues evenly among the VFs and the PF, capped
4051 * at VF-EQ-count. Any remainder queues belong to the PF.
4053 num_vf_qs = min(SH_VF_MAX_NIC_EQS,
4054 res.max_rss_qs / (num_vfs + 1));
4056 /* Skyhawk-R chip supports only MAX_PORT_RSS_TABLES
4057 * RSS Tables per port. Provide RSS on VFs only if the number of
4058 * VFs requested is less than its PF pool's RSS Tables limit.
4060 if (num_vfs >= be_max_pf_pool_rss_tables(adapter))
4064 /* Resource with fields set to all '1's by GET_PROFILE_CONFIG cmd,
4065 * which are modifiable using SET_PROFILE_CONFIG cmd.
4067 be_cmd_get_profile_config(adapter, &res_mod, NULL, ACTIVE_PROFILE_TYPE,
4068 RESOURCE_MODIFIABLE, 0);
4070 /* If RSS IFACE capability flags are modifiable for a VF, set the
4071 * capability flag as valid and set RSS and DEFQ_RSS IFACE flags if
4072 * more than 1 RSSQ is available for a VF.
4073 * Otherwise, provision only 1 queue pair for VF.
4075 if (res_mod.vf_if_cap_flags & BE_IF_FLAGS_RSS) {
4076 vft_res->flags |= BIT(IF_CAPS_FLAGS_VALID_SHIFT);
4077 if (num_vf_qs > 1) {
4078 vf_if_cap_flags |= BE_IF_FLAGS_RSS;
4079 if (res.if_cap_flags & BE_IF_FLAGS_DEFQ_RSS)
4080 vf_if_cap_flags |= BE_IF_FLAGS_DEFQ_RSS;
4082 vf_if_cap_flags &= ~(BE_IF_FLAGS_RSS |
4083 BE_IF_FLAGS_DEFQ_RSS);
4089 if (res_mod.vf_if_cap_flags & BE_IF_FLAGS_VLAN_PROMISCUOUS) {
4090 vft_res->flags |= BIT(IF_CAPS_FLAGS_VALID_SHIFT);
4091 vf_if_cap_flags &= ~BE_IF_FLAGS_VLAN_PROMISCUOUS;
4094 vft_res->vf_if_cap_flags = vf_if_cap_flags;
4095 vft_res->max_rx_qs = num_vf_qs;
4096 vft_res->max_rss_qs = num_vf_qs;
4097 vft_res->max_tx_qs = res.max_tx_qs / (num_vfs + 1);
4098 vft_res->max_cq_count = res.max_cq_count / (num_vfs + 1);
4100 /* Distribute unicast MACs, VLANs, IFACE count and MCCQ count equally
4101 * among the PF and its VFs, if the fields are changeable
4103 if (res_mod.max_uc_mac == FIELD_MODIFIABLE)
4104 vft_res->max_uc_mac = res.max_uc_mac / (num_vfs + 1);
4106 if (res_mod.max_vlans == FIELD_MODIFIABLE)
4107 vft_res->max_vlans = res.max_vlans / (num_vfs + 1);
4109 if (res_mod.max_iface_count == FIELD_MODIFIABLE)
4110 vft_res->max_iface_count = res.max_iface_count / (num_vfs + 1);
4112 if (res_mod.max_mcc_count == FIELD_MODIFIABLE)
4113 vft_res->max_mcc_count = res.max_mcc_count / (num_vfs + 1);
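/*
 * Worked example (illustrative numbers): for num_vfs == 15 with
 * res.max_rss_qs == 64 and res.max_tx_qs == 32, the split above gives
 * each of the 16 shares (PF + 15 VFs):
 *	num_vf_qs = min(SH_VF_MAX_NIC_EQS, 64 / 16) = min(.., 4)
 *	max_tx_qs = 32 / 16 = 2
 * and RSS is advertised to a VF only when it receives more than one
 * queue pair and the IFACE RSS capability is modifiable.
 */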
4116 static void be_if_destroy(struct be_adapter *adapter)
4118 be_cmd_if_destroy(adapter, adapter->if_handle, 0);
4120 kfree(adapter->pmac_id);
4121 adapter->pmac_id = NULL;
4123 kfree(adapter->mc_list);
4124 adapter->mc_list = NULL;
4126 kfree(adapter->uc_list);
4127 adapter->uc_list = NULL;
4130 static int be_clear(struct be_adapter *adapter)
4132 struct pci_dev *pdev = adapter->pdev;
4133 struct be_resources vft_res = {0};
4135 be_cancel_worker(adapter);
4137 flush_workqueue(be_wq);
4139 if (sriov_enabled(adapter))
4140 be_vf_clear(adapter);
4142 /* Re-configure FW to distribute resources evenly across max-supported
4143 * number of VFs, only when VFs are not already enabled.
4145 if (skyhawk_chip(adapter) && be_physfn(adapter) &&
4146 !pci_vfs_assigned(pdev)) {
4147 be_calculate_vf_res(adapter,
4148 pci_sriov_get_totalvfs(pdev),
4150 be_cmd_set_sriov_config(adapter, adapter->pool_res,
4151 pci_sriov_get_totalvfs(pdev),
4155 be_vxlan_unset_port(adapter->netdev, 0, 0, NULL);
4157 be_if_destroy(adapter);
4159 be_clear_queues(adapter);
4161 be_msix_disable(adapter);
4162 adapter->flags &= ~BE_FLAGS_SETUP_DONE;
4166 static int be_vfs_if_create(struct be_adapter *adapter)
4168 struct be_resources res = {0};
4169 u32 cap_flags, en_flags, vf;
4170 struct be_vf_cfg *vf_cfg;
4173 /* If a FW profile exists, then cap_flags are updated */
4174 cap_flags = BE_VF_IF_EN_FLAGS;
4176 for_all_vfs(adapter, vf_cfg, vf) {
4177 if (!BE3_chip(adapter)) {
4178 status = be_cmd_get_profile_config(adapter, &res, NULL,
4179 ACTIVE_PROFILE_TYPE,
4183 cap_flags = res.if_cap_flags;
4184 /* Prevent VFs from enabling VLAN promiscuous mode */
4187 cap_flags &= ~BE_IF_FLAGS_VLAN_PROMISCUOUS;
4191 /* PF should enable IF flags during proxy if_create call */
4192 en_flags = cap_flags & BE_VF_IF_EN_FLAGS;
4193 status = be_cmd_if_create(adapter, cap_flags, en_flags,
4194 &vf_cfg->if_handle, vf + 1);
4202 static int be_vf_setup_init(struct be_adapter *adapter)
4204 struct be_vf_cfg *vf_cfg;
4207 adapter->vf_cfg = kcalloc(adapter->num_vfs, sizeof(*vf_cfg),
4209 if (!adapter->vf_cfg)
4212 for_all_vfs(adapter, vf_cfg, vf) {
4213 vf_cfg->if_handle = -1;
4214 vf_cfg->pmac_id = -1;
4219 static int be_vf_setup(struct be_adapter *adapter)
4221 struct device *dev = &adapter->pdev->dev;
4222 struct be_vf_cfg *vf_cfg;
4223 int status, old_vfs, vf;
4226 old_vfs = pci_num_vf(adapter->pdev);
4228 status = be_vf_setup_init(adapter);
4233 for_all_vfs(adapter, vf_cfg, vf) {
4234 status = be_cmd_get_if_id(adapter, vf_cfg, vf);
4239 status = be_vfs_mac_query(adapter);
4243 status = be_vfs_if_create(adapter);
4247 status = be_vf_eth_addr_config(adapter);
4252 for_all_vfs(adapter, vf_cfg, vf) {
4253 /* Allow VFs to program MAC/VLAN filters */
4254 status = be_cmd_get_fn_privileges(adapter, &vf_cfg->privileges,
4256 if (!status && !(vf_cfg->privileges & BE_PRIV_FILTMGMT)) {
4257 status = be_cmd_set_fn_privileges(adapter,
4258 vf_cfg->privileges |
4262 vf_cfg->privileges |= BE_PRIV_FILTMGMT;
4263 dev_info(dev, "VF%d has FILTMGMT privilege\n",
4268 /* Allow full available bandwidth */
4270 be_cmd_config_qos(adapter, 0, 0, vf + 1);
4272 status = be_cmd_get_hsw_config(adapter, NULL, vf + 1,
4273 vf_cfg->if_handle, NULL,
4276 vf_cfg->spoofchk = spoofchk;
4279 be_cmd_enable_vf(adapter, vf + 1);
4280 be_cmd_set_logical_link_config(adapter,
4281 IFLA_VF_LINK_STATE_AUTO,
4287 status = pci_enable_sriov(adapter->pdev, adapter->num_vfs);
4289 dev_err(dev, "SRIOV enable failed\n");
4290 adapter->num_vfs = 0;
4295 if (BE3_chip(adapter)) {
4296 /* On BE3, enable VEB only when SRIOV is enabled */
4297 status = be_cmd_set_hsw_config(adapter, 0, 0,
4299 PORT_FWD_TYPE_VEB, 0);
4304 adapter->flags |= BE_FLAGS_SRIOV_ENABLED;
4307 dev_err(dev, "VF setup failed\n");
4308 be_vf_clear(adapter);
4312 /* Converting function_mode bits on BE3 to SH mc_type enums */
4314 static u8 be_convert_mc_type(u32 function_mode)
4316 if (function_mode & VNIC_MODE && function_mode & QNQ_MODE)
4318 else if (function_mode & QNQ_MODE)
4320 else if (function_mode & VNIC_MODE)
4322 else if (function_mode & UMC_ENABLED)
4328 /* On BE2/BE3 FW does not suggest the supported limits */
4329 static void BEx_get_resources(struct be_adapter *adapter,
4330 struct be_resources *res)
4332 bool use_sriov = adapter->num_vfs ? 1 : 0;
4334 if (be_physfn(adapter))
4335 res->max_uc_mac = BE_UC_PMAC_COUNT;
4337 res->max_uc_mac = BE_VF_UC_PMAC_COUNT;
4339 adapter->mc_type = be_convert_mc_type(adapter->function_mode);
4341 if (be_is_mc(adapter)) {
4342 /* Assuming that there are 4 channels per port
4343 * when multi-channel is enabled
4345 if (be_is_qnq_mode(adapter))
4346 res->max_vlans = BE_NUM_VLANS_SUPPORTED/8;
4348 /* In a non-qnq multichannel mode, the pvid
4349 * takes up one vlan entry
4351 res->max_vlans = (BE_NUM_VLANS_SUPPORTED / 4) - 1;
4353 res->max_vlans = BE_NUM_VLANS_SUPPORTED;
4356 res->max_mcast_mac = BE_MAX_MC;
4358 /* 1) For BE3 1Gb ports, FW does not support multiple TXQs
4359 * 2) Create multiple TX rings on a BE3-R multi-channel interface
4360 * *only* if it is RSS-capable.
4362 if (BE2_chip(adapter) || use_sriov || (adapter->port_num > 1) ||
4363 be_virtfn(adapter) ||
4364 (be_is_mc(adapter) &&
4365 !(adapter->function_caps & BE_FUNCTION_CAPS_RSS))) {
4367 } else if (adapter->function_caps & BE_FUNCTION_CAPS_SUPER_NIC) {
4368 struct be_resources super_nic_res = {0};
4370 /* On a SuperNIC profile, the driver needs to use the
4371 * GET_PROFILE_CONFIG cmd to query the per-function TXQ limits
4373 be_cmd_get_profile_config(adapter, &super_nic_res, NULL,
4374 ACTIVE_PROFILE_TYPE, RESOURCE_LIMITS,
4376 /* Some old versions of BE3 FW don't report max_tx_qs value */
4377 res->max_tx_qs = super_nic_res.max_tx_qs ? : BE3_MAX_TX_QS;
4379 res->max_tx_qs = BE3_MAX_TX_QS;
4382 if ((adapter->function_caps & BE_FUNCTION_CAPS_RSS) &&
4383 !use_sriov && be_physfn(adapter))
4384 res->max_rss_qs = (adapter->be3_native) ?
4385 BE3_MAX_RSS_QS : BE2_MAX_RSS_QS;
4386 res->max_rx_qs = res->max_rss_qs + 1;
4388 if (be_physfn(adapter))
4389 res->max_evt_qs = (be_max_vfs(adapter) > 0) ?
4390 BE3_SRIOV_MAX_EVT_QS : BE3_MAX_EVT_QS;
4392 res->max_evt_qs = 1;
4394 res->if_cap_flags = BE_IF_CAP_FLAGS_WANT;
4395 res->if_cap_flags &= ~BE_IF_FLAGS_DEFQ_RSS;
4396 if (!(adapter->function_caps & BE_FUNCTION_CAPS_RSS))
4397 res->if_cap_flags &= ~BE_IF_FLAGS_RSS;
4400 static void be_setup_init(struct be_adapter *adapter)
4402 adapter->vlan_prio_bmap = 0xff;
4403 adapter->phy.link_speed = -1;
4404 adapter->if_handle = -1;
4405 adapter->be3_native = false;
4406 adapter->if_flags = 0;
4407 adapter->phy_state = BE_UNKNOWN_PHY_STATE;
4408 if (be_physfn(adapter))
4409 adapter->cmd_privileges = MAX_PRIVILEGES;
4411 adapter->cmd_privileges = MIN_PRIVILEGES;
4414 /* HW supports only MAX_PORT_RSS_TABLES RSS Policy Tables per port.
4415 * However, this HW limitation is not exposed to the host via any SLI cmd.
4416 * As a result, in the case of SRIOV and in particular multi-partition configs
4417 * the driver needs to calculate a proportional share of RSS Tables per PF-pool
4418 * for distribution between the VFs. This self-imposed limit will determine the
4419 * number of VFs for which RSS can be enabled.
4421 static void be_calculate_pf_pool_rss_tables(struct be_adapter *adapter)
4423 struct be_port_resources port_res = {0};
4424 u8 rss_tables_on_port;
4425 u16 max_vfs = be_max_vfs(adapter);
4427 be_cmd_get_profile_config(adapter, NULL, &port_res, SAVED_PROFILE_TYPE,
4428 RESOURCE_LIMITS, 0);
4430 rss_tables_on_port = MAX_PORT_RSS_TABLES - port_res.nic_pfs;

	/* Each PF Pool's RSS Tables limit =
	 * PF's Max VFs / Total_Max_VFs on Port * RSS Tables on Port
	 */
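	/* For example, assuming MAX_PORT_RSS_TABLES is 15 and one NIC PF on
	 * the port (14 tables left), a PF that owns 32 of the port's 64 max
	 * VFs would get 32 * 14 / 64 = 7 RSS Tables for its pool.
	 */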
	adapter->pool_res.max_rss_tables =
		max_vfs * rss_tables_on_port / port_res.max_vfs;
}

static int be_get_sriov_config(struct be_adapter *adapter)
{
	struct be_resources res = {0};
	int max_vfs, old_vfs;

	be_cmd_get_profile_config(adapter, &res, NULL, ACTIVE_PROFILE_TYPE,
				  RESOURCE_LIMITS, 0);

	/* Some old versions of BE3 FW don't report max_vfs value */
	if (BE3_chip(adapter) && !res.max_vfs) {
		max_vfs = pci_sriov_get_totalvfs(adapter->pdev);
		res.max_vfs = max_vfs > 0 ? min(MAX_VFS, max_vfs) : 0;
	}

	adapter->pool_res = res;

	/* If the VFs were not disabled during a previous driver unload, we
	 * cannot rely on the PF-pool limits for the TotalVFs value.
	 * Instead use the TotalVFs value stored in the pci-dev struct.
	 */
	old_vfs = pci_num_vf(adapter->pdev);
	if (old_vfs) {
		dev_info(&adapter->pdev->dev, "%d VFs are already enabled\n",
			 old_vfs);

		adapter->pool_res.max_vfs =
			pci_sriov_get_totalvfs(adapter->pdev);
		adapter->num_vfs = old_vfs;
	}

	if (skyhawk_chip(adapter) && be_max_vfs(adapter) && !old_vfs) {
		be_calculate_pf_pool_rss_tables(adapter);
		dev_info(&adapter->pdev->dev,
			 "RSS can be enabled for all VFs if num_vfs <= %d\n",
			 be_max_pf_pool_rss_tables(adapter));
	}

	return 0;
}

static void be_alloc_sriov_res(struct be_adapter *adapter)
{
	int old_vfs = pci_num_vf(adapter->pdev);
	struct be_resources vft_res = {0};
	int status;

	be_get_sriov_config(adapter);

	if (!old_vfs)
		pci_sriov_set_totalvfs(adapter->pdev, be_max_vfs(adapter));

	/* When the HW is in SRIOV capable configuration, the PF-pool
	 * resources are given to PF during driver load, if there are no
	 * old VFs. This facility is not available in BE3 FW.
	 * Also, this is done by FW in Lancer chip.
	 */
	if (skyhawk_chip(adapter) && be_max_vfs(adapter) && !old_vfs) {
		be_calculate_vf_res(adapter, 0, &vft_res);
		status = be_cmd_set_sriov_config(adapter, adapter->pool_res, 0,
						 &vft_res);
		if (status)
			dev_err(&adapter->pdev->dev,
				"Failed to optimize SRIOV resources\n");
	}
}

static int be_get_resources(struct be_adapter *adapter)
{
	struct device *dev = &adapter->pdev->dev;
	struct be_resources res = {0};
	int status;

	/* For Lancer, SH etc., read per-function resource limits from FW.
	 * GET_FUNC_CONFIG returns per-function guaranteed limits.
	 * GET_PROFILE_CONFIG returns PCI-E related limits (PF-pool limits).
	 */
	if (BEx_chip(adapter)) {
		BEx_get_resources(adapter, &res);
	} else {
		status = be_cmd_get_func_config(adapter, &res);
		if (status)
			return status;

		/* If a default RXQ must be created, we'll use up one RSS queue */
		if (res.max_rss_qs && res.max_rss_qs == res.max_rx_qs &&
		    !(res.if_cap_flags & BE_IF_FLAGS_DEFQ_RSS))
			res.max_rss_qs -= 1;
	}

	/* If RoCE is supported, stash away half the EQs for RoCE */
	res.max_nic_evt_qs = be_roce_supported(adapter) ?
			     res.max_evt_qs / 2 : res.max_evt_qs;
	adapter->res = res;

	/* If FW supports RSS default queue, then skip creating non-RSS
	 * queue for non-IP traffic.
	 */
	adapter->need_def_rxq = (be_if_cap_flags(adapter) &
				 BE_IF_FLAGS_DEFQ_RSS) ? 0 : 1;

	dev_info(dev, "Max: txqs %d, rxqs %d, rss %d, eqs %d, vfs %d\n",
		 be_max_txqs(adapter), be_max_rxqs(adapter),
		 be_max_rss(adapter), be_max_nic_eqs(adapter),
		 be_max_vfs(adapter));
	dev_info(dev, "Max: uc-macs %d, mc-macs %d, vlans %d\n",
		 be_max_uc(adapter), be_max_mc(adapter),
		 be_max_vlans(adapter));

	/* Ensure RX and TX queues are created in pairs at init time */
	adapter->cfg_num_rx_irqs =
			min_t(u16, netif_get_num_default_rss_queues(),
			      be_max_qp_irqs(adapter));
	adapter->cfg_num_tx_irqs = adapter->cfg_num_rx_irqs;
	return 0;
}

static int be_get_config(struct be_adapter *adapter)
{
	int status, level;
	u16 profile_id;

	status = be_cmd_get_cntl_attributes(adapter);
	if (status)
		return status;

	status = be_cmd_query_fw_cfg(adapter);
	if (status)
		return status;

	if (!lancer_chip(adapter) && be_physfn(adapter))
		be_cmd_get_fat_dump_len(adapter, &adapter->fat_dump_len);

	if (BEx_chip(adapter)) {
		level = be_cmd_get_fw_log_level(adapter);
		adapter->msg_enable =
			level <= FW_LOG_LEVEL_DEFAULT ? NETIF_MSG_HW : 0;
	}

	be_cmd_get_acpi_wol_cap(adapter);
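	/* Arm PCI wake-up from D3hot/D3cold according to the WoL capability
	 * just queried from FW.
	 */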
	pci_enable_wake(adapter->pdev, PCI_D3hot, adapter->wol_en);
	pci_enable_wake(adapter->pdev, PCI_D3cold, adapter->wol_en);

	be_cmd_query_port_name(adapter);

	if (be_physfn(adapter)) {
		status = be_cmd_get_active_profile(adapter, &profile_id);
		if (!status)
			dev_info(&adapter->pdev->dev,
				 "Using profile 0x%x\n", profile_id);
	}

	return 0;
}

static int be_mac_setup(struct be_adapter *adapter)
{
	u8 mac[ETH_ALEN];
	int status;

	if (is_zero_ether_addr(adapter->netdev->dev_addr)) {
		status = be_cmd_get_perm_mac(adapter, mac);
		if (status)
			return status;

		memcpy(adapter->netdev->dev_addr, mac, ETH_ALEN);
		memcpy(adapter->netdev->perm_addr, mac, ETH_ALEN);

		/* Initial MAC for BE3 VFs is already programmed by PF */
		if (BEx_chip(adapter) && be_virtfn(adapter))
			memcpy(adapter->dev_mac, mac, ETH_ALEN);
	}

	return 0;
}

static void be_schedule_worker(struct be_adapter *adapter)
{
	queue_delayed_work(be_wq, &adapter->work, msecs_to_jiffies(1000));
	adapter->flags |= BE_FLAGS_WORKER_SCHEDULED;
}

static void be_destroy_err_recovery_workq(void)
{
	if (!be_err_recovery_workq)
		return;

	flush_workqueue(be_err_recovery_workq);
	destroy_workqueue(be_err_recovery_workq);
	be_err_recovery_workq = NULL;
}

static void be_schedule_err_detection(struct be_adapter *adapter, u32 delay)
{
	struct be_error_recovery *err_rec = &adapter->error_recovery;

	if (!be_err_recovery_workq)
		return;

	queue_delayed_work(be_err_recovery_workq, &err_rec->err_detection_work,
			   msecs_to_jiffies(delay));
	adapter->flags |= BE_FLAGS_ERR_DETECTION_SCHEDULED;
}

static int be_setup_queues(struct be_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	int status;

	status = be_evt_queues_create(adapter);
	if (status)
		goto err;

	status = be_tx_qs_create(adapter);
	if (status)
		goto err;

	status = be_rx_cqs_create(adapter);
	if (status)
		goto err;

	status = be_mcc_queues_create(adapter);
	if (status)
		goto err;

	status = netif_set_real_num_rx_queues(netdev, adapter->num_rx_qs);
	if (status)
		goto err;

	status = netif_set_real_num_tx_queues(netdev, adapter->num_tx_qs);
	if (status)
		goto err;

	return 0;
err:
	dev_err(&adapter->pdev->dev, "queue_setup failed\n");
	return status;
}

static int be_if_create(struct be_adapter *adapter)
{
	u32 en_flags = BE_IF_FLAGS_RSS | BE_IF_FLAGS_DEFQ_RSS;
	u32 cap_flags = be_if_cap_flags(adapter);
	int status;

	/* alloc required memory for other filtering fields */
	adapter->pmac_id = kcalloc(be_max_uc(adapter),
				   sizeof(*adapter->pmac_id), GFP_KERNEL);
	if (!adapter->pmac_id)
		return -ENOMEM;

	adapter->mc_list = kcalloc(be_max_mc(adapter),
				   sizeof(*adapter->mc_list), GFP_KERNEL);
	if (!adapter->mc_list)
		return -ENOMEM;

	adapter->uc_list = kcalloc(be_max_uc(adapter),
				   sizeof(*adapter->uc_list), GFP_KERNEL);
	if (!adapter->uc_list)
		return -ENOMEM;

	/* RSS is pointless with a single RX queue; drop the capability */
	if (adapter->cfg_num_rx_irqs == 1)
		cap_flags &= ~(BE_IF_FLAGS_DEFQ_RSS | BE_IF_FLAGS_RSS);

	en_flags &= cap_flags;
	/* will enable all the needed filter flags in be_open() */
	status = be_cmd_if_create(adapter, be_if_cap_flags(adapter), en_flags,
				  &adapter->if_handle, 0);

	return status;
}

int be_update_queues(struct be_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	int status;

	if (netif_running(netdev)) {
		/* be_tx_timeout() must not run concurrently with this
		 * function, synchronize with an already-running dev_watchdog
		 */
		netif_tx_lock_bh(netdev);
		/* device cannot transmit now, avoid dev_watchdog timeouts */
		netif_carrier_off(netdev);
		netif_tx_unlock_bh(netdev);

		be_close(netdev);
	}

	be_cancel_worker(adapter);

	/* If any vectors have been shared with RoCE we cannot re-program
	 * the MSI-x table.
	 */
	if (!adapter->num_msix_roce_vec)
		be_msix_disable(adapter);

	be_clear_queues(adapter);
	status = be_cmd_if_destroy(adapter, adapter->if_handle, 0);
	if (status)
		return status;

	if (!msix_enabled(adapter)) {
		status = be_msix_enable(adapter);
		if (status)
			return status;
	}

	status = be_if_create(adapter);
	if (status)
		return status;

	status = be_setup_queues(adapter);
	if (status)
		return status;

	be_schedule_worker(adapter);

	/* The IF was destroyed and re-created. We need to clear
	 * all promiscuous flags valid for the destroyed IF.
	 * Without this, promisc mode is not restored during be_open()
	 * because the driver thinks that it is already enabled in HW.
	 */
	adapter->if_flags &= ~BE_IF_FLAGS_ALL_PROMISCUOUS;

	if (netif_running(netdev))
		status = be_open(netdev);

	return status;
}
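
/* Parse the leading integer out of a dotted FW version string; e.g.
 * "4.6.281.26" yields 4. Returns 0 when no leading integer is found.
 */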
static inline int fw_major_num(const char *fw_ver)
{
	int fw_major = 0, i;

	i = sscanf(fw_ver, "%d.", &fw_major);
	if (i != 1)
		return 0;

	return fw_major;
}

/* If it is error recovery, FLR the PF; otherwise, don't FLR the PF
 * if any VFs are already enabled.
 */
static bool be_reset_required(struct be_adapter *adapter)
{
	if (be_error_recovering(adapter))
		return true;
	else
		return pci_num_vf(adapter->pdev) == 0;
}

/* Wait for the FW to be ready and perform the required initialization */
static int be_func_init(struct be_adapter *adapter)
{
	int status;

	status = be_fw_wait_ready(adapter);
	if (status)
		return status;

	/* FW is now ready; clear errors to allow cmds/doorbell */
	be_clear_error(adapter, BE_CLEAR_ALL);

	if (be_reset_required(adapter)) {
		status = be_cmd_reset_function(adapter);
		if (status)
			return status;

		/* Wait for interrupts to quiesce after an FLR */
		msleep(100);
	}

	/* Tell FW we're ready to fire cmds */
	status = be_cmd_fw_init(adapter);
	if (status)
		return status;

	/* Allow interrupts for other ULPs running on NIC function */
	be_intr_set(adapter, true);

	return 0;
}

static int be_setup(struct be_adapter *adapter)
{
	struct device *dev = &adapter->pdev->dev;
	int status;

	status = be_func_init(adapter);
	if (status)
		return status;

	be_setup_init(adapter);

	if (!lancer_chip(adapter))
		be_cmd_req_native_mode(adapter);

	/* invoke this cmd first to get pf_num and vf_num which are needed
	 * for issuing profile related cmds
	 */
	if (!BEx_chip(adapter)) {
		status = be_cmd_get_func_config(adapter, NULL);
		if (status)
			return status;
	}

	status = be_get_config(adapter);
	if (status)
		goto err;

	if (!BE2_chip(adapter) && be_physfn(adapter))
		be_alloc_sriov_res(adapter);

	status = be_get_resources(adapter);
	if (status)
		goto err;

	status = be_msix_enable(adapter);
	if (status)
		goto err;

	/* will enable all the needed filter flags in be_open() */
	status = be_if_create(adapter);
	if (status)
		goto err;

	/* Updating real_num_tx/rx_queues() requires rtnl_lock() */
	rtnl_lock();
	status = be_setup_queues(adapter);
	rtnl_unlock();
	if (status)
		goto err;

	be_cmd_get_fn_privileges(adapter, &adapter->cmd_privileges, 0);

	status = be_mac_setup(adapter);
	if (status)
		goto err;

	be_cmd_get_fw_ver(adapter);
	dev_info(dev, "FW version is %s\n", adapter->fw_ver);

	if (BE2_chip(adapter) && fw_major_num(adapter->fw_ver) < 4) {
		dev_err(dev, "Firmware on card is old (%s), IRQs may not work\n",
			adapter->fw_ver);
		dev_err(dev, "Please upgrade firmware to version >= 4.0\n");
	}

	status = be_cmd_set_flow_control(adapter, adapter->tx_fc,
					 adapter->rx_fc);
	if (status)
		be_cmd_get_flow_control(adapter, &adapter->tx_fc,
					&adapter->rx_fc);

	dev_info(&adapter->pdev->dev, "HW Flow control - TX:%d RX:%d\n",
		 adapter->tx_fc, adapter->rx_fc);

	if (be_physfn(adapter))
		be_cmd_set_logical_link_config(adapter,
					       IFLA_VF_LINK_STATE_AUTO, 0);

	/* BE3 EVB echoes broadcast/multicast packets back to PF's vport,
	 * confusing a Linux bridge or OVS that it might be connected to.
	 * Set the EVB to PASSTHRU mode which effectively disables the EVB
	 * when SRIOV is not enabled.
	 */
	if (BE3_chip(adapter))
		be_cmd_set_hsw_config(adapter, 0, 0, adapter->if_handle,
				      PORT_FWD_TYPE_PASSTHRU, 0);

	if (adapter->num_vfs)
		be_vf_setup(adapter);

	status = be_cmd_get_phy_info(adapter);
	if (!status && be_pause_supported(adapter))
		adapter->phy.fc_autoneg = 1;

	if (be_physfn(adapter) && !lancer_chip(adapter))
		be_cmd_set_features(adapter);

	be_schedule_worker(adapter);
	adapter->flags |= BE_FLAGS_SETUP_DONE;
	return 0;
err:
	be_clear(adapter);
	return status;
}

#ifdef CONFIG_NET_POLL_CONTROLLER
static void be_netpoll(struct net_device *netdev)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct be_eq_obj *eqo;
	int i;

	for_all_evt_queues(adapter, eqo, i) {
		be_eq_notify(eqo->adapter, eqo->q.id, false, true, 0, 0);
		napi_schedule(&eqo->napi);
	}
}
#endif

int be_load_fw(struct be_adapter *adapter, u8 *fw_file)
{
	const struct firmware *fw;
	int status;

	if (!netif_running(adapter->netdev)) {
		dev_err(&adapter->pdev->dev,
			"Firmware load not allowed (interface is down)\n");
		return -ENETDOWN;
	}

	status = request_firmware(&fw, fw_file, &adapter->pdev->dev);
	if (status)
		goto fw_exit;

	dev_info(&adapter->pdev->dev, "Flashing firmware file %s\n", fw_file);

	if (lancer_chip(adapter))
		status = lancer_fw_download(adapter, fw);
	else
		status = be_fw_download(adapter, fw);

	if (!status)
		be_cmd_get_fw_ver(adapter);

fw_exit:
	release_firmware(fw);
	return status;
}

static int be_ndo_bridge_setlink(struct net_device *dev, struct nlmsghdr *nlh,
				 u16 flags, struct netlink_ext_ack *extack)
{
	struct be_adapter *adapter = netdev_priv(dev);
	struct nlattr *attr, *br_spec;
	int rem;
	int status = 0;
	u16 mode = 0;

	if (!sriov_enabled(adapter))
		return -EOPNOTSUPP;

	br_spec = nlmsg_find_attr(nlh, sizeof(struct ifinfomsg), IFLA_AF_SPEC);
	if (!br_spec)
		return -EINVAL;

	nla_for_each_nested(attr, br_spec, rem) {
		if (nla_type(attr) != IFLA_BRIDGE_MODE)
			continue;

		if (nla_len(attr) < sizeof(mode))
			return -EINVAL;

		mode = nla_get_u16(attr);
		if (BE3_chip(adapter) && mode == BRIDGE_MODE_VEPA)
			return -EOPNOTSUPP;

		if (mode != BRIDGE_MODE_VEPA && mode != BRIDGE_MODE_VEB)
			return -EINVAL;

		status = be_cmd_set_hsw_config(adapter, 0, 0,
					       adapter->if_handle,
					       mode == BRIDGE_MODE_VEPA ?
					       PORT_FWD_TYPE_VEPA :
					       PORT_FWD_TYPE_VEB, 0);
		if (status)
			goto err;

		dev_info(&adapter->pdev->dev, "enabled switch mode: %s\n",
			 mode == BRIDGE_MODE_VEPA ? "VEPA" : "VEB");

		return status;
	}
err:
	dev_err(&adapter->pdev->dev, "Failed to set switch mode %s\n",
		mode == BRIDGE_MODE_VEPA ? "VEPA" : "VEB");

	return status;
}

static int be_ndo_bridge_getlink(struct sk_buff *skb, u32 pid, u32 seq,
				 struct net_device *dev, u32 filter_mask,
				 int nlflags)
{
	struct be_adapter *adapter = netdev_priv(dev);
	int status = 0;
	u8 hsw_mode;

	/* BE and Lancer chips support VEB mode only */
	if (BEx_chip(adapter) || lancer_chip(adapter)) {
		/* VEB is disabled in non-SR-IOV profiles on BE3/Lancer */
		if (!pci_sriov_get_totalvfs(adapter->pdev))
			return 0;
		hsw_mode = PORT_FWD_TYPE_VEB;
	} else {
		status = be_cmd_get_hsw_config(adapter, NULL, 0,
					       adapter->if_handle, &hsw_mode,
					       NULL);
		if (status)
			return 0;

		if (hsw_mode == PORT_FWD_TYPE_PASSTHRU)
			return 0;
	}

	return ndo_dflt_bridge_getlink(skb, pid, seq, dev,
				       hsw_mode == PORT_FWD_TYPE_VEPA ?
				       BRIDGE_MODE_VEPA : BRIDGE_MODE_VEB,
				       0, 0, nlflags, filter_mask, NULL);
}

static struct be_cmd_work *be_alloc_work(struct be_adapter *adapter,
					 void (*func)(struct work_struct *))
{
	struct be_cmd_work *work;

	work = kzalloc(sizeof(*work), GFP_ATOMIC);
	if (!work) {
		dev_err(&adapter->pdev->dev,
			"be_work memory allocation failed\n");
		return NULL;
	}

	INIT_WORK(&work->work, func);
	work->adapter = adapter;
	return work;
}

static netdev_features_t be_features_check(struct sk_buff *skb,
					   struct net_device *dev,
					   netdev_features_t features)
{
	struct be_adapter *adapter = netdev_priv(dev);
	u8 l4_hdr = 0;

	if (skb_is_gso(skb)) {
		/* IPv6 TSO requests with extension hdrs are a problem
		 * to Lancer and BE3 HW. Disable TSO6 feature.
		 */
		if (!skyhawk_chip(adapter) && is_ipv6_ext_hdr(skb))
			features &= ~NETIF_F_TSO6;

		/* Lancer cannot handle the packet with MSS less than 256.
		 * Also it can't handle a TSO packet with a single segment.
		 * Disable the GSO support in such cases.
		 */
		if (lancer_chip(adapter) &&
		    (skb_shinfo(skb)->gso_size < 256 ||
		     skb_shinfo(skb)->gso_segs == 1))
			features &= ~NETIF_F_GSO_MASK;
	}

	/* The code below restricts offload features for some tunneled and
	 * Q-in-Q packets.
	 * Offload features for normal (non tunnel) packets are unchanged.
	 */
	features = vlan_features_check(skb, features);
	if (!skb->encapsulation ||
	    !(adapter->flags & BE_FLAGS_VXLAN_OFFLOADS))
		return features;

	/* It's an encapsulated packet and VxLAN offloads are enabled. We
	 * should disable tunnel offload features if it's not a VxLAN packet,
	 * as tunnel offloads have been enabled only for VxLAN. This is done to
	 * allow other tunneled traffic like GRE to work fine while VxLAN
	 * offloads are configured in Skyhawk-R.
	 */
	switch (vlan_get_protocol(skb)) {
	case htons(ETH_P_IP):
		l4_hdr = ip_hdr(skb)->protocol;
		break;
	case htons(ETH_P_IPV6):
		l4_hdr = ipv6_hdr(skb)->nexthdr;
		break;
	default:
		return features;
	}

	/* Keep offloads only for a bona fide VxLAN packet: inner Ethernet
	 * (ETH_P_TEB) over UDP to the configured VxLAN port, with exactly a
	 * UDP + VxLAN header between the transport and inner MAC headers.
	 */
	if (l4_hdr != IPPROTO_UDP ||
	    skb->inner_protocol_type != ENCAP_TYPE_ETHER ||
	    skb->inner_protocol != htons(ETH_P_TEB) ||
	    skb_inner_mac_header(skb) - skb_transport_header(skb) !=
		sizeof(struct udphdr) + sizeof(struct vxlanhdr) ||
	    !adapter->vxlan_port ||
	    udp_hdr(skb)->dest != adapter->vxlan_port)
		return features & ~(NETIF_F_CSUM_MASK | NETIF_F_GSO_MASK);

	return features;
}

static int be_get_phys_port_id(struct net_device *dev,
			       struct netdev_phys_item_id *ppid)
{
	int i, id_len = CNTL_SERIAL_NUM_WORDS * CNTL_SERIAL_NUM_WORD_SZ + 1;
	struct be_adapter *adapter = netdev_priv(dev);
	u8 *id;

	if (MAX_PHYS_ITEM_ID_LEN < id_len)
		return -ENOSPC;

	ppid->id[0] = adapter->hba_port_num + 1;
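	/* The rest of the ID is the controller serial number, copied one
	 * word at a time in reverse order after the 1-based port number.
	 */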
	id = &ppid->id[1];
	for (i = CNTL_SERIAL_NUM_WORDS - 1; i >= 0;
	     i--, id += CNTL_SERIAL_NUM_WORD_SZ)
		memcpy(id, &adapter->serial_num[i], CNTL_SERIAL_NUM_WORD_SZ);

	ppid->id_len = id_len;

	return 0;
}

static void be_set_rx_mode(struct net_device *dev)
{
	struct be_adapter *adapter = netdev_priv(dev);
	struct be_cmd_work *work;

	work = be_alloc_work(adapter, be_work_set_rx_mode);
	if (work)
		queue_work(be_wq, &work->work);
}

static const struct net_device_ops be_netdev_ops = {
	.ndo_open = be_open,
	.ndo_stop = be_close,
	.ndo_start_xmit = be_xmit,
	.ndo_set_rx_mode = be_set_rx_mode,
	.ndo_set_mac_address = be_mac_addr_set,
	.ndo_get_stats64 = be_get_stats64,
	.ndo_validate_addr = eth_validate_addr,
	.ndo_vlan_rx_add_vid = be_vlan_add_vid,
	.ndo_vlan_rx_kill_vid = be_vlan_rem_vid,
	.ndo_set_vf_mac = be_set_vf_mac,
	.ndo_set_vf_vlan = be_set_vf_vlan,
	.ndo_set_vf_rate = be_set_vf_tx_rate,
	.ndo_get_vf_config = be_get_vf_config,
	.ndo_set_vf_link_state = be_set_vf_link_state,
	.ndo_set_vf_spoofchk = be_set_vf_spoofchk,
	.ndo_tx_timeout = be_tx_timeout,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller = be_netpoll,
#endif
	.ndo_bridge_setlink = be_ndo_bridge_setlink,
	.ndo_bridge_getlink = be_ndo_bridge_getlink,
	.ndo_udp_tunnel_add = udp_tunnel_nic_add_port,
	.ndo_udp_tunnel_del = udp_tunnel_nic_del_port,
	.ndo_features_check = be_features_check,
	.ndo_get_phys_port_id = be_get_phys_port_id,
};

static void be_netdev_init(struct net_device *netdev)
{
	struct be_adapter *adapter = netdev_priv(netdev);

	netdev->hw_features |= NETIF_F_SG | NETIF_F_TSO | NETIF_F_TSO6 |
		NETIF_F_GSO_UDP_TUNNEL |
		NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | NETIF_F_RXCSUM |
		NETIF_F_HW_VLAN_CTAG_TX;
	if ((be_if_cap_flags(adapter) & BE_IF_FLAGS_RSS))
		netdev->hw_features |= NETIF_F_RXHASH;

	netdev->features |= netdev->hw_features |
		NETIF_F_HW_VLAN_CTAG_RX | NETIF_F_HW_VLAN_CTAG_FILTER;

	netdev->vlan_features |= NETIF_F_SG | NETIF_F_TSO | NETIF_F_TSO6 |
		NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM;

	netdev->priv_flags |= IFF_UNICAST_FLT;

	netdev->flags |= IFF_MULTICAST;

	netif_set_gso_max_size(netdev, BE_MAX_GSO_SIZE - ETH_HLEN);

	netdev->netdev_ops = &be_netdev_ops;

	netdev->ethtool_ops = &be_ethtool_ops;

	if (!lancer_chip(adapter) && !BEx_chip(adapter) && !be_is_mc(adapter))
		netdev->udp_tunnel_nic_info = &be_udp_tunnels;

	/* MTU range: 256 - 9000 */
	netdev->min_mtu = BE_MIN_MTU;
	netdev->max_mtu = BE_MAX_MTU;
}

static void be_cleanup(struct be_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;

	rtnl_lock();
	netif_device_detach(netdev);
	if (netif_running(netdev))
		be_close(netdev);
	rtnl_unlock();

	be_clear(adapter);
}

static int be_resume(struct be_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	int status;

	status = be_setup(adapter);
	if (status)
		return status;

	rtnl_lock();
	if (netif_running(netdev))
		status = be_open(netdev);
	rtnl_unlock();

	if (status)
		return status;

	netif_device_attach(netdev);

	return 0;
}

static void be_soft_reset(struct be_adapter *adapter)
{
	u32 val;

	dev_info(&adapter->pdev->dev, "Initiating chip soft reset\n");
	val = ioread32(adapter->pcicfg + SLIPORT_SOFTRESET_OFFSET);
	val |= SLIPORT_SOFTRESET_SR_MASK;
	iowrite32(val, adapter->pcicfg + SLIPORT_SOFTRESET_OFFSET);
}

static bool be_err_is_recoverable(struct be_adapter *adapter)
{
	struct be_error_recovery *err_rec = &adapter->error_recovery;
	unsigned long initial_idle_time =
		msecs_to_jiffies(ERR_RECOVERY_IDLE_TIME);
	unsigned long recovery_interval =
		msecs_to_jiffies(ERR_RECOVERY_INTERVAL);
	u16 ue_err_code;
	u32 val;

	val = be_POST_stage_get(adapter);
	if ((val & POST_STAGE_RECOVERABLE_ERR) != POST_STAGE_RECOVERABLE_ERR)
		return false;
	ue_err_code = val & POST_ERR_RECOVERY_CODE_MASK;
	if (ue_err_code == 0)
		return false;

	dev_err(&adapter->pdev->dev, "Recoverable HW error code: 0x%x\n",
		ue_err_code);

	if (time_before_eq(jiffies - err_rec->probe_time, initial_idle_time)) {
		dev_err(&adapter->pdev->dev,
			"Cannot recover within %lu sec from driver load\n",
			jiffies_to_msecs(initial_idle_time) / MSEC_PER_SEC);
		return false;
	}

	if (err_rec->last_recovery_time && time_before_eq(
		jiffies - err_rec->last_recovery_time, recovery_interval)) {
		dev_err(&adapter->pdev->dev,
			"Cannot recover within %lu sec from last recovery\n",
			jiffies_to_msecs(recovery_interval) / MSEC_PER_SEC);
		return false;
	}

	if (ue_err_code == err_rec->last_err_code) {
		dev_err(&adapter->pdev->dev,
			"Cannot recover from a consecutive TPE error\n");
		return false;
	}

	err_rec->last_recovery_time = jiffies;
	err_rec->last_err_code = ue_err_code;
	return true;
}
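
/* TPE recovery is a small state machine driven by the error-detection
 * worker: NONE -> DETECT on the first pass; from DETECT, PF0 (the only
 * function that issues the chip soft reset) moves to RESET while the
 * other PFs go straight to PRE_POLL; PRE_POLL -> REINIT returns success
 * so the caller can re-initialize the function.
 */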
static int be_tpe_recover(struct be_adapter *adapter)
{
	struct be_error_recovery *err_rec = &adapter->error_recovery;
	int status = -EAGAIN;
	u32 val;

	switch (err_rec->recovery_state) {
	case ERR_RECOVERY_ST_NONE:
		err_rec->recovery_state = ERR_RECOVERY_ST_DETECT;
		err_rec->resched_delay = ERR_RECOVERY_UE_DETECT_DURATION;
		break;

	case ERR_RECOVERY_ST_DETECT:
		val = be_POST_stage_get(adapter);
		if ((val & POST_STAGE_RECOVERABLE_ERR) !=
		    POST_STAGE_RECOVERABLE_ERR) {
			dev_err(&adapter->pdev->dev,
				"Unrecoverable HW error detected: 0x%x\n", val);
			status = -EINVAL;
			err_rec->resched_delay = 0;
			break;
		}

		dev_err(&adapter->pdev->dev, "Recoverable HW error detected\n");

		/* Only PF0 initiates Chip Soft Reset. But PF0 must wait UE2SR
		 * milliseconds before it checks for final error status in
		 * SLIPORT_SEMAPHORE to determine if recovery criteria is met.
		 * If it does, then PF0 initiates a Soft Reset.
		 */
		if (adapter->pf_num == 0) {
			err_rec->recovery_state = ERR_RECOVERY_ST_RESET;
			err_rec->resched_delay = err_rec->ue_to_reset_time -
					ERR_RECOVERY_UE_DETECT_DURATION;
			break;
		}

		err_rec->recovery_state = ERR_RECOVERY_ST_PRE_POLL;
		err_rec->resched_delay = err_rec->ue_to_poll_time -
					ERR_RECOVERY_UE_DETECT_DURATION;
		break;

	case ERR_RECOVERY_ST_RESET:
		if (!be_err_is_recoverable(adapter)) {
			dev_err(&adapter->pdev->dev,
				"Failed to meet recovery criteria\n");
			status = -EIO;
			err_rec->resched_delay = 0;
			break;
		}
		be_soft_reset(adapter);
		err_rec->recovery_state = ERR_RECOVERY_ST_PRE_POLL;
		err_rec->resched_delay = err_rec->ue_to_poll_time -
					err_rec->ue_to_reset_time;
		break;

	case ERR_RECOVERY_ST_PRE_POLL:
		err_rec->recovery_state = ERR_RECOVERY_ST_REINIT;
		err_rec->resched_delay = 0;
		status = 0;	/* done */
		break;

	default:
		status = -EINVAL;
		err_rec->resched_delay = 0;
		break;
	}

	return status;
}

static int be_err_recover(struct be_adapter *adapter)
{
	int status;

	if (!lancer_chip(adapter)) {
		if (!adapter->error_recovery.recovery_supported ||
		    adapter->priv_flags & BE_DISABLE_TPE_RECOVERY)
			return -EIO;
		status = be_tpe_recover(adapter);
		if (status)
			goto err;
	}

	/* Wait for adapter to reach quiescent state before
	 * destroying queues
	 */
	status = be_fw_wait_ready(adapter);
	if (status)
		goto err;

	adapter->flags |= BE_FLAGS_TRY_RECOVERY;

	be_cleanup(adapter);

	status = be_resume(adapter);
	if (status)
		goto err;

	adapter->flags &= ~BE_FLAGS_TRY_RECOVERY;

err:
	return status;
}

static void be_err_detection_task(struct work_struct *work)
{
	struct be_error_recovery *err_rec =
			container_of(work, struct be_error_recovery,
				     err_detection_work.work);
	struct be_adapter *adapter =
			container_of(err_rec, struct be_adapter,
				     error_recovery);
	u32 resched_delay = ERR_RECOVERY_DETECTION_DELAY;
	struct device *dev = &adapter->pdev->dev;
	int recovery_status;

	be_detect_error(adapter);
	if (!be_check_error(adapter, BE_ERROR_HW))
		goto reschedule_task;

	recovery_status = be_err_recover(adapter);
	if (!recovery_status) {
		err_rec->recovery_retries = 0;
		err_rec->recovery_state = ERR_RECOVERY_ST_NONE;
		dev_info(dev, "Adapter recovery successful\n");
		goto reschedule_task;
	} else if (!lancer_chip(adapter) && err_rec->resched_delay) {
		/* BEx/SH recovery state machine */
		if (adapter->pf_num == 0 &&
		    err_rec->recovery_state > ERR_RECOVERY_ST_DETECT)
			dev_err(&adapter->pdev->dev,
				"Adapter recovery in progress\n");
		resched_delay = err_rec->resched_delay;
		goto reschedule_task;
	} else if (lancer_chip(adapter) && be_virtfn(adapter)) {
		/* For VFs, check whether the PF has allocated resources
		 * every second.
		 */
		dev_err(dev, "Re-trying adapter recovery\n");
		goto reschedule_task;
	} else if (lancer_chip(adapter) && err_rec->recovery_retries++ <
		   ERR_RECOVERY_MAX_RETRY_COUNT) {
		/* In case of another error during recovery, it takes 30 sec
		 * for adapter to come out of error. Retry error recovery after
		 * this time interval.
		 */
		dev_err(&adapter->pdev->dev, "Re-trying adapter recovery\n");
		resched_delay = ERR_RECOVERY_RETRY_DELAY;
		goto reschedule_task;
	} else {
		dev_err(dev, "Adapter recovery failed\n");
		dev_err(dev, "Please reboot server to recover\n");
	}

	return;

reschedule_task:
	be_schedule_err_detection(adapter, resched_delay);
}

static void be_log_sfp_info(struct be_adapter *adapter)
{
	int status;

	status = be_cmd_query_sfp_info(adapter);
	if (!status) {
		dev_err(&adapter->pdev->dev,
			"Port %c: %s Vendor: %s part no: %s",
			adapter->port_name,
			be_misconfig_evt_port_state[adapter->phy_state],
			adapter->phy.vendor_name,
			adapter->phy.vendor_pn);
	}
	adapter->flags &= ~BE_FLAGS_PHY_MISCONFIGURED;
}

static void be_worker(struct work_struct *work)
{
	struct be_adapter *adapter =
		container_of(work, struct be_adapter, work.work);
	struct be_rx_obj *rxo;
	int i;

	if (be_physfn(adapter) &&
	    MODULO(adapter->work_counter, adapter->be_get_temp_freq) == 0)
		be_cmd_get_die_temperature(adapter);

	/* when interrupts are not yet enabled, just reap any pending
	 * mcc completions
	 */
	if (!netif_running(adapter->netdev)) {
		be_process_mcc(adapter);
		goto reschedule;
	}

	if (!adapter->stats_cmd_sent) {
		if (lancer_chip(adapter))
			lancer_cmd_get_pport_stats(adapter,
						   &adapter->stats_cmd);
		else
			be_cmd_get_stats(adapter, &adapter->stats_cmd);
	}

	for_all_rx_queues(adapter, rxo, i) {
		/* Replenish RX-queues starved due to memory
		 * allocation failures.
		 */
		if (rxo->rx_post_starved)
			be_post_rx_frags(rxo, GFP_KERNEL, MAX_RX_POST);
	}

	/* EQ-delay update for Skyhawk is done while notifying EQ */
	if (!skyhawk_chip(adapter))
		be_eqd_update(adapter, false);

	if (adapter->flags & BE_FLAGS_PHY_MISCONFIGURED)
		be_log_sfp_info(adapter);

reschedule:
	adapter->work_counter++;
	queue_delayed_work(be_wq, &adapter->work, msecs_to_jiffies(1000));
}

static void be_unmap_pci_bars(struct be_adapter *adapter)
{
	if (adapter->csr)
		pci_iounmap(adapter->pdev, adapter->csr);
	if (adapter->db)
		pci_iounmap(adapter->pdev, adapter->db);
	if (adapter->pcicfg && adapter->pcicfg_mapped)
		pci_iounmap(adapter->pdev, adapter->pcicfg);
}

static int db_bar(struct be_adapter *adapter)
{
	if (lancer_chip(adapter) || be_virtfn(adapter))
		return 0;
	else
		return 4;
}

static int be_roce_map_pci_bars(struct be_adapter *adapter)
{
	if (skyhawk_chip(adapter)) {
		adapter->roce_db.size = 4096;
		adapter->roce_db.io_addr = pci_resource_start(adapter->pdev,
							      db_bar(adapter));
		adapter->roce_db.total_size = pci_resource_len(adapter->pdev,
							       db_bar(adapter));
	}
	return 0;
}

static int be_map_pci_bars(struct be_adapter *adapter)
{
	struct pci_dev *pdev = adapter->pdev;
	u8 __iomem *addr;
	u32 sli_intf;

	pci_read_config_dword(adapter->pdev, SLI_INTF_REG_OFFSET, &sli_intf);
	adapter->sli_family = (sli_intf & SLI_INTF_FAMILY_MASK) >>
				SLI_INTF_FAMILY_SHIFT;
	adapter->virtfn = (sli_intf & SLI_INTF_FT_MASK) ? 1 : 0;

	if (BEx_chip(adapter) && be_physfn(adapter)) {
		adapter->csr = pci_iomap(pdev, 2, 0);
		if (!adapter->csr)
			return -ENOMEM;
	}

	addr = pci_iomap(pdev, db_bar(adapter), 0);
	if (!addr)
		goto pci_map_err;
	adapter->db = addr;

	if (skyhawk_chip(adapter) || BEx_chip(adapter)) {
		if (be_physfn(adapter)) {
			/* PCICFG is the 2nd BAR in BE2 */
			addr = pci_iomap(pdev, BE2_chip(adapter) ? 1 : 0, 0);
			if (!addr)
				goto pci_map_err;
			adapter->pcicfg = addr;
			adapter->pcicfg_mapped = true;
		} else {
			adapter->pcicfg = adapter->db + SRIOV_VF_PCICFG_OFFSET;
			adapter->pcicfg_mapped = false;
		}
	}

	be_roce_map_pci_bars(adapter);
	return 0;

pci_map_err:
	dev_err(&pdev->dev, "Error in mapping PCI BARs\n");
	be_unmap_pci_bars(adapter);
	return -ENOMEM;
}

static void be_drv_cleanup(struct be_adapter *adapter)
{
	struct be_dma_mem *mem = &adapter->mbox_mem_alloced;
	struct device *dev = &adapter->pdev->dev;

	if (mem->va)
		dma_free_coherent(dev, mem->size, mem->va, mem->dma);

	mem = &adapter->rx_filter;
	if (mem->va)
		dma_free_coherent(dev, mem->size, mem->va, mem->dma);

	mem = &adapter->stats_cmd;
	if (mem->va)
		dma_free_coherent(dev, mem->size, mem->va, mem->dma);
}

/* Allocate and initialize various fields in be_adapter struct */
static int be_drv_init(struct be_adapter *adapter)
{
	struct be_dma_mem *mbox_mem_alloc = &adapter->mbox_mem_alloced;
	struct be_dma_mem *mbox_mem_align = &adapter->mbox_mem;
	struct be_dma_mem *rx_filter = &adapter->rx_filter;
	struct be_dma_mem *stats_cmd = &adapter->stats_cmd;
	struct device *dev = &adapter->pdev->dev;
	int status = 0;

	mbox_mem_alloc->size = sizeof(struct be_mcc_mailbox) + 16;
	mbox_mem_alloc->va = dma_alloc_coherent(dev, mbox_mem_alloc->size,
						&mbox_mem_alloc->dma,
						GFP_KERNEL);
	if (!mbox_mem_alloc->va)
		return -ENOMEM;
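
	/* The mailbox must be 16-byte aligned; the 16 extra bytes allocated
	 * above guarantee that an aligned window exists inside the buffer.
	 */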
	mbox_mem_align->size = sizeof(struct be_mcc_mailbox);
	mbox_mem_align->va = PTR_ALIGN(mbox_mem_alloc->va, 16);
	mbox_mem_align->dma = PTR_ALIGN(mbox_mem_alloc->dma, 16);

	rx_filter->size = sizeof(struct be_cmd_req_rx_filter);
	rx_filter->va = dma_alloc_coherent(dev, rx_filter->size,
					   &rx_filter->dma, GFP_KERNEL);
	if (!rx_filter->va) {
		status = -ENOMEM;
		goto free_mbox;
	}

	if (lancer_chip(adapter))
		stats_cmd->size = sizeof(struct lancer_cmd_req_pport_stats);
	else if (BE2_chip(adapter))
		stats_cmd->size = sizeof(struct be_cmd_req_get_stats_v0);
	else if (BE3_chip(adapter))
		stats_cmd->size = sizeof(struct be_cmd_req_get_stats_v1);
	else
		stats_cmd->size = sizeof(struct be_cmd_req_get_stats_v2);
	stats_cmd->va = dma_alloc_coherent(dev, stats_cmd->size,
					   &stats_cmd->dma, GFP_KERNEL);
	if (!stats_cmd->va) {
		status = -ENOMEM;
		goto free_rx_filter;
	}

	mutex_init(&adapter->mbox_lock);
	mutex_init(&adapter->mcc_lock);
	mutex_init(&adapter->rx_filter_lock);
	spin_lock_init(&adapter->mcc_cq_lock);
	init_completion(&adapter->et_cmd_compl);

	pci_save_state(adapter->pdev);

	INIT_DELAYED_WORK(&adapter->work, be_worker);

	adapter->error_recovery.recovery_state = ERR_RECOVERY_ST_NONE;
	adapter->error_recovery.resched_delay = 0;
	INIT_DELAYED_WORK(&adapter->error_recovery.err_detection_work,
			  be_err_detection_task);

	adapter->rx_fc = true;
	adapter->tx_fc = true;
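
	/* Die-temperature polling period, counted in be_worker() runs; with
	 * the worker rescheduled every second this is roughly once a minute.
	 */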
	/* Must be a power of 2 or else MODULO will BUG_ON */
	adapter->be_get_temp_freq = 64;

	return 0;

free_rx_filter:
	dma_free_coherent(dev, rx_filter->size, rx_filter->va, rx_filter->dma);
free_mbox:
	dma_free_coherent(dev, mbox_mem_alloc->size, mbox_mem_alloc->va,
			  mbox_mem_alloc->dma);
	return status;
}

static void be_remove(struct pci_dev *pdev)
{
	struct be_adapter *adapter = pci_get_drvdata(pdev);

	if (!adapter)
		return;

	be_roce_dev_remove(adapter);
	be_intr_set(adapter, false);

	be_cancel_err_detection(adapter);

	unregister_netdev(adapter->netdev);

	be_clear(adapter);

	if (!pci_vfs_assigned(adapter->pdev))
		be_cmd_reset_function(adapter);

	/* tell fw we're done with firing cmds */
	be_cmd_fw_clean(adapter);

	be_unmap_pci_bars(adapter);
	be_drv_cleanup(adapter);

	pci_disable_pcie_error_reporting(pdev);

	pci_release_regions(pdev);
	pci_disable_device(pdev);

	free_netdev(adapter->netdev);
}

static ssize_t be_hwmon_show_temp(struct device *dev,
				  struct device_attribute *dev_attr,
				  char *buf)
{
	struct be_adapter *adapter = dev_get_drvdata(dev);

	/* Unit: millidegree Celsius */
	if (adapter->hwmon_info.be_on_die_temp == BE_INVALID_DIE_TEMP)
		return -EIO;
	else
		return sprintf(buf, "%u\n",
			       adapter->hwmon_info.be_on_die_temp * 1000);
}

static SENSOR_DEVICE_ATTR(temp1_input, 0444,
			  be_hwmon_show_temp, NULL, 1);

static struct attribute *be_hwmon_attrs[] = {
	&sensor_dev_attr_temp1_input.dev_attr.attr,
	NULL
};

ATTRIBUTE_GROUPS(be_hwmon);
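
/* ATTRIBUTE_GROUPS(be_hwmon) above generates the be_hwmon_groups table
 * that is handed to devm_hwmon_device_register_with_groups() in
 * be_probe() below.
 */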

static char *mc_name(struct be_adapter *adapter)
{
	char *str = "";	/* default */

	switch (adapter->mc_type) {
	case UMC:
		str = "UMC";
		break;
	case FLEX10:
		str = "FLEX10";
		break;
	case vNIC1:
		str = "vNIC1";
		break;
	case vNIC2:
		str = "vNIC2";
		break;
	default:
		str = "";
	}

	return str;
}

static inline char *func_name(struct be_adapter *adapter)
{
	return be_physfn(adapter) ? "PF" : "VF";
}

static inline char *nic_name(struct pci_dev *pdev)
{
	switch (pdev->device) {
	case OC_DEVICE_ID1:
		return OC_NAME;
	case OC_DEVICE_ID2:
		return OC_NAME_BE;
	case OC_DEVICE_ID3:
	case OC_DEVICE_ID4:
		return OC_NAME_LANCER;
	case BE_DEVICE_ID2:
		return BE3_NAME;
	case OC_DEVICE_ID5:
	case OC_DEVICE_ID6:
		return OC_NAME_SH;
	default:
		return BE_NAME;
	}
}

static int be_probe(struct pci_dev *pdev, const struct pci_device_id *pdev_id)
{
	struct be_adapter *adapter;
	struct net_device *netdev;
	int status = 0;

	status = pci_enable_device(pdev);
	if (status)
		goto do_none;

	status = pci_request_regions(pdev, DRV_NAME);
	if (status)
		goto disable_dev;
	pci_set_master(pdev);

	netdev = alloc_etherdev_mqs(sizeof(*adapter), MAX_TX_QS, MAX_RX_QS);
	if (!netdev) {
		status = -ENOMEM;
		goto rel_reg;
	}
	adapter = netdev_priv(netdev);
	adapter->pdev = pdev;
	pci_set_drvdata(pdev, adapter);
	adapter->netdev = netdev;
	SET_NETDEV_DEV(netdev, &pdev->dev);

	status = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
	if (!status) {
		netdev->features |= NETIF_F_HIGHDMA;
	} else {
		status = dma_set_mask_and_coherent(&pdev->dev,
						   DMA_BIT_MASK(32));
		if (status) {
			dev_err(&pdev->dev, "Could not set PCI DMA Mask\n");
			goto free_netdev;
		}
	}

	status = pci_enable_pcie_error_reporting(pdev);
	if (!status)
		dev_info(&pdev->dev, "PCIe error reporting enabled\n");

	status = be_map_pci_bars(adapter);
	if (status)
		goto free_netdev;

	status = be_drv_init(adapter);
	if (status)
		goto unmap_bars;

	status = be_setup(adapter);
	if (status)
		goto drv_cleanup;

	be_netdev_init(netdev);
	status = register_netdev(netdev);
	if (status != 0)
		goto unsetup;

	be_roce_dev_add(adapter);

	be_schedule_err_detection(adapter, ERR_DETECTION_DELAY);
	adapter->error_recovery.probe_time = jiffies;

	/* On Die temperature not supported for VF. */
	if (be_physfn(adapter) && IS_ENABLED(CONFIG_BE2NET_HWMON)) {
		adapter->hwmon_info.hwmon_dev =
			devm_hwmon_device_register_with_groups(&pdev->dev,
							       DRV_NAME,
							       adapter,
							       be_hwmon_groups);
		adapter->hwmon_info.be_on_die_temp = BE_INVALID_DIE_TEMP;
	}

	dev_info(&pdev->dev, "%s: %s %s port %c\n", nic_name(pdev),
		 func_name(adapter), mc_name(adapter), adapter->port_name);

	return 0;

unsetup:
	be_clear(adapter);
drv_cleanup:
	be_drv_cleanup(adapter);
unmap_bars:
	be_unmap_pci_bars(adapter);
free_netdev:
	free_netdev(netdev);
rel_reg:
	pci_release_regions(pdev);
disable_dev:
	pci_disable_device(pdev);
do_none:
	dev_err(&pdev->dev, "%s initialization failed\n", nic_name(pdev));
	return status;
}

static int __maybe_unused be_suspend(struct device *dev_d)
{
	struct be_adapter *adapter = dev_get_drvdata(dev_d);

	be_intr_set(adapter, false);
	be_cancel_err_detection(adapter);

	be_cleanup(adapter);

	return 0;
}

static int __maybe_unused be_pci_resume(struct device *dev_d)
{
	struct be_adapter *adapter = dev_get_drvdata(dev_d);
	int status = 0;

	status = be_resume(adapter);
	if (status)
		return status;

	be_schedule_err_detection(adapter, ERR_DETECTION_DELAY);

	return 0;
}

/*
 * An FLR will stop BE from DMAing any data.
 */
static void be_shutdown(struct pci_dev *pdev)
{
	struct be_adapter *adapter = pci_get_drvdata(pdev);

	if (!adapter)
		return;

	be_roce_dev_shutdown(adapter);
	cancel_delayed_work_sync(&adapter->work);
	be_cancel_err_detection(adapter);

	netif_device_detach(adapter->netdev);

	be_cmd_reset_function(adapter);

	pci_disable_device(pdev);
}

static pci_ers_result_t be_eeh_err_detected(struct pci_dev *pdev,
					    pci_channel_state_t state)
{
	struct be_adapter *adapter = pci_get_drvdata(pdev);

	dev_err(&adapter->pdev->dev, "EEH error detected\n");

	be_roce_dev_remove(adapter);

	if (!be_check_error(adapter, BE_ERROR_EEH)) {
		be_set_error(adapter, BE_ERROR_EEH);

		be_cancel_err_detection(adapter);

		be_cleanup(adapter);
	}

	if (state == pci_channel_io_perm_failure)
		return PCI_ERS_RESULT_DISCONNECT;

	pci_disable_device(pdev);

	/* The error could cause the FW to trigger a flash debug dump.
	 * Resetting the card while flash dump is in progress
	 * can cause it not to recover; wait for it to finish.
	 * Wait only for first function as it is needed only once per
	 * adapter.
	 */
	if (pdev->devfn == 0)
		ssleep(30);

	return PCI_ERS_RESULT_NEED_RESET;
}

static pci_ers_result_t be_eeh_reset(struct pci_dev *pdev)
{
	struct be_adapter *adapter = pci_get_drvdata(pdev);
	int status;

	dev_info(&adapter->pdev->dev, "EEH reset\n");

	status = pci_enable_device(pdev);
	if (status)
		return PCI_ERS_RESULT_DISCONNECT;

	pci_set_master(pdev);
	pci_restore_state(pdev);

	/* Check if card is ok and fw is ready */
	dev_info(&adapter->pdev->dev,
		 "Waiting for FW to be ready after EEH reset\n");
	status = be_fw_wait_ready(adapter);
	if (status)
		return PCI_ERS_RESULT_DISCONNECT;

	be_clear_error(adapter, BE_CLEAR_ALL);
	return PCI_ERS_RESULT_RECOVERED;
}

static void be_eeh_resume(struct pci_dev *pdev)
{
	int status = 0;
	struct be_adapter *adapter = pci_get_drvdata(pdev);

	dev_info(&adapter->pdev->dev, "EEH resume\n");

	pci_save_state(pdev);

	status = be_resume(adapter);
	if (status)
		goto err;

	be_roce_dev_add(adapter);

	be_schedule_err_detection(adapter, ERR_DETECTION_DELAY);
	return;
err:
	dev_err(&adapter->pdev->dev, "EEH resume failed\n");
}

static int be_pci_sriov_configure(struct pci_dev *pdev, int num_vfs)
{
	struct be_adapter *adapter = pci_get_drvdata(pdev);
	struct be_resources vft_res = {0};
	int status;

	if (!num_vfs)
		be_vf_clear(adapter);

	adapter->num_vfs = num_vfs;

	if (adapter->num_vfs == 0 && pci_vfs_assigned(pdev)) {
		dev_warn(&pdev->dev,
			 "Cannot disable VFs while they are assigned\n");
		return -EBUSY;
	}

	/* When the HW is in SRIOV capable configuration, the PF-pool resources
	 * are equally distributed across the max number of VFs. The user may
	 * request only a subset of the max VFs to be enabled.
	 * Based on num_vfs, redistribute the resources across num_vfs so that
	 * each VF will have access to a greater share of resources.
	 * This facility is not available in BE3 FW.
	 * Also, this is done by FW in Lancer chip.
	 */
	if (skyhawk_chip(adapter) && !pci_num_vf(pdev)) {
		be_calculate_vf_res(adapter, adapter->num_vfs,
				    &vft_res);
		status = be_cmd_set_sriov_config(adapter, adapter->pool_res,
						 adapter->num_vfs, &vft_res);
		if (status)
			dev_err(&pdev->dev,
				"Failed to optimize SR-IOV resources\n");
	}

	status = be_get_resources(adapter);
	if (status)
		return be_cmd_status(status);

	/* Updating real_num_tx/rx_queues() requires rtnl_lock() */
	rtnl_lock();
	status = be_update_queues(adapter);
	rtnl_unlock();
	if (status)
		return be_cmd_status(status);

	if (adapter->num_vfs)
		status = be_vf_setup(adapter);

	if (!status)
		return adapter->num_vfs;

	return 0;
}

static const struct pci_error_handlers be_eeh_handlers = {
	.error_detected = be_eeh_err_detected,
	.slot_reset = be_eeh_reset,
	.resume = be_eeh_resume,
};
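
/* SIMPLE_DEV_PM_OPS() below wires be_suspend/be_pci_resume into the
 * system-sleep PM hooks; the __maybe_unused annotations on those handlers
 * keep builds without CONFIG_PM_SLEEP warning-free.
 */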
static SIMPLE_DEV_PM_OPS(be_pci_pm_ops, be_suspend, be_pci_resume);

static struct pci_driver be_driver = {
	.name = DRV_NAME,
	.id_table = be_dev_ids,
	.probe = be_probe,
	.remove = be_remove,
	.driver.pm = &be_pci_pm_ops,
	.shutdown = be_shutdown,
	.sriov_configure = be_pci_sriov_configure,
	.err_handler = &be_eeh_handlers
};

static int __init be_init_module(void)
{
	int status;

	if (rx_frag_size != 8192 && rx_frag_size != 4096 &&
	    rx_frag_size != 2048) {
		printk(KERN_WARNING DRV_NAME
			" : Module param rx_frag_size must be 2048/4096/8192."
			" Using 2048\n");
		rx_frag_size = 2048;
	}

	if (num_vfs > 0) {
		pr_info(DRV_NAME " : Module param num_vfs is obsolete.");
		pr_info(DRV_NAME " : Use sysfs method to enable VFs\n");
	}

	be_wq = create_singlethread_workqueue("be_wq");
	if (!be_wq) {
		pr_warn(DRV_NAME " : workqueue creation failed\n");
		return -1;
	}

	be_err_recovery_workq =
		create_singlethread_workqueue("be_err_recover");
	if (!be_err_recovery_workq)
		pr_warn(DRV_NAME " : Could not create error recovery workqueue\n");

	status = pci_register_driver(&be_driver);
	if (status) {
		destroy_workqueue(be_wq);
		be_destroy_err_recovery_workq();
	}
	return status;
}
module_init(be_init_module);

static void __exit be_exit_module(void)
{
	pci_unregister_driver(&be_driver);

	be_destroy_err_recovery_workq();

	if (be_wq)
		destroy_workqueue(be_wq);
}
module_exit(be_exit_module);