// SPDX-License-Identifier: GPL-2.0-only
/* Copyright (C) 2023 Intel Corporation */

#include "idpf.h"

static const struct net_device_ops idpf_netdev_ops_splitq;
static const struct net_device_ops idpf_netdev_ops_singleq;

const char * const idpf_vport_vc_state_str[] = {
	IDPF_FOREACH_VPORT_VC_STATE(IDPF_GEN_STRING)
};

/**
 * idpf_init_vector_stack - Fill the MSIX vector stack with vector indexes
 * @adapter: private data struct
 *
 * Return 0 on success, error on failure
 */
static int idpf_init_vector_stack(struct idpf_adapter *adapter)
{
	struct idpf_vector_lifo *stack;
	u16 min_vec;
	u32 i;

	mutex_lock(&adapter->vector_lock);
	min_vec = adapter->num_msix_entries - adapter->num_avail_msix;
	stack = &adapter->vector_stack;
	stack->size = adapter->num_msix_entries;
	/* set the base and top to point at start of the 'free pool' to
	 * distribute the unused vectors on-demand basis
	 */
	stack->base = min_vec;
	stack->top = min_vec;

	stack->vec_idx = kcalloc(stack->size, sizeof(u16), GFP_KERNEL);
	if (!stack->vec_idx) {
		mutex_unlock(&adapter->vector_lock);

		return -ENOMEM;
	}

	for (i = 0; i < stack->size; i++)
		stack->vec_idx[i] = i;

	mutex_unlock(&adapter->vector_lock);

	return 0;
}

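/* Illustration of the resulting LIFO layout (example numbers only): if
 * num_msix_entries = 8 and num_avail_msix = 3, then min_vec = 5 and
 *
 *   vec_idx: [ 0  1  2  3  4 | 5  6  7 ]
 *              default pool    free pool
 *              base = top = 5, size = 8
 *
 * Slots below 'base' stay reserved for the mailbox and default vports;
 * only the free pool above 'base' is handed out on demand by
 * idpf_vector_lifo_pop().
 */
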
/**
 * idpf_deinit_vector_stack - zero out the MSIX vector stack
 * @adapter: private data struct
 */
static void idpf_deinit_vector_stack(struct idpf_adapter *adapter)
{
	struct idpf_vector_lifo *stack;

	mutex_lock(&adapter->vector_lock);
	stack = &adapter->vector_stack;
	kfree(stack->vec_idx);
	stack->vec_idx = NULL;
	mutex_unlock(&adapter->vector_lock);
}

/**
 * idpf_mb_intr_rel_irq - Free the IRQ association with the OS
 * @adapter: adapter structure
 *
 * This will also disable interrupt mode and queue up mailbox task. Mailbox
 * task will reschedule itself if not in interrupt mode.
 */
static void idpf_mb_intr_rel_irq(struct idpf_adapter *adapter)
{
	clear_bit(IDPF_MB_INTR_MODE, adapter->flags);
	free_irq(adapter->msix_entries[0].vector, adapter);
	queue_delayed_work(adapter->mbx_wq, &adapter->mbx_task, 0);
}

/**
 * idpf_intr_rel - Release interrupt capabilities and free memory
 * @adapter: adapter to disable interrupts on
 */
void idpf_intr_rel(struct idpf_adapter *adapter)
{
	int err;

	if (!adapter->msix_entries)
		return;

	idpf_mb_intr_rel_irq(adapter);
	pci_free_irq_vectors(adapter->pdev);

	err = idpf_send_dealloc_vectors_msg(adapter);
	if (err)
		dev_err(&adapter->pdev->dev,
			"Failed to deallocate vectors: %d\n", err);

	idpf_deinit_vector_stack(adapter);
	kfree(adapter->msix_entries);
	adapter->msix_entries = NULL;
}

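/* Ordering note: the mailbox IRQ is freed before pci_free_irq_vectors(),
 * and the dealloc-vectors message is sent afterwards, relying on the
 * mailbox task falling back to polling once interrupt mode is cleared
 * (see idpf_mb_intr_rel_irq() above).
 */
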
/**
 * idpf_mb_intr_clean - Interrupt handler for the mailbox
 * @irq: interrupt number
 * @data: pointer to the adapter structure
 */
static irqreturn_t idpf_mb_intr_clean(int __always_unused irq, void *data)
{
	struct idpf_adapter *adapter = (struct idpf_adapter *)data;

	queue_delayed_work(adapter->mbx_wq, &adapter->mbx_task, 0);

	return IRQ_HANDLED;
}

/**
 * idpf_mb_irq_enable - Enable MSIX interrupt for the mailbox
 * @adapter: adapter to get the hardware address for register write
 */
static void idpf_mb_irq_enable(struct idpf_adapter *adapter)
{
	struct idpf_intr_reg *intr = &adapter->mb_vector.intr_reg;
	u32 val;

	val = intr->dyn_ctl_intena_m | intr->dyn_ctl_itridx_m;
	writel(val, intr->dyn_ctl);
	writel(intr->icr_ena_ctlq_m, intr->icr_ena);
}

/**
 * idpf_mb_intr_req_irq - Request irq for the mailbox interrupt
 * @adapter: adapter structure to pass to the mailbox irq handler
 */
static int idpf_mb_intr_req_irq(struct idpf_adapter *adapter)
{
	struct idpf_q_vector *mb_vector = &adapter->mb_vector;
	int irq_num, mb_vidx = 0, err;

	irq_num = adapter->msix_entries[mb_vidx].vector;
	mb_vector->name = kasprintf(GFP_KERNEL, "%s-%s-%d",
				    dev_driver_string(&adapter->pdev->dev),
				    "Mailbox", mb_vidx);
	err = request_irq(irq_num, adapter->irq_mb_handler, 0,
			  mb_vector->name, adapter);
	if (err) {
		dev_err(&adapter->pdev->dev,
			"IRQ request for mailbox failed, error: %d\n", err);

		return err;
	}

	set_bit(IDPF_MB_INTR_MODE, adapter->flags);

	return 0;
}

/**
 * idpf_set_mb_vec_id - Set vector index for mailbox
 * @adapter: adapter structure to access the vector chunks
 *
 * The first vector id in the requested vector chunks from the CP is for
 * the mailbox
 */
static void idpf_set_mb_vec_id(struct idpf_adapter *adapter)
{
	if (adapter->req_vec_chunks)
		adapter->mb_vector.v_idx =
			le16_to_cpu(adapter->caps.mailbox_vector_id);
	else
		adapter->mb_vector.v_idx = 0;
}

/**
 * idpf_mb_intr_init - Initialize the mailbox interrupt
 * @adapter: adapter structure to store the mailbox vector
 */
static int idpf_mb_intr_init(struct idpf_adapter *adapter)
{
	adapter->dev_ops.reg_ops.mb_intr_reg_init(adapter);
	adapter->irq_mb_handler = idpf_mb_intr_clean;

	return idpf_mb_intr_req_irq(adapter);
}

/**
 * idpf_vector_lifo_push - push MSIX vector index onto stack
 * @adapter: private data struct
 * @vec_idx: vector index to store
 */
static int idpf_vector_lifo_push(struct idpf_adapter *adapter, u16 vec_idx)
{
	struct idpf_vector_lifo *stack = &adapter->vector_stack;

	lockdep_assert_held(&adapter->vector_lock);

	if (stack->top == stack->base) {
		dev_err(&adapter->pdev->dev, "Exceeded the vector stack limit: %d\n",
			stack->top);
		return -EINVAL;
	}

	stack->vec_idx[--stack->top] = vec_idx;

	return 0;
}

/**
 * idpf_vector_lifo_pop - pop MSIX vector index from stack
 * @adapter: private data struct
 */
static int idpf_vector_lifo_pop(struct idpf_adapter *adapter)
{
	struct idpf_vector_lifo *stack = &adapter->vector_stack;

	lockdep_assert_held(&adapter->vector_lock);

	if (stack->top == stack->size) {
		dev_err(&adapter->pdev->dev, "No interrupt vectors are available to distribute!\n");

		return -EINVAL;
	}

	return stack->vec_idx[stack->top++];
}

/**
 * idpf_vector_stash - Store the vector indexes onto the stack
 * @adapter: private data struct
 * @q_vector_idxs: vector index array
 * @vec_info: info related to the number of vectors
 *
 * This function is a no-op if there are no vector indexes to be stashed
 */
static void idpf_vector_stash(struct idpf_adapter *adapter, u16 *q_vector_idxs,
			      struct idpf_vector_info *vec_info)
{
	int i, base = 0;
	u16 vec_idx;

	lockdep_assert_held(&adapter->vector_lock);

	if (!vec_info->num_curr_vecs)
		return;

	/* For default vports, no need to stash vectors allocated from the
	 * default pool onto the stack
	 */
	if (vec_info->default_vport)
		base = IDPF_MIN_Q_VEC;

	for (i = vec_info->num_curr_vecs - 1; i >= base; i--) {
		vec_idx = q_vector_idxs[i];
		idpf_vector_lifo_push(adapter, vec_idx);
		adapter->num_avail_msix++;
	}
}

/**
 * idpf_req_rel_vector_indexes - Request or release MSIX vector indexes
 * @adapter: driver specific private structure
 * @q_vector_idxs: vector index array
 * @vec_info: info related to the number of vectors
 *
 * This is the core function to distribute the MSIX vectors acquired from the
 * OS. It expects the caller to pass the number of vectors required and
 * also previously allocated. First, it stashes previously allocated vector
 * indexes onto the stack and then figures out if it can allocate the
 * requested vectors. It can wait on acquiring the mutex lock. If the caller
 * passes 0 as requested vectors, then this function just stashes the already
 * allocated vectors and returns 0.
 *
 * Returns actual number of vectors allocated on success, error value on
 * failure. If 0 is returned, it implies the stack has no vectors to allocate,
 * which is also a failure case for the caller.
 */
int idpf_req_rel_vector_indexes(struct idpf_adapter *adapter,
				u16 *q_vector_idxs,
				struct idpf_vector_info *vec_info)
{
	u16 num_req_vecs, num_alloc_vecs = 0, max_vecs;
	struct idpf_vector_lifo *stack;
	int i, j, vecid;

	mutex_lock(&adapter->vector_lock);
	stack = &adapter->vector_stack;
	num_req_vecs = vec_info->num_req_vecs;

	/* Stash interrupt vector indexes onto the stack if required */
	idpf_vector_stash(adapter, q_vector_idxs, vec_info);

	if (!num_req_vecs)
		goto rel_lock;

	if (vec_info->default_vport) {
		/* As IDPF_MIN_Q_VEC per default vport is put aside in the
		 * default pool of the stack, use them for default vports
		 */
		j = vec_info->index * IDPF_MIN_Q_VEC + IDPF_MBX_Q_VEC;
		for (i = 0; i < IDPF_MIN_Q_VEC; i++) {
			q_vector_idxs[num_alloc_vecs++] = stack->vec_idx[j++];
			num_req_vecs--;
		}
	}

	/* Find if stack has enough vectors to allocate */
	max_vecs = min(adapter->num_avail_msix, num_req_vecs);

	for (j = 0; j < max_vecs; j++) {
		vecid = idpf_vector_lifo_pop(adapter);
		q_vector_idxs[num_alloc_vecs++] = vecid;
	}
	adapter->num_avail_msix -= max_vecs;

rel_lock:
	mutex_unlock(&adapter->vector_lock);

	return num_alloc_vecs;
}

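/* Worked example (illustrative constants): with IDPF_MBX_Q_VEC = 1 and
 * IDPF_MIN_Q_VEC = 4, a default vport with vec_info->index = 1 takes its
 * guaranteed vectors from the default pool slots vec_idx[5..8], then any
 * additional request is satisfied by popping from the free pool, capped
 * by num_avail_msix. A non-default vport is served from the free pool
 * only and may therefore get fewer vectors than it requested.
 */
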
/**
 * idpf_intr_req - Request interrupt capabilities
 * @adapter: adapter to enable interrupts on
 *
 * Returns 0 on success, negative on failure
 */
int idpf_intr_req(struct idpf_adapter *adapter)
{
	u16 default_vports = idpf_get_default_vports(adapter);
	int num_q_vecs, total_vecs, num_vec_ids;
	int min_vectors, v_actual, err;
	unsigned int vector;
	u16 *vecids;
	int i;

	total_vecs = idpf_get_reserved_vecs(adapter);
	num_q_vecs = total_vecs - IDPF_MBX_Q_VEC;

	err = idpf_send_alloc_vectors_msg(adapter, num_q_vecs);
	if (err) {
		dev_err(&adapter->pdev->dev,
			"Failed to allocate %d vectors: %d\n", num_q_vecs, err);

		return -EAGAIN;
	}

	min_vectors = IDPF_MBX_Q_VEC + IDPF_MIN_Q_VEC * default_vports;
	v_actual = pci_alloc_irq_vectors(adapter->pdev, min_vectors,
					 total_vecs, PCI_IRQ_MSIX);
	if (v_actual < min_vectors) {
		dev_err(&adapter->pdev->dev, "Failed to allocate MSIX vectors: %d\n",
			v_actual);
		err = -EAGAIN;
		goto send_dealloc_vecs;
	}

	adapter->msix_entries = kcalloc(v_actual, sizeof(struct msix_entry),
					GFP_KERNEL);
	if (!adapter->msix_entries) {
		err = -ENOMEM;
		goto free_irq;
	}

	idpf_set_mb_vec_id(adapter);

	vecids = kcalloc(total_vecs, sizeof(u16), GFP_KERNEL);
	if (!vecids) {
		err = -ENOMEM;
		goto free_msix;
	}

	if (adapter->req_vec_chunks) {
		struct virtchnl2_vector_chunks *vchunks;
		struct virtchnl2_alloc_vectors *ac;

		ac = adapter->req_vec_chunks;
		vchunks = &ac->vchunks;

		num_vec_ids = idpf_get_vec_ids(adapter, vecids, total_vecs,
					       vchunks);
		if (num_vec_ids < v_actual) {
			err = -EINVAL;
			goto free_vecids;
		}
	} else {
		for (i = 0; i < v_actual; i++)
			vecids[i] = i;
	}

	for (vector = 0; vector < v_actual; vector++) {
		adapter->msix_entries[vector].entry = vecids[vector];
		adapter->msix_entries[vector].vector =
			pci_irq_vector(adapter->pdev, vector);
	}

	adapter->num_req_msix = total_vecs;
	adapter->num_msix_entries = v_actual;
	/* 'num_avail_msix' is used to distribute excess vectors to the vports
	 * after considering the minimum vectors required per each default
	 * vport
	 */
	adapter->num_avail_msix = v_actual - min_vectors;

	/* Fill MSIX vector lifo stack with vector indexes */
	err = idpf_init_vector_stack(adapter);
	if (err)
		goto free_vecids;

	err = idpf_mb_intr_init(adapter);
	if (err)
		goto deinit_vec_stack;
	idpf_mb_irq_enable(adapter);
	kfree(vecids);

	return 0;

deinit_vec_stack:
	idpf_deinit_vector_stack(adapter);
free_vecids:
	kfree(vecids);
free_msix:
	kfree(adapter->msix_entries);
	adapter->msix_entries = NULL;
free_irq:
	pci_free_irq_vectors(adapter->pdev);
send_dealloc_vecs:
	idpf_send_dealloc_vectors_msg(adapter);

	return err;
}

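/* Accounting example (illustrative numbers): with 2 default vports,
 * total_vecs = 16 and IDPF_MIN_Q_VEC = 4, the driver asks the CP for
 * 15 queue vectors (total minus IDPF_MBX_Q_VEC = 1), needs at least
 * min_vectors = 1 + 4 * 2 = 9 MSIX entries from the OS, and if it is
 * granted v_actual = 12, then num_avail_msix = 12 - 9 = 3 entries form
 * the on-demand free pool of the vector stack.
 */
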
/**
 * idpf_find_mac_filter - Search filter list for specific mac filter
 * @vconfig: Vport config structure
 * @macaddr: The MAC address
 *
 * Returns ptr to the filter object or NULL. Must be called while holding the
 * mac_filter_list_lock.
 */
static struct idpf_mac_filter *idpf_find_mac_filter(struct idpf_vport_config *vconfig,
						    const u8 *macaddr)
{
	struct idpf_mac_filter *f;

	if (!macaddr)
		return NULL;

	list_for_each_entry(f, &vconfig->user_config.mac_filter_list, list) {
		if (ether_addr_equal(macaddr, f->macaddr))
			return f;
	}

	return NULL;
}

/**
 * __idpf_del_mac_filter - Delete a MAC filter from the filter list
 * @vport_config: Vport config structure
 * @macaddr: The MAC address
 *
 * Returns 0 on success, error value on failure
 */
static int __idpf_del_mac_filter(struct idpf_vport_config *vport_config,
				 const u8 *macaddr)
{
	struct idpf_mac_filter *f;

	spin_lock_bh(&vport_config->mac_filter_list_lock);
	f = idpf_find_mac_filter(vport_config, macaddr);
	if (f) {
		list_del(&f->list);
		kfree(f);
	}
	spin_unlock_bh(&vport_config->mac_filter_list_lock);

	return 0;
}

/**
 * idpf_del_mac_filter - Delete a MAC filter from the filter list
 * @vport: Main vport structure
 * @np: Netdev private structure
 * @macaddr: The MAC address
 * @async: Don't wait for return message
 *
 * Removes filter from list and if interface is up, tells hardware about the
 * removed filter.
 */
static int idpf_del_mac_filter(struct idpf_vport *vport,
			       struct idpf_netdev_priv *np,
			       const u8 *macaddr, bool async)
{
	struct idpf_vport_config *vport_config;
	struct idpf_mac_filter *f;

	vport_config = np->adapter->vport_config[np->vport_idx];

	spin_lock_bh(&vport_config->mac_filter_list_lock);
	f = idpf_find_mac_filter(vport_config, macaddr);
	if (f) {
		f->remove = true;
	} else {
		spin_unlock_bh(&vport_config->mac_filter_list_lock);

		return -EINVAL;
	}
	spin_unlock_bh(&vport_config->mac_filter_list_lock);

	if (np->state == __IDPF_VPORT_UP) {
		int err;

		err = idpf_add_del_mac_filters(vport, np, false, async);
		if (err)
			return err;
	}

	return __idpf_del_mac_filter(vport_config, macaddr);
}

/**
 * __idpf_add_mac_filter - Add mac filter helper function
 * @vport_config: Vport config structure
 * @macaddr: Address to add
 *
 * Takes mac_filter_list_lock spinlock to add new filter to list.
 */
static int __idpf_add_mac_filter(struct idpf_vport_config *vport_config,
				 const u8 *macaddr)
{
	struct idpf_mac_filter *f;

	spin_lock_bh(&vport_config->mac_filter_list_lock);

	f = idpf_find_mac_filter(vport_config, macaddr);
	if (f) {
		f->remove = false;
		spin_unlock_bh(&vport_config->mac_filter_list_lock);

		return 0;
	}

	f = kzalloc(sizeof(*f), GFP_ATOMIC);
	if (!f) {
		spin_unlock_bh(&vport_config->mac_filter_list_lock);

		return -ENOMEM;
	}

	ether_addr_copy(f->macaddr, macaddr);
	list_add_tail(&f->list, &vport_config->user_config.mac_filter_list);
	f->add = true;

	spin_unlock_bh(&vport_config->mac_filter_list_lock);

	return 0;
}

/**
 * idpf_add_mac_filter - Add a mac filter to the filter list
 * @vport: Main vport structure
 * @np: Netdev private structure
 * @macaddr: The MAC address
 * @async: Don't wait for return message
 *
 * Returns 0 on success or error on failure. If interface is up, we'll also
 * send the virtchnl message to tell hardware about the filter.
 */
static int idpf_add_mac_filter(struct idpf_vport *vport,
			       struct idpf_netdev_priv *np,
			       const u8 *macaddr, bool async)
{
	struct idpf_vport_config *vport_config;
	int err;

	vport_config = np->adapter->vport_config[np->vport_idx];
	err = __idpf_add_mac_filter(vport_config, macaddr);
	if (err)
		return err;

	if (np->state == __IDPF_VPORT_UP)
		err = idpf_add_del_mac_filters(vport, np, true, async);

	return err;
}

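/* The add/remove flags on struct idpf_mac_filter act as a pending-change
 * mask: the helpers above only mark entries while holding the spinlock,
 * and idpf_add_del_mac_filters() later walks the list and sends the marked
 * filters over virtchnl. This keeps the potentially sleeping mailbox
 * transaction out of the atomic filter-list context.
 */
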
/**
 * idpf_del_all_mac_filters - Delete all MAC filters in list
 * @vport: main vport struct
 *
 * Takes mac_filter_list_lock spinlock. Deletes all filters
 */
static void idpf_del_all_mac_filters(struct idpf_vport *vport)
{
	struct idpf_vport_config *vport_config;
	struct idpf_mac_filter *f, *ftmp;

	vport_config = vport->adapter->vport_config[vport->idx];
	spin_lock_bh(&vport_config->mac_filter_list_lock);

	list_for_each_entry_safe(f, ftmp, &vport_config->user_config.mac_filter_list,
				 list) {
		list_del(&f->list);
		kfree(f);
	}

	spin_unlock_bh(&vport_config->mac_filter_list_lock);
}

/**
 * idpf_restore_mac_filters - Re-add all MAC filters in list
 * @vport: main vport struct
 *
 * Takes mac_filter_list_lock spinlock. Sets add field to true for filters to
 * resync filters back to HW.
 */
static void idpf_restore_mac_filters(struct idpf_vport *vport)
{
	struct idpf_vport_config *vport_config;
	struct idpf_mac_filter *f;

	vport_config = vport->adapter->vport_config[vport->idx];
	spin_lock_bh(&vport_config->mac_filter_list_lock);

	list_for_each_entry(f, &vport_config->user_config.mac_filter_list, list)
		f->add = true;

	spin_unlock_bh(&vport_config->mac_filter_list_lock);

	idpf_add_del_mac_filters(vport, netdev_priv(vport->netdev),
				 true, false);
}

/**
 * idpf_remove_mac_filters - Remove all MAC filters in list
 * @vport: main vport struct
 *
 * Takes mac_filter_list_lock spinlock. Sets remove field to true for filters
 * to remove filters in HW.
 */
static void idpf_remove_mac_filters(struct idpf_vport *vport)
{
	struct idpf_vport_config *vport_config;
	struct idpf_mac_filter *f;

	vport_config = vport->adapter->vport_config[vport->idx];
	spin_lock_bh(&vport_config->mac_filter_list_lock);

	list_for_each_entry(f, &vport_config->user_config.mac_filter_list, list)
		f->remove = true;

	spin_unlock_bh(&vport_config->mac_filter_list_lock);

	idpf_add_del_mac_filters(vport, netdev_priv(vport->netdev),
				 false, false);
}

/**
 * idpf_deinit_mac_addr - deinitialize mac address for vport
 * @vport: main vport structure
 */
static void idpf_deinit_mac_addr(struct idpf_vport *vport)
{
	struct idpf_vport_config *vport_config;
	struct idpf_mac_filter *f;

	vport_config = vport->adapter->vport_config[vport->idx];

	spin_lock_bh(&vport_config->mac_filter_list_lock);

	f = idpf_find_mac_filter(vport_config, vport->default_mac_addr);
	if (f) {
		list_del(&f->list);
		kfree(f);
	}

	spin_unlock_bh(&vport_config->mac_filter_list_lock);
}

/**
 * idpf_init_mac_addr - initialize mac address for vport
 * @vport: main vport structure
 * @netdev: pointer to netdev struct associated with this vport
 */
static int idpf_init_mac_addr(struct idpf_vport *vport,
			      struct net_device *netdev)
{
	struct idpf_netdev_priv *np = netdev_priv(netdev);
	struct idpf_adapter *adapter = vport->adapter;
	int err;

	if (is_valid_ether_addr(vport->default_mac_addr)) {
		eth_hw_addr_set(netdev, vport->default_mac_addr);
		ether_addr_copy(netdev->perm_addr, vport->default_mac_addr);

		return idpf_add_mac_filter(vport, np, vport->default_mac_addr,
					   false);
	}

	if (!idpf_is_cap_ena(adapter, IDPF_OTHER_CAPS,
			     VIRTCHNL2_CAP_MACFILTER)) {
		dev_err(&adapter->pdev->dev,
			"MAC address is not provided and capability is not set\n");

		return -EINVAL;
	}

	eth_hw_addr_random(netdev);
	err = idpf_add_mac_filter(vport, np, netdev->dev_addr, false);
	if (err)
		return err;

	dev_info(&adapter->pdev->dev, "Invalid MAC address %pM, using random %pM\n",
		 vport->default_mac_addr, netdev->dev_addr);
	ether_addr_copy(vport->default_mac_addr, netdev->dev_addr);

	return 0;
}

/**
 * idpf_cfg_netdev - Allocate, configure and register a netdev
 * @vport: main vport structure
 *
 * Returns 0 on success, negative value on failure.
 */
static int idpf_cfg_netdev(struct idpf_vport *vport)
{
	struct idpf_adapter *adapter = vport->adapter;
	struct idpf_vport_config *vport_config;
	netdev_features_t dflt_features;
	netdev_features_t offloads = 0;
	struct idpf_netdev_priv *np;
	struct net_device *netdev;
	u16 idx = vport->idx;
	int err;

	vport_config = adapter->vport_config[idx];

	/* It's possible we already have a netdev allocated and registered for
	 * this vport
	 */
	if (test_bit(IDPF_VPORT_REG_NETDEV, vport_config->flags)) {
		netdev = adapter->netdevs[idx];
		np = netdev_priv(netdev);
		np->vport = vport;
		np->vport_idx = vport->idx;
		np->vport_id = vport->vport_id;
		vport->netdev = netdev;

		return idpf_init_mac_addr(vport, netdev);
	}

	netdev = alloc_etherdev_mqs(sizeof(struct idpf_netdev_priv),
				    vport_config->max_q.max_txq,
				    vport_config->max_q.max_rxq);
	if (!netdev)
		return -ENOMEM;

	vport->netdev = netdev;
	np = netdev_priv(netdev);
	np->vport = vport;
	np->adapter = adapter;
	np->vport_idx = vport->idx;
	np->vport_id = vport->vport_id;

	spin_lock_init(&np->stats_lock);

	err = idpf_init_mac_addr(vport, netdev);
	if (err) {
		free_netdev(vport->netdev);
		vport->netdev = NULL;

		return err;
	}

	/* assign netdev_ops */
	if (idpf_is_queue_model_split(vport->txq_model))
		netdev->netdev_ops = &idpf_netdev_ops_splitq;
	else
		netdev->netdev_ops = &idpf_netdev_ops_singleq;

	/* setup watchdog timeout value to be 5 seconds */
	netdev->watchdog_timeo = 5 * HZ;

	/* configure default MTU size */
	netdev->min_mtu = ETH_MIN_MTU;
	netdev->max_mtu = vport->max_mtu;

	dflt_features = NETIF_F_SG |
			NETIF_F_HIGHDMA;

	if (idpf_is_cap_ena_all(adapter, IDPF_RSS_CAPS, IDPF_CAP_RSS))
		dflt_features |= NETIF_F_RXHASH;
	if (idpf_is_cap_ena_all(adapter, IDPF_CSUM_CAPS, IDPF_CAP_RX_CSUM_L4V4))
		dflt_features |= NETIF_F_IP_CSUM;
	if (idpf_is_cap_ena_all(adapter, IDPF_CSUM_CAPS, IDPF_CAP_RX_CSUM_L4V6))
		dflt_features |= NETIF_F_IPV6_CSUM;
	if (idpf_is_cap_ena(adapter, IDPF_CSUM_CAPS, IDPF_CAP_RX_CSUM))
		dflt_features |= NETIF_F_RXCSUM;
	if (idpf_is_cap_ena_all(adapter, IDPF_CSUM_CAPS, IDPF_CAP_SCTP_CSUM))
		dflt_features |= NETIF_F_SCTP_CRC;

	if (idpf_is_cap_ena(adapter, IDPF_SEG_CAPS, VIRTCHNL2_CAP_SEG_IPV4_TCP))
		dflt_features |= NETIF_F_TSO;
	if (idpf_is_cap_ena(adapter, IDPF_SEG_CAPS, VIRTCHNL2_CAP_SEG_IPV6_TCP))
		dflt_features |= NETIF_F_TSO6;
	if (idpf_is_cap_ena_all(adapter, IDPF_SEG_CAPS,
				VIRTCHNL2_CAP_SEG_IPV4_UDP |
				VIRTCHNL2_CAP_SEG_IPV6_UDP))
		dflt_features |= NETIF_F_GSO_UDP_L4;
	if (idpf_is_cap_ena_all(adapter, IDPF_RSC_CAPS, IDPF_CAP_RSC))
		offloads |= NETIF_F_GRO_HW;
	/* advertise to stack only if offloads for encapsulated packets is
	 * supported
	 */
	if (idpf_is_cap_ena(vport->adapter, IDPF_SEG_CAPS,
			    VIRTCHNL2_CAP_SEG_TX_SINGLE_TUNNEL)) {
		offloads |= NETIF_F_GSO_UDP_TUNNEL |
			    NETIF_F_GSO_GRE |
			    NETIF_F_GSO_GRE_CSUM |
			    NETIF_F_GSO_PARTIAL |
			    NETIF_F_GSO_UDP_TUNNEL_CSUM |
			    NETIF_F_GSO_IPXIP4 |
			    NETIF_F_GSO_IPXIP6;

		if (!idpf_is_cap_ena_all(vport->adapter, IDPF_CSUM_CAPS,
					 IDPF_CAP_TUNNEL_TX_CSUM))
			netdev->gso_partial_features |=
				NETIF_F_GSO_UDP_TUNNEL_CSUM;

		netdev->gso_partial_features |= NETIF_F_GSO_GRE_CSUM;
		offloads |= NETIF_F_TSO_MANGLEID;
	}
	if (idpf_is_cap_ena(adapter, IDPF_OTHER_CAPS, VIRTCHNL2_CAP_LOOPBACK))
		offloads |= NETIF_F_LOOPBACK;

	netdev->features |= dflt_features;
	netdev->hw_features |= dflt_features | offloads;
	netdev->hw_enc_features |= dflt_features | offloads;
	idpf_set_ethtool_ops(netdev);
	SET_NETDEV_DEV(netdev, &adapter->pdev->dev);

	/* carrier off on init to avoid Tx hangs */
	netif_carrier_off(netdev);

	/* make sure transmit queues start off as stopped */
	netif_tx_stop_all_queues(netdev);

	/* The vport can be arbitrarily released so we need to also track
	 * netdevs in the adapter struct
	 */
	adapter->netdevs[idx] = netdev;

	return 0;
}

/**
 * idpf_get_free_slot - get the next free (NULL) location index in array
 * @adapter: adapter in which to look for a free vport slot
 */
static int idpf_get_free_slot(struct idpf_adapter *adapter)
{
	unsigned int i;

	for (i = 0; i < adapter->max_vports; i++) {
		if (!adapter->vports[i])
			return i;
	}

	return IDPF_NO_FREE_SLOT;
}

/**
 * idpf_remove_features - Turn off feature configs
 * @vport: virtual port structure
 */
static void idpf_remove_features(struct idpf_vport *vport)
{
	struct idpf_adapter *adapter = vport->adapter;

	if (idpf_is_cap_ena(adapter, IDPF_OTHER_CAPS, VIRTCHNL2_CAP_MACFILTER))
		idpf_remove_mac_filters(vport);
}

/**
 * idpf_vport_stop - Disable a vport
 * @vport: vport to disable
 */
static void idpf_vport_stop(struct idpf_vport *vport)
{
	struct idpf_netdev_priv *np = netdev_priv(vport->netdev);

	if (np->state <= __IDPF_VPORT_DOWN)
		return;

	netif_carrier_off(vport->netdev);
	netif_tx_disable(vport->netdev);

	idpf_send_disable_vport_msg(vport);
	idpf_send_disable_queues_msg(vport);
	idpf_send_map_unmap_queue_vector_msg(vport, false);
	/* Normally we ask for queues in create_vport, but if the number of
	 * initially requested queues has changed, for example via ethtool
	 * set channels, we do delete queues and then add the queues back
	 * instead of deleting and reallocating the vport.
	 */
	if (test_and_clear_bit(IDPF_VPORT_DEL_QUEUES, vport->flags))
		idpf_send_delete_queues_msg(vport);

	idpf_remove_features(vport);

	vport->link_up = false;
	idpf_vport_intr_deinit(vport);
	idpf_vport_intr_rel(vport);
	idpf_vport_queues_rel(vport);
	np->state = __IDPF_VPORT_DOWN;
}

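/* The teardown above deliberately mirrors idpf_vport_open() in reverse:
 * quiesce the stack's view of the interface first, then disable the vport
 * and queues at the CP, unmap vectors, and only then free interrupt and
 * queue resources, so nothing in the datapath can touch freed memory.
 */
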
/**
 * idpf_stop - Disables a network interface
 * @netdev: network interface device structure
 *
 * The stop entry point is called when an interface is de-activated by the OS,
 * and the netdevice enters the DOWN state. The hardware is still under the
 * driver's control, but the netdev interface is disabled.
 *
 * Returns success only - not allowed to fail
 */
static int idpf_stop(struct net_device *netdev)
{
	struct idpf_netdev_priv *np = netdev_priv(netdev);
	struct idpf_vport *vport;

	if (test_bit(IDPF_REMOVE_IN_PROG, np->adapter->flags))
		return 0;

	idpf_vport_ctrl_lock(netdev);
	vport = idpf_netdev_to_vport(netdev);

	idpf_vport_stop(vport);

	idpf_vport_ctrl_unlock(netdev);

	return 0;
}

/**
 * idpf_decfg_netdev - Unregister the netdev
 * @vport: vport for which netdev to be unregistered
 */
static void idpf_decfg_netdev(struct idpf_vport *vport)
{
	struct idpf_adapter *adapter = vport->adapter;

	unregister_netdev(vport->netdev);
	free_netdev(vport->netdev);
	vport->netdev = NULL;

	adapter->netdevs[vport->idx] = NULL;
}

/**
 * idpf_vport_rel - Delete a vport and free its resources
 * @vport: the vport being removed
 */
static void idpf_vport_rel(struct idpf_vport *vport)
{
	struct idpf_adapter *adapter = vport->adapter;
	struct idpf_vport_config *vport_config;
	struct idpf_vector_info vec_info;
	struct idpf_rss_data *rss_data;
	struct idpf_vport_max_q max_q;
	u16 idx = vport->idx;
	int i;

	vport_config = adapter->vport_config[vport->idx];
	idpf_deinit_rss(vport);
	rss_data = &vport_config->user_config.rss_data;
	kfree(rss_data->rss_key);
	rss_data->rss_key = NULL;

	idpf_send_destroy_vport_msg(vport);

	/* Set all bits as we don't know on which vc_state the vport vhnl_wq
	 * is waiting on and wakeup the virtchnl workqueue even if it is
	 * waiting for the response as we are going down
	 */
	for (i = 0; i < IDPF_VC_NBITS; i++)
		set_bit(i, vport->vc_state);
	wake_up(&vport->vchnl_wq);

	mutex_destroy(&vport->vc_buf_lock);

	/* Clear all the bits */
	for (i = 0; i < IDPF_VC_NBITS; i++)
		clear_bit(i, vport->vc_state);

	/* Release all max queues allocated to the adapter's pool */
	max_q.max_rxq = vport_config->max_q.max_rxq;
	max_q.max_txq = vport_config->max_q.max_txq;
	max_q.max_bufq = vport_config->max_q.max_bufq;
	max_q.max_complq = vport_config->max_q.max_complq;
	idpf_vport_dealloc_max_qs(adapter, &max_q);

	/* Release all the allocated vectors on the stack */
	vec_info.num_req_vecs = 0;
	vec_info.num_curr_vecs = vport->num_q_vectors;
	vec_info.default_vport = vport->default_vport;

	idpf_req_rel_vector_indexes(adapter, vport->q_vector_idxs, &vec_info);

	kfree(vport->q_vector_idxs);
	vport->q_vector_idxs = NULL;

	kfree(adapter->vport_params_recvd[idx]);
	adapter->vport_params_recvd[idx] = NULL;
	kfree(adapter->vport_params_reqd[idx]);
	adapter->vport_params_reqd[idx] = NULL;
	if (adapter->vport_config[idx]) {
		kfree(adapter->vport_config[idx]->req_qs_chunks);
		adapter->vport_config[idx]->req_qs_chunks = NULL;
	}
	kfree(vport);
	adapter->num_alloc_vports--;
}

/**
 * idpf_vport_dealloc - cleanup and release a given vport
 * @vport: pointer to idpf vport structure
 *
 * returns nothing
 */
static void idpf_vport_dealloc(struct idpf_vport *vport)
{
	struct idpf_adapter *adapter = vport->adapter;
	unsigned int i = vport->idx;

	idpf_deinit_mac_addr(vport);
	idpf_vport_stop(vport);

	if (!test_bit(IDPF_HR_RESET_IN_PROG, adapter->flags))
		idpf_decfg_netdev(vport);
	if (test_bit(IDPF_REMOVE_IN_PROG, adapter->flags))
		idpf_del_all_mac_filters(vport);

	if (adapter->netdevs[i]) {
		struct idpf_netdev_priv *np = netdev_priv(adapter->netdevs[i]);

		np->vport = NULL;
	}

	idpf_vport_rel(vport);

	adapter->vports[i] = NULL;
	adapter->next_vport = idpf_get_free_slot(adapter);
}

/**
 * idpf_vport_alloc - Allocates the next available struct vport in the adapter
 * @adapter: board private structure
 * @max_q: vport max queue info
 *
 * returns a pointer to a vport on success, NULL on failure.
 */
static struct idpf_vport *idpf_vport_alloc(struct idpf_adapter *adapter,
					   struct idpf_vport_max_q *max_q)
{
	struct idpf_rss_data *rss_data;
	u16 idx = adapter->next_vport;
	struct idpf_vport *vport;
	u16 num_max_q;

	if (idx == IDPF_NO_FREE_SLOT)
		return NULL;

	vport = kzalloc(sizeof(*vport), GFP_KERNEL);
	if (!vport)
		return vport;

	if (!adapter->vport_config[idx]) {
		struct idpf_vport_config *vport_config;

		vport_config = kzalloc(sizeof(*vport_config), GFP_KERNEL);
		if (!vport_config) {
			kfree(vport);

			return NULL;
		}

		adapter->vport_config[idx] = vport_config;
	}

	vport->idx = idx;
	vport->adapter = adapter;
	vport->compln_clean_budget = IDPF_TX_COMPLQ_CLEAN_BUDGET;
	vport->default_vport = adapter->num_alloc_vports <
			       idpf_get_default_vports(adapter);

	num_max_q = max(max_q->max_txq, max_q->max_rxq);
	vport->q_vector_idxs = kcalloc(num_max_q, sizeof(u16), GFP_KERNEL);
	if (!vport->q_vector_idxs) {
		kfree(vport);

		return NULL;
	}

	idpf_vport_init(vport, max_q);

	/* This alloc is done separate from the LUT because it's not strictly
	 * dependent on how many queues we have. If we change number of queues
	 * and soft reset we'll need a new LUT but the key can remain the same
	 * for as long as the vport exists.
	 */
	rss_data = &adapter->vport_config[idx]->user_config.rss_data;
	rss_data->rss_key = kzalloc(rss_data->rss_key_size, GFP_KERNEL);
	if (!rss_data->rss_key) {
		kfree(vport);

		return NULL;
	}

	/* Initialize default rss key */
	netdev_rss_key_fill((void *)rss_data->rss_key, rss_data->rss_key_size);

	/* fill vport slot in the adapter struct */
	adapter->vports[idx] = vport;
	adapter->vport_ids[idx] = idpf_get_vport_id(vport);

	adapter->num_alloc_vports++;
	/* prepare adapter->next_vport for next use */
	adapter->next_vport = idpf_get_free_slot(adapter);

	return vport;
}

/**
 * idpf_get_stats64 - get statistics for network device structure
 * @netdev: network interface device structure
 * @stats: main device statistics structure
 */
static void idpf_get_stats64(struct net_device *netdev,
			     struct rtnl_link_stats64 *stats)
{
	struct idpf_netdev_priv *np = netdev_priv(netdev);

	spin_lock_bh(&np->stats_lock);
	*stats = np->netstats;
	spin_unlock_bh(&np->stats_lock);
}

/**
 * idpf_statistics_task - Delayed task to get statistics over mailbox
 * @work: work_struct handle to our data
 */
void idpf_statistics_task(struct work_struct *work)
{
	struct idpf_adapter *adapter;
	int i;

	adapter = container_of(work, struct idpf_adapter, stats_task.work);

	for (i = 0; i < adapter->max_vports; i++) {
		struct idpf_vport *vport = adapter->vports[i];

		if (vport && !test_bit(IDPF_HR_RESET_IN_PROG, adapter->flags))
			idpf_send_get_stats_msg(vport);
	}

	queue_delayed_work(adapter->stats_wq, &adapter->stats_task,
			   msecs_to_jiffies(10000));
}

/**
 * idpf_mbx_task - Delayed task to handle mailbox responses
 * @work: work_struct handle
 */
void idpf_mbx_task(struct work_struct *work)
{
	struct idpf_adapter *adapter;

	adapter = container_of(work, struct idpf_adapter, mbx_task.work);

	if (test_bit(IDPF_MB_INTR_MODE, adapter->flags))
		idpf_mb_irq_enable(adapter);
	else
		queue_delayed_work(adapter->mbx_wq, &adapter->mbx_task,
				   msecs_to_jiffies(300));

	idpf_recv_mb_msg(adapter, VIRTCHNL2_OP_UNKNOWN, NULL, 0);
}

/**
 * idpf_service_task - Delayed task for handling periodic service work
 * @work: work_struct handle to our data
 *
 * Detects a pending hardware reset and schedules the reset handling task,
 * then reschedules itself.
 */
void idpf_service_task(struct work_struct *work)
{
	struct idpf_adapter *adapter;

	adapter = container_of(work, struct idpf_adapter, serv_task.work);

	if (idpf_is_reset_detected(adapter) &&
	    !idpf_is_reset_in_prog(adapter) &&
	    !test_bit(IDPF_REMOVE_IN_PROG, adapter->flags)) {
		dev_info(&adapter->pdev->dev, "HW reset detected\n");
		set_bit(IDPF_HR_FUNC_RESET, adapter->flags);
		queue_delayed_work(adapter->vc_event_wq,
				   &adapter->vc_event_task,
				   msecs_to_jiffies(10));
	}

	queue_delayed_work(adapter->serv_wq, &adapter->serv_task,
			   msecs_to_jiffies(300));
}

/**
 * idpf_restore_features - Restore feature configs
 * @vport: virtual port structure
 */
static void idpf_restore_features(struct idpf_vport *vport)
{
	struct idpf_adapter *adapter = vport->adapter;

	if (idpf_is_cap_ena(adapter, IDPF_OTHER_CAPS, VIRTCHNL2_CAP_MACFILTER))
		idpf_restore_mac_filters(vport);
}

/**
 * idpf_set_real_num_queues - set number of queues for netdev
 * @vport: virtual port structure
 *
 * Returns 0 on success, negative on failure.
 */
static int idpf_set_real_num_queues(struct idpf_vport *vport)
{
	int err;

	err = netif_set_real_num_rx_queues(vport->netdev, vport->num_rxq);
	if (err)
		return err;

	return netif_set_real_num_tx_queues(vport->netdev, vport->num_txq);
}

/**
 * idpf_up_complete - Complete interface up sequence
 * @vport: virtual port structure
 *
 * Returns 0 on success, negative on failure.
 */
static int idpf_up_complete(struct idpf_vport *vport)
{
	struct idpf_netdev_priv *np = netdev_priv(vport->netdev);

	if (vport->link_up && !netif_carrier_ok(vport->netdev)) {
		netif_carrier_on(vport->netdev);
		netif_tx_start_all_queues(vport->netdev);
	}

	np->state = __IDPF_VPORT_UP;

	return 0;
}

/**
 * idpf_rx_init_buf_tail - Write initial buffer ring tail value
 * @vport: virtual port struct
 */
static void idpf_rx_init_buf_tail(struct idpf_vport *vport)
{
	int i, j;

	for (i = 0; i < vport->num_rxq_grp; i++) {
		struct idpf_rxq_group *grp = &vport->rxq_grps[i];

		if (idpf_is_queue_model_split(vport->rxq_model)) {
			for (j = 0; j < vport->num_bufqs_per_qgrp; j++) {
				struct idpf_queue *q =
					&grp->splitq.bufq_sets[j].bufq;

				writel(q->next_to_alloc, q->tail);
			}
		} else {
			for (j = 0; j < grp->singleq.num_rxq; j++) {
				struct idpf_queue *q =
					grp->singleq.rxqs[j];

				writel(q->next_to_alloc, q->tail);
			}
		}
	}
}

/**
 * idpf_vport_open - Bring up a vport
 * @vport: vport to bring up
 * @alloc_res: allocate queue resources
 */
static int idpf_vport_open(struct idpf_vport *vport, bool alloc_res)
{
	struct idpf_netdev_priv *np = netdev_priv(vport->netdev);
	struct idpf_adapter *adapter = vport->adapter;
	struct idpf_vport_config *vport_config;
	int err;

	if (np->state != __IDPF_VPORT_DOWN)
		return -EBUSY;

	/* we do not allow interface up just yet */
	netif_carrier_off(vport->netdev);

	if (alloc_res) {
		err = idpf_vport_queues_alloc(vport);
		if (err)
			return err;
	}

	err = idpf_vport_intr_alloc(vport);
	if (err) {
		dev_err(&adapter->pdev->dev, "Failed to allocate interrupts for vport %u: %d\n",
			vport->vport_id, err);
		goto queues_rel;
	}

	err = idpf_vport_queue_ids_init(vport);
	if (err) {
		dev_err(&adapter->pdev->dev, "Failed to initialize queue ids for vport %u: %d\n",
			vport->vport_id, err);
		goto intr_rel;
	}

	err = idpf_vport_intr_init(vport);
	if (err) {
		dev_err(&adapter->pdev->dev, "Failed to initialize interrupts for vport %u: %d\n",
			vport->vport_id, err);
		goto intr_rel;
	}

	err = idpf_rx_bufs_init_all(vport);
	if (err) {
		dev_err(&adapter->pdev->dev, "Failed to initialize RX buffers for vport %u: %d\n",
			vport->vport_id, err);
		goto intr_deinit;
	}

	err = idpf_queue_reg_init(vport);
	if (err) {
		dev_err(&adapter->pdev->dev, "Failed to initialize queue registers for vport %u: %d\n",
			vport->vport_id, err);
		goto intr_deinit;
	}

	idpf_rx_init_buf_tail(vport);

	err = idpf_send_config_queues_msg(vport);
	if (err) {
		dev_err(&adapter->pdev->dev, "Failed to configure queues for vport %u, %d\n",
			vport->vport_id, err);
		goto intr_deinit;
	}

	err = idpf_send_map_unmap_queue_vector_msg(vport, true);
	if (err) {
		dev_err(&adapter->pdev->dev, "Failed to map queue vectors for vport %u: %d\n",
			vport->vport_id, err);
		goto intr_deinit;
	}

	err = idpf_send_enable_queues_msg(vport);
	if (err) {
		dev_err(&adapter->pdev->dev, "Failed to enable queues for vport %u: %d\n",
			vport->vport_id, err);
		goto unmap_queue_vectors;
	}

	err = idpf_send_enable_vport_msg(vport);
	if (err) {
		dev_err(&adapter->pdev->dev, "Failed to enable vport %u: %d\n",
			vport->vport_id, err);
		err = -EAGAIN;
		goto disable_queues;
	}

	idpf_restore_features(vport);

	vport_config = adapter->vport_config[vport->idx];
	if (vport_config->user_config.rss_data.rss_lut)
		err = idpf_config_rss(vport);
	else
		err = idpf_init_rss(vport);
	if (err) {
		dev_err(&adapter->pdev->dev, "Failed to initialize RSS for vport %u: %d\n",
			vport->vport_id, err);
		goto disable_vport;
	}

	err = idpf_up_complete(vport);
	if (err) {
		dev_err(&adapter->pdev->dev, "Failed to complete interface up for vport %u: %d\n",
			vport->vport_id, err);
		goto deinit_rss;
	}

	return 0;

deinit_rss:
	idpf_deinit_rss(vport);
disable_vport:
	idpf_send_disable_vport_msg(vport);
disable_queues:
	idpf_send_disable_queues_msg(vport);
unmap_queue_vectors:
	idpf_send_map_unmap_queue_vector_msg(vport, false);
intr_deinit:
	idpf_vport_intr_deinit(vport);
intr_rel:
	idpf_vport_intr_rel(vport);
queues_rel:
	idpf_vport_queues_rel(vport);

	return err;
}

/**
 * idpf_init_task - Delayed initialization task
 * @work: work_struct handle to our data
 *
 * Init task finishes up pending work started in probe. Due to the asynchronous
 * nature in which the device communicates with hardware, we may have to wait
 * several milliseconds to get a response. Instead of busy polling in probe,
 * pulling it out into a delayed work task prevents us from bogging down the
 * whole system waiting for a response from hardware.
 */
void idpf_init_task(struct work_struct *work)
{
	struct idpf_vport_config *vport_config;
	struct idpf_vport_max_q max_q;
	struct idpf_adapter *adapter;
	struct idpf_netdev_priv *np;
	struct idpf_vport *vport;
	u16 num_default_vports;
	struct pci_dev *pdev;
	bool default_vport;
	int index, err;

	adapter = container_of(work, struct idpf_adapter, init_task.work);

	num_default_vports = idpf_get_default_vports(adapter);
	if (adapter->num_alloc_vports < num_default_vports)
		default_vport = true;
	else
		default_vport = false;

	err = idpf_vport_alloc_max_qs(adapter, &max_q);
	if (err)
		goto unwind_vports;

	err = idpf_send_create_vport_msg(adapter, &max_q);
	if (err) {
		idpf_vport_dealloc_max_qs(adapter, &max_q);
		goto unwind_vports;
	}

	pdev = adapter->pdev;
	vport = idpf_vport_alloc(adapter, &max_q);
	if (!vport) {
		err = -EFAULT;
		dev_err(&pdev->dev, "failed to allocate vport: %d\n",
			err);
		idpf_vport_dealloc_max_qs(adapter, &max_q);
		goto unwind_vports;
	}

	index = vport->idx;
	vport_config = adapter->vport_config[index];

	init_waitqueue_head(&vport->sw_marker_wq);
	init_waitqueue_head(&vport->vchnl_wq);

	mutex_init(&vport->vc_buf_lock);
	spin_lock_init(&vport_config->mac_filter_list_lock);

	INIT_LIST_HEAD(&vport_config->user_config.mac_filter_list);

	err = idpf_check_supported_desc_ids(vport);
	if (err) {
		dev_err(&pdev->dev, "failed to get required descriptor ids\n");
		goto cfg_netdev_err;
	}

	if (idpf_cfg_netdev(vport))
		goto cfg_netdev_err;

	err = idpf_send_get_rx_ptype_msg(vport);
	if (err)
		goto handle_err;

	/* Once state is put into DOWN, driver is ready for dev_open */
	np = netdev_priv(vport->netdev);
	np->state = __IDPF_VPORT_DOWN;
	if (test_and_clear_bit(IDPF_VPORT_UP_REQUESTED, vport_config->flags))
		idpf_vport_open(vport, true);

	/* Spawn and return 'idpf_init_task' work queue until all the
	 * default vports are created
	 */
	if (adapter->num_alloc_vports < num_default_vports) {
		queue_delayed_work(adapter->init_wq, &adapter->init_task,
				   msecs_to_jiffies(5 * (adapter->pdev->devfn & 0x07)));

		return;
	}

	for (index = 0; index < adapter->max_vports; index++) {
		if (adapter->netdevs[index] &&
		    !test_bit(IDPF_VPORT_REG_NETDEV,
			      adapter->vport_config[index]->flags)) {
			register_netdev(adapter->netdevs[index]);
			set_bit(IDPF_VPORT_REG_NETDEV,
				adapter->vport_config[index]->flags);
		}
	}

	/* As all the required vports are created, clear the reset flag
	 * unconditionally here in case we were in reset and the link was down.
	 */
	clear_bit(IDPF_HR_RESET_IN_PROG, adapter->flags);
	/* Start the statistics task now */
	queue_delayed_work(adapter->stats_wq, &adapter->stats_task,
			   msecs_to_jiffies(10 * (pdev->devfn & 0x07)));

	return;

handle_err:
	idpf_decfg_netdev(vport);
cfg_netdev_err:
	idpf_vport_rel(vport);
	adapter->vports[index] = NULL;
unwind_vports:
	if (default_vport) {
		for (index = 0; index < adapter->max_vports; index++) {
			if (adapter->vports[index])
				idpf_vport_dealloc(adapter->vports[index]);
		}
	}
	clear_bit(IDPF_HR_RESET_IN_PROG, adapter->flags);
}

/**
 * idpf_sriov_ena - Enable or change number of VFs
 * @adapter: private data struct
 * @num_vfs: number of VFs to allocate
 */
static int idpf_sriov_ena(struct idpf_adapter *adapter, int num_vfs)
{
	struct device *dev = &adapter->pdev->dev;
	int err;

	err = idpf_send_set_sriov_vfs_msg(adapter, num_vfs);
	if (err) {
		dev_err(dev, "Failed to allocate VFs: %d\n", err);

		return err;
	}

	err = pci_enable_sriov(adapter->pdev, num_vfs);
	if (err) {
		idpf_send_set_sriov_vfs_msg(adapter, 0);
		dev_err(dev, "Failed to enable SR-IOV: %d\n", err);

		return err;
	}

	adapter->num_vfs = num_vfs;

	return num_vfs;
}

/**
 * idpf_sriov_configure - Configure the requested VFs
 * @pdev: pointer to a pci_dev structure
 * @num_vfs: number of vfs to allocate
 *
 * Enable or change the number of VFs. Called when the user updates the number
 * of VFs in sysfs.
 */
int idpf_sriov_configure(struct pci_dev *pdev, int num_vfs)
{
	struct idpf_adapter *adapter = pci_get_drvdata(pdev);

	if (!idpf_is_cap_ena(adapter, IDPF_OTHER_CAPS, VIRTCHNL2_CAP_SRIOV)) {
		dev_info(&pdev->dev, "SR-IOV is not supported on this device\n");

		return -EOPNOTSUPP;
	}

	if (num_vfs)
		return idpf_sriov_ena(adapter, num_vfs);

	if (pci_vfs_assigned(pdev)) {
		dev_warn(&pdev->dev, "Unable to free VFs because some are assigned to VMs\n");

		return -EBUSY;
	}

	pci_disable_sriov(adapter->pdev);
	idpf_send_set_sriov_vfs_msg(adapter, 0);
	adapter->num_vfs = 0;

	return 0;
}

/**
 * idpf_deinit_task - Device deinit routine
 * @adapter: Driver specific private structure
 *
 * Extended remove logic which will be used for
 * hard reset as well
 */
void idpf_deinit_task(struct idpf_adapter *adapter)
{
	unsigned int i;

	/* Wait until the init_task is done, else this thread might release
	 * the resources first and the other thread might end up in a bad state
	 */
	cancel_delayed_work_sync(&adapter->init_task);

	if (!adapter->vports)
		return;

	cancel_delayed_work_sync(&adapter->stats_task);

	for (i = 0; i < adapter->max_vports; i++) {
		if (adapter->vports[i])
			idpf_vport_dealloc(adapter->vports[i]);
	}
}

/**
 * idpf_check_reset_complete - check that reset is complete
 * @hw: pointer to hw struct
 * @reset_reg: struct with reset registers
 *
 * Returns 0 if device is ready to use, or -EBUSY if it's in reset.
 */
static int idpf_check_reset_complete(struct idpf_hw *hw,
				     struct idpf_reset_reg *reset_reg)
{
	struct idpf_adapter *adapter = hw->back;
	int i;

	for (i = 0; i < 2000; i++) {
		u32 reg_val = readl(reset_reg->rstat);

		/* 0xFFFFFFFF might be read if other side hasn't cleared the
		 * register for us yet and 0xFFFFFFFF is not a valid value for
		 * the register, so treat that as invalid.
		 */
		if (reg_val != 0xFFFFFFFF && (reg_val & reset_reg->rstat_m))
			return 0;

		usleep_range(5000, 10000);
	}

	dev_warn(&adapter->pdev->dev, "Device reset timeout!\n");

	/* Clear the reset flag unconditionally here since the reset
	 * technically isn't in progress anymore from the driver's perspective
	 */
	clear_bit(IDPF_HR_RESET_IN_PROG, adapter->flags);

	return -EBUSY;
}

/**
 * idpf_set_vport_state - Set the vport state to be after the reset
 * @adapter: Driver specific private structure
 */
static void idpf_set_vport_state(struct idpf_adapter *adapter)
{
	u16 i;

	for (i = 0; i < adapter->max_vports; i++) {
		struct idpf_netdev_priv *np;

		if (!adapter->netdevs[i])
			continue;

		np = netdev_priv(adapter->netdevs[i]);
		if (np->state == __IDPF_VPORT_UP)
			set_bit(IDPF_VPORT_UP_REQUESTED,
				adapter->vport_config[i]->flags);
	}
}

/**
 * idpf_init_hard_reset - Initiate a hardware reset
 * @adapter: Driver specific private structure
 *
 * Deallocate the vports and all the resources associated with them and
 * reallocate. Also reinitialize the mailbox. Return 0 on success,
 * negative on failure.
 */
static int idpf_init_hard_reset(struct idpf_adapter *adapter)
{
	struct idpf_reg_ops *reg_ops = &adapter->dev_ops.reg_ops;
	struct device *dev = &adapter->pdev->dev;
	struct net_device *netdev;
	int err;
	u16 i;

	mutex_lock(&adapter->vport_ctrl_lock);

	dev_info(dev, "Device HW Reset initiated\n");

	/* Avoid TX hangs on reset */
	for (i = 0; i < adapter->max_vports; i++) {
		netdev = adapter->netdevs[i];
		if (!netdev)
			continue;

		netif_carrier_off(netdev);
		netif_tx_disable(netdev);
	}

	/* Prepare for reset */
	if (test_and_clear_bit(IDPF_HR_DRV_LOAD, adapter->flags)) {
		reg_ops->trigger_reset(adapter, IDPF_HR_DRV_LOAD);
	} else if (test_and_clear_bit(IDPF_HR_FUNC_RESET, adapter->flags)) {
		bool is_reset = idpf_is_reset_detected(adapter);

		idpf_set_vport_state(adapter);
		idpf_vc_core_deinit(adapter);
		if (!is_reset)
			reg_ops->trigger_reset(adapter, IDPF_HR_FUNC_RESET);
		idpf_deinit_dflt_mbx(adapter);
	} else {
		dev_err(dev, "Unhandled hard reset cause\n");
		err = -EBADRQC;
		goto unlock_mutex;
	}

	/* Wait for reset to complete */
	err = idpf_check_reset_complete(&adapter->hw, &adapter->reset_reg);
	if (err) {
		dev_err(dev, "The driver was unable to contact the device's firmware. Check that the FW is running. Driver state= 0x%x\n",
			adapter->state);
		goto unlock_mutex;
	}

	/* Reset is complete and so start building the driver resources again */
	err = idpf_init_dflt_mbx(adapter);
	if (err) {
		dev_err(dev, "Failed to initialize default mailbox: %d\n", err);
		goto unlock_mutex;
	}

	/* Initialize the state machine, also allocate memory and request
	 * resources
	 */
	err = idpf_vc_core_init(adapter);
	if (err) {
		idpf_deinit_dflt_mbx(adapter);
		goto unlock_mutex;
	}

	/* Wait till all the vports are initialized to release the reset lock,
	 * else user space callbacks may access uninitialized vports
	 */
	while (test_bit(IDPF_HR_RESET_IN_PROG, adapter->flags))
		msleep(100);

unlock_mutex:
	mutex_unlock(&adapter->vport_ctrl_lock);

	return err;
}

/**
 * idpf_vc_event_task - Handle virtchannel event logic
 * @work: work queue struct
 */
void idpf_vc_event_task(struct work_struct *work)
{
	struct idpf_adapter *adapter;

	adapter = container_of(work, struct idpf_adapter, vc_event_task.work);

	if (test_bit(IDPF_REMOVE_IN_PROG, adapter->flags))
		return;

	if (test_bit(IDPF_HR_FUNC_RESET, adapter->flags) ||
	    test_bit(IDPF_HR_DRV_LOAD, adapter->flags)) {
		set_bit(IDPF_HR_RESET_IN_PROG, adapter->flags);
		idpf_init_hard_reset(adapter);
	}
}

/**
 * idpf_initiate_soft_reset - Initiate a software reset
 * @vport: virtual port data struct
 * @reset_cause: reason for the soft reset
 *
 * Soft reset only reallocs vport queue resources. Returns 0 on success,
 * negative on failure.
 */
int idpf_initiate_soft_reset(struct idpf_vport *vport,
			     enum idpf_vport_reset_cause reset_cause)
{
	struct idpf_netdev_priv *np = netdev_priv(vport->netdev);
	enum idpf_vport_state current_state = np->state;
	struct idpf_adapter *adapter = vport->adapter;
	struct idpf_vport *new_vport;
	int err, i;

	/* If the system is low on memory, we can end up in bad state if we
	 * free all the memory for queue resources and try to allocate them
	 * again. Instead, we can pre-allocate the new resources before doing
	 * anything and bailing if the alloc fails.
	 *
	 * Make a clone of the existing vport to mimic its current
	 * configuration, then modify the new structure with any requested
	 * changes. Once the allocation of the new resources is done, stop the
	 * existing vport and copy the configuration to the main vport. If an
	 * error occurred, the existing vport will be untouched.
	 */
	new_vport = kzalloc(sizeof(*vport), GFP_KERNEL);
	if (!new_vport)
		return -ENOMEM;

	/* This purposely avoids copying the end of the struct because it
	 * contains wait_queues and mutexes and other stuff we don't want to
	 * mess with. Nothing below should use those variables from new_vport
	 * and should instead always refer to them in vport if they need to.
	 */
	memcpy(new_vport, vport, offsetof(struct idpf_vport, vc_state));

	/* Adjust resource parameters prior to reallocating resources */
	switch (reset_cause) {
	case IDPF_SR_Q_CHANGE:
		err = idpf_vport_adjust_qs(new_vport);
		if (err)
			goto free_vport;
		break;
	case IDPF_SR_Q_DESC_CHANGE:
		/* Update queue parameters before allocating resources */
		idpf_vport_calc_num_q_desc(new_vport);
		break;
	case IDPF_SR_MTU_CHANGE:
	case IDPF_SR_RSC_CHANGE:
		break;
	default:
		dev_err(&adapter->pdev->dev, "Unhandled soft reset cause\n");
		err = -EINVAL;
		goto free_vport;
	}

	err = idpf_vport_queues_alloc(new_vport);
	if (err)
		goto free_vport;
	if (current_state <= __IDPF_VPORT_DOWN) {
		idpf_send_delete_queues_msg(vport);
	} else {
		set_bit(IDPF_VPORT_DEL_QUEUES, vport->flags);
		idpf_vport_stop(vport);
	}

	idpf_deinit_rss(vport);
	/* We're passing in vport here because we need its wait_queue
	 * to send a message and it should be getting all the vport
	 * config data out of the adapter but we need to be careful not
	 * to add code to add_queues to change the vport config within
	 * vport itself as it will be wiped with a memcpy later.
	 */
	err = idpf_send_add_queues_msg(vport, new_vport->num_txq,
				       new_vport->num_complq,
				       new_vport->num_rxq,
				       new_vport->num_bufq);
	if (err)
		goto err_reset;

	/* Same comment as above regarding avoiding copying the wait_queues and
	 * mutexes applies here. We do not want to mess with those if possible.
	 */
	memcpy(vport, new_vport, offsetof(struct idpf_vport, vc_state));

	/* Since idpf_vport_queues_alloc was called with new_vport, the queue
	 * back pointers are currently pointing to the local new_vport. Reset
	 * the backpointers to the original vport here
	 */
	for (i = 0; i < vport->num_txq_grp; i++) {
		struct idpf_txq_group *tx_qgrp = &vport->txq_grps[i];
		int j;

		tx_qgrp->vport = vport;
		for (j = 0; j < tx_qgrp->num_txq; j++)
			tx_qgrp->txqs[j]->vport = vport;

		if (idpf_is_queue_model_split(vport->txq_model))
			tx_qgrp->complq->vport = vport;
	}

	for (i = 0; i < vport->num_rxq_grp; i++) {
		struct idpf_rxq_group *rx_qgrp = &vport->rxq_grps[i];
		struct idpf_queue *q;
		u16 num_rxq;
		int j;

		rx_qgrp->vport = vport;
		for (j = 0; j < vport->num_bufqs_per_qgrp; j++)
			rx_qgrp->splitq.bufq_sets[j].bufq.vport = vport;

		if (idpf_is_queue_model_split(vport->rxq_model))
			num_rxq = rx_qgrp->splitq.num_rxq_sets;
		else
			num_rxq = rx_qgrp->singleq.num_rxq;

		for (j = 0; j < num_rxq; j++) {
			if (idpf_is_queue_model_split(vport->rxq_model))
				q = &rx_qgrp->splitq.rxq_sets[j]->rxq;
			else
				q = rx_qgrp->singleq.rxqs[j];
			q->vport = vport;
		}
	}

	if (reset_cause == IDPF_SR_Q_CHANGE)
		idpf_vport_alloc_vec_indexes(vport);

	err = idpf_set_real_num_queues(vport);
	if (err)
		goto err_reset;

	if (current_state == __IDPF_VPORT_UP)
		err = idpf_vport_open(vport, false);

	kfree(new_vport);

	return err;

err_reset:
	idpf_vport_queues_rel(new_vport);
free_vport:
	kfree(new_vport);

	return err;
}

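/* Sketch of the clone-and-swap boundary used above: every member of
 * struct idpf_vport laid out before vc_state is plain configuration that
 * may be copied wholesale with
 *
 *   memcpy(dst, src, offsetof(struct idpf_vport, vc_state));
 *
 * while vc_state and everything after it (wait queues, mutexes) must only
 * ever be used via the original vport. New fields added to the struct
 * need to be placed relative to vc_state with this rule in mind.
 */
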
/**
 * idpf_addr_sync - Callback for dev_(mc|uc)_sync to add address
 * @netdev: the netdevice
 * @addr: address to add
 *
 * Called by __dev_(mc|uc)_sync when an address needs to be added. We call
 * __dev_(uc|mc)_sync from .set_rx_mode. Kernel takes addr_list_lock spinlock
 * meaning we cannot sleep in this context. Due to this, we have to add the
 * filter and send the virtchnl message asynchronously without waiting for the
 * response from the other side. We won't know whether or not the operation
 * actually succeeded until we get the message back. Returns 0 on success,
 * negative on failure.
 */
static int idpf_addr_sync(struct net_device *netdev, const u8 *addr)
{
	struct idpf_netdev_priv *np = netdev_priv(netdev);

	return idpf_add_mac_filter(np->vport, np, addr, true);
}

/**
 * idpf_addr_unsync - Callback for dev_(mc|uc)_sync to remove address
 * @netdev: the netdevice
 * @addr: address to remove
 *
 * Called by __dev_(mc|uc)_sync when an address needs to be removed. We call
 * __dev_(uc|mc)_sync from .set_rx_mode. Kernel takes addr_list_lock spinlock
 * meaning we cannot sleep in this context. Due to this we have to delete the
 * filter and send the virtchnl message asynchronously without waiting for the
 * return from the other side. We won't know whether or not the operation
 * actually succeeded until we get the message back. Returns 0 on success,
 * negative on failure.
 */
static int idpf_addr_unsync(struct net_device *netdev, const u8 *addr)
{
	struct idpf_netdev_priv *np = netdev_priv(netdev);

	/* Under some circumstances, we might receive a request to delete
	 * our own device address from our uc list. Because we store the
	 * device address in the VSI's MAC filter list, we need to ignore
	 * such requests and not delete our device address from this list.
	 */
	if (ether_addr_equal(addr, netdev->dev_addr))
		return 0;

	idpf_del_mac_filter(np->vport, np, addr, true);

	return 0;
}

/**
 * idpf_set_rx_mode - NDO callback to set the netdev filters
 * @netdev: network interface device structure
 *
 * Stack takes addr_list_lock spinlock before calling our .set_rx_mode. We
 * cannot sleep in this context.
 */
static void idpf_set_rx_mode(struct net_device *netdev)
{
	struct idpf_netdev_priv *np = netdev_priv(netdev);
	struct idpf_vport_user_config_data *config_data;
	struct idpf_adapter *adapter;
	bool changed = false;
	struct device *dev;
	int err;

	adapter = np->adapter;
	dev = &adapter->pdev->dev;

	if (idpf_is_cap_ena(adapter, IDPF_OTHER_CAPS, VIRTCHNL2_CAP_MACFILTER)) {
		__dev_uc_sync(netdev, idpf_addr_sync, idpf_addr_unsync);
		__dev_mc_sync(netdev, idpf_addr_sync, idpf_addr_unsync);
	}

	if (!idpf_is_cap_ena(adapter, IDPF_OTHER_CAPS, VIRTCHNL2_CAP_PROMISC))
		return;

	config_data = &adapter->vport_config[np->vport_idx]->user_config;
	/* IFF_PROMISC enables both unicast and multicast promiscuous,
	 * while IFF_ALLMULTI only enables multicast such that:
	 *
	 * promisc  + allmulti  = unicast | multicast
	 * promisc  + !allmulti = unicast | multicast
	 * !promisc + allmulti  = multicast
	 */
	if ((netdev->flags & IFF_PROMISC) &&
	    !test_and_set_bit(__IDPF_PROMISC_UC, config_data->user_flags)) {
		changed = true;
		dev_info(&adapter->pdev->dev, "Entering promiscuous mode\n");
		if (!test_and_set_bit(__IDPF_PROMISC_MC, config_data->user_flags))
			dev_info(dev, "Entering multicast promiscuous mode\n");
	}

	if (!(netdev->flags & IFF_PROMISC) &&
	    test_and_clear_bit(__IDPF_PROMISC_UC, config_data->user_flags)) {
		changed = true;
		dev_info(dev, "Leaving promiscuous mode\n");
	}

	if (netdev->flags & IFF_ALLMULTI &&
	    !test_and_set_bit(__IDPF_PROMISC_MC, config_data->user_flags)) {
		changed = true;
		dev_info(dev, "Entering multicast promiscuous mode\n");
	}

	if (!(netdev->flags & (IFF_ALLMULTI | IFF_PROMISC)) &&
	    test_and_clear_bit(__IDPF_PROMISC_MC, config_data->user_flags)) {
		changed = true;
		dev_info(dev, "Leaving multicast promiscuous mode\n");
	}

	if (!changed)
		return;

	err = idpf_set_promiscuous(adapter, config_data, np->vport_id);
	if (err)
		dev_err(dev, "Failed to set promiscuous mode: %d\n", err);
}

/**
 * idpf_vport_manage_rss_lut - disable/enable RSS
 * @vport: the vport being changed
 *
 * In the event of disable request for RSS, this function will zero out RSS
 * LUT, while in the event of enable request for RSS, it will reconfigure RSS
 * LUT with the default LUT configuration.
 */
static int idpf_vport_manage_rss_lut(struct idpf_vport *vport)
{
	bool ena = idpf_is_feature_ena(vport, NETIF_F_RXHASH);
	struct idpf_rss_data *rss_data;
	u16 idx = vport->idx;
	int lut_size;

	rss_data = &vport->adapter->vport_config[idx]->user_config.rss_data;
	lut_size = rss_data->rss_lut_size * sizeof(u32);

	if (ena) {
		/* This will contain the default or user configured LUT */
		memcpy(rss_data->rss_lut, rss_data->cached_lut, lut_size);
	} else {
		/* Save a copy of the current LUT to be restored later if
		 * requested
		 */
		memcpy(rss_data->cached_lut, rss_data->rss_lut, lut_size);

		/* Zero out the current LUT to disable */
		memset(rss_data->rss_lut, 0, lut_size);
	}

	return idpf_config_rss(vport);
}

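/* Example flow: toggling rxhash off via ethtool copies the live LUT into
 * cached_lut and zeroes rss_lut, which steers all flows to queue 0;
 * toggling it back on restores cached_lut, so a user-configured table
 * survives a disable/enable cycle.
 */
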
/**
 * idpf_set_features - set the netdev feature flags
 * @netdev: ptr to the netdev being adjusted
 * @features: the feature set that the stack is suggesting
 */
static int idpf_set_features(struct net_device *netdev,
			     netdev_features_t features)
{
	netdev_features_t changed = netdev->features ^ features;
	struct idpf_adapter *adapter;
	struct idpf_vport *vport;
	int err = 0;

	idpf_vport_ctrl_lock(netdev);
	vport = idpf_netdev_to_vport(netdev);

	adapter = vport->adapter;

	if (idpf_is_reset_in_prog(adapter)) {
		dev_err(&adapter->pdev->dev, "Device is resetting, changing netdev features temporarily unavailable.\n");
		err = -EBUSY;
		goto unlock_mutex;
	}

	if (changed & NETIF_F_RXHASH) {
		netdev->features ^= NETIF_F_RXHASH;
		err = idpf_vport_manage_rss_lut(vport);
		if (err)
			goto unlock_mutex;
	}

	if (changed & NETIF_F_GRO_HW) {
		netdev->features ^= NETIF_F_GRO_HW;
		err = idpf_initiate_soft_reset(vport, IDPF_SR_RSC_CHANGE);
		if (err)
			goto unlock_mutex;
	}

	if (changed & NETIF_F_LOOPBACK) {
		netdev->features ^= NETIF_F_LOOPBACK;
		err = idpf_send_ena_dis_loopback_msg(vport);
	}

unlock_mutex:
	idpf_vport_ctrl_unlock(netdev);

	return err;
}

/**
 * idpf_open - Called when a network interface becomes active
 * @netdev: network interface device structure
 *
 * The open entry point is called when a network interface is made
 * active by the system (IFF_UP). At this point all resources needed
 * for transmit and receive operations are allocated, the interrupt
 * handler is registered with the OS, the netdev watchdog is enabled,
 * and the stack is notified that the interface is ready.
 *
 * Returns 0 on success, negative value on failure
 */
static int idpf_open(struct net_device *netdev)
{
	struct idpf_vport *vport;
	int err;

	idpf_vport_ctrl_lock(netdev);
	vport = idpf_netdev_to_vport(netdev);

	err = idpf_vport_open(vport, true);

	idpf_vport_ctrl_unlock(netdev);

	return err;
}

/**
 * idpf_change_mtu - NDO callback to change the MTU
 * @netdev: network interface device structure
 * @new_mtu: new value for maximum frame size
 *
 * Returns 0 on success, negative on failure
 */
static int idpf_change_mtu(struct net_device *netdev, int new_mtu)
{
	struct idpf_vport *vport;
	int err;

	idpf_vport_ctrl_lock(netdev);
	vport = idpf_netdev_to_vport(netdev);

	netdev->mtu = new_mtu;

	err = idpf_initiate_soft_reset(vport, IDPF_SR_MTU_CHANGE);

	idpf_vport_ctrl_unlock(netdev);

	return err;
}

/**
 * idpf_features_check - Validate packet conforms to limits
 * @skb: skb buffer
 * @netdev: This port's netdev
 * @features: Offload features that the stack believes apply
 */
static netdev_features_t idpf_features_check(struct sk_buff *skb,
					     struct net_device *netdev,
					     netdev_features_t features)
{
	struct idpf_vport *vport = idpf_netdev_to_vport(netdev);
	struct idpf_adapter *adapter = vport->adapter;
	size_t len;

	/* No point in doing any of this if neither checksum nor GSO are
	 * being requested for this frame. We can rule out both by just
	 * checking for CHECKSUM_PARTIAL
	 */
	if (skb->ip_summed != CHECKSUM_PARTIAL)
		return features;

	/* We cannot support GSO if the MSS is going to be less than
	 * 88 bytes. If it is then we need to drop support for GSO.
	 */
	if (skb_is_gso(skb) &&
	    (skb_shinfo(skb)->gso_size < IDPF_TX_TSO_MIN_MSS))
		features &= ~NETIF_F_GSO_MASK;

	/* Ensure MACLEN is <= 126 bytes (63 words) and not an odd size */
	len = skb_network_offset(skb);
	if (unlikely(len & ~(126)))
		goto unsupported;

	len = skb_network_header_len(skb);
	if (unlikely(len > idpf_get_max_tx_hdr_size(adapter)))
		goto unsupported;

	if (!skb->encapsulation)
		return features;

	/* L4TUNLEN can support 127 words */
	len = skb_inner_network_header(skb) - skb_transport_header(skb);
	if (unlikely(len & ~(127 * 2)))
		goto unsupported;

	/* IPLEN can support at most 127 dwords */
	len = skb_inner_network_header_len(skb);
	if (unlikely(len > idpf_get_max_tx_hdr_size(adapter)))
		goto unsupported;

	/* No need to validate L4LEN as TCP is the only protocol with a
	 * flexible value and we support all possible values supported
	 * by TCP, which is at most 15 dwords
	 */

	return features;

unsupported:
	return features & ~(NETIF_F_CSUM_MASK | NETIF_F_GSO_MASK);
}

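/* Worked example for the MACLEN check: ~126 masks in bit 0 and bits 7 and
 * up, so len = 14 (a plain Ethernet header) passes (14 & ~126 == 0),
 * len = 15 fails as an odd size (15 & ~126 == 1), and len = 128 fails the
 * range test (128 & ~126 == 128); both failures drop to the unsupported
 * path, clearing checksum and GSO offloads for the frame.
 */
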
/**
 * idpf_set_mac - NDO callback to set port mac address
 * @netdev: network interface device structure
 * @p: pointer to an address structure
 *
 * Returns 0 on success, negative on failure
 */
static int idpf_set_mac(struct net_device *netdev, void *p)
{
	struct idpf_netdev_priv *np = netdev_priv(netdev);
	struct idpf_vport_config *vport_config;
	struct sockaddr *addr = p;
	struct idpf_vport *vport;
	int err = 0;

	idpf_vport_ctrl_lock(netdev);
	vport = idpf_netdev_to_vport(netdev);

	if (!idpf_is_cap_ena(vport->adapter, IDPF_OTHER_CAPS,
			     VIRTCHNL2_CAP_MACFILTER)) {
		dev_info(&vport->adapter->pdev->dev, "Setting MAC address is not supported\n");
		err = -EOPNOTSUPP;
		goto unlock_mutex;
	}

	if (!is_valid_ether_addr(addr->sa_data)) {
		dev_info(&vport->adapter->pdev->dev, "Invalid MAC address: %pM\n",
			 addr->sa_data);
		err = -EADDRNOTAVAIL;
		goto unlock_mutex;
	}

	if (ether_addr_equal(netdev->dev_addr, addr->sa_data))
		goto unlock_mutex;

	vport_config = vport->adapter->vport_config[vport->idx];
	err = idpf_add_mac_filter(vport, np, addr->sa_data, false);
	if (err) {
		__idpf_del_mac_filter(vport_config, addr->sa_data);
		goto unlock_mutex;
	}

	if (is_valid_ether_addr(vport->default_mac_addr))
		idpf_del_mac_filter(vport, np, vport->default_mac_addr, false);

	ether_addr_copy(vport->default_mac_addr, addr->sa_data);
	eth_hw_addr_set(netdev, addr->sa_data);

unlock_mutex:
	idpf_vport_ctrl_unlock(netdev);

	return err;
}

/**
 * idpf_alloc_dma_mem - Allocate dma memory
 * @hw: pointer to hw struct
 * @mem: pointer to dma_mem struct
 * @size: size of the memory to allocate
 */
void *idpf_alloc_dma_mem(struct idpf_hw *hw, struct idpf_dma_mem *mem, u64 size)
{
	struct idpf_adapter *adapter = hw->back;
	size_t sz = ALIGN(size, 4096);

	mem->va = dma_alloc_coherent(&adapter->pdev->dev, sz,
				     &mem->pa, GFP_KERNEL);
	mem->size = sz;

	return mem->va;
}

/**
 * idpf_free_dma_mem - Free the allocated dma memory
 * @hw: pointer to hw struct
 * @mem: pointer to dma_mem struct
 */
void idpf_free_dma_mem(struct idpf_hw *hw, struct idpf_dma_mem *mem)
{
	struct idpf_adapter *adapter = hw->back;

	dma_free_coherent(&adapter->pdev->dev, mem->size,
			  mem->va, mem->pa);
	mem->size = 0;
	mem->va = NULL;
	mem->pa = 0;
}

static const struct net_device_ops idpf_netdev_ops_splitq = {
	.ndo_open = idpf_open,
	.ndo_stop = idpf_stop,
	.ndo_start_xmit = idpf_tx_splitq_start,
	.ndo_features_check = idpf_features_check,
	.ndo_set_rx_mode = idpf_set_rx_mode,
	.ndo_validate_addr = eth_validate_addr,
	.ndo_set_mac_address = idpf_set_mac,
	.ndo_change_mtu = idpf_change_mtu,
	.ndo_get_stats64 = idpf_get_stats64,
	.ndo_set_features = idpf_set_features,
	.ndo_tx_timeout = idpf_tx_timeout,
};

static const struct net_device_ops idpf_netdev_ops_singleq = {
	.ndo_open = idpf_open,
	.ndo_stop = idpf_stop,
	.ndo_start_xmit = idpf_tx_singleq_start,
	.ndo_features_check = idpf_features_check,
	.ndo_set_rx_mode = idpf_set_rx_mode,
	.ndo_validate_addr = eth_validate_addr,
	.ndo_set_mac_address = idpf_set_mac,
	.ndo_change_mtu = idpf_change_mtu,
	.ndo_get_stats64 = idpf_get_stats64,
	.ndo_set_features = idpf_set_features,
	.ndo_tx_timeout = idpf_tx_timeout,
};