/**************************************************************************/
/* IBM System i and System p Virtual NIC Device Driver */
/* Copyright (C) 2014 IBM Corp. */
/* */
/* This program is free software; you can redistribute it and/or modify */
/* it under the terms of the GNU General Public License as published by */
/* the Free Software Foundation; either version 2 of the License, or */
/* (at your option) any later version. */
/* */
/* This program is distributed in the hope that it will be useful, */
/* but WITHOUT ANY WARRANTY; without even the implied warranty of */
/* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the */
/* GNU General Public License for more details. */
/* */
/* You should have received a copy of the GNU General Public License */
/* along with this program. */
/* */
/* This module contains the implementation of a virtual ethernet device */
/* for use with IBM i/p Series LPAR Linux. It utilizes the logical LAN */
/* option of the RS/6000 Platform Architecture to interface with virtual */
/* ethernet NICs that are presented to the partition by the hypervisor. */
/* */
/* Messages are passed between the VNIC driver and the VNIC server using */
/* Command/Response Queues (CRQs) and sub CRQs (sCRQs). CRQs are used to */
/* issue and receive commands that initiate communication with the server */
/* on driver initialization. Sub CRQs (sCRQs) are similar to CRQs, but */
/* are used by the driver to notify the server that a packet is */
/* ready for transmission or that a buffer has been added to receive a */
/* packet. Subsequently, sCRQs are used by the server to notify the */
/* driver that a packet transmission has been completed or that a packet */
/* has been received and placed in a waiting buffer. */
/* */
/* In lieu of a more conventional "on-the-fly" DMA mapping strategy in */
/* which skbs are DMA mapped and immediately unmapped when the transmit */
/* or receive has been completed, the VNIC driver is required to use */
/* "long term mapping". This entails that large, contiguous DMA-mapped */
/* buffers are allocated on driver initialization and these buffers are */
/* then continuously reused to pass skbs to and from the VNIC server. */
/* */
/**************************************************************************/
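/*
 * Added commentary -- a minimal sketch of the long term mapping scheme
 * used throughout this file (values illustrative): every rx/tx buffer
 * lives at a fixed offset inside one large pre-mapped region, so no
 * per-packet dma_map_single()/dma_unmap_single() is required:
 *
 *     offset   = index * pool->buff_size;
 *     dst      = pool->long_term_buff.buff + offset;   (CPU address)
 *     dma_addr = pool->long_term_buff.addr + offset;   (IOBA for firmware)
 *
 * See replenish_rx_pool() and ibmvnic_xmit() below for the real uses.
 */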
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/types.h>
#include <linux/errno.h>
#include <linux/completion.h>
#include <linux/ioport.h>
#include <linux/dma-mapping.h>
#include <linux/kernel.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/init.h>
#include <linux/delay.h>
#include <linux/mm.h>
#include <linux/ethtool.h>
#include <linux/proc_fs.h>
#include <linux/if_arp.h>
#include <linux/in.h>
#include <linux/ip.h>
#include <linux/ipv6.h>
#include <linux/irq.h>
#include <linux/kthread.h>
#include <linux/seq_file.h>
#include <linux/interrupt.h>
#include <net/net_namespace.h>
#include <asm/hvcall.h>
#include <linux/atomic.h>
#include <asm/vio.h>
#include <asm/iommu.h>
#include <linux/uaccess.h>
#include <asm/firmware.h>
#include <linux/workqueue.h>
#include <linux/if_vlan.h>
#include <linux/utsname.h>

#include "ibmvnic.h"
static const char ibmvnic_driver_name[] = "ibmvnic";
static const char ibmvnic_driver_string[] = "IBM System i/p Virtual NIC Driver";

MODULE_AUTHOR("Santiago Leon");
MODULE_DESCRIPTION("IBM System i/p Virtual NIC Driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(IBMVNIC_DRIVER_VERSION);
static int ibmvnic_version = IBMVNIC_INITIAL_VERSION;
static int ibmvnic_remove(struct vio_dev *);
static void release_sub_crqs(struct ibmvnic_adapter *);
static int ibmvnic_reset_crq(struct ibmvnic_adapter *);
static int ibmvnic_send_crq_init(struct ibmvnic_adapter *);
static int ibmvnic_reenable_crq_queue(struct ibmvnic_adapter *);
static int ibmvnic_send_crq(struct ibmvnic_adapter *, union ibmvnic_crq *);
static int send_subcrq(struct ibmvnic_adapter *adapter, u64 remote_handle,
                       union sub_crq *sub_crq);
static int send_subcrq_indirect(struct ibmvnic_adapter *, u64, u64, u64);
static irqreturn_t ibmvnic_interrupt_rx(int irq, void *instance);
static int enable_scrq_irq(struct ibmvnic_adapter *,
                           struct ibmvnic_sub_crq_queue *);
static int disable_scrq_irq(struct ibmvnic_adapter *,
                            struct ibmvnic_sub_crq_queue *);
static int pending_scrq(struct ibmvnic_adapter *,
                        struct ibmvnic_sub_crq_queue *);
static union sub_crq *ibmvnic_next_scrq(struct ibmvnic_adapter *,
                                        struct ibmvnic_sub_crq_queue *);
static int ibmvnic_poll(struct napi_struct *napi, int budget);
static void send_map_query(struct ibmvnic_adapter *adapter);
static void send_request_map(struct ibmvnic_adapter *, dma_addr_t, __be32, u8);
static void send_request_unmap(struct ibmvnic_adapter *, u8);
static void send_login(struct ibmvnic_adapter *adapter);
static void send_cap_queries(struct ibmvnic_adapter *adapter);
static int init_sub_crqs(struct ibmvnic_adapter *);
static int init_sub_crq_irqs(struct ibmvnic_adapter *adapter);
static int ibmvnic_init(struct ibmvnic_adapter *);
static void release_crq_queue(struct ibmvnic_adapter *);
static int __ibmvnic_set_mac(struct net_device *netdev, struct sockaddr *p);
struct ibmvnic_stat {
    char name[ETH_GSTRING_LEN];
    int offset;
};

#define IBMVNIC_STAT_OFF(stat) (offsetof(struct ibmvnic_adapter, stats) + \
                offsetof(struct ibmvnic_statistics, stat))
#define IBMVNIC_GET_STAT(a, off) (*((u64 *)(((unsigned long)(a)) + off)))
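/*
 * Added commentary: the offset recorded at build time by
 * IBMVNIC_STAT_OFF() is added to the adapter pointer at run time by
 * IBMVNIC_GET_STAT() to fetch one 64-bit counter, e.g. (mirroring
 * ibmvnic_get_ethtool_stats() below):
 *
 *     u64 v = be64_to_cpu(IBMVNIC_GET_STAT(adapter,
 *                                          IBMVNIC_STAT_OFF(rx_packets)));
 */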
static const struct ibmvnic_stat ibmvnic_stats[] = {
    {"rx_packets", IBMVNIC_STAT_OFF(rx_packets)},
    {"rx_bytes", IBMVNIC_STAT_OFF(rx_bytes)},
    {"tx_packets", IBMVNIC_STAT_OFF(tx_packets)},
    {"tx_bytes", IBMVNIC_STAT_OFF(tx_bytes)},
    {"ucast_tx_packets", IBMVNIC_STAT_OFF(ucast_tx_packets)},
    {"ucast_rx_packets", IBMVNIC_STAT_OFF(ucast_rx_packets)},
    {"mcast_tx_packets", IBMVNIC_STAT_OFF(mcast_tx_packets)},
    {"mcast_rx_packets", IBMVNIC_STAT_OFF(mcast_rx_packets)},
    {"bcast_tx_packets", IBMVNIC_STAT_OFF(bcast_tx_packets)},
    {"bcast_rx_packets", IBMVNIC_STAT_OFF(bcast_rx_packets)},
    {"align_errors", IBMVNIC_STAT_OFF(align_errors)},
    {"fcs_errors", IBMVNIC_STAT_OFF(fcs_errors)},
    {"single_collision_frames", IBMVNIC_STAT_OFF(single_collision_frames)},
    {"multi_collision_frames", IBMVNIC_STAT_OFF(multi_collision_frames)},
    {"sqe_test_errors", IBMVNIC_STAT_OFF(sqe_test_errors)},
    {"deferred_tx", IBMVNIC_STAT_OFF(deferred_tx)},
    {"late_collisions", IBMVNIC_STAT_OFF(late_collisions)},
    {"excess_collisions", IBMVNIC_STAT_OFF(excess_collisions)},
    {"internal_mac_tx_errors", IBMVNIC_STAT_OFF(internal_mac_tx_errors)},
    {"carrier_sense", IBMVNIC_STAT_OFF(carrier_sense)},
    {"too_long_frames", IBMVNIC_STAT_OFF(too_long_frames)},
    {"internal_mac_rx_errors", IBMVNIC_STAT_OFF(internal_mac_rx_errors)},
};
static long h_reg_sub_crq(unsigned long unit_address, unsigned long token,
                          unsigned long length, unsigned long *number,
                          unsigned long *irq)
{
    unsigned long retbuf[PLPAR_HCALL_BUFSIZE];
    long rc;

    rc = plpar_hcall(H_REG_SUB_CRQ, retbuf, unit_address, token, length);
    *number = retbuf[0];
    *irq = retbuf[1];

    return rc;
}
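/*
 * Added commentary: on success, the hypervisor returns the new sub-CRQ's
 * queue number in retbuf[0] and its interrupt source in retbuf[1];
 * init_sub_crq_queue() below stores these in scrq->crq_num and
 * scrq->hw_irq.
 */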
static int alloc_long_term_buff(struct ibmvnic_adapter *adapter,
                                struct ibmvnic_long_term_buff *ltb, int size)
{
    struct device *dev = &adapter->vdev->dev;

    ltb->size = size;
    ltb->buff = dma_alloc_coherent(dev, ltb->size, &ltb->addr,
                                   GFP_KERNEL);
    if (!ltb->buff) {
        dev_err(dev, "Couldn't alloc long term buffer\n");
        return -ENOMEM;
    }
    ltb->map_id = adapter->map_id;
    adapter->map_id++;

    init_completion(&adapter->fw_done);
    send_request_map(adapter, ltb->addr,
                     ltb->size, ltb->map_id);
    wait_for_completion(&adapter->fw_done);

    if (adapter->fw_done_rc) {
        dev_err(dev, "Couldn't map long term buffer, rc = %d\n",
                adapter->fw_done_rc);
        return -EIO;
    }
    return 0;
}
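/*
 * Added commentary: on FAILOVER or MOBILITY resets the firmware side of
 * the connection is already gone, so free_long_term_buff() skips the
 * REQUEST_UNMAP for the stale map_id and only frees the coherent buffer
 * locally.
 */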
static void free_long_term_buff(struct ibmvnic_adapter *adapter,
                                struct ibmvnic_long_term_buff *ltb)
{
    struct device *dev = &adapter->vdev->dev;

    if (!ltb->buff)
        return;

    if (adapter->reset_reason != VNIC_RESET_FAILOVER &&
        adapter->reset_reason != VNIC_RESET_MOBILITY)
        send_request_unmap(adapter, ltb->map_id);
    dma_free_coherent(dev, ltb->size, ltb->buff, ltb->addr);
}
static int reset_long_term_buff(struct ibmvnic_adapter *adapter,
                                struct ibmvnic_long_term_buff *ltb)
{
    memset(ltb->buff, 0, ltb->size);

    init_completion(&adapter->fw_done);
    send_request_map(adapter, ltb->addr, ltb->size, ltb->map_id);
    wait_for_completion(&adapter->fw_done);

    if (adapter->fw_done_rc) {
        dev_info(&adapter->vdev->dev,
                 "Reset failed, attempting to free and reallocate buffer\n");
        free_long_term_buff(adapter, ltb);
        return alloc_long_term_buff(adapter, ltb, ltb->size);
    }
    return 0;
}
static void deactivate_rx_pools(struct ibmvnic_adapter *adapter)
{
    int i;

    for (i = 0; i < be32_to_cpu(adapter->login_rsp_buf->num_rxadd_subcrqs);
         i++)
        adapter->rx_pool[i].active = 0;
}
static void replenish_rx_pool(struct ibmvnic_adapter *adapter,
                              struct ibmvnic_rx_pool *pool)
{
    int count = pool->size - atomic_read(&pool->available);
    struct device *dev = &adapter->vdev->dev;
    int buffers_added = 0;
    unsigned long lpar_rc;
    union sub_crq sub_crq;
    struct sk_buff *skb;
    unsigned int offset;
    dma_addr_t dma_addr;
    unsigned char *dst;
    u64 *handle_array;
    int shift = 0;
    int index;
    int i;

    handle_array = (u64 *)((u8 *)(adapter->login_rsp_buf) +
                           be32_to_cpu(adapter->login_rsp_buf->
                                       off_rxadd_subcrqs));

    for (i = 0; i < count; ++i) {
        skb = alloc_skb(pool->buff_size, GFP_ATOMIC);
        if (!skb) {
            dev_err(dev, "Couldn't replenish rx buff\n");
            adapter->replenish_no_mem++;
            break;
        }

        index = pool->free_map[pool->next_free];

        if (pool->rx_buff[index].skb)
            dev_err(dev, "Inconsistent free_map!\n");

        /* Copy the skb to the long term mapped DMA buffer */
        offset = index * pool->buff_size;
        dst = pool->long_term_buff.buff + offset;
        memset(dst, 0, pool->buff_size);
        dma_addr = pool->long_term_buff.addr + offset;
        pool->rx_buff[index].data = dst;

        pool->free_map[pool->next_free] = IBMVNIC_INVALID_MAP;
        pool->rx_buff[index].dma = dma_addr;
        pool->rx_buff[index].skb = skb;
        pool->rx_buff[index].pool_index = pool->index;
        pool->rx_buff[index].size = pool->buff_size;

        memset(&sub_crq, 0, sizeof(sub_crq));
        sub_crq.rx_add.first = IBMVNIC_CRQ_CMD;
        sub_crq.rx_add.correlator =
            cpu_to_be64((u64)&pool->rx_buff[index]);
        sub_crq.rx_add.ioba = cpu_to_be32(dma_addr);
        sub_crq.rx_add.map_id = pool->long_term_buff.map_id;

        /* The length field of the sCRQ is defined to be 24 bits so the
         * buffer size needs to be left shifted by a byte before it is
         * converted to big endian to prevent the last byte from being
         * parsed.
         */
#ifdef __LITTLE_ENDIAN__
        shift = 8;
#endif
        sub_crq.rx_add.len = cpu_to_be32(pool->buff_size << shift);
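        /* Worked example (added; size assumed): for a 4 KiB buffer,
         * pool->buff_size = 0x1000 becomes 0x00100000 after the shift,
         * so the three high-order bytes of the big-endian word carry the
         * 24-bit length and the unused low byte is ignored by firmware.
         */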
        lpar_rc = send_subcrq(adapter, handle_array[pool->index],
                              &sub_crq);
        if (lpar_rc != H_SUCCESS)
            goto failure;

        buffers_added++;
        adapter->replenish_add_buff_success++;
        pool->next_free = (pool->next_free + 1) % pool->size;
    }
    atomic_add(buffers_added, &pool->available);
    return;

failure:
    dev_info(dev, "replenish pools failure\n");
    pool->free_map[pool->next_free] = index;
    pool->rx_buff[index].skb = NULL;
    if (!dma_mapping_error(dev, dma_addr))
        dma_unmap_single(dev, dma_addr, pool->buff_size,
                         DMA_FROM_DEVICE);

    dev_kfree_skb_any(skb);
    adapter->replenish_add_buff_failure++;
    atomic_add(buffers_added, &pool->available);

    if (lpar_rc == H_CLOSED) {
        /* Disable buffer pool replenishment and report carrier off if
         * queue is closed. Firmware guarantees that a signal will
         * be sent to the driver, triggering a reset.
         */
        deactivate_rx_pools(adapter);
        netif_carrier_off(adapter->netdev);
    }
}
static void replenish_pools(struct ibmvnic_adapter *adapter)
{
    int i;

    adapter->replenish_task_cycles++;
    for (i = 0; i < be32_to_cpu(adapter->login_rsp_buf->num_rxadd_subcrqs);
         i++) {
        if (adapter->rx_pool[i].active)
            replenish_rx_pool(adapter, &adapter->rx_pool[i]);
    }
}
static void release_stats_buffers(struct ibmvnic_adapter *adapter)
{
    kfree(adapter->tx_stats_buffers);
    kfree(adapter->rx_stats_buffers);
}
static int init_stats_buffers(struct ibmvnic_adapter *adapter)
{
    adapter->tx_stats_buffers =
        kcalloc(adapter->req_tx_queues,
                sizeof(struct ibmvnic_tx_queue_stats),
                GFP_KERNEL);
    if (!adapter->tx_stats_buffers)
        return -ENOMEM;

    adapter->rx_stats_buffers =
        kcalloc(adapter->req_rx_queues,
                sizeof(struct ibmvnic_rx_queue_stats),
                GFP_KERNEL);
    if (!adapter->rx_stats_buffers)
        return -ENOMEM;

    return 0;
}
static void release_stats_token(struct ibmvnic_adapter *adapter)
{
    struct device *dev = &adapter->vdev->dev;

    if (!adapter->stats_token)
        return;

    dma_unmap_single(dev, adapter->stats_token,
                     sizeof(struct ibmvnic_statistics),
                     DMA_FROM_DEVICE);
    adapter->stats_token = 0;
}
static int init_stats_token(struct ibmvnic_adapter *adapter)
{
    struct device *dev = &adapter->vdev->dev;
    dma_addr_t stok;

    stok = dma_map_single(dev, &adapter->stats,
                          sizeof(struct ibmvnic_statistics),
                          DMA_FROM_DEVICE);
    if (dma_mapping_error(dev, stok)) {
        dev_err(dev, "Couldn't map stats buffer\n");
        return -1;
    }

    adapter->stats_token = stok;
    netdev_dbg(adapter->netdev, "Stats token initialized (%llx)\n", stok);
    return 0;
}
static int reset_rx_pools(struct ibmvnic_adapter *adapter)
{
    struct ibmvnic_rx_pool *rx_pool;
    u64 *size_array;
    int rx_scrqs;
    int i, j, rc;

    size_array = (u64 *)((u8 *)(adapter->login_rsp_buf) +
        be32_to_cpu(adapter->login_rsp_buf->off_rxadd_buff_size));

    rx_scrqs = be32_to_cpu(adapter->login_rsp_buf->num_rxadd_subcrqs);
    for (i = 0; i < rx_scrqs; i++) {
        rx_pool = &adapter->rx_pool[i];

        netdev_dbg(adapter->netdev, "Re-setting rx_pool[%d]\n", i);

        if (rx_pool->buff_size != be64_to_cpu(size_array[i])) {
            free_long_term_buff(adapter, &rx_pool->long_term_buff);
            rx_pool->buff_size = be64_to_cpu(size_array[i]);
            rc = alloc_long_term_buff(adapter, &rx_pool->long_term_buff,
                                      rx_pool->size * rx_pool->buff_size);
        } else {
            rc = reset_long_term_buff(adapter,
                                      &rx_pool->long_term_buff);
        }

        if (rc)
            return rc;

        for (j = 0; j < rx_pool->size; j++)
            rx_pool->free_map[j] = j;

        memset(rx_pool->rx_buff, 0,
               rx_pool->size * sizeof(struct ibmvnic_rx_buff));

        atomic_set(&rx_pool->available, 0);
        rx_pool->next_alloc = 0;
        rx_pool->next_free = 0;
    }

    return 0;
}
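/*
 * Added commentary: the reset_* helpers above reuse the long term
 * buffers across a reset (re-sending REQUEST_MAP for the same memory),
 * while the release_* helpers below tear everything down; do_reset()
 * chooses between the two paths depending on whether the negotiated
 * queue geometry changed.
 */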
static void release_rx_pools(struct ibmvnic_adapter *adapter)
{
    struct ibmvnic_rx_pool *rx_pool;
    int i, j;

    if (!adapter->rx_pool)
        return;

    for (i = 0; i < adapter->num_active_rx_pools; i++) {
        rx_pool = &adapter->rx_pool[i];

        netdev_dbg(adapter->netdev, "Releasing rx_pool[%d]\n", i);

        kfree(rx_pool->free_map);
        free_long_term_buff(adapter, &rx_pool->long_term_buff);

        if (!rx_pool->rx_buff)
            continue;

        for (j = 0; j < rx_pool->size; j++) {
            if (rx_pool->rx_buff[j].skb) {
                dev_kfree_skb_any(rx_pool->rx_buff[j].skb);
                rx_pool->rx_buff[j].skb = NULL;
            }
        }

        kfree(rx_pool->rx_buff);
    }

    kfree(adapter->rx_pool);
    adapter->rx_pool = NULL;
    adapter->num_active_rx_pools = 0;
}
static int init_rx_pools(struct net_device *netdev)
{
    struct ibmvnic_adapter *adapter = netdev_priv(netdev);
    struct device *dev = &adapter->vdev->dev;
    struct ibmvnic_rx_pool *rx_pool;
    int rxadd_subcrqs;
    u64 *size_array;
    int i, j;

    rxadd_subcrqs =
        be32_to_cpu(adapter->login_rsp_buf->num_rxadd_subcrqs);
    size_array = (u64 *)((u8 *)(adapter->login_rsp_buf) +
        be32_to_cpu(adapter->login_rsp_buf->off_rxadd_buff_size));

    adapter->rx_pool = kcalloc(rxadd_subcrqs,
                               sizeof(struct ibmvnic_rx_pool),
                               GFP_KERNEL);
    if (!adapter->rx_pool) {
        dev_err(dev, "Failed to allocate rx pools\n");
        return -1;
    }

    adapter->num_active_rx_pools = 0;

    for (i = 0; i < rxadd_subcrqs; i++) {
        rx_pool = &adapter->rx_pool[i];

        netdev_dbg(adapter->netdev,
                   "Initializing rx_pool[%d], %lld buffs, %lld bytes each\n",
                   i, adapter->req_rx_add_entries_per_subcrq,
                   be64_to_cpu(size_array[i]));

        rx_pool->size = adapter->req_rx_add_entries_per_subcrq;
        rx_pool->index = i;
        rx_pool->buff_size = be64_to_cpu(size_array[i]);
        rx_pool->active = 1;

        rx_pool->free_map = kcalloc(rx_pool->size, sizeof(int),
                                    GFP_KERNEL);
        if (!rx_pool->free_map) {
            release_rx_pools(adapter);
            return -1;
        }

        rx_pool->rx_buff = kcalloc(rx_pool->size,
                                   sizeof(struct ibmvnic_rx_buff),
                                   GFP_KERNEL);
        if (!rx_pool->rx_buff) {
            dev_err(dev, "Couldn't alloc rx buffers\n");
            release_rx_pools(adapter);
            return -1;
        }

        if (alloc_long_term_buff(adapter, &rx_pool->long_term_buff,
                                 rx_pool->size * rx_pool->buff_size)) {
            release_rx_pools(adapter);
            return -1;
        }

        for (j = 0; j < rx_pool->size; ++j)
            rx_pool->free_map[j] = j;

        atomic_set(&rx_pool->available, 0);
        rx_pool->next_alloc = 0;
        rx_pool->next_free = 0;
    }

    adapter->num_active_rx_pools = rxadd_subcrqs;

    return 0;
}
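/*
 * Illustrative arithmetic (added; values assumed): with
 * req_rx_add_entries_per_subcrq = 512 and a 4 KiB buffer size, each rx
 * pool backs its buffers with a single 2 MiB long term buffer
 * (512 * 4096 bytes), carved into fixed per-index slots.
 */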
static int reset_tx_pools(struct ibmvnic_adapter *adapter)
{
    struct ibmvnic_tx_pool *tx_pool;
    int tx_scrqs;
    int i, j, rc;

    tx_scrqs = be32_to_cpu(adapter->login_rsp_buf->num_txsubm_subcrqs);
    for (i = 0; i < tx_scrqs; i++) {
        netdev_dbg(adapter->netdev, "Re-setting tx_pool[%d]\n", i);

        tx_pool = &adapter->tx_pool[i];

        rc = reset_long_term_buff(adapter, &tx_pool->long_term_buff);
        if (rc)
            return rc;

        rc = reset_long_term_buff(adapter, &tx_pool->tso_ltb);
        if (rc)
            return rc;

        memset(tx_pool->tx_buff, 0,
               adapter->req_tx_entries_per_subcrq *
               sizeof(struct ibmvnic_tx_buff));

        for (j = 0; j < adapter->req_tx_entries_per_subcrq; j++)
            tx_pool->free_map[j] = j;

        tx_pool->consumer_index = 0;
        tx_pool->producer_index = 0;
        tx_pool->tso_index = 0;
    }

    return 0;
}
static void release_vpd_data(struct ibmvnic_adapter *adapter)
{
    if (!adapter->vpd)
        return;

    kfree(adapter->vpd->buff);
    kfree(adapter->vpd);
}
static void release_tx_pools(struct ibmvnic_adapter *adapter)
{
    struct ibmvnic_tx_pool *tx_pool;
    int i;

    if (!adapter->tx_pool)
        return;

    for (i = 0; i < adapter->num_active_tx_pools; i++) {
        netdev_dbg(adapter->netdev, "Releasing tx_pool[%d]\n", i);
        tx_pool = &adapter->tx_pool[i];
        kfree(tx_pool->tx_buff);
        free_long_term_buff(adapter, &tx_pool->long_term_buff);
        free_long_term_buff(adapter, &tx_pool->tso_ltb);
        kfree(tx_pool->free_map);
    }

    kfree(adapter->tx_pool);
    adapter->tx_pool = NULL;
    adapter->num_active_tx_pools = 0;
}
static int init_tx_pools(struct net_device *netdev)
{
    struct ibmvnic_adapter *adapter = netdev_priv(netdev);
    struct device *dev = &adapter->vdev->dev;
    struct ibmvnic_tx_pool *tx_pool;
    int tx_subcrqs;
    int i, j;

    tx_subcrqs = be32_to_cpu(adapter->login_rsp_buf->num_txsubm_subcrqs);
    adapter->tx_pool = kcalloc(tx_subcrqs,
                               sizeof(struct ibmvnic_tx_pool), GFP_KERNEL);
    if (!adapter->tx_pool)
        return -1;

    adapter->num_active_tx_pools = 0;

    for (i = 0; i < tx_subcrqs; i++) {
        tx_pool = &adapter->tx_pool[i];

        netdev_dbg(adapter->netdev,
                   "Initializing tx_pool[%d], %lld buffs\n",
                   i, adapter->req_tx_entries_per_subcrq);

        tx_pool->tx_buff = kcalloc(adapter->req_tx_entries_per_subcrq,
                                   sizeof(struct ibmvnic_tx_buff),
                                   GFP_KERNEL);
        if (!tx_pool->tx_buff) {
            dev_err(dev, "tx pool buffer allocation failed\n");
            release_tx_pools(adapter);
            return -1;
        }

        if (alloc_long_term_buff(adapter, &tx_pool->long_term_buff,
                                 adapter->req_tx_entries_per_subcrq *
                                 adapter->req_mtu)) {
            release_tx_pools(adapter);
            return -1;
        }

        /* alloc TSO ltb */
        if (alloc_long_term_buff(adapter, &tx_pool->tso_ltb,
                                 IBMVNIC_TSO_BUFS *
                                 IBMVNIC_TSO_BUF_SZ)) {
            release_tx_pools(adapter);
            return -1;
        }

        tx_pool->tso_index = 0;

        tx_pool->free_map = kcalloc(adapter->req_tx_entries_per_subcrq,
                                    sizeof(int), GFP_KERNEL);
        if (!tx_pool->free_map) {
            release_tx_pools(adapter);
            return -1;
        }

        for (j = 0; j < adapter->req_tx_entries_per_subcrq; j++)
            tx_pool->free_map[j] = j;

        tx_pool->consumer_index = 0;
        tx_pool->producer_index = 0;
    }

    adapter->num_active_tx_pools = tx_subcrqs;

    return 0;
}
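/*
 * Added commentary: TSO frames do not fit the req_mtu-sized slots of the
 * regular tx long term buffer, so each tx pool keeps a second LTB of
 * IBMVNIC_TSO_BUFS slots of IBMVNIC_TSO_BUF_SZ bytes each;
 * ibmvnic_xmit() switches between the two based on skb_is_gso().
 */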
static void release_error_buffers(struct ibmvnic_adapter *adapter)
{
    struct device *dev = &adapter->vdev->dev;
    struct ibmvnic_error_buff *error_buff, *tmp;
    unsigned long flags;

    spin_lock_irqsave(&adapter->error_list_lock, flags);
    list_for_each_entry_safe(error_buff, tmp, &adapter->errors, list) {
        list_del(&error_buff->list);
        dma_unmap_single(dev, error_buff->dma, error_buff->len,
                         DMA_FROM_DEVICE);
        kfree(error_buff->buff);
        kfree(error_buff);
    }
    spin_unlock_irqrestore(&adapter->error_list_lock, flags);
}
static void ibmvnic_napi_enable(struct ibmvnic_adapter *adapter)
{
    int i;

    if (adapter->napi_enabled)
        return;

    for (i = 0; i < adapter->req_rx_queues; i++)
        napi_enable(&adapter->napi[i]);

    adapter->napi_enabled = true;
}
static void ibmvnic_napi_disable(struct ibmvnic_adapter *adapter)
{
    int i;

    if (!adapter->napi_enabled)
        return;

    for (i = 0; i < adapter->req_rx_queues; i++) {
        netdev_dbg(adapter->netdev, "Disabling napi[%d]\n", i);
        napi_disable(&adapter->napi[i]);
    }

    adapter->napi_enabled = false;
}
static int ibmvnic_login(struct net_device *netdev)
{
    struct ibmvnic_adapter *adapter = netdev_priv(netdev);
    unsigned long timeout = msecs_to_jiffies(30000);
    struct device *dev = &adapter->vdev->dev;
    int rc;

    do {
        if (adapter->renegotiate) {
            adapter->renegotiate = false;
            release_sub_crqs(adapter);

            reinit_completion(&adapter->init_done);
            send_cap_queries(adapter);
            if (!wait_for_completion_timeout(&adapter->init_done,
                                             timeout)) {
                dev_err(dev, "Capabilities query timeout\n");
                return -1;
            }
            rc = init_sub_crqs(adapter);
            if (rc) {
                dev_err(dev,
                        "Initialization of SCRQ's failed\n");
                return -1;
            }
            rc = init_sub_crq_irqs(adapter);
            if (rc) {
                dev_err(dev,
                        "Initialization of SCRQ's irqs failed\n");
                return -1;
            }
        }

        reinit_completion(&adapter->init_done);
        send_login(adapter);
        if (!wait_for_completion_timeout(&adapter->init_done,
                                         timeout)) {
            dev_err(dev, "Login timeout\n");
            return -1;
        }
    } while (adapter->renegotiate);

    /* handle pending MAC address changes after successful login */
    if (adapter->mac_change_pending) {
        __ibmvnic_set_mac(netdev, &adapter->desired.mac);
        adapter->mac_change_pending = false;
    }

    return 0;
}
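/*
 * Added commentary: the login loop above retries whenever the server
 * sets adapter->renegotiate, which forces the sub-CRQs to be torn down
 * and capabilities to be re-negotiated before the next LOGIN attempt.
 */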
static void release_resources(struct ibmvnic_adapter *adapter)
{
    int i;

    release_vpd_data(adapter);

    release_tx_pools(adapter);
    release_rx_pools(adapter);

    release_stats_token(adapter);
    release_stats_buffers(adapter);
    release_error_buffers(adapter);

    if (adapter->napi) {
        for (i = 0; i < adapter->req_rx_queues; i++) {
            netdev_dbg(adapter->netdev,
                       "Releasing napi[%d]\n", i);
            netif_napi_del(&adapter->napi[i]);
        }
    }
}
static int set_link_state(struct ibmvnic_adapter *adapter, u8 link_state)
{
    struct net_device *netdev = adapter->netdev;
    unsigned long timeout = msecs_to_jiffies(30000);
    union ibmvnic_crq crq;
    bool resend;
    int rc;

    netdev_dbg(netdev, "setting link state %d\n", link_state);

    memset(&crq, 0, sizeof(crq));
    crq.logical_link_state.first = IBMVNIC_CRQ_CMD;
    crq.logical_link_state.cmd = LOGICAL_LINK_STATE;
    crq.logical_link_state.link_state = link_state;

    do {
        resend = false;

        reinit_completion(&adapter->init_done);
        rc = ibmvnic_send_crq(adapter, &crq);
        if (rc) {
            netdev_err(netdev, "Failed to set link state\n");
            return rc;
        }

        if (!wait_for_completion_timeout(&adapter->init_done,
                                         timeout)) {
            netdev_err(netdev, "timeout setting link state\n");
            return -1;
        }

        if (adapter->init_done_rc == 1) {
            /* Partial success, delay and re-send */
            mdelay(1000);
            resend = true;
        }
    } while (resend);

    return 0;
}
static int set_real_num_queues(struct net_device *netdev)
{
    struct ibmvnic_adapter *adapter = netdev_priv(netdev);
    int rc;

    netdev_dbg(netdev, "Setting real tx/rx queues (%llx/%llx)\n",
               adapter->req_tx_queues, adapter->req_rx_queues);

    rc = netif_set_real_num_tx_queues(netdev, adapter->req_tx_queues);
    if (rc) {
        netdev_err(netdev, "failed to set the number of tx queues\n");
        return rc;
    }

    rc = netif_set_real_num_rx_queues(netdev, adapter->req_rx_queues);
    if (rc)
        netdev_err(netdev, "failed to set the number of rx queues\n");

    return rc;
}
static int ibmvnic_get_vpd(struct ibmvnic_adapter *adapter)
{
    struct device *dev = &adapter->vdev->dev;
    union ibmvnic_crq crq;
    int len = 0;

    if (adapter->vpd->buff)
        len = adapter->vpd->len;

    init_completion(&adapter->fw_done);
    crq.get_vpd_size.first = IBMVNIC_CRQ_CMD;
    crq.get_vpd_size.cmd = GET_VPD_SIZE;
    ibmvnic_send_crq(adapter, &crq);
    wait_for_completion(&adapter->fw_done);

    if (!adapter->vpd->len)
        return -ENODATA;

    if (!adapter->vpd->buff)
        adapter->vpd->buff = kzalloc(adapter->vpd->len, GFP_KERNEL);
    else if (adapter->vpd->len != len)
        adapter->vpd->buff =
            krealloc(adapter->vpd->buff,
                     adapter->vpd->len, GFP_KERNEL);

    if (!adapter->vpd->buff) {
        dev_err(dev, "Could not allocate VPD buffer\n");
        return -ENOMEM;
    }

    adapter->vpd->dma_addr =
        dma_map_single(dev, adapter->vpd->buff, adapter->vpd->len,
                       DMA_FROM_DEVICE);
    if (dma_mapping_error(dev, adapter->vpd->dma_addr)) {
        dev_err(dev, "Could not map VPD buffer\n");
        kfree(adapter->vpd->buff);
        adapter->vpd->buff = NULL;
        return -ENOMEM;
    }

    reinit_completion(&adapter->fw_done);
    crq.get_vpd.first = IBMVNIC_CRQ_CMD;
    crq.get_vpd.cmd = GET_VPD;
    crq.get_vpd.ioba = cpu_to_be32(adapter->vpd->dma_addr);
    crq.get_vpd.len = cpu_to_be32((u32)adapter->vpd->len);
    ibmvnic_send_crq(adapter, &crq);
    wait_for_completion(&adapter->fw_done);

    return 0;
}
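/*
 * Added commentary: VPD retrieval is a two-step CRQ exchange -- a
 * GET_VPD_SIZE command first fills in adapter->vpd->len, then GET_VPD
 * passes the IOBA of a buffer of that length for firmware to fill.
 */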
static int init_resources(struct ibmvnic_adapter *adapter)
{
    struct net_device *netdev = adapter->netdev;
    int i, rc;

    rc = set_real_num_queues(netdev);
    if (rc)
        return rc;

    rc = init_stats_buffers(adapter);
    if (rc)
        return rc;

    rc = init_stats_token(adapter);
    if (rc)
        return rc;

    adapter->vpd = kzalloc(sizeof(*adapter->vpd), GFP_KERNEL);
    if (!adapter->vpd)
        return -ENOMEM;

    /* Vital Product Data (VPD) */
    rc = ibmvnic_get_vpd(adapter);
    if (rc) {
        netdev_err(netdev, "failed to initialize Vital Product Data (VPD)\n");
        return rc;
    }

    adapter->napi = kcalloc(adapter->req_rx_queues,
                            sizeof(struct napi_struct), GFP_KERNEL);
    if (!adapter->napi)
        return -ENOMEM;

    for (i = 0; i < adapter->req_rx_queues; i++) {
        netdev_dbg(netdev, "Adding napi[%d]\n", i);
        netif_napi_add(netdev, &adapter->napi[i], ibmvnic_poll,
                       NAPI_POLL_WEIGHT);
    }

    send_map_query(adapter);

    rc = init_rx_pools(netdev);
    if (rc)
        return rc;

    rc = init_tx_pools(netdev);
    return rc;
}
static int __ibmvnic_open(struct net_device *netdev)
{
    struct ibmvnic_adapter *adapter = netdev_priv(netdev);
    enum vnic_state prev_state = adapter->state;
    int i, rc;

    adapter->state = VNIC_OPENING;
    replenish_pools(adapter);
    ibmvnic_napi_enable(adapter);

    /* We're ready to receive frames, enable the sub-crq interrupts and
     * set the logical link state to up
     */
    for (i = 0; i < adapter->req_rx_queues; i++) {
        netdev_dbg(netdev, "Enabling rx_scrq[%d] irq\n", i);
        if (prev_state == VNIC_CLOSED)
            enable_irq(adapter->rx_scrq[i]->irq);
        else
            enable_scrq_irq(adapter, adapter->rx_scrq[i]);
    }

    for (i = 0; i < adapter->req_tx_queues; i++) {
        netdev_dbg(netdev, "Enabling tx_scrq[%d] irq\n", i);
        if (prev_state == VNIC_CLOSED)
            enable_irq(adapter->tx_scrq[i]->irq);
        else
            enable_scrq_irq(adapter, adapter->tx_scrq[i]);
    }

    rc = set_link_state(adapter, IBMVNIC_LOGICAL_LNK_UP);
    if (rc) {
        for (i = 0; i < adapter->req_rx_queues; i++)
            napi_disable(&adapter->napi[i]);
        release_resources(adapter);
        return rc;
    }

    netif_tx_start_all_queues(netdev);

    if (prev_state == VNIC_CLOSED) {
        for (i = 0; i < adapter->req_rx_queues; i++)
            napi_schedule(&adapter->napi[i]);
    }

    adapter->state = VNIC_OPEN;
    return rc;
}
static int ibmvnic_open(struct net_device *netdev)
{
    struct ibmvnic_adapter *adapter = netdev_priv(netdev);
    int rc;

    mutex_lock(&adapter->reset_lock);

    if (adapter->state != VNIC_CLOSED) {
        rc = ibmvnic_login(netdev);
        if (rc) {
            mutex_unlock(&adapter->reset_lock);
            return rc;
        }

        rc = init_resources(adapter);
        if (rc) {
            netdev_err(netdev, "failed to initialize resources\n");
            release_resources(adapter);
            mutex_unlock(&adapter->reset_lock);
            return rc;
        }
    }

    rc = __ibmvnic_open(netdev);
    netif_carrier_on(netdev);

    mutex_unlock(&adapter->reset_lock);

    return rc;
}
static void clean_tx_pools(struct ibmvnic_adapter *adapter)
{
    struct ibmvnic_tx_pool *tx_pool;
    u64 tx_entries;
    int tx_scrqs;
    int i, j;

    if (!adapter->tx_pool)
        return;

    tx_scrqs = be32_to_cpu(adapter->login_rsp_buf->num_txsubm_subcrqs);
    tx_entries = adapter->req_tx_entries_per_subcrq;

    /* Free any remaining skbs in the tx buffer pools */
    for (i = 0; i < tx_scrqs; i++) {
        tx_pool = &adapter->tx_pool[i];
        if (!tx_pool)
            continue;

        netdev_dbg(adapter->netdev, "Cleaning tx_pool[%d]\n", i);
        for (j = 0; j < tx_entries; j++) {
            if (tx_pool->tx_buff[j].skb) {
                dev_kfree_skb_any(tx_pool->tx_buff[j].skb);
                tx_pool->tx_buff[j].skb = NULL;
            }
        }
    }
}
static int __ibmvnic_close(struct net_device *netdev)
{
    struct ibmvnic_adapter *adapter = netdev_priv(netdev);
    int rc = 0;
    int i;

    adapter->state = VNIC_CLOSING;

    /* ensure that transmissions are stopped if called by do_reset */
    if (adapter->resetting)
        netif_tx_disable(netdev);
    else
        netif_tx_stop_all_queues(netdev);

    ibmvnic_napi_disable(adapter);

    if (adapter->tx_scrq) {
        for (i = 0; i < adapter->req_tx_queues; i++)
            if (adapter->tx_scrq[i]->irq) {
                netdev_dbg(adapter->netdev,
                           "Disabling tx_scrq[%d] irq\n", i);
                disable_irq(adapter->tx_scrq[i]->irq);
            }
    }

    rc = set_link_state(adapter, IBMVNIC_LOGICAL_LNK_DN);
    if (rc)
        return rc;

    if (adapter->rx_scrq) {
        for (i = 0; i < adapter->req_rx_queues; i++) {
            int retries = 10;

            while (pending_scrq(adapter, adapter->rx_scrq[i])) {
                if (--retries == 0)
                    break;

                msleep(100);
            }

            if (adapter->rx_scrq[i]->irq) {
                netdev_dbg(adapter->netdev,
                           "Disabling rx_scrq[%d] irq\n", i);
                disable_irq(adapter->rx_scrq[i]->irq);
            }
        }
    }

    clean_tx_pools(adapter);
    adapter->state = VNIC_CLOSED;
    return rc;
}
static int ibmvnic_close(struct net_device *netdev)
{
    struct ibmvnic_adapter *adapter = netdev_priv(netdev);
    int rc;

    mutex_lock(&adapter->reset_lock);
    rc = __ibmvnic_close(netdev);
    mutex_unlock(&adapter->reset_lock);

    return rc;
}
/**
 * build_hdr_data - creates L2/L3/L4 header data buffer
 * @hdr_field - bitfield determining needed headers
 * @skb - socket buffer
 * @hdr_len - array of header lengths
 * @hdr_data - buffer to write the header data into
 *
 * Reads hdr_field to determine which headers are needed by firmware.
 * Builds a buffer containing these headers.  Saves individual header
 * lengths and total buffer length to be used to build descriptors.
 */
static int build_hdr_data(u8 hdr_field, struct sk_buff *skb,
                          int *hdr_len, u8 *hdr_data)
{
    int len = 0;
    u8 *hdr;

    hdr_len[0] = sizeof(struct ethhdr);

    if (skb->protocol == htons(ETH_P_IP)) {
        hdr_len[1] = ip_hdr(skb)->ihl * 4;
        if (ip_hdr(skb)->protocol == IPPROTO_TCP)
            hdr_len[2] = tcp_hdrlen(skb);
        else if (ip_hdr(skb)->protocol == IPPROTO_UDP)
            hdr_len[2] = sizeof(struct udphdr);
    } else if (skb->protocol == htons(ETH_P_IPV6)) {
        hdr_len[1] = sizeof(struct ipv6hdr);
        if (ipv6_hdr(skb)->nexthdr == IPPROTO_TCP)
            hdr_len[2] = tcp_hdrlen(skb);
        else if (ipv6_hdr(skb)->nexthdr == IPPROTO_UDP)
            hdr_len[2] = sizeof(struct udphdr);
    } else if (skb->protocol == htons(ETH_P_ARP)) {
        hdr_len[1] = arp_hdr_len(skb->dev);
        hdr_len[2] = 0;
    }

    memset(hdr_data, 0, 120);
    if ((hdr_field >> 6) & 1) {
        hdr = skb_mac_header(skb);
        memcpy(hdr_data, hdr, hdr_len[0]);
        len += hdr_len[0];
    }

    if ((hdr_field >> 5) & 1) {
        hdr = skb_network_header(skb);
        memcpy(hdr_data + len, hdr, hdr_len[1]);
        len += hdr_len[1];
    }

    if ((hdr_field >> 4) & 1) {
        hdr = skb_transport_header(skb);
        memcpy(hdr_data + len, hdr, hdr_len[2]);
        len += hdr_len[2];
    }
    return len;
}
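/*
 * Added commentary: in hdr_field, bit 6 requests the L2 header, bit 5
 * the L3 header and bit 4 the L4 header; bit 7 (tested in
 * ibmvnic_xmit()) indicates whether header descriptors are needed at
 * all.
 */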
/**
 * create_hdr_descs - create header and header extension descriptors
 * @hdr_field - bitfield determining needed headers
 * @hdr_data - buffer containing header data
 * @len - length of data buffer
 * @hdr_len - array of individual header lengths
 * @scrq_arr - descriptor array
 *
 * Creates header and, if needed, header extension descriptors and
 * places them in a descriptor array, scrq_arr
 */
static int create_hdr_descs(u8 hdr_field, u8 *hdr_data, int len, int *hdr_len,
                            union sub_crq *scrq_arr)
{
    union sub_crq hdr_desc;
    int tmp_len = len;
    int num_descs = 0;
    u8 *data, *cur;
    int tmp;

    while (tmp_len > 0) {
        cur = hdr_data + len - tmp_len;

        memset(&hdr_desc, 0, sizeof(hdr_desc));
        if (cur != hdr_data) {
            data = hdr_desc.hdr_ext.data;
            tmp = tmp_len > 29 ? 29 : tmp_len;
            hdr_desc.hdr_ext.first = IBMVNIC_CRQ_CMD;
            hdr_desc.hdr_ext.type = IBMVNIC_HDR_EXT_DESC;
            hdr_desc.hdr_ext.len = tmp;
        } else {
            data = hdr_desc.hdr.data;
            tmp = tmp_len > 24 ? 24 : tmp_len;
            hdr_desc.hdr.first = IBMVNIC_CRQ_CMD;
            hdr_desc.hdr.type = IBMVNIC_HDR_DESC;
            hdr_desc.hdr.len = tmp;
            hdr_desc.hdr.l2_len = (u8)hdr_len[0];
            hdr_desc.hdr.l3_len = cpu_to_be16((u16)hdr_len[1]);
            hdr_desc.hdr.l4_len = (u8)hdr_len[2];
            hdr_desc.hdr.flag = hdr_field << 1;
        }
        memcpy(data, cur, tmp);
        tmp_len -= tmp;
        *scrq_arr = hdr_desc;
        scrq_arr++;
        num_descs++;
    }

    return num_descs;
}
/**
 * build_hdr_descs_arr - build a header descriptor array
 * @txbuff - tx buffer containing the skb and descriptor array
 * @num_entries - number of descriptors to be sent
 * @hdr_field - bit field determining which headers will be sent
 *
 * This function will build a TX descriptor array with applicable
 * L2/L3/L4 packet header descriptors to be sent by send_subcrq_indirect.
 */
static void build_hdr_descs_arr(struct ibmvnic_tx_buff *txbuff,
                                int *num_entries, u8 hdr_field)
{
    int hdr_len[3] = {0, 0, 0};
    int tot_len;
    u8 *hdr_data = txbuff->hdr_data;

    tot_len = build_hdr_data(hdr_field, txbuff->skb, hdr_len,
                             txbuff->hdr_data);
    *num_entries += create_hdr_descs(hdr_field, hdr_data, tot_len, hdr_len,
                                     txbuff->indir_arr + 1);
}
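/*
 * Added commentary: indir_arr[0] is the main IBMVNIC_TX_DESC built in
 * ibmvnic_xmit(); the header descriptors created above start at
 * indir_arr + 1, and the whole array is handed to firmware in a single
 * send_subcrq_indirect() call when num_entries > 1.
 */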
static int ibmvnic_xmit(struct sk_buff *skb, struct net_device *netdev)
{
    struct ibmvnic_adapter *adapter = netdev_priv(netdev);
    int queue_num = skb_get_queue_mapping(skb);
    u8 *hdrs = (u8 *)&adapter->tx_rx_desc_req;
    struct device *dev = &adapter->vdev->dev;
    struct ibmvnic_tx_buff *tx_buff = NULL;
    struct ibmvnic_sub_crq_queue *tx_scrq;
    struct ibmvnic_tx_pool *tx_pool;
    unsigned int tx_send_failed = 0;
    unsigned int tx_map_failed = 0;
    unsigned int tx_dropped = 0;
    unsigned int tx_packets = 0;
    unsigned int tx_bytes = 0;
    dma_addr_t data_dma_addr;
    struct netdev_queue *txq;
    unsigned long lpar_rc;
    union sub_crq tx_crq;
    unsigned int offset;
    int num_entries = 1;
    unsigned char *dst;
    u64 *handle_array;
    int index = 0;
    u8 proto = 0;
    int ret = 0;

    if (adapter->resetting) {
        if (!netif_subqueue_stopped(netdev, skb))
            netif_stop_subqueue(netdev, queue_num);
        dev_kfree_skb_any(skb);

        tx_send_failed++;
        tx_dropped++;
        ret = NETDEV_TX_OK;
        goto out;
    }

    tx_pool = &adapter->tx_pool[queue_num];
    tx_scrq = adapter->tx_scrq[queue_num];
    txq = netdev_get_tx_queue(netdev, skb_get_queue_mapping(skb));
    handle_array = (u64 *)((u8 *)(adapter->login_rsp_buf) +
        be32_to_cpu(adapter->login_rsp_buf->off_txsubm_subcrqs));

    index = tx_pool->free_map[tx_pool->consumer_index];

    if (skb_is_gso(skb)) {
        offset = tx_pool->tso_index * IBMVNIC_TSO_BUF_SZ;
        dst = tx_pool->tso_ltb.buff + offset;
        memset(dst, 0, IBMVNIC_TSO_BUF_SZ);
        data_dma_addr = tx_pool->tso_ltb.addr + offset;
        tx_pool->tso_index++;
        if (tx_pool->tso_index == IBMVNIC_TSO_BUFS)
            tx_pool->tso_index = 0;
    } else {
        offset = index * adapter->req_mtu;
        dst = tx_pool->long_term_buff.buff + offset;
        memset(dst, 0, adapter->req_mtu);
        data_dma_addr = tx_pool->long_term_buff.addr + offset;
    }

    if (skb_shinfo(skb)->nr_frags) {
        int cur, i;

        /* Copy the head */
        skb_copy_from_linear_data(skb, dst, skb_headlen(skb));
        cur = skb_headlen(skb);

        /* Copy the frags */
        for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
            const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];

            memcpy(dst + cur,
                   page_address(skb_frag_page(frag)) +
                   frag->page_offset, skb_frag_size(frag));
            cur += skb_frag_size(frag);
        }
    } else {
        skb_copy_from_linear_data(skb, dst, skb->len);
    }

    tx_pool->consumer_index =
        (tx_pool->consumer_index + 1) %
            adapter->req_tx_entries_per_subcrq;

    tx_buff = &tx_pool->tx_buff[index];
    tx_buff->skb = skb;
    tx_buff->data_dma[0] = data_dma_addr;
    tx_buff->data_len[0] = skb->len;
    tx_buff->index = index;
    tx_buff->pool_index = queue_num;
    tx_buff->last_frag = true;

    memset(&tx_crq, 0, sizeof(tx_crq));
    tx_crq.v1.first = IBMVNIC_CRQ_CMD;
    tx_crq.v1.type = IBMVNIC_TX_DESC;
    tx_crq.v1.n_crq_elem = 1;
    tx_crq.v1.n_sge = 1;
    tx_crq.v1.flags1 = IBMVNIC_TX_COMP_NEEDED;
    tx_crq.v1.correlator = cpu_to_be32(index);
    if (skb_is_gso(skb))
        tx_crq.v1.dma_reg = cpu_to_be16(tx_pool->tso_ltb.map_id);
    else
        tx_crq.v1.dma_reg = cpu_to_be16(tx_pool->long_term_buff.map_id);
    tx_crq.v1.sge_len = cpu_to_be32(skb->len);
    tx_crq.v1.ioba = cpu_to_be64(data_dma_addr);

    if (adapter->vlan_header_insertion) {
        tx_crq.v1.flags2 |= IBMVNIC_TX_VLAN_INSERT;
        tx_crq.v1.vlan_id = cpu_to_be16(skb->vlan_tci);
    }

    if (skb->protocol == htons(ETH_P_IP)) {
        tx_crq.v1.flags1 |= IBMVNIC_TX_PROT_IPV4;
        proto = ip_hdr(skb)->protocol;
    } else if (skb->protocol == htons(ETH_P_IPV6)) {
        tx_crq.v1.flags1 |= IBMVNIC_TX_PROT_IPV6;
        proto = ipv6_hdr(skb)->nexthdr;
    }

    if (proto == IPPROTO_TCP)
        tx_crq.v1.flags1 |= IBMVNIC_TX_PROT_TCP;
    else if (proto == IPPROTO_UDP)
        tx_crq.v1.flags1 |= IBMVNIC_TX_PROT_UDP;

    if (skb->ip_summed == CHECKSUM_PARTIAL) {
        tx_crq.v1.flags1 |= IBMVNIC_TX_CHKSUM_OFFLOAD;
        hdrs += 2;
    }
    if (skb_is_gso(skb)) {
        tx_crq.v1.flags1 |= IBMVNIC_TX_LSO;
        tx_crq.v1.mss = cpu_to_be16(skb_shinfo(skb)->gso_size);
        hdrs += 2;
    }
    /* determine if l2/3/4 headers are sent to firmware */
    if ((*hdrs >> 7) & 1 &&
        (skb->protocol == htons(ETH_P_IP) ||
         skb->protocol == htons(ETH_P_IPV6) ||
         skb->protocol == htons(ETH_P_ARP))) {
        build_hdr_descs_arr(tx_buff, &num_entries, *hdrs);
        tx_crq.v1.n_crq_elem = num_entries;
        tx_buff->indir_arr[0] = tx_crq;
        tx_buff->indir_dma = dma_map_single(dev, tx_buff->indir_arr,
                                            sizeof(tx_buff->indir_arr),
                                            DMA_TO_DEVICE);
        if (dma_mapping_error(dev, tx_buff->indir_dma)) {
            dev_kfree_skb_any(skb);
            tx_buff->skb = NULL;
            if (!firmware_has_feature(FW_FEATURE_CMO))
                dev_err(dev, "tx: unable to map descriptor array\n");
            tx_map_failed++;
            tx_dropped++;
            ret = NETDEV_TX_OK;
            goto out;
        }
        lpar_rc = send_subcrq_indirect(adapter, handle_array[queue_num],
                                       (u64)tx_buff->indir_dma,
                                       (u64)num_entries);
    } else {
        lpar_rc = send_subcrq(adapter, handle_array[queue_num],
                              &tx_crq);
    }
    if (lpar_rc != H_SUCCESS) {
        dev_err(dev, "tx failed with code %ld\n", lpar_rc);

        if (tx_pool->consumer_index == 0)
            tx_pool->consumer_index =
                adapter->req_tx_entries_per_subcrq - 1;
        else
            tx_pool->consumer_index--;

        dev_kfree_skb_any(skb);
        tx_buff->skb = NULL;

        if (lpar_rc == H_CLOSED) {
            /* Disable TX and report carrier off if queue is closed.
             * Firmware guarantees that a signal will be sent to the
             * driver, triggering a reset or some other action.
             */
            netif_tx_stop_all_queues(netdev);
            netif_carrier_off(netdev);
        }

        tx_send_failed++;
        tx_dropped++;
        ret = NETDEV_TX_OK;
        goto out;
    }

    if (atomic_inc_return(&tx_scrq->used)
        >= adapter->req_tx_entries_per_subcrq) {
        netdev_info(netdev, "Stopping queue %d\n", queue_num);
        netif_stop_subqueue(netdev, queue_num);
    }

    tx_packets++;
    tx_bytes += skb->len;
    txq->trans_start = jiffies;
    ret = NETDEV_TX_OK;

out:
    netdev->stats.tx_dropped += tx_dropped;
    netdev->stats.tx_bytes += tx_bytes;
    netdev->stats.tx_packets += tx_packets;
    adapter->tx_send_failed += tx_send_failed;
    adapter->tx_map_failed += tx_map_failed;
    adapter->tx_stats_buffers[queue_num].packets += tx_packets;
    adapter->tx_stats_buffers[queue_num].bytes += tx_bytes;
    adapter->tx_stats_buffers[queue_num].dropped_packets += tx_dropped;

    return ret;
}
static void ibmvnic_set_multi(struct net_device *netdev)
{
    struct ibmvnic_adapter *adapter = netdev_priv(netdev);
    struct netdev_hw_addr *ha;
    union ibmvnic_crq crq;

    memset(&crq, 0, sizeof(crq));
    crq.request_capability.first = IBMVNIC_CRQ_CMD;
    crq.request_capability.cmd = REQUEST_CAPABILITY;

    if (netdev->flags & IFF_PROMISC) {
        if (!adapter->promisc_supported)
            return;
    } else {
        if (netdev->flags & IFF_ALLMULTI) {
            /* Accept all multicast */
            memset(&crq, 0, sizeof(crq));
            crq.multicast_ctrl.first = IBMVNIC_CRQ_CMD;
            crq.multicast_ctrl.cmd = MULTICAST_CTRL;
            crq.multicast_ctrl.flags = IBMVNIC_ENABLE_ALL;
            ibmvnic_send_crq(adapter, &crq);
        } else if (netdev_mc_empty(netdev)) {
            /* Reject all multicast */
            memset(&crq, 0, sizeof(crq));
            crq.multicast_ctrl.first = IBMVNIC_CRQ_CMD;
            crq.multicast_ctrl.cmd = MULTICAST_CTRL;
            crq.multicast_ctrl.flags = IBMVNIC_DISABLE_ALL;
            ibmvnic_send_crq(adapter, &crq);
        } else {
            /* Accept one or more multicast(s) */
            netdev_for_each_mc_addr(ha, netdev) {
                memset(&crq, 0, sizeof(crq));
                crq.multicast_ctrl.first = IBMVNIC_CRQ_CMD;
                crq.multicast_ctrl.cmd = MULTICAST_CTRL;
                crq.multicast_ctrl.flags = IBMVNIC_ENABLE_MC;
                ether_addr_copy(&crq.multicast_ctrl.mac_addr[0],
                                ha->addr);
                ibmvnic_send_crq(adapter, &crq);
            }
        }
    }
}
static int __ibmvnic_set_mac(struct net_device *netdev, struct sockaddr *p)
{
    struct ibmvnic_adapter *adapter = netdev_priv(netdev);
    struct sockaddr *addr = p;
    union ibmvnic_crq crq;

    if (!is_valid_ether_addr(addr->sa_data))
        return -EADDRNOTAVAIL;

    memset(&crq, 0, sizeof(crq));
    crq.change_mac_addr.first = IBMVNIC_CRQ_CMD;
    crq.change_mac_addr.cmd = CHANGE_MAC_ADDR;
    ether_addr_copy(&crq.change_mac_addr.mac_addr[0], addr->sa_data);

    init_completion(&adapter->fw_done);
    ibmvnic_send_crq(adapter, &crq);
    wait_for_completion(&adapter->fw_done);
    /* netdev->dev_addr is changed in handle_change_mac_rsp function */
    return adapter->fw_done_rc ? -EIO : 0;
}
static int ibmvnic_set_mac(struct net_device *netdev, void *p)
{
    struct ibmvnic_adapter *adapter = netdev_priv(netdev);
    struct sockaddr *addr = p;
    int rc;

    if (adapter->state == VNIC_PROBED) {
        memcpy(&adapter->desired.mac, addr, sizeof(struct sockaddr));
        adapter->mac_change_pending = true;
        return 0;
    }

    rc = __ibmvnic_set_mac(netdev, addr);

    return rc;
}
/**
 * do_reset returns zero if we are able to keep processing reset events, or
 * non-zero if we hit a fatal error and must halt.
 */
static int do_reset(struct ibmvnic_adapter *adapter,
                    struct ibmvnic_rwi *rwi, u32 reset_state)
{
    u64 old_num_rx_queues, old_num_tx_queues;
    struct net_device *netdev = adapter->netdev;
    int i, rc;

    netdev_dbg(adapter->netdev, "Re-setting driver (%d)\n",
               rwi->reset_reason);

    netif_carrier_off(netdev);
    adapter->reset_reason = rwi->reset_reason;

    old_num_rx_queues = adapter->req_rx_queues;
    old_num_tx_queues = adapter->req_tx_queues;

    if (rwi->reset_reason == VNIC_RESET_MOBILITY) {
        rc = ibmvnic_reenable_crq_queue(adapter);
        if (rc)
            return 0;
    }

    rc = __ibmvnic_close(netdev);
    if (rc)
        return rc;

    if (adapter->reset_reason == VNIC_RESET_CHANGE_PARAM ||
        adapter->wait_for_reset) {
        release_resources(adapter);
        release_sub_crqs(adapter);
        release_crq_queue(adapter);
    }

    if (adapter->reset_reason != VNIC_RESET_NON_FATAL) {
        /* remove the closed state so when we call open it appears
         * we are coming from the probed state.
         */
        adapter->state = VNIC_PROBED;

        rc = ibmvnic_init(adapter);
        if (rc)
            return IBMVNIC_INIT_FAILED;

        /* If the adapter was in PROBE state prior to the reset,
         * exit here.
         */
        if (reset_state == VNIC_PROBED)
            return 0;

        rc = ibmvnic_login(netdev);
        if (rc) {
            adapter->state = VNIC_PROBED;
            return 0;
        }

        if (adapter->reset_reason == VNIC_RESET_CHANGE_PARAM ||
            adapter->wait_for_reset) {
            rc = init_resources(adapter);
            if (rc)
                return rc;
        } else if (adapter->req_rx_queues != old_num_rx_queues ||
                   adapter->req_tx_queues != old_num_tx_queues) {
            release_rx_pools(adapter);
            release_tx_pools(adapter);
            init_rx_pools(netdev);
            init_tx_pools(netdev);
        } else {
            rc = reset_tx_pools(adapter);
            if (rc)
                return rc;

            rc = reset_rx_pools(adapter);
            if (rc)
                return rc;
        }
    }

    adapter->state = VNIC_CLOSED;

    if (reset_state == VNIC_CLOSED)
        return 0;

    adapter->state = reset_state;

    rc = __ibmvnic_open(netdev);
    if (rc) {
        if (list_empty(&adapter->rwi_list))
            adapter->state = VNIC_CLOSED;
        else
            adapter->state = reset_state;

        return 0;
    }

    netif_carrier_on(netdev);

    /* kick napi */
    for (i = 0; i < adapter->req_rx_queues; i++)
        napi_schedule(&adapter->napi[i]);

    if (adapter->reset_reason != VNIC_RESET_FAILOVER)
        netdev_notify_peers(netdev);

    return 0;
}
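/*
 * Added commentary: NON_FATAL resets skip re-initialization entirely;
 * CHANGE_PARAM (and wait_for_reset) rebuild every resource from scratch;
 * other reasons reuse the pools, reallocating them only when the queue
 * counts negotiated at login have changed.
 */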
static struct ibmvnic_rwi *get_next_rwi(struct ibmvnic_adapter *adapter)
{
    struct ibmvnic_rwi *rwi;

    mutex_lock(&adapter->rwi_lock);

    if (!list_empty(&adapter->rwi_list)) {
        rwi = list_first_entry(&adapter->rwi_list, struct ibmvnic_rwi,
                               list);
        list_del(&rwi->list);
    } else {
        rwi = NULL;
    }

    mutex_unlock(&adapter->rwi_lock);
    return rwi;
}
static void free_all_rwi(struct ibmvnic_adapter *adapter)
{
    struct ibmvnic_rwi *rwi;

    rwi = get_next_rwi(adapter);
    while (rwi) {
        kfree(rwi);
        rwi = get_next_rwi(adapter);
    }
}
static void __ibmvnic_reset(struct work_struct *work)
{
    struct ibmvnic_rwi *rwi;
    struct ibmvnic_adapter *adapter;
    struct net_device *netdev;
    u32 reset_state;
    int rc = 0;

    adapter = container_of(work, struct ibmvnic_adapter, ibmvnic_reset);
    netdev = adapter->netdev;

    mutex_lock(&adapter->reset_lock);
    adapter->resetting = true;
    reset_state = adapter->state;

    rwi = get_next_rwi(adapter);
    while (rwi) {
        rc = do_reset(adapter, rwi, reset_state);
        kfree(rwi);
        if (rc && rc != IBMVNIC_INIT_FAILED)
            break;

        rwi = get_next_rwi(adapter);
    }

    if (adapter->wait_for_reset) {
        adapter->wait_for_reset = false;
        adapter->reset_done_rc = rc;
        complete(&adapter->reset_done);
    }

    if (rc) {
        netdev_dbg(adapter->netdev, "Reset failed\n");
        free_all_rwi(adapter);
        mutex_unlock(&adapter->reset_lock);
        return;
    }

    adapter->resetting = false;
    mutex_unlock(&adapter->reset_lock);
}
static void ibmvnic_reset(struct ibmvnic_adapter *adapter,
                          enum ibmvnic_reset_reason reason)
{
    struct ibmvnic_rwi *rwi, *tmp;
    struct net_device *netdev = adapter->netdev;
    struct list_head *entry;

    if (adapter->state == VNIC_REMOVING ||
        adapter->state == VNIC_REMOVED) {
        netdev_dbg(netdev, "Adapter removing, skipping reset\n");
        return;
    }

    if (adapter->state == VNIC_PROBING) {
        netdev_warn(netdev, "Adapter reset during probe\n");
        adapter->init_done_rc = EAGAIN;
        return;
    }

    mutex_lock(&adapter->rwi_lock);

    list_for_each(entry, &adapter->rwi_list) {
        tmp = list_entry(entry, struct ibmvnic_rwi, list);
        if (tmp->reset_reason == reason) {
            netdev_dbg(netdev, "Skipping matching reset\n");
            mutex_unlock(&adapter->rwi_lock);
            return;
        }
    }

    rwi = kzalloc(sizeof(*rwi), GFP_KERNEL);
    if (!rwi) {
        mutex_unlock(&adapter->rwi_lock);
        ibmvnic_close(netdev);
        return;
    }

    rwi->reset_reason = reason;
    list_add_tail(&rwi->list, &adapter->rwi_list);
    mutex_unlock(&adapter->rwi_lock);

    netdev_dbg(adapter->netdev, "Scheduling reset (reason %d)\n", reason);
    schedule_work(&adapter->ibmvnic_reset);
}
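/*
 * Added commentary: duplicate reset requests for the same reason are
 * coalesced above, so at most one work item per reason sits on
 * rwi_list; __ibmvnic_reset() then drains the list one entry at a time.
 */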
static void ibmvnic_tx_timeout(struct net_device *dev)
{
    struct ibmvnic_adapter *adapter = netdev_priv(dev);

    ibmvnic_reset(adapter, VNIC_RESET_TIMEOUT);
}
static void remove_buff_from_pool(struct ibmvnic_adapter *adapter,
                                  struct ibmvnic_rx_buff *rx_buff)
{
    struct ibmvnic_rx_pool *pool = &adapter->rx_pool[rx_buff->pool_index];

    rx_buff->skb = NULL;

    pool->free_map[pool->next_alloc] = (int)(rx_buff - pool->rx_buff);
    pool->next_alloc = (pool->next_alloc + 1) % pool->size;

    atomic_dec(&pool->available);
}
static int ibmvnic_poll(struct napi_struct *napi, int budget)
{
    struct net_device *netdev = napi->dev;
    struct ibmvnic_adapter *adapter = netdev_priv(netdev);
    int scrq_num = (int)(napi - adapter->napi);
    int frames_processed = 0;

restart_poll:
    while (frames_processed < budget) {
        struct sk_buff *skb;
        struct ibmvnic_rx_buff *rx_buff;
        union sub_crq *next;
        u32 length;
        u16 offset;
        u8 flags = 0;

        if (unlikely(adapter->resetting)) {
            enable_scrq_irq(adapter, adapter->rx_scrq[scrq_num]);
            napi_complete_done(napi, frames_processed);
            return frames_processed;
        }

        if (!pending_scrq(adapter, adapter->rx_scrq[scrq_num]))
            break;
        next = ibmvnic_next_scrq(adapter, adapter->rx_scrq[scrq_num]);
        rx_buff =
            (struct ibmvnic_rx_buff *)be64_to_cpu(next->
                                                  rx_comp.correlator);
        /* do error checking */
        if (next->rx_comp.rc) {
            netdev_dbg(netdev, "rx buffer returned with rc %x\n",
                       be16_to_cpu(next->rx_comp.rc));
            /* free the entry */
            next->rx_comp.first = 0;
            remove_buff_from_pool(adapter, rx_buff);
            continue;
        }

        length = be32_to_cpu(next->rx_comp.len);
        offset = be16_to_cpu(next->rx_comp.off_frame_data);
        flags = next->rx_comp.flags;
        skb = rx_buff->skb;
        skb_copy_to_linear_data(skb, rx_buff->data + offset,
                                length);

        /* VLAN Header has been stripped by the system firmware and
         * needs to be inserted by the driver
         */
        if (adapter->rx_vlan_header_insertion &&
            (flags & IBMVNIC_VLAN_STRIPPED))
            __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q),
                                   ntohs(next->rx_comp.vlan_tci));

        /* free the entry */
        next->rx_comp.first = 0;
        remove_buff_from_pool(adapter, rx_buff);

        skb_put(skb, length);
        skb->protocol = eth_type_trans(skb, netdev);
        skb_record_rx_queue(skb, scrq_num);

        if (flags & IBMVNIC_IP_CHKSUM_GOOD &&
            flags & IBMVNIC_TCP_UDP_CHKSUM_GOOD) {
            skb->ip_summed = CHECKSUM_UNNECESSARY;
        }

        length = skb->len;
        napi_gro_receive(napi, skb); /* send it up */
        netdev->stats.rx_packets++;
        netdev->stats.rx_bytes += length;
        adapter->rx_stats_buffers[scrq_num].packets++;
        adapter->rx_stats_buffers[scrq_num].bytes += length;
        frames_processed++;
    }

    if (adapter->state != VNIC_CLOSING)
        replenish_rx_pool(adapter, &adapter->rx_pool[scrq_num]);

    if (frames_processed < budget) {
        enable_scrq_irq(adapter, adapter->rx_scrq[scrq_num]);
        napi_complete_done(napi, frames_processed);
        if (pending_scrq(adapter, adapter->rx_scrq[scrq_num]) &&
            napi_reschedule(napi)) {
            disable_scrq_irq(adapter, adapter->rx_scrq[scrq_num]);
            goto restart_poll;
        }
    }
    return frames_processed;
}
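/*
 * Added commentary: the pending_scrq()/napi_reschedule() dance after
 * napi_complete_done() closes the race where a completion arrives
 * between the final queue check and interrupt re-enable; if one is
 * found, the interrupt is disabled again and polling restarts.
 */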
#ifdef CONFIG_NET_POLL_CONTROLLER
static void ibmvnic_netpoll_controller(struct net_device *dev)
{
    struct ibmvnic_adapter *adapter = netdev_priv(dev);
    int i;

    replenish_pools(netdev_priv(dev));
    for (i = 0; i < adapter->req_rx_queues; i++)
        ibmvnic_interrupt_rx(adapter->rx_scrq[i]->irq,
                             adapter->rx_scrq[i]);
}
#endif
static int wait_for_reset(struct ibmvnic_adapter *adapter)
{
    adapter->fallback.mtu = adapter->req_mtu;
    adapter->fallback.rx_queues = adapter->req_rx_queues;
    adapter->fallback.tx_queues = adapter->req_tx_queues;
    adapter->fallback.rx_entries = adapter->req_rx_add_entries_per_subcrq;
    adapter->fallback.tx_entries = adapter->req_tx_entries_per_subcrq;

    init_completion(&adapter->reset_done);
    ibmvnic_reset(adapter, VNIC_RESET_CHANGE_PARAM);
    adapter->wait_for_reset = true;
    wait_for_completion(&adapter->reset_done);

    if (adapter->reset_done_rc) {
        adapter->desired.mtu = adapter->fallback.mtu;
        adapter->desired.rx_queues = adapter->fallback.rx_queues;
        adapter->desired.tx_queues = adapter->fallback.tx_queues;
        adapter->desired.rx_entries = adapter->fallback.rx_entries;
        adapter->desired.tx_entries = adapter->fallback.tx_entries;

        init_completion(&adapter->reset_done);
        ibmvnic_reset(adapter, VNIC_RESET_CHANGE_PARAM);
        wait_for_completion(&adapter->reset_done);
    }
    adapter->wait_for_reset = false;

    return adapter->reset_done_rc;
}
static int ibmvnic_change_mtu(struct net_device *netdev, int new_mtu)
{
    struct ibmvnic_adapter *adapter = netdev_priv(netdev);

    adapter->desired.mtu = new_mtu + ETH_HLEN;

    return wait_for_reset(adapter);
}
static const struct net_device_ops ibmvnic_netdev_ops = {
    .ndo_open               = ibmvnic_open,
    .ndo_stop               = ibmvnic_close,
    .ndo_start_xmit         = ibmvnic_xmit,
    .ndo_set_rx_mode        = ibmvnic_set_multi,
    .ndo_set_mac_address    = ibmvnic_set_mac,
    .ndo_validate_addr      = eth_validate_addr,
    .ndo_tx_timeout         = ibmvnic_tx_timeout,
#ifdef CONFIG_NET_POLL_CONTROLLER
    .ndo_poll_controller    = ibmvnic_netpoll_controller,
#endif
    .ndo_change_mtu         = ibmvnic_change_mtu,
};
/* ethtool functions */

static int ibmvnic_get_link_ksettings(struct net_device *netdev,
                                      struct ethtool_link_ksettings *cmd)
{
    u32 supported, advertising;

    supported = (SUPPORTED_1000baseT_Full | SUPPORTED_Autoneg |
                 SUPPORTED_FIBRE);
    advertising = (ADVERTISED_1000baseT_Full | ADVERTISED_Autoneg |
                   ADVERTISED_FIBRE);
    cmd->base.speed = SPEED_1000;
    cmd->base.duplex = DUPLEX_FULL;
    cmd->base.port = PORT_FIBRE;
    cmd->base.phy_address = 0;
    cmd->base.autoneg = AUTONEG_ENABLE;

    ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.supported,
                                            supported);
    ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.advertising,
                                            advertising);

    return 0;
}
static void ibmvnic_get_drvinfo(struct net_device *netdev,
                                struct ethtool_drvinfo *info)
{
    struct ibmvnic_adapter *adapter = netdev_priv(netdev);

    strlcpy(info->driver, ibmvnic_driver_name, sizeof(info->driver));
    strlcpy(info->version, IBMVNIC_DRIVER_VERSION, sizeof(info->version));
    strlcpy(info->fw_version, adapter->fw_version,
            sizeof(info->fw_version));
}
static u32 ibmvnic_get_msglevel(struct net_device *netdev)
{
    struct ibmvnic_adapter *adapter = netdev_priv(netdev);

    return adapter->msg_enable;
}

static void ibmvnic_set_msglevel(struct net_device *netdev, u32 data)
{
    struct ibmvnic_adapter *adapter = netdev_priv(netdev);

    adapter->msg_enable = data;
}

static u32 ibmvnic_get_link(struct net_device *netdev)
{
    struct ibmvnic_adapter *adapter = netdev_priv(netdev);

    /* Don't need to send a query because we request a logical link up at
     * init and then we wait for link state indications
     */
    return adapter->logical_link_state;
}
static void ibmvnic_get_ringparam(struct net_device *netdev,
                                  struct ethtool_ringparam *ring)
{
    struct ibmvnic_adapter *adapter = netdev_priv(netdev);

    ring->rx_max_pending = adapter->max_rx_add_entries_per_subcrq;
    ring->tx_max_pending = adapter->max_tx_entries_per_subcrq;
    ring->rx_mini_max_pending = 0;
    ring->rx_jumbo_max_pending = 0;
    ring->rx_pending = adapter->req_rx_add_entries_per_subcrq;
    ring->tx_pending = adapter->req_tx_entries_per_subcrq;
    ring->rx_mini_pending = 0;
    ring->rx_jumbo_pending = 0;
}
static int ibmvnic_set_ringparam(struct net_device *netdev,
                                 struct ethtool_ringparam *ring)
{
    struct ibmvnic_adapter *adapter = netdev_priv(netdev);

    if (ring->rx_pending > adapter->max_rx_add_entries_per_subcrq ||
        ring->tx_pending > adapter->max_tx_entries_per_subcrq) {
        netdev_err(netdev, "Invalid request.\n");
        netdev_err(netdev, "Max tx buffers = %llu\n",
                   adapter->max_tx_entries_per_subcrq);
        netdev_err(netdev, "Max rx buffers = %llu\n",
                   adapter->max_rx_add_entries_per_subcrq);
        return -EINVAL;
    }

    adapter->desired.rx_entries = ring->rx_pending;
    adapter->desired.tx_entries = ring->tx_pending;

    return wait_for_reset(adapter);
}
static void ibmvnic_get_channels(struct net_device *netdev,
                                 struct ethtool_channels *channels)
{
    struct ibmvnic_adapter *adapter = netdev_priv(netdev);

    channels->max_rx = adapter->max_rx_queues;
    channels->max_tx = adapter->max_tx_queues;
    channels->max_other = 0;
    channels->max_combined = 0;
    channels->rx_count = adapter->req_rx_queues;
    channels->tx_count = adapter->req_tx_queues;
    channels->other_count = 0;
    channels->combined_count = 0;
}
static int ibmvnic_set_channels(struct net_device *netdev,
                                struct ethtool_channels *channels)
{
    struct ibmvnic_adapter *adapter = netdev_priv(netdev);

    adapter->desired.rx_queues = channels->rx_count;
    adapter->desired.tx_queues = channels->tx_count;

    return wait_for_reset(adapter);
}
static void ibmvnic_get_strings(struct net_device *dev, u32 stringset, u8 *data)
{
    struct ibmvnic_adapter *adapter = netdev_priv(dev);
    int i;

    if (stringset != ETH_SS_STATS)
        return;

    for (i = 0; i < ARRAY_SIZE(ibmvnic_stats); i++, data += ETH_GSTRING_LEN)
        memcpy(data, ibmvnic_stats[i].name, ETH_GSTRING_LEN);

    for (i = 0; i < adapter->req_tx_queues; i++) {
        snprintf(data, ETH_GSTRING_LEN, "tx%d_packets", i);
        data += ETH_GSTRING_LEN;

        snprintf(data, ETH_GSTRING_LEN, "tx%d_bytes", i);
        data += ETH_GSTRING_LEN;

        snprintf(data, ETH_GSTRING_LEN, "tx%d_dropped_packets", i);
        data += ETH_GSTRING_LEN;
    }

    for (i = 0; i < adapter->req_rx_queues; i++) {
        snprintf(data, ETH_GSTRING_LEN, "rx%d_packets", i);
        data += ETH_GSTRING_LEN;

        snprintf(data, ETH_GSTRING_LEN, "rx%d_bytes", i);
        data += ETH_GSTRING_LEN;

        snprintf(data, ETH_GSTRING_LEN, "rx%d_interrupts", i);
        data += ETH_GSTRING_LEN;
    }
}
static int ibmvnic_get_sset_count(struct net_device *dev, int sset)
{
    struct ibmvnic_adapter *adapter = netdev_priv(dev);

    switch (sset) {
    case ETH_SS_STATS:
        return ARRAY_SIZE(ibmvnic_stats) +
               adapter->req_tx_queues * NUM_TX_STATS +
               adapter->req_rx_queues * NUM_RX_STATS;
    default:
        return -EOPNOTSUPP;
    }
}
static void ibmvnic_get_ethtool_stats(struct net_device *dev,
                                      struct ethtool_stats *stats, u64 *data)
{
    struct ibmvnic_adapter *adapter = netdev_priv(dev);
    union ibmvnic_crq crq;
    int i, j;

    memset(&crq, 0, sizeof(crq));
    crq.request_statistics.first = IBMVNIC_CRQ_CMD;
    crq.request_statistics.cmd = REQUEST_STATISTICS;
    crq.request_statistics.ioba = cpu_to_be32(adapter->stats_token);
    crq.request_statistics.len =
        cpu_to_be32(sizeof(struct ibmvnic_statistics));

    /* Wait for data to be written */
    init_completion(&adapter->stats_done);
    ibmvnic_send_crq(adapter, &crq);
    wait_for_completion(&adapter->stats_done);

    for (i = 0; i < ARRAY_SIZE(ibmvnic_stats); i++)
        data[i] = be64_to_cpu(IBMVNIC_GET_STAT(adapter,
                                               ibmvnic_stats[i].offset));

    for (j = 0; j < adapter->req_tx_queues; j++) {
        data[i] = adapter->tx_stats_buffers[j].packets;
        i++;
        data[i] = adapter->tx_stats_buffers[j].bytes;
        i++;
        data[i] = adapter->tx_stats_buffers[j].dropped_packets;
        i++;
    }

    for (j = 0; j < adapter->req_rx_queues; j++) {
        data[i] = adapter->rx_stats_buffers[j].packets;
        i++;
        data[i] = adapter->rx_stats_buffers[j].bytes;
        i++;
        data[i] = adapter->rx_stats_buffers[j].interrupts;
        i++;
    }
}
static const struct ethtool_ops ibmvnic_ethtool_ops = {
    .get_drvinfo            = ibmvnic_get_drvinfo,
    .get_msglevel           = ibmvnic_get_msglevel,
    .set_msglevel           = ibmvnic_set_msglevel,
    .get_link               = ibmvnic_get_link,
    .get_ringparam          = ibmvnic_get_ringparam,
    .set_ringparam          = ibmvnic_set_ringparam,
    .get_channels           = ibmvnic_get_channels,
    .set_channels           = ibmvnic_set_channels,
    .get_strings            = ibmvnic_get_strings,
    .get_sset_count         = ibmvnic_get_sset_count,
    .get_ethtool_stats      = ibmvnic_get_ethtool_stats,
    .get_link_ksettings     = ibmvnic_get_link_ksettings,
};
2200 /* Routines for managing CRQs/sCRQs */
2202 static int reset_one_sub_crq_queue(struct ibmvnic_adapter *adapter,
2203 struct ibmvnic_sub_crq_queue *scrq)
2208 free_irq(scrq->irq, scrq);
2209 irq_dispose_mapping(scrq->irq);
2213 memset(scrq->msgs, 0, 4 * PAGE_SIZE);
2216 rc = h_reg_sub_crq(adapter->vdev->unit_address, scrq->msg_token,
2217 4 * PAGE_SIZE, &scrq->crq_num, &scrq->hw_irq);
2221 static int reset_sub_crq_queues(struct ibmvnic_adapter *adapter)
2225 for (i = 0; i < adapter->req_tx_queues; i++) {
2226 netdev_dbg(adapter->netdev, "Re-setting tx_scrq[%d]\n", i);
2227 rc = reset_one_sub_crq_queue(adapter, adapter->tx_scrq[i]);
2232 for (i = 0; i < adapter->req_rx_queues; i++) {
2233 netdev_dbg(adapter->netdev, "Re-setting rx_scrq[%d]\n", i);
2234 rc = reset_one_sub_crq_queue(adapter, adapter->rx_scrq[i]);
2242 static void release_sub_crq_queue(struct ibmvnic_adapter *adapter,
2243 struct ibmvnic_sub_crq_queue *scrq)
2245 struct device *dev = &adapter->vdev->dev;
2248 netdev_dbg(adapter->netdev, "Releasing sub-CRQ\n");
2250 /* Close the sub-crqs */
2252 rc = plpar_hcall_norets(H_FREE_SUB_CRQ,
2253 adapter->vdev->unit_address,
2255 } while (rc == H_BUSY || H_IS_LONG_BUSY(rc));
2258 netdev_err(adapter->netdev,
2259 "Failed to release sub-CRQ %16lx, rc = %ld\n",
2263 dma_unmap_single(dev, scrq->msg_token, 4 * PAGE_SIZE,
2265 free_pages((unsigned long)scrq->msgs, 2);
2269 static struct ibmvnic_sub_crq_queue *init_sub_crq_queue(struct ibmvnic_adapter
2272 struct device *dev = &adapter->vdev->dev;
2273 struct ibmvnic_sub_crq_queue *scrq;
2276 scrq = kzalloc(sizeof(*scrq), GFP_KERNEL);
2281 (union sub_crq *)__get_free_pages(GFP_KERNEL | __GFP_ZERO, 2);
2283 dev_warn(dev, "Couldn't allocate crq queue messages page\n");
2284 goto zero_page_failed;
2287 scrq->msg_token = dma_map_single(dev, scrq->msgs, 4 * PAGE_SIZE,
2289 if (dma_mapping_error(dev, scrq->msg_token)) {
2290 dev_warn(dev, "Couldn't map crq queue messages page\n");
2294 rc = h_reg_sub_crq(adapter->vdev->unit_address, scrq->msg_token,
2295 4 * PAGE_SIZE, &scrq->crq_num, &scrq->hw_irq);
2297 if (rc == H_RESOURCE)
2298 rc = ibmvnic_reset_crq(adapter);
2300 if (rc == H_CLOSED) {
2301 dev_warn(dev, "Partner adapter not ready, waiting.\n");
2303 dev_warn(dev, "Error %d registering sub-crq\n", rc);
2307 scrq->adapter = adapter;
2308 scrq->size = 4 * PAGE_SIZE / sizeof(*scrq->msgs);
2309 spin_lock_init(&scrq->lock);
2311 netdev_dbg(adapter->netdev,
2312 "sub-crq initialized, num %lx, hw_irq=%lx, irq=%x\n",
2313 scrq->crq_num, scrq->hw_irq, scrq->irq);
2318 dma_unmap_single(dev, scrq->msg_token, 4 * PAGE_SIZE,
2321 free_pages((unsigned long)scrq->msgs, 2);
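/* Each sub-CRQ ring is a single order-2 page allocation that stays DMA
 * mapped for the life of the queue, in keeping with the long term
 * mapping strategy this driver is required to use. With 32-byte
 * descriptors that works out to 4 * 4096 / 32 = 512 ring entries on
 * 4 KiB pages, or 8192 entries on the 64 KiB pages commonly configured
 * on ppc64 kernels.
 */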
2328 static void release_sub_crqs(struct ibmvnic_adapter *adapter)
2332 if (adapter->tx_scrq) {
2333 for (i = 0; i < adapter->req_tx_queues; i++) {
2334 if (!adapter->tx_scrq[i])
2337 netdev_dbg(adapter->netdev, "Releasing tx_scrq[%d]\n",
2339 if (adapter->tx_scrq[i]->irq) {
2340 free_irq(adapter->tx_scrq[i]->irq,
2341 adapter->tx_scrq[i]);
2342 irq_dispose_mapping(adapter->tx_scrq[i]->irq);
2343 adapter->tx_scrq[i]->irq = 0;
2346 release_sub_crq_queue(adapter, adapter->tx_scrq[i]);
2349 kfree(adapter->tx_scrq);
2350 adapter->tx_scrq = NULL;
2353 if (adapter->rx_scrq) {
2354 for (i = 0; i < adapter->req_rx_queues; i++) {
2355 if (!adapter->rx_scrq[i])
2358 netdev_dbg(adapter->netdev, "Releasing rx_scrq[%d]\n",
2360 if (adapter->rx_scrq[i]->irq) {
2361 free_irq(adapter->rx_scrq[i]->irq,
2362 adapter->rx_scrq[i]);
2363 irq_dispose_mapping(adapter->rx_scrq[i]->irq);
2364 adapter->rx_scrq[i]->irq = 0;
2367 release_sub_crq_queue(adapter, adapter->rx_scrq[i]);
2370 kfree(adapter->rx_scrq);
2371 adapter->rx_scrq = NULL;
2375 static int disable_scrq_irq(struct ibmvnic_adapter *adapter,
2376 struct ibmvnic_sub_crq_queue *scrq)
2378 struct device *dev = &adapter->vdev->dev;
2381 rc = plpar_hcall_norets(H_VIOCTL, adapter->vdev->unit_address,
2382 H_DISABLE_VIO_INTERRUPT, scrq->hw_irq, 0, 0);
2384 dev_err(dev, "Couldn't disable scrq irq 0x%lx. rc=%ld\n",
2389 static int enable_scrq_irq(struct ibmvnic_adapter *adapter,
2390 struct ibmvnic_sub_crq_queue *scrq)
2392 struct device *dev = &adapter->vdev->dev;
2395 if (scrq->hw_irq > 0x100000000ULL) {
2396 dev_err(dev, "bad hw_irq = %lx\n", scrq->hw_irq);
2400 rc = plpar_hcall_norets(H_VIOCTL, adapter->vdev->unit_address,
2401 H_ENABLE_VIO_INTERRUPT, scrq->hw_irq, 0, 0);
2403 dev_err(dev, "Couldn't enable scrq irq 0x%lx. rc=%ld\n",
2408 static int ibmvnic_complete_tx(struct ibmvnic_adapter *adapter,
2409 struct ibmvnic_sub_crq_queue *scrq)
2411 struct device *dev = &adapter->vdev->dev;
2412 struct ibmvnic_tx_buff *txbuff;
2413 union sub_crq *next;
2419 while (pending_scrq(adapter, scrq)) {
2420 unsigned int pool = scrq->pool_index;
2422 next = ibmvnic_next_scrq(adapter, scrq);
2423 for (i = 0; i < next->tx_comp.num_comps; i++) {
2424 if (next->tx_comp.rcs[i]) {
2425 dev_err(dev, "tx error %x\n",
2426 next->tx_comp.rcs[i]);
2429 index = be32_to_cpu(next->tx_comp.correlators[i]);
2430 txbuff = &adapter->tx_pool[pool].tx_buff[index];
2432 for (j = 0; j < IBMVNIC_MAX_FRAGS_PER_CRQ; j++) {
2433 if (!txbuff->data_dma[j])
2436 txbuff->data_dma[j] = 0;
2438 /* if sub_crq was sent indirectly */
2439 first = txbuff->indir_arr[0].generic.first;
2440 if (first == IBMVNIC_CRQ_CMD) {
2441 dma_unmap_single(dev, txbuff->indir_dma,
2442 sizeof(txbuff->indir_arr),
2446 if (txbuff->last_frag) {
2447 dev_kfree_skb_any(txbuff->skb);
2451 adapter->tx_pool[pool].free_map[adapter->tx_pool[pool].
2452 producer_index] = index;
2453 adapter->tx_pool[pool].producer_index =
2454 (adapter->tx_pool[pool].producer_index + 1) %
2455 adapter->req_tx_entries_per_subcrq;
2457 /* mark this tx_comp descriptor as consumed so its ring slot can be reused */
2458 next->tx_comp.first = 0;
2460 if (atomic_sub_return(next->tx_comp.num_comps, &scrq->used) <=
2461 (adapter->req_tx_entries_per_subcrq / 2) &&
2462 __netif_subqueue_stopped(adapter->netdev,
2463 scrq->pool_index)) {
2464 netif_wake_subqueue(adapter->netdev, scrq->pool_index);
2465 netdev_info(adapter->netdev, "Started queue %d\n",
2470 enable_scrq_irq(adapter, scrq);
2472 if (pending_scrq(adapter, scrq)) {
2473 disable_scrq_irq(adapter, scrq);
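/* The enable/re-check/disable sequence that ends ibmvnic_complete_tx()
 * closes a race: a completion can land between the final pass over the
 * ring and re-enabling the interrupt. If pending_scrq() still sees work
 * after enable_scrq_irq(), the interrupt is masked again and processing
 * restarts, so no tx completion is left stranded with the interrupt
 * disarmed.
 */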
2480 static irqreturn_t ibmvnic_interrupt_tx(int irq, void *instance)
2482 struct ibmvnic_sub_crq_queue *scrq = instance;
2483 struct ibmvnic_adapter *adapter = scrq->adapter;
2485 disable_scrq_irq(adapter, scrq);
2486 ibmvnic_complete_tx(adapter, scrq);
2491 static irqreturn_t ibmvnic_interrupt_rx(int irq, void *instance)
2493 struct ibmvnic_sub_crq_queue *scrq = instance;
2494 struct ibmvnic_adapter *adapter = scrq->adapter;
2496 /* When booting a kdump kernel we can hit pending interrupts
2497 * prior to completing driver initialization.
2499 if (unlikely(adapter->state != VNIC_OPEN))
2502 adapter->rx_stats_buffers[scrq->scrq_num].interrupts++;
2504 if (napi_schedule_prep(&adapter->napi[scrq->scrq_num])) {
2505 disable_scrq_irq(adapter, scrq);
2506 __napi_schedule(&adapter->napi[scrq->scrq_num]);
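/* This is the standard NAPI hand-off for the rx path:
 * napi_schedule_prep() claims the NAPI instance, the sub-CRQ interrupt
 * is masked at the hypervisor, and __napi_schedule() defers the actual
 * packet processing to the poll routine in softirq context. The poll
 * routine is then responsible for re-enabling the sub-CRQ interrupt
 * once it completes under budget.
 */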
2512 static int init_sub_crq_irqs(struct ibmvnic_adapter *adapter)
2514 struct device *dev = &adapter->vdev->dev;
2515 struct ibmvnic_sub_crq_queue *scrq;
2519 for (i = 0; i < adapter->req_tx_queues; i++) {
2520 netdev_dbg(adapter->netdev, "Initializing tx_scrq[%d] irq\n",
2522 scrq = adapter->tx_scrq[i];
2523 scrq->irq = irq_create_mapping(NULL, scrq->hw_irq);
2527 dev_err(dev, "Error mapping irq\n");
2528 goto req_tx_irq_failed;
2531 rc = request_irq(scrq->irq, ibmvnic_interrupt_tx,
2532 0, "ibmvnic_tx", scrq);
2535 dev_err(dev, "Couldn't register tx irq 0x%x. rc=%d\n",
2537 irq_dispose_mapping(scrq->irq);
2538 goto req_tx_irq_failed; /* only tx irqs are mapped at this point */
2542 for (i = 0; i < adapter->req_rx_queues; i++) {
2543 netdev_dbg(adapter->netdev, "Initializing rx_scrq[%d] irq\n",
2545 scrq = adapter->rx_scrq[i];
2546 scrq->irq = irq_create_mapping(NULL, scrq->hw_irq);
2549 dev_err(dev, "Error mapping irq\n");
2550 goto req_rx_irq_failed;
2552 rc = request_irq(scrq->irq, ibmvnic_interrupt_rx,
2553 0, "ibmvnic_rx", scrq);
2555 dev_err(dev, "Couldn't register rx irq 0x%x. rc=%d\n",
2557 irq_dispose_mapping(scrq->irq);
2558 goto req_rx_irq_failed;
2564 for (j = 0; j < i; j++) {
2565 free_irq(adapter->rx_scrq[j]->irq, adapter->rx_scrq[j]);
2566 irq_dispose_mapping(adapter->rx_scrq[j]->irq);
2568 i = adapter->req_tx_queues;
2570 for (j = 0; j < i; j++) {
2571 free_irq(adapter->tx_scrq[j]->irq, adapter->tx_scrq[j]);
2572 irq_dispose_mapping(adapter->tx_scrq[j]->irq);
2574 release_sub_crqs(adapter);
2578 static int init_sub_crqs(struct ibmvnic_adapter *adapter)
2580 struct device *dev = &adapter->vdev->dev;
2581 struct ibmvnic_sub_crq_queue **allqueues;
2582 int registered_queues = 0;
2587 total_queues = adapter->req_tx_queues + adapter->req_rx_queues;
2589 allqueues = kcalloc(total_queues, sizeof(*allqueues), GFP_KERNEL);
2593 for (i = 0; i < total_queues; i++) {
2594 allqueues[i] = init_sub_crq_queue(adapter);
2595 if (!allqueues[i]) {
2596 dev_warn(dev, "Couldn't allocate all sub-crqs\n");
2599 registered_queues++;
2602 /* Make sure we were able to register the minimum number of queues */
2603 if (registered_queues <
2604 adapter->min_tx_queues + adapter->min_rx_queues) {
2605 dev_err(dev, "Fatal: Couldn't init min number of sub-crqs\n");
2609 /* Spread the allocation shortfall across the requested rx/tx queue counts */
2610 for (i = 0; i < total_queues - registered_queues + more; i++) {
2611 netdev_dbg(adapter->netdev, "Reducing number of queues\n");
2614 if (adapter->req_rx_queues > adapter->min_rx_queues)
2615 adapter->req_rx_queues--;
2620 if (adapter->req_tx_queues > adapter->min_tx_queues)
2621 adapter->req_tx_queues--;
2628 adapter->tx_scrq = kcalloc(adapter->req_tx_queues,
2629 sizeof(*adapter->tx_scrq), GFP_KERNEL);
2630 if (!adapter->tx_scrq)
2633 for (i = 0; i < adapter->req_tx_queues; i++) {
2634 adapter->tx_scrq[i] = allqueues[i];
2635 adapter->tx_scrq[i]->pool_index = i;
2638 adapter->rx_scrq = kcalloc(adapter->req_rx_queues,
2639 sizeof(*adapter->rx_scrq), GFP_KERNEL);
2640 if (!adapter->rx_scrq)
2643 for (i = 0; i < adapter->req_rx_queues; i++) {
2644 adapter->rx_scrq[i] = allqueues[i + adapter->req_tx_queues];
2645 adapter->rx_scrq[i]->scrq_num = i;
2652 kfree(adapter->tx_scrq);
2653 adapter->tx_scrq = NULL;
2655 for (i = 0; i < registered_queues; i++)
2656 release_sub_crq_queue(adapter, allqueues[i]);
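/* Worked example of the shortfall redistribution above: with
 * req_tx_queues = req_rx_queues = 8, total_queues is 16. If only 14
 * sub-CRQs register, the loop trims two queues, taken alternately from
 * the rx and tx requests, and "more" extends the loop whenever one side
 * has already reached its negotiated minimum so that the other side
 * absorbs the remaining deficit.
 */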
2661 static void ibmvnic_send_req_caps(struct ibmvnic_adapter *adapter, int retry)
2663 struct device *dev = &adapter->vdev->dev;
2664 union ibmvnic_crq crq;
2668 /* Sub-CRQ entries are 32 bytes long */
2669 int entries_page = 4 * PAGE_SIZE / (sizeof(u64) * 4);
2671 if (adapter->min_tx_entries_per_subcrq > entries_page ||
2672 adapter->min_rx_add_entries_per_subcrq > entries_page) {
2673 dev_err(dev, "Fatal, invalid entries per sub-crq\n");
2677 if (adapter->desired.mtu)
2678 adapter->req_mtu = adapter->desired.mtu;
2680 adapter->req_mtu = adapter->netdev->mtu + ETH_HLEN;
2682 if (!adapter->desired.tx_entries)
2683 adapter->desired.tx_entries =
2684 adapter->max_tx_entries_per_subcrq;
2685 if (!adapter->desired.rx_entries)
2686 adapter->desired.rx_entries =
2687 adapter->max_rx_add_entries_per_subcrq;
2689 max_entries = IBMVNIC_MAX_LTB_SIZE /
2690 (adapter->req_mtu + IBMVNIC_BUFFER_HLEN);
2692 if ((adapter->req_mtu + IBMVNIC_BUFFER_HLEN) *
2693 adapter->desired.tx_entries > IBMVNIC_MAX_LTB_SIZE) {
2694 adapter->desired.tx_entries = max_entries;
2697 if ((adapter->req_mtu + IBMVNIC_BUFFER_HLEN) *
2698 adapter->desired.rx_entries > IBMVNIC_MAX_LTB_SIZE) {
2699 adapter->desired.rx_entries = max_entries;
2702 if (adapter->desired.tx_entries)
2703 adapter->req_tx_entries_per_subcrq =
2704 adapter->desired.tx_entries;
2706 adapter->req_tx_entries_per_subcrq =
2707 adapter->max_tx_entries_per_subcrq;
2709 if (adapter->desired.rx_entries)
2710 adapter->req_rx_add_entries_per_subcrq =
2711 adapter->desired.rx_entries;
2713 adapter->req_rx_add_entries_per_subcrq =
2714 adapter->max_rx_add_entries_per_subcrq;
2716 if (adapter->desired.tx_queues)
2717 adapter->req_tx_queues =
2718 adapter->desired.tx_queues;
2720 adapter->req_tx_queues =
2721 adapter->opt_tx_comp_sub_queues;
2723 if (adapter->desired.rx_queues)
2724 adapter->req_rx_queues =
2725 adapter->desired.rx_queues;
2727 adapter->req_rx_queues =
2728 adapter->opt_rx_comp_queues;
2730 adapter->req_rx_add_queues = adapter->max_rx_add_queues;
2733 memset(&crq, 0, sizeof(crq));
2734 crq.request_capability.first = IBMVNIC_CRQ_CMD;
2735 crq.request_capability.cmd = REQUEST_CAPABILITY;
2737 crq.request_capability.capability = cpu_to_be16(REQ_TX_QUEUES);
2738 crq.request_capability.number = cpu_to_be64(adapter->req_tx_queues);
2739 atomic_inc(&adapter->running_cap_crqs);
2740 ibmvnic_send_crq(adapter, &crq);
2742 crq.request_capability.capability = cpu_to_be16(REQ_RX_QUEUES);
2743 crq.request_capability.number = cpu_to_be64(adapter->req_rx_queues);
2744 atomic_inc(&adapter->running_cap_crqs);
2745 ibmvnic_send_crq(adapter, &crq);
2747 crq.request_capability.capability = cpu_to_be16(REQ_RX_ADD_QUEUES);
2748 crq.request_capability.number = cpu_to_be64(adapter->req_rx_add_queues);
2749 atomic_inc(&adapter->running_cap_crqs);
2750 ibmvnic_send_crq(adapter, &crq);
2752 crq.request_capability.capability =
2753 cpu_to_be16(REQ_TX_ENTRIES_PER_SUBCRQ);
2754 crq.request_capability.number =
2755 cpu_to_be64(adapter->req_tx_entries_per_subcrq);
2756 atomic_inc(&adapter->running_cap_crqs);
2757 ibmvnic_send_crq(adapter, &crq);
2759 crq.request_capability.capability =
2760 cpu_to_be16(REQ_RX_ADD_ENTRIES_PER_SUBCRQ);
2761 crq.request_capability.number =
2762 cpu_to_be64(adapter->req_rx_add_entries_per_subcrq);
2763 atomic_inc(&adapter->running_cap_crqs);
2764 ibmvnic_send_crq(adapter, &crq);
2766 crq.request_capability.capability = cpu_to_be16(REQ_MTU);
2767 crq.request_capability.number = cpu_to_be64(adapter->req_mtu);
2768 atomic_inc(&adapter->running_cap_crqs);
2769 ibmvnic_send_crq(adapter, &crq);
2771 if (adapter->netdev->flags & IFF_PROMISC) {
2772 if (adapter->promisc_supported) {
2773 crq.request_capability.capability =
2774 cpu_to_be16(PROMISC_REQUESTED);
2775 crq.request_capability.number = cpu_to_be64(1);
2776 atomic_inc(&adapter->running_cap_crqs);
2777 ibmvnic_send_crq(adapter, &crq);
2780 crq.request_capability.capability =
2781 cpu_to_be16(PROMISC_REQUESTED);
2782 crq.request_capability.number = cpu_to_be64(0);
2783 atomic_inc(&adapter->running_cap_crqs);
2784 ibmvnic_send_crq(adapter, &crq);
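/* Capability negotiation is fully asynchronous: every REQUEST_CAPABILITY
 * CRQ sent above bumps adapter->running_cap_crqs, and each
 * REQUEST_CAPABILITY_RSP handled later decrements it. Only when the
 * counter drains to zero does handle_request_cap_rsp() move on to
 * querying IP offload support, so a lost response would stall the
 * negotiation at this point.
 */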
2788 static int pending_scrq(struct ibmvnic_adapter *adapter,
2789 struct ibmvnic_sub_crq_queue *scrq)
2791 union sub_crq *entry = &scrq->msgs[scrq->cur];
2793 if (entry->generic.first & IBMVNIC_CRQ_CMD_RSP)
2799 static union sub_crq *ibmvnic_next_scrq(struct ibmvnic_adapter *adapter,
2800 struct ibmvnic_sub_crq_queue *scrq)
2802 union sub_crq *entry;
2803 unsigned long flags;
2805 spin_lock_irqsave(&scrq->lock, flags);
2806 entry = &scrq->msgs[scrq->cur];
2807 if (entry->generic.first & IBMVNIC_CRQ_CMD_RSP) {
2808 if (++scrq->cur == scrq->size)
2813 spin_unlock_irqrestore(&scrq->lock, flags);
2818 static union ibmvnic_crq *ibmvnic_next_crq(struct ibmvnic_adapter *adapter)
2820 struct ibmvnic_crq_queue *queue = &adapter->crq;
2821 union ibmvnic_crq *crq;
2823 crq = &queue->msgs[queue->cur];
2824 if (crq->generic.first & IBMVNIC_CRQ_CMD_RSP) {
2825 if (++queue->cur == queue->size)
2834 static int send_subcrq(struct ibmvnic_adapter *adapter, u64 remote_handle,
2835 union sub_crq *sub_crq)
2837 unsigned int ua = adapter->vdev->unit_address;
2838 struct device *dev = &adapter->vdev->dev;
2839 u64 *u64_crq = (u64 *)sub_crq;
2842 netdev_dbg(adapter->netdev,
2843 "Sending sCRQ %016lx: %016lx %016lx %016lx %016lx\n",
2844 (unsigned long int)cpu_to_be64(remote_handle),
2845 (unsigned long int)cpu_to_be64(u64_crq[0]),
2846 (unsigned long int)cpu_to_be64(u64_crq[1]),
2847 (unsigned long int)cpu_to_be64(u64_crq[2]),
2848 (unsigned long int)cpu_to_be64(u64_crq[3]));
2850 /* Make sure the hypervisor sees the complete request */
2853 rc = plpar_hcall_norets(H_SEND_SUB_CRQ, ua,
2854 cpu_to_be64(remote_handle),
2855 cpu_to_be64(u64_crq[0]),
2856 cpu_to_be64(u64_crq[1]),
2857 cpu_to_be64(u64_crq[2]),
2858 cpu_to_be64(u64_crq[3]));
2862 dev_warn(dev, "CRQ Queue closed\n");
2863 dev_err(dev, "Send error (rc=%d)\n", rc);
2869 static int send_subcrq_indirect(struct ibmvnic_adapter *adapter,
2870 u64 remote_handle, u64 ioba, u64 num_entries)
2872 unsigned int ua = adapter->vdev->unit_address;
2873 struct device *dev = &adapter->vdev->dev;
2876 /* Make sure the hypervisor sees the complete request */
2878 rc = plpar_hcall_norets(H_SEND_SUB_CRQ_INDIRECT, ua,
2879 cpu_to_be64(remote_handle),
2884 dev_warn(dev, "CRQ Queue closed\n");
2885 dev_err(dev, "Send (indirect) error (rc=%d)\n", rc);
2891 static int ibmvnic_send_crq(struct ibmvnic_adapter *adapter,
2892 union ibmvnic_crq *crq)
2894 unsigned int ua = adapter->vdev->unit_address;
2895 struct device *dev = &adapter->vdev->dev;
2896 u64 *u64_crq = (u64 *)crq;
2899 netdev_dbg(adapter->netdev, "Sending CRQ: %016lx %016lx\n",
2900 (unsigned long int)cpu_to_be64(u64_crq[0]),
2901 (unsigned long int)cpu_to_be64(u64_crq[1]));
2903 /* Make sure the hypervisor sees the complete request */
2906 rc = plpar_hcall_norets(H_SEND_CRQ, ua,
2907 cpu_to_be64(u64_crq[0]),
2908 cpu_to_be64(u64_crq[1]));
2912 dev_warn(dev, "CRQ Queue closed\n");
2913 dev_warn(dev, "Send error (rc=%d)\n", rc);
2919 static int ibmvnic_send_crq_init(struct ibmvnic_adapter *adapter)
2921 union ibmvnic_crq crq;
2923 memset(&crq, 0, sizeof(crq));
2924 crq.generic.first = IBMVNIC_CRQ_INIT_CMD;
2925 crq.generic.cmd = IBMVNIC_CRQ_INIT;
2926 netdev_dbg(adapter->netdev, "Sending CRQ init\n");
2928 return ibmvnic_send_crq(adapter, &crq);
2931 static int send_version_xchg(struct ibmvnic_adapter *adapter)
2933 union ibmvnic_crq crq;
2935 memset(&crq, 0, sizeof(crq));
2936 crq.version_exchange.first = IBMVNIC_CRQ_CMD;
2937 crq.version_exchange.cmd = VERSION_EXCHANGE;
2938 crq.version_exchange.version = cpu_to_be16(ibmvnic_version);
2940 return ibmvnic_send_crq(adapter, &crq);
2943 struct vnic_login_client_data {
2949 static int vnic_client_data_len(struct ibmvnic_adapter *adapter)
2953 /* Calculate the amount of buffer space needed for the
2954 * vnic client data in the login buffer. There are four entries,
2955 * OS name, LPAR name, device name, and a null last entry.
2957 len = 4 * sizeof(struct vnic_login_client_data);
2958 len += 6; /* "Linux" plus terminating NUL */
2959 len += strlen(utsname()->nodename) + 1;
2960 len += strlen(adapter->netdev->name) + 1;
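/* Sizing example, assuming the packed 4-byte header layout of struct
 * vnic_login_client_data (u8 type, __be16 len, char name) and the
 * hypothetical names nodename "lpar1" and netdev "eth0": the result is
 * 4 * 4 (headers, including the null last entry) + 6 ("Linux" + NUL)
 * + 6 ("lpar1" + NUL) + 5 ("eth0" + NUL) = 33 bytes. Since the "name"
 * byte in each header doubles as the first character of the string,
 * this slightly over-reserves, which is harmless.
 */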
2965 static void vnic_add_client_data(struct ibmvnic_adapter *adapter,
2966 struct vnic_login_client_data *vlcd)
2968 const char *os_name = "Linux";
2971 /* Type 1 - LPAR OS */
2973 len = strlen(os_name) + 1;
2974 vlcd->len = cpu_to_be16(len);
2975 strncpy(&vlcd->name, os_name, len);
2976 vlcd = (struct vnic_login_client_data *)((char *)&vlcd->name + len);
2978 /* Type 2 - LPAR name */
2980 len = strlen(utsname()->nodename) + 1;
2981 vlcd->len = cpu_to_be16(len);
2982 strncpy(&vlcd->name, utsname()->nodename, len);
2983 vlcd = (struct vnic_login_client_data *)((char *)&vlcd->name + len);
2985 /* Type 3 - device name */
2987 len = strlen(adapter->netdev->name) + 1;
2988 vlcd->len = cpu_to_be16(len);
2989 strncpy(&vlcd->name, adapter->netdev->name, len);
2992 static void send_login(struct ibmvnic_adapter *adapter)
2994 struct ibmvnic_login_rsp_buffer *login_rsp_buffer;
2995 struct ibmvnic_login_buffer *login_buffer;
2996 struct device *dev = &adapter->vdev->dev;
2997 dma_addr_t rsp_buffer_token;
2998 dma_addr_t buffer_token;
2999 size_t rsp_buffer_size;
3000 union ibmvnic_crq crq;
3004 int client_data_len;
3005 struct vnic_login_client_data *vlcd;
3008 client_data_len = vnic_client_data_len(adapter);
3011 sizeof(struct ibmvnic_login_buffer) +
3012 sizeof(u64) * (adapter->req_tx_queues + adapter->req_rx_queues) +
3015 login_buffer = kzalloc(buffer_size, GFP_ATOMIC);
3017 goto buf_alloc_failed;
3019 buffer_token = dma_map_single(dev, login_buffer, buffer_size,
3021 if (dma_mapping_error(dev, buffer_token)) {
3022 dev_err(dev, "Couldn't map login buffer\n");
3023 goto buf_map_failed;
3026 rsp_buffer_size = sizeof(struct ibmvnic_login_rsp_buffer) +
3027 sizeof(u64) * adapter->req_tx_queues +
3028 sizeof(u64) * adapter->req_rx_queues +
3029 sizeof(u64) * adapter->req_rx_queues +
3030 sizeof(u8) * IBMVNIC_TX_DESC_VERSIONS;
3032 login_rsp_buffer = kmalloc(rsp_buffer_size, GFP_ATOMIC);
3033 if (!login_rsp_buffer)
3034 goto buf_rsp_alloc_failed;
3036 rsp_buffer_token = dma_map_single(dev, login_rsp_buffer,
3037 rsp_buffer_size, DMA_FROM_DEVICE);
3038 if (dma_mapping_error(dev, rsp_buffer_token)) {
3039 dev_err(dev, "Couldn't map login rsp buffer\n");
3040 goto buf_rsp_map_failed;
3043 adapter->login_buf = login_buffer;
3044 adapter->login_buf_token = buffer_token;
3045 adapter->login_buf_sz = buffer_size;
3046 adapter->login_rsp_buf = login_rsp_buffer;
3047 adapter->login_rsp_buf_token = rsp_buffer_token;
3048 adapter->login_rsp_buf_sz = rsp_buffer_size;
3050 login_buffer->len = cpu_to_be32(buffer_size);
3051 login_buffer->version = cpu_to_be32(INITIAL_VERSION_LB);
3052 login_buffer->num_txcomp_subcrqs = cpu_to_be32(adapter->req_tx_queues);
3053 login_buffer->off_txcomp_subcrqs =
3054 cpu_to_be32(sizeof(struct ibmvnic_login_buffer));
3055 login_buffer->num_rxcomp_subcrqs = cpu_to_be32(adapter->req_rx_queues);
3056 login_buffer->off_rxcomp_subcrqs =
3057 cpu_to_be32(sizeof(struct ibmvnic_login_buffer) +
3058 sizeof(u64) * adapter->req_tx_queues);
3059 login_buffer->login_rsp_ioba = cpu_to_be32(rsp_buffer_token);
3060 login_buffer->login_rsp_len = cpu_to_be32(rsp_buffer_size);
3062 tx_list_p = (__be64 *)((char *)login_buffer +
3063 sizeof(struct ibmvnic_login_buffer));
3064 rx_list_p = (__be64 *)((char *)login_buffer +
3065 sizeof(struct ibmvnic_login_buffer) +
3066 sizeof(u64) * adapter->req_tx_queues);
3068 for (i = 0; i < adapter->req_tx_queues; i++) {
3069 if (adapter->tx_scrq[i]) {
3070 tx_list_p[i] = cpu_to_be64(adapter->tx_scrq[i]->
3075 for (i = 0; i < adapter->req_rx_queues; i++) {
3076 if (adapter->rx_scrq[i]) {
3077 rx_list_p[i] = cpu_to_be64(adapter->rx_scrq[i]->
3082 /* Insert vNIC login client data */
3083 vlcd = (struct vnic_login_client_data *)
3084 ((char *)rx_list_p + (sizeof(u64) * adapter->req_rx_queues));
3085 login_buffer->client_data_offset =
3086 cpu_to_be32((char *)vlcd - (char *)login_buffer);
3087 login_buffer->client_data_len = cpu_to_be32(client_data_len);
3089 vnic_add_client_data(adapter, vlcd);
3091 netdev_dbg(adapter->netdev, "Login Buffer:\n");
3092 for (i = 0; i < (adapter->login_buf_sz - 1) / 8 + 1; i++) {
3093 netdev_dbg(adapter->netdev, "%016lx\n",
3094 ((unsigned long int *)(adapter->login_buf))[i]);
3097 memset(&crq, 0, sizeof(crq));
3098 crq.login.first = IBMVNIC_CRQ_CMD;
3099 crq.login.cmd = LOGIN;
3100 crq.login.ioba = cpu_to_be32(buffer_token);
3101 crq.login.len = cpu_to_be32(buffer_size);
3102 ibmvnic_send_crq(adapter, &crq);
3107 kfree(login_rsp_buffer);
3108 buf_rsp_alloc_failed:
3109 dma_unmap_single(dev, buffer_token, buffer_size, DMA_TO_DEVICE);
3111 kfree(login_buffer);
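/* Layout of the login buffer built above, as described by the offsets
 * written into struct ibmvnic_login_buffer:
 *
 *	+-----------------------------------+ 0
 *	| struct ibmvnic_login_buffer       |
 *	+-----------------------------------+ off_txcomp_subcrqs
 *	| __be64 tx sub-CRQ handles         | req_tx_queues entries
 *	+-----------------------------------+ off_rxcomp_subcrqs
 *	| __be64 rx sub-CRQ handles         | req_rx_queues entries
 *	+-----------------------------------+ client_data_offset
 *	| vnic_login_client_data entries    | client_data_len bytes
 *	+-----------------------------------+ len
 */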
3116 static void send_request_map(struct ibmvnic_adapter *adapter, dma_addr_t addr,
3119 union ibmvnic_crq crq;
3121 memset(&crq, 0, sizeof(crq));
3122 crq.request_map.first = IBMVNIC_CRQ_CMD;
3123 crq.request_map.cmd = REQUEST_MAP;
3124 crq.request_map.map_id = map_id;
3125 crq.request_map.ioba = cpu_to_be32(addr);
3126 crq.request_map.len = cpu_to_be32(len);
3127 ibmvnic_send_crq(adapter, &crq);
3130 static void send_request_unmap(struct ibmvnic_adapter *adapter, u8 map_id)
3132 union ibmvnic_crq crq;
3134 memset(&crq, 0, sizeof(crq));
3135 crq.request_unmap.first = IBMVNIC_CRQ_CMD;
3136 crq.request_unmap.cmd = REQUEST_UNMAP;
3137 crq.request_unmap.map_id = map_id;
3138 ibmvnic_send_crq(adapter, &crq);
3141 static void send_map_query(struct ibmvnic_adapter *adapter)
3143 union ibmvnic_crq crq;
3145 memset(&crq, 0, sizeof(crq));
3146 crq.query_map.first = IBMVNIC_CRQ_CMD;
3147 crq.query_map.cmd = QUERY_MAP;
3148 ibmvnic_send_crq(adapter, &crq);
3151 /* Send a series of CRQs requesting various capabilities of the VNIC server */
3152 static void send_cap_queries(struct ibmvnic_adapter *adapter)
3154 union ibmvnic_crq crq;
3156 atomic_set(&adapter->running_cap_crqs, 0);
3157 memset(&crq, 0, sizeof(crq));
3158 crq.query_capability.first = IBMVNIC_CRQ_CMD;
3159 crq.query_capability.cmd = QUERY_CAPABILITY;
3161 crq.query_capability.capability = cpu_to_be16(MIN_TX_QUEUES);
3162 atomic_inc(&adapter->running_cap_crqs);
3163 ibmvnic_send_crq(adapter, &crq);
3165 crq.query_capability.capability = cpu_to_be16(MIN_RX_QUEUES);
3166 atomic_inc(&adapter->running_cap_crqs);
3167 ibmvnic_send_crq(adapter, &crq);
3169 crq.query_capability.capability = cpu_to_be16(MIN_RX_ADD_QUEUES);
3170 atomic_inc(&adapter->running_cap_crqs);
3171 ibmvnic_send_crq(adapter, &crq);
3173 crq.query_capability.capability = cpu_to_be16(MAX_TX_QUEUES);
3174 atomic_inc(&adapter->running_cap_crqs);
3175 ibmvnic_send_crq(adapter, &crq);
3177 crq.query_capability.capability = cpu_to_be16(MAX_RX_QUEUES);
3178 atomic_inc(&adapter->running_cap_crqs);
3179 ibmvnic_send_crq(adapter, &crq);
3181 crq.query_capability.capability = cpu_to_be16(MAX_RX_ADD_QUEUES);
3182 atomic_inc(&adapter->running_cap_crqs);
3183 ibmvnic_send_crq(adapter, &crq);
3185 crq.query_capability.capability =
3186 cpu_to_be16(MIN_TX_ENTRIES_PER_SUBCRQ);
3187 atomic_inc(&adapter->running_cap_crqs);
3188 ibmvnic_send_crq(adapter, &crq);
3190 crq.query_capability.capability =
3191 cpu_to_be16(MIN_RX_ADD_ENTRIES_PER_SUBCRQ);
3192 atomic_inc(&adapter->running_cap_crqs);
3193 ibmvnic_send_crq(adapter, &crq);
3195 crq.query_capability.capability =
3196 cpu_to_be16(MAX_TX_ENTRIES_PER_SUBCRQ);
3197 atomic_inc(&adapter->running_cap_crqs);
3198 ibmvnic_send_crq(adapter, &crq);
3200 crq.query_capability.capability =
3201 cpu_to_be16(MAX_RX_ADD_ENTRIES_PER_SUBCRQ);
3202 atomic_inc(&adapter->running_cap_crqs);
3203 ibmvnic_send_crq(adapter, &crq);
3205 crq.query_capability.capability = cpu_to_be16(TCP_IP_OFFLOAD);
3206 atomic_inc(&adapter->running_cap_crqs);
3207 ibmvnic_send_crq(adapter, &crq);
3209 crq.query_capability.capability = cpu_to_be16(PROMISC_SUPPORTED);
3210 atomic_inc(&adapter->running_cap_crqs);
3211 ibmvnic_send_crq(adapter, &crq);
3213 crq.query_capability.capability = cpu_to_be16(MIN_MTU);
3214 atomic_inc(&adapter->running_cap_crqs);
3215 ibmvnic_send_crq(adapter, &crq);
3217 crq.query_capability.capability = cpu_to_be16(MAX_MTU);
3218 atomic_inc(&adapter->running_cap_crqs);
3219 ibmvnic_send_crq(adapter, &crq);
3221 crq.query_capability.capability = cpu_to_be16(MAX_MULTICAST_FILTERS);
3222 atomic_inc(&adapter->running_cap_crqs);
3223 ibmvnic_send_crq(adapter, &crq);
3225 crq.query_capability.capability = cpu_to_be16(VLAN_HEADER_INSERTION);
3226 atomic_inc(&adapter->running_cap_crqs);
3227 ibmvnic_send_crq(adapter, &crq);
3229 crq.query_capability.capability = cpu_to_be16(RX_VLAN_HEADER_INSERTION);
3230 atomic_inc(&adapter->running_cap_crqs);
3231 ibmvnic_send_crq(adapter, &crq);
3233 crq.query_capability.capability = cpu_to_be16(MAX_TX_SG_ENTRIES);
3234 atomic_inc(&adapter->running_cap_crqs);
3235 ibmvnic_send_crq(adapter, &crq);
3237 crq.query_capability.capability = cpu_to_be16(RX_SG_SUPPORTED);
3238 atomic_inc(&adapter->running_cap_crqs);
3239 ibmvnic_send_crq(adapter, &crq);
3241 crq.query_capability.capability = cpu_to_be16(OPT_TX_COMP_SUB_QUEUES);
3242 atomic_inc(&adapter->running_cap_crqs);
3243 ibmvnic_send_crq(adapter, &crq);
3245 crq.query_capability.capability = cpu_to_be16(OPT_RX_COMP_QUEUES);
3246 atomic_inc(&adapter->running_cap_crqs);
3247 ibmvnic_send_crq(adapter, &crq);
3249 crq.query_capability.capability =
3250 cpu_to_be16(OPT_RX_BUFADD_Q_PER_RX_COMP_Q);
3251 atomic_inc(&adapter->running_cap_crqs);
3252 ibmvnic_send_crq(adapter, &crq);
3254 crq.query_capability.capability =
3255 cpu_to_be16(OPT_TX_ENTRIES_PER_SUBCRQ);
3256 atomic_inc(&adapter->running_cap_crqs);
3257 ibmvnic_send_crq(adapter, &crq);
3259 crq.query_capability.capability =
3260 cpu_to_be16(OPT_RXBA_ENTRIES_PER_SUBCRQ);
3261 atomic_inc(&adapter->running_cap_crqs);
3262 ibmvnic_send_crq(adapter, &crq);
3264 crq.query_capability.capability = cpu_to_be16(TX_RX_DESC_REQ);
3265 atomic_inc(&adapter->running_cap_crqs);
3266 ibmvnic_send_crq(adapter, &crq);
3269 static void handle_vpd_size_rsp(union ibmvnic_crq *crq,
3270 struct ibmvnic_adapter *adapter)
3272 struct device *dev = &adapter->vdev->dev;
3274 if (crq->get_vpd_size_rsp.rc.code) {
3275 dev_err(dev, "Error retrieving VPD size, rc=%x\n",
3276 crq->get_vpd_size_rsp.rc.code);
3277 complete(&adapter->fw_done);
3281 adapter->vpd->len = be64_to_cpu(crq->get_vpd_size_rsp.len);
3282 complete(&adapter->fw_done);
3285 static void handle_vpd_rsp(union ibmvnic_crq *crq,
3286 struct ibmvnic_adapter *adapter)
3288 struct device *dev = &adapter->vdev->dev;
3289 unsigned char *substr = NULL, *ptr = NULL;
3290 u8 fw_level_len = 0;
3292 memset(adapter->fw_version, 0, 32);
3294 dma_unmap_single(dev, adapter->vpd->dma_addr, adapter->vpd->len,
3297 if (crq->get_vpd_rsp.rc.code) {
3298 dev_err(dev, "Error retrieving VPD from device, rc=%x\n",
3299 crq->get_vpd_rsp.rc.code);
3303 /* get the position of the firmware version info
3304 * located after the ASCII 'RM' substring in the buffer
3306 substr = strnstr(adapter->vpd->buff, "RM", adapter->vpd->len);
3308 dev_info(dev, "No FW level provided by VPD\n");
3312 /* get length of firmware level ASCII substring */
3313 if ((substr + 2) < (adapter->vpd->buff + adapter->vpd->len)) {
3314 fw_level_len = *(substr + 2);
3316 dev_info(dev, "Length of FW substr extrapolated VDP buff\n");
3320 /* copy firmware version string from vpd into adapter */
3321 if ((substr + 3 + fw_level_len) <
3322 (adapter->vpd->buff + adapter->vpd->len)) {
3323 ptr = strncpy((char *)adapter->fw_version,
3324 substr + 3, fw_level_len);
3327 dev_err(dev, "Failed to isolate FW level string\n");
3329 dev_info(dev, "FW substr extrapolated VPD buff\n");
3333 complete(&adapter->fw_done);
3336 static void handle_query_ip_offload_rsp(struct ibmvnic_adapter *adapter)
3338 struct device *dev = &adapter->vdev->dev;
3339 struct ibmvnic_query_ip_offload_buffer *buf = &adapter->ip_offload_buf;
3340 union ibmvnic_crq crq;
3343 dma_unmap_single(dev, adapter->ip_offload_tok,
3344 sizeof(adapter->ip_offload_buf), DMA_FROM_DEVICE);
3346 netdev_dbg(adapter->netdev, "Query IP Offload Buffer:\n");
3347 for (i = 0; i < (sizeof(adapter->ip_offload_buf) - 1) / 8 + 1; i++)
3348 netdev_dbg(adapter->netdev, "%016lx\n",
3349 ((unsigned long int *)(buf))[i]);
3351 netdev_dbg(adapter->netdev, "ipv4_chksum = %d\n", buf->ipv4_chksum);
3352 netdev_dbg(adapter->netdev, "ipv6_chksum = %d\n", buf->ipv6_chksum);
3353 netdev_dbg(adapter->netdev, "tcp_ipv4_chksum = %d\n",
3354 buf->tcp_ipv4_chksum);
3355 netdev_dbg(adapter->netdev, "tcp_ipv6_chksum = %d\n",
3356 buf->tcp_ipv6_chksum);
3357 netdev_dbg(adapter->netdev, "udp_ipv4_chksum = %d\n",
3358 buf->udp_ipv4_chksum);
3359 netdev_dbg(adapter->netdev, "udp_ipv6_chksum = %d\n",
3360 buf->udp_ipv6_chksum);
3361 netdev_dbg(adapter->netdev, "large_tx_ipv4 = %d\n",
3362 buf->large_tx_ipv4);
3363 netdev_dbg(adapter->netdev, "large_tx_ipv6 = %d\n",
3364 buf->large_tx_ipv6);
3365 netdev_dbg(adapter->netdev, "large_rx_ipv4 = %d\n",
3366 buf->large_rx_ipv4);
3367 netdev_dbg(adapter->netdev, "large_rx_ipv6 = %d\n",
3368 buf->large_rx_ipv6);
3369 netdev_dbg(adapter->netdev, "max_ipv4_hdr_sz = %d\n",
3370 buf->max_ipv4_header_size);
3371 netdev_dbg(adapter->netdev, "max_ipv6_hdr_sz = %d\n",
3372 buf->max_ipv6_header_size);
3373 netdev_dbg(adapter->netdev, "max_tcp_hdr_size = %d\n",
3374 buf->max_tcp_header_size);
3375 netdev_dbg(adapter->netdev, "max_udp_hdr_size = %d\n",
3376 buf->max_udp_header_size);
3377 netdev_dbg(adapter->netdev, "max_large_tx_size = %d\n",
3378 buf->max_large_tx_size);
3379 netdev_dbg(adapter->netdev, "max_large_rx_size = %d\n",
3380 buf->max_large_rx_size);
3381 netdev_dbg(adapter->netdev, "ipv6_ext_hdr = %d\n",
3382 buf->ipv6_extension_header);
3383 netdev_dbg(adapter->netdev, "tcp_pseudosum_req = %d\n",
3384 buf->tcp_pseudosum_req);
3385 netdev_dbg(adapter->netdev, "num_ipv6_ext_hd = %d\n",
3386 buf->num_ipv6_ext_headers);
3387 netdev_dbg(adapter->netdev, "off_ipv6_ext_hd = %d\n",
3388 buf->off_ipv6_ext_headers);
3390 adapter->ip_offload_ctrl_tok =
3391 dma_map_single(dev, &adapter->ip_offload_ctrl,
3392 sizeof(adapter->ip_offload_ctrl), DMA_TO_DEVICE);
3394 if (dma_mapping_error(dev, adapter->ip_offload_ctrl_tok)) {
3395 dev_err(dev, "Couldn't map ip offload control buffer\n");
3399 adapter->ip_offload_ctrl.len =
3400 cpu_to_be32(sizeof(adapter->ip_offload_ctrl));
3401 adapter->ip_offload_ctrl.version = cpu_to_be32(INITIAL_VERSION_IOB);
3402 adapter->ip_offload_ctrl.ipv4_chksum = buf->ipv4_chksum;
3403 adapter->ip_offload_ctrl.ipv6_chksum = buf->ipv6_chksum;
3404 adapter->ip_offload_ctrl.tcp_ipv4_chksum = buf->tcp_ipv4_chksum;
3405 adapter->ip_offload_ctrl.udp_ipv4_chksum = buf->udp_ipv4_chksum;
3406 adapter->ip_offload_ctrl.tcp_ipv6_chksum = buf->tcp_ipv6_chksum;
3407 adapter->ip_offload_ctrl.udp_ipv6_chksum = buf->udp_ipv6_chksum;
3408 adapter->ip_offload_ctrl.large_tx_ipv4 = buf->large_tx_ipv4;
3409 adapter->ip_offload_ctrl.large_tx_ipv6 = buf->large_tx_ipv6;
3411 /* large_rx disabled for now, additional features needed */
3412 adapter->ip_offload_ctrl.large_rx_ipv4 = 0;
3413 adapter->ip_offload_ctrl.large_rx_ipv6 = 0;
3415 adapter->netdev->features = NETIF_F_SG | NETIF_F_GSO;
3417 if (buf->tcp_ipv4_chksum || buf->udp_ipv4_chksum)
3418 adapter->netdev->features |= NETIF_F_IP_CSUM;
3420 if (buf->tcp_ipv6_chksum || buf->udp_ipv6_chksum)
3421 adapter->netdev->features |= NETIF_F_IPV6_CSUM;
3423 if ((adapter->netdev->features &
3424 (NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM)))
3425 adapter->netdev->features |= NETIF_F_RXCSUM;
3427 if (buf->large_tx_ipv4)
3428 adapter->netdev->features |= NETIF_F_TSO;
3429 if (buf->large_tx_ipv6)
3430 adapter->netdev->features |= NETIF_F_TSO6;
3432 adapter->netdev->hw_features |= adapter->netdev->features;
3434 memset(&crq, 0, sizeof(crq));
3435 crq.control_ip_offload.first = IBMVNIC_CRQ_CMD;
3436 crq.control_ip_offload.cmd = CONTROL_IP_OFFLOAD;
3437 crq.control_ip_offload.len =
3438 cpu_to_be32(sizeof(adapter->ip_offload_ctrl));
3439 crq.control_ip_offload.ioba = cpu_to_be32(adapter->ip_offload_ctrl_tok);
3440 ibmvnic_send_crq(adapter, &crq);
3443 static void handle_error_info_rsp(union ibmvnic_crq *crq,
3444 struct ibmvnic_adapter *adapter)
3446 struct device *dev = &adapter->vdev->dev;
3447 struct ibmvnic_error_buff *error_buff, *tmp;
3448 unsigned long flags;
3452 if (crq->request_error_rsp.rc.code) {
3453 dev_info(dev, "Request Error Rsp returned with rc=%x\n",
3454 crq->request_error_rsp.rc.code);
3458 spin_lock_irqsave(&adapter->error_list_lock, flags);
3459 list_for_each_entry_safe(error_buff, tmp, &adapter->errors, list)
3460 if (error_buff->error_id == crq->request_error_rsp.error_id) {
3462 list_del(&error_buff->list);
3465 spin_unlock_irqrestore(&adapter->error_list_lock, flags);
3468 dev_err(dev, "Couldn't find error id %x\n",
3469 be32_to_cpu(crq->request_error_rsp.error_id));
3473 dev_err(dev, "Detailed info for error id %x:",
3474 be32_to_cpu(crq->request_error_rsp.error_id));
3476 for (i = 0; i < error_buff->len; i++) {
3477 pr_cont("%02x", (int)error_buff->buff[i]);
3483 dma_unmap_single(dev, error_buff->dma, error_buff->len,
3485 kfree(error_buff->buff);
3489 static void request_error_information(struct ibmvnic_adapter *adapter,
3490 union ibmvnic_crq *err_crq)
3492 struct device *dev = &adapter->vdev->dev;
3493 struct net_device *netdev = adapter->netdev;
3494 struct ibmvnic_error_buff *error_buff;
3495 unsigned long timeout = msecs_to_jiffies(30000);
3496 union ibmvnic_crq crq;
3497 unsigned long flags;
3500 error_buff = kmalloc(sizeof(*error_buff), GFP_ATOMIC);
3504 detail_len = be32_to_cpu(err_crq->error_indication.detail_error_sz);
3505 error_buff->buff = kmalloc(detail_len, GFP_ATOMIC);
3506 if (!error_buff->buff) {
3511 error_buff->dma = dma_map_single(dev, error_buff->buff, detail_len,
3513 if (dma_mapping_error(dev, error_buff->dma)) {
3514 netdev_err(netdev, "Couldn't map error buffer\n");
3515 kfree(error_buff->buff);
3520 error_buff->len = detail_len;
3521 error_buff->error_id = err_crq->error_indication.error_id;
3523 spin_lock_irqsave(&adapter->error_list_lock, flags);
3524 list_add_tail(&error_buff->list, &adapter->errors);
3525 spin_unlock_irqrestore(&adapter->error_list_lock, flags);
3527 memset(&crq, 0, sizeof(crq));
3528 crq.request_error_info.first = IBMVNIC_CRQ_CMD;
3529 crq.request_error_info.cmd = REQUEST_ERROR_INFO;
3530 crq.request_error_info.ioba = cpu_to_be32(error_buff->dma);
3531 crq.request_error_info.len = cpu_to_be32(detail_len);
3532 crq.request_error_info.error_id = err_crq->error_indication.error_id;
3534 rc = ibmvnic_send_crq(adapter, &crq);
3536 netdev_err(netdev, "failed to request error information\n");
3540 if (!wait_for_completion_timeout(&adapter->init_done, timeout)) {
3541 netdev_err(netdev, "timeout waiting for error information\n");
3548 spin_lock_irqsave(&adapter->error_list_lock, flags);
3549 list_del(&error_buff->list);
3550 spin_unlock_irqrestore(&adapter->error_list_lock, flags);
3552 kfree(error_buff->buff);
3556 static void handle_error_indication(union ibmvnic_crq *crq,
3557 struct ibmvnic_adapter *adapter)
3559 struct device *dev = &adapter->vdev->dev;
3561 dev_err(dev, "Firmware reports %serror id %x, cause %d\n",
3562 crq->error_indication.flags
3563 & IBMVNIC_FATAL_ERROR ? "FATAL " : "",
3564 be32_to_cpu(crq->error_indication.error_id),
3565 be16_to_cpu(crq->error_indication.error_cause));
3567 if (be32_to_cpu(crq->error_indication.error_id))
3568 request_error_information(adapter, crq);
3570 if (crq->error_indication.flags & IBMVNIC_FATAL_ERROR)
3571 ibmvnic_reset(adapter, VNIC_RESET_FATAL);
3573 ibmvnic_reset(adapter, VNIC_RESET_NON_FATAL);
3576 static int handle_change_mac_rsp(union ibmvnic_crq *crq,
3577 struct ibmvnic_adapter *adapter)
3579 struct net_device *netdev = adapter->netdev;
3580 struct device *dev = &adapter->vdev->dev;
3583 rc = crq->change_mac_addr_rsp.rc.code;
3585 dev_err(dev, "Error %ld in CHANGE_MAC_ADDR_RSP\n", rc);
3588 memcpy(netdev->dev_addr, &crq->change_mac_addr_rsp.mac_addr[0],
3591 complete(&adapter->fw_done);
3595 static void handle_request_cap_rsp(union ibmvnic_crq *crq,
3596 struct ibmvnic_adapter *adapter)
3598 struct device *dev = &adapter->vdev->dev;
3602 atomic_dec(&adapter->running_cap_crqs);
3603 switch (be16_to_cpu(crq->request_capability_rsp.capability)) {
3605 req_value = &adapter->req_tx_queues;
3609 req_value = &adapter->req_rx_queues;
3612 case REQ_RX_ADD_QUEUES:
3613 req_value = &adapter->req_rx_add_queues;
3616 case REQ_TX_ENTRIES_PER_SUBCRQ:
3617 req_value = &adapter->req_tx_entries_per_subcrq;
3618 name = "tx_entries_per_subcrq";
3620 case REQ_RX_ADD_ENTRIES_PER_SUBCRQ:
3621 req_value = &adapter->req_rx_add_entries_per_subcrq;
3622 name = "rx_add_entries_per_subcrq";
3625 req_value = &adapter->req_mtu;
3628 case PROMISC_REQUESTED:
3629 req_value = &adapter->promisc;
3633 dev_err(dev, "Got invalid cap request rsp %d\n",
3634 crq->request_capability.capability);
3638 switch (crq->request_capability_rsp.rc.code) {
3641 case PARTIALSUCCESS:
3642 dev_info(dev, "req=%lld, rsp=%ld in %s queue, retrying.\n",
3644 (long int)be64_to_cpu(crq->request_capability_rsp.
3647 if (be16_to_cpu(crq->request_capability_rsp.capability) ==
3649 pr_err("mtu of %llu is not supported. Reverting.\n",
3651 *req_value = adapter->fallback.mtu;
3654 be64_to_cpu(crq->request_capability_rsp.number);
3657 ibmvnic_send_req_caps(adapter, 1);
3660 dev_err(dev, "Error %d in request cap rsp\n",
3661 crq->request_capability_rsp.rc.code);
3665 /* Done receiving requested capabilities, query IP offload support */
3666 if (atomic_read(&adapter->running_cap_crqs) == 0) {
3667 union ibmvnic_crq newcrq;
3668 int buf_sz = sizeof(struct ibmvnic_query_ip_offload_buffer);
3669 struct ibmvnic_query_ip_offload_buffer *ip_offload_buf =
3670 &adapter->ip_offload_buf;
3672 adapter->wait_capability = false;
3673 adapter->ip_offload_tok = dma_map_single(dev, ip_offload_buf,
3677 if (dma_mapping_error(dev, adapter->ip_offload_tok)) {
3678 if (!firmware_has_feature(FW_FEATURE_CMO))
3679 dev_err(dev, "Couldn't map offload buffer\n");
3683 memset(&newcrq, 0, sizeof(newcrq));
3684 newcrq.query_ip_offload.first = IBMVNIC_CRQ_CMD;
3685 newcrq.query_ip_offload.cmd = QUERY_IP_OFFLOAD;
3686 newcrq.query_ip_offload.len = cpu_to_be32(buf_sz);
3687 newcrq.query_ip_offload.ioba =
3688 cpu_to_be32(adapter->ip_offload_tok);
3690 ibmvnic_send_crq(adapter, &newcrq);
3694 static int handle_login_rsp(union ibmvnic_crq *login_rsp_crq,
3695 struct ibmvnic_adapter *adapter)
3697 struct device *dev = &adapter->vdev->dev;
3698 struct net_device *netdev = adapter->netdev;
3699 struct ibmvnic_login_rsp_buffer *login_rsp = adapter->login_rsp_buf;
3700 struct ibmvnic_login_buffer *login = adapter->login_buf;
3703 dma_unmap_single(dev, adapter->login_buf_token, adapter->login_buf_sz,
3705 dma_unmap_single(dev, adapter->login_rsp_buf_token,
3706 adapter->login_rsp_buf_sz, DMA_BIDIRECTIONAL);
3708 /* If the number of queues requested can't be allocated by the
3709 * server, the login response will return with code 1. We will need
3710 * to resend the login buffer with fewer queues requested.
3712 if (login_rsp_crq->generic.rc.code) {
3713 adapter->renegotiate = true;
3714 complete(&adapter->init_done);
3718 netdev->mtu = adapter->req_mtu - ETH_HLEN;
3720 netdev_dbg(adapter->netdev, "Login Response Buffer:\n");
3721 for (i = 0; i < (adapter->login_rsp_buf_sz - 1) / 8 + 1; i++) {
3722 netdev_dbg(adapter->netdev, "%016lx\n",
3723 ((unsigned long int *)(adapter->login_rsp_buf))[i]);
3727 if (login->num_txcomp_subcrqs != login_rsp->num_txsubm_subcrqs ||
3728 (be32_to_cpu(login->num_rxcomp_subcrqs) *
3729 adapter->req_rx_add_queues !=
3730 be32_to_cpu(login_rsp->num_rxadd_subcrqs))) {
3731 dev_err(dev, "FATAL: Inconsistent login and login rsp\n");
3732 ibmvnic_remove(adapter->vdev);
3735 complete(&adapter->init_done);
3740 static void handle_request_unmap_rsp(union ibmvnic_crq *crq,
3741 struct ibmvnic_adapter *adapter)
3743 struct device *dev = &adapter->vdev->dev;
3746 rc = crq->request_unmap_rsp.rc.code;
3748 dev_err(dev, "Error %ld in REQUEST_UNMAP_RSP\n", rc);
3751 static void handle_query_map_rsp(union ibmvnic_crq *crq,
3752 struct ibmvnic_adapter *adapter)
3754 struct net_device *netdev = adapter->netdev;
3755 struct device *dev = &adapter->vdev->dev;
3758 rc = crq->query_map_rsp.rc.code;
3760 dev_err(dev, "Error %ld in QUERY_MAP_RSP\n", rc);
3763 netdev_dbg(netdev, "page_size = %d\ntot_pages = %d\nfree_pages = %d\n",
3764 crq->query_map_rsp.page_size, crq->query_map_rsp.tot_pages,
3765 crq->query_map_rsp.free_pages);
3768 static void handle_query_cap_rsp(union ibmvnic_crq *crq,
3769 struct ibmvnic_adapter *adapter)
3771 struct net_device *netdev = adapter->netdev;
3772 struct device *dev = &adapter->vdev->dev;
3775 atomic_dec(&adapter->running_cap_crqs);
3776 netdev_dbg(netdev, "Outstanding queries: %d\n",
3777 atomic_read(&adapter->running_cap_crqs));
3778 rc = crq->query_capability.rc.code;
3780 dev_err(dev, "Error %ld in QUERY_CAP_RSP\n", rc);
3784 switch (be16_to_cpu(crq->query_capability.capability)) {
3786 adapter->min_tx_queues =
3787 be64_to_cpu(crq->query_capability.number);
3788 netdev_dbg(netdev, "min_tx_queues = %lld\n",
3789 adapter->min_tx_queues);
3792 adapter->min_rx_queues =
3793 be64_to_cpu(crq->query_capability.number);
3794 netdev_dbg(netdev, "min_rx_queues = %lld\n",
3795 adapter->min_rx_queues);
3797 case MIN_RX_ADD_QUEUES:
3798 adapter->min_rx_add_queues =
3799 be64_to_cpu(crq->query_capability.number);
3800 netdev_dbg(netdev, "min_rx_add_queues = %lld\n",
3801 adapter->min_rx_add_queues);
3804 adapter->max_tx_queues =
3805 be64_to_cpu(crq->query_capability.number);
3806 netdev_dbg(netdev, "max_tx_queues = %lld\n",
3807 adapter->max_tx_queues);
3810 adapter->max_rx_queues =
3811 be64_to_cpu(crq->query_capability.number);
3812 netdev_dbg(netdev, "max_rx_queues = %lld\n",
3813 adapter->max_rx_queues);
3815 case MAX_RX_ADD_QUEUES:
3816 adapter->max_rx_add_queues =
3817 be64_to_cpu(crq->query_capability.number);
3818 netdev_dbg(netdev, "max_rx_add_queues = %lld\n",
3819 adapter->max_rx_add_queues);
3821 case MIN_TX_ENTRIES_PER_SUBCRQ:
3822 adapter->min_tx_entries_per_subcrq =
3823 be64_to_cpu(crq->query_capability.number);
3824 netdev_dbg(netdev, "min_tx_entries_per_subcrq = %lld\n",
3825 adapter->min_tx_entries_per_subcrq);
3827 case MIN_RX_ADD_ENTRIES_PER_SUBCRQ:
3828 adapter->min_rx_add_entries_per_subcrq =
3829 be64_to_cpu(crq->query_capability.number);
3830 netdev_dbg(netdev, "min_rx_add_entrs_per_subcrq = %lld\n",
3831 adapter->min_rx_add_entries_per_subcrq);
3833 case MAX_TX_ENTRIES_PER_SUBCRQ:
3834 adapter->max_tx_entries_per_subcrq =
3835 be64_to_cpu(crq->query_capability.number);
3836 netdev_dbg(netdev, "max_tx_entries_per_subcrq = %lld\n",
3837 adapter->max_tx_entries_per_subcrq);
3839 case MAX_RX_ADD_ENTRIES_PER_SUBCRQ:
3840 adapter->max_rx_add_entries_per_subcrq =
3841 be64_to_cpu(crq->query_capability.number);
3842 netdev_dbg(netdev, "max_rx_add_entrs_per_subcrq = %lld\n",
3843 adapter->max_rx_add_entries_per_subcrq);
3845 case TCP_IP_OFFLOAD:
3846 adapter->tcp_ip_offload =
3847 be64_to_cpu(crq->query_capability.number);
3848 netdev_dbg(netdev, "tcp_ip_offload = %lld\n",
3849 adapter->tcp_ip_offload);
3851 case PROMISC_SUPPORTED:
3852 adapter->promisc_supported =
3853 be64_to_cpu(crq->query_capability.number);
3854 netdev_dbg(netdev, "promisc_supported = %lld\n",
3855 adapter->promisc_supported);
3858 adapter->min_mtu = be64_to_cpu(crq->query_capability.number);
3859 netdev->min_mtu = adapter->min_mtu - ETH_HLEN;
3860 netdev_dbg(netdev, "min_mtu = %lld\n", adapter->min_mtu);
3863 adapter->max_mtu = be64_to_cpu(crq->query_capability.number);
3864 netdev->max_mtu = adapter->max_mtu - ETH_HLEN;
3865 netdev_dbg(netdev, "max_mtu = %lld\n", adapter->max_mtu);
3867 case MAX_MULTICAST_FILTERS:
3868 adapter->max_multicast_filters =
3869 be64_to_cpu(crq->query_capability.number);
3870 netdev_dbg(netdev, "max_multicast_filters = %lld\n",
3871 adapter->max_multicast_filters);
3873 case VLAN_HEADER_INSERTION:
3874 adapter->vlan_header_insertion =
3875 be64_to_cpu(crq->query_capability.number);
3876 if (adapter->vlan_header_insertion)
3877 netdev->features |= NETIF_F_HW_VLAN_STAG_TX;
3878 netdev_dbg(netdev, "vlan_header_insertion = %lld\n",
3879 adapter->vlan_header_insertion);
3881 case RX_VLAN_HEADER_INSERTION:
3882 adapter->rx_vlan_header_insertion =
3883 be64_to_cpu(crq->query_capability.number);
3884 netdev_dbg(netdev, "rx_vlan_header_insertion = %lld\n",
3885 adapter->rx_vlan_header_insertion);
3887 case MAX_TX_SG_ENTRIES:
3888 adapter->max_tx_sg_entries =
3889 be64_to_cpu(crq->query_capability.number);
3890 netdev_dbg(netdev, "max_tx_sg_entries = %lld\n",
3891 adapter->max_tx_sg_entries);
3893 case RX_SG_SUPPORTED:
3894 adapter->rx_sg_supported =
3895 be64_to_cpu(crq->query_capability.number);
3896 netdev_dbg(netdev, "rx_sg_supported = %lld\n",
3897 adapter->rx_sg_supported);
3899 case OPT_TX_COMP_SUB_QUEUES:
3900 adapter->opt_tx_comp_sub_queues =
3901 be64_to_cpu(crq->query_capability.number);
3902 netdev_dbg(netdev, "opt_tx_comp_sub_queues = %lld\n",
3903 adapter->opt_tx_comp_sub_queues);
3905 case OPT_RX_COMP_QUEUES:
3906 adapter->opt_rx_comp_queues =
3907 be64_to_cpu(crq->query_capability.number);
3908 netdev_dbg(netdev, "opt_rx_comp_queues = %lld\n",
3909 adapter->opt_rx_comp_queues);
3911 case OPT_RX_BUFADD_Q_PER_RX_COMP_Q:
3912 adapter->opt_rx_bufadd_q_per_rx_comp_q =
3913 be64_to_cpu(crq->query_capability.number);
3914 netdev_dbg(netdev, "opt_rx_bufadd_q_per_rx_comp_q = %lld\n",
3915 adapter->opt_rx_bufadd_q_per_rx_comp_q);
3917 case OPT_TX_ENTRIES_PER_SUBCRQ:
3918 adapter->opt_tx_entries_per_subcrq =
3919 be64_to_cpu(crq->query_capability.number);
3920 netdev_dbg(netdev, "opt_tx_entries_per_subcrq = %lld\n",
3921 adapter->opt_tx_entries_per_subcrq);
3923 case OPT_RXBA_ENTRIES_PER_SUBCRQ:
3924 adapter->opt_rxba_entries_per_subcrq =
3925 be64_to_cpu(crq->query_capability.number);
3926 netdev_dbg(netdev, "opt_rxba_entries_per_subcrq = %lld\n",
3927 adapter->opt_rxba_entries_per_subcrq);
3929 case TX_RX_DESC_REQ:
3930 adapter->tx_rx_desc_req = crq->query_capability.number;
3931 netdev_dbg(netdev, "tx_rx_desc_req = %llx\n",
3932 adapter->tx_rx_desc_req);
3936 netdev_err(netdev, "Got invalid cap rsp %d\n",
3937 crq->query_capability.capability);
3941 if (atomic_read(&adapter->running_cap_crqs) == 0) {
3942 adapter->wait_capability = false;
3943 ibmvnic_send_req_caps(adapter, 0);
3947 static void ibmvnic_handle_crq(union ibmvnic_crq *crq,
3948 struct ibmvnic_adapter *adapter)
3950 struct ibmvnic_generic_crq *gen_crq = &crq->generic;
3951 struct net_device *netdev = adapter->netdev;
3952 struct device *dev = &adapter->vdev->dev;
3953 u64 *u64_crq = (u64 *)crq;
3956 netdev_dbg(netdev, "Handling CRQ: %016lx %016lx\n",
3957 (unsigned long int)cpu_to_be64(u64_crq[0]),
3958 (unsigned long int)cpu_to_be64(u64_crq[1]));
3959 switch (gen_crq->first) {
3960 case IBMVNIC_CRQ_INIT_RSP:
3961 switch (gen_crq->cmd) {
3962 case IBMVNIC_CRQ_INIT:
3963 dev_info(dev, "Partner initialized\n");
3964 adapter->from_passive_init = true;
3965 complete(&adapter->init_done);
3967 case IBMVNIC_CRQ_INIT_COMPLETE:
3968 dev_info(dev, "Partner initialization complete\n");
3969 send_version_xchg(adapter);
3972 dev_err(dev, "Unknown crq cmd: %d\n", gen_crq->cmd);
3975 case IBMVNIC_CRQ_XPORT_EVENT:
3976 netif_carrier_off(netdev);
3977 if (gen_crq->cmd == IBMVNIC_PARTITION_MIGRATED) {
3978 dev_info(dev, "Migrated, re-enabling adapter\n");
3979 ibmvnic_reset(adapter, VNIC_RESET_MOBILITY);
3980 } else if (gen_crq->cmd == IBMVNIC_DEVICE_FAILOVER) {
3981 dev_info(dev, "Backing device failover detected\n");
3982 ibmvnic_reset(adapter, VNIC_RESET_FAILOVER);
3984 /* The adapter lost the connection */
3985 dev_err(dev, "Virtual Adapter failed (rc=%d)\n",
3987 ibmvnic_reset(adapter, VNIC_RESET_FATAL);
3990 case IBMVNIC_CRQ_CMD_RSP:
3993 dev_err(dev, "Got an invalid msg type 0x%02x\n",
3998 switch (gen_crq->cmd) {
3999 case VERSION_EXCHANGE_RSP:
4000 rc = crq->version_exchange_rsp.rc.code;
4002 dev_err(dev, "Error %ld in VERSION_EXCHG_RSP\n", rc);
4005 dev_info(dev, "Partner protocol version is %d\n",
4006 crq->version_exchange_rsp.version);
4007 if (be16_to_cpu(crq->version_exchange_rsp.version) <
4010 be16_to_cpu(crq->version_exchange_rsp.version);
4011 send_cap_queries(adapter);
4013 case QUERY_CAPABILITY_RSP:
4014 handle_query_cap_rsp(crq, adapter);
4017 handle_query_map_rsp(crq, adapter);
4019 case REQUEST_MAP_RSP:
4020 adapter->fw_done_rc = crq->request_map_rsp.rc.code;
4021 complete(&adapter->fw_done);
4023 case REQUEST_UNMAP_RSP:
4024 handle_request_unmap_rsp(crq, adapter);
4026 case REQUEST_CAPABILITY_RSP:
4027 handle_request_cap_rsp(crq, adapter);
4030 netdev_dbg(netdev, "Got Login Response\n");
4031 handle_login_rsp(crq, adapter);
4033 case LOGICAL_LINK_STATE_RSP:
4035 "Got Logical Link State Response, state: %d rc: %d\n",
4036 crq->logical_link_state_rsp.link_state,
4037 crq->logical_link_state_rsp.rc.code);
4038 adapter->logical_link_state =
4039 crq->logical_link_state_rsp.link_state;
4040 adapter->init_done_rc = crq->logical_link_state_rsp.rc.code;
4041 complete(&adapter->init_done);
4043 case LINK_STATE_INDICATION:
4044 netdev_dbg(netdev, "Got Logical Link State Indication\n");
4045 adapter->phys_link_state =
4046 crq->link_state_indication.phys_link_state;
4047 adapter->logical_link_state =
4048 crq->link_state_indication.logical_link_state;
4050 case CHANGE_MAC_ADDR_RSP:
4051 netdev_dbg(netdev, "Got MAC address change Response\n");
4052 adapter->fw_done_rc = handle_change_mac_rsp(crq, adapter);
4054 case ERROR_INDICATION:
4055 netdev_dbg(netdev, "Got Error Indication\n");
4056 handle_error_indication(crq, adapter);
4058 case REQUEST_ERROR_RSP:
4059 netdev_dbg(netdev, "Got Error Detail Response\n");
4060 handle_error_info_rsp(crq, adapter);
4062 case REQUEST_STATISTICS_RSP:
4063 netdev_dbg(netdev, "Got Statistics Response\n");
4064 complete(&adapter->stats_done);
4066 case QUERY_IP_OFFLOAD_RSP:
4067 netdev_dbg(netdev, "Got Query IP offload Response\n");
4068 handle_query_ip_offload_rsp(adapter);
4070 case MULTICAST_CTRL_RSP:
4071 netdev_dbg(netdev, "Got multicast control Response\n");
4073 case CONTROL_IP_OFFLOAD_RSP:
4074 netdev_dbg(netdev, "Got Control IP offload Response\n");
4075 dma_unmap_single(dev, adapter->ip_offload_ctrl_tok,
4076 sizeof(adapter->ip_offload_ctrl),
4078 complete(&adapter->init_done);
4080 case COLLECT_FW_TRACE_RSP:
4081 netdev_dbg(netdev, "Got Collect firmware trace Response\n");
4082 complete(&adapter->fw_done);
4084 case GET_VPD_SIZE_RSP:
4085 handle_vpd_size_rsp(crq, adapter);
4088 handle_vpd_rsp(crq, adapter);
4091 netdev_err(netdev, "Got an invalid cmd type 0x%02x\n",
4096 static irqreturn_t ibmvnic_interrupt(int irq, void *instance)
4098 struct ibmvnic_adapter *adapter = instance;
4100 tasklet_schedule(&adapter->tasklet);
4104 static void ibmvnic_tasklet(void *data)
4106 struct ibmvnic_adapter *adapter = data;
4107 struct ibmvnic_crq_queue *queue = &adapter->crq;
4108 union ibmvnic_crq *crq;
4109 unsigned long flags;
4112 spin_lock_irqsave(&queue->lock, flags);
4114 /* Pull all the valid messages off the CRQ */
4115 while ((crq = ibmvnic_next_crq(adapter)) != NULL) {
4116 ibmvnic_handle_crq(crq, adapter);
4117 crq->generic.first = 0;
4120 /* remain in tasklet until all
4121 * capabilities responses are received
4123 if (!adapter->wait_capability)
4126 /* if capability CRQs were sent in this tasklet, the following
4127 * tasklet must wait until all responses are received
4129 if (atomic_read(&adapter->running_cap_crqs) != 0)
4130 adapter->wait_capability = true;
4131 spin_unlock_irqrestore(&queue->lock, flags);
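/* The tasklet deliberately stays in its loop, rather than rescheduling
 * itself, while capability responses are outstanding:
 * handle_request_cap_rsp() and handle_query_cap_rsp() clear
 * adapter->wait_capability once running_cap_crqs drains to zero, and
 * only then does the loop above terminate. This way a burst of
 * back-to-back capability responses is consumed in a single pass.
 */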
4134 static int ibmvnic_reenable_crq_queue(struct ibmvnic_adapter *adapter)
4136 struct vio_dev *vdev = adapter->vdev;
4140 rc = plpar_hcall_norets(H_ENABLE_CRQ, vdev->unit_address);
4141 } while (rc == H_IN_PROGRESS || rc == H_BUSY || H_IS_LONG_BUSY(rc));
4144 dev_err(&vdev->dev, "Error enabling adapter (rc=%d)\n", rc);
4149 static int ibmvnic_reset_crq(struct ibmvnic_adapter *adapter)
4151 struct ibmvnic_crq_queue *crq = &adapter->crq;
4152 struct device *dev = &adapter->vdev->dev;
4153 struct vio_dev *vdev = adapter->vdev;
4158 rc = plpar_hcall_norets(H_FREE_CRQ, vdev->unit_address);
4159 } while (rc == H_BUSY || H_IS_LONG_BUSY(rc));
4161 /* Clean out the queue */
4162 memset(crq->msgs, 0, PAGE_SIZE);
4165 /* And re-open it again */
4166 rc = plpar_hcall_norets(H_REG_CRQ, vdev->unit_address,
4167 crq->msg_token, PAGE_SIZE);
4170 /* Adapter is good, but other end is not ready */
4171 dev_warn(dev, "Partner adapter not ready\n");
4173 dev_warn(dev, "Couldn't register crq (rc=%d)\n", rc);
4178 static void release_crq_queue(struct ibmvnic_adapter *adapter)
4180 struct ibmvnic_crq_queue *crq = &adapter->crq;
4181 struct vio_dev *vdev = adapter->vdev;
4187 netdev_dbg(adapter->netdev, "Releasing CRQ\n");
4188 free_irq(vdev->irq, adapter);
4189 tasklet_kill(&adapter->tasklet);
4191 rc = plpar_hcall_norets(H_FREE_CRQ, vdev->unit_address);
4192 } while (rc == H_BUSY || H_IS_LONG_BUSY(rc));
4194 dma_unmap_single(&vdev->dev, crq->msg_token, PAGE_SIZE,
4196 free_page((unsigned long)crq->msgs);
4200 static int init_crq_queue(struct ibmvnic_adapter *adapter)
4202 struct ibmvnic_crq_queue *crq = &adapter->crq;
4203 struct device *dev = &adapter->vdev->dev;
4204 struct vio_dev *vdev = adapter->vdev;
4205 int rc, retrc = -ENOMEM;
4210 crq->msgs = (union ibmvnic_crq *)get_zeroed_page(GFP_KERNEL);
4211 /* Should we allocate more than one page? */
4216 crq->size = PAGE_SIZE / sizeof(*crq->msgs);
4217 crq->msg_token = dma_map_single(dev, crq->msgs, PAGE_SIZE,
4219 if (dma_mapping_error(dev, crq->msg_token))
4222 rc = plpar_hcall_norets(H_REG_CRQ, vdev->unit_address,
4223 crq->msg_token, PAGE_SIZE);
4225 if (rc == H_RESOURCE)
4226 /* we may be kexecing and the resource is still busy; try a reset */
4227 rc = ibmvnic_reset_crq(adapter);
4230 if (rc == H_CLOSED) {
4231 dev_warn(dev, "Partner adapter not ready\n");
4233 dev_warn(dev, "Error %d opening adapter\n", rc);
4234 goto reg_crq_failed;
4239 tasklet_init(&adapter->tasklet, (void *)ibmvnic_tasklet,
4240 (unsigned long)adapter);
4242 netdev_dbg(adapter->netdev, "registering irq 0x%x\n", vdev->irq);
4243 rc = request_irq(vdev->irq, ibmvnic_interrupt, 0, IBMVNIC_NAME,
4246 dev_err(dev, "Couldn't register irq 0x%x. rc=%d\n",
4248 goto req_irq_failed;
4251 rc = vio_enable_interrupts(vdev);
4253 dev_err(dev, "Error %d enabling interrupts\n", rc);
4254 goto req_irq_failed;
4258 spin_lock_init(&crq->lock);
4263 tasklet_kill(&adapter->tasklet);
4265 rc = plpar_hcall_norets(H_FREE_CRQ, vdev->unit_address);
4266 } while (rc == H_BUSY || H_IS_LONG_BUSY(rc));
4268 dma_unmap_single(dev, crq->msg_token, PAGE_SIZE, DMA_BIDIRECTIONAL);
4270 free_page((unsigned long)crq->msgs);
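
/* Common initialization for probe and reset. A driver-initiated reset
 * reuses the existing CRQ (ibmvnic_reset_crq); otherwise a fresh queue
 * is created. The CRQ init handshake is then sent, and the driver waits
 * up to 30 seconds for the server before bringing up the sub-CRQs.
 */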
static int ibmvnic_init(struct ibmvnic_adapter *adapter)
{
	struct device *dev = &adapter->vdev->dev;
	unsigned long timeout = msecs_to_jiffies(30000);
	int rc;

	if (adapter->resetting && !adapter->wait_for_reset) {
		rc = ibmvnic_reset_crq(adapter);
		if (!rc)
			rc = vio_enable_interrupts(adapter->vdev);
	} else {
		rc = init_crq_queue(adapter);
	}
	if (rc) {
		dev_err(dev, "Couldn't initialize crq. rc=%d\n", rc);
		return rc;
	}

	adapter->from_passive_init = false;

	init_completion(&adapter->init_done);
	adapter->init_done_rc = 0;
	ibmvnic_send_crq_init(adapter);
	if (!wait_for_completion_timeout(&adapter->init_done, timeout)) {
		dev_err(dev, "Initialization sequence timed out\n");
		return -1;
	}

	if (adapter->init_done_rc) {
		release_crq_queue(adapter);
		return adapter->init_done_rc;
	}

	if (adapter->from_passive_init) {
		adapter->state = VNIC_OPEN;
		adapter->from_passive_init = false;
		return -1;
	}

	if (adapter->resetting && !adapter->wait_for_reset)
		rc = reset_sub_crq_queues(adapter);
	else
		rc = init_sub_crqs(adapter);
	if (rc) {
		dev_err(dev, "Initialization of sub crqs failed\n");
		release_crq_queue(adapter);
		return rc;
	}

	rc = init_sub_crq_irqs(adapter);
	if (rc) {
		dev_err(dev, "Failed to initialize sub crq irqs\n");
		release_crq_queue(adapter);
	}
	return rc;
}
static struct device_attribute dev_attr_failover;
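
/* Probe a VIO "network" device: fetch the MAC address attribute from
 * firmware, allocate a multiqueue netdev, run the init handshake (which
 * negotiates queue counts and MTU limits), and register the netdev.
 */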
static int ibmvnic_probe(struct vio_dev *dev, const struct vio_device_id *id)
{
	struct ibmvnic_adapter *adapter;
	struct net_device *netdev;
	unsigned char *mac_addr_p;
	int rc;

	dev_dbg(&dev->dev, "entering ibmvnic_probe for UA 0x%x\n",
		dev->unit_address);

	mac_addr_p = (unsigned char *)vio_get_attribute(dev,
							VETH_MAC_ADDR, NULL);
	if (!mac_addr_p) {
		dev_err(&dev->dev,
			"(%s:%3.3d) ERROR: Can't find MAC_ADDR attribute\n",
			__FILE__, __LINE__);
		return 0;
	}

	netdev = alloc_etherdev_mq(sizeof(struct ibmvnic_adapter),
				   IBMVNIC_MAX_QUEUES);
	if (!netdev)
		return -ENOMEM;

	adapter = netdev_priv(netdev);
	adapter->state = VNIC_PROBING;
	dev_set_drvdata(&dev->dev, netdev);
	adapter->vdev = dev;
	adapter->netdev = netdev;

	ether_addr_copy(adapter->mac_addr, mac_addr_p);
	ether_addr_copy(netdev->dev_addr, adapter->mac_addr);
	netdev->irq = dev->irq;
	netdev->netdev_ops = &ibmvnic_netdev_ops;
	netdev->ethtool_ops = &ibmvnic_ethtool_ops;
	SET_NETDEV_DEV(netdev, &dev->dev);

	spin_lock_init(&adapter->stats_lock);

	INIT_LIST_HEAD(&adapter->errors);
	spin_lock_init(&adapter->error_list_lock);

	INIT_WORK(&adapter->ibmvnic_reset, __ibmvnic_reset);
	INIT_LIST_HEAD(&adapter->rwi_list);
	mutex_init(&adapter->reset_lock);
	mutex_init(&adapter->rwi_lock);
	adapter->resetting = false;

	adapter->mac_change_pending = false;

	do {
		rc = ibmvnic_init(adapter);
		if (rc && rc != EAGAIN)
			goto ibmvnic_init_fail;
	} while (rc == EAGAIN);

	netdev->mtu = adapter->req_mtu - ETH_HLEN;
	netdev->min_mtu = adapter->min_mtu - ETH_HLEN;
	netdev->max_mtu = adapter->max_mtu - ETH_HLEN;

	rc = device_create_file(&dev->dev, &dev_attr_failover);
	if (rc)
		goto ibmvnic_init_fail;

	netif_carrier_off(netdev);
	rc = register_netdev(netdev);
	if (rc) {
		dev_err(&dev->dev, "failed to register netdev rc=%d\n", rc);
		goto ibmvnic_register_fail;
	}
	dev_info(&dev->dev, "ibmvnic registered\n");

	adapter->state = VNIC_PROBED;

	adapter->wait_for_reset = false;

	return 0;

ibmvnic_register_fail:
	device_remove_file(&dev->dev, &dev_attr_failover);

ibmvnic_init_fail:
	release_sub_crqs(adapter);
	release_crq_queue(adapter);
	free_netdev(netdev);

	return rc;
}
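
/* Device removal: unregister the netdev and release all resources.
 * reset_lock is held so an in-flight reset cannot race with teardown.
 */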
static int ibmvnic_remove(struct vio_dev *dev)
{
	struct net_device *netdev = dev_get_drvdata(&dev->dev);
	struct ibmvnic_adapter *adapter = netdev_priv(netdev);

	adapter->state = VNIC_REMOVING;
	unregister_netdev(netdev);
	mutex_lock(&adapter->reset_lock);

	release_resources(adapter);
	release_sub_crqs(adapter);
	release_crq_queue(adapter);

	adapter->state = VNIC_REMOVED;

	mutex_unlock(&adapter->reset_lock);
	device_remove_file(&dev->dev, &dev_attr_failover);
	free_netdev(netdev);
	dev_set_drvdata(&dev->dev, NULL);

	return 0;
}
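
/* sysfs "failover" attribute: writing "1" initiates a client failover.
 * The driver fetches a session token with the H_GET_SESSION_TOKEN
 * subfunction of H_VIOCTL, then reports H_SESSION_ERR_DETECTED with
 * that token so the server fails the session over to a backup device.
 * Illustrative usage (the sysfs path depends on the unit address):
 *
 *   echo 1 > /sys/devices/vio/30000003/failover
 */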
static ssize_t failover_store(struct device *dev, struct device_attribute *attr,
			      const char *buf, size_t count)
{
	struct net_device *netdev = dev_get_drvdata(dev);
	struct ibmvnic_adapter *adapter = netdev_priv(netdev);
	unsigned long retbuf[PLPAR_HCALL_BUFSIZE];
	__be64 session_token;
	long rc;

	if (!sysfs_streq(buf, "1"))
		return -EINVAL;

	rc = plpar_hcall(H_VIOCTL, retbuf, adapter->vdev->unit_address,
			 H_GET_SESSION_TOKEN, 0, 0, 0);
	if (rc) {
		netdev_err(netdev, "Couldn't retrieve session token, rc %ld\n",
			   rc);
		return -EINVAL;
	}

	session_token = (__be64)retbuf[0];
	netdev_dbg(netdev, "Initiating client failover, session id %llx\n",
		   be64_to_cpu(session_token));
	rc = plpar_hcall_norets(H_VIOCTL, adapter->vdev->unit_address,
				H_SESSION_ERR_DETECTED, session_token, 0, 0);
	if (rc) {
		netdev_err(netdev, "Client initiated failover failed, rc %ld\n",
			   rc);
		return -EINVAL;
	}

	return count;
}

static DEVICE_ATTR_WO(failover);
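
/* Tell the VIO bus how much IOMMU space the device wants: the CRQ page,
 * the statistics buffer, four pages per sub-CRQ, and every buffer in
 * each rx pool. Before probe has run, fall back to a default
 * entitlement since the adapter structures do not exist yet.
 */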
static unsigned long ibmvnic_get_desired_dma(struct vio_dev *vdev)
{
	struct net_device *netdev = dev_get_drvdata(&vdev->dev);
	struct ibmvnic_adapter *adapter;
	struct iommu_table *tbl;
	unsigned long ret = 0;
	int i;

	tbl = get_iommu_table_base(&vdev->dev);

	/* netdev inits at probe time along with the structures we need below */
	if (!netdev)
		return IOMMU_PAGE_ALIGN(IBMVNIC_IO_ENTITLEMENT_DEFAULT, tbl);

	adapter = netdev_priv(netdev);

	ret += PAGE_SIZE; /* the crq message queue */
	ret += IOMMU_PAGE_ALIGN(sizeof(struct ibmvnic_statistics), tbl);

	for (i = 0; i < adapter->req_tx_queues + adapter->req_rx_queues; i++)
		ret += 4 * PAGE_SIZE; /* the scrq message queue */

	for (i = 0; i < be32_to_cpu(adapter->login_rsp_buf->num_rxadd_subcrqs);
	     i++)
		ret += adapter->rx_pool[i].size *
		     IOMMU_PAGE_ALIGN(adapter->rx_pool[i].buff_size, tbl);

	return ret;
}
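
/* PM resume hook: if the adapter was open, kick the tasklet to process
 * any CRQ messages that arrived while the partition was suspended.
 */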
static int ibmvnic_resume(struct device *dev)
{
	struct net_device *netdev = dev_get_drvdata(dev);
	struct ibmvnic_adapter *adapter = netdev_priv(netdev);

	if (adapter->state != VNIC_OPEN)
		return 0;
	tasklet_schedule(&adapter->tasklet);
	return 0;
}
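
/* Match VIO devices whose device tree node is type "network" and
 * compatible with "IBM,vnic".
 */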
static const struct vio_device_id ibmvnic_device_table[] = {
	{"network", "IBM,vnic"},
	{"", "" }
};
MODULE_DEVICE_TABLE(vio, ibmvnic_device_table);

static const struct dev_pm_ops ibmvnic_pm_ops = {
	.resume = ibmvnic_resume
};

static struct vio_driver ibmvnic_driver = {
	.id_table       = ibmvnic_device_table,
	.probe          = ibmvnic_probe,
	.remove         = ibmvnic_remove,
	.get_desired_dma = ibmvnic_get_desired_dma,
	.name           = ibmvnic_driver_name,
	.pm             = &ibmvnic_pm_ops,
};
/* module functions */
static int __init ibmvnic_module_init(void)
{
	pr_info("%s: %s %s\n", ibmvnic_driver_name, ibmvnic_driver_string,
		IBMVNIC_DRIVER_VERSION);

	return vio_register_driver(&ibmvnic_driver);
}

static void __exit ibmvnic_module_exit(void)
{
	vio_unregister_driver(&ibmvnic_driver);
}

module_init(ibmvnic_module_init);
module_exit(ibmvnic_module_exit);