/* cnic.c: QLogic CNIC core network driver.
 *
 * Copyright (c) 2006-2014 Broadcom Corporation
 * Copyright (c) 2014-2015 QLogic Corporation
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation.
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/list.h>
#include <linux/slab.h>
#include <linux/pci.h>
#include <linux/init.h>
#include <linux/netdevice.h>
#include <linux/uio_driver.h>
#include <linux/dma-mapping.h>
#include <linux/delay.h>
#include <linux/ethtool.h>
#include <linux/if_vlan.h>
#include <linux/prefetch.h>
#include <linux/random.h>
#if IS_ENABLED(CONFIG_VLAN_8021Q)
#define BCM_VLAN 1
#endif
#include <net/route.h>
#include <net/ip6_route.h>
#include <net/ip6_checksum.h>
#include <scsi/iscsi_if.h>

#include "bnx2x/bnx2x.h"
#include "bnx2x/bnx2x_reg.h"
#include "bnx2x/bnx2x_fw_defs.h"
#include "bnx2x/bnx2x_hsi.h"
#include "../../../scsi/bnx2i/57xx_iscsi_constants.h"
#include "../../../scsi/bnx2i/57xx_iscsi_hsi.h"
#include "../../../scsi/bnx2fc/bnx2fc_constants.h"
#include "cnic_defs.h"

#define CNIC_MODULE_NAME	"cnic"
static char version[] =
	"QLogic " CNIC_MODULE_NAME " Driver v" CNIC_MODULE_VERSION " (" CNIC_MODULE_RELDATE ")\n";
MODULE_DESCRIPTION("QLogic cnic Driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(CNIC_MODULE_VERSION);

/* cnic_dev_list modifications are protected by both rtnl and cnic_dev_lock */
static LIST_HEAD(cnic_dev_list);
static LIST_HEAD(cnic_udev_list);
static DEFINE_RWLOCK(cnic_dev_lock);
static DEFINE_MUTEX(cnic_lock);

static struct cnic_ulp_ops __rcu *cnic_ulp_tbl[MAX_CNIC_ULP_TYPE];

/* helper function, assuming cnic_lock is held */
static inline struct cnic_ulp_ops *cnic_ulp_tbl_prot(int type)
{
	return rcu_dereference_protected(cnic_ulp_tbl[type],
					 lockdep_is_held(&cnic_lock));
}
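
/* cnic_ulp_tbl follows the usual RCU publish/read split: updaters install
 * or clear entries under cnic_lock with rcu_assign_pointer() or
 * RCU_INIT_POINTER(), and cnic_ulp_tbl_prot() is the matching updater-side
 * accessor (lockdep_is_held() lets lockdep verify the mutex really is
 * held).  Lockless readers pair rcu_read_lock() with rcu_dereference().
 * A minimal reader-side sketch, not part of this driver:
 *
 *	struct cnic_ulp_ops *ops;
 *
 *	rcu_read_lock();
 *	ops = rcu_dereference(cnic_ulp_tbl[ulp_type]);
 *	if (ops)
 *		ops->cnic_init(dev);
 *	rcu_read_unlock();
 */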
static int cnic_service_bnx2(void *, void *);
static int cnic_service_bnx2x(void *, void *);
static int cnic_ctl(void *, struct cnic_ctl_info *);

static struct cnic_ops cnic_bnx2_ops = {
	.cnic_owner	= THIS_MODULE,
	.cnic_handler	= cnic_service_bnx2,
	.cnic_ctl	= cnic_ctl,
};

static struct cnic_ops cnic_bnx2x_ops = {
	.cnic_owner	= THIS_MODULE,
	.cnic_handler	= cnic_service_bnx2x,
	.cnic_ctl	= cnic_ctl,
};
static struct workqueue_struct *cnic_wq;

static void cnic_shutdown_rings(struct cnic_dev *);
static void cnic_init_rings(struct cnic_dev *);
static int cnic_cm_set_pg(struct cnic_sock *);

static int cnic_uio_open(struct uio_info *uinfo, struct inode *inode)
{
	struct cnic_uio_dev *udev = uinfo->priv;
	struct cnic_dev *dev;

	if (!capable(CAP_NET_ADMIN))
		return -EPERM;

	if (udev->uio_dev != -1)
		return -EBUSY;

	if (!dev || !test_bit(CNIC_F_CNIC_UP, &dev->flags)) {

	udev->uio_dev = iminor(inode);

	cnic_shutdown_rings(dev);
	cnic_init_rings(dev);

static int cnic_uio_close(struct uio_info *uinfo, struct inode *inode)
{
	struct cnic_uio_dev *udev = uinfo->priv;

static inline void cnic_hold(struct cnic_dev *dev)
{
	atomic_inc(&dev->ref_count);
}

static inline void cnic_put(struct cnic_dev *dev)
{
	atomic_dec(&dev->ref_count);
}

static inline void csk_hold(struct cnic_sock *csk)
{
	atomic_inc(&csk->ref_count);
}

static inline void csk_put(struct cnic_sock *csk)
{
	atomic_dec(&csk->ref_count);
}

static struct cnic_dev *cnic_from_netdev(struct net_device *netdev)
{
	struct cnic_dev *cdev;

	read_lock(&cnic_dev_lock);
	list_for_each_entry(cdev, &cnic_dev_list, list) {
		if (netdev == cdev->netdev) {
			cnic_hold(cdev);
			read_unlock(&cnic_dev_lock);
			return cdev;
		}
	}
	read_unlock(&cnic_dev_lock);
	return NULL;
}
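
/* cnic_from_netdev() is the standard "reference under lock" lookup: a
 * match is pinned with cnic_hold() before cnic_dev_lock is dropped, so the
 * device cannot be freed between the unlock and the caller's first use.
 * A minimal caller sketch (hypothetical, not part of this file):
 *
 *	struct cnic_dev *dev = cnic_from_netdev(netdev);
 *
 *	if (dev) {
 *		(use dev here)
 *		cnic_put(dev);
 *	}
 */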
static inline void ulp_get(struct cnic_ulp_ops *ulp_ops)
{
	atomic_inc(&ulp_ops->ref_count);
}

static inline void ulp_put(struct cnic_ulp_ops *ulp_ops)
{
	atomic_dec(&ulp_ops->ref_count);
}

static void cnic_ctx_wr(struct cnic_dev *dev, u32 cid_addr, u32 off, u32 val)
{
	struct cnic_local *cp = dev->cnic_priv;
	struct cnic_eth_dev *ethdev = cp->ethdev;
	struct drv_ctl_info info;
	struct drv_ctl_io *io = &info.data.io;

	memset(&info, 0, sizeof(struct drv_ctl_info));
	info.cmd = DRV_CTL_CTX_WR_CMD;
	io->cid_addr = cid_addr;
	io->offset = off;
	io->data = val;
	ethdev->drv_ctl(dev->netdev, &info);
}

static void cnic_ctx_tbl_wr(struct cnic_dev *dev, u32 off, dma_addr_t addr)
{
	struct cnic_local *cp = dev->cnic_priv;
	struct cnic_eth_dev *ethdev = cp->ethdev;
	struct drv_ctl_info info;
	struct drv_ctl_io *io = &info.data.io;

	memset(&info, 0, sizeof(struct drv_ctl_info));
	info.cmd = DRV_CTL_CTXTBL_WR_CMD;
	io->offset = off;
	io->dma_addr = addr;
	ethdev->drv_ctl(dev->netdev, &info);
}

static void cnic_ring_ctl(struct cnic_dev *dev, u32 cid, u32 cl_id, int start)
{
	struct cnic_local *cp = dev->cnic_priv;
	struct cnic_eth_dev *ethdev = cp->ethdev;
	struct drv_ctl_info info;
	struct drv_ctl_l2_ring *ring = &info.data.ring;

	memset(&info, 0, sizeof(struct drv_ctl_info));
	if (start)
		info.cmd = DRV_CTL_START_L2_CMD;
	else
		info.cmd = DRV_CTL_STOP_L2_CMD;

	ring->cid = cid;
	ring->client_id = cl_id;
	ethdev->drv_ctl(dev->netdev, &info);
}

static void cnic_reg_wr_ind(struct cnic_dev *dev, u32 off, u32 val)
{
	struct cnic_local *cp = dev->cnic_priv;
	struct cnic_eth_dev *ethdev = cp->ethdev;
	struct drv_ctl_info info;
	struct drv_ctl_io *io = &info.data.io;

	memset(&info, 0, sizeof(struct drv_ctl_info));
	info.cmd = DRV_CTL_IO_WR_CMD;
	io->offset = off;
	io->data = val;
	ethdev->drv_ctl(dev->netdev, &info);
}

static u32 cnic_reg_rd_ind(struct cnic_dev *dev, u32 off)
{
	struct cnic_local *cp = dev->cnic_priv;
	struct cnic_eth_dev *ethdev = cp->ethdev;
	struct drv_ctl_info info;
	struct drv_ctl_io *io = &info.data.io;

	memset(&info, 0, sizeof(struct drv_ctl_info));
	info.cmd = DRV_CTL_IO_RD_CMD;
	io->offset = off;
	ethdev->drv_ctl(dev->netdev, &info);

	return io->data;
}

static void cnic_ulp_ctl(struct cnic_dev *dev, int ulp_type, bool reg, int state)
{
	struct cnic_local *cp = dev->cnic_priv;
	struct cnic_eth_dev *ethdev = cp->ethdev;
	struct drv_ctl_info info;
	struct fcoe_capabilities *fcoe_cap =
		&info.data.register_data.fcoe_features;

	memset(&info, 0, sizeof(struct drv_ctl_info));
	if (reg) {
		info.cmd = DRV_CTL_ULP_REGISTER_CMD;
		if (ulp_type == CNIC_ULP_FCOE && dev->fcoe_cap)
			memcpy(fcoe_cap, dev->fcoe_cap, sizeof(*fcoe_cap));
	} else {
		info.cmd = DRV_CTL_ULP_UNREGISTER_CMD;
	}

	info.data.ulp_type = ulp_type;
	info.drv_state = state;
	ethdev->drv_ctl(dev->netdev, &info);
}
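
/* All of the wrappers above funnel through the ethdev->drv_ctl() mailbox
 * exported by the bnx2/bnx2x netdriver: zero a struct drv_ctl_info, set a
 * command code plus a command-specific payload, and dispatch.  A new
 * control operation would follow the same shape; a sketch with a
 * hypothetical command code:
 *
 *	struct drv_ctl_info info;
 *
 *	memset(&info, 0, sizeof(info));
 *	info.cmd = DRV_CTL_SOME_CMD;		(hypothetical)
 *	info.data.io.offset = off;		(command-specific payload)
 *	ethdev->drv_ctl(dev->netdev, &info);
 */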
static int cnic_in_use(struct cnic_sock *csk)
{
	return test_bit(SK_F_INUSE, &csk->flags);
}

static void cnic_spq_completion(struct cnic_dev *dev, int cmd, u32 count)
{
	struct cnic_local *cp = dev->cnic_priv;
	struct cnic_eth_dev *ethdev = cp->ethdev;
	struct drv_ctl_info info;

	memset(&info, 0, sizeof(struct drv_ctl_info));
	info.cmd = cmd;
	info.data.credit.credit_count = count;
	ethdev->drv_ctl(dev->netdev, &info);
}

static int cnic_get_l5_cid(struct cnic_local *cp, u32 cid, u32 *l5_cid)
{
	u32 i;

	for (i = 0; i < cp->max_cid_space; i++) {
		if (cp->ctx_tbl[i].cid == cid) {
			*l5_cid = i;
			return 0;
		}
	}
	return -EINVAL;
}

static int cnic_send_nlmsg(struct cnic_local *cp, u32 type,
			   struct cnic_sock *csk)
{
	struct iscsi_path path_req;
	char *buf = NULL;
	u16 len = 0;
	u32 msg_type = ISCSI_KEVENT_IF_DOWN;
	struct cnic_ulp_ops *ulp_ops;
	struct cnic_uio_dev *udev = cp->udev;
	int rc = 0, retry = 0;

	if (!udev || udev->uio_dev == -1)
		return -ENODEV;

	if (csk) {
		len = sizeof(path_req);
		buf = (char *) &path_req;
		memset(&path_req, 0, len);

		msg_type = ISCSI_KEVENT_PATH_REQ;
		path_req.handle = (u64) csk->l5_cid;
		if (test_bit(SK_F_IPV6, &csk->flags)) {
			memcpy(&path_req.dst.v6_addr, &csk->dst_ip[0],
			       sizeof(struct in6_addr));
			path_req.ip_addr_len = 16;
		} else {
			memcpy(&path_req.dst.v4_addr, &csk->dst_ip[0],
			       sizeof(struct in_addr));
			path_req.ip_addr_len = 4;
		}
		path_req.vlan_id = csk->vlan_id;
		path_req.pmtu = csk->mtu;
	}

	ulp_ops = rcu_dereference(cp->ulp_ops[CNIC_ULP_ISCSI]);
	if (ulp_ops)
		rc = ulp_ops->iscsi_nl_send_msg(
			cp->ulp_handle[CNIC_ULP_ISCSI],
			msg_type, buf, len);
	if (rc == 0 || msg_type != ISCSI_KEVENT_PATH_REQ)

static void cnic_cm_upcall(struct cnic_local *, struct cnic_sock *, u8);

static int cnic_iscsi_nl_msg_recv(struct cnic_dev *dev, u32 msg_type,
				  char *buf, u16 len)
{
	switch (msg_type) {
	case ISCSI_UEVENT_PATH_UPDATE: {
		struct cnic_local *cp;
		u32 l5_cid;
		struct cnic_sock *csk;
		struct iscsi_path *path_resp;

		if (len < sizeof(*path_resp))

		path_resp = (struct iscsi_path *) buf;
		cp = dev->cnic_priv;
		l5_cid = (u32) path_resp->handle;
		if (l5_cid >= MAX_CM_SK_TBL_SZ)

		if (!rcu_access_pointer(cp->ulp_ops[CNIC_ULP_L4])) {

		csk = &cp->csk_tbl[l5_cid];

		if (cnic_in_use(csk) &&
		    test_bit(SK_F_CONNECT_START, &csk->flags)) {

			csk->vlan_id = path_resp->vlan_id;

			memcpy(csk->ha, path_resp->mac_addr, ETH_ALEN);
			if (test_bit(SK_F_IPV6, &csk->flags))
				memcpy(&csk->src_ip[0], &path_resp->src.v6_addr,
				       sizeof(struct in6_addr));
			else
				memcpy(&csk->src_ip[0], &path_resp->src.v4_addr,
				       sizeof(struct in_addr));

			if (is_valid_ether_addr(csk->ha)) {
				cnic_cm_set_pg(csk);
			} else if (!test_bit(SK_F_OFFLD_SCHED, &csk->flags) &&
				   !test_bit(SK_F_OFFLD_COMPLETE, &csk->flags)) {

				cnic_cm_upcall(cp, csk,
					L4_KCQE_OPCODE_VALUE_CONNECT_COMPLETE);
				clear_bit(SK_F_CONNECT_START, &csk->flags);
			}
		}
	}

static int cnic_offld_prep(struct cnic_sock *csk)
{
	if (test_and_set_bit(SK_F_OFFLD_SCHED, &csk->flags))

	if (!test_bit(SK_F_CONNECT_START, &csk->flags)) {
		clear_bit(SK_F_OFFLD_SCHED, &csk->flags);

static int cnic_close_prep(struct cnic_sock *csk)
{
	clear_bit(SK_F_CONNECT_START, &csk->flags);
	smp_mb__after_atomic();

	if (test_and_clear_bit(SK_F_OFFLD_COMPLETE, &csk->flags)) {
		while (test_and_set_bit(SK_F_OFFLD_SCHED, &csk->flags))

static int cnic_abort_prep(struct cnic_sock *csk)
{
	clear_bit(SK_F_CONNECT_START, &csk->flags);
	smp_mb__after_atomic();

	while (test_and_set_bit(SK_F_OFFLD_SCHED, &csk->flags))

	if (test_and_clear_bit(SK_F_OFFLD_COMPLETE, &csk->flags)) {
		csk->state = L4_KCQE_OPCODE_VALUE_RESET_COMP;
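
/* The prep helpers above implement a small handshake on csk->flags:
 * SK_F_OFFLD_SCHED acts as a lock taken with test_and_set_bit(), and
 * clearing SK_F_CONNECT_START first (ordered by smp_mb__after_atomic())
 * guarantees that a concurrent offload cannot start once close/abort has
 * begun.  The test_and_set_bit() loop is effectively a sleeping spin on
 * the bit; the idiom, in sketch form (the elided loop body is assumed to
 * sleep or relax):
 *
 *	while (test_and_set_bit(SK_F_OFFLD_SCHED, &csk->flags))
 *		msleep(1);
 *	(critical section)
 *	clear_bit(SK_F_OFFLD_SCHED, &csk->flags);
 */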
int cnic_register_driver(int ulp_type, struct cnic_ulp_ops *ulp_ops)
{
	struct cnic_dev *dev;

	if (ulp_type < 0 || ulp_type >= MAX_CNIC_ULP_TYPE) {
		pr_err("%s: Bad type %d\n", __func__, ulp_type);
		return -EINVAL;
	}
	mutex_lock(&cnic_lock);
	if (cnic_ulp_tbl_prot(ulp_type)) {
		pr_err("%s: Type %d has already been registered\n",
		       __func__, ulp_type);
		mutex_unlock(&cnic_lock);
		return -EBUSY;
	}

	read_lock(&cnic_dev_lock);
	list_for_each_entry(dev, &cnic_dev_list, list) {
		struct cnic_local *cp = dev->cnic_priv;

		clear_bit(ULP_F_INIT, &cp->ulp_flags[ulp_type]);
	}
	read_unlock(&cnic_dev_lock);

	atomic_set(&ulp_ops->ref_count, 0);
	rcu_assign_pointer(cnic_ulp_tbl[ulp_type], ulp_ops);
	mutex_unlock(&cnic_lock);

	/* Prevent race conditions with netdev_event */
	list_for_each_entry(dev, &cnic_dev_list, list) {
		struct cnic_local *cp = dev->cnic_priv;

		if (!test_and_set_bit(ULP_F_INIT, &cp->ulp_flags[ulp_type]))
			ulp_ops->cnic_init(dev);
	}

int cnic_unregister_driver(int ulp_type)
{
	struct cnic_dev *dev;
	struct cnic_ulp_ops *ulp_ops;
	int i = 0;

	if (ulp_type < 0 || ulp_type >= MAX_CNIC_ULP_TYPE) {
		pr_err("%s: Bad type %d\n", __func__, ulp_type);
		return -EINVAL;
	}
	mutex_lock(&cnic_lock);
	ulp_ops = cnic_ulp_tbl_prot(ulp_type);
	if (!ulp_ops) {
		pr_err("%s: Type %d has not been registered\n",
		       __func__, ulp_type);

	read_lock(&cnic_dev_lock);
	list_for_each_entry(dev, &cnic_dev_list, list) {
		struct cnic_local *cp = dev->cnic_priv;

		if (rcu_access_pointer(cp->ulp_ops[ulp_type])) {
			pr_err("%s: Type %d still has devices registered\n",
			       __func__, ulp_type);
			read_unlock(&cnic_dev_lock);
		}
	}
	read_unlock(&cnic_dev_lock);

	RCU_INIT_POINTER(cnic_ulp_tbl[ulp_type], NULL);

	mutex_unlock(&cnic_lock);

	while ((atomic_read(&ulp_ops->ref_count) != 0) && (i < 20)) {

	if (atomic_read(&ulp_ops->ref_count) != 0)
		pr_warn("%s: Failed waiting for ref count to go to zero\n",
			__func__);

	mutex_unlock(&cnic_lock);
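
/* Unregistering drains ulp_ops->ref_count by polling rather than
 * blocking: up to 20 iterations with a sleep in the (elided) loop body,
 * then a warning if callers still hold references.  The same polled
 * drain in sketch form, with the iteration bound and sleep length
 * assumed from the visible loop condition:
 *
 *	int i = 0;
 *
 *	while (atomic_read(&ulp_ops->ref_count) != 0 && i < 20) {
 *		msleep(100);
 *		i++;
 *	}
 */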
static int cnic_start_hw(struct cnic_dev *);
static void cnic_stop_hw(struct cnic_dev *);

static int cnic_register_device(struct cnic_dev *dev, int ulp_type,
				void *ulp_ctx)
{
	struct cnic_local *cp = dev->cnic_priv;
	struct cnic_ulp_ops *ulp_ops;

	if (ulp_type < 0 || ulp_type >= MAX_CNIC_ULP_TYPE) {
		pr_err("%s: Bad type %d\n", __func__, ulp_type);
		return -EINVAL;
	}
	mutex_lock(&cnic_lock);
	if (cnic_ulp_tbl_prot(ulp_type) == NULL) {
		pr_err("%s: Driver with type %d has not been registered\n",
		       __func__, ulp_type);
		mutex_unlock(&cnic_lock);
		return -EAGAIN;
	}
	if (rcu_access_pointer(cp->ulp_ops[ulp_type])) {
		pr_err("%s: Type %d has already been registered to this device\n",
		       __func__, ulp_type);
		mutex_unlock(&cnic_lock);
		return -EBUSY;
	}

	clear_bit(ULP_F_START, &cp->ulp_flags[ulp_type]);
	cp->ulp_handle[ulp_type] = ulp_ctx;
	ulp_ops = cnic_ulp_tbl_prot(ulp_type);
	rcu_assign_pointer(cp->ulp_ops[ulp_type], ulp_ops);

	if (test_bit(CNIC_F_CNIC_UP, &dev->flags))
		if (!test_and_set_bit(ULP_F_START, &cp->ulp_flags[ulp_type]))
			ulp_ops->cnic_start(cp->ulp_handle[ulp_type]);

	mutex_unlock(&cnic_lock);

	cnic_ulp_ctl(dev, ulp_type, true, DRV_ACTIVE);

EXPORT_SYMBOL(cnic_register_driver);

static int cnic_unregister_device(struct cnic_dev *dev, int ulp_type)
{
	struct cnic_local *cp = dev->cnic_priv;

	if (ulp_type < 0 || ulp_type >= MAX_CNIC_ULP_TYPE) {
		pr_err("%s: Bad type %d\n", __func__, ulp_type);
		return -EINVAL;
	}

	if (ulp_type == CNIC_ULP_ISCSI)
		cnic_send_nlmsg(cp, ISCSI_KEVENT_IF_DOWN, NULL);

	mutex_lock(&cnic_lock);
	if (rcu_access_pointer(cp->ulp_ops[ulp_type])) {
		RCU_INIT_POINTER(cp->ulp_ops[ulp_type], NULL);
	} else {
		pr_err("%s: device not registered to this ulp type %d\n",
		       __func__, ulp_type);
		mutex_unlock(&cnic_lock);
		return -EINVAL;
	}
	mutex_unlock(&cnic_lock);

	if (ulp_type == CNIC_ULP_FCOE)
		dev->fcoe_cap = NULL;

	while (test_bit(ULP_F_CALL_PENDING, &cp->ulp_flags[ulp_type]) &&

	if (test_bit(ULP_F_CALL_PENDING, &cp->ulp_flags[ulp_type]))
		netdev_warn(dev->netdev, "Failed waiting for ULP up call to complete\n");

	if (test_bit(ULP_F_INIT, &cp->ulp_flags[ulp_type]))
		cnic_ulp_ctl(dev, ulp_type, false, DRV_UNLOADED);
	else
		cnic_ulp_ctl(dev, ulp_type, false, DRV_INACTIVE);

EXPORT_SYMBOL(cnic_unregister_driver);

static int cnic_init_id_tbl(struct cnic_id_tbl *id_tbl, u32 size, u32 start_id,
			    u32 next)
{
	id_tbl->start = start_id;
	id_tbl->max = size;
	id_tbl->next = next;
	spin_lock_init(&id_tbl->lock);
	id_tbl->table = bitmap_zalloc(size, GFP_KERNEL);
	if (!id_tbl->table)
		return -ENOMEM;

	return 0;
}

static void cnic_free_id_tbl(struct cnic_id_tbl *id_tbl)
{
	bitmap_free(id_tbl->table);
	id_tbl->table = NULL;
}

static int cnic_alloc_id(struct cnic_id_tbl *id_tbl, u32 id)
{
	if (id >= id_tbl->max)
		return -1;

	spin_lock(&id_tbl->lock);
	if (!test_bit(id, id_tbl->table)) {
		set_bit(id, id_tbl->table);
	}
	spin_unlock(&id_tbl->lock);

/* Returns -1 if not successful */
static u32 cnic_alloc_new_id(struct cnic_id_tbl *id_tbl)
{
	u32 id;

	spin_lock(&id_tbl->lock);
	id = find_next_zero_bit(id_tbl->table, id_tbl->max, id_tbl->next);
	if (id >= id_tbl->max) {
		id = -1;
		if (id_tbl->next != 0) {
			id = find_first_zero_bit(id_tbl->table, id_tbl->next);
			if (id >= id_tbl->next)
				id = -1;
		}
	}

	if (id < id_tbl->max) {
		set_bit(id, id_tbl->table);
		id_tbl->next = (id + 1) & (id_tbl->max - 1);
	}

	spin_unlock(&id_tbl->lock);

	return id;
}
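
/* cnic_alloc_new_id() is a circular bitmap allocator: search from
 * id_tbl->next to the end, wrap to the beginning on failure, and remember
 * the slot after the winner for the next call.  The wrap arithmetic
 * (id + 1) & (id_tbl->max - 1) assumes max is a power of two; for example
 * with max = 256 and a winning id of 255, next becomes (255 + 1) & 255 = 0.
 * A caller sketch (hypothetical):
 *
 *	u32 id = cnic_alloc_new_id(&cp->cid_tbl);
 *
 *	if (id == -1)
 *		return -ENOMEM;		(table full)
 *	(use id)
 *	cnic_free_id(&cp->cid_tbl, id);
 */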
static void cnic_free_id(struct cnic_id_tbl *id_tbl, u32 id)
{
	if (id >= id_tbl->max)
		return;

	clear_bit(id, id_tbl->table);
}

static void cnic_free_dma(struct cnic_dev *dev, struct cnic_dma *dma)
{
	int i;

	for (i = 0; i < dma->num_pages; i++) {
		if (dma->pg_arr[i]) {
			dma_free_coherent(&dev->pcidev->dev, CNIC_PAGE_SIZE,
					  dma->pg_arr[i], dma->pg_map_arr[i]);
			dma->pg_arr[i] = NULL;
		}
	}

	dma_free_coherent(&dev->pcidev->dev, dma->pgtbl_size,
			  dma->pgtbl, dma->pgtbl_map);

static void cnic_setup_page_tbl(struct cnic_dev *dev, struct cnic_dma *dma)
{
	int i;
	__le32 *page_table = (__le32 *) dma->pgtbl;

	for (i = 0; i < dma->num_pages; i++) {
		/* Each entry needs to be in big endian format. */
		*page_table = cpu_to_le32((u64) dma->pg_map_arr[i] >> 32);
		page_table++;
		*page_table = cpu_to_le32(dma->pg_map_arr[i] & 0xffffffff);
		page_table++;
	}
}

static void cnic_setup_page_tbl_le(struct cnic_dev *dev, struct cnic_dma *dma)
{
	int i;
	__le32 *page_table = (__le32 *) dma->pgtbl;

	for (i = 0; i < dma->num_pages; i++) {
		/* Each entry needs to be in little endian format. */
		*page_table = cpu_to_le32(dma->pg_map_arr[i] & 0xffffffff);
		page_table++;
		*page_table = cpu_to_le32((u64) dma->pg_map_arr[i] >> 32);
		page_table++;
	}
}
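
/* Both setup helpers emit one 64-bit DMA address per page as two 32-bit
 * words; the only difference is word order (bnx2 firmware expects the
 * high word first, bnx2x the low word first).  The split itself is the
 * usual shift/mask pair, e.g. for map = 0x0000001234abcd00:
 *
 *	hi = (u64) map >> 32;		yields 0x00000012
 *	lo = map & 0xffffffff;		yields 0x34abcd00
 */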
static int cnic_alloc_dma(struct cnic_dev *dev, struct cnic_dma *dma,
			  int pages, int use_pg_tbl)
{
	int i, size;
	struct cnic_local *cp = dev->cnic_priv;

	size = pages * (sizeof(void *) + sizeof(dma_addr_t));
	dma->pg_arr = kzalloc(size, GFP_ATOMIC);
	if (dma->pg_arr == NULL)
		return -ENOMEM;

	dma->pg_map_arr = (dma_addr_t *) (dma->pg_arr + pages);
	dma->num_pages = pages;

	for (i = 0; i < pages; i++) {
		dma->pg_arr[i] = dma_alloc_coherent(&dev->pcidev->dev,
						    CNIC_PAGE_SIZE,
						    &dma->pg_map_arr[i],
						    GFP_ATOMIC);
		if (dma->pg_arr[i] == NULL)
			goto error;
	}

	dma->pgtbl_size = ((pages * 8) + CNIC_PAGE_SIZE - 1) &
			  ~(CNIC_PAGE_SIZE - 1);
	dma->pgtbl = dma_alloc_coherent(&dev->pcidev->dev, dma->pgtbl_size,
					&dma->pgtbl_map, GFP_ATOMIC);
	if (dma->pgtbl == NULL)
		goto error;

	cp->setup_pgtbl(dev, dma);

	return 0;

error:
	cnic_free_dma(dev, dma);
	return -ENOMEM;
}

static void cnic_free_context(struct cnic_dev *dev)
{
	struct cnic_local *cp = dev->cnic_priv;
	int i;

	for (i = 0; i < cp->ctx_blks; i++) {
		if (cp->ctx_arr[i].ctx) {
			dma_free_coherent(&dev->pcidev->dev, cp->ctx_blk_size,
					  cp->ctx_arr[i].ctx,
					  cp->ctx_arr[i].mapping);
			cp->ctx_arr[i].ctx = NULL;
		}
	}
}

static void __cnic_free_uio_rings(struct cnic_uio_dev *udev)
{
	if (udev->l2_buf) {
		dma_free_coherent(&udev->pdev->dev, udev->l2_buf_size,
				  udev->l2_buf, udev->l2_buf_map);
		udev->l2_buf = NULL;
	}

	if (udev->l2_ring) {
		dma_free_coherent(&udev->pdev->dev, udev->l2_ring_size,
				  udev->l2_ring, udev->l2_ring_map);
		udev->l2_ring = NULL;
	}
}

static void __cnic_free_uio(struct cnic_uio_dev *udev)
{
	uio_unregister_device(&udev->cnic_uinfo);

	__cnic_free_uio_rings(udev);

	pci_dev_put(udev->pdev);
	kfree(udev);
}

static void cnic_free_uio(struct cnic_uio_dev *udev)
{
	if (!udev)
		return;

	write_lock(&cnic_dev_lock);
	list_del_init(&udev->list);
	write_unlock(&cnic_dev_lock);
	__cnic_free_uio(udev);
}

static void cnic_free_resc(struct cnic_dev *dev)
{
	struct cnic_local *cp = dev->cnic_priv;
	struct cnic_uio_dev *udev = cp->udev;

	if (udev->uio_dev == -1)
		__cnic_free_uio_rings(udev);

	cnic_free_context(dev);

	cnic_free_dma(dev, &cp->gbl_buf_info);
	cnic_free_dma(dev, &cp->kwq_info);
	cnic_free_dma(dev, &cp->kwq_16_data_info);
	cnic_free_dma(dev, &cp->kcq2.dma);
	cnic_free_dma(dev, &cp->kcq1.dma);
	kfree(cp->iscsi_tbl);
	cp->iscsi_tbl = NULL;

	cnic_free_id_tbl(&cp->fcoe_cid_tbl);
	cnic_free_id_tbl(&cp->cid_tbl);
}

static int cnic_alloc_context(struct cnic_dev *dev)
{
	struct cnic_local *cp = dev->cnic_priv;

	if (BNX2_CHIP(cp) == BNX2_CHIP_5709) {
		int i, k, arr_size;

		cp->ctx_blk_size = CNIC_PAGE_SIZE;
		cp->cids_per_blk = CNIC_PAGE_SIZE / 128;
		arr_size = BNX2_MAX_CID / cp->cids_per_blk *
			   sizeof(struct cnic_ctx);
		cp->ctx_arr = kzalloc(arr_size, GFP_KERNEL);
		if (cp->ctx_arr == NULL)
			return -ENOMEM;

		for (i = 0; i < 2; i++) {
			u32 j, reg, off, lo, hi;

			if (i == 0)
				off = BNX2_PG_CTX_MAP;
			else
				off = BNX2_ISCSI_CTX_MAP;

			reg = cnic_reg_rd_ind(dev, off);
			lo = reg >> 16;
			hi = reg & 0xffff;
			for (j = lo; j < hi; j += cp->cids_per_blk, k++)
				cp->ctx_arr[k].cid = j;
		}

		if (cp->ctx_blks >= (BNX2_MAX_CID / cp->cids_per_blk)) {

		for (i = 0; i < cp->ctx_blks; i++) {
			cp->ctx_arr[i].ctx =
				dma_alloc_coherent(&dev->pcidev->dev,
						   CNIC_PAGE_SIZE,
						   &cp->ctx_arr[i].mapping,
						   GFP_KERNEL);
			if (cp->ctx_arr[i].ctx == NULL)
				return -ENOMEM;
		}
	}
	return 0;
}

static u16 cnic_bnx2_next_idx(u16 idx)
{
	return idx + 1;
}

static u16 cnic_bnx2_hw_idx(u16 idx)
{
	return idx;
}

static u16 cnic_bnx2x_next_idx(u16 idx)
{
	idx++;
	if ((idx & MAX_KCQE_CNT) == MAX_KCQE_CNT)
		idx++;

	return idx;
}

static u16 cnic_bnx2x_hw_idx(u16 idx)
{
	if ((idx & MAX_KCQE_CNT) == MAX_KCQE_CNT)
		idx++;
	return idx;
}

static int cnic_alloc_kcq(struct cnic_dev *dev, struct kcq_info *info,
			  bool use_pg_tbl)
{
	int err, i, use_page_tbl = 0;
	struct kcqe **kcq;

	if (use_pg_tbl)
		use_page_tbl = 1;

	err = cnic_alloc_dma(dev, &info->dma, KCQ_PAGE_CNT, use_page_tbl);
	if (err)
		return err;

	kcq = (struct kcqe **) info->dma.pg_arr;
	info->kcq = kcq;

	info->next_idx = cnic_bnx2_next_idx;
	info->hw_idx = cnic_bnx2_hw_idx;

	info->next_idx = cnic_bnx2x_next_idx;
	info->hw_idx = cnic_bnx2x_hw_idx;

	for (i = 0; i < KCQ_PAGE_CNT; i++) {
		struct bnx2x_bd_chain_next *next =
			(struct bnx2x_bd_chain_next *) &kcq[i][MAX_KCQE_CNT];
		int j = i + 1;

		if (j >= KCQ_PAGE_CNT)
			j = 0;
		next->addr_hi = (u64) info->dma.pg_map_arr[j] >> 32;
		next->addr_lo = info->dma.pg_map_arr[j] & 0xffffffff;
	}

	return 0;
}
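
/* The KCQ is a linked ring: the last slot of each page is not a KCQE but
 * a bnx2x_bd_chain_next pointing at the next page (wrapping back to page
 * 0), which is why the bnx2x index helpers above skip one slot whenever
 * (idx & MAX_KCQE_CNT) reaches MAX_KCQE_CNT.  Consumers therefore always
 * walk the queue through the next_idx callback; a sketch, with KCQ_PG and
 * KCQ_IDX assumed to be the page/offset macros from the cnic headers:
 *
 *	u16 i = sw_prod;
 *
 *	while (i != hw_prod) {
 *		kcqe = &info->kcq[KCQ_PG(i)][KCQ_IDX(i)];
 *		(process kcqe)
 *		i = info->next_idx(i);
 *	}
 */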
static int __cnic_alloc_uio_rings(struct cnic_uio_dev *udev, int pages)
{
	struct cnic_local *cp = udev->dev->cnic_priv;

	if (udev->l2_ring)
		return 0;

	udev->l2_ring_size = pages * CNIC_PAGE_SIZE;
	udev->l2_ring = dma_alloc_coherent(&udev->pdev->dev, udev->l2_ring_size,
					   &udev->l2_ring_map, GFP_KERNEL);
	if (!udev->l2_ring)
		return -ENOMEM;

	udev->l2_buf_size = (cp->l2_rx_ring_size + 1) * cp->l2_single_buf_size;
	udev->l2_buf_size = CNIC_PAGE_ALIGN(udev->l2_buf_size);
	udev->l2_buf = dma_alloc_coherent(&udev->pdev->dev, udev->l2_buf_size,
					  &udev->l2_buf_map, GFP_KERNEL);
	if (!udev->l2_buf) {
		__cnic_free_uio_rings(udev);
		return -ENOMEM;
	}

	return 0;
}

static int cnic_alloc_uio_rings(struct cnic_dev *dev, int pages)
{
	struct cnic_local *cp = dev->cnic_priv;
	struct cnic_uio_dev *udev;

	list_for_each_entry(udev, &cnic_udev_list, list) {
		if (udev->pdev == dev->pcidev) {
			if (__cnic_alloc_uio_rings(udev, pages)) {

	udev = kzalloc(sizeof(struct cnic_uio_dev), GFP_ATOMIC);

	udev->pdev = dev->pcidev;

	if (__cnic_alloc_uio_rings(udev, pages))

	list_add(&udev->list, &cnic_udev_list);

	pci_dev_get(udev->pdev);

static int cnic_init_uio(struct cnic_dev *dev)
{
	struct cnic_local *cp = dev->cnic_priv;
	struct cnic_uio_dev *udev = cp->udev;
	struct uio_info *uinfo;
	int ret = 0;

	if (!udev)
		return -ENOMEM;

	uinfo = &udev->cnic_uinfo;

	uinfo->mem[0].addr = pci_resource_start(dev->pcidev, 0);
	uinfo->mem[0].internal_addr = dev->regview;
	uinfo->mem[0].memtype = UIO_MEM_PHYS;

	if (test_bit(CNIC_F_BNX2_CLASS, &dev->flags)) {
		uinfo->mem[0].size = MB_GET_CID_ADDR(TX_TSS_CID +
						     TX_MAX_TSS_RINGS + 1);
		uinfo->mem[1].addr = (unsigned long) cp->status_blk.gen &
					CNIC_PAGE_MASK;
		if (cp->ethdev->drv_state & CNIC_DRV_STATE_USING_MSIX)
			uinfo->mem[1].size = BNX2_SBLK_MSIX_ALIGN_SIZE * 9;
		else
			uinfo->mem[1].size = BNX2_SBLK_MSIX_ALIGN_SIZE;

		uinfo->name = "bnx2_cnic";
	} else if (test_bit(CNIC_F_BNX2X_CLASS, &dev->flags)) {
		uinfo->mem[0].size = pci_resource_len(dev->pcidev, 0);

		uinfo->mem[1].addr = (unsigned long) cp->bnx2x_def_status_blk &
					CNIC_PAGE_MASK;
		uinfo->mem[1].size = sizeof(*cp->bnx2x_def_status_blk);

		uinfo->name = "bnx2x_cnic";
	}

	uinfo->mem[1].memtype = UIO_MEM_LOGICAL;

	uinfo->mem[2].addr = (unsigned long) udev->l2_ring;
	uinfo->mem[2].size = udev->l2_ring_size;
	uinfo->mem[2].memtype = UIO_MEM_LOGICAL;

	uinfo->mem[3].addr = (unsigned long) udev->l2_buf;
	uinfo->mem[3].size = udev->l2_buf_size;
	uinfo->mem[3].memtype = UIO_MEM_LOGICAL;

	uinfo->version = CNIC_MODULE_VERSION;
	uinfo->irq = UIO_IRQ_CUSTOM;

	uinfo->open = cnic_uio_open;
	uinfo->release = cnic_uio_close;

	if (udev->uio_dev == -1) {
		ret = uio_register_device(&udev->pdev->dev, uinfo);
	} else {
		cnic_init_rings(dev);
	}

	return ret;
}

static int cnic_alloc_bnx2_resc(struct cnic_dev *dev)
{
	struct cnic_local *cp = dev->cnic_priv;
	int ret;

	ret = cnic_alloc_dma(dev, &cp->kwq_info, KWQ_PAGE_CNT, 1);
	if (ret)
		goto error;
	cp->kwq = (struct kwqe **) cp->kwq_info.pg_arr;

	ret = cnic_alloc_kcq(dev, &cp->kcq1, true);
	if (ret)
		goto error;

	ret = cnic_alloc_context(dev);
	if (ret)
		goto error;

	ret = cnic_alloc_uio_rings(dev, 2);
	if (ret)
		goto error;

	ret = cnic_init_uio(dev);
	if (ret)
		goto error;

	return 0;

error:
	cnic_free_resc(dev);
	return ret;
}

static int cnic_alloc_bnx2x_context(struct cnic_dev *dev)
{
	struct cnic_local *cp = dev->cnic_priv;
	struct bnx2x *bp = netdev_priv(dev->netdev);
	int ctx_blk_size = cp->ethdev->ctx_blk_size;
	int total_mem, blks, i;

	total_mem = BNX2X_CONTEXT_MEM_SIZE * cp->max_cid_space;
	blks = total_mem / ctx_blk_size;
	if (total_mem % ctx_blk_size)
		blks++;

	if (blks > cp->ethdev->ctx_tbl_len)
		return -ENOMEM;

	cp->ctx_arr = kcalloc(blks, sizeof(struct cnic_ctx), GFP_KERNEL);
	if (cp->ctx_arr == NULL)
		return -ENOMEM;

	cp->ctx_blks = blks;
	cp->ctx_blk_size = ctx_blk_size;
	if (!CHIP_IS_E1(bp))
		cp->ctx_align = 0;
	else
		cp->ctx_align = ctx_blk_size;

	cp->cids_per_blk = ctx_blk_size / BNX2X_CONTEXT_MEM_SIZE;

	for (i = 0; i < blks; i++) {
		cp->ctx_arr[i].ctx =
			dma_alloc_coherent(&dev->pcidev->dev, cp->ctx_blk_size,
					   &cp->ctx_arr[i].mapping,
					   GFP_KERNEL);
		if (cp->ctx_arr[i].ctx == NULL)
			return -ENOMEM;

		if (cp->ctx_align && cp->ctx_blk_size == ctx_blk_size) {
			if (cp->ctx_arr[i].mapping & (cp->ctx_align - 1)) {
				cnic_free_context(dev);
				cp->ctx_blk_size += cp->ctx_align;
				i = -1;
				continue;
			}
		}
	}
	return 0;
}

static int cnic_alloc_bnx2x_resc(struct cnic_dev *dev)
{
	struct cnic_local *cp = dev->cnic_priv;
	struct bnx2x *bp = netdev_priv(dev->netdev);
	struct cnic_eth_dev *ethdev = cp->ethdev;
	u32 start_cid = ethdev->starting_cid;
	int i, j, n, ret, pages;
	struct cnic_dma *kwq_16_dma = &cp->kwq_16_data_info;

	cp->max_cid_space = MAX_ISCSI_TBL_SZ;
	cp->iscsi_start_cid = start_cid;
	cp->fcoe_start_cid = start_cid + MAX_ISCSI_TBL_SZ;

	if (BNX2X_CHIP_IS_E2_PLUS(bp)) {
		cp->max_cid_space += dev->max_fcoe_conn;
		cp->fcoe_init_cid = ethdev->fcoe_init_cid;
		if (!cp->fcoe_init_cid)
			cp->fcoe_init_cid = 0x10;
	}

	cp->iscsi_tbl = kcalloc(MAX_ISCSI_TBL_SZ, sizeof(struct cnic_iscsi),
				GFP_KERNEL);
	if (!cp->iscsi_tbl)
		goto error;

	cp->ctx_tbl = kcalloc(cp->max_cid_space, sizeof(struct cnic_context),
			      GFP_KERNEL);
	if (!cp->ctx_tbl)
		goto error;

	for (i = 0; i < MAX_ISCSI_TBL_SZ; i++) {
		cp->ctx_tbl[i].proto.iscsi = &cp->iscsi_tbl[i];
		cp->ctx_tbl[i].ulp_proto_id = CNIC_ULP_ISCSI;
	}

	for (i = MAX_ISCSI_TBL_SZ; i < cp->max_cid_space; i++)
		cp->ctx_tbl[i].ulp_proto_id = CNIC_ULP_FCOE;

	pages = CNIC_PAGE_ALIGN(cp->max_cid_space * CNIC_KWQ16_DATA_SIZE) /
		CNIC_PAGE_SIZE;

	ret = cnic_alloc_dma(dev, kwq_16_dma, pages, 0);
	if (ret)
		goto error;

	n = CNIC_PAGE_SIZE / CNIC_KWQ16_DATA_SIZE;
	for (i = 0, j = 0; i < cp->max_cid_space; i++) {
		long off = CNIC_KWQ16_DATA_SIZE * (i % n);

		cp->ctx_tbl[i].kwqe_data = kwq_16_dma->pg_arr[j] + off;
		cp->ctx_tbl[i].kwqe_data_mapping = kwq_16_dma->pg_map_arr[j] +
						   off;

		if ((i % n) == (n - 1))
			j++;
	}

	ret = cnic_alloc_kcq(dev, &cp->kcq1, false);
	if (ret)
		goto error;

	if (CNIC_SUPPORTS_FCOE(bp)) {
		ret = cnic_alloc_kcq(dev, &cp->kcq2, true);
		if (ret)
			goto error;
	}

	pages = CNIC_PAGE_ALIGN(BNX2X_ISCSI_GLB_BUF_SIZE) / CNIC_PAGE_SIZE;
	ret = cnic_alloc_dma(dev, &cp->gbl_buf_info, pages, 0);
	if (ret)
		goto error;

	ret = cnic_alloc_bnx2x_context(dev);
	if (ret)
		goto error;

	if (cp->ethdev->drv_state & CNIC_DRV_STATE_NO_ISCSI)
		return 0;

	cp->bnx2x_def_status_blk = cp->ethdev->irq_arr[1].status_blk;

	cp->l2_rx_ring_size = 15;

	ret = cnic_alloc_uio_rings(dev, 4);
	if (ret)
		goto error;

	ret = cnic_init_uio(dev);
	if (ret)
		goto error;

	return 0;

error:
	cnic_free_resc(dev);
	return -ENOMEM;
}

static inline u32 cnic_kwq_avail(struct cnic_local *cp)
{
	return cp->max_kwq_idx -
	       ((cp->kwq_prod_idx - cp->kwq_con_idx) & cp->max_kwq_idx);
}
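
/* cnic_kwq_avail() is standard power-of-two ring arithmetic: the number
 * of entries in flight is (prod - con) & max_kwq_idx, and what is left is
 * max_kwq_idx minus that (one slot is sacrificed to distinguish a full
 * ring from an empty one).  Worked example with max_kwq_idx = 255:
 *
 *	prod = 10, con = 250  =>  in flight = (10 - 250) & 255 = 16
 *	                          available = 255 - 16        = 239
 */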
static int cnic_submit_bnx2_kwqes(struct cnic_dev *dev, struct kwqe *wqes[],
				  u32 num_wqes)
{
	struct cnic_local *cp = dev->cnic_priv;
	struct kwqe *prod_qe;
	u16 prod, sw_prod, i;

	if (!test_bit(CNIC_F_CNIC_UP, &dev->flags))
		return -EAGAIN;		/* bnx2 is down */

	spin_lock_bh(&cp->cnic_ulp_lock);
	if (num_wqes > cnic_kwq_avail(cp) &&
	    !test_bit(CNIC_LCL_FL_KWQ_INIT, &cp->cnic_local_flags)) {
		spin_unlock_bh(&cp->cnic_ulp_lock);
		return -EAGAIN;
	}

	clear_bit(CNIC_LCL_FL_KWQ_INIT, &cp->cnic_local_flags);

	prod = cp->kwq_prod_idx;
	sw_prod = prod & MAX_KWQ_IDX;
	for (i = 0; i < num_wqes; i++) {
		prod_qe = &cp->kwq[KWQ_PG(sw_prod)][KWQ_IDX(sw_prod)];
		memcpy(prod_qe, wqes[i], sizeof(struct kwqe));
		prod++;
		sw_prod = prod & MAX_KWQ_IDX;
	}
	cp->kwq_prod_idx = prod;

	CNIC_WR16(dev, cp->kwq_io_addr, cp->kwq_prod_idx);

	spin_unlock_bh(&cp->cnic_ulp_lock);
	return 0;
}

static void *cnic_get_kwqe_16_data(struct cnic_local *cp, u32 l5_cid,
				   union l5cm_specific_data *l5_data)
{
	struct cnic_context *ctx = &cp->ctx_tbl[l5_cid];
	dma_addr_t map;

	map = ctx->kwqe_data_mapping;
	l5_data->phy_address.lo = (u64) map & 0xffffffff;
	l5_data->phy_address.hi = (u64) map >> 32;
	return ctx->kwqe_data;
}

static int cnic_submit_kwqe_16(struct cnic_dev *dev, u32 cmd, u32 cid,
			       u32 type, union l5cm_specific_data *l5_data)
{
	struct cnic_local *cp = dev->cnic_priv;
	struct bnx2x *bp = netdev_priv(dev->netdev);
	struct l5cm_spe kwqe;
	struct kwqe_16 *kwq[1];
	u16 type_16;
	int ret;

	kwqe.hdr.conn_and_cmd_data =
		cpu_to_le32(((cmd << SPE_HDR_CMD_ID_SHIFT) |
			     BNX2X_HW_CID(bp, cid)));

	type_16 = (type << SPE_HDR_CONN_TYPE_SHIFT) & SPE_HDR_CONN_TYPE;
	type_16 |= (bp->pfid << SPE_HDR_FUNCTION_ID_SHIFT) &
		   SPE_HDR_FUNCTION_ID;

	kwqe.hdr.type = cpu_to_le16(type_16);
	kwqe.hdr.reserved1 = 0;
	kwqe.data.phy_address.lo = cpu_to_le32(l5_data->phy_address.lo);
	kwqe.data.phy_address.hi = cpu_to_le32(l5_data->phy_address.hi);

	kwq[0] = (struct kwqe_16 *) &kwqe;

	spin_lock_bh(&cp->cnic_ulp_lock);
	ret = cp->ethdev->drv_submit_kwqes_16(dev->netdev, kwq, 1);
	spin_unlock_bh(&cp->cnic_ulp_lock);
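
/* The 16-bit SPE "type" field packs two values with the usual
 * shift-and-mask idiom: the connection type into SPE_HDR_CONN_TYPE and
 * the PCI function into SPE_HDR_FUNCTION_ID.  Decoding is the mirror
 * image (sketch):
 *
 *	conn = (type_16 & SPE_HDR_CONN_TYPE) >> SPE_HDR_CONN_TYPE_SHIFT;
 *	pfid = (type_16 & SPE_HDR_FUNCTION_ID) >> SPE_HDR_FUNCTION_ID_SHIFT;
 */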
static void cnic_reply_bnx2x_kcqes(struct cnic_dev *dev, int ulp_type,
				   struct kcqe *cqes[], u32 num_cqes)
{
	struct cnic_local *cp = dev->cnic_priv;
	struct cnic_ulp_ops *ulp_ops;

	rcu_read_lock();
	ulp_ops = rcu_dereference(cp->ulp_ops[ulp_type]);
	if (likely(ulp_ops)) {
		ulp_ops->indicate_kcqes(cp->ulp_handle[ulp_type],
					cqes, num_cqes);
	}
	rcu_read_unlock();
}

static void cnic_bnx2x_set_tcp_options(struct cnic_dev *dev, int time_stamps,
				       int en_tcp_dack)
{
	struct bnx2x *bp = netdev_priv(dev->netdev);
	u8 xstorm_flags = XSTORM_L5CM_TCP_FLAGS_WND_SCL_EN;
	u16 tstorm_flags = 0;

	if (time_stamps) {
		xstorm_flags |= XSTORM_L5CM_TCP_FLAGS_TS_ENABLED;
		tstorm_flags |= TSTORM_L5CM_TCP_FLAGS_TS_ENABLED;
	}

	if (en_tcp_dack)
		tstorm_flags |= TSTORM_L5CM_TCP_FLAGS_DELAYED_ACK_EN;

	CNIC_WR8(dev, BAR_XSTRORM_INTMEM +
		 XSTORM_ISCSI_TCP_VARS_FLAGS_OFFSET(bp->pfid), xstorm_flags);

	CNIC_WR16(dev, BAR_TSTRORM_INTMEM +
		  TSTORM_ISCSI_TCP_VARS_FLAGS_OFFSET(bp->pfid), tstorm_flags);
}

static int cnic_bnx2x_iscsi_init1(struct cnic_dev *dev, struct kwqe *kwqe)
{
	struct cnic_local *cp = dev->cnic_priv;
	struct bnx2x *bp = netdev_priv(dev->netdev);
	struct iscsi_kwqe_init1 *req1 = (struct iscsi_kwqe_init1 *) kwqe;
	int hq_bds, pages;
	u32 pfid = bp->pfid;

	cp->num_iscsi_tasks = req1->num_tasks_per_conn;
	cp->num_ccells = req1->num_ccells_per_conn;
	cp->task_array_size = BNX2X_ISCSI_TASK_CONTEXT_SIZE *
			      cp->num_iscsi_tasks;
	cp->r2tq_size = cp->num_iscsi_tasks * BNX2X_ISCSI_MAX_PENDING_R2TS *
			BNX2X_ISCSI_R2TQE_SIZE;
	cp->hq_size = cp->num_ccells * BNX2X_ISCSI_HQ_BD_SIZE;
	pages = CNIC_PAGE_ALIGN(cp->hq_size) / CNIC_PAGE_SIZE;
	hq_bds = pages * (CNIC_PAGE_SIZE / BNX2X_ISCSI_HQ_BD_SIZE);
	cp->num_cqs = req1->num_cqs;

	if (!dev->max_iscsi_conn)
		return 0;

	/* init Tstorm RAM */
	CNIC_WR16(dev, BAR_TSTRORM_INTMEM + TSTORM_ISCSI_RQ_SIZE_OFFSET(pfid),
		  req1->rq_num_wqes);
	CNIC_WR16(dev, BAR_TSTRORM_INTMEM + TSTORM_ISCSI_PAGE_SIZE_OFFSET(pfid),
		  CNIC_PAGE_SIZE);
	CNIC_WR8(dev, BAR_TSTRORM_INTMEM +
		 TSTORM_ISCSI_PAGE_SIZE_LOG_OFFSET(pfid), CNIC_PAGE_BITS);
	CNIC_WR16(dev, BAR_TSTRORM_INTMEM +
		  TSTORM_ISCSI_NUM_OF_TASKS_OFFSET(pfid),
		  req1->num_tasks_per_conn);

	/* init Ustorm RAM */
	CNIC_WR16(dev, BAR_USTRORM_INTMEM +
		  USTORM_ISCSI_RQ_BUFFER_SIZE_OFFSET(pfid),
		  req1->rq_buffer_size);
	CNIC_WR16(dev, BAR_USTRORM_INTMEM + USTORM_ISCSI_PAGE_SIZE_OFFSET(pfid),
		  CNIC_PAGE_SIZE);
	CNIC_WR8(dev, BAR_USTRORM_INTMEM +
		 USTORM_ISCSI_PAGE_SIZE_LOG_OFFSET(pfid), CNIC_PAGE_BITS);
	CNIC_WR16(dev, BAR_USTRORM_INTMEM +
		  USTORM_ISCSI_NUM_OF_TASKS_OFFSET(pfid),
		  req1->num_tasks_per_conn);
	CNIC_WR16(dev, BAR_USTRORM_INTMEM + USTORM_ISCSI_RQ_SIZE_OFFSET(pfid),
		  req1->rq_num_wqes);
	CNIC_WR16(dev, BAR_USTRORM_INTMEM + USTORM_ISCSI_CQ_SIZE_OFFSET(pfid),
		  req1->cq_num_wqes);
	CNIC_WR16(dev, BAR_USTRORM_INTMEM + USTORM_ISCSI_R2TQ_SIZE_OFFSET(pfid),
		  cp->num_iscsi_tasks * BNX2X_ISCSI_MAX_PENDING_R2TS);

	/* init Xstorm RAM */
	CNIC_WR16(dev, BAR_XSTRORM_INTMEM + XSTORM_ISCSI_PAGE_SIZE_OFFSET(pfid),
		  CNIC_PAGE_SIZE);
	CNIC_WR8(dev, BAR_XSTRORM_INTMEM +
		 XSTORM_ISCSI_PAGE_SIZE_LOG_OFFSET(pfid), CNIC_PAGE_BITS);
	CNIC_WR16(dev, BAR_XSTRORM_INTMEM +
		  XSTORM_ISCSI_NUM_OF_TASKS_OFFSET(pfid),
		  req1->num_tasks_per_conn);
	CNIC_WR16(dev, BAR_XSTRORM_INTMEM + XSTORM_ISCSI_HQ_SIZE_OFFSET(pfid),
		  hq_bds);
	CNIC_WR16(dev, BAR_XSTRORM_INTMEM + XSTORM_ISCSI_SQ_SIZE_OFFSET(pfid),
		  req1->num_tasks_per_conn);
	CNIC_WR16(dev, BAR_XSTRORM_INTMEM + XSTORM_ISCSI_R2TQ_SIZE_OFFSET(pfid),
		  cp->num_iscsi_tasks * BNX2X_ISCSI_MAX_PENDING_R2TS);

	/* init Cstorm RAM */
	CNIC_WR16(dev, BAR_CSTRORM_INTMEM + CSTORM_ISCSI_PAGE_SIZE_OFFSET(pfid),
		  CNIC_PAGE_SIZE);
	CNIC_WR8(dev, BAR_CSTRORM_INTMEM +
		 CSTORM_ISCSI_PAGE_SIZE_LOG_OFFSET(pfid), CNIC_PAGE_BITS);
	CNIC_WR16(dev, BAR_CSTRORM_INTMEM +
		  CSTORM_ISCSI_NUM_OF_TASKS_OFFSET(pfid),
		  req1->num_tasks_per_conn);
	CNIC_WR16(dev, BAR_CSTRORM_INTMEM + CSTORM_ISCSI_CQ_SIZE_OFFSET(pfid),
		  req1->cq_num_wqes);
	CNIC_WR16(dev, BAR_CSTRORM_INTMEM + CSTORM_ISCSI_HQ_SIZE_OFFSET(pfid),
		  hq_bds);

	cnic_bnx2x_set_tcp_options(dev,
			req1->flags & ISCSI_KWQE_INIT1_TIME_STAMPS_ENABLE,
			req1->flags & ISCSI_KWQE_INIT1_DELAYED_ACK_ENABLE);

	return 0;
}

static int cnic_bnx2x_iscsi_init2(struct cnic_dev *dev, struct kwqe *kwqe)
{
	struct iscsi_kwqe_init2 *req2 = (struct iscsi_kwqe_init2 *) kwqe;
	struct bnx2x *bp = netdev_priv(dev->netdev);
	u32 pfid = bp->pfid;
	struct iscsi_kcqe kcqe;
	struct kcqe *cqes[1];

	memset(&kcqe, 0, sizeof(kcqe));
	if (!dev->max_iscsi_conn) {
		kcqe.completion_status =
			ISCSI_KCQE_COMPLETION_STATUS_ISCSI_NOT_SUPPORTED;
		goto done;
	}

	CNIC_WR(dev, BAR_TSTRORM_INTMEM +
		TSTORM_ISCSI_ERROR_BITMAP_OFFSET(pfid), req2->error_bit_map[0]);
	CNIC_WR(dev, BAR_TSTRORM_INTMEM +
		TSTORM_ISCSI_ERROR_BITMAP_OFFSET(pfid) + 4,
		req2->error_bit_map[1]);

	CNIC_WR16(dev, BAR_USTRORM_INTMEM +
		  USTORM_ISCSI_CQ_SQN_SIZE_OFFSET(pfid), req2->max_cq_sqn);
	CNIC_WR(dev, BAR_USTRORM_INTMEM +
		USTORM_ISCSI_ERROR_BITMAP_OFFSET(pfid), req2->error_bit_map[0]);
	CNIC_WR(dev, BAR_USTRORM_INTMEM +
		USTORM_ISCSI_ERROR_BITMAP_OFFSET(pfid) + 4,
		req2->error_bit_map[1]);

	CNIC_WR16(dev, BAR_CSTRORM_INTMEM +
		  CSTORM_ISCSI_CQ_SQN_SIZE_OFFSET(pfid), req2->max_cq_sqn);

	kcqe.completion_status = ISCSI_KCQE_COMPLETION_STATUS_SUCCESS;

done:
	kcqe.op_code = ISCSI_KCQE_OPCODE_INIT;
	cqes[0] = (struct kcqe *) &kcqe;
	cnic_reply_bnx2x_kcqes(dev, CNIC_ULP_ISCSI, cqes, 1);

	return 0;
}

static void cnic_free_bnx2x_conn_resc(struct cnic_dev *dev, u32 l5_cid)
{
	struct cnic_local *cp = dev->cnic_priv;
	struct cnic_context *ctx = &cp->ctx_tbl[l5_cid];

	if (ctx->ulp_proto_id == CNIC_ULP_ISCSI) {
		struct cnic_iscsi *iscsi = ctx->proto.iscsi;

		cnic_free_dma(dev, &iscsi->hq_info);
		cnic_free_dma(dev, &iscsi->r2tq_info);
		cnic_free_dma(dev, &iscsi->task_array_info);
		cnic_free_id(&cp->cid_tbl, ctx->cid);
	} else {
		cnic_free_id(&cp->fcoe_cid_tbl, ctx->cid);
	}

static int cnic_alloc_bnx2x_conn_resc(struct cnic_dev *dev, u32 l5_cid)
{
	u32 cid;
	int ret, pages;
	struct cnic_local *cp = dev->cnic_priv;
	struct cnic_context *ctx = &cp->ctx_tbl[l5_cid];
	struct cnic_iscsi *iscsi = ctx->proto.iscsi;

	if (ctx->ulp_proto_id == CNIC_ULP_FCOE) {
		cid = cnic_alloc_new_id(&cp->fcoe_cid_tbl);

	cid = cnic_alloc_new_id(&cp->cid_tbl);

	pages = CNIC_PAGE_ALIGN(cp->task_array_size) / CNIC_PAGE_SIZE;

	ret = cnic_alloc_dma(dev, &iscsi->task_array_info, pages, 1);
	if (ret)
		goto error;

	pages = CNIC_PAGE_ALIGN(cp->r2tq_size) / CNIC_PAGE_SIZE;
	ret = cnic_alloc_dma(dev, &iscsi->r2tq_info, pages, 1);
	if (ret)
		goto error;

	pages = CNIC_PAGE_ALIGN(cp->hq_size) / CNIC_PAGE_SIZE;
	ret = cnic_alloc_dma(dev, &iscsi->hq_info, pages, 1);
	if (ret)
		goto error;

	return 0;

error:
	cnic_free_bnx2x_conn_resc(dev, l5_cid);
	return ret;
}

static void *cnic_get_bnx2x_ctx(struct cnic_dev *dev, u32 cid, int init,
				struct regpair *ctx_addr)
{
	struct cnic_local *cp = dev->cnic_priv;
	struct cnic_eth_dev *ethdev = cp->ethdev;
	int blk = (cid - ethdev->starting_cid) / cp->cids_per_blk;
	int off = (cid - ethdev->starting_cid) % cp->cids_per_blk;
	unsigned long align_off = 0;
	dma_addr_t ctx_map;
	void *ctx;

	if (cp->ctx_align) {
		unsigned long mask = cp->ctx_align - 1;

		if (cp->ctx_arr[blk].mapping & mask)
			align_off = cp->ctx_align -
				    (cp->ctx_arr[blk].mapping & mask);
	}
	ctx_map = cp->ctx_arr[blk].mapping + align_off +
		  (off * BNX2X_CONTEXT_MEM_SIZE);
	ctx = cp->ctx_arr[blk].ctx + align_off +
	      (off * BNX2X_CONTEXT_MEM_SIZE);
	if (init)
		memset(ctx, 0, BNX2X_CONTEXT_MEM_SIZE);

	ctx_addr->lo = ctx_map & 0xffffffff;
	ctx_addr->hi = (u64) ctx_map >> 32;
	return ctx;
}

static int cnic_setup_bnx2x_ctx(struct cnic_dev *dev, struct kwqe *wqes[],
				u32 num)
{
	struct cnic_local *cp = dev->cnic_priv;
	struct bnx2x *bp = netdev_priv(dev->netdev);
	struct iscsi_kwqe_conn_offload1 *req1 =
		(struct iscsi_kwqe_conn_offload1 *) wqes[0];
	struct iscsi_kwqe_conn_offload2 *req2 =
		(struct iscsi_kwqe_conn_offload2 *) wqes[1];
	struct iscsi_kwqe_conn_offload3 *req3;
	struct cnic_context *ctx = &cp->ctx_tbl[req1->iscsi_conn_id];
	struct cnic_iscsi *iscsi = ctx->proto.iscsi;
	u32 cid = ctx->cid;
	u32 hw_cid = BNX2X_HW_CID(bp, cid);
	struct iscsi_context *ictx;
	struct regpair context_addr;
	int i, j, n = 2, n_max;
	u8 port = BP_PORT(bp);

	if (!req2->num_additional_wqes)
		return -EINVAL;

	n_max = req2->num_additional_wqes + 2;

	ictx = cnic_get_bnx2x_ctx(dev, cid, 1, &context_addr);
	if (!ictx)
		return -ENOMEM;

	req3 = (struct iscsi_kwqe_conn_offload3 *) wqes[n++];

	ictx->xstorm_ag_context.hq_prod = 1;

	ictx->xstorm_st_context.iscsi.first_burst_length =
		ISCSI_DEF_FIRST_BURST_LEN;
	ictx->xstorm_st_context.iscsi.max_send_pdu_length =
		ISCSI_DEF_MAX_RECV_SEG_LEN;
	ictx->xstorm_st_context.iscsi.sq_pbl_base.lo =
		req1->sq_page_table_addr_lo;
	ictx->xstorm_st_context.iscsi.sq_pbl_base.hi =
		req1->sq_page_table_addr_hi;
	ictx->xstorm_st_context.iscsi.sq_curr_pbe.lo = req2->sq_first_pte.hi;
	ictx->xstorm_st_context.iscsi.sq_curr_pbe.hi = req2->sq_first_pte.lo;
	ictx->xstorm_st_context.iscsi.hq_pbl_base.lo =
		iscsi->hq_info.pgtbl_map & 0xffffffff;
	ictx->xstorm_st_context.iscsi.hq_pbl_base.hi =
		(u64) iscsi->hq_info.pgtbl_map >> 32;
	ictx->xstorm_st_context.iscsi.hq_curr_pbe_base.lo =
		iscsi->hq_info.pgtbl[0];
	ictx->xstorm_st_context.iscsi.hq_curr_pbe_base.hi =
		iscsi->hq_info.pgtbl[1];
	ictx->xstorm_st_context.iscsi.r2tq_pbl_base.lo =
		iscsi->r2tq_info.pgtbl_map & 0xffffffff;
	ictx->xstorm_st_context.iscsi.r2tq_pbl_base.hi =
		(u64) iscsi->r2tq_info.pgtbl_map >> 32;
	ictx->xstorm_st_context.iscsi.r2tq_curr_pbe_base.lo =
		iscsi->r2tq_info.pgtbl[0];
	ictx->xstorm_st_context.iscsi.r2tq_curr_pbe_base.hi =
		iscsi->r2tq_info.pgtbl[1];
	ictx->xstorm_st_context.iscsi.task_pbl_base.lo =
		iscsi->task_array_info.pgtbl_map & 0xffffffff;
	ictx->xstorm_st_context.iscsi.task_pbl_base.hi =
		(u64) iscsi->task_array_info.pgtbl_map >> 32;
	ictx->xstorm_st_context.iscsi.task_pbl_cache_idx =
		BNX2X_ISCSI_PBL_NOT_CACHED;
	ictx->xstorm_st_context.iscsi.flags.flags |=
		XSTORM_ISCSI_CONTEXT_FLAGS_B_IMMEDIATE_DATA;
	ictx->xstorm_st_context.iscsi.flags.flags |=
		XSTORM_ISCSI_CONTEXT_FLAGS_B_INITIAL_R2T;
	ictx->xstorm_st_context.common.ethernet.reserved_vlan_type =
		ETH_P_8021Q;
	if (BNX2X_CHIP_IS_E2_PLUS(bp) &&
	    bp->common.chip_port_mode == CHIP_2_PORT_MODE) {
		port = 0;
	}
	ictx->xstorm_st_context.common.flags =
		1 << XSTORM_COMMON_CONTEXT_SECTION_PHYSQ_INITIALIZED_SHIFT;
	ictx->xstorm_st_context.common.flags =
		port << XSTORM_COMMON_CONTEXT_SECTION_PBF_PORT_SHIFT;

	ictx->tstorm_st_context.iscsi.hdr_bytes_2_fetch = ISCSI_HEADER_SIZE;
	/* TSTORM requires the base address of RQ DB & not PTE */
	ictx->tstorm_st_context.iscsi.rq_db_phy_addr.lo =
		req2->rq_page_table_addr_lo & CNIC_PAGE_MASK;
	ictx->tstorm_st_context.iscsi.rq_db_phy_addr.hi =
		req2->rq_page_table_addr_hi;
	ictx->tstorm_st_context.iscsi.iscsi_conn_id = req1->iscsi_conn_id;
	ictx->tstorm_st_context.tcp.cwnd = 0x5A8;
	ictx->tstorm_st_context.tcp.flags2 |=
		TSTORM_TCP_ST_CONTEXT_SECTION_DA_EN;
	ictx->tstorm_st_context.tcp.ooo_support_mode =
		TCP_TSTORM_OOO_DROP_AND_PROC_ACK;

	ictx->timers_context.flags |= TIMERS_BLOCK_CONTEXT_CONN_VALID_FLG;

	ictx->ustorm_st_context.ring.rq.pbl_base.lo =
		req2->rq_page_table_addr_lo;
	ictx->ustorm_st_context.ring.rq.pbl_base.hi =
		req2->rq_page_table_addr_hi;
	ictx->ustorm_st_context.ring.rq.curr_pbe.lo = req3->qp_first_pte[0].hi;
	ictx->ustorm_st_context.ring.rq.curr_pbe.hi = req3->qp_first_pte[0].lo;
	ictx->ustorm_st_context.ring.r2tq.pbl_base.lo =
		iscsi->r2tq_info.pgtbl_map & 0xffffffff;
	ictx->ustorm_st_context.ring.r2tq.pbl_base.hi =
		(u64) iscsi->r2tq_info.pgtbl_map >> 32;
	ictx->ustorm_st_context.ring.r2tq.curr_pbe.lo =
		iscsi->r2tq_info.pgtbl[0];
	ictx->ustorm_st_context.ring.r2tq.curr_pbe.hi =
		iscsi->r2tq_info.pgtbl[1];
	ictx->ustorm_st_context.ring.cq_pbl_base.lo =
		req1->cq_page_table_addr_lo;
	ictx->ustorm_st_context.ring.cq_pbl_base.hi =
		req1->cq_page_table_addr_hi;
	ictx->ustorm_st_context.ring.cq[0].cq_sn = ISCSI_INITIAL_SN;
	ictx->ustorm_st_context.ring.cq[0].curr_pbe.lo = req2->cq_first_pte.hi;
	ictx->ustorm_st_context.ring.cq[0].curr_pbe.hi = req2->cq_first_pte.lo;
	ictx->ustorm_st_context.task_pbe_cache_index =
		BNX2X_ISCSI_PBL_NOT_CACHED;
	ictx->ustorm_st_context.task_pdu_cache_index =
		BNX2X_ISCSI_PDU_HEADER_NOT_CACHED;

	for (i = 1, j = 1; i < cp->num_cqs; i++, j++) {

		req3 = (struct iscsi_kwqe_conn_offload3 *) wqes[n++];

		ictx->ustorm_st_context.ring.cq[i].cq_sn = ISCSI_INITIAL_SN;
		ictx->ustorm_st_context.ring.cq[i].curr_pbe.lo =
			req3->qp_first_pte[j].hi;
		ictx->ustorm_st_context.ring.cq[i].curr_pbe.hi =
			req3->qp_first_pte[j].lo;
	}

	ictx->ustorm_st_context.task_pbl_base.lo =
		iscsi->task_array_info.pgtbl_map & 0xffffffff;
	ictx->ustorm_st_context.task_pbl_base.hi =
		(u64) iscsi->task_array_info.pgtbl_map >> 32;
	ictx->ustorm_st_context.tce_phy_addr.lo =
		iscsi->task_array_info.pgtbl[0];
	ictx->ustorm_st_context.tce_phy_addr.hi =
		iscsi->task_array_info.pgtbl[1];
	ictx->ustorm_st_context.iscsi_conn_id = req1->iscsi_conn_id;
	ictx->ustorm_st_context.num_cqs = cp->num_cqs;
	ictx->ustorm_st_context.negotiated_rx |= ISCSI_DEF_MAX_RECV_SEG_LEN;
	ictx->ustorm_st_context.negotiated_rx_and_flags |=
		ISCSI_DEF_MAX_BURST_LEN;
	ictx->ustorm_st_context.negotiated_rx |=
		ISCSI_DEFAULT_MAX_OUTSTANDING_R2T <<
		USTORM_ISCSI_ST_CONTEXT_MAX_OUTSTANDING_R2TS_SHIFT;

	ictx->cstorm_st_context.hq_pbl_base.lo =
		iscsi->hq_info.pgtbl_map & 0xffffffff;
	ictx->cstorm_st_context.hq_pbl_base.hi =
		(u64) iscsi->hq_info.pgtbl_map >> 32;
	ictx->cstorm_st_context.hq_curr_pbe.lo = iscsi->hq_info.pgtbl[0];
	ictx->cstorm_st_context.hq_curr_pbe.hi = iscsi->hq_info.pgtbl[1];
	ictx->cstorm_st_context.task_pbl_base.lo =
		iscsi->task_array_info.pgtbl_map & 0xffffffff;
	ictx->cstorm_st_context.task_pbl_base.hi =
		(u64) iscsi->task_array_info.pgtbl_map >> 32;
	/* CSTORM and USTORM initialization is different, CSTORM requires
	 * CQ DB base & not PTE addr
	 */
	ictx->cstorm_st_context.cq_db_base.lo =
		req1->cq_page_table_addr_lo & CNIC_PAGE_MASK;
	ictx->cstorm_st_context.cq_db_base.hi = req1->cq_page_table_addr_hi;
	ictx->cstorm_st_context.iscsi_conn_id = req1->iscsi_conn_id;
	ictx->cstorm_st_context.cq_proc_en_bit_map = (1 << cp->num_cqs) - 1;
	for (i = 0; i < cp->num_cqs; i++) {
		ictx->cstorm_st_context.cq_c_prod_sqn_arr.sqn[i] =
			ISCSI_INITIAL_SN;
		ictx->cstorm_st_context.cq_c_sqn_2_notify_arr.sqn[i] =
			ISCSI_INITIAL_SN;
	}

	ictx->xstorm_ag_context.cdu_reserved =
		CDU_RSRVD_VALUE_TYPE_A(hw_cid, CDU_REGION_NUMBER_XCM_AG,
				       ISCSI_CONNECTION_TYPE);
	ictx->ustorm_ag_context.cdu_usage =
		CDU_RSRVD_VALUE_TYPE_A(hw_cid, CDU_REGION_NUMBER_UCM_AG,
				       ISCSI_CONNECTION_TYPE);

	return 0;
}

static int cnic_bnx2x_iscsi_ofld1(struct cnic_dev *dev, struct kwqe *wqes[],
				  u32 num, int *work)
{
	struct iscsi_kwqe_conn_offload1 *req1;
	struct iscsi_kwqe_conn_offload2 *req2;
	struct cnic_local *cp = dev->cnic_priv;
	struct bnx2x *bp = netdev_priv(dev->netdev);
	struct cnic_context *ctx;
	struct iscsi_kcqe kcqe;
	struct kcqe *cqes[1];
	u32 l5_cid;
	int ret = 0;

	req1 = (struct iscsi_kwqe_conn_offload1 *) wqes[0];
	req2 = (struct iscsi_kwqe_conn_offload2 *) wqes[1];
	if ((num - 2) < req2->num_additional_wqes) {
		*work = num;
		return -EINVAL;
	}
	*work = 2 + req2->num_additional_wqes;

	l5_cid = req1->iscsi_conn_id;
	if (l5_cid >= MAX_ISCSI_TBL_SZ)
		return -EINVAL;

	memset(&kcqe, 0, sizeof(kcqe));
	kcqe.op_code = ISCSI_KCQE_OPCODE_OFFLOAD_CONN;
	kcqe.iscsi_conn_id = l5_cid;
	kcqe.completion_status = ISCSI_KCQE_COMPLETION_STATUS_CTX_ALLOC_FAILURE;

	ctx = &cp->ctx_tbl[l5_cid];
	if (test_bit(CTX_FL_OFFLD_START, &ctx->ctx_flags)) {
		kcqe.completion_status =
			ISCSI_KCQE_COMPLETION_STATUS_CID_BUSY;
		goto done;
	}

	if (atomic_inc_return(&cp->iscsi_conn) > dev->max_iscsi_conn) {
		atomic_dec(&cp->iscsi_conn);
		goto done;
	}
	ret = cnic_alloc_bnx2x_conn_resc(dev, l5_cid);
	if (ret) {
		atomic_dec(&cp->iscsi_conn);
		goto done;
	}
	ret = cnic_setup_bnx2x_ctx(dev, wqes, num);
	if (ret < 0) {
		cnic_free_bnx2x_conn_resc(dev, l5_cid);
		atomic_dec(&cp->iscsi_conn);
		goto done;
	}

	kcqe.completion_status = ISCSI_KCQE_COMPLETION_STATUS_SUCCESS;
	kcqe.iscsi_conn_context_id = BNX2X_HW_CID(bp, cp->ctx_tbl[l5_cid].cid);

done:
	cqes[0] = (struct kcqe *) &kcqe;
	cnic_reply_bnx2x_kcqes(dev, CNIC_ULP_ISCSI, cqes, 1);
	return 0;
}

static int cnic_bnx2x_iscsi_update(struct cnic_dev *dev, struct kwqe *kwqe)
{
	struct cnic_local *cp = dev->cnic_priv;
	struct iscsi_kwqe_conn_update *req =
		(struct iscsi_kwqe_conn_update *) kwqe;
	void *data;
	union l5cm_specific_data l5_data;
	u32 l5_cid, cid = BNX2X_SW_CID(req->context_id);
	int ret;

	if (cnic_get_l5_cid(cp, cid, &l5_cid) != 0)
		return -EINVAL;

	data = cnic_get_kwqe_16_data(cp, l5_cid, &l5_data);
	if (!data)
		return -ENOMEM;

	memcpy(data, kwqe, sizeof(struct kwqe));

	ret = cnic_submit_kwqe_16(dev, ISCSI_RAMROD_CMD_ID_UPDATE_CONN,
				  req->context_id, ISCSI_CONNECTION_TYPE, &l5_data);
	return ret;
}

static int cnic_bnx2x_destroy_ramrod(struct cnic_dev *dev, u32 l5_cid)
{
	struct cnic_local *cp = dev->cnic_priv;
	struct bnx2x *bp = netdev_priv(dev->netdev);
	struct cnic_context *ctx = &cp->ctx_tbl[l5_cid];
	union l5cm_specific_data l5_data;
	int ret;
	u32 hw_cid;

	init_waitqueue_head(&ctx->waitq);
	ctx->wait_cond = 0;
	memset(&l5_data, 0, sizeof(l5_data));
	hw_cid = BNX2X_HW_CID(bp, ctx->cid);

	ret = cnic_submit_kwqe_16(dev, RAMROD_CMD_ID_COMMON_CFC_DEL,
				  hw_cid, NONE_CONNECTION_TYPE, &l5_data);

	if (ret == 0) {
		wait_event_timeout(ctx->waitq, ctx->wait_cond, CNIC_RAMROD_TMO);
		if (unlikely(test_bit(CTX_FL_CID_ERROR, &ctx->ctx_flags)))
			return -EBUSY;
	}

	return ret;
}

static int cnic_bnx2x_iscsi_destroy(struct cnic_dev *dev, struct kwqe *kwqe)
{
	struct cnic_local *cp = dev->cnic_priv;
	struct iscsi_kwqe_conn_destroy *req =
		(struct iscsi_kwqe_conn_destroy *) kwqe;
	u32 l5_cid = req->reserved0;
	struct cnic_context *ctx = &cp->ctx_tbl[l5_cid];
	int ret = 0;
	struct iscsi_kcqe kcqe;
	struct kcqe *cqes[1];

	if (!test_bit(CTX_FL_OFFLD_START, &ctx->ctx_flags))
		goto skip_cfc_delete;

	if (!time_after(jiffies, ctx->timestamp + (2 * HZ))) {
		unsigned long delta = ctx->timestamp + (2 * HZ) - jiffies;

		if (delta > (2 * HZ))
			delta = 0;

		set_bit(CTX_FL_DELETE_WAIT, &ctx->ctx_flags);
		queue_delayed_work(cnic_wq, &cp->delete_task, delta);
	}

	ret = cnic_bnx2x_destroy_ramrod(dev, l5_cid);

skip_cfc_delete:
	cnic_free_bnx2x_conn_resc(dev, l5_cid);

	if (!ret) {
		atomic_dec(&cp->iscsi_conn);
		clear_bit(CTX_FL_OFFLD_START, &ctx->ctx_flags);
	}

	memset(&kcqe, 0, sizeof(kcqe));
	kcqe.op_code = ISCSI_KCQE_OPCODE_DESTROY_CONN;
	kcqe.iscsi_conn_id = l5_cid;
	kcqe.completion_status = ISCSI_KCQE_COMPLETION_STATUS_SUCCESS;
	kcqe.iscsi_conn_context_id = req->context_id;

	cqes[0] = (struct kcqe *) &kcqe;
	cnic_reply_bnx2x_kcqes(dev, CNIC_ULP_ISCSI, cqes, 1);

	return 0;
}

static void cnic_init_storm_conn_bufs(struct cnic_dev *dev,
				      struct l4_kwq_connect_req1 *kwqe1,
				      struct l4_kwq_connect_req3 *kwqe3,
				      struct l5cm_active_conn_buffer *conn_buf)
{
	struct l5cm_conn_addr_params *conn_addr = &conn_buf->conn_addr_buf;
	struct l5cm_xstorm_conn_buffer *xstorm_buf =
		&conn_buf->xstorm_conn_buffer;
	struct l5cm_tstorm_conn_buffer *tstorm_buf =
		&conn_buf->tstorm_conn_buffer;
	struct regpair context_addr;
	u32 cid = BNX2X_SW_CID(kwqe1->cid);
	struct in6_addr src_ip, dst_ip;
	int i;
	u32 *addrp;

	addrp = (u32 *) &conn_addr->local_ip_addr;
	for (i = 0; i < 4; i++, addrp++)
		src_ip.in6_u.u6_addr32[i] = cpu_to_be32(*addrp);

	addrp = (u32 *) &conn_addr->remote_ip_addr;
	for (i = 0; i < 4; i++, addrp++)
		dst_ip.in6_u.u6_addr32[i] = cpu_to_be32(*addrp);

	cnic_get_bnx2x_ctx(dev, cid, 0, &context_addr);

	xstorm_buf->context_addr.hi = context_addr.hi;
	xstorm_buf->context_addr.lo = context_addr.lo;
	xstorm_buf->mss = 0xffff;
	xstorm_buf->rcv_buf = kwqe3->rcv_buf;
	if (kwqe1->tcp_flags & L4_KWQ_CONNECT_REQ1_NAGLE_ENABLE)
		xstorm_buf->params |= L5CM_XSTORM_CONN_BUFFER_NAGLE_ENABLE;
	xstorm_buf->pseudo_header_checksum =
		swab16(~csum_ipv6_magic(&src_ip, &dst_ip, 0, IPPROTO_TCP, 0));

	if (kwqe3->ka_timeout) {
		tstorm_buf->ka_enable = 1;
		tstorm_buf->ka_timeout = kwqe3->ka_timeout;
		tstorm_buf->ka_interval = kwqe3->ka_interval;
		tstorm_buf->ka_max_probe_count = kwqe3->ka_max_probe_count;
	}
	tstorm_buf->max_rt_time = 0xffffffff;
}
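
/* The pseudo-header checksum seed above covers IPv4 connections too: the
 * unused address words are zero, which leaves the one's-complement sum
 * unchanged, so csum_ipv6_magic() over the full 128-bit addresses with
 * len = 0 yields the TCP pseudo-header sum for either family.  It is
 * inverted and byte-swapped because the hardware adds the payload bytes
 * itself.  Conceptually:
 *
 *	sum  = src + dst + proto + len;		(one's-complement add)
 *	seed = swab16(~fold(sum));
 */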
static void cnic_init_bnx2x_mac(struct cnic_dev *dev)
{
	struct bnx2x *bp = netdev_priv(dev->netdev);
	u32 pfid = bp->pfid;
	u8 *mac = dev->mac_addr;

	CNIC_WR8(dev, BAR_XSTRORM_INTMEM +
		 XSTORM_ISCSI_LOCAL_MAC_ADDR0_OFFSET(pfid), mac[0]);
	CNIC_WR8(dev, BAR_XSTRORM_INTMEM +
		 XSTORM_ISCSI_LOCAL_MAC_ADDR1_OFFSET(pfid), mac[1]);
	CNIC_WR8(dev, BAR_XSTRORM_INTMEM +
		 XSTORM_ISCSI_LOCAL_MAC_ADDR2_OFFSET(pfid), mac[2]);
	CNIC_WR8(dev, BAR_XSTRORM_INTMEM +
		 XSTORM_ISCSI_LOCAL_MAC_ADDR3_OFFSET(pfid), mac[3]);
	CNIC_WR8(dev, BAR_XSTRORM_INTMEM +
		 XSTORM_ISCSI_LOCAL_MAC_ADDR4_OFFSET(pfid), mac[4]);
	CNIC_WR8(dev, BAR_XSTRORM_INTMEM +
		 XSTORM_ISCSI_LOCAL_MAC_ADDR5_OFFSET(pfid), mac[5]);

	CNIC_WR8(dev, BAR_TSTRORM_INTMEM +
		 TSTORM_ISCSI_TCP_VARS_LSB_LOCAL_MAC_ADDR_OFFSET(pfid), mac[5]);
	CNIC_WR8(dev, BAR_TSTRORM_INTMEM +
		 TSTORM_ISCSI_TCP_VARS_LSB_LOCAL_MAC_ADDR_OFFSET(pfid) + 1,
		 mac[4]);
	CNIC_WR8(dev, BAR_TSTRORM_INTMEM +
		 TSTORM_ISCSI_TCP_VARS_MID_LOCAL_MAC_ADDR_OFFSET(pfid), mac[3]);
	CNIC_WR8(dev, BAR_TSTRORM_INTMEM +
		 TSTORM_ISCSI_TCP_VARS_MID_LOCAL_MAC_ADDR_OFFSET(pfid) + 1,
		 mac[2]);
	CNIC_WR8(dev, BAR_TSTRORM_INTMEM +
		 TSTORM_ISCSI_TCP_VARS_MSB_LOCAL_MAC_ADDR_OFFSET(pfid), mac[1]);
	CNIC_WR8(dev, BAR_TSTRORM_INTMEM +
		 TSTORM_ISCSI_TCP_VARS_MSB_LOCAL_MAC_ADDR_OFFSET(pfid) + 1,
		 mac[0]);
}

static int cnic_bnx2x_connect(struct cnic_dev *dev, struct kwqe *wqes[],
			      u32 num, int *work)
{
	struct cnic_local *cp = dev->cnic_priv;
	struct bnx2x *bp = netdev_priv(dev->netdev);
	struct l4_kwq_connect_req1 *kwqe1 =
		(struct l4_kwq_connect_req1 *) wqes[0];
	struct l4_kwq_connect_req3 *kwqe3;
	struct l5cm_active_conn_buffer *conn_buf;
	struct l5cm_conn_addr_params *conn_addr;
	union l5cm_specific_data l5_data;
	u32 l5_cid = kwqe1->pg_cid;
	struct cnic_sock *csk = &cp->csk_tbl[l5_cid];
	struct cnic_context *ctx = &cp->ctx_tbl[l5_cid];
	int ret;

	if (kwqe1->conn_flags & L4_KWQ_CONNECT_REQ1_IP_V6)

	if (sizeof(*conn_buf) > CNIC_KWQ16_DATA_SIZE) {
		netdev_err(dev->netdev, "conn_buf size too big\n");
		return -ENOMEM;
	}
	conn_buf = cnic_get_kwqe_16_data(cp, l5_cid, &l5_data);
	if (!conn_buf)
		return -ENOMEM;

	memset(conn_buf, 0, sizeof(*conn_buf));

	conn_addr = &conn_buf->conn_addr_buf;
	conn_addr->remote_addr_0 = csk->ha[0];
	conn_addr->remote_addr_1 = csk->ha[1];
	conn_addr->remote_addr_2 = csk->ha[2];
	conn_addr->remote_addr_3 = csk->ha[3];
	conn_addr->remote_addr_4 = csk->ha[4];
	conn_addr->remote_addr_5 = csk->ha[5];

	if (kwqe1->conn_flags & L4_KWQ_CONNECT_REQ1_IP_V6) {
		struct l4_kwq_connect_req2 *kwqe2 =
			(struct l4_kwq_connect_req2 *) wqes[1];

		conn_addr->local_ip_addr.ip_addr_hi_hi = kwqe2->src_ip_v6_4;
		conn_addr->local_ip_addr.ip_addr_hi_lo = kwqe2->src_ip_v6_3;
		conn_addr->local_ip_addr.ip_addr_lo_hi = kwqe2->src_ip_v6_2;

		conn_addr->remote_ip_addr.ip_addr_hi_hi = kwqe2->dst_ip_v6_4;
		conn_addr->remote_ip_addr.ip_addr_hi_lo = kwqe2->dst_ip_v6_3;
		conn_addr->remote_ip_addr.ip_addr_lo_hi = kwqe2->dst_ip_v6_2;

		conn_addr->params |= L5CM_CONN_ADDR_PARAMS_IP_VERSION;
	}
	kwqe3 = (struct l4_kwq_connect_req3 *) wqes[*work - 1];

	conn_addr->local_ip_addr.ip_addr_lo_lo = kwqe1->src_ip;
	conn_addr->remote_ip_addr.ip_addr_lo_lo = kwqe1->dst_ip;
	conn_addr->local_tcp_port = kwqe1->src_port;
	conn_addr->remote_tcp_port = kwqe1->dst_port;

	conn_addr->pmtu = kwqe3->pmtu;
	cnic_init_storm_conn_bufs(dev, kwqe1, kwqe3, conn_buf);

	CNIC_WR16(dev, BAR_XSTRORM_INTMEM +
		  XSTORM_ISCSI_LOCAL_VLAN_OFFSET(bp->pfid), csk->vlan_id);

	ret = cnic_submit_kwqe_16(dev, L5CM_RAMROD_CMD_ID_TCP_CONNECT,
				  kwqe1->cid, ISCSI_CONNECTION_TYPE, &l5_data);
	if (!ret)
		set_bit(CTX_FL_OFFLD_START, &ctx->ctx_flags);

	return ret;
}

static int cnic_bnx2x_close(struct cnic_dev *dev, struct kwqe *kwqe)
{
	struct l4_kwq_close_req *req = (struct l4_kwq_close_req *) kwqe;
	union l5cm_specific_data l5_data;
	int ret;

	memset(&l5_data, 0, sizeof(l5_data));
	ret = cnic_submit_kwqe_16(dev, L5CM_RAMROD_CMD_ID_CLOSE,
				  req->cid, ISCSI_CONNECTION_TYPE, &l5_data);
	return ret;
}

static int cnic_bnx2x_reset(struct cnic_dev *dev, struct kwqe *kwqe)
{
	struct l4_kwq_reset_req *req = (struct l4_kwq_reset_req *) kwqe;
	union l5cm_specific_data l5_data;
	int ret;

	memset(&l5_data, 0, sizeof(l5_data));
	ret = cnic_submit_kwqe_16(dev, L5CM_RAMROD_CMD_ID_ABORT,
				  req->cid, ISCSI_CONNECTION_TYPE, &l5_data);
	return ret;
}

static int cnic_bnx2x_offload_pg(struct cnic_dev *dev, struct kwqe *kwqe)
{
	struct l4_kwq_offload_pg *req = (struct l4_kwq_offload_pg *) kwqe;
	struct l4_kcq kcqe;
	struct kcqe *cqes[1];

	memset(&kcqe, 0, sizeof(kcqe));
	kcqe.pg_host_opaque = req->host_opaque;
	kcqe.pg_cid = req->host_opaque;
	kcqe.op_code = L4_KCQE_OPCODE_VALUE_OFFLOAD_PG;
	cqes[0] = (struct kcqe *) &kcqe;
	cnic_reply_bnx2x_kcqes(dev, CNIC_ULP_L4, cqes, 1);
	return 0;
}

static int cnic_bnx2x_update_pg(struct cnic_dev *dev, struct kwqe *kwqe)
{
	struct l4_kwq_update_pg *req = (struct l4_kwq_update_pg *) kwqe;
	struct l4_kcq kcqe;
	struct kcqe *cqes[1];

	memset(&kcqe, 0, sizeof(kcqe));
	kcqe.pg_host_opaque = req->pg_host_opaque;
	kcqe.pg_cid = req->pg_cid;
	kcqe.op_code = L4_KCQE_OPCODE_VALUE_UPDATE_PG;
	cqes[0] = (struct kcqe *) &kcqe;
	cnic_reply_bnx2x_kcqes(dev, CNIC_ULP_L4, cqes, 1);
	return 0;
}

static int cnic_bnx2x_fcoe_stat(struct cnic_dev *dev, struct kwqe *kwqe)
{
	struct fcoe_kwqe_stat *req;
	struct fcoe_stat_ramrod_params *fcoe_stat;
	union l5cm_specific_data l5_data;
	struct cnic_local *cp = dev->cnic_priv;
	struct bnx2x *bp = netdev_priv(dev->netdev);
	int ret;
	u32 cid;

	req = (struct fcoe_kwqe_stat *) kwqe;
	cid = BNX2X_HW_CID(bp, cp->fcoe_init_cid);

	fcoe_stat = cnic_get_kwqe_16_data(cp, BNX2X_FCOE_L5_CID_BASE, &l5_data);
	if (!fcoe_stat)
		return -ENOMEM;

	memset(fcoe_stat, 0, sizeof(*fcoe_stat));
	memcpy(&fcoe_stat->stat_kwqe, req, sizeof(*req));

	ret = cnic_submit_kwqe_16(dev, FCOE_RAMROD_CMD_ID_STAT_FUNC, cid,
				  FCOE_CONNECTION_TYPE, &l5_data);
	return ret;
}

static int cnic_bnx2x_fcoe_init1(struct cnic_dev *dev, struct kwqe *wqes[],
				 u32 num, int *work)
{
	int ret;
	struct cnic_local *cp = dev->cnic_priv;
	struct bnx2x *bp = netdev_priv(dev->netdev);
	u32 cid;
	struct fcoe_init_ramrod_params *fcoe_init;
	struct fcoe_kwqe_init1 *req1;
	struct fcoe_kwqe_init2 *req2;
	struct fcoe_kwqe_init3 *req3;
	union l5cm_specific_data l5_data;

	req1 = (struct fcoe_kwqe_init1 *) wqes[0];
	req2 = (struct fcoe_kwqe_init2 *) wqes[1];
	req3 = (struct fcoe_kwqe_init3 *) wqes[2];
	if (req2->hdr.op_code != FCOE_KWQE_OPCODE_INIT2) {
		*work = 2;
		return -EINVAL;
	}
	if (req3->hdr.op_code != FCOE_KWQE_OPCODE_INIT3) {
		*work = 3;
		return -EINVAL;
	}

	if (sizeof(*fcoe_init) > CNIC_KWQ16_DATA_SIZE) {
		netdev_err(dev->netdev, "fcoe_init size too big\n");
		return -ENOMEM;
	}
	fcoe_init = cnic_get_kwqe_16_data(cp, BNX2X_FCOE_L5_CID_BASE, &l5_data);
	if (!fcoe_init)
		return -ENOMEM;

	memset(fcoe_init, 0, sizeof(*fcoe_init));
	memcpy(&fcoe_init->init_kwqe1, req1, sizeof(*req1));
	memcpy(&fcoe_init->init_kwqe2, req2, sizeof(*req2));
	memcpy(&fcoe_init->init_kwqe3, req3, sizeof(*req3));
	fcoe_init->eq_pbl_base.lo = cp->kcq2.dma.pgtbl_map & 0xffffffff;
	fcoe_init->eq_pbl_base.hi = (u64) cp->kcq2.dma.pgtbl_map >> 32;
	fcoe_init->eq_pbl_size = cp->kcq2.dma.num_pages;

	fcoe_init->sb_num = cp->status_blk_num;
	fcoe_init->eq_prod = MAX_KCQ_IDX;
	fcoe_init->sb_id = HC_INDEX_FCOE_EQ_CONS;
	cp->kcq2.sw_prod_idx = 0;

	cid = BNX2X_HW_CID(bp, cp->fcoe_init_cid);
	ret = cnic_submit_kwqe_16(dev, FCOE_RAMROD_CMD_ID_INIT_FUNC, cid,
				  FCOE_CONNECTION_TYPE, &l5_data);
	return ret;
}

static int cnic_bnx2x_fcoe_ofld1(struct cnic_dev *dev, struct kwqe *wqes[],
				 u32 num, int *work)
{
	int ret = 0;
	u32 cid = -1, l5_cid;
	struct cnic_local *cp = dev->cnic_priv;
	struct bnx2x *bp = netdev_priv(dev->netdev);
	struct fcoe_kwqe_conn_offload1 *req1;
	struct fcoe_kwqe_conn_offload2 *req2;
	struct fcoe_kwqe_conn_offload3 *req3;
	struct fcoe_kwqe_conn_offload4 *req4;
	struct fcoe_conn_offload_ramrod_params *fcoe_offload;
	struct cnic_context *ctx;
	struct fcoe_context *fctx;
	struct regpair ctx_addr;
	union l5cm_specific_data l5_data;
	struct fcoe_kcqe kcqe;
	struct kcqe *cqes[1];

	req1 = (struct fcoe_kwqe_conn_offload1 *) wqes[0];
	req2 = (struct fcoe_kwqe_conn_offload2 *) wqes[1];
	req3 = (struct fcoe_kwqe_conn_offload3 *) wqes[2];
	req4 = (struct fcoe_kwqe_conn_offload4 *) wqes[3];

	l5_cid = req1->fcoe_conn_id;
	if (l5_cid >= dev->max_fcoe_conn)
		goto err_reply;

	l5_cid += BNX2X_FCOE_L5_CID_BASE;

	ctx = &cp->ctx_tbl[l5_cid];
	if (test_bit(CTX_FL_OFFLD_START, &ctx->ctx_flags))
		goto err_reply;

	ret = cnic_alloc_bnx2x_conn_resc(dev, l5_cid);
	if (ret) {
		ret = -ENOMEM;
		goto err_reply;
	}

	cid = ctx->cid;
	fctx = cnic_get_bnx2x_ctx(dev, cid, 1, &ctx_addr);
	if (fctx) {
		u32 hw_cid = BNX2X_HW_CID(bp, cid);
		u32 val;

		val = CDU_RSRVD_VALUE_TYPE_A(hw_cid, CDU_REGION_NUMBER_XCM_AG,
					     FCOE_CONNECTION_TYPE);
		fctx->xstorm_ag_context.cdu_reserved = val;
		val = CDU_RSRVD_VALUE_TYPE_A(hw_cid, CDU_REGION_NUMBER_UCM_AG,
					     FCOE_CONNECTION_TYPE);
		fctx->ustorm_ag_context.cdu_usage = val;
	}
	if (sizeof(*fcoe_offload) > CNIC_KWQ16_DATA_SIZE) {
		netdev_err(dev->netdev, "fcoe_offload size too big\n");
		goto err_reply;
	}
	fcoe_offload = cnic_get_kwqe_16_data(cp, l5_cid, &l5_data);
	if (!fcoe_offload)
		goto err_reply;

	memset(fcoe_offload, 0, sizeof(*fcoe_offload));
	memcpy(&fcoe_offload->offload_kwqe1, req1, sizeof(*req1));
	memcpy(&fcoe_offload->offload_kwqe2, req2, sizeof(*req2));
	memcpy(&fcoe_offload->offload_kwqe3, req3, sizeof(*req3));
	memcpy(&fcoe_offload->offload_kwqe4, req4, sizeof(*req4));

	cid = BNX2X_HW_CID(bp, cid);
	ret = cnic_submit_kwqe_16(dev, FCOE_RAMROD_CMD_ID_OFFLOAD_CONN, cid,
				  FCOE_CONNECTION_TYPE, &l5_data);
	if (!ret)
		set_bit(CTX_FL_OFFLD_START, &ctx->ctx_flags);

	return ret;

err_reply:
	if (cid != -1)
		cnic_free_bnx2x_conn_resc(dev, l5_cid);

	memset(&kcqe, 0, sizeof(kcqe));
	kcqe.op_code = FCOE_KCQE_OPCODE_OFFLOAD_CONN;
	kcqe.fcoe_conn_id = req1->fcoe_conn_id;
	kcqe.completion_status = FCOE_KCQE_COMPLETION_STATUS_CTX_ALLOC_FAILURE;

	cqes[0] = (struct kcqe *) &kcqe;
	cnic_reply_bnx2x_kcqes(dev, CNIC_ULP_FCOE, cqes, 1);
	return ret;
}

static int cnic_bnx2x_fcoe_enable(struct cnic_dev *dev, struct kwqe *kwqe)
{
	struct fcoe_kwqe_conn_enable_disable *req;
	struct fcoe_conn_enable_disable_ramrod_params *fcoe_enable;
	union l5cm_specific_data l5_data;
	int ret;
	u32 cid, l5_cid;
	struct cnic_local *cp = dev->cnic_priv;

	req = (struct fcoe_kwqe_conn_enable_disable *) kwqe;
	cid = req->context_id;
	l5_cid = req->conn_id + BNX2X_FCOE_L5_CID_BASE;

	if (sizeof(*fcoe_enable) > CNIC_KWQ16_DATA_SIZE) {
		netdev_err(dev->netdev, "fcoe_enable size too big\n");
		return -ENOMEM;
	}
	fcoe_enable = cnic_get_kwqe_16_data(cp, l5_cid, &l5_data);
	if (!fcoe_enable)
		return -ENOMEM;

	memset(fcoe_enable, 0, sizeof(*fcoe_enable));
	memcpy(&fcoe_enable->enable_disable_kwqe, req, sizeof(*req));
2452 ret = cnic_submit_kwqe_16(dev, FCOE_RAMROD_CMD_ID_ENABLE_CONN, cid,
2453 FCOE_CONNECTION_TYPE, &l5_data);
2457 static int cnic_bnx2x_fcoe_disable(struct cnic_dev *dev, struct kwqe *kwqe)
2459 struct fcoe_kwqe_conn_enable_disable *req;
2460 struct fcoe_conn_enable_disable_ramrod_params *fcoe_disable;
2461 union l5cm_specific_data l5_data;
2464 struct cnic_local *cp = dev->cnic_priv;
2466 req = (struct fcoe_kwqe_conn_enable_disable *) kwqe;
2467 cid = req->context_id;
2468 l5_cid = req->conn_id;
2469 if (l5_cid >= dev->max_fcoe_conn)
2472 l5_cid += BNX2X_FCOE_L5_CID_BASE;
2474 if (sizeof(*fcoe_disable) > CNIC_KWQ16_DATA_SIZE) {
2475 netdev_err(dev->netdev, "fcoe_disable size too big\n");
2478 fcoe_disable = cnic_get_kwqe_16_data(cp, l5_cid, &l5_data);
2482 memset(fcoe_disable, 0, sizeof(*fcoe_disable));
2483 memcpy(&fcoe_disable->enable_disable_kwqe, req, sizeof(*req));
2484 ret = cnic_submit_kwqe_16(dev, FCOE_RAMROD_CMD_ID_DISABLE_CONN, cid,
2485 FCOE_CONNECTION_TYPE, &l5_data);
2489 static int cnic_bnx2x_fcoe_destroy(struct cnic_dev *dev, struct kwqe *kwqe)
2491 struct fcoe_kwqe_conn_destroy *req;
2492 union l5cm_specific_data l5_data;
2495 struct cnic_local *cp = dev->cnic_priv;
2496 struct cnic_context *ctx;
2497 struct fcoe_kcqe kcqe;
2498 struct kcqe *cqes[1];
2500 req = (struct fcoe_kwqe_conn_destroy *) kwqe;
2501 cid = req->context_id;
2502 l5_cid = req->conn_id;
2503 if (l5_cid >= dev->max_fcoe_conn)
2506 l5_cid += BNX2X_FCOE_L5_CID_BASE;
2508 ctx = &cp->ctx_tbl[l5_cid];
2510 init_waitqueue_head(&ctx->waitq);
2513 memset(&kcqe, 0, sizeof(kcqe));
2514 kcqe.completion_status = FCOE_KCQE_COMPLETION_STATUS_ERROR;
2515 memset(&l5_data, 0, sizeof(l5_data));
2516 ret = cnic_submit_kwqe_16(dev, FCOE_RAMROD_CMD_ID_TERMINATE_CONN, cid,
2517 FCOE_CONNECTION_TYPE, &l5_data);
2519 wait_event_timeout(ctx->waitq, ctx->wait_cond, CNIC_RAMROD_TMO);
2521 kcqe.completion_status = 0;
2524 set_bit(CTX_FL_DELETE_WAIT, &ctx->ctx_flags);
2525 queue_delayed_work(cnic_wq, &cp->delete_task, msecs_to_jiffies(2000));
2527 kcqe.op_code = FCOE_KCQE_OPCODE_DESTROY_CONN;
2528 kcqe.fcoe_conn_id = req->conn_id;
2529 kcqe.fcoe_conn_context_id = cid;
2531 cqes[0] = (struct kcqe *) &kcqe;
2532 cnic_reply_bnx2x_kcqes(dev, CNIC_ULP_FCOE, cqes, 1);
2536 static void cnic_bnx2x_delete_wait(struct cnic_dev *dev, u32 start_cid)
2538 struct cnic_local *cp = dev->cnic_priv;
2541 for (i = start_cid; i < cp->max_cid_space; i++) {
2542 struct cnic_context *ctx = &cp->ctx_tbl[i];
2545 while (test_bit(CTX_FL_DELETE_WAIT, &ctx->ctx_flags))
2548 for (j = 0; j < 5; j++) {
2549 if (!test_bit(CTX_FL_OFFLD_START, &ctx->ctx_flags))
2554 if (test_bit(CTX_FL_OFFLD_START, &ctx->ctx_flags))
2555 netdev_warn(dev->netdev, "CID %x not deleted\n",
2556 ctx->cid);
2560 static int cnic_bnx2x_fcoe_fw_destroy(struct cnic_dev *dev, struct kwqe *kwqe)
2562 union l5cm_specific_data l5_data;
2563 struct cnic_local *cp = dev->cnic_priv;
2564 struct bnx2x *bp = netdev_priv(dev->netdev);
2568 cnic_bnx2x_delete_wait(dev, MAX_ISCSI_TBL_SZ);
2570 cid = BNX2X_HW_CID(bp, cp->fcoe_init_cid);
2572 memset(&l5_data, 0, sizeof(l5_data));
2573 ret = cnic_submit_kwqe_16(dev, FCOE_RAMROD_CMD_ID_DESTROY_FUNC, cid,
2574 FCOE_CONNECTION_TYPE, &l5_data);
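/* Build a completion KCQE locally when a KWQE cannot reach the chip
 * (typically after a parity error), so ULP drivers see a failure with
 * PARITY_ERROR status immediately instead of waiting for a timeout.
 */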
2578 static void cnic_bnx2x_kwqe_err(struct cnic_dev *dev, struct kwqe *kwqe)
2580 struct cnic_local *cp = dev->cnic_priv;
2582 struct kcqe *cqes[1];
2584 u32 opcode = KWQE_OPCODE(kwqe->kwqe_op_flag);
2585 u32 layer_code = kwqe->kwqe_op_flag & KWQE_LAYER_MASK;
2589 cid = kwqe->kwqe_info0;
2590 memset(&kcqe, 0, sizeof(kcqe));
2592 if (layer_code == KWQE_FLAGS_LAYER_MASK_L5_FCOE) {
2595 ulp_type = CNIC_ULP_FCOE;
2596 if (opcode == FCOE_KWQE_OPCODE_DISABLE_CONN) {
2597 struct fcoe_kwqe_conn_enable_disable *req;
2599 req = (struct fcoe_kwqe_conn_enable_disable *) kwqe;
2600 kcqe_op = FCOE_KCQE_OPCODE_DISABLE_CONN;
2601 cid = req->context_id;
2602 l5_cid = req->conn_id;
2603 } else if (opcode == FCOE_KWQE_OPCODE_DESTROY) {
2604 kcqe_op = FCOE_KCQE_OPCODE_DESTROY_FUNC;
2608 kcqe.kcqe_op_flag = kcqe_op << KCQE_FLAGS_OPCODE_SHIFT;
2609 kcqe.kcqe_op_flag |= KCQE_FLAGS_LAYER_MASK_L5_FCOE;
2610 kcqe.kcqe_info1 = FCOE_KCQE_COMPLETION_STATUS_PARITY_ERROR;
2611 kcqe.kcqe_info2 = cid;
2612 kcqe.kcqe_info0 = l5_cid;
2614 } else if (layer_code == KWQE_FLAGS_LAYER_MASK_L5_ISCSI) {
2615 ulp_type = CNIC_ULP_ISCSI;
2616 if (opcode == ISCSI_KWQE_OPCODE_UPDATE_CONN)
2617 cid = kwqe->kwqe_info1;
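/* For iSCSI the completion opcode is derived arithmetically: in this
 * firmware interface each KCQE opcode is the matching KWQE opcode
 * plus 0x10.
 */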
2619 kcqe.kcqe_op_flag = (opcode + 0x10) << KCQE_FLAGS_OPCODE_SHIFT;
2620 kcqe.kcqe_op_flag |= KCQE_FLAGS_LAYER_MASK_L5_ISCSI;
2621 kcqe.kcqe_info1 = ISCSI_KCQE_COMPLETION_STATUS_PARITY_ERR;
2622 kcqe.kcqe_info2 = cid;
2623 cnic_get_l5_cid(cp, BNX2X_SW_CID(cid), &kcqe.kcqe_info0);
2625 } else if (layer_code == KWQE_FLAGS_LAYER_MASK_L4) {
2626 struct l4_kcq *l4kcqe = (struct l4_kcq *) &kcqe;
2628 ulp_type = CNIC_ULP_L4;
2629 if (opcode == L4_KWQE_OPCODE_VALUE_CONNECT1)
2630 kcqe_op = L4_KCQE_OPCODE_VALUE_CONNECT_COMPLETE;
2631 else if (opcode == L4_KWQE_OPCODE_VALUE_RESET)
2632 kcqe_op = L4_KCQE_OPCODE_VALUE_RESET_COMP;
2633 else if (opcode == L4_KWQE_OPCODE_VALUE_CLOSE)
2634 kcqe_op = L4_KCQE_OPCODE_VALUE_CLOSE_COMP;
2638 kcqe.kcqe_op_flag = (kcqe_op << KCQE_FLAGS_OPCODE_SHIFT) |
2639 KCQE_FLAGS_LAYER_MASK_L4;
2640 l4kcqe->status = L4_KCQE_COMPLETION_STATUS_PARITY_ERROR;
2642 cnic_get_l5_cid(cp, BNX2X_SW_CID(cid), &l4kcqe->conn_id);
2648 cnic_reply_bnx2x_kcqes(dev, ulp_type, cqes, 1);
2651 static int cnic_submit_bnx2x_iscsi_kwqes(struct cnic_dev *dev,
2652 struct kwqe *wqes[], u32 num_wqes)
2658 if (!test_bit(CNIC_F_CNIC_UP, &dev->flags))
2659 return -EAGAIN; /* bnx2x is down */
2661 for (i = 0; i < num_wqes; ) {
2663 opcode = KWQE_OPCODE(kwqe->kwqe_op_flag);
2667 case ISCSI_KWQE_OPCODE_INIT1:
2668 ret = cnic_bnx2x_iscsi_init1(dev, kwqe);
2670 case ISCSI_KWQE_OPCODE_INIT2:
2671 ret = cnic_bnx2x_iscsi_init2(dev, kwqe);
2673 case ISCSI_KWQE_OPCODE_OFFLOAD_CONN1:
2674 ret = cnic_bnx2x_iscsi_ofld1(dev, &wqes[i],
2675 num_wqes - i, &work);
2677 case ISCSI_KWQE_OPCODE_UPDATE_CONN:
2678 ret = cnic_bnx2x_iscsi_update(dev, kwqe);
2680 case ISCSI_KWQE_OPCODE_DESTROY_CONN:
2681 ret = cnic_bnx2x_iscsi_destroy(dev, kwqe);
2683 case L4_KWQE_OPCODE_VALUE_CONNECT1:
2684 ret = cnic_bnx2x_connect(dev, &wqes[i], num_wqes - i,
2687 case L4_KWQE_OPCODE_VALUE_CLOSE:
2688 ret = cnic_bnx2x_close(dev, kwqe);
2690 case L4_KWQE_OPCODE_VALUE_RESET:
2691 ret = cnic_bnx2x_reset(dev, kwqe);
2693 case L4_KWQE_OPCODE_VALUE_OFFLOAD_PG:
2694 ret = cnic_bnx2x_offload_pg(dev, kwqe);
2696 case L4_KWQE_OPCODE_VALUE_UPDATE_PG:
2697 ret = cnic_bnx2x_update_pg(dev, kwqe);
2699 case L4_KWQE_OPCODE_VALUE_UPLOAD_PG:
2704 netdev_err(dev->netdev, "Unknown type of KWQE(0x%x)\n",
2705 opcode);
2709 netdev_err(dev->netdev, "KWQE(0x%x) failed\n",
2710 opcode);
2712 /* Possibly bnx2x parity error, send completion
2713 * to ulp drivers with error code to speed up
2714 * cleanup and reset recovery.
2716 if (ret == -EIO || ret == -EAGAIN)
2717 cnic_bnx2x_kwqe_err(dev, kwqe);
2724 static int cnic_submit_bnx2x_fcoe_kwqes(struct cnic_dev *dev,
2725 struct kwqe *wqes[], u32 num_wqes)
2727 struct bnx2x *bp = netdev_priv(dev->netdev);
2732 if (!test_bit(CNIC_F_CNIC_UP, &dev->flags))
2733 return -EAGAIN; /* bnx2x is down */
2735 if (!BNX2X_CHIP_IS_E2_PLUS(bp))
2738 for (i = 0; i < num_wqes; ) {
2740 opcode = KWQE_OPCODE(kwqe->kwqe_op_flag);
2744 case FCOE_KWQE_OPCODE_INIT1:
2745 ret = cnic_bnx2x_fcoe_init1(dev, &wqes[i],
2746 num_wqes - i, &work);
2748 case FCOE_KWQE_OPCODE_OFFLOAD_CONN1:
2749 ret = cnic_bnx2x_fcoe_ofld1(dev, &wqes[i],
2750 num_wqes - i, &work);
2752 case FCOE_KWQE_OPCODE_ENABLE_CONN:
2753 ret = cnic_bnx2x_fcoe_enable(dev, kwqe);
2755 case FCOE_KWQE_OPCODE_DISABLE_CONN:
2756 ret = cnic_bnx2x_fcoe_disable(dev, kwqe);
2758 case FCOE_KWQE_OPCODE_DESTROY_CONN:
2759 ret = cnic_bnx2x_fcoe_destroy(dev, kwqe);
2761 case FCOE_KWQE_OPCODE_DESTROY:
2762 ret = cnic_bnx2x_fcoe_fw_destroy(dev, kwqe);
2764 case FCOE_KWQE_OPCODE_STAT:
2765 ret = cnic_bnx2x_fcoe_stat(dev, kwqe);
2769 netdev_err(dev->netdev, "Unknown type of KWQE(0x%x)\n",
2770 opcode);
2774 netdev_err(dev->netdev, "KWQE(0x%x) failed\n",
2775 opcode);
2777 /* Possibly bnx2x parity error, send completion
2778 * to ulp drivers with error code to speed up
2779 * cleanup and reset recovery.
2781 if (ret == -EIO || ret == -EAGAIN)
2782 cnic_bnx2x_kwqe_err(dev, kwqe);
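/* Top-level bnx2x KWQE entry point.  All KWQEs in one submission must
 * belong to the same protocol layer; the layer bits of the first KWQE
 * select between the iSCSI/L4 and FCoE paths above.
 */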
2789 static int cnic_submit_bnx2x_kwqes(struct cnic_dev *dev, struct kwqe *wqes[],
2795 if (!test_bit(CNIC_F_CNIC_UP, &dev->flags))
2796 return -EAGAIN; /* bnx2x is down */
2801 layer_code = wqes[0]->kwqe_op_flag & KWQE_LAYER_MASK;
2802 switch (layer_code) {
2803 case KWQE_FLAGS_LAYER_MASK_L5_ISCSI:
2804 case KWQE_FLAGS_LAYER_MASK_L4:
2805 case KWQE_FLAGS_LAYER_MASK_L2:
2806 ret = cnic_submit_bnx2x_iscsi_kwqes(dev, wqes, num_wqes);
2809 case KWQE_FLAGS_LAYER_MASK_L5_FCOE:
2810 ret = cnic_submit_bnx2x_fcoe_kwqes(dev, wqes, num_wqes);
2816 static inline u32 cnic_get_kcqe_layer_mask(u32 opflag)
2818 if (unlikely(KCQE_OPCODE(opflag) == FCOE_RAMROD_CMD_ID_TERMINATE_CONN))
2819 return KCQE_FLAGS_LAYER_MASK_L4;
2821 return opflag & KCQE_FLAGS_LAYER_MASK;
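/* Deliver a batch of completed KCQEs to the ULP drivers.  Consecutive
 * KCQEs for the same layer are grouped into a single indicate_kcqes()
 * upcall, and ramrod completions are counted so the SPQ credits can be
 * returned to the bnx2x driver at the end.
 */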
2824 static void service_kcqes(struct cnic_dev *dev, int num_cqes)
2826 struct cnic_local *cp = dev->cnic_priv;
2832 struct cnic_ulp_ops *ulp_ops;
2834 u32 kcqe_op_flag = cp->completed_kcq[i]->kcqe_op_flag;
2835 u32 kcqe_layer = cnic_get_kcqe_layer_mask(kcqe_op_flag);
2837 if (unlikely(kcqe_op_flag & KCQE_RAMROD_COMPLETION))
2840 while (j < num_cqes) {
2841 u32 next_op = cp->completed_kcq[i + j]->kcqe_op_flag;
2843 if (cnic_get_kcqe_layer_mask(next_op) != kcqe_layer)
2846 if (unlikely(next_op & KCQE_RAMROD_COMPLETION))
2851 if (kcqe_layer == KCQE_FLAGS_LAYER_MASK_L5_RDMA)
2852 ulp_type = CNIC_ULP_RDMA;
2853 else if (kcqe_layer == KCQE_FLAGS_LAYER_MASK_L5_ISCSI)
2854 ulp_type = CNIC_ULP_ISCSI;
2855 else if (kcqe_layer == KCQE_FLAGS_LAYER_MASK_L5_FCOE)
2856 ulp_type = CNIC_ULP_FCOE;
2857 else if (kcqe_layer == KCQE_FLAGS_LAYER_MASK_L4)
2858 ulp_type = CNIC_ULP_L4;
2859 else if (kcqe_layer == KCQE_FLAGS_LAYER_MASK_L2)
2862 netdev_err(dev->netdev, "Unknown type of KCQE(0x%x)\n",
2868 ulp_ops = rcu_dereference(cp->ulp_ops[ulp_type]);
2869 if (likely(ulp_ops)) {
2870 ulp_ops->indicate_kcqes(cp->ulp_handle[ulp_type],
2871 cp->completed_kcq + i, j);
2880 cnic_spq_completion(dev, DRV_CTL_RET_L5_SPQ_CREDIT_CMD, comp);
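/* Copy new KCQE pointers from the hardware ring into completed_kcq[],
 * stopping at the hardware producer index or MAX_COMPLETED_KCQE.  Only
 * entries up to the last KCQE without KCQE_FLAGS_NEXT are counted, so
 * a multi-KCQE group is never handed up partially.
 */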
2883 static int cnic_get_kcqes(struct cnic_dev *dev, struct kcq_info *info)
2885 struct cnic_local *cp = dev->cnic_priv;
2886 u16 i, ri, hw_prod, last;
2887 struct kcqe *kcqe;
2888 int kcqe_cnt = 0, last_cnt = 0;
2890 i = ri = last = info->sw_prod_idx;
2891 ri &= MAX_KCQ_IDX;
2892 hw_prod = *info->hw_prod_idx_ptr;
2893 hw_prod = info->hw_idx(hw_prod);
2895 while ((i != hw_prod) && (kcqe_cnt < MAX_COMPLETED_KCQE)) {
2896 kcqe = &info->kcq[KCQ_PG(ri)][KCQ_IDX(ri)];
2897 cp->completed_kcq[kcqe_cnt++] = kcqe;
2898 i = info->next_idx(i);
2899 ri = i & MAX_KCQ_IDX;
2900 if (likely(!(kcqe->kcqe_op_flag & KCQE_FLAGS_NEXT))) {
2901 last_cnt = kcqe_cnt;
2906 info->sw_prod_idx = last;
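/* Count CLIENT_SETUP/HALT ramrod completions on the uio L2 ring
 * (bnx2x only); a nonzero count tells cnic_chk_pkt_rings() that the
 * pending L2 ring transition has finished.
 */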
2910 static int cnic_l2_completion(struct cnic_local *cp)
2912 u16 hw_cons, sw_cons;
2913 struct cnic_uio_dev *udev = cp->udev;
2914 union eth_rx_cqe *cqe, *cqe_ring = (union eth_rx_cqe *)
2915 (udev->l2_ring + (2 * CNIC_PAGE_SIZE));
2919 if (!test_bit(CNIC_F_BNX2X_CLASS, &cp->dev->flags))
2922 hw_cons = *cp->rx_cons_ptr;
2923 if ((hw_cons & BNX2X_MAX_RCQ_DESC_CNT) == BNX2X_MAX_RCQ_DESC_CNT)
2926 sw_cons = cp->rx_cons;
2927 while (sw_cons != hw_cons) {
2930 cqe = &cqe_ring[sw_cons & BNX2X_MAX_RCQ_DESC_CNT];
2931 cqe_fp_flags = cqe->fast_path_cqe.type_error_flags;
2932 if (cqe_fp_flags & ETH_FAST_PATH_RX_CQE_TYPE) {
2933 cmd = le32_to_cpu(cqe->ramrod_cqe.conn_and_cmd_data);
2934 cmd >>= COMMON_RAMROD_ETH_RX_CQE_CMD_ID_SHIFT;
2935 if (cmd == RAMROD_CMD_ID_ETH_CLIENT_SETUP ||
2936 cmd == RAMROD_CMD_ID_ETH_HALT)
2939 sw_cons = BNX2X_NEXT_RCQE(sw_cons);
2944 static void cnic_chk_pkt_rings(struct cnic_local *cp)
2946 u16 rx_cons, tx_cons;
2949 if (!test_bit(CNIC_LCL_FL_RINGS_INITED, &cp->cnic_local_flags))
2952 rx_cons = *cp->rx_cons_ptr;
2953 tx_cons = *cp->tx_cons_ptr;
2954 if (cp->tx_cons != tx_cons || cp->rx_cons != rx_cons) {
2955 if (test_bit(CNIC_LCL_FL_L2_WAIT, &cp->cnic_local_flags))
2956 comp = cnic_l2_completion(cp);
2958 cp->tx_cons = tx_cons;
2959 cp->rx_cons = rx_cons;
2962 uio_event_notify(&cp->udev->cnic_uinfo);
2965 clear_bit(CNIC_LCL_FL_L2_WAIT, &cp->cnic_local_flags);
2968 static u32 cnic_service_bnx2_queues(struct cnic_dev *dev)
2970 struct cnic_local *cp = dev->cnic_priv;
2971 u32 status_idx = (u16) *cp->kcq1.status_idx_ptr;
2974 /* status block index must be read before reading other fields */
2976 cp->kwq_con_idx = *cp->kwq_con_idx_ptr;
2978 while ((kcqe_cnt = cnic_get_kcqes(dev, &cp->kcq1))) {
2980 service_kcqes(dev, kcqe_cnt);
2982 /* Tell compiler that status_blk fields can change. */
2984 status_idx = (u16) *cp->kcq1.status_idx_ptr;
2985 /* status block index must be read first */
2987 cp->kwq_con_idx = *cp->kwq_con_idx_ptr;
2990 CNIC_WR16(dev, cp->kcq1.io_addr, cp->kcq1.sw_prod_idx);
2992 cnic_chk_pkt_rings(cp);
2997 static int cnic_service_bnx2(void *data, void *status_blk)
2999 struct cnic_dev *dev = data;
3001 if (unlikely(!test_bit(CNIC_F_CNIC_UP, &dev->flags))) {
3002 struct status_block *sblk = status_blk;
3004 return sblk->status_idx;
3007 return cnic_service_bnx2_queues(dev);
3010 static void cnic_service_bnx2_msix(struct tasklet_struct *t)
3012 struct cnic_local *cp = from_tasklet(cp, t, cnic_irq_task);
3013 struct cnic_dev *dev = cp->dev;
3015 cp->last_status_idx = cnic_service_bnx2_queues(dev);
3017 CNIC_WR(dev, BNX2_PCICFG_INT_ACK_CMD, cp->int_num |
3018 BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID | cp->last_status_idx);
3021 static void cnic_doirq(struct cnic_dev *dev)
3023 struct cnic_local *cp = dev->cnic_priv;
3025 if (likely(test_bit(CNIC_F_CNIC_UP, &dev->flags))) {
3026 u16 prod = cp->kcq1.sw_prod_idx & MAX_KCQ_IDX;
3028 prefetch(cp->status_blk.gen);
3029 prefetch(&cp->kcq1.kcq[KCQ_PG(prod)][KCQ_IDX(prod)]);
3031 tasklet_schedule(&cp->cnic_irq_task);
3035 static irqreturn_t cnic_irq(int irq, void *dev_instance)
3037 struct cnic_dev *dev = dev_instance;
3038 struct cnic_local *cp = dev->cnic_priv;
3048 static inline void cnic_ack_bnx2x_int(struct cnic_dev *dev, u8 id, u8 storm,
3049 u16 index, u8 op, u8 update)
3051 struct bnx2x *bp = netdev_priv(dev->netdev);
3052 u32 hc_addr = (HC_REG_COMMAND_REG + BP_PORT(bp) * 32 +
3053 COMMAND_REG_INT_ACK);
3054 struct igu_ack_register igu_ack;
3056 igu_ack.status_block_index = index;
3057 igu_ack.sb_id_and_flags =
3058 ((id << IGU_ACK_REGISTER_STATUS_BLOCK_ID_SHIFT) |
3059 (storm << IGU_ACK_REGISTER_STORM_ID_SHIFT) |
3060 (update << IGU_ACK_REGISTER_UPDATE_INDEX_SHIFT) |
3061 (op << IGU_ACK_REGISTER_INTERRUPT_MODE_SHIFT));
3063 CNIC_WR(dev, hc_addr, (*(u32 *)&igu_ack));
3066 static void cnic_ack_igu_sb(struct cnic_dev *dev, u8 igu_sb_id, u8 segment,
3067 u16 index, u8 op, u8 update)
3069 struct igu_regular cmd_data;
3070 u32 igu_addr = BAR_IGU_INTMEM + (IGU_CMD_INT_ACK_BASE + igu_sb_id) * 8;
3072 cmd_data.sb_id_and_flags =
3073 (index << IGU_REGULAR_SB_INDEX_SHIFT) |
3074 (segment << IGU_REGULAR_SEGMENT_ACCESS_SHIFT) |
3075 (update << IGU_REGULAR_BUPDATE_SHIFT) |
3076 (op << IGU_REGULAR_ENABLE_INT_SHIFT);
3079 CNIC_WR(dev, igu_addr, cmd_data.sb_id_and_flags);
3082 static void cnic_ack_bnx2x_msix(struct cnic_dev *dev)
3084 struct cnic_local *cp = dev->cnic_priv;
3086 cnic_ack_bnx2x_int(dev, cp->bnx2x_igu_sb_id, CSTORM_ID, 0,
3087 IGU_INT_DISABLE, 0);
3090 static void cnic_ack_bnx2x_e2_msix(struct cnic_dev *dev)
3092 struct cnic_local *cp = dev->cnic_priv;
3094 cnic_ack_igu_sb(dev, cp->bnx2x_igu_sb_id, IGU_SEG_ACCESS_DEF, 0,
3095 IGU_INT_DISABLE, 0);
3098 static void cnic_arm_bnx2x_msix(struct cnic_dev *dev, u32 idx)
3100 struct cnic_local *cp = dev->cnic_priv;
3102 cnic_ack_bnx2x_int(dev, cp->bnx2x_igu_sb_id, CSTORM_ID, idx,
3106 static void cnic_arm_bnx2x_e2_msix(struct cnic_dev *dev, u32 idx)
3108 struct cnic_local *cp = dev->cnic_priv;
3110 cnic_ack_igu_sb(dev, cp->bnx2x_igu_sb_id, IGU_SEG_ACCESS_DEF, idx,
3114 static u32 cnic_service_bnx2x_kcq(struct cnic_dev *dev, struct kcq_info *info)
3116 u32 last_status = *info->status_idx_ptr;
3119 /* status block index must be read before reading the KCQ */
3121 while ((kcqe_cnt = cnic_get_kcqes(dev, info))) {
3123 service_kcqes(dev, kcqe_cnt);
3125 /* Tell compiler that sblk fields can change. */
3128 last_status = *info->status_idx_ptr;
3129 /* status block index must be read before reading the KCQ */
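/* Bottom half for bnx2x: drain kcq1 (iSCSI/L4), update its producer
 * index, and on FCoE-capable chips drain kcq2 as well; the IGU status
 * block is only acked and re-enabled once both queues are quiescent.
 */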
3135 static void cnic_service_bnx2x_bh(struct tasklet_struct *t)
3137 struct cnic_local *cp = from_tasklet(cp, t, cnic_irq_task);
3138 struct cnic_dev *dev = cp->dev;
3139 struct bnx2x *bp = netdev_priv(dev->netdev);
3140 u32 status_idx, new_status_idx;
3142 if (unlikely(!test_bit(CNIC_F_CNIC_UP, &dev->flags)))
3146 status_idx = cnic_service_bnx2x_kcq(dev, &cp->kcq1);
3148 CNIC_WR16(dev, cp->kcq1.io_addr,
3149 cp->kcq1.sw_prod_idx + MAX_KCQ_IDX);
3151 if (!CNIC_SUPPORTS_FCOE(bp)) {
3152 cp->arm_int(dev, status_idx);
3156 new_status_idx = cnic_service_bnx2x_kcq(dev, &cp->kcq2);
3158 if (new_status_idx != status_idx)
3161 CNIC_WR16(dev, cp->kcq2.io_addr, cp->kcq2.sw_prod_idx +
3164 cnic_ack_igu_sb(dev, cp->bnx2x_igu_sb_id, IGU_SEG_ACCESS_DEF,
3165 status_idx, IGU_INT_ENABLE, 1);
3171 static int cnic_service_bnx2x(void *data, void *status_blk)
3173 struct cnic_dev *dev = data;
3174 struct cnic_local *cp = dev->cnic_priv;
3176 if (!(cp->ethdev->drv_state & CNIC_DRV_STATE_USING_MSIX))
3177 cnic_doirq(dev);
3179 cnic_chk_pkt_rings(cp);
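/* Stop a single ULP type.  ULP_F_CALL_PENDING is set under cnic_lock
 * so a concurrent unregister will wait, and cnic_stop() is invoked at
 * most once per start thanks to the ULP_F_START test-and-clear.
 */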
3184 static void cnic_ulp_stop_one(struct cnic_local *cp, int if_type)
3186 struct cnic_ulp_ops *ulp_ops;
3188 if (if_type == CNIC_ULP_ISCSI)
3189 cnic_send_nlmsg(cp, ISCSI_KEVENT_IF_DOWN, NULL);
3191 mutex_lock(&cnic_lock);
3192 ulp_ops = rcu_dereference_protected(cp->ulp_ops[if_type],
3193 lockdep_is_held(&cnic_lock));
3195 mutex_unlock(&cnic_lock);
3198 set_bit(ULP_F_CALL_PENDING, &cp->ulp_flags[if_type]);
3199 mutex_unlock(&cnic_lock);
3201 if (test_and_clear_bit(ULP_F_START, &cp->ulp_flags[if_type]))
3202 ulp_ops->cnic_stop(cp->ulp_handle[if_type]);
3204 clear_bit(ULP_F_CALL_PENDING, &cp->ulp_flags[if_type]);
3207 static void cnic_ulp_stop(struct cnic_dev *dev)
3209 struct cnic_local *cp = dev->cnic_priv;
3212 for (if_type = 0; if_type < MAX_CNIC_ULP_TYPE; if_type++)
3213 cnic_ulp_stop_one(cp, if_type);
3216 static void cnic_ulp_start(struct cnic_dev *dev)
3218 struct cnic_local *cp = dev->cnic_priv;
3221 for (if_type = 0; if_type < MAX_CNIC_ULP_TYPE; if_type++) {
3222 struct cnic_ulp_ops *ulp_ops;
3224 mutex_lock(&cnic_lock);
3225 ulp_ops = rcu_dereference_protected(cp->ulp_ops[if_type],
3226 lockdep_is_held(&cnic_lock));
3227 if (!ulp_ops || !ulp_ops->cnic_start) {
3228 mutex_unlock(&cnic_lock);
3231 set_bit(ULP_F_CALL_PENDING, &cp->ulp_flags[if_type]);
3232 mutex_unlock(&cnic_lock);
3234 if (!test_and_set_bit(ULP_F_START, &cp->ulp_flags[if_type]))
3235 ulp_ops->cnic_start(cp->ulp_handle[if_type]);
3237 clear_bit(ULP_F_CALL_PENDING, &cp->ulp_flags[if_type]);
3241 static int cnic_copy_ulp_stats(struct cnic_dev *dev, int ulp_type)
3243 struct cnic_local *cp = dev->cnic_priv;
3244 struct cnic_ulp_ops *ulp_ops;
3247 mutex_lock(&cnic_lock);
3248 ulp_ops = rcu_dereference_protected(cp->ulp_ops[ulp_type],
3249 lockdep_is_held(&cnic_lock));
3250 if (ulp_ops && ulp_ops->cnic_get_stats)
3251 rc = ulp_ops->cnic_get_stats(cp->ulp_handle[ulp_type]);
3254 mutex_unlock(&cnic_lock);
3258 static int cnic_ctl(void *data, struct cnic_ctl_info *info)
3260 struct cnic_dev *dev = data;
3261 int ulp_type = CNIC_ULP_ISCSI;
3263 switch (info->cmd) {
3264 case CNIC_CTL_STOP_CMD:
3272 case CNIC_CTL_START_CMD:
3275 if (!cnic_start_hw(dev))
3276 cnic_ulp_start(dev);
3280 case CNIC_CTL_STOP_ISCSI_CMD: {
3281 struct cnic_local *cp = dev->cnic_priv;
3282 set_bit(CNIC_LCL_FL_STOP_ISCSI, &cp->cnic_local_flags);
3283 queue_delayed_work(cnic_wq, &cp->delete_task, 0);
3286 case CNIC_CTL_COMPLETION_CMD: {
3287 struct cnic_ctl_completion *comp = &info->data.comp;
3288 u32 cid = BNX2X_SW_CID(comp->cid);
3290 struct cnic_local *cp = dev->cnic_priv;
3292 if (!test_bit(CNIC_F_CNIC_UP, &dev->flags))
3295 if (cnic_get_l5_cid(cp, cid, &l5_cid) == 0) {
3296 struct cnic_context *ctx = &cp->ctx_tbl[l5_cid];
3298 if (unlikely(comp->error)) {
3299 set_bit(CTX_FL_CID_ERROR, &ctx->ctx_flags);
3300 netdev_err(dev->netdev,
3301 "CID %x CFC delete comp error %x\n",
3306 wake_up(&ctx->waitq);
3310 case CNIC_CTL_FCOE_STATS_GET_CMD:
3311 ulp_type = CNIC_ULP_FCOE;
3313 case CNIC_CTL_ISCSI_STATS_GET_CMD:
3315 cnic_copy_ulp_stats(dev, ulp_type);
3325 static void cnic_ulp_init(struct cnic_dev *dev)
3328 struct cnic_local *cp = dev->cnic_priv;
3330 for (i = 0; i < MAX_CNIC_ULP_TYPE_EXT; i++) {
3331 struct cnic_ulp_ops *ulp_ops;
3333 mutex_lock(&cnic_lock);
3334 ulp_ops = cnic_ulp_tbl_prot(i);
3335 if (!ulp_ops || !ulp_ops->cnic_init) {
3336 mutex_unlock(&cnic_lock);
3340 mutex_unlock(&cnic_lock);
3342 if (!test_and_set_bit(ULP_F_INIT, &cp->ulp_flags[i]))
3343 ulp_ops->cnic_init(dev);
3349 static void cnic_ulp_exit(struct cnic_dev *dev)
3352 struct cnic_local *cp = dev->cnic_priv;
3354 for (i = 0; i < MAX_CNIC_ULP_TYPE_EXT; i++) {
3355 struct cnic_ulp_ops *ulp_ops;
3357 mutex_lock(&cnic_lock);
3358 ulp_ops = cnic_ulp_tbl_prot(i);
3359 if (!ulp_ops || !ulp_ops->cnic_exit) {
3360 mutex_unlock(&cnic_lock);
3364 mutex_unlock(&cnic_lock);
3366 if (test_and_clear_bit(ULP_F_INIT, &cp->ulp_flags[i]))
3367 ulp_ops->cnic_exit(dev);
3373 static int cnic_cm_offload_pg(struct cnic_sock *csk)
3375 struct cnic_dev *dev = csk->dev;
3376 struct l4_kwq_offload_pg *l4kwqe;
3377 struct kwqe *wqes[1];
3379 l4kwqe = (struct l4_kwq_offload_pg *) &csk->kwqe1;
3380 memset(l4kwqe, 0, sizeof(*l4kwqe));
3381 wqes[0] = (struct kwqe *) l4kwqe;
3383 l4kwqe->op_code = L4_KWQE_OPCODE_VALUE_OFFLOAD_PG;
3385 L4_LAYER_CODE << L4_KWQ_OFFLOAD_PG_LAYER_CODE_SHIFT;
3386 l4kwqe->l2hdr_nbytes = ETH_HLEN;
3388 l4kwqe->da0 = csk->ha[0];
3389 l4kwqe->da1 = csk->ha[1];
3390 l4kwqe->da2 = csk->ha[2];
3391 l4kwqe->da3 = csk->ha[3];
3392 l4kwqe->da4 = csk->ha[4];
3393 l4kwqe->da5 = csk->ha[5];
3395 l4kwqe->sa0 = dev->mac_addr[0];
3396 l4kwqe->sa1 = dev->mac_addr[1];
3397 l4kwqe->sa2 = dev->mac_addr[2];
3398 l4kwqe->sa3 = dev->mac_addr[3];
3399 l4kwqe->sa4 = dev->mac_addr[4];
3400 l4kwqe->sa5 = dev->mac_addr[5];
3402 l4kwqe->etype = ETH_P_IP;
3403 l4kwqe->ipid_start = DEF_IPID_START;
3404 l4kwqe->host_opaque = csk->l5_cid;
3407 l4kwqe->pg_flags |= L4_KWQ_OFFLOAD_PG_VLAN_TAGGING;
3408 l4kwqe->vlan_tag = csk->vlan_id;
3409 l4kwqe->l2hdr_nbytes += 4;
3412 return dev->submit_kwqes(dev, wqes, 1);
3415 static int cnic_cm_update_pg(struct cnic_sock *csk)
3417 struct cnic_dev *dev = csk->dev;
3418 struct l4_kwq_update_pg *l4kwqe;
3419 struct kwqe *wqes[1];
3421 l4kwqe = (struct l4_kwq_update_pg *) &csk->kwqe1;
3422 memset(l4kwqe, 0, sizeof(*l4kwqe));
3423 wqes[0] = (struct kwqe *) l4kwqe;
3425 l4kwqe->opcode = L4_KWQE_OPCODE_VALUE_UPDATE_PG;
3427 L4_LAYER_CODE << L4_KWQ_UPDATE_PG_LAYER_CODE_SHIFT;
3428 l4kwqe->pg_cid = csk->pg_cid;
3430 l4kwqe->da0 = csk->ha[0];
3431 l4kwqe->da1 = csk->ha[1];
3432 l4kwqe->da2 = csk->ha[2];
3433 l4kwqe->da3 = csk->ha[3];
3434 l4kwqe->da4 = csk->ha[4];
3435 l4kwqe->da5 = csk->ha[5];
3437 l4kwqe->pg_host_opaque = csk->l5_cid;
3438 l4kwqe->pg_valids = L4_KWQ_UPDATE_PG_VALIDS_DA;
3440 return dev->submit_kwqes(dev, wqes, 1);
3443 static int cnic_cm_upload_pg(struct cnic_sock *csk)
3445 struct cnic_dev *dev = csk->dev;
3446 struct l4_kwq_upload *l4kwqe;
3447 struct kwqe *wqes[1];
3449 l4kwqe = (struct l4_kwq_upload *) &csk->kwqe1;
3450 memset(l4kwqe, 0, sizeof(*l4kwqe));
3451 wqes[0] = (struct kwqe *) l4kwqe;
3453 l4kwqe->opcode = L4_KWQE_OPCODE_VALUE_UPLOAD_PG;
3455 L4_LAYER_CODE << L4_KWQ_UPLOAD_LAYER_CODE_SHIFT;
3456 l4kwqe->cid = csk->pg_cid;
3458 return dev->submit_kwqes(dev, wqes, 1);
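/* Build and submit the TCP connect request.  IPv4 needs two KWQEs
 * (CONNECT1 + CONNECT3); IPv6 inserts CONNECT2 with the remaining 96
 * bits of each address, for three KWQEs in total.
 */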
3461 static int cnic_cm_conn_req(struct cnic_sock *csk)
3463 struct cnic_dev *dev = csk->dev;
3464 struct l4_kwq_connect_req1 *l4kwqe1;
3465 struct l4_kwq_connect_req2 *l4kwqe2;
3466 struct l4_kwq_connect_req3 *l4kwqe3;
3467 struct kwqe *wqes[3];
3468 u8 tcp_flags = 0;
3469 u16 num_wqes = 2;
3471 l4kwqe1 = (struct l4_kwq_connect_req1 *) &csk->kwqe1;
3472 l4kwqe2 = (struct l4_kwq_connect_req2 *) &csk->kwqe2;
3473 l4kwqe3 = (struct l4_kwq_connect_req3 *) &csk->kwqe3;
3474 memset(l4kwqe1, 0, sizeof(*l4kwqe1));
3475 memset(l4kwqe2, 0, sizeof(*l4kwqe2));
3476 memset(l4kwqe3, 0, sizeof(*l4kwqe3));
3478 l4kwqe3->op_code = L4_KWQE_OPCODE_VALUE_CONNECT3;
3480 L4_LAYER_CODE << L4_KWQ_CONNECT_REQ3_LAYER_CODE_SHIFT;
3481 l4kwqe3->ka_timeout = csk->ka_timeout;
3482 l4kwqe3->ka_interval = csk->ka_interval;
3483 l4kwqe3->ka_max_probe_count = csk->ka_max_probe_count;
3484 l4kwqe3->tos = csk->tos;
3485 l4kwqe3->ttl = csk->ttl;
3486 l4kwqe3->snd_seq_scale = csk->snd_seq_scale;
3487 l4kwqe3->pmtu = csk->mtu;
3488 l4kwqe3->rcv_buf = csk->rcv_buf;
3489 l4kwqe3->snd_buf = csk->snd_buf;
3490 l4kwqe3->seed = csk->seed;
3492 wqes[0] = (struct kwqe *) l4kwqe1;
3493 if (test_bit(SK_F_IPV6, &csk->flags)) {
3494 wqes[1] = (struct kwqe *) l4kwqe2;
3495 wqes[2] = (struct kwqe *) l4kwqe3;
3496 num_wqes = 3;
3498 l4kwqe1->conn_flags = L4_KWQ_CONNECT_REQ1_IP_V6;
3499 l4kwqe2->op_code = L4_KWQE_OPCODE_VALUE_CONNECT2;
3501 L4_KWQ_CONNECT_REQ2_LINKED_WITH_NEXT |
3502 L4_LAYER_CODE << L4_KWQ_CONNECT_REQ2_LAYER_CODE_SHIFT;
3503 l4kwqe2->src_ip_v6_2 = be32_to_cpu(csk->src_ip[1]);
3504 l4kwqe2->src_ip_v6_3 = be32_to_cpu(csk->src_ip[2]);
3505 l4kwqe2->src_ip_v6_4 = be32_to_cpu(csk->src_ip[3]);
3506 l4kwqe2->dst_ip_v6_2 = be32_to_cpu(csk->dst_ip[1]);
3507 l4kwqe2->dst_ip_v6_3 = be32_to_cpu(csk->dst_ip[2]);
3508 l4kwqe2->dst_ip_v6_4 = be32_to_cpu(csk->dst_ip[3]);
3509 l4kwqe3->mss = l4kwqe3->pmtu - sizeof(struct ipv6hdr) -
3510 sizeof(struct tcphdr);
3512 wqes[1] = (struct kwqe *) l4kwqe3;
3513 l4kwqe3->mss = l4kwqe3->pmtu - sizeof(struct iphdr) -
3514 sizeof(struct tcphdr);
3517 l4kwqe1->op_code = L4_KWQE_OPCODE_VALUE_CONNECT1;
3519 (L4_LAYER_CODE << L4_KWQ_CONNECT_REQ1_LAYER_CODE_SHIFT) |
3520 L4_KWQ_CONNECT_REQ3_LINKED_WITH_NEXT;
3521 l4kwqe1->cid = csk->cid;
3522 l4kwqe1->pg_cid = csk->pg_cid;
3523 l4kwqe1->src_ip = be32_to_cpu(csk->src_ip[0]);
3524 l4kwqe1->dst_ip = be32_to_cpu(csk->dst_ip[0]);
3525 l4kwqe1->src_port = be16_to_cpu(csk->src_port);
3526 l4kwqe1->dst_port = be16_to_cpu(csk->dst_port);
3527 if (csk->tcp_flags & SK_TCP_NO_DELAY_ACK)
3528 tcp_flags |= L4_KWQ_CONNECT_REQ1_NO_DELAY_ACK;
3529 if (csk->tcp_flags & SK_TCP_KEEP_ALIVE)
3530 tcp_flags |= L4_KWQ_CONNECT_REQ1_KEEP_ALIVE;
3531 if (csk->tcp_flags & SK_TCP_NAGLE)
3532 tcp_flags |= L4_KWQ_CONNECT_REQ1_NAGLE_ENABLE;
3533 if (csk->tcp_flags & SK_TCP_TIMESTAMP)
3534 tcp_flags |= L4_KWQ_CONNECT_REQ1_TIME_STAMP;
3535 if (csk->tcp_flags & SK_TCP_SACK)
3536 tcp_flags |= L4_KWQ_CONNECT_REQ1_SACK;
3537 if (csk->tcp_flags & SK_TCP_SEG_SCALING)
3538 tcp_flags |= L4_KWQ_CONNECT_REQ1_SEG_SCALING;
3540 l4kwqe1->tcp_flags = tcp_flags;
3542 return dev->submit_kwqes(dev, wqes, num_wqes);
3545 static int cnic_cm_close_req(struct cnic_sock *csk)
3547 struct cnic_dev *dev = csk->dev;
3548 struct l4_kwq_close_req *l4kwqe;
3549 struct kwqe *wqes[1];
3551 l4kwqe = (struct l4_kwq_close_req *) &csk->kwqe2;
3552 memset(l4kwqe, 0, sizeof(*l4kwqe));
3553 wqes[0] = (struct kwqe *) l4kwqe;
3555 l4kwqe->op_code = L4_KWQE_OPCODE_VALUE_CLOSE;
3556 l4kwqe->flags = L4_LAYER_CODE << L4_KWQ_CLOSE_REQ_LAYER_CODE_SHIFT;
3557 l4kwqe->cid = csk->cid;
3559 return dev->submit_kwqes(dev, wqes, 1);
3562 static int cnic_cm_abort_req(struct cnic_sock *csk)
3564 struct cnic_dev *dev = csk->dev;
3565 struct l4_kwq_reset_req *l4kwqe;
3566 struct kwqe *wqes[1];
3568 l4kwqe = (struct l4_kwq_reset_req *) &csk->kwqe2;
3569 memset(l4kwqe, 0, sizeof(*l4kwqe));
3570 wqes[0] = (struct kwqe *) l4kwqe;
3572 l4kwqe->op_code = L4_KWQE_OPCODE_VALUE_RESET;
3573 l4kwqe->flags = L4_LAYER_CODE << L4_KWQ_RESET_REQ_LAYER_CODE_SHIFT;
3574 l4kwqe->cid = csk->cid;
3576 return dev->submit_kwqes(dev, wqes, 1);
3579 static int cnic_cm_create(struct cnic_dev *dev, int ulp_type, u32 cid,
3580 u32 l5_cid, struct cnic_sock **csk, void *context)
3582 struct cnic_local *cp = dev->cnic_priv;
3583 struct cnic_sock *csk1;
3585 if (l5_cid >= MAX_CM_SK_TBL_SZ)
3589 struct cnic_context *ctx = &cp->ctx_tbl[l5_cid];
3591 if (test_bit(CTX_FL_OFFLD_START, &ctx->ctx_flags))
3595 csk1 = &cp->csk_tbl[l5_cid];
3596 if (atomic_read(&csk1->ref_count))
3597 return -EAGAIN;
3599 if (test_and_set_bit(SK_F_INUSE, &csk1->flags))
3600 return -EBUSY;
3604 csk1->l5_cid = l5_cid;
3605 csk1->ulp_type = ulp_type;
3606 csk1->context = context;
3608 csk1->ka_timeout = DEF_KA_TIMEOUT;
3609 csk1->ka_interval = DEF_KA_INTERVAL;
3610 csk1->ka_max_probe_count = DEF_KA_MAX_PROBE_COUNT;
3611 csk1->tos = DEF_TOS;
3612 csk1->ttl = DEF_TTL;
3613 csk1->snd_seq_scale = DEF_SND_SEQ_SCALE;
3614 csk1->rcv_buf = DEF_RCV_BUF;
3615 csk1->snd_buf = DEF_SND_BUF;
3616 csk1->seed = DEF_SEED;
3617 csk1->tcp_flags = 0;
3623 static void cnic_cm_cleanup(struct cnic_sock *csk)
3625 if (csk->src_port) {
3626 struct cnic_dev *dev = csk->dev;
3627 struct cnic_local *cp = dev->cnic_priv;
3629 cnic_free_id(&cp->csk_port_tbl, be16_to_cpu(csk->src_port));
3634 static void cnic_close_conn(struct cnic_sock *csk)
3636 if (test_bit(SK_F_PG_OFFLD_COMPLETE, &csk->flags)) {
3637 cnic_cm_upload_pg(csk);
3638 clear_bit(SK_F_PG_OFFLD_COMPLETE, &csk->flags);
3640 cnic_cm_cleanup(csk);
3643 static int cnic_cm_destroy(struct cnic_sock *csk)
3645 if (!cnic_in_use(csk))
3649 clear_bit(SK_F_INUSE, &csk->flags);
3650 smp_mb__after_atomic();
3651 while (atomic_read(&csk->ref_count) != 1)
3653 cnic_cm_cleanup(csk);
3660 static inline u16 cnic_get_vlan(struct net_device *dev,
3661 struct net_device **vlan_dev)
3663 if (is_vlan_dev(dev)) {
3664 *vlan_dev = vlan_dev_real_dev(dev);
3665 return vlan_dev_vlan_id(dev);
3671 static int cnic_get_v4_route(struct sockaddr_in *dst_addr,
3672 struct dst_entry **dst)
3674 #if defined(CONFIG_INET)
3677 rt = ip_route_output(&init_net, dst_addr->sin_addr.s_addr, 0, 0, 0);
3684 return -ENETUNREACH;
3688 static int cnic_get_v6_route(struct sockaddr_in6 *dst_addr,
3689 struct dst_entry **dst)
3691 #if IS_ENABLED(CONFIG_IPV6)
3694 memset(&fl6, 0, sizeof(fl6));
3695 fl6.daddr = dst_addr->sin6_addr;
3696 if (ipv6_addr_type(&fl6.daddr) & IPV6_ADDR_LINKLOCAL)
3697 fl6.flowi6_oif = dst_addr->sin6_scope_id;
3699 *dst = ip6_route_output(&init_net, NULL, &fl6);
3700 if ((*dst)->error) {
3701 dst_release(*dst);
3702 *dst = NULL;
3703 return -ENETUNREACH;
3708 return -ENETUNREACH;
3711 static struct cnic_dev *cnic_cm_select_dev(struct sockaddr_in *dst_addr,
3714 struct cnic_dev *dev = NULL;
3715 struct dst_entry *dst;
3716 struct net_device *netdev = NULL;
3717 int err = -ENETUNREACH;
3719 if (dst_addr->sin_family == AF_INET)
3720 err = cnic_get_v4_route(dst_addr, &dst);
3721 else if (dst_addr->sin_family == AF_INET6) {
3722 struct sockaddr_in6 *dst_addr6 =
3723 (struct sockaddr_in6 *) dst_addr;
3725 err = cnic_get_v6_route(dst_addr6, &dst);
3735 cnic_get_vlan(dst->dev, &netdev);
3737 dev = cnic_from_netdev(netdev);
3746 static int cnic_resolve_addr(struct cnic_sock *csk, struct cnic_sockaddr *saddr)
3748 struct cnic_dev *dev = csk->dev;
3749 struct cnic_local *cp = dev->cnic_priv;
3751 return cnic_send_nlmsg(cp, ISCSI_KEVENT_PATH_REQ, csk);
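/* Resolve the route to the destination, record address, VLAN and path
 * MTU in the cnic_sock, and reserve a local TCP port from
 * csk_port_tbl: the caller's port if it falls inside the CNIC range,
 * otherwise a freshly allocated one.
 */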
3754 static int cnic_get_route(struct cnic_sock *csk, struct cnic_sockaddr *saddr)
3756 struct cnic_dev *dev = csk->dev;
3757 struct cnic_local *cp = dev->cnic_priv;
3759 struct dst_entry *dst = NULL;
3760 struct net_device *realdev;
3764 if (saddr->local.v6.sin6_family == AF_INET6 &&
3765 saddr->remote.v6.sin6_family == AF_INET6)
3767 else if (saddr->local.v4.sin_family == AF_INET &&
3768 saddr->remote.v4.sin_family == AF_INET)
3773 clear_bit(SK_F_IPV6, &csk->flags);
3776 set_bit(SK_F_IPV6, &csk->flags);
3777 cnic_get_v6_route(&saddr->remote.v6, &dst);
3779 memcpy(&csk->dst_ip[0], &saddr->remote.v6.sin6_addr,
3780 sizeof(struct in6_addr));
3781 csk->dst_port = saddr->remote.v6.sin6_port;
3782 local_port = saddr->local.v6.sin6_port;
3785 cnic_get_v4_route(&saddr->remote.v4, &dst);
3787 csk->dst_ip[0] = saddr->remote.v4.sin_addr.s_addr;
3788 csk->dst_port = saddr->remote.v4.sin_port;
3789 local_port = saddr->local.v4.sin_port;
3793 csk->mtu = dev->netdev->mtu;
3794 if (dst && dst->dev) {
3795 u16 vlan = cnic_get_vlan(dst->dev, &realdev);
3796 if (realdev == dev->netdev) {
3797 csk->vlan_id = vlan;
3798 csk->mtu = dst_mtu(dst);
3802 port_id = be16_to_cpu(local_port);
3803 if (port_id >= CNIC_LOCAL_PORT_MIN &&
3804 port_id < CNIC_LOCAL_PORT_MAX) {
3805 if (cnic_alloc_id(&cp->csk_port_tbl, port_id))
3811 port_id = cnic_alloc_new_id(&cp->csk_port_tbl);
3812 if (port_id == -1) {
3816 local_port = cpu_to_be16(port_id);
3818 csk->src_port = local_port;
3825 static void cnic_init_csk_state(struct cnic_sock *csk)
3828 clear_bit(SK_F_OFFLD_SCHED, &csk->flags);
3829 clear_bit(SK_F_CLOSING, &csk->flags);
3832 static int cnic_cm_connect(struct cnic_sock *csk, struct cnic_sockaddr *saddr)
3834 struct cnic_local *cp = csk->dev->cnic_priv;
3837 if (cp->ethdev->drv_state & CNIC_DRV_STATE_NO_ISCSI)
3840 if (!cnic_in_use(csk))
3843 if (test_and_set_bit(SK_F_CONNECT_START, &csk->flags))
3846 cnic_init_csk_state(csk);
3848 err = cnic_get_route(csk, saddr);
3852 err = cnic_resolve_addr(csk, saddr);
3857 clear_bit(SK_F_CONNECT_START, &csk->flags);
3861 static int cnic_cm_abort(struct cnic_sock *csk)
3863 struct cnic_local *cp = csk->dev->cnic_priv;
3864 u32 opcode = L4_KCQE_OPCODE_VALUE_RESET_COMP;
3866 if (!cnic_in_use(csk))
3869 if (cnic_abort_prep(csk))
3870 return cnic_cm_abort_req(csk);
3872 /* Getting here means that we haven't started connect, or
3873 * connect was not successful, or it has been reset by the target.
3876 cp->close_conn(csk, opcode);
3877 if (csk->state != opcode) {
3878 /* Wait for remote reset sequence to complete */
3879 while (test_bit(SK_F_PG_OFFLD_COMPLETE, &csk->flags))
3888 static int cnic_cm_close(struct cnic_sock *csk)
3890 if (!cnic_in_use(csk))
3893 if (cnic_close_prep(csk)) {
3894 csk->state = L4_KCQE_OPCODE_VALUE_CLOSE_COMP;
3895 return cnic_cm_close_req(csk);
3897 /* Wait for remote reset sequence to complete */
3898 while (test_bit(SK_F_PG_OFFLD_COMPLETE, &csk->flags))
3906 static void cnic_cm_upcall(struct cnic_local *cp, struct cnic_sock *csk,
3909 struct cnic_ulp_ops *ulp_ops;
3910 int ulp_type = csk->ulp_type;
3913 ulp_ops = rcu_dereference(cp->ulp_ops[ulp_type]);
3915 if (opcode == L4_KCQE_OPCODE_VALUE_CONNECT_COMPLETE)
3916 ulp_ops->cm_connect_complete(csk);
3917 else if (opcode == L4_KCQE_OPCODE_VALUE_CLOSE_COMP)
3918 ulp_ops->cm_close_complete(csk);
3919 else if (opcode == L4_KCQE_OPCODE_VALUE_RESET_RECEIVED)
3920 ulp_ops->cm_remote_abort(csk);
3921 else if (opcode == L4_KCQE_OPCODE_VALUE_RESET_COMP)
3922 ulp_ops->cm_abort_complete(csk);
3923 else if (opcode == L4_KCQE_OPCODE_VALUE_CLOSE_RECEIVED)
3924 ulp_ops->cm_remote_close(csk);
3929 static int cnic_cm_set_pg(struct cnic_sock *csk)
3931 if (cnic_offld_prep(csk)) {
3932 if (test_bit(SK_F_PG_OFFLD_COMPLETE, &csk->flags))
3933 cnic_cm_update_pg(csk);
3935 cnic_cm_offload_pg(csk);
3940 static void cnic_cm_process_offld_pg(struct cnic_dev *dev, struct l4_kcq *kcqe)
3942 struct cnic_local *cp = dev->cnic_priv;
3943 u32 l5_cid = kcqe->pg_host_opaque;
3944 u8 opcode = kcqe->op_code;
3945 struct cnic_sock *csk = &cp->csk_tbl[l5_cid];
3948 if (!cnic_in_use(csk))
3951 if (opcode == L4_KCQE_OPCODE_VALUE_UPDATE_PG) {
3952 clear_bit(SK_F_OFFLD_SCHED, &csk->flags);
3955 /* Possible PG kcqe status: SUCCESS, OFFLOADED_PG, or CTX_ALLOC_FAIL */
3956 if (kcqe->status == L4_KCQE_COMPLETION_STATUS_CTX_ALLOC_FAIL) {
3957 clear_bit(SK_F_OFFLD_SCHED, &csk->flags);
3958 cnic_cm_upcall(cp, csk,
3959 L4_KCQE_OPCODE_VALUE_CONNECT_COMPLETE);
3963 csk->pg_cid = kcqe->pg_cid;
3964 set_bit(SK_F_PG_OFFLD_COMPLETE, &csk->flags);
3965 cnic_cm_conn_req(csk);
3971 static void cnic_process_fcoe_term_conn(struct cnic_dev *dev, struct kcqe *kcqe)
3973 struct cnic_local *cp = dev->cnic_priv;
3974 struct fcoe_kcqe *fc_kcqe = (struct fcoe_kcqe *) kcqe;
3975 u32 l5_cid = fc_kcqe->fcoe_conn_id + BNX2X_FCOE_L5_CID_BASE;
3976 struct cnic_context *ctx = &cp->ctx_tbl[l5_cid];
3978 ctx->timestamp = jiffies;
3980 wake_up(&ctx->waitq);
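/* Demultiplex an L4/L5CM KCQE: FCoE terminate and PG events go to the
 * dedicated handlers above, everything else is matched to a cnic_sock
 * by its L5 CID and drives the connect/close/abort state machine with
 * the appropriate ULP upcall.
 */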
3983 static void cnic_cm_process_kcqe(struct cnic_dev *dev, struct kcqe *kcqe)
3985 struct cnic_local *cp = dev->cnic_priv;
3986 struct l4_kcq *l4kcqe = (struct l4_kcq *) kcqe;
3987 u8 opcode = l4kcqe->op_code;
3989 struct cnic_sock *csk;
3991 if (opcode == FCOE_RAMROD_CMD_ID_TERMINATE_CONN) {
3992 cnic_process_fcoe_term_conn(dev, kcqe);
3995 if (opcode == L4_KCQE_OPCODE_VALUE_OFFLOAD_PG ||
3996 opcode == L4_KCQE_OPCODE_VALUE_UPDATE_PG) {
3997 cnic_cm_process_offld_pg(dev, l4kcqe);
4001 l5_cid = l4kcqe->conn_id;
4003 l5_cid = l4kcqe->cid;
4004 if (l5_cid >= MAX_CM_SK_TBL_SZ)
4007 csk = &cp->csk_tbl[l5_cid];
4010 if (!cnic_in_use(csk)) {
4016 case L5CM_RAMROD_CMD_ID_TCP_CONNECT:
4017 if (l4kcqe->status != 0) {
4018 clear_bit(SK_F_OFFLD_SCHED, &csk->flags);
4019 cnic_cm_upcall(cp, csk,
4020 L4_KCQE_OPCODE_VALUE_CONNECT_COMPLETE);
4023 case L4_KCQE_OPCODE_VALUE_CONNECT_COMPLETE:
4024 if (l4kcqe->status == 0)
4025 set_bit(SK_F_OFFLD_COMPLETE, &csk->flags);
4026 else if (l4kcqe->status ==
4027 L4_KCQE_COMPLETION_STATUS_PARITY_ERROR)
4028 set_bit(SK_F_HW_ERR, &csk->flags);
4030 smp_mb__before_atomic();
4031 clear_bit(SK_F_OFFLD_SCHED, &csk->flags);
4032 cnic_cm_upcall(cp, csk, opcode);
4035 case L5CM_RAMROD_CMD_ID_CLOSE: {
4036 struct iscsi_kcqe *l5kcqe = (struct iscsi_kcqe *) kcqe;
4038 if (l4kcqe->status == 0 && l5kcqe->completion_status == 0)
4041 netdev_warn(dev->netdev, "RAMROD CLOSE compl with status 0x%x completion status 0x%x\n",
4042 l4kcqe->status, l5kcqe->completion_status);
4043 opcode = L4_KCQE_OPCODE_VALUE_CLOSE_COMP;
4046 case L4_KCQE_OPCODE_VALUE_RESET_RECEIVED:
4047 case L4_KCQE_OPCODE_VALUE_CLOSE_COMP:
4048 case L4_KCQE_OPCODE_VALUE_RESET_COMP:
4049 case L5CM_RAMROD_CMD_ID_SEARCHER_DELETE:
4050 case L5CM_RAMROD_CMD_ID_TERMINATE_OFFLOAD:
4051 if (l4kcqe->status == L4_KCQE_COMPLETION_STATUS_PARITY_ERROR)
4052 set_bit(SK_F_HW_ERR, &csk->flags);
4054 cp->close_conn(csk, opcode);
4057 case L4_KCQE_OPCODE_VALUE_CLOSE_RECEIVED:
4058 /* after we already sent CLOSE_REQ */
4059 if (test_bit(CNIC_F_BNX2X_CLASS, &dev->flags) &&
4060 !test_bit(SK_F_OFFLD_COMPLETE, &csk->flags) &&
4061 csk->state == L4_KCQE_OPCODE_VALUE_CLOSE_COMP)
4062 cp->close_conn(csk, L4_KCQE_OPCODE_VALUE_RESET_COMP);
4064 cnic_cm_upcall(cp, csk, opcode);
4070 static void cnic_cm_indicate_kcqe(void *data, struct kcqe *kcqe[], u32 num)
4072 struct cnic_dev *dev = data;
4075 for (i = 0; i < num; i++)
4076 cnic_cm_process_kcqe(dev, kcqe[i]);
4079 static struct cnic_ulp_ops cm_ulp_ops = {
4080 .indicate_kcqes = cnic_cm_indicate_kcqe,
4083 static void cnic_cm_free_mem(struct cnic_dev *dev)
4085 struct cnic_local *cp = dev->cnic_priv;
4087 kvfree(cp->csk_tbl);
4089 cnic_free_id_tbl(&cp->csk_port_tbl);
4092 static int cnic_cm_alloc_mem(struct cnic_dev *dev)
4094 struct cnic_local *cp = dev->cnic_priv;
4098 cp->csk_tbl = kvcalloc(MAX_CM_SK_TBL_SZ, sizeof(struct cnic_sock),
4103 for (i = 0; i < MAX_CM_SK_TBL_SZ; i++)
4104 atomic_set(&cp->csk_tbl[i].ref_count, 0);
4106 port_id = get_random_u32_below(CNIC_LOCAL_PORT_RANGE);
4107 if (cnic_init_id_tbl(&cp->csk_port_tbl, CNIC_LOCAL_PORT_RANGE,
4108 CNIC_LOCAL_PORT_MIN, port_id)) {
4109 cnic_cm_free_mem(dev);
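/* Decide whether a close/reset event may move the socket into the
 * CLOSING state; returns nonzero when the caller should proceed with
 * teardown.  The acceptance rules are spelled out in the comment
 * below.
 */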
4115 static int cnic_ready_to_close(struct cnic_sock *csk, u32 opcode)
4117 if (test_and_clear_bit(SK_F_OFFLD_COMPLETE, &csk->flags)) {
4118 /* Unsolicited RESET_COMP or RESET_RECEIVED */
4119 opcode = L4_KCQE_OPCODE_VALUE_RESET_RECEIVED;
4120 csk->state = opcode;
4123 /* 1. If event opcode matches the expected event in csk->state
4124 * 2. If the expected event is CLOSE_COMP or RESET_COMP, we accept any
4125 *    event.
4126 * 3. If the expected event is 0, meaning the connection was never
4127 *    established, we accept the opcode from cm_abort.
4129 if (opcode == csk->state || csk->state == 0 ||
4130 csk->state == L4_KCQE_OPCODE_VALUE_CLOSE_COMP ||
4131 csk->state == L4_KCQE_OPCODE_VALUE_RESET_COMP) {
4132 if (!test_and_set_bit(SK_F_CLOSING, &csk->flags)) {
4133 if (csk->state == 0)
4134 csk->state = opcode;
4141 static void cnic_close_bnx2_conn(struct cnic_sock *csk, u32 opcode)
4143 struct cnic_dev *dev = csk->dev;
4144 struct cnic_local *cp = dev->cnic_priv;
4146 if (opcode == L4_KCQE_OPCODE_VALUE_RESET_RECEIVED) {
4147 cnic_cm_upcall(cp, csk, opcode);
4151 clear_bit(SK_F_CONNECT_START, &csk->flags);
4152 cnic_close_conn(csk);
4153 csk->state = opcode;
4154 cnic_cm_upcall(cp, csk, opcode);
4157 static void cnic_cm_stop_bnx2_hw(struct cnic_dev *dev)
4161 static int cnic_cm_init_bnx2_hw(struct cnic_dev *dev)
4165 seed = get_random_u32();
4166 cnic_ctx_wr(dev, 45, 0, seed);
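/* bnx2x teardown state machine: each completed stage submits the next
 * ramrod (SEARCHER_DELETE, then TERMINATE_OFFLOAD), and once no
 * further ramrod is required the connection is closed and the ULP
 * receives its final upcall.
 */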
4170 static void cnic_close_bnx2x_conn(struct cnic_sock *csk, u32 opcode)
4172 struct cnic_dev *dev = csk->dev;
4173 struct cnic_local *cp = dev->cnic_priv;
4174 struct cnic_context *ctx = &cp->ctx_tbl[csk->l5_cid];
4175 union l5cm_specific_data l5_data;
4177 int close_complete = 0;
4180 case L4_KCQE_OPCODE_VALUE_RESET_RECEIVED:
4181 case L4_KCQE_OPCODE_VALUE_CLOSE_COMP:
4182 case L4_KCQE_OPCODE_VALUE_RESET_COMP:
4183 if (cnic_ready_to_close(csk, opcode)) {
4184 if (test_bit(SK_F_HW_ERR, &csk->flags))
4186 else if (test_bit(SK_F_PG_OFFLD_COMPLETE, &csk->flags))
4187 cmd = L5CM_RAMROD_CMD_ID_SEARCHER_DELETE;
4192 case L5CM_RAMROD_CMD_ID_SEARCHER_DELETE:
4193 cmd = L5CM_RAMROD_CMD_ID_TERMINATE_OFFLOAD;
4195 case L5CM_RAMROD_CMD_ID_TERMINATE_OFFLOAD:
4200 memset(&l5_data, 0, sizeof(l5_data));
4202 cnic_submit_kwqe_16(dev, cmd, csk->cid, ISCSI_CONNECTION_TYPE,
4204 } else if (close_complete) {
4205 ctx->timestamp = jiffies;
4206 cnic_close_conn(csk);
4207 cnic_cm_upcall(cp, csk, csk->state);
4211 static void cnic_cm_stop_bnx2x_hw(struct cnic_dev *dev)
4213 struct cnic_local *cp = dev->cnic_priv;
4218 if (!netif_running(dev->netdev))
4221 cnic_bnx2x_delete_wait(dev, 0);
4223 cancel_delayed_work(&cp->delete_task);
4224 flush_workqueue(cnic_wq);
4226 if (atomic_read(&cp->iscsi_conn) != 0)
4227 netdev_warn(dev->netdev, "%d iSCSI connections not destroyed\n",
4228 atomic_read(&cp->iscsi_conn));
4231 static int cnic_cm_init_bnx2x_hw(struct cnic_dev *dev)
4233 struct bnx2x *bp = netdev_priv(dev->netdev);
4234 u32 pfid = bp->pfid;
4235 u32 port = BP_PORT(bp);
4237 cnic_init_bnx2x_mac(dev);
4238 cnic_bnx2x_set_tcp_options(dev, 0, 1);
4240 CNIC_WR16(dev, BAR_XSTRORM_INTMEM +
4241 XSTORM_ISCSI_LOCAL_VLAN_OFFSET(pfid), 0);
4243 CNIC_WR(dev, BAR_XSTRORM_INTMEM +
4244 XSTORM_TCP_GLOBAL_DEL_ACK_COUNTER_ENABLED_OFFSET(port), 1);
4245 CNIC_WR(dev, BAR_XSTRORM_INTMEM +
4246 XSTORM_TCP_GLOBAL_DEL_ACK_COUNTER_MAX_COUNT_OFFSET(port),
4249 CNIC_WR8(dev, BAR_XSTRORM_INTMEM +
4250 XSTORM_ISCSI_TCP_VARS_TTL_OFFSET(pfid), DEF_TTL);
4251 CNIC_WR8(dev, BAR_XSTRORM_INTMEM +
4252 XSTORM_ISCSI_TCP_VARS_TOS_OFFSET(pfid), DEF_TOS);
4253 CNIC_WR8(dev, BAR_XSTRORM_INTMEM +
4254 XSTORM_ISCSI_TCP_VARS_ADV_WND_SCL_OFFSET(pfid), 2);
4255 CNIC_WR(dev, BAR_XSTRORM_INTMEM +
4256 XSTORM_TCP_TX_SWS_TIMER_VAL_OFFSET(pfid), DEF_SWS_TIMER);
4258 CNIC_WR(dev, BAR_TSTRORM_INTMEM + TSTORM_TCP_MAX_CWND_OFFSET(pfid),
4259 DEF_MAX_CWND);
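/* Worker that retires connections flagged CTX_FL_DELETE_WAIT: after a
 * context has aged two seconds it issues the destroy ramrod and frees
 * the per-connection resources, rescheduling itself while any context
 * is still pending.
 */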
4263 static void cnic_delete_task(struct work_struct *work)
4265 struct cnic_local *cp;
4266 struct cnic_dev *dev;
4268 int need_resched = 0;
4270 cp = container_of(work, struct cnic_local, delete_task.work);
4273 if (test_and_clear_bit(CNIC_LCL_FL_STOP_ISCSI, &cp->cnic_local_flags)) {
4274 struct drv_ctl_info info;
4276 cnic_ulp_stop_one(cp, CNIC_ULP_ISCSI);
4278 memset(&info, 0, sizeof(struct drv_ctl_info));
4279 info.cmd = DRV_CTL_ISCSI_STOPPED_CMD;
4280 cp->ethdev->drv_ctl(dev->netdev, &info);
4283 for (i = 0; i < cp->max_cid_space; i++) {
4284 struct cnic_context *ctx = &cp->ctx_tbl[i];
4287 if (!test_bit(CTX_FL_OFFLD_START, &ctx->ctx_flags) ||
4288 !test_bit(CTX_FL_DELETE_WAIT, &ctx->ctx_flags))
4291 if (!time_after(jiffies, ctx->timestamp + (2 * HZ))) {
4296 if (!test_and_clear_bit(CTX_FL_DELETE_WAIT, &ctx->ctx_flags))
4299 err = cnic_bnx2x_destroy_ramrod(dev, i);
4301 cnic_free_bnx2x_conn_resc(dev, i);
4303 if (ctx->ulp_proto_id == CNIC_ULP_ISCSI)
4304 atomic_dec(&cp->iscsi_conn);
4306 clear_bit(CTX_FL_OFFLD_START, &ctx->ctx_flags);
4311 queue_delayed_work(cnic_wq, &cp->delete_task,
4312 msecs_to_jiffies(10));
4316 static int cnic_cm_open(struct cnic_dev *dev)
4318 struct cnic_local *cp = dev->cnic_priv;
4321 err = cnic_cm_alloc_mem(dev);
4325 err = cp->start_cm(dev);
4330 INIT_DELAYED_WORK(&cp->delete_task, cnic_delete_task);
4332 dev->cm_create = cnic_cm_create;
4333 dev->cm_destroy = cnic_cm_destroy;
4334 dev->cm_connect = cnic_cm_connect;
4335 dev->cm_abort = cnic_cm_abort;
4336 dev->cm_close = cnic_cm_close;
4337 dev->cm_select_dev = cnic_cm_select_dev;
4339 cp->ulp_handle[CNIC_ULP_L4] = dev;
4340 rcu_assign_pointer(cp->ulp_ops[CNIC_ULP_L4], &cm_ulp_ops);
4344 cnic_cm_free_mem(dev);
4348 static int cnic_cm_shutdown(struct cnic_dev *dev)
4350 struct cnic_local *cp = dev->cnic_priv;
4356 for (i = 0; i < MAX_CM_SK_TBL_SZ; i++) {
4357 struct cnic_sock *csk = &cp->csk_tbl[i];
4359 clear_bit(SK_F_INUSE, &csk->flags);
4360 cnic_cm_cleanup(csk);
4362 cnic_cm_free_mem(dev);
4367 static void cnic_init_context(struct cnic_dev *dev, u32 cid)
4372 cid_addr = GET_CID_ADDR(cid);
4374 for (i = 0; i < CTX_SIZE; i += 4)
4375 cnic_ctx_wr(dev, cid_addr, i, 0);
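/* On the 5709 the context memory lives in host pages: each page-table
 * entry is programmed through the HOST_PAGE_TBL registers, polling for
 * the chip to clear WRITE_REQ, and the valid bit is set or cleared
 * according to the caller's request.
 */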
4378 static int cnic_setup_5709_context(struct cnic_dev *dev, int valid)
4380 struct cnic_local *cp = dev->cnic_priv;
4382 u32 valid_bit = valid ? BNX2_CTX_HOST_PAGE_TBL_DATA0_VALID : 0;
4384 if (BNX2_CHIP(cp) != BNX2_CHIP_5709)
4387 for (i = 0; i < cp->ctx_blks; i++) {
4389 u32 idx = cp->ctx_arr[i].cid / cp->cids_per_blk;
4392 memset(cp->ctx_arr[i].ctx, 0, CNIC_PAGE_SIZE);
4394 CNIC_WR(dev, BNX2_CTX_HOST_PAGE_TBL_DATA0,
4395 (cp->ctx_arr[i].mapping & 0xffffffff) | valid_bit);
4396 CNIC_WR(dev, BNX2_CTX_HOST_PAGE_TBL_DATA1,
4397 (u64) cp->ctx_arr[i].mapping >> 32);
4398 CNIC_WR(dev, BNX2_CTX_HOST_PAGE_TBL_CTRL, idx |
4399 BNX2_CTX_HOST_PAGE_TBL_CTRL_WRITE_REQ);
4400 for (j = 0; j < 10; j++) {
4402 val = CNIC_RD(dev, BNX2_CTX_HOST_PAGE_TBL_CTRL);
4403 if (!(val & BNX2_CTX_HOST_PAGE_TBL_CTRL_WRITE_REQ))
4407 if (val & BNX2_CTX_HOST_PAGE_TBL_CTRL_WRITE_REQ) {
4415 static void cnic_free_irq(struct cnic_dev *dev)
4417 struct cnic_local *cp = dev->cnic_priv;
4418 struct cnic_eth_dev *ethdev = cp->ethdev;
4420 if (ethdev->drv_state & CNIC_DRV_STATE_USING_MSIX) {
4421 cp->disable_int_sync(dev);
4422 tasklet_kill(&cp->cnic_irq_task);
4423 free_irq(ethdev->irq_arr[0].vector, dev);
4427 static int cnic_request_irq(struct cnic_dev *dev)
4429 struct cnic_local *cp = dev->cnic_priv;
4430 struct cnic_eth_dev *ethdev = cp->ethdev;
4433 err = request_irq(ethdev->irq_arr[0].vector, cnic_irq, 0, "cnic", dev);
4435 tasklet_disable(&cp->cnic_irq_task);
4440 static int cnic_init_bnx2_irq(struct cnic_dev *dev)
4442 struct cnic_local *cp = dev->cnic_priv;
4443 struct cnic_eth_dev *ethdev = cp->ethdev;
4445 if (ethdev->drv_state & CNIC_DRV_STATE_USING_MSIX) {
4447 int sblk_num = cp->status_blk_num;
4448 u32 base = ((sblk_num - 1) * BNX2_HC_SB_CONFIG_SIZE) +
4449 BNX2_HC_SB_CONFIG_1;
4451 CNIC_WR(dev, base, BNX2_HC_SB_CONFIG_1_ONE_SHOT);
4453 CNIC_WR(dev, base + BNX2_HC_COMP_PROD_TRIP_OFF, (2 << 16) | 8);
4454 CNIC_WR(dev, base + BNX2_HC_COM_TICKS_OFF, (64 << 16) | 220);
4455 CNIC_WR(dev, base + BNX2_HC_CMD_TICKS_OFF, (64 << 16) | 220);
4457 cp->last_status_idx = cp->status_blk.bnx2->status_idx;
4458 tasklet_setup(&cp->cnic_irq_task, cnic_service_bnx2_msix);
4459 err = cnic_request_irq(dev);
4463 while (cp->status_blk.bnx2->status_completion_producer_index &&
4464 i < 10) {
4465 CNIC_WR(dev, BNX2_HC_COALESCE_NOW,
4466 1 << (11 + sblk_num));
4471 if (cp->status_blk.bnx2->status_completion_producer_index) {
4477 struct status_block *sblk = cp->status_blk.gen;
4478 u32 hc_cmd = CNIC_RD(dev, BNX2_HC_COMMAND);
4481 while (sblk->status_completion_producer_index && i < 10) {
4482 CNIC_WR(dev, BNX2_HC_COMMAND,
4483 hc_cmd | BNX2_HC_COMMAND_COAL_NOW_WO_INT);
4488 if (sblk->status_completion_producer_index)
4495 netdev_err(dev->netdev, "KCQ index not resetting to 0\n");
4499 static void cnic_enable_bnx2_int(struct cnic_dev *dev)
4501 struct cnic_local *cp = dev->cnic_priv;
4502 struct cnic_eth_dev *ethdev = cp->ethdev;
4504 if (!(ethdev->drv_state & CNIC_DRV_STATE_USING_MSIX))
4507 CNIC_WR(dev, BNX2_PCICFG_INT_ACK_CMD, cp->int_num |
4508 BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID | cp->last_status_idx);
4511 static void cnic_disable_bnx2_int_sync(struct cnic_dev *dev)
4513 struct cnic_local *cp = dev->cnic_priv;
4514 struct cnic_eth_dev *ethdev = cp->ethdev;
4516 if (!(ethdev->drv_state & CNIC_DRV_STATE_USING_MSIX))
4519 CNIC_WR(dev, BNX2_PCICFG_INT_ACK_CMD, cp->int_num |
4520 BNX2_PCICFG_INT_ACK_CMD_MASK_INT);
4521 CNIC_RD(dev, BNX2_PCICFG_INT_ACK_CMD);
4522 synchronize_irq(ethdev->irq_arr[0].vector);
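/* Set up the bnx2 L2 TX ring used through the uio interface.  Every
 * BD points at the single shared DMA buffer, and the chip revision
 * (5709 vs. earlier) selects between the XI and non-XI context
 * register layouts.
 */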
4525 static void cnic_init_bnx2_tx_ring(struct cnic_dev *dev)
4527 struct cnic_local *cp = dev->cnic_priv;
4528 struct cnic_eth_dev *ethdev = cp->ethdev;
4529 struct cnic_uio_dev *udev = cp->udev;
4530 u32 cid_addr, tx_cid, sb_id;
4531 u32 val, offset0, offset1, offset2, offset3;
4533 struct bnx2_tx_bd *txbd;
4534 dma_addr_t buf_map, ring_map = udev->l2_ring_map;
4535 struct status_block *s_blk = cp->status_blk.gen;
4537 sb_id = cp->status_blk_num;
4539 cp->tx_cons_ptr = &s_blk->status_tx_quick_consumer_index2;
4540 if (ethdev->drv_state & CNIC_DRV_STATE_USING_MSIX) {
4541 struct status_block_msix *sblk = cp->status_blk.bnx2;
4543 tx_cid = TX_TSS_CID + sb_id - 1;
4544 CNIC_WR(dev, BNX2_TSCH_TSS_CFG, (sb_id << 24) |
4546 cp->tx_cons_ptr = &sblk->status_tx_quick_consumer_index;
4548 cp->tx_cons = *cp->tx_cons_ptr;
4550 cid_addr = GET_CID_ADDR(tx_cid);
4551 if (BNX2_CHIP(cp) == BNX2_CHIP_5709) {
4552 u32 cid_addr2 = GET_CID_ADDR(tx_cid + 4) + 0x40;
4554 for (i = 0; i < PHY_CTX_SIZE; i += 4)
4555 cnic_ctx_wr(dev, cid_addr2, i, 0);
4557 offset0 = BNX2_L2CTX_TYPE_XI;
4558 offset1 = BNX2_L2CTX_CMD_TYPE_XI;
4559 offset2 = BNX2_L2CTX_TBDR_BHADDR_HI_XI;
4560 offset3 = BNX2_L2CTX_TBDR_BHADDR_LO_XI;
4562 cnic_init_context(dev, tx_cid);
4563 cnic_init_context(dev, tx_cid + 1);
4565 offset0 = BNX2_L2CTX_TYPE;
4566 offset1 = BNX2_L2CTX_CMD_TYPE;
4567 offset2 = BNX2_L2CTX_TBDR_BHADDR_HI;
4568 offset3 = BNX2_L2CTX_TBDR_BHADDR_LO;
4570 val = BNX2_L2CTX_TYPE_TYPE_L2 | BNX2_L2CTX_TYPE_SIZE_L2;
4571 cnic_ctx_wr(dev, cid_addr, offset0, val);
4573 val = BNX2_L2CTX_CMD_TYPE_TYPE_L2 | (8 << 16);
4574 cnic_ctx_wr(dev, cid_addr, offset1, val);
4576 txbd = udev->l2_ring;
4578 buf_map = udev->l2_buf_map;
4579 for (i = 0; i < BNX2_MAX_TX_DESC_CNT; i++, txbd++) {
4580 txbd->tx_bd_haddr_hi = (u64) buf_map >> 32;
4581 txbd->tx_bd_haddr_lo = (u64) buf_map & 0xffffffff;
4583 val = (u64) ring_map >> 32;
4584 cnic_ctx_wr(dev, cid_addr, offset2, val);
4585 txbd->tx_bd_haddr_hi = val;
4587 val = (u64) ring_map & 0xffffffff;
4588 cnic_ctx_wr(dev, cid_addr, offset3, val);
4589 txbd->tx_bd_haddr_lo = val;
4592 static void cnic_init_bnx2_rx_ring(struct cnic_dev *dev)
4594 struct cnic_local *cp = dev->cnic_priv;
4595 struct cnic_eth_dev *ethdev = cp->ethdev;
4596 struct cnic_uio_dev *udev = cp->udev;
4597 u32 cid_addr, sb_id, val, coal_reg, coal_val;
4599 struct bnx2_rx_bd *rxbd;
4600 struct status_block *s_blk = cp->status_blk.gen;
4601 dma_addr_t ring_map = udev->l2_ring_map;
4603 sb_id = cp->status_blk_num;
4604 cnic_init_context(dev, 2);
4605 cp->rx_cons_ptr = &s_blk->status_rx_quick_consumer_index2;
4606 coal_reg = BNX2_HC_COMMAND;
4607 coal_val = CNIC_RD(dev, coal_reg);
4608 if (ethdev->drv_state & CNIC_DRV_STATE_USING_MSIX) {
4609 struct status_block_msix *sblk = cp->status_blk.bnx2;
4611 cp->rx_cons_ptr = &sblk->status_rx_quick_consumer_index;
4612 coal_reg = BNX2_HC_COALESCE_NOW;
4613 coal_val = 1 << (11 + sb_id);
4616 while (*cp->rx_cons_ptr == 0 && i < 10) {
4617 CNIC_WR(dev, coal_reg, coal_val);
4622 cp->rx_cons = *cp->rx_cons_ptr;
4624 cid_addr = GET_CID_ADDR(2);
4625 val = BNX2_L2CTX_CTX_TYPE_CTX_BD_CHN_TYPE_VALUE |
4626 BNX2_L2CTX_CTX_TYPE_SIZE_L2 | (0x02 << 8);
4627 cnic_ctx_wr(dev, cid_addr, BNX2_L2CTX_CTX_TYPE, val);
4630 val = 2 << BNX2_L2CTX_L2_STATUSB_NUM_SHIFT;
4632 val = BNX2_L2CTX_L2_STATUSB_NUM(sb_id);
4633 cnic_ctx_wr(dev, cid_addr, BNX2_L2CTX_HOST_BDIDX, val);
4635 rxbd = udev->l2_ring + CNIC_PAGE_SIZE;
4636 for (i = 0; i < BNX2_MAX_RX_DESC_CNT; i++, rxbd++) {
4638 int n = (i % cp->l2_rx_ring_size) + 1;
4640 buf_map = udev->l2_buf_map + (n * cp->l2_single_buf_size);
4641 rxbd->rx_bd_len = cp->l2_single_buf_size;
4642 rxbd->rx_bd_flags = RX_BD_FLAGS_START | RX_BD_FLAGS_END;
4643 rxbd->rx_bd_haddr_hi = (u64) buf_map >> 32;
4644 rxbd->rx_bd_haddr_lo = (u64) buf_map & 0xffffffff;
4646 val = (u64) (ring_map + CNIC_PAGE_SIZE) >> 32;
4647 cnic_ctx_wr(dev, cid_addr, BNX2_L2CTX_NX_BDHADDR_HI, val);
4648 rxbd->rx_bd_haddr_hi = val;
4650 val = (u64) (ring_map + CNIC_PAGE_SIZE) & 0xffffffff;
4651 cnic_ctx_wr(dev, cid_addr, BNX2_L2CTX_NX_BDHADDR_LO, val);
4652 rxbd->rx_bd_haddr_lo = val;
4654 val = cnic_reg_rd_ind(dev, BNX2_RXP_SCRATCH_RXP_FLOOD);
4655 cnic_reg_wr_ind(dev, BNX2_RXP_SCRATCH_RXP_FLOOD, val | (1 << 2));
4658 static void cnic_shutdown_bnx2_rx_ring(struct cnic_dev *dev)
4660 struct kwqe *wqes[1], l2kwqe;
4662 memset(&l2kwqe, 0, sizeof(l2kwqe));
4664 l2kwqe.kwqe_op_flag = (L2_LAYER_CODE << KWQE_LAYER_SHIFT) |
4665 (L2_KWQE_OPCODE_VALUE_FLUSH <<
4666 KWQE_OPCODE_SHIFT) | 2;
4667 dev->submit_kwqes(dev, wqes, 1);
4670 static void cnic_set_bnx2_mac(struct cnic_dev *dev)
4672 struct cnic_local *cp = dev->cnic_priv;
4673 u32 val;
4675 val = cp->func << 2;
4677 cp->shmem_base = cnic_reg_rd_ind(dev, BNX2_SHM_HDR_ADDR_0 + val);
4679 val = cnic_reg_rd_ind(dev, cp->shmem_base +
4680 BNX2_PORT_HW_CFG_ISCSI_MAC_UPPER);
4681 dev->mac_addr[0] = (u8) (val >> 8);
4682 dev->mac_addr[1] = (u8) val;
4684 CNIC_WR(dev, BNX2_EMAC_MAC_MATCH4, val);
4686 val = cnic_reg_rd_ind(dev, cp->shmem_base +
4687 BNX2_PORT_HW_CFG_ISCSI_MAC_LOWER);
4688 dev->mac_addr[2] = (u8) (val >> 24);
4689 dev->mac_addr[3] = (u8) (val >> 16);
4690 dev->mac_addr[4] = (u8) (val >> 8);
4691 dev->mac_addr[5] = (u8) val;
4693 CNIC_WR(dev, BNX2_EMAC_MAC_MATCH5, val);
4695 val = 4 | BNX2_RPM_SORT_USER2_BC_EN;
4696 if (BNX2_CHIP(cp) != BNX2_CHIP_5709)
4697 val |= BNX2_RPM_SORT_USER2_PROM_VLAN;
4699 CNIC_WR(dev, BNX2_RPM_SORT_USER2, 0x0);
4700 CNIC_WR(dev, BNX2_RPM_SORT_USER2, val);
4701 CNIC_WR(dev, BNX2_RPM_SORT_USER2, val | BNX2_RPM_SORT_USER2_ENA);
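/* The iSCSI MAC address is read out of shared memory and programmed
 * into the EMAC_MAC_MATCH4/5 pair as an extra unicast match filter;
 * the RPM sort register is then re-armed (written as 0, then the
 * value, then the value with the enable bit), presumably so the chip
 * sorts matching frames to this function's kernel-bypass path.
 */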
4704 static int cnic_start_bnx2_hw(struct cnic_dev *dev)
4706 struct cnic_local *cp = dev->cnic_priv;
4707 struct cnic_eth_dev *ethdev = cp->ethdev;
4708 struct status_block *sblk = cp->status_blk.gen;
4709 u32 val, kcq_cid_addr, kwq_cid_addr;
4710 int err;
4712 cnic_set_bnx2_mac(dev);
4714 val = CNIC_RD(dev, BNX2_MQ_CONFIG);
4715 val &= ~BNX2_MQ_CONFIG_KNL_BYP_BLK_SIZE;
4716 if (CNIC_PAGE_BITS > 12)
4717 val |= (12 - 8) << 4;
4718 else
4719 val |= (CNIC_PAGE_BITS - 8) << 4;
4721 CNIC_WR(dev, BNX2_MQ_CONFIG, val);
4723 CNIC_WR(dev, BNX2_HC_COMP_PROD_TRIP, (2 << 16) | 8);
4724 CNIC_WR(dev, BNX2_HC_COM_TICKS, (64 << 16) | 220);
4725 CNIC_WR(dev, BNX2_HC_CMD_TICKS, (64 << 16) | 220);
4727 err = cnic_setup_5709_context(dev, 1);
4728 if (err)
4729 return err;
4731 cnic_init_context(dev, KWQ_CID);
4732 cnic_init_context(dev, KCQ_CID);
4734 kwq_cid_addr = GET_CID_ADDR(KWQ_CID);
4735 cp->kwq_io_addr = MB_GET_CID_ADDR(KWQ_CID) + L5_KRNLQ_HOST_QIDX;
4737 cp->max_kwq_idx = MAX_KWQ_IDX;
4738 cp->kwq_prod_idx = 0;
4739 cp->kwq_con_idx = 0;
4740 set_bit(CNIC_LCL_FL_KWQ_INIT, &cp->cnic_local_flags);
4742 if (BNX2_CHIP(cp) == BNX2_CHIP_5706 || BNX2_CHIP(cp) == BNX2_CHIP_5708)
4743 cp->kwq_con_idx_ptr = &sblk->status_rx_quick_consumer_index15;
4744 else
4745 cp->kwq_con_idx_ptr = &sblk->status_cmd_consumer_index;
4747 /* Initialize the kernel work queue context. */
4748 val = KRNLQ_TYPE_TYPE_KRNLQ | KRNLQ_SIZE_TYPE_SIZE |
4749 (CNIC_PAGE_BITS - 8) | KRNLQ_FLAGS_QE_SELF_SEQ;
4750 cnic_ctx_wr(dev, kwq_cid_addr, L5_KRNLQ_TYPE, val);
4752 val = (CNIC_PAGE_SIZE / sizeof(struct kwqe) - 1) << 16;
4753 cnic_ctx_wr(dev, kwq_cid_addr, L5_KRNLQ_QE_SELF_SEQ_MAX, val);
4755 val = ((CNIC_PAGE_SIZE / sizeof(struct kwqe)) << 16) | KWQ_PAGE_CNT;
4756 cnic_ctx_wr(dev, kwq_cid_addr, L5_KRNLQ_PGTBL_NPAGES, val);
4758 val = (u32) ((u64) cp->kwq_info.pgtbl_map >> 32);
4759 cnic_ctx_wr(dev, kwq_cid_addr, L5_KRNLQ_PGTBL_HADDR_HI, val);
4761 val = (u32) cp->kwq_info.pgtbl_map;
4762 cnic_ctx_wr(dev, kwq_cid_addr, L5_KRNLQ_PGTBL_HADDR_LO, val);
4764 kcq_cid_addr = GET_CID_ADDR(KCQ_CID);
4765 cp->kcq1.io_addr = MB_GET_CID_ADDR(KCQ_CID) + L5_KRNLQ_HOST_QIDX;
4767 cp->kcq1.sw_prod_idx = 0;
4768 cp->kcq1.hw_prod_idx_ptr =
4769 &sblk->status_completion_producer_index;
4771 cp->kcq1.status_idx_ptr = &sblk->status_idx;
4773 /* Initialize the kernel complete queue context. */
4774 val = KRNLQ_TYPE_TYPE_KRNLQ | KRNLQ_SIZE_TYPE_SIZE |
4775 (CNIC_PAGE_BITS - 8) | KRNLQ_FLAGS_QE_SELF_SEQ;
4776 cnic_ctx_wr(dev, kcq_cid_addr, L5_KRNLQ_TYPE, val);
4778 val = (CNIC_PAGE_SIZE / sizeof(struct kcqe) - 1) << 16;
4779 cnic_ctx_wr(dev, kcq_cid_addr, L5_KRNLQ_QE_SELF_SEQ_MAX, val);
4781 val = ((CNIC_PAGE_SIZE / sizeof(struct kcqe)) << 16) | KCQ_PAGE_CNT;
4782 cnic_ctx_wr(dev, kcq_cid_addr, L5_KRNLQ_PGTBL_NPAGES, val);
4784 val = (u32) ((u64) cp->kcq1.dma.pgtbl_map >> 32);
4785 cnic_ctx_wr(dev, kcq_cid_addr, L5_KRNLQ_PGTBL_HADDR_HI, val);
4787 val = (u32) cp->kcq1.dma.pgtbl_map;
4788 cnic_ctx_wr(dev, kcq_cid_addr, L5_KRNLQ_PGTBL_HADDR_LO, val);
4790 cp->int_num = 0;
4791 if (ethdev->drv_state & CNIC_DRV_STATE_USING_MSIX) {
4792 struct status_block_msix *msblk = cp->status_blk.bnx2;
4793 u32 sb_id = cp->status_blk_num;
4794 u32 sb = BNX2_L2CTX_L5_STATUSB_NUM(sb_id);
4796 cp->kcq1.hw_prod_idx_ptr =
4797 &msblk->status_completion_producer_index;
4798 cp->kcq1.status_idx_ptr = &msblk->status_idx;
4799 cp->kwq_con_idx_ptr = &msblk->status_cmd_consumer_index;
4800 cp->int_num = sb_id << BNX2_PCICFG_INT_ACK_CMD_INT_NUM_SHIFT;
4801 cnic_ctx_wr(dev, kwq_cid_addr, L5_KRNLQ_HOST_QIDX, sb);
4802 cnic_ctx_wr(dev, kcq_cid_addr, L5_KRNLQ_HOST_QIDX, sb);
4803 }
4805 /* Enable Command Scheduler notification when we write to the
4806 * host producer index of the kernel contexts. */
4807 CNIC_WR(dev, BNX2_MQ_KNL_CMD_MASK1, 2);
4809 /* Enable Command Scheduler notification when we write to either
4810 * the Send Queue or Receive Queue producer indexes of the kernel
4811 * bypass contexts. */
4812 CNIC_WR(dev, BNX2_MQ_KNL_BYP_CMD_MASK1, 7);
4813 CNIC_WR(dev, BNX2_MQ_KNL_BYP_WRITE_MASK1, 7);
4815 /* Notify COM when the driver posts an application buffer. */
4816 CNIC_WR(dev, BNX2_MQ_KNL_RX_V2P_MASK2, 0x2000);
4818 /* Set the CP and COM doorbells. These two processors poll the
4819 * doorbell for a non-zero value before running. This must be done
4820 * after setting up the kernel queue contexts. */
4821 cnic_reg_wr_ind(dev, BNX2_CP_SCRATCH + 0x20, 1);
4822 cnic_reg_wr_ind(dev, BNX2_COM_SCRATCH + 0x20, 1);
4824 cnic_init_bnx2_tx_ring(dev);
4825 cnic_init_bnx2_rx_ring(dev);
4827 err = cnic_init_bnx2_irq(dev);
4828 if (err) {
4829 netdev_err(dev->netdev, "cnic_init_irq failed\n");
4830 cnic_reg_wr_ind(dev, BNX2_CP_SCRATCH + 0x20, 0);
4831 cnic_reg_wr_ind(dev, BNX2_COM_SCRATCH + 0x20, 0);
4832 return err;
4833 }
4835 ethdev->drv_state |= CNIC_DRV_STATE_HANDLES_IRQ;
4837 return 0;
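/* Bring-up order for the bnx2 path: MAC filters, kernel-bypass MQ
 * block size, host-coalescing thresholds, 5709 context memory, KWQ
 * and KCQ contexts, doorbell notification masks, CP/COM scratch
 * doorbells, L2 rings, and finally the IRQ; the scratch doorbells
 * are rolled back if IRQ setup fails.
 */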
4840 static void cnic_setup_bnx2x_context(struct cnic_dev *dev)
4842 struct cnic_local *cp = dev->cnic_priv;
4843 struct cnic_eth_dev *ethdev = cp->ethdev;
4844 u32 start_offset = ethdev->ctx_tbl_offset;
4845 int i;
4847 for (i = 0; i < cp->ctx_blks; i++) {
4848 struct cnic_ctx *ctx = &cp->ctx_arr[i];
4849 dma_addr_t map = ctx->mapping;
4851 if (cp->ctx_align) {
4852 unsigned long mask = cp->ctx_align - 1;
4854 map = (map + mask) & ~mask;
4855 }
4857 cnic_ctx_tbl_wr(dev, start_offset + i, map);
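/* Each context block's DMA address is rounded up to the ctx_align
 * boundary before being written into the chip's context table; the
 * earlier allocation is presumed to have included enough slack for
 * this rounding.
 */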
4861 static int cnic_init_bnx2x_irq(struct cnic_dev *dev)
4863 struct cnic_local *cp = dev->cnic_priv;
4864 struct cnic_eth_dev *ethdev = cp->ethdev;
4865 int err = 0;
4867 tasklet_setup(&cp->cnic_irq_task, cnic_service_bnx2x_bh);
4868 if (ethdev->drv_state & CNIC_DRV_STATE_USING_MSIX)
4869 err = cnic_request_irq(dev);
4871 return err;
4874 static inline void cnic_storm_memset_hc_disable(struct cnic_dev *dev,
4875 u16 sb_id, u8 sb_index,
4876 u8 disable)
4878 struct bnx2x *bp = netdev_priv(dev->netdev);
4880 u32 addr = BAR_CSTRORM_INTMEM +
4881 CSTORM_STATUS_BLOCK_DATA_OFFSET(sb_id) +
4882 offsetof(struct hc_status_block_data_e1x, index_data) +
4883 sizeof(struct hc_index_data)*sb_index +
4884 offsetof(struct hc_index_data, flags);
4885 u16 flags = CNIC_RD16(dev, addr);
4887 flags &= ~HC_INDEX_DATA_HC_ENABLED;
4888 flags |= (((~disable) << HC_INDEX_DATA_HC_ENABLED_SHIFT) &
4889 HC_INDEX_DATA_HC_ENABLED);
4890 CNIC_WR16(dev, addr, flags);
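/* Read-modify-write of the per-index host-coalescing flags in CSTORM
 * internal memory: clearing HC_INDEX_DATA_HC_ENABLED stops the chip
 * from generating interrupts for that status block index; writing it
 * back with disable == 0 re-enables them.
 */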
4893 static void cnic_enable_bnx2x_int(struct cnic_dev *dev)
4895 struct cnic_local *cp = dev->cnic_priv;
4896 struct bnx2x *bp = netdev_priv(dev->netdev);
4897 u8 sb_id = cp->status_blk_num;
4899 CNIC_WR8(dev, BAR_CSTRORM_INTMEM +
4900 CSTORM_STATUS_BLOCK_DATA_OFFSET(sb_id) +
4901 offsetof(struct hc_status_block_data_e1x, index_data) +
4902 sizeof(struct hc_index_data)*HC_INDEX_ISCSI_EQ_CONS +
4903 offsetof(struct hc_index_data, timeout), 64 / 4);
4904 cnic_storm_memset_hc_disable(dev, sb_id, HC_INDEX_ISCSI_EQ_CONS, 0);
4907 static void cnic_disable_bnx2x_int_sync(struct cnic_dev *dev)
4911 static void cnic_init_bnx2x_tx_ring(struct cnic_dev *dev,
4912 struct client_init_ramrod_data *data)
4914 struct cnic_local *cp = dev->cnic_priv;
4915 struct bnx2x *bp = netdev_priv(dev->netdev);
4916 struct cnic_uio_dev *udev = cp->udev;
4917 union eth_tx_bd_types *txbd = (union eth_tx_bd_types *) udev->l2_ring;
4918 dma_addr_t buf_map, ring_map = udev->l2_ring_map;
4919 struct host_sp_status_block *sb = cp->bnx2x_def_status_blk;
4920 int i;
4921 u32 cli = cp->ethdev->iscsi_l2_client_id;
4922 u32 val;
4924 memset(txbd, 0, CNIC_PAGE_SIZE);
4926 buf_map = udev->l2_buf_map;
4927 for (i = 0; i < BNX2_MAX_TX_DESC_CNT; i += 3, txbd += 3) {
4928 struct eth_tx_start_bd *start_bd = &txbd->start_bd;
4929 struct eth_tx_parse_bd_e1x *pbd_e1x =
4930 &((txbd + 1)->parse_bd_e1x);
4931 struct eth_tx_parse_bd_e2 *pbd_e2 = &((txbd + 1)->parse_bd_e2);
4932 struct eth_tx_bd *reg_bd = &((txbd + 2)->reg_bd);
4934 start_bd->addr_hi = cpu_to_le32((u64) buf_map >> 32);
4935 start_bd->addr_lo = cpu_to_le32(buf_map & 0xffffffff);
4936 reg_bd->addr_hi = start_bd->addr_hi;
4937 reg_bd->addr_lo = start_bd->addr_lo + 0x10;
4938 start_bd->nbytes = cpu_to_le16(0x10);
4939 start_bd->nbd = cpu_to_le16(3);
4940 start_bd->bd_flags.as_bitfield = ETH_TX_BD_FLAGS_START_BD;
4941 start_bd->general_data &= ~ETH_TX_START_BD_PARSE_NBDS;
4942 start_bd->general_data |= (1 << ETH_TX_START_BD_HDR_NBDS_SHIFT);
4944 if (BNX2X_CHIP_IS_E2_PLUS(bp))
4945 pbd_e2->parsing_data = (UNICAST_ADDRESS <<
4946 ETH_TX_PARSE_BD_E2_ETH_ADDR_TYPE_SHIFT);
4947 else
4948 pbd_e1x->global_data = (UNICAST_ADDRESS <<
4949 ETH_TX_PARSE_BD_E1X_ETH_ADDR_TYPE_SHIFT);
4950 }
4952 val = (u64) ring_map >> 32;
4953 txbd->next_bd.addr_hi = cpu_to_le32(val);
4955 data->tx.tx_bd_page_base.hi = cpu_to_le32(val);
4957 val = (u64) ring_map & 0xffffffff;
4958 txbd->next_bd.addr_lo = cpu_to_le32(val);
4960 data->tx.tx_bd_page_base.lo = cpu_to_le32(val);
4962 /* Other ramrod params */
4963 data->tx.tx_sb_index_number = HC_SP_INDEX_ETH_ISCSI_CQ_CONS;
4964 data->tx.tx_status_block_id = BNX2X_DEF_SB_ID;
4966 /* reset xstorm per client statistics */
4967 if (cli < MAX_STAT_COUNTER_ID) {
4968 data->general.statistics_zero_flg = 1;
4969 data->general.statistics_en_flg = 1;
4970 data->general.statistics_counter_id = cli;
4971 }
4973 cp->tx_cons_ptr =
4974 &sb->sp_sb.index_values[HC_SP_INDEX_ETH_ISCSI_CQ_CONS];
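/* Each packet slot in the UIO TX ring is a fixed 3-BD template
 * (start BD, parse BD, regular BD) pre-pointing into the shared L2
 * buffer, so userspace only has to fill in the frame data and ring
 * the doorbell. The nbytes/addr values written here look like
 * placeholders that the UIO client overwrites per packet.
 */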
4977 static void cnic_init_bnx2x_rx_ring(struct cnic_dev *dev,
4978 struct client_init_ramrod_data *data)
4980 struct cnic_local *cp = dev->cnic_priv;
4981 struct bnx2x *bp = netdev_priv(dev->netdev);
4982 struct cnic_uio_dev *udev = cp->udev;
4983 struct eth_rx_bd *rxbd = (struct eth_rx_bd *) (udev->l2_ring +
4984 CNIC_PAGE_SIZE);
4985 struct eth_rx_cqe_next_page *rxcqe = (struct eth_rx_cqe_next_page *)
4986 (udev->l2_ring + (2 * CNIC_PAGE_SIZE));
4987 struct host_sp_status_block *sb = cp->bnx2x_def_status_blk;
4988 int i;
4989 u32 cli = cp->ethdev->iscsi_l2_client_id;
4990 int cl_qzone_id = BNX2X_CL_QZONE_ID(bp, cli);
4991 u32 val;
4992 dma_addr_t ring_map = udev->l2_ring_map;
4995 data->general.client_id = cli;
4996 data->general.activate_flg = 1;
4997 data->general.sp_client_id = cli;
4998 data->general.mtu = cpu_to_le16(cp->l2_single_buf_size - 14);
4999 data->general.func_id = bp->pfid;
5001 for (i = 0; i < BNX2X_MAX_RX_DESC_CNT; i++, rxbd++) {
5002 dma_addr_t buf_map;
5003 int n = (i % cp->l2_rx_ring_size) + 1;
5005 buf_map = udev->l2_buf_map + (n * cp->l2_single_buf_size);
5006 rxbd->addr_hi = cpu_to_le32((u64) buf_map >> 32);
5007 rxbd->addr_lo = cpu_to_le32(buf_map & 0xffffffff);
5008 }
5010 val = (u64) (ring_map + CNIC_PAGE_SIZE) >> 32;
5011 rxbd->addr_hi = cpu_to_le32(val);
5012 data->rx.bd_page_base.hi = cpu_to_le32(val);
5014 val = (u64) (ring_map + CNIC_PAGE_SIZE) & 0xffffffff;
5015 rxbd->addr_lo = cpu_to_le32(val);
5016 data->rx.bd_page_base.lo = cpu_to_le32(val);
5018 rxcqe += BNX2X_MAX_RCQ_DESC_CNT;
5019 val = (u64) (ring_map + (2 * CNIC_PAGE_SIZE)) >> 32;
5020 rxcqe->addr_hi = cpu_to_le32(val);
5021 data->rx.cqe_page_base.hi = cpu_to_le32(val);
5023 val = (u64) (ring_map + (2 * CNIC_PAGE_SIZE)) & 0xffffffff;
5024 rxcqe->addr_lo = cpu_to_le32(val);
5025 data->rx.cqe_page_base.lo = cpu_to_le32(val);
5027 /* Other ramrod params */
5028 data->rx.client_qzone_id = cl_qzone_id;
5029 data->rx.rx_sb_index_number = HC_SP_INDEX_ETH_ISCSI_RX_CQ_CONS;
5030 data->rx.status_block_id = BNX2X_DEF_SB_ID;
5032 data->rx.cache_line_alignment_log_size = L1_CACHE_SHIFT;
5034 data->rx.max_bytes_on_bd = cpu_to_le16(cp->l2_single_buf_size);
5035 data->rx.outer_vlan_removal_enable_flg = 1;
5036 data->rx.silent_vlan_removal_flg = 1;
5037 data->rx.silent_vlan_value = 0;
5038 data->rx.silent_vlan_mask = 0xffff;
5040 cp->rx_cons_ptr =
5041 &sb->sp_sb.index_values[HC_SP_INDEX_ETH_ISCSI_RX_CQ_CONS];
5042 cp->rx_cons = *cp->rx_cons_ptr;
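/* Single-page BD and RCQ rings: the next-page entry of the BD page
 * and the next-page element of the CQE page are both pointed back at
 * their own page base, and the same addresses are handed to the
 * firmware in the client-init ramrod data.
 */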
5045 static void cnic_init_bnx2x_kcq(struct cnic_dev *dev)
5047 struct cnic_local *cp = dev->cnic_priv;
5048 struct bnx2x *bp = netdev_priv(dev->netdev);
5049 u32 pfid = bp->pfid;
5051 cp->kcq1.io_addr = BAR_CSTRORM_INTMEM +
5052 CSTORM_ISCSI_EQ_PROD_OFFSET(pfid, 0);
5053 cp->kcq1.sw_prod_idx = 0;
5055 if (BNX2X_CHIP_IS_E2_PLUS(bp)) {
5056 struct host_hc_status_block_e2 *sb = cp->status_blk.gen;
5058 cp->kcq1.hw_prod_idx_ptr =
5059 &sb->sb.index_values[HC_INDEX_ISCSI_EQ_CONS];
5060 cp->kcq1.status_idx_ptr =
5061 &sb->sb.running_index[SM_RX_ID];
5062 } else {
5063 struct host_hc_status_block_e1x *sb = cp->status_blk.gen;
5065 cp->kcq1.hw_prod_idx_ptr =
5066 &sb->sb.index_values[HC_INDEX_ISCSI_EQ_CONS];
5067 cp->kcq1.status_idx_ptr =
5068 &sb->sb.running_index[SM_RX_ID];
5069 }
5071 if (BNX2X_CHIP_IS_E2_PLUS(bp)) {
5072 struct host_hc_status_block_e2 *sb = cp->status_blk.gen;
5074 cp->kcq2.io_addr = BAR_USTRORM_INTMEM +
5075 USTORM_FCOE_EQ_PROD_OFFSET(pfid);
5076 cp->kcq2.sw_prod_idx = 0;
5077 cp->kcq2.hw_prod_idx_ptr =
5078 &sb->sb.index_values[HC_INDEX_FCOE_EQ_CONS];
5079 cp->kcq2.status_idx_ptr =
5080 &sb->sb.running_index[SM_RX_ID];
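/* kcq1 is the iSCSI kernel event queue, produced through the CSTORM
 * EQ producer and consumed against HC_INDEX_ISCSI_EQ_CONS; on E2 and
 * later chips kcq2 plays the same role for FCoE through the USTORM
 * EQ producer and HC_INDEX_FCOE_EQ_CONS.
 */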
5084 static int cnic_start_bnx2x_hw(struct cnic_dev *dev)
5086 struct cnic_local *cp = dev->cnic_priv;
5087 struct bnx2x *bp = netdev_priv(dev->netdev);
5088 struct cnic_eth_dev *ethdev = cp->ethdev;
5089 int ret;
5090 u32 pfid;
5092 dev->stats_addr = ethdev->addr_drv_info_to_mcp;
5093 cp->func = bp->pf_num;
5095 pfid = bp->pfid;
5097 ret = cnic_init_id_tbl(&cp->cid_tbl, MAX_ISCSI_TBL_SZ,
5098 cp->iscsi_start_cid, 0);
5100 if (ret)
5101 return -ENOMEM;
5103 if (BNX2X_CHIP_IS_E2_PLUS(bp)) {
5104 ret = cnic_init_id_tbl(&cp->fcoe_cid_tbl, dev->max_fcoe_conn,
5105 cp->fcoe_start_cid, 0);
5107 if (ret)
5108 return -ENOMEM;
5109 }
5111 cp->bnx2x_igu_sb_id = ethdev->irq_arr[0].status_blk_num2;
5113 cnic_init_bnx2x_kcq(dev);
5115 /* Only 1 EQ */
5116 CNIC_WR16(dev, cp->kcq1.io_addr, MAX_KCQ_IDX);
5117 CNIC_WR(dev, BAR_CSTRORM_INTMEM +
5118 CSTORM_ISCSI_EQ_CONS_OFFSET(pfid, 0), 0);
5119 CNIC_WR(dev, BAR_CSTRORM_INTMEM +
5120 CSTORM_ISCSI_EQ_NEXT_PAGE_ADDR_OFFSET(pfid, 0),
5121 cp->kcq1.dma.pg_map_arr[1] & 0xffffffff);
5122 CNIC_WR(dev, BAR_CSTRORM_INTMEM +
5123 CSTORM_ISCSI_EQ_NEXT_PAGE_ADDR_OFFSET(pfid, 0) + 4,
5124 (u64) cp->kcq1.dma.pg_map_arr[1] >> 32);
5125 CNIC_WR(dev, BAR_CSTRORM_INTMEM +
5126 CSTORM_ISCSI_EQ_NEXT_EQE_ADDR_OFFSET(pfid, 0),
5127 cp->kcq1.dma.pg_map_arr[0] & 0xffffffff);
5128 CNIC_WR(dev, BAR_CSTRORM_INTMEM +
5129 CSTORM_ISCSI_EQ_NEXT_EQE_ADDR_OFFSET(pfid, 0) + 4,
5130 (u64) cp->kcq1.dma.pg_map_arr[0] >> 32);
5131 CNIC_WR8(dev, BAR_CSTRORM_INTMEM +
5132 CSTORM_ISCSI_EQ_NEXT_PAGE_ADDR_VALID_OFFSET(pfid, 0), 1);
5133 CNIC_WR16(dev, BAR_CSTRORM_INTMEM +
5134 CSTORM_ISCSI_EQ_SB_NUM_OFFSET(pfid, 0), cp->status_blk_num);
5135 CNIC_WR8(dev, BAR_CSTRORM_INTMEM +
5136 CSTORM_ISCSI_EQ_SB_INDEX_OFFSET(pfid, 0),
5137 HC_INDEX_ISCSI_EQ_CONS);
5139 CNIC_WR(dev, BAR_USTRORM_INTMEM +
5140 USTORM_ISCSI_GLOBAL_BUF_PHYS_ADDR_OFFSET(pfid),
5141 cp->gbl_buf_info.pg_map_arr[0] & 0xffffffff);
5142 CNIC_WR(dev, BAR_USTRORM_INTMEM +
5143 USTORM_ISCSI_GLOBAL_BUF_PHYS_ADDR_OFFSET(pfid) + 4,
5144 (u64) cp->gbl_buf_info.pg_map_arr[0] >> 32);
5146 CNIC_WR(dev, BAR_TSTRORM_INTMEM +
5147 TSTORM_ISCSI_TCP_LOCAL_ADV_WND_OFFSET(pfid), DEF_RCV_BUF);
5149 cnic_setup_bnx2x_context(dev);
5151 ret = cnic_init_bnx2x_irq(dev);
5152 if (ret)
5153 return ret;
5155 ethdev->drv_state |= CNIC_DRV_STATE_HANDLES_IRQ;
5157 return 0;
5159 static void cnic_init_rings(struct cnic_dev *dev)
5161 struct cnic_local *cp = dev->cnic_priv;
5162 struct bnx2x *bp = netdev_priv(dev->netdev);
5163 struct cnic_uio_dev *udev = cp->udev;
5165 if (test_bit(CNIC_LCL_FL_RINGS_INITED, &cp->cnic_local_flags))
5166 return;
5168 if (test_bit(CNIC_F_BNX2_CLASS, &dev->flags)) {
5169 cnic_init_bnx2_tx_ring(dev);
5170 cnic_init_bnx2_rx_ring(dev);
5171 set_bit(CNIC_LCL_FL_RINGS_INITED, &cp->cnic_local_flags);
5172 } else if (test_bit(CNIC_F_BNX2X_CLASS, &dev->flags)) {
5173 u32 cli = cp->ethdev->iscsi_l2_client_id;
5174 u32 cid = cp->ethdev->iscsi_l2_cid;
5175 u32 cl_qzone_id;
5176 struct client_init_ramrod_data *data;
5177 union l5cm_specific_data l5_data;
5178 struct ustorm_eth_rx_producers rx_prods = {0};
5179 u32 off, i, *cid_ptr;
5181 rx_prods.bd_prod = 0;
5182 rx_prods.cqe_prod = BNX2X_MAX_RCQ_DESC_CNT;
5183 barrier();
5185 cl_qzone_id = BNX2X_CL_QZONE_ID(bp, cli);
5187 off = BAR_USTRORM_INTMEM +
5188 (BNX2X_CHIP_IS_E2_PLUS(bp) ?
5189 USTORM_RX_PRODS_E2_OFFSET(cl_qzone_id) :
5190 USTORM_RX_PRODS_E1X_OFFSET(BP_PORT(bp), cli));
5192 for (i = 0; i < sizeof(struct ustorm_eth_rx_producers) / 4; i++)
5193 CNIC_WR(dev, off + i * 4, ((u32 *) &rx_prods)[i]);
5195 set_bit(CNIC_LCL_FL_L2_WAIT, &cp->cnic_local_flags);
5197 data = udev->l2_buf;
5198 cid_ptr = udev->l2_buf + 12;
5200 memset(data, 0, sizeof(*data));
5202 cnic_init_bnx2x_tx_ring(dev, data);
5203 cnic_init_bnx2x_rx_ring(dev, data);
5205 data->general.fp_hsi_ver = ETH_FP_HSI_VERSION;
5207 l5_data.phy_address.lo = udev->l2_buf_map & 0xffffffff;
5208 l5_data.phy_address.hi = (u64) udev->l2_buf_map >> 32;
5210 set_bit(CNIC_LCL_FL_RINGS_INITED, &cp->cnic_local_flags);
5212 cnic_submit_kwqe_16(dev, RAMROD_CMD_ID_ETH_CLIENT_SETUP,
5213 cid, ETH_CONNECTION_TYPE, &l5_data);
5215 i = 0;
5216 while (test_bit(CNIC_LCL_FL_L2_WAIT, &cp->cnic_local_flags) &&
5217 ++i < 10)
5218 msleep(1);
5220 if (test_bit(CNIC_LCL_FL_L2_WAIT, &cp->cnic_local_flags))
5221 netdev_err(dev->netdev,
5222 "iSCSI CLIENT_SETUP did not complete\n");
5223 cnic_spq_completion(dev, DRV_CTL_RET_L2_SPQ_CREDIT_CMD, 1);
5224 cnic_ring_ctl(dev, cid, cli, 1);
5225 *cid_ptr = cid >> 4;
5226 *(cid_ptr + 1) = cid * bp->db_size;
5227 *(cid_ptr + 2) = UIO_USE_TX_DOORBELL;
5231 static void cnic_shutdown_rings(struct cnic_dev *dev)
5233 struct cnic_local *cp = dev->cnic_priv;
5234 struct cnic_uio_dev *udev = cp->udev;
5235 void *rx_ring;
5237 if (!test_bit(CNIC_LCL_FL_RINGS_INITED, &cp->cnic_local_flags))
5238 return;
5240 if (test_bit(CNIC_F_BNX2_CLASS, &dev->flags)) {
5241 cnic_shutdown_bnx2_rx_ring(dev);
5242 } else if (test_bit(CNIC_F_BNX2X_CLASS, &dev->flags)) {
5243 u32 cli = cp->ethdev->iscsi_l2_client_id;
5244 u32 cid = cp->ethdev->iscsi_l2_cid;
5245 union l5cm_specific_data l5_data;
5247 int i;
5248 cnic_ring_ctl(dev, cid, cli, 0);
5250 set_bit(CNIC_LCL_FL_L2_WAIT, &cp->cnic_local_flags);
5252 l5_data.phy_address.lo = cli;
5253 l5_data.phy_address.hi = 0;
5254 cnic_submit_kwqe_16(dev, RAMROD_CMD_ID_ETH_HALT,
5255 cid, ETH_CONNECTION_TYPE, &l5_data);
5256 i = 0;
5257 while (test_bit(CNIC_LCL_FL_L2_WAIT, &cp->cnic_local_flags) &&
5258 ++i < 10)
5259 msleep(1);
5261 if (test_bit(CNIC_LCL_FL_L2_WAIT, &cp->cnic_local_flags))
5262 netdev_err(dev->netdev,
5263 "iSCSI CLIENT_HALT did not complete\n");
5264 cnic_spq_completion(dev, DRV_CTL_RET_L2_SPQ_CREDIT_CMD, 1);
5266 memset(&l5_data, 0, sizeof(l5_data));
5267 cnic_submit_kwqe_16(dev, RAMROD_CMD_ID_COMMON_CFC_DEL,
5268 cid, NONE_CONNECTION_TYPE, &l5_data);
5269 msleep(10);
5270 }
5271 clear_bit(CNIC_LCL_FL_RINGS_INITED, &cp->cnic_local_flags);
5272 rx_ring = udev->l2_ring + CNIC_PAGE_SIZE;
5273 memset(rx_ring, 0, CNIC_PAGE_SIZE);
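/* Teardown mirrors cnic_init_rings(): the ring-control doorbell is
 * turned off, an ETH_HALT ramrod stops the client, and a CFC_DEL
 * ramrod releases the connection context; the RX ring page is then
 * cleared so a later restart begins from empty descriptors.
 */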
5276 static int cnic_register_netdev(struct cnic_dev *dev)
5278 struct cnic_local *cp = dev->cnic_priv;
5279 struct cnic_eth_dev *ethdev = cp->ethdev;
5280 int err;
5282 if (!ethdev)
5283 return -ENODEV;
5285 if (ethdev->drv_state & CNIC_DRV_STATE_REGD)
5286 return 0;
5288 err = ethdev->drv_register_cnic(dev->netdev, cp->cnic_ops, dev);
5289 if (err)
5290 netdev_err(dev->netdev, "register_cnic failed\n");
5292 /* Read iSCSI config again. On some bnx2x devices, iSCSI config
5293 * can change after firmware is downloaded.
5294 */
5295 dev->max_iscsi_conn = ethdev->max_iscsi_conn;
5296 if (ethdev->drv_state & CNIC_DRV_STATE_NO_ISCSI)
5297 dev->max_iscsi_conn = 0;
5299 return err;
5302 static void cnic_unregister_netdev(struct cnic_dev *dev)
5304 struct cnic_local *cp = dev->cnic_priv;
5305 struct cnic_eth_dev *ethdev = cp->ethdev;
5307 if (!ethdev)
5308 return;
5310 ethdev->drv_unregister_cnic(dev->netdev);
5313 static int cnic_start_hw(struct cnic_dev *dev)
5315 struct cnic_local *cp = dev->cnic_priv;
5316 struct cnic_eth_dev *ethdev = cp->ethdev;
5317 int err;
5319 if (test_bit(CNIC_F_CNIC_UP, &dev->flags))
5320 return -EALREADY;
5322 dev->regview = ethdev->io_base;
5323 pci_dev_get(dev->pcidev);
5324 cp->func = PCI_FUNC(dev->pcidev->devfn);
5325 cp->status_blk.gen = ethdev->irq_arr[0].status_blk;
5326 cp->status_blk_num = ethdev->irq_arr[0].status_blk_num;
5328 err = cp->alloc_resc(dev);
5329 if (err) {
5330 netdev_err(dev->netdev, "allocate resource failure\n");
5331 goto err1;
5332 }
5334 err = cp->start_hw(dev);
5335 if (err)
5336 goto err1;
5338 err = cnic_cm_open(dev);
5339 if (err)
5340 goto err1;
5342 set_bit(CNIC_F_CNIC_UP, &dev->flags);
5344 cp->enable_int(dev);
5346 return 0;
5348 err1:
5349 if (ethdev->drv_state & CNIC_DRV_STATE_HANDLES_IRQ)
5350 cp->stop_hw(dev);
5351 else
5352 cp->free_resc(dev);
5353 pci_dev_put(dev->pcidev);
5354 return err;
5357 static void cnic_stop_bnx2_hw(struct cnic_dev *dev)
5359 cnic_disable_bnx2_int_sync(dev);
5361 cnic_reg_wr_ind(dev, BNX2_CP_SCRATCH + 0x20, 0);
5362 cnic_reg_wr_ind(dev, BNX2_COM_SCRATCH + 0x20, 0);
5364 cnic_init_context(dev, KWQ_CID);
5365 cnic_init_context(dev, KCQ_CID);
5367 cnic_setup_5709_context(dev, 0);
5368 cnic_free_irq(dev);
5370 cnic_free_resc(dev);
5374 static void cnic_stop_bnx2x_hw(struct cnic_dev *dev)
5376 struct cnic_local *cp = dev->cnic_priv;
5377 struct bnx2x *bp = netdev_priv(dev->netdev);
5378 u32 hc_index = HC_INDEX_ISCSI_EQ_CONS;
5379 u32 sb_id = cp->status_blk_num;
5380 u32 idx_off, syn_off;
5382 cnic_free_irq(dev);
5384 if (BNX2X_CHIP_IS_E2_PLUS(bp)) {
5385 idx_off = offsetof(struct hc_status_block_e2, index_values) +
5386 (hc_index * sizeof(u16));
5388 syn_off = CSTORM_HC_SYNC_LINE_INDEX_E2_OFFSET(hc_index, sb_id);
5389 } else {
5390 idx_off = offsetof(struct hc_status_block_e1x, index_values) +
5391 (hc_index * sizeof(u16));
5393 syn_off = CSTORM_HC_SYNC_LINE_INDEX_E1X_OFFSET(hc_index, sb_id);
5394 }
5395 CNIC_WR16(dev, BAR_CSTRORM_INTMEM + syn_off, 0);
5396 CNIC_WR16(dev, BAR_CSTRORM_INTMEM + CSTORM_STATUS_BLOCK_OFFSET(sb_id) +
5397 idx_off, 0);
5399 *cp->kcq1.hw_prod_idx_ptr = 0;
5400 CNIC_WR(dev, BAR_CSTRORM_INTMEM +
5401 CSTORM_ISCSI_EQ_CONS_OFFSET(bp->pfid, 0), 0);
5402 CNIC_WR16(dev, cp->kcq1.io_addr, 0);
5403 cnic_free_resc(dev);
5406 static void cnic_stop_hw(struct cnic_dev *dev)
5408 if (test_bit(CNIC_F_CNIC_UP, &dev->flags)) {
5409 struct cnic_local *cp = dev->cnic_priv;
5410 int i = 0;
5412 /* Need to wait for the ring shutdown event to complete
5413 * before clearing the CNIC_UP flag.
5414 */
5415 while (cp->udev && cp->udev->uio_dev != -1 && i < 15) {
5416 msleep(100);
5417 i++;
5418 }
5419 cnic_shutdown_rings(dev);
5421 cp->ethdev->drv_state &= ~CNIC_DRV_STATE_HANDLES_IRQ;
5422 clear_bit(CNIC_F_CNIC_UP, &dev->flags);
5423 RCU_INIT_POINTER(cp->ulp_ops[CNIC_ULP_L4], NULL);
5425 cnic_cm_shutdown(dev);
5426 cp->stop_hw(dev);
5427 pci_dev_put(dev->pcidev);
5431 static void cnic_free_dev(struct cnic_dev *dev)
5433 int i = 0;
5435 while ((atomic_read(&dev->ref_count) != 0) && i < 10) {
5436 msleep(100);
5437 i++;
5438 }
5439 if (atomic_read(&dev->ref_count) != 0)
5440 netdev_err(dev->netdev, "Failed waiting for ref count to go to zero\n");
5442 netdev_info(dev->netdev, "Removed CNIC device\n");
5443 dev_put(dev->netdev);
5444 kfree(dev);
5447 static int cnic_get_fc_npiv_tbl(struct cnic_dev *dev,
5448 struct cnic_fc_npiv_tbl *npiv_tbl)
5450 struct cnic_local *cp = dev->cnic_priv;
5451 struct bnx2x *bp = netdev_priv(dev->netdev);
5452 int ret;
5454 if (!test_bit(CNIC_F_CNIC_UP, &dev->flags))
5455 return -EAGAIN; /* bnx2x is down */
5457 if (!BNX2X_CHIP_IS_E2_PLUS(bp))
5458 return -EINVAL;
5460 ret = cp->ethdev->drv_get_fc_npiv_tbl(dev->netdev, npiv_tbl);
5461 return ret;
5464 static struct cnic_dev *cnic_alloc_dev(struct net_device *dev,
5465 struct pci_dev *pdev)
5467 struct cnic_dev *cdev;
5468 struct cnic_local *cp;
5469 int alloc_size;
5471 alloc_size = sizeof(struct cnic_dev) + sizeof(struct cnic_local);
5473 cdev = kzalloc(alloc_size, GFP_KERNEL);
5474 if (cdev == NULL)
5475 return NULL;
5477 cdev->netdev = dev;
5478 cdev->cnic_priv = (char *)cdev + sizeof(struct cnic_dev);
5479 cdev->register_device = cnic_register_device;
5480 cdev->unregister_device = cnic_unregister_device;
5481 cdev->iscsi_nl_msg_recv = cnic_iscsi_nl_msg_recv;
5482 cdev->get_fc_npiv_tbl = cnic_get_fc_npiv_tbl;
5483 atomic_set(&cdev->ref_count, 0);
5485 cp = cdev->cnic_priv;
5486 cp->dev = cdev;
5487 cp->l2_single_buf_size = 0x400;
5488 cp->l2_rx_ring_size = 3;
5490 spin_lock_init(&cp->cnic_ulp_lock);
5492 netdev_info(dev, "Added CNIC device\n");
5494 return cdev;
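/* The cnic_dev and its cnic_local private area are carved out of one
 * kzalloc'd block, with cnic_priv pointing just past the cnic_dev
 * header; defaults are a 1 KB (0x400) L2 buffer and a 3-buffer RX
 * ring for the UIO interface.
 */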
5497 static struct cnic_dev *init_bnx2_cnic(struct net_device *dev)
5499 struct pci_dev *pdev;
5500 struct cnic_dev *cdev;
5501 struct cnic_local *cp;
5502 struct bnx2 *bp = netdev_priv(dev);
5503 struct cnic_eth_dev *ethdev = NULL;
5505 if (bp->cnic_probe)
5506 ethdev = (bp->cnic_probe)(dev);
5508 if (!ethdev)
5509 return NULL;
5511 pdev = ethdev->pdev;
5512 if (!pdev)
5513 return NULL;
5515 dev_hold(dev);
5516 pci_dev_get(pdev);
5517 if ((pdev->device == PCI_DEVICE_ID_NX2_5709 ||
5518 pdev->device == PCI_DEVICE_ID_NX2_5709S) &&
5519 (pdev->revision < 0x10)) {
5520 pci_dev_put(pdev);
5521 goto cnic_err;
5522 }
5523 pci_dev_put(pdev);
5525 cdev = cnic_alloc_dev(dev, pdev);
5526 if (cdev == NULL)
5527 goto cnic_err;
5529 set_bit(CNIC_F_BNX2_CLASS, &cdev->flags);
5530 cdev->submit_kwqes = cnic_submit_bnx2_kwqes;
5532 cp = cdev->cnic_priv;
5533 cp->ethdev = ethdev;
5534 cdev->pcidev = pdev;
5535 cp->chip_id = ethdev->chip_id;
5537 cdev->max_iscsi_conn = ethdev->max_iscsi_conn;
5539 cp->cnic_ops = &cnic_bnx2_ops;
5540 cp->start_hw = cnic_start_bnx2_hw;
5541 cp->stop_hw = cnic_stop_bnx2_hw;
5542 cp->setup_pgtbl = cnic_setup_page_tbl;
5543 cp->alloc_resc = cnic_alloc_bnx2_resc;
5544 cp->free_resc = cnic_free_resc;
5545 cp->start_cm = cnic_cm_init_bnx2_hw;
5546 cp->stop_cm = cnic_cm_stop_bnx2_hw;
5547 cp->enable_int = cnic_enable_bnx2_int;
5548 cp->disable_int_sync = cnic_disable_bnx2_int_sync;
5549 cp->close_conn = cnic_close_bnx2_conn;
5550 return cdev;
5552 cnic_err:
5553 dev_put(dev);
5554 return NULL;
5557 static struct cnic_dev *init_bnx2x_cnic(struct net_device *dev)
5559 struct pci_dev *pdev;
5560 struct cnic_dev *cdev;
5561 struct cnic_local *cp;
5562 struct bnx2x *bp = netdev_priv(dev);
5563 struct cnic_eth_dev *ethdev = NULL;
5565 if (bp->cnic_probe)
5566 ethdev = bp->cnic_probe(dev);
5568 if (!ethdev)
5569 return NULL;
5571 pdev = ethdev->pdev;
5572 if (!pdev)
5573 return NULL;
5575 dev_hold(dev);
5576 cdev = cnic_alloc_dev(dev, pdev);
5577 if (cdev == NULL) {
5578 dev_put(dev);
5579 return NULL;
5580 }
5582 set_bit(CNIC_F_BNX2X_CLASS, &cdev->flags);
5583 cdev->submit_kwqes = cnic_submit_bnx2x_kwqes;
5585 cp = cdev->cnic_priv;
5586 cp->ethdev = ethdev;
5587 cdev->pcidev = pdev;
5588 cp->chip_id = ethdev->chip_id;
5590 cdev->stats_addr = ethdev->addr_drv_info_to_mcp;
5592 if (!(ethdev->drv_state & CNIC_DRV_STATE_NO_ISCSI))
5593 cdev->max_iscsi_conn = ethdev->max_iscsi_conn;
5594 if (CNIC_SUPPORTS_FCOE(bp)) {
5595 cdev->max_fcoe_conn = ethdev->max_fcoe_conn;
5596 cdev->max_fcoe_exchanges = ethdev->max_fcoe_exchanges;
5597 }
5599 if (cdev->max_fcoe_conn > BNX2X_FCOE_NUM_CONNECTIONS)
5600 cdev->max_fcoe_conn = BNX2X_FCOE_NUM_CONNECTIONS;
5602 memcpy(cdev->mac_addr, ethdev->iscsi_mac, ETH_ALEN);
5604 cp->cnic_ops = &cnic_bnx2x_ops;
5605 cp->start_hw = cnic_start_bnx2x_hw;
5606 cp->stop_hw = cnic_stop_bnx2x_hw;
5607 cp->setup_pgtbl = cnic_setup_page_tbl_le;
5608 cp->alloc_resc = cnic_alloc_bnx2x_resc;
5609 cp->free_resc = cnic_free_resc;
5610 cp->start_cm = cnic_cm_init_bnx2x_hw;
5611 cp->stop_cm = cnic_cm_stop_bnx2x_hw;
5612 cp->enable_int = cnic_enable_bnx2x_int;
5613 cp->disable_int_sync = cnic_disable_bnx2x_int_sync;
5614 if (BNX2X_CHIP_IS_E2_PLUS(bp)) {
5615 cp->ack_int = cnic_ack_bnx2x_e2_msix;
5616 cp->arm_int = cnic_arm_bnx2x_e2_msix;
5617 } else {
5618 cp->ack_int = cnic_ack_bnx2x_msix;
5619 cp->arm_int = cnic_arm_bnx2x_msix;
5620 }
5621 cp->close_conn = cnic_close_bnx2x_conn;
5622 return cdev;
5625 static struct cnic_dev *is_cnic_dev(struct net_device *dev)
5627 struct ethtool_drvinfo drvinfo;
5628 struct cnic_dev *cdev = NULL;
5630 if (dev->ethtool_ops && dev->ethtool_ops->get_drvinfo) {
5631 memset(&drvinfo, 0, sizeof(drvinfo));
5632 dev->ethtool_ops->get_drvinfo(dev, &drvinfo);
5634 if (!strcmp(drvinfo.driver, "bnx2"))
5635 cdev = init_bnx2_cnic(dev);
5636 if (!strcmp(drvinfo.driver, "bnx2x"))
5637 cdev = init_bnx2x_cnic(dev);
5638 if (cdev) {
5639 write_lock(&cnic_dev_lock);
5640 list_add(&cdev->list, &cnic_dev_list);
5641 write_unlock(&cnic_dev_lock);
5642 }
5643 }
5644 return cdev;
5647 static void cnic_rcv_netevent(struct cnic_local *cp, unsigned long event,
5648 u16 vlan_id)
5650 int if_type;
5652 for (if_type = 0; if_type < MAX_CNIC_ULP_TYPE; if_type++) {
5653 struct cnic_ulp_ops *ulp_ops;
5654 void *ctx;
5656 mutex_lock(&cnic_lock);
5657 ulp_ops = rcu_dereference_protected(cp->ulp_ops[if_type],
5658 lockdep_is_held(&cnic_lock));
5659 if (!ulp_ops || !ulp_ops->indicate_netevent) {
5660 mutex_unlock(&cnic_lock);
5661 continue;
5662 }
5664 ctx = cp->ulp_handle[if_type];
5666 set_bit(ULP_F_CALL_PENDING, &cp->ulp_flags[if_type]);
5667 mutex_unlock(&cnic_lock);
5669 ulp_ops->indicate_netevent(ctx, event, vlan_id);
5671 clear_bit(ULP_F_CALL_PENDING, &cp->ulp_flags[if_type]);
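/* ULP callbacks are invoked without holding cnic_lock; the
 * ULP_F_CALL_PENDING bit is set under the lock first so that
 * teardown paths can wait for in-flight indicate_netevent() calls
 * to drain before the ULP goes away.
 */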
5675 /* netdev event handler */
5676 static int cnic_netdev_event(struct notifier_block *this, unsigned long event,
5677 void *ptr)
5679 struct net_device *netdev = netdev_notifier_info_to_dev(ptr);
5680 struct cnic_dev *dev;
5681 int new_dev = 0;
5683 dev = cnic_from_netdev(netdev);
5685 if (!dev && event == NETDEV_REGISTER) {
5686 /* Check for the hot-plug device */
5687 dev = is_cnic_dev(netdev);
5688 if (dev) {
5689 new_dev = 1;
5690 cnic_hold(dev);
5691 }
5692 }
5693 if (dev) {
5694 struct cnic_local *cp = dev->cnic_priv;
5696 if (new_dev)
5697 cnic_ulp_init(dev);
5698 else if (event == NETDEV_UNREGISTER)
5699 cnic_ulp_exit(dev);
5701 if (event == NETDEV_UP) {
5702 if (cnic_register_netdev(dev) != 0) {
5703 cnic_put(dev);
5704 goto done;
5705 }
5706 if (!cnic_start_hw(dev))
5707 cnic_ulp_start(dev);
5708 }
5710 cnic_rcv_netevent(cp, event, 0);
5712 if (event == NETDEV_GOING_DOWN) {
5713 cnic_ulp_stop(dev);
5714 cnic_stop_hw(dev);
5715 cnic_unregister_netdev(dev);
5716 } else if (event == NETDEV_UNREGISTER) {
5717 write_lock(&cnic_dev_lock);
5718 list_del_init(&dev->list);
5719 write_unlock(&cnic_dev_lock);
5721 cnic_put(dev);
5722 cnic_free_dev(dev);
5723 goto done;
5724 }
5725 cnic_put(dev);
5726 } else {
5727 struct net_device *realdev;
5728 u16 vid;
5730 vid = cnic_get_vlan(netdev, &realdev);
5731 if (realdev) {
5732 dev = cnic_from_netdev(realdev);
5733 if (dev) {
5734 vid |= VLAN_CFI_MASK; /* make non-zero */
5735 cnic_rcv_netevent(dev->cnic_priv, event, vid);
5736 cnic_put(dev);
5737 }
5738 }
5739 }
5740 done:
5741 return NOTIFY_DONE;
5744 static struct notifier_block cnic_netdev_notifier = {
5745 .notifier_call = cnic_netdev_event
5748 static void cnic_release(void)
5750 struct cnic_uio_dev *udev;
5752 while (!list_empty(&cnic_udev_list)) {
5753 udev = list_entry(cnic_udev_list.next, struct cnic_uio_dev,
5754 list);
5755 cnic_free_uio(udev);
5759 static int __init cnic_init(void)
5761 int rc = 0;
5763 pr_info("%s", version);
5765 rc = register_netdevice_notifier(&cnic_netdev_notifier);
5766 if (rc)
5767 return rc;
5771 cnic_wq = create_singlethread_workqueue("cnic_wq");
5772 if (!cnic_wq) {
5773 rc = -ENOMEM;
5774 unregister_netdevice_notifier(&cnic_netdev_notifier);
5775 }
5777 return rc;
5781 static void __exit cnic_exit(void)
5783 unregister_netdevice_notifier(&cnic_netdev_notifier);
5784 cnic_release();
5785 destroy_workqueue(cnic_wq);
5788 module_init(cnic_init);
5789 module_exit(cnic_exit);