/*******************************************************************************

  Intel 10 Gigabit PCI Express Linux driver
  Copyright(c) 1999 - 2011 Intel Corporation.

  This program is free software; you can redistribute it and/or modify it
  under the terms and conditions of the GNU General Public License,
  version 2, as published by the Free Software Foundation.

  This program is distributed in the hope it will be useful, but WITHOUT
  ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
  more details.

  You should have received a copy of the GNU General Public License along with
  this program; if not, write to the Free Software Foundation, Inc.,
  51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.

  The full GNU General Public License is included in this distribution in
  the file called "COPYING".

  Contact Information:
  e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
  Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497

*******************************************************************************/

#include "ixgbe.h"
#ifdef CONFIG_IXGBE_DCB
#include "ixgbe_dcb_82599.h"
#endif /* CONFIG_IXGBE_DCB */
#include <linux/if_ether.h>
#include <linux/gfp.h>
#include <linux/if_vlan.h>
#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_device.h>
#include <scsi/fc/fc_fs.h>
#include <scsi/fc/fc_fcoe.h>
#include <scsi/libfc.h>
#include <scsi/libfcoe.h>

/**
 * ixgbe_rx_is_fcoe - check the rx desc for incoming pkt type
 * @rx_desc: advanced rx descriptor
 *
 * Returns : true if it is FCoE pkt
 */
static inline bool ixgbe_rx_is_fcoe(union ixgbe_adv_rx_desc *rx_desc)
{
	u16 p;

	p = le16_to_cpu(rx_desc->wb.lower.lo_dword.hs_rss.pkt_info);
	if (p & IXGBE_RXDADV_PKTTYPE_ETQF) {
		p &= IXGBE_RXDADV_PKTTYPE_ETQF_MASK;
		p >>= IXGBE_RXDADV_PKTTYPE_ETQF_SHIFT;
		return p == IXGBE_ETQF_FILTER_FCOE;
	}
	return false;
}
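
/*
 * Illustrative only (not part of the driver): the RX clean-up path is
 * expected to gate FCoE processing on this helper, along the lines of:
 *
 *	if (ixgbe_rx_is_fcoe(rx_desc))
 *		ddp_bytes = ixgbe_fcoe_ddp(adapter, rx_desc, skb);
 *
 * where "ddp_bytes" then decides whether the skb still needs to go up the
 * stack (see the sketch after ixgbe_fcoe_ddp() below).
 */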

/**
 * ixgbe_fcoe_clear_ddp - clear the given ddp context
 * @ddp: ptr to the ixgbe_fcoe_ddp
 *
 * Returns : none
 */
static inline void ixgbe_fcoe_clear_ddp(struct ixgbe_fcoe_ddp *ddp)
{
	ddp->len = 0;
	ddp->err = 1;
	ddp->udl = NULL;
	ddp->udp = 0UL;
	ddp->sgl = NULL;
	ddp->sgc = 0;
}

/**
 * ixgbe_fcoe_ddp_put - free the ddp context for a given xid
 * @netdev: the corresponding net_device
 * @xid: the xid whose corresponding ddp context will be freed
 *
 * This is the implementation of net_device_ops.ndo_fcoe_ddp_done
 * and it is expected to be called by the ULD, i.e., the FCP layer of libfc,
 * to release the corresponding ddp context when the I/O is done.
 *
 * Returns : data length already ddp-ed in bytes
 */
int ixgbe_fcoe_ddp_put(struct net_device *netdev, u16 xid)
{
	int len = 0;
	struct ixgbe_fcoe *fcoe;
	struct ixgbe_adapter *adapter;
	struct ixgbe_fcoe_ddp *ddp;
	u32 fcbuff;

	if (!netdev)
		goto out_ddp_put;

	if (xid >= IXGBE_FCOE_DDP_MAX)
		goto out_ddp_put;

	adapter = netdev_priv(netdev);
	fcoe = &adapter->fcoe;
	ddp = &fcoe->ddp[xid];
	if (!ddp->udl)
		goto out_ddp_put;

	len = ddp->len;
	/* if there is an error, force to invalidate ddp context */
	if (ddp->err) {
		spin_lock_bh(&fcoe->lock);
		IXGBE_WRITE_REG(&adapter->hw, IXGBE_FCFLT, 0);
		IXGBE_WRITE_REG(&adapter->hw, IXGBE_FCFLTRW,
				(xid | IXGBE_FCFLTRW_WE));
		IXGBE_WRITE_REG(&adapter->hw, IXGBE_FCBUFF, 0);
		IXGBE_WRITE_REG(&adapter->hw, IXGBE_FCDMARW,
				(xid | IXGBE_FCDMARW_WE));

		/* guaranteed to be invalidated after 100us */
		IXGBE_WRITE_REG(&adapter->hw, IXGBE_FCDMARW,
				(xid | IXGBE_FCDMARW_RE));
		fcbuff = IXGBE_READ_REG(&adapter->hw, IXGBE_FCBUFF);
		spin_unlock_bh(&fcoe->lock);
		if (fcbuff & IXGBE_FCBUFF_VALID)
			udelay(100);
	}
	pci_unmap_sg(adapter->pdev, ddp->sgl, ddp->sgc,
		     DMA_FROM_DEVICE);
	pci_pool_free(fcoe->pool, ddp->udl, ddp->udp);
	ixgbe_fcoe_clear_ddp(ddp);

out_ddp_put:
	return len;
}
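
/*
 * Illustrative only: the ULD reaches ixgbe_fcoe_ddp_put() through the
 * ndo_fcoe_ddp_done hook once the exchange completes. A minimal sketch,
 * assuming "netdev" is the ixgbe port and "xid" the finished exchange id:
 *
 *	unsigned int ddped_len = 0;
 *
 *	if (netdev->netdev_ops->ndo_fcoe_ddp_done)
 *		ddped_len = netdev->netdev_ops->ndo_fcoe_ddp_done(netdev,
 *								  xid);
 */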

/**
 * ixgbe_fcoe_ddp_setup - called to set up ddp context
 * @netdev: the corresponding net_device
 * @xid: the exchange id requesting ddp
 * @sgl: the scatter-gather list for this request
 * @sgc: the number of scatter-gather items
 * @target_mode: 1 to set up ddp in target mode, 0 for initiator mode
 *
 * Returns : 1 for success and 0 for no ddp
 */
static int ixgbe_fcoe_ddp_setup(struct net_device *netdev, u16 xid,
				struct scatterlist *sgl, unsigned int sgc,
				int target_mode)
{
	struct ixgbe_adapter *adapter;
	struct ixgbe_hw *hw;
	struct ixgbe_fcoe *fcoe;
	struct ixgbe_fcoe_ddp *ddp;
	struct scatterlist *sg;
	unsigned int i, j, dmacount;
	unsigned int len;
	static const unsigned int bufflen = IXGBE_FCBUFF_MIN;
	unsigned int firstoff = 0;
	unsigned int lastsize;
	unsigned int thisoff = 0;
	unsigned int thislen = 0;
	u32 fcbuff, fcdmarw, fcfltrw, fcrxctl;
	dma_addr_t addr = 0;

	if (!netdev || !sgl)
		return 0;

	adapter = netdev_priv(netdev);
	if (xid >= IXGBE_FCOE_DDP_MAX) {
		e_warn(drv, "xid=0x%x out-of-range\n", xid);
		return 0;
	}

	/* no DDP if we are already down or resetting */
	if (test_bit(__IXGBE_DOWN, &adapter->state) ||
	    test_bit(__IXGBE_RESETTING, &adapter->state))
		return 0;

	fcoe = &adapter->fcoe;
	if (!fcoe->pool) {
		e_warn(drv, "xid=0x%x no ddp pool for fcoe\n", xid);
		return 0;
	}

	ddp = &fcoe->ddp[xid];
	if (ddp->sgl) {
		e_err(drv, "xid 0x%x w/ non-null sgl=%p nents=%d\n",
		      xid, ddp->sgl, ddp->sgc);
		return 0;
	}
	ixgbe_fcoe_clear_ddp(ddp);

	/* setup dma from scsi command sgl */
	dmacount = pci_map_sg(adapter->pdev, sgl, sgc, DMA_FROM_DEVICE);
	if (dmacount == 0) {
		e_err(drv, "xid 0x%x DMA map error\n", xid);
		return 0;
	}

	/* alloc the udl from our ddp pool */
	ddp->udl = pci_pool_alloc(fcoe->pool, GFP_ATOMIC, &ddp->udp);
	if (!ddp->udl) {
		e_err(drv, "failed to allocate ddp context\n");
		goto out_noddp_unmap;
	}
	ddp->sgl = sgl;
	ddp->sgc = sgc;

	j = 0;
	for_each_sg(sgl, sg, dmacount, i) {
		addr = sg_dma_address(sg);
		len = sg_dma_len(sg);
		while (len) {
			/* max number of buffers allowed in one DDP context */
			if (j >= IXGBE_BUFFCNT_MAX) {
				e_err(drv, "xid=%x:%d,%d,%d:addr=%llx "
				      "not enough descriptors\n",
				      xid, i, j, dmacount, (u64)addr);
				goto out_noddp_free;
			}

			/* get the offset and length of current buffer */
			thisoff = addr & ((dma_addr_t)bufflen - 1);
			thislen = min((bufflen - thisoff), len);
			/*
			 * all but the 1st buffer (j == 0)
			 * must be aligned on bufflen
			 */
			if ((j != 0) && (thisoff))
				goto out_noddp_free;
			/*
			 * all but the last buffer
			 * ((i == (dmacount - 1)) && (thislen == len))
			 * must end at bufflen
			 */
			if (((i != (dmacount - 1)) || (thislen != len))
			    && ((thislen + thisoff) != bufflen))
				goto out_noddp_free;

			ddp->udl[j] = (u64)(addr - thisoff);
			/* only the first buffer may have a non-zero offset */
			if (j == 0)
				firstoff = thisoff;
			len -= thislen;
			addr += thislen;
			j++;
		}
	}
	/* only the last buffer may have non-full bufflen */
	lastsize = thisoff + thislen;

	/*
	 * lastsize can not be bufflen.
	 * If it is, then add another buffer with lastsize = 1.
	 */
	if (lastsize == bufflen) {
		if (j >= IXGBE_BUFFCNT_MAX) {
			e_err(drv, "xid=%x:%d,%d,%d:addr=%llx "
			      "not enough user buffers. We need an extra "
			      "buffer because lastsize is bufflen.\n",
			      xid, i, j, dmacount, (u64)addr);
			goto out_noddp_free;
		}

		ddp->udl[j] = (u64)(fcoe->extra_ddp_buffer_dma);
		j++;
		lastsize = 1;
	}
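
	/*
	 * Worked example (illustrative, with bufflen = IXGBE_FCBUFF_MIN =
	 * 4KB): an sg entry with dma address 0x10000800 and length 0x1800
	 * produces two user descriptors:
	 *
	 *	j = 0: thisoff = 0x800, thislen = 0x800, udl[0] = 0x10000000
	 *	j = 1: thisoff = 0x000, thislen = 0x1000, udl[1] = 0x10001000
	 *
	 * lastsize = 0x0 + 0x1000 == bufflen, so the shared extra_ddp_buffer
	 * is appended as udl[2] and lastsize forced to 1, which is the HW
	 * workaround handled just above.
	 */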

	fcbuff = (IXGBE_FCBUFF_4KB << IXGBE_FCBUFF_BUFFSIZE_SHIFT);
	fcbuff |= ((j & 0xff) << IXGBE_FCBUFF_BUFFCNT_SHIFT);
	fcbuff |= (firstoff << IXGBE_FCBUFF_OFFSET_SHIFT);
	/* Set WRCONTX bit to allow DDP for target */
	if (target_mode)
		fcbuff |= (IXGBE_FCBUFF_WRCONTX);
	fcbuff |= (IXGBE_FCBUFF_VALID);

	fcdmarw = xid;
	fcdmarw |= IXGBE_FCDMARW_WE;
	fcdmarw |= (lastsize << IXGBE_FCDMARW_LASTSIZE_SHIFT);

	fcfltrw = xid;
	fcfltrw |= IXGBE_FCFLTRW_WE;

	/* program DMA context */
	hw = &adapter->hw;
	spin_lock_bh(&fcoe->lock);

	/* turn on last frame indication for target mode as the target is
	 * supposed to send FCP_RSP when it is done. */
	if (target_mode && !test_bit(__IXGBE_FCOE_TARGET, &fcoe->mode)) {
		set_bit(__IXGBE_FCOE_TARGET, &fcoe->mode);
		fcrxctl = IXGBE_READ_REG(hw, IXGBE_FCRXCTRL);
		fcrxctl |= IXGBE_FCRXCTRL_LASTSEQH;
		IXGBE_WRITE_REG(hw, IXGBE_FCRXCTRL, fcrxctl);
	}

	IXGBE_WRITE_REG(hw, IXGBE_FCPTRL, ddp->udp & DMA_BIT_MASK(32));
	IXGBE_WRITE_REG(hw, IXGBE_FCPTRH, (u64)ddp->udp >> 32);
	IXGBE_WRITE_REG(hw, IXGBE_FCBUFF, fcbuff);
	IXGBE_WRITE_REG(hw, IXGBE_FCDMARW, fcdmarw);
	/* program filter context */
	IXGBE_WRITE_REG(hw, IXGBE_FCPARAM, 0);
	IXGBE_WRITE_REG(hw, IXGBE_FCFLT, IXGBE_FCFLT_VALID);
	IXGBE_WRITE_REG(hw, IXGBE_FCFLTRW, fcfltrw);

	spin_unlock_bh(&fcoe->lock);

	return 1;

out_noddp_free:
	pci_pool_free(fcoe->pool, ddp->udl, ddp->udp);
	ixgbe_fcoe_clear_ddp(ddp);

out_noddp_unmap:
	pci_unmap_sg(adapter->pdev, sgl, sgc, DMA_FROM_DEVICE);
	return 0;
}

/**
 * ixgbe_fcoe_ddp_get - called to set up ddp context in initiator mode
 * @netdev: the corresponding net_device
 * @xid: the exchange id requesting ddp
 * @sgl: the scatter-gather list for this request
 * @sgc: the number of scatter-gather items
 *
 * This is the implementation of net_device_ops.ndo_fcoe_ddp_setup
 * and is expected to be called from the ULD, e.g., the FCP layer of libfc,
 * to set up ddp for the corresponding xid of the given sglist for
 * the corresponding I/O.
 *
 * Returns : 1 for success and 0 for no ddp
 */
int ixgbe_fcoe_ddp_get(struct net_device *netdev, u16 xid,
		       struct scatterlist *sgl, unsigned int sgc)
{
	return ixgbe_fcoe_ddp_setup(netdev, xid, sgl, sgc, 0);
}
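
/*
 * Illustrative only: libfc/fcoe reaches ixgbe_fcoe_ddp_get() through the
 * ndo_fcoe_ddp_setup hook before starting a read I/O. A minimal sketch,
 * assuming "netdev", "xid", "sgl" and "sgc" describe the pending exchange:
 *
 *	int ddp_ok = 0;
 *
 *	if (netdev->netdev_ops->ndo_fcoe_ddp_setup)
 *		ddp_ok = netdev->netdev_ops->ndo_fcoe_ddp_setup(netdev, xid,
 *								sgl, sgc);
 *
 * a return of 0 simply means the I/O proceeds without DDP.
 */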

/**
 * ixgbe_fcoe_ddp_target - called to set up ddp context in target mode
 * @netdev: the corresponding net_device
 * @xid: the exchange id requesting ddp
 * @sgl: the scatter-gather list for this request
 * @sgc: the number of scatter-gather items
 *
 * This is the implementation of net_device_ops.ndo_fcoe_ddp_target
 * and is expected to be called from the ULD, e.g., the FCP layer of libfc,
 * to set up ddp for the corresponding xid of the given sglist for
 * the corresponding I/O. The DDP in target mode is a write I/O request
 * from the initiator.
 *
 * Returns : 1 for success and 0 for no ddp
 */
int ixgbe_fcoe_ddp_target(struct net_device *netdev, u16 xid,
			  struct scatterlist *sgl, unsigned int sgc)
{
	return ixgbe_fcoe_ddp_setup(netdev, xid, sgl, sgc, 1);
}
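
/*
 * Illustrative only: a target-mode stack would instead go through the
 * ndo_fcoe_ddp_target hook for a write I/O from the initiator, e.g.:
 *
 *	if (netdev->netdev_ops->ndo_fcoe_ddp_target)
 *		ddp_ok = netdev->netdev_ops->ndo_fcoe_ddp_target(netdev, xid,
 *								 sgl, sgc);
 */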

/**
 * ixgbe_fcoe_ddp - check ddp status and mark it done
 * @adapter: ixgbe adapter
 * @rx_desc: advanced rx descriptor
 * @skb: the skb holding the received data
 *
 * This checks ddp status.
 *
 * Returns : < 0 indicates an error or not a FCoE ddp, 0 indicates
 * not passing the skb to ULD, > 0 indicates the length of data
 * being ddped.
 */
int ixgbe_fcoe_ddp(struct ixgbe_adapter *adapter,
		   union ixgbe_adv_rx_desc *rx_desc,
		   struct sk_buff *skb)
{
	u16 xid;
	u32 fctl;
	u32 sterr, fceofe, fcerr, fcstat;
	int rc = -EINVAL;
	struct ixgbe_fcoe *fcoe;
	struct ixgbe_fcoe_ddp *ddp;
	struct fc_frame_header *fh;
	struct fcoe_crc_eof *crc;

	if (!ixgbe_rx_is_fcoe(rx_desc))
		goto ddp_out;

	sterr = le32_to_cpu(rx_desc->wb.upper.status_error);
	fcerr = (sterr & IXGBE_RXDADV_ERR_FCERR);
	fceofe = (sterr & IXGBE_RXDADV_ERR_FCEOFE);
	if (fcerr == IXGBE_FCERR_BADCRC)
		skb_checksum_none_assert(skb);
	else
		skb->ip_summed = CHECKSUM_UNNECESSARY;

	if (eth_hdr(skb)->h_proto == htons(ETH_P_8021Q))
		fh = (struct fc_frame_header *)(skb->data +
			sizeof(struct vlan_hdr) + sizeof(struct fcoe_hdr));
	else
		fh = (struct fc_frame_header *)(skb->data +
			sizeof(struct fcoe_hdr));
	fctl = ntoh24(fh->fh_f_ctl);
	if (fctl & FC_FC_EX_CTX)
		xid = be16_to_cpu(fh->fh_ox_id);
	else
		xid = be16_to_cpu(fh->fh_rx_id);

	if (xid >= IXGBE_FCOE_DDP_MAX)
		goto ddp_out;

	fcoe = &adapter->fcoe;
	ddp = &fcoe->ddp[xid];
	if (!ddp->udl)
		goto ddp_out;

	if (fcerr | fceofe)
		goto ddp_out;

	fcstat = (sterr & IXGBE_RXDADV_STAT_FCSTAT);
	if (fcstat) {
		/* update length of DDPed data */
		ddp->len = le32_to_cpu(rx_desc->wb.lower.hi_dword.rss);
		/* unmap the sg list when FCP_RSP is received */
		if (fcstat == IXGBE_RXDADV_STAT_FCSTAT_FCPRSP) {
			pci_unmap_sg(adapter->pdev, ddp->sgl,
				     ddp->sgc, DMA_FROM_DEVICE);
			ddp->err = (fcerr | fceofe);
			ddp->sgl = NULL;
			ddp->sgc = 0;
		}
		/* return 0 to bypass going to ULD for DDPed data */
		if (fcstat == IXGBE_RXDADV_STAT_FCSTAT_DDP)
			rc = 0;
		else if (ddp->len)
			rc = ddp->len;
	}
	/* In target mode, check the last data frame of the sequence.
	 * For DDP in target mode, data is already DDPed but the header
	 * indication of the last data frame would allow us to tell if we
	 * got all the data so the ULP can send FCP_RSP back. As this is
	 * not a full fcoe frame, we fill the trailer here so it won't be
	 * dropped by the ULP stack.
	 */
	if ((fh->fh_r_ctl == FC_RCTL_DD_SOL_DATA) &&
	    (fctl & FC_FC_END_SEQ)) {
		crc = (struct fcoe_crc_eof *)skb_put(skb, sizeof(*crc));
		crc->fcoe_eof = FC_EOF_T;
	}
ddp_out:
	return rc;
}
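
/*
 * Illustrative only: the caller distinguishes three outcomes. A minimal
 * sketch, assuming "rx_desc" and "skb" come from the ring being cleaned:
 *
 *	int ddp_bytes = ixgbe_fcoe_ddp(adapter, rx_desc, skb);
 *
 * ddp_bytes < 0 means the frame is not a ddp-ed FCoE frame: deliver it;
 * ddp_bytes == 0 means the payload was direct-placed: consume the skb;
 * ddp_bytes > 0 means deliver the skb, ddp_bytes of data already placed.
 */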

/**
 * ixgbe_fso - ixgbe FCoE Sequence Offload (FSO)
 * @adapter: ixgbe adapter
 * @tx_ring: tx desc ring
 * @skb: associated skb
 * @tx_flags: tx flags
 * @hdr_len: hdr_len to be returned
 *
 * This sets up large send offload for FCoE
 *
 * Returns : 0 indicates no FSO, > 0 for FSO, < 0 for error
 */
int ixgbe_fso(struct ixgbe_adapter *adapter,
	      struct ixgbe_ring *tx_ring, struct sk_buff *skb,
	      u32 tx_flags, u8 *hdr_len)
{
	u8 sof, eof;
	u32 vlan_macip_lens;
	u32 fcoe_sof_eof = 0;
	u32 type_tucmd;
	u32 mss_l4len_idx;
	int mss = 0;
	unsigned int i;
	struct ixgbe_tx_buffer *tx_buffer_info;
	struct ixgbe_adv_tx_context_desc *context_desc;
	struct fc_frame_header *fh;

	if (skb_is_gso(skb) && (skb_shinfo(skb)->gso_type != SKB_GSO_FCOE)) {
		e_err(drv, "Wrong gso type %d:expecting SKB_GSO_FCOE\n",
		      skb_shinfo(skb)->gso_type);
		return -EINVAL;
	}

	/* resets the headers to point to fcoe/fc */
	skb_set_network_header(skb, skb->mac_len);
	skb_set_transport_header(skb, skb->mac_len +
				 sizeof(struct fcoe_hdr));

	/* sets up SOF and ORIS */
	sof = ((struct fcoe_hdr *)skb_network_header(skb))->fcoe_sof;
	switch (sof) {
	case FC_SOF_I2:
		fcoe_sof_eof |= IXGBE_ADVTXD_FCOEF_ORIS;
		break;
	case FC_SOF_I3:
		fcoe_sof_eof |= IXGBE_ADVTXD_FCOEF_SOF;
		fcoe_sof_eof |= IXGBE_ADVTXD_FCOEF_ORIS;
		break;
	case FC_SOF_N2:
		break;
	case FC_SOF_N3:
		fcoe_sof_eof |= IXGBE_ADVTXD_FCOEF_SOF;
		break;
	default:
		e_warn(drv, "unknown sof = 0x%x\n", sof);
		return -EINVAL;
	}

	/* the first byte of the last dword is EOF */
	skb_copy_bits(skb, skb->len - 4, &eof, 1);
	/* sets up EOF and ORIE */
	switch (eof) {
	case FC_EOF_N:
		fcoe_sof_eof |= IXGBE_ADVTXD_FCOEF_EOF_N;
		break;
	case FC_EOF_T:
		/* lso needs ORIE */
		if (skb_is_gso(skb)) {
			fcoe_sof_eof |= IXGBE_ADVTXD_FCOEF_EOF_N;
			fcoe_sof_eof |= IXGBE_ADVTXD_FCOEF_ORIE;
		} else {
			fcoe_sof_eof |= IXGBE_ADVTXD_FCOEF_EOF_T;
		}
		break;
	case FC_EOF_NI:
		fcoe_sof_eof |= IXGBE_ADVTXD_FCOEF_EOF_NI;
		break;
	case FC_EOF_A:
		fcoe_sof_eof |= IXGBE_ADVTXD_FCOEF_EOF_A;
		break;
	default:
		e_warn(drv, "unknown eof = 0x%x\n", eof);
		return -EINVAL;
	}

	/* sets up PARINC indicating data offset */
	fh = (struct fc_frame_header *)skb_transport_header(skb);
	if (fh->fh_f_ctl[2] & FC_FC_REL_OFF)
		fcoe_sof_eof |= IXGBE_ADVTXD_FCOEF_PARINC;

	/* hdr_len includes fc_hdr if FCoE lso is enabled */
	*hdr_len = sizeof(struct fcoe_crc_eof);
	if (skb_is_gso(skb))
		*hdr_len += (skb_transport_offset(skb) +
			     sizeof(struct fc_frame_header));
	/* vlan_macip_lens: HEADLEN, MACLEN, VLAN tag */
	vlan_macip_lens = (skb_transport_offset(skb) +
			   sizeof(struct fc_frame_header));
	vlan_macip_lens |= ((skb_transport_offset(skb) - 4)
			    << IXGBE_ADVTXD_MACLEN_SHIFT);
	vlan_macip_lens |= (tx_flags & IXGBE_TX_FLAGS_VLAN_MASK);

	/* type_tucmd and mss: set TUCMD.FCoE to enable offload */
	type_tucmd = IXGBE_TXD_CMD_DEXT | IXGBE_ADVTXD_DTYP_CTXT |
		     IXGBE_ADVTXT_TUCMD_FCOE;
	if (skb_is_gso(skb))
		mss = skb_shinfo(skb)->gso_size;
	/* mss_l4len_idx: use 1 for FSO as TSO, no need for L4LEN */
	mss_l4len_idx = (mss << IXGBE_ADVTXD_MSS_SHIFT) |
			(1 << IXGBE_ADVTXD_IDX_SHIFT);

	/* write context desc */
	i = tx_ring->next_to_use;
	context_desc = IXGBE_TX_CTXTDESC_ADV(tx_ring, i);
	context_desc->vlan_macip_lens = cpu_to_le32(vlan_macip_lens);
	context_desc->seqnum_seed = cpu_to_le32(fcoe_sof_eof);
	context_desc->type_tucmd_mlhl = cpu_to_le32(type_tucmd);
	context_desc->mss_l4len_idx = cpu_to_le32(mss_l4len_idx);

	tx_buffer_info = &tx_ring->tx_buffer_info[i];
	tx_buffer_info->time_stamp = jiffies;
	tx_buffer_info->next_to_watch = i;

	i++;
	if (i == tx_ring->count)
		i = 0;
	tx_ring->next_to_use = i;

	return skb_is_gso(skb);
}
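
/*
 * Illustrative only: the transmit path would call ixgbe_fso() before
 * queueing the data descriptors of a SKB_GSO_FCOE skb. A minimal sketch,
 * assuming "adapter", "tx_ring", "skb" and "tx_flags" come from the xmit
 * routine:
 *
 *	u8 hdr_len = 0;
 *	int fso = ixgbe_fso(adapter, tx_ring, skb, tx_flags, &hdr_len);
 *
 * fso < 0 means a malformed frame (bad gso type, SOF or EOF) that should
 * be dropped; fso > 0 means the context descriptor was written and
 * hdr_len now covers the FC header plus the CRC/EOF trailer.
 */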

/**
 * ixgbe_configure_fcoe - configures registers for fcoe at start
 * @adapter: ptr to ixgbe adapter
 *
 * This sets up FCoE related registers
 *
 * Returns : none
 */
void ixgbe_configure_fcoe(struct ixgbe_adapter *adapter)
{
	int i, fcoe_q, fcoe_i;
	struct ixgbe_hw *hw = &adapter->hw;
	struct ixgbe_fcoe *fcoe = &adapter->fcoe;
	struct ixgbe_ring_feature *f = &adapter->ring_feature[RING_F_FCOE];
#ifdef CONFIG_IXGBE_DCB
	u8 tc;
	u32 up2tc;
#endif

	/* create the pool for ddp if not created yet */
	if (!fcoe->pool) {
		/* allocate ddp pool */
		fcoe->pool = pci_pool_create("ixgbe_fcoe_ddp",
					     adapter->pdev, IXGBE_FCPTR_MAX,
					     IXGBE_FCPTR_ALIGN, PAGE_SIZE);
		if (!fcoe->pool)
			e_err(drv, "failed to allocate FCoE DDP pool\n");

		spin_lock_init(&fcoe->lock);

		/* Extra buffer to be shared by all DDPs for HW work around */
		fcoe->extra_ddp_buffer = kmalloc(IXGBE_FCBUFF_MIN, GFP_ATOMIC);
		if (fcoe->extra_ddp_buffer == NULL) {
			e_err(drv, "failed to allocate extra DDP buffer\n");
			goto out_extra_ddp_buffer_alloc;
		}

		fcoe->extra_ddp_buffer_dma =
			dma_map_single(&adapter->pdev->dev,
				       fcoe->extra_ddp_buffer,
				       IXGBE_FCBUFF_MIN,
				       DMA_FROM_DEVICE);
		if (dma_mapping_error(&adapter->pdev->dev,
				      fcoe->extra_ddp_buffer_dma)) {
			e_err(drv, "failed to map extra DDP buffer\n");
			goto out_extra_ddp_buffer_dma;
		}
	}

	/* Enable L2 eth type filter for FCoE */
	IXGBE_WRITE_REG(hw, IXGBE_ETQF(IXGBE_ETQF_FILTER_FCOE),
			(ETH_P_FCOE | IXGBE_ETQF_FCOE | IXGBE_ETQF_FILTER_EN));
	/* Enable L2 eth type filter for FIP */
	IXGBE_WRITE_REG(hw, IXGBE_ETQF(IXGBE_ETQF_FILTER_FIP),
			(ETH_P_FIP | IXGBE_ETQF_FILTER_EN));
	if (adapter->ring_feature[RING_F_FCOE].indices) {
		/* Use multiple rx queues for FCoE by redirection table */
		for (i = 0; i < IXGBE_FCRETA_SIZE; i++) {
			fcoe_i = f->mask + i % f->indices;
			fcoe_i &= IXGBE_FCRETA_ENTRY_MASK;
			fcoe_q = adapter->rx_ring[fcoe_i]->reg_idx;
			IXGBE_WRITE_REG(hw, IXGBE_FCRETA(i), fcoe_q);
		}
		IXGBE_WRITE_REG(hw, IXGBE_FCRECTL, IXGBE_FCRECTL_ENA);
		IXGBE_WRITE_REG(hw, IXGBE_ETQS(IXGBE_ETQF_FILTER_FCOE), 0);
	} else {
		/* Use single rx queue for FCoE */
		fcoe_i = f->mask;
		fcoe_q = adapter->rx_ring[fcoe_i]->reg_idx;
		IXGBE_WRITE_REG(hw, IXGBE_FCRECTL, 0);
		IXGBE_WRITE_REG(hw, IXGBE_ETQS(IXGBE_ETQF_FILTER_FCOE),
				IXGBE_ETQS_QUEUE_EN |
				(fcoe_q << IXGBE_ETQS_RX_QUEUE_SHIFT));
	}
	/* send FIP frames to the first FCoE queue */
	fcoe_i = f->mask;
	fcoe_q = adapter->rx_ring[fcoe_i]->reg_idx;
	IXGBE_WRITE_REG(hw, IXGBE_ETQS(IXGBE_ETQF_FILTER_FIP),
			IXGBE_ETQS_QUEUE_EN |
			(fcoe_q << IXGBE_ETQS_RX_QUEUE_SHIFT));

	IXGBE_WRITE_REG(hw, IXGBE_FCRXCTRL,
			IXGBE_FCRXCTRL_FCOELLI |
			IXGBE_FCRXCTRL_FCCRCBO |
			(FC_FCOE_VER << IXGBE_FCRXCTRL_FCOEVER_SHIFT));
#ifdef CONFIG_IXGBE_DCB
	up2tc = IXGBE_READ_REG(&adapter->hw, IXGBE_RTTUP2TC);
	for (i = 0; i < MAX_USER_PRIORITY; i++) {
		tc = (u8)(up2tc >> (i * IXGBE_RTTUP2TC_UP_SHIFT));
		tc &= (MAX_TRAFFIC_CLASS - 1);
		if (fcoe->tc == tc) {
			fcoe->up = i;
			break;
		}
	}
#endif

	return;

out_extra_ddp_buffer_dma:
	kfree(fcoe->extra_ddp_buffer);
out_extra_ddp_buffer_alloc:
	pci_pool_destroy(fcoe->pool);
	fcoe->pool = NULL;
}
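
/*
 * Worked example (illustrative; the values 8 and 4 are made up): with
 * f->mask = 8 and f->indices = 4, the redirection loop above fills the
 * IXGBE_FCRETA_SIZE table entries round-robin over rx queues 8..11:
 *
 *	i = 0: fcoe_i = 8, fcoe_q = rx_ring[8]->reg_idx
 *	i = 1: fcoe_i = 9, fcoe_q = rx_ring[9]->reg_idx
 *	...
 *	i = 4: fcoe_i = 8 again, and so on
 *
 * so FCoE receive traffic is spread across the dedicated queue set instead
 * of a single queue.
 */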

/**
 * ixgbe_cleanup_fcoe - release all fcoe ddp context resources
 * @adapter: ixgbe adapter
 *
 * Cleans up outstanding ddp context resources
 *
 * Returns : none
 */
void ixgbe_cleanup_fcoe(struct ixgbe_adapter *adapter)
{
	int i;
	struct ixgbe_fcoe *fcoe = &adapter->fcoe;

	/* release ddp resource */
	if (fcoe->pool) {
		for (i = 0; i < IXGBE_FCOE_DDP_MAX; i++)
			ixgbe_fcoe_ddp_put(adapter->netdev, i);
		dma_unmap_single(&adapter->pdev->dev,
				 fcoe->extra_ddp_buffer_dma,
				 IXGBE_FCBUFF_MIN,
				 DMA_FROM_DEVICE);
		kfree(fcoe->extra_ddp_buffer);
		pci_pool_destroy(fcoe->pool);
		fcoe->pool = NULL;
	}
}

/**
 * ixgbe_fcoe_enable - turn on FCoE offload feature
 * @netdev: the corresponding netdev
 *
 * Turns on FCoE offload feature in 82599.
 *
 * Returns : 0 indicates success or -EINVAL on failure
 */
int ixgbe_fcoe_enable(struct net_device *netdev)
{
	int rc = -EINVAL;
	struct ixgbe_adapter *adapter = netdev_priv(netdev);
	struct ixgbe_fcoe *fcoe = &adapter->fcoe;

	if (!(adapter->flags & IXGBE_FLAG_FCOE_CAPABLE))
		goto out_enable;

	atomic_inc(&fcoe->refcnt);
	if (adapter->flags & IXGBE_FLAG_FCOE_ENABLED)
		goto out_enable;

	e_info(drv, "Enabling FCoE offload features.\n");
	if (netif_running(netdev))
		netdev->netdev_ops->ndo_stop(netdev);

	ixgbe_clear_interrupt_scheme(adapter);

	adapter->flags |= IXGBE_FLAG_FCOE_ENABLED;
	adapter->ring_feature[RING_F_FCOE].indices = IXGBE_FCRETA_SIZE;
	netdev->features |= NETIF_F_FCOE_CRC;
	netdev->features |= NETIF_F_FSO;
	netdev->features |= NETIF_F_FCOE_MTU;
	netdev->fcoe_ddp_xid = IXGBE_FCOE_DDP_MAX - 1;

	ixgbe_init_interrupt_scheme(adapter);
	netdev_features_change(netdev);

	if (netif_running(netdev))
		netdev->netdev_ops->ndo_open(netdev);
	rc = 0;

out_enable:
	return rc;
}
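
/*
 * Illustrative only: fcoe.ko invokes this through the ndo_fcoe_enable hook
 * when an FCoE instance is created on top of this port, e.g.:
 *
 *	if (netdev->netdev_ops->ndo_fcoe_enable &&
 *	    netdev->netdev_ops->ndo_fcoe_enable(netdev))
 *		return -EIO;
 */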

/**
 * ixgbe_fcoe_disable - turn off FCoE offload feature
 * @netdev: the corresponding netdev
 *
 * Turns off FCoE offload feature in 82599.
 *
 * Returns : 0 indicates success or -EINVAL on failure
 */
int ixgbe_fcoe_disable(struct net_device *netdev)
{
	int rc = -EINVAL;
	struct ixgbe_adapter *adapter = netdev_priv(netdev);
	struct ixgbe_fcoe *fcoe = &adapter->fcoe;

	if (!(adapter->flags & IXGBE_FLAG_FCOE_CAPABLE))
		goto out_disable;

	if (!(adapter->flags & IXGBE_FLAG_FCOE_ENABLED))
		goto out_disable;

	if (!atomic_dec_and_test(&fcoe->refcnt))
		goto out_disable;

	e_info(drv, "Disabling FCoE offload features.\n");
	netdev->features &= ~NETIF_F_FCOE_CRC;
	netdev->features &= ~NETIF_F_FSO;
	netdev->features &= ~NETIF_F_FCOE_MTU;
	netdev->fcoe_ddp_xid = 0;
	netdev_features_change(netdev);

	if (netif_running(netdev))
		netdev->netdev_ops->ndo_stop(netdev);

	ixgbe_clear_interrupt_scheme(adapter);
	adapter->flags &= ~IXGBE_FLAG_FCOE_ENABLED;
	adapter->ring_feature[RING_F_FCOE].indices = 0;
	ixgbe_cleanup_fcoe(adapter);
	ixgbe_init_interrupt_scheme(adapter);

	if (netif_running(netdev))
		netdev->netdev_ops->ndo_open(netdev);
	rc = 0;

out_disable:
	return rc;
}

#ifdef CONFIG_IXGBE_DCB
/**
 * ixgbe_fcoe_setapp - sets the user priority bitmap for FCoE
 * @adapter: ixgbe adapter
 * @up: 802.1p user priority bitmap
 *
 * Finds out the traffic class from the input user priority
 * bitmap for FCoE.
 *
 * Returns : 0 on success, otherwise 1 on error
 */
u8 ixgbe_fcoe_setapp(struct ixgbe_adapter *adapter, u8 up)
{
	int i;
	u32 up2tc;

	/* valid user priority bitmap must not be 0 */
	if (up) {
		/* from user priority to the corresponding traffic class */
		up2tc = IXGBE_READ_REG(&adapter->hw, IXGBE_RTTUP2TC);
		for (i = 0; i < MAX_USER_PRIORITY; i++) {
			if (up & (1 << i)) {
				up2tc >>= (i * IXGBE_RTTUP2TC_UP_SHIFT);
				up2tc &= (MAX_TRAFFIC_CLASS - 1);
				adapter->fcoe.tc = (u8)up2tc;
				adapter->fcoe.up = i;
				return 0;
			}
		}
	}

	return 1;
}
#endif /* CONFIG_IXGBE_DCB */
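
/*
 * Worked example (illustrative, assuming IXGBE_RTTUP2TC_UP_SHIFT is 3 and
 * MAX_TRAFFIC_CLASS is 8): for up = 0x08 (only priority 3 set) and a
 * made-up RTTUP2TC value of 0xa00, the loop above matches i = 3 and
 * computes
 *
 *	up2tc = (0xa00 >> (3 * 3)) & 7 = 5
 *
 * binding FCoE to traffic class 5 at user priority 3.
 */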

/**
 * ixgbe_fcoe_get_wwn - get world wide name for the node or the port
 * @netdev: the corresponding netdev
 * @wwn: the world wide name
 * @type: the type of world wide name
 *
 * Returns the node or port world wide name if both the prefix and the san
 * mac address are valid. The wwn is then formed based on the NAA-2 format
 * for the IEEE Extended name identifier (ref. T11 FC-LS Spec., Sec. 15.3).
 *
 * Returns : 0 on success
 */
int ixgbe_fcoe_get_wwn(struct net_device *netdev, u64 *wwn, int type)
{
	int rc = -EINVAL;
	u16 prefix = 0xffff;
	struct ixgbe_adapter *adapter = netdev_priv(netdev);
	struct ixgbe_mac_info *mac = &adapter->hw.mac;

	switch (type) {
	case NETDEV_FCOE_WWNN:
		prefix = mac->wwnn_prefix;
		break;
	case NETDEV_FCOE_WWPN:
		prefix = mac->wwpn_prefix;
		break;
	default:
		break;
	}

	if ((prefix != 0xffff) &&
	    is_valid_ether_addr(mac->san_addr)) {
		*wwn = ((u64) prefix << 48) |
		       ((u64) mac->san_addr[0] << 40) |
		       ((u64) mac->san_addr[1] << 32) |
		       ((u64) mac->san_addr[2] << 24) |
		       ((u64) mac->san_addr[3] << 16) |
		       ((u64) mac->san_addr[4] << 8)  |
		       ((u64) mac->san_addr[5]);