// SPDX-License-Identifier: GPL-2.0+

#include <linux/bpf.h>
#include <linux/filter.h>
#include <net/page_pool/helpers.h>

#include "lan966x_main.h"
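
/* Buffer-fill callback used by the shared FDMA library for the RX ring:
 * allocate a page from the page pool and report its DMA address, offset by
 * XDP_PACKET_HEADROOM, as the hardware data pointer.
 */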
static int lan966x_fdma_rx_dataptr_cb(struct fdma *fdma, int dcb, int db,
                                      u64 *dataptr)
{
        struct lan966x *lan966x = (struct lan966x *)fdma->priv;
        struct lan966x_rx *rx = &lan966x->rx;
        struct page *page;

        page = page_pool_dev_alloc_pages(rx->page_pool);
        if (unlikely(!page))
                return -ENOMEM;

        rx->page[dcb][db] = page;
        *dataptr = page_pool_get_dma_addr(page) + XDP_PACKET_HEADROOM;

        return 0;
}

static int lan966x_fdma_tx_dataptr_cb(struct fdma *fdma, int dcb, int db,
                                      u64 *dataptr)
{
        struct lan966x *lan966x = (struct lan966x *)fdma->priv;

        *dataptr = lan966x->tx.dcbs_buf[dcb].dma_addr;

        return 0;
}

static int lan966x_fdma_xdp_tx_dataptr_cb(struct fdma *fdma, int dcb, int db,
                                          u64 *dataptr)
{
        struct lan966x *lan966x = (struct lan966x *)fdma->priv;

        *dataptr = lan966x->tx.dcbs_buf[dcb].dma_addr + XDP_PACKET_HEADROOM;

        return 0;
}

static int lan966x_fdma_channel_active(struct lan966x *lan966x)
{
        return lan_rd(lan966x, FDMA_CH_ACTIVE);
}

static void lan966x_fdma_rx_free_pages(struct lan966x_rx *rx)
{
        struct fdma *fdma = &rx->fdma;
        int i, j;

        for (i = 0; i < fdma->n_dcbs; ++i) {
                for (j = 0; j < fdma->n_dbs; ++j)
                        page_pool_put_full_page(rx->page_pool,
                                                rx->page[i][j], false);
        }
}
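
/* Recycle the page backing the current DCB/DB directly to the page pool;
 * used on the error and drop paths of the RX poll loop.
 */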
static void lan966x_fdma_rx_free_page(struct lan966x_rx *rx)
{
        struct fdma *fdma = &rx->fdma;
        struct page *page;

        page = rx->page[fdma->dcb_index][fdma->db_index];
        if (unlikely(!page))
                return;

        page_pool_recycle_direct(rx->page_pool, page);
}
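
/* (Re)create the RX page pool. When an XDP program is attached the pool is
 * DMA-mapped bidirectionally so XDP_TX can transmit straight out of RX
 * pages, and every port's xdp_rxq memory model is re-registered against the
 * new pool.
 */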
static int lan966x_fdma_rx_alloc_page_pool(struct lan966x_rx *rx)
{
        struct lan966x *lan966x = rx->lan966x;
        struct page_pool_params pp_params = {
                .order = rx->page_order,
                .flags = PP_FLAG_DMA_MAP | PP_FLAG_DMA_SYNC_DEV,
                .pool_size = rx->fdma.n_dcbs,
                .nid = NUMA_NO_NODE,
                .dev = lan966x->dev,
                .dma_dir = DMA_FROM_DEVICE,
                .offset = XDP_PACKET_HEADROOM,
                .max_len = rx->max_mtu -
                           SKB_DATA_ALIGN(sizeof(struct skb_shared_info)),
        };

        if (lan966x_xdp_present(lan966x))
                pp_params.dma_dir = DMA_BIDIRECTIONAL;

        rx->page_pool = page_pool_create(&pp_params);

        for (int i = 0; i < lan966x->num_phys_ports; ++i) {
                struct lan966x_port *port;

                if (!lan966x->ports[i])
                        continue;

                port = lan966x->ports[i];
                xdp_rxq_info_unreg_mem_model(&port->xdp_rxq);
                xdp_rxq_info_reg_mem_model(&port->xdp_rxq, MEM_TYPE_PAGE_POOL,
                                           rx->page_pool);
        }

        return PTR_ERR_OR_ZERO(rx->page_pool);
}
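
/* Allocate the RX page pool and the coherent DCB ring, then arm every DCB
 * so the channel has buffers to fill as soon as it is started.
 */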
static int lan966x_fdma_rx_alloc(struct lan966x_rx *rx)
{
        struct lan966x *lan966x = rx->lan966x;
        struct fdma *fdma = &rx->fdma;
        int err;

        if (lan966x_fdma_rx_alloc_page_pool(rx))
                return PTR_ERR(rx->page_pool);

        err = fdma_alloc_coherent(lan966x->dev, fdma);
        if (err)
                return err;

        fdma_dcbs_init(fdma, FDMA_DCB_INFO_DATAL(fdma->db_size),
                       FDMA_DCB_STATUS_INTR);

        return 0;
}

static void lan966x_fdma_rx_start(struct lan966x_rx *rx)
{
        struct lan966x *lan966x = rx->lan966x;
        struct fdma *fdma = &rx->fdma;
        u32 mask;

        /* When activating a channel, the address of the first DCB must be
         * written before the channel is activated.
         */
        lan_wr(lower_32_bits((u64)fdma->dma), lan966x,
               FDMA_DCB_LLP(fdma->channel_id));
        lan_wr(upper_32_bits((u64)fdma->dma), lan966x,
               FDMA_DCB_LLP1(fdma->channel_id));

        lan_wr(FDMA_CH_CFG_CH_DCB_DB_CNT_SET(fdma->n_dbs) |
               FDMA_CH_CFG_CH_INTR_DB_EOF_ONLY_SET(1) |
               FDMA_CH_CFG_CH_INJ_PORT_SET(0) |
               FDMA_CH_CFG_CH_MEM_SET(1),
               lan966x, FDMA_CH_CFG(fdma->channel_id));

        /* Start fdma */
        lan_rmw(FDMA_PORT_CTRL_XTR_STOP_SET(0),
                FDMA_PORT_CTRL_XTR_STOP,
                lan966x, FDMA_PORT_CTRL(0));

        /* Enable interrupts */
        mask = lan_rd(lan966x, FDMA_INTR_DB_ENA);
        mask = FDMA_INTR_DB_ENA_INTR_DB_ENA_GET(mask);
        mask |= BIT(fdma->channel_id);
        lan_rmw(FDMA_INTR_DB_ENA_INTR_DB_ENA_SET(mask),
                FDMA_INTR_DB_ENA_INTR_DB_ENA,
                lan966x, FDMA_INTR_DB_ENA);

        /* Activate the channel */
        lan_rmw(FDMA_CH_ACTIVATE_CH_ACTIVATE_SET(BIT(fdma->channel_id)),
                FDMA_CH_ACTIVATE_CH_ACTIVATE,
                lan966x, FDMA_CH_ACTIVATE);
}

static void lan966x_fdma_rx_disable(struct lan966x_rx *rx)
{
        struct lan966x *lan966x = rx->lan966x;
        struct fdma *fdma = &rx->fdma;
        u32 val;

        /* Disable the channel */
        lan_rmw(FDMA_CH_DISABLE_CH_DISABLE_SET(BIT(fdma->channel_id)),
                FDMA_CH_DISABLE_CH_DISABLE,
                lan966x, FDMA_CH_DISABLE);

        readx_poll_timeout_atomic(lan966x_fdma_channel_active, lan966x,
                                  val, !(val & BIT(fdma->channel_id)),
                                  READL_SLEEP_US, READL_TIMEOUT_US);

        lan_rmw(FDMA_CH_DB_DISCARD_DB_DISCARD_SET(BIT(fdma->channel_id)),
                FDMA_CH_DB_DISCARD_DB_DISCARD,
                lan966x, FDMA_CH_DB_DISCARD);
}

static void lan966x_fdma_rx_reload(struct lan966x_rx *rx)
{
        struct lan966x *lan966x = rx->lan966x;

        lan_rmw(FDMA_CH_RELOAD_CH_RELOAD_SET(BIT(rx->fdma.channel_id)),
                FDMA_CH_RELOAD_CH_RELOAD,
                lan966x, FDMA_CH_RELOAD);
}
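
/* Allocate the TX bookkeeping array and the coherent TX DCB ring. TX DCBs
 * start out empty (info and status zero) and are filled in at transmit time.
 */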
static int lan966x_fdma_tx_alloc(struct lan966x_tx *tx)
{
        struct lan966x *lan966x = tx->lan966x;
        struct fdma *fdma = &tx->fdma;
        int err;

        tx->dcbs_buf = kcalloc(fdma->n_dcbs, sizeof(struct lan966x_tx_dcb_buf),
                               GFP_KERNEL);
        if (!tx->dcbs_buf)
                return -ENOMEM;

        err = fdma_alloc_coherent(lan966x->dev, fdma);
        if (err)
                goto out;

        fdma_dcbs_init(fdma, 0, 0);

        return 0;

out:
        kfree(tx->dcbs_buf);

        return -ENOMEM;
}

static void lan966x_fdma_tx_free(struct lan966x_tx *tx)
{
        struct lan966x *lan966x = tx->lan966x;

        kfree(tx->dcbs_buf);
        fdma_free_coherent(lan966x->dev, &tx->fdma);
}

static void lan966x_fdma_tx_activate(struct lan966x_tx *tx)
{
        struct lan966x *lan966x = tx->lan966x;
        struct fdma *fdma = &tx->fdma;
        u32 mask;

        /* When activating a channel, the address of the first DCB must be
         * written before the channel is activated.
         */
        lan_wr(lower_32_bits((u64)fdma->dma), lan966x,
               FDMA_DCB_LLP(fdma->channel_id));
        lan_wr(upper_32_bits((u64)fdma->dma), lan966x,
               FDMA_DCB_LLP1(fdma->channel_id));

        lan_wr(FDMA_CH_CFG_CH_DCB_DB_CNT_SET(fdma->n_dbs) |
               FDMA_CH_CFG_CH_INTR_DB_EOF_ONLY_SET(1) |
               FDMA_CH_CFG_CH_INJ_PORT_SET(0) |
               FDMA_CH_CFG_CH_MEM_SET(1),
               lan966x, FDMA_CH_CFG(fdma->channel_id));

        /* Start fdma */
        lan_rmw(FDMA_PORT_CTRL_INJ_STOP_SET(0),
                FDMA_PORT_CTRL_INJ_STOP,
                lan966x, FDMA_PORT_CTRL(0));

        /* Enable interrupts */
        mask = lan_rd(lan966x, FDMA_INTR_DB_ENA);
        mask = FDMA_INTR_DB_ENA_INTR_DB_ENA_GET(mask);
        mask |= BIT(fdma->channel_id);
        lan_rmw(FDMA_INTR_DB_ENA_INTR_DB_ENA_SET(mask),
                FDMA_INTR_DB_ENA_INTR_DB_ENA,
                lan966x, FDMA_INTR_DB_ENA);

        /* Activate the channel */
        lan_rmw(FDMA_CH_ACTIVATE_CH_ACTIVATE_SET(BIT(fdma->channel_id)),
                FDMA_CH_ACTIVATE_CH_ACTIVATE,
                lan966x, FDMA_CH_ACTIVATE);
}

static void lan966x_fdma_tx_disable(struct lan966x_tx *tx)
{
        struct lan966x *lan966x = tx->lan966x;
        struct fdma *fdma = &tx->fdma;
        u32 val;

        /* Disable the channel */
        lan_rmw(FDMA_CH_DISABLE_CH_DISABLE_SET(BIT(fdma->channel_id)),
                FDMA_CH_DISABLE_CH_DISABLE,
                lan966x, FDMA_CH_DISABLE);

        readx_poll_timeout_atomic(lan966x_fdma_channel_active, lan966x,
                                  val, !(val & BIT(fdma->channel_id)),
                                  READL_SLEEP_US, READL_TIMEOUT_US);

        lan_rmw(FDMA_CH_DB_DISCARD_DB_DISCARD_SET(BIT(fdma->channel_id)),
                FDMA_CH_DB_DISCARD_DB_DISCARD,
                lan966x, FDMA_CH_DB_DISCARD);

        tx->activated = false;
}

static void lan966x_fdma_tx_reload(struct lan966x_tx *tx)
{
        struct lan966x *lan966x = tx->lan966x;

        /* Write the registers to reload the channel */
        lan_rmw(FDMA_CH_RELOAD_CH_RELOAD_SET(BIT(tx->fdma.channel_id)),
                FDMA_CH_RELOAD_CH_RELOAD,
                lan966x, FDMA_CH_RELOAD);
}

static void lan966x_fdma_wakeup_netdev(struct lan966x *lan966x)
{
        struct lan966x_port *port;
        int i;

        for (i = 0; i < lan966x->num_phys_ports; ++i) {
                port = lan966x->ports[i];
                if (!port)
                        continue;

                if (netif_queue_stopped(port->dev))
                        netif_wake_queue(port->dev);
        }
}

static void lan966x_fdma_stop_netdev(struct lan966x *lan966x)
{
        struct lan966x_port *port;
        int i;

        for (i = 0; i < lan966x->num_phys_ports; ++i) {
                port = lan966x->ports[i];
                if (!port)
                        continue;

                netif_stop_queue(port->dev);
        }
}
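
/* Reclaim completed TX buffers under tx_lock: update stats, unmap the DMA
 * address and hand each buffer back to its owner (consume the skb, bulk
 * return the xdp_frame, or recycle the XDP_TX page), then wake any queues
 * that were stopped for lack of free DCBs.
 */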
static void lan966x_fdma_tx_clear_buf(struct lan966x *lan966x, int weight)
{
        struct lan966x_tx *tx = &lan966x->tx;
        struct lan966x_rx *rx = &lan966x->rx;
        struct lan966x_tx_dcb_buf *dcb_buf;
        struct fdma *fdma = &tx->fdma;
        struct xdp_frame_bulk bq;
        unsigned long flags;
        bool clear = false;
        struct fdma_db *db;
        int i;

        xdp_frame_bulk_init(&bq);

        spin_lock_irqsave(&lan966x->tx_lock, flags);
        for (i = 0; i < fdma->n_dcbs; ++i) {
                dcb_buf = &tx->dcbs_buf[i];
                if (!dcb_buf->used)
                        continue;

                db = fdma_db_get(fdma, i, 0);
                if (!fdma_db_is_done(db))
                        continue;

                dcb_buf->dev->stats.tx_packets++;
                dcb_buf->dev->stats.tx_bytes += dcb_buf->len;

                dcb_buf->used = false;
                if (dcb_buf->use_skb) {
                        dma_unmap_single(lan966x->dev,
                                         dcb_buf->dma_addr,
                                         dcb_buf->len,
                                         DMA_TO_DEVICE);

                        if (!dcb_buf->ptp)
                                napi_consume_skb(dcb_buf->data.skb, weight);
                } else {
                        if (dcb_buf->xdp_ndo)
                                dma_unmap_single(lan966x->dev,
                                                 dcb_buf->dma_addr,
                                                 dcb_buf->len,
                                                 DMA_TO_DEVICE);

                        if (dcb_buf->xdp_ndo)
                                xdp_return_frame_bulk(dcb_buf->data.xdpf, &bq);
                        else
                                page_pool_recycle_direct(rx->page_pool,
                                                         dcb_buf->data.page);
                }

                clear = true;
        }

        xdp_flush_frame_bulk(&bq);

        if (clear)
                lan966x_fdma_wakeup_netdev(lan966x);

        spin_unlock_irqrestore(&lan966x->tx_lock, flags);
}
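
/* Inspect the frame in the next DB: sync it for the CPU, extract the source
 * port from the IFH and, when that port runs an XDP program, return the
 * program's verdict. The result is one of the FDMA_* codes consumed by the
 * NAPI poll loop.
 */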
static int lan966x_fdma_rx_check_frame(struct lan966x_rx *rx, u64 *src_port)
{
        struct lan966x *lan966x = rx->lan966x;
        struct fdma *fdma = &rx->fdma;
        struct lan966x_port *port;
        struct fdma_db *db;
        struct page *page;

        db = fdma_db_next_get(fdma);
        page = rx->page[fdma->dcb_index][fdma->db_index];
        if (unlikely(!page))
                return FDMA_ERROR;

        dma_sync_single_for_cpu(lan966x->dev,
                                (dma_addr_t)db->dataptr + XDP_PACKET_HEADROOM,
                                FDMA_DCB_STATUS_BLOCKL(db->status),
                                DMA_FROM_DEVICE);

        lan966x_ifh_get_src_port(page_address(page) + XDP_PACKET_HEADROOM,
                                 src_port);
        if (WARN_ON(*src_port >= lan966x->num_phys_ports))
                return FDMA_ERROR;

        port = lan966x->ports[*src_port];
        if (!lan966x_xdp_port_present(port))
                return FDMA_PASS;

        return lan966x_xdp_run(port, page, FDMA_DCB_STATUS_BLOCKL(db->status));
}
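
/* Build an skb around the received page. The IFH in front of the frame data
 * provides the RX timestamp and is pulled off before the frame is passed up;
 * on failure the page is returned to the page pool.
 */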
static struct sk_buff *lan966x_fdma_rx_get_frame(struct lan966x_rx *rx,
                                                 u64 src_port)
{
        struct lan966x *lan966x = rx->lan966x;
        struct fdma *fdma = &rx->fdma;
        struct sk_buff *skb;
        struct fdma_db *db;
        struct page *page;
        u64 timestamp;

        /* Get the received frame and unmap it */
        db = fdma_db_next_get(fdma);
        page = rx->page[fdma->dcb_index][fdma->db_index];

        skb = build_skb(page_address(page), fdma->db_size);
        if (unlikely(!skb))
                goto free_page;

        skb_mark_for_recycle(skb);

        skb_reserve(skb, XDP_PACKET_HEADROOM);
        skb_put(skb, FDMA_DCB_STATUS_BLOCKL(db->status));

        lan966x_ifh_get_timestamp(skb->data, &timestamp);

        skb->dev = lan966x->ports[src_port]->dev;
        skb_pull(skb, IFH_LEN_BYTES);

        if (likely(!(skb->dev->features & NETIF_F_RXFCS)))
                skb_trim(skb, skb->len - ETH_FCS_LEN);

        lan966x_ptp_rxtstamp(lan966x, skb, src_port, timestamp);
        skb->protocol = eth_type_trans(skb, skb->dev);

        if (lan966x->bridge_mask & BIT(src_port)) {
                skb->offload_fwd_mark = 1;

                skb_reset_network_header(skb);
                if (!lan966x_hw_offload(lan966x, src_port, skb))
                        skb->offload_fwd_mark = 0;
        }

        skb->dev->stats.rx_bytes += skb->len;
        skb->dev->stats.rx_packets++;

        return skb;

free_page:
        page_pool_recycle_direct(rx->page_pool, page);

        return NULL;
}
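
/* NAPI poll: reclaim TX first, then receive up to @weight frames, applying
 * the per-frame XDP verdict. All DCBs consumed in this pass are re-armed
 * with fresh pages and the channel is reloaded; the interrupt is unmasked
 * again only once the budget is not exhausted.
 */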
static int lan966x_fdma_napi_poll(struct napi_struct *napi, int weight)
{
        struct lan966x *lan966x = container_of(napi, struct lan966x, napi);
        struct lan966x_rx *rx = &lan966x->rx;
        int old_dcb, dcb_reload, counter = 0;
        struct fdma *fdma = &rx->fdma;
        bool redirect = false;
        struct sk_buff *skb;
        u64 src_port;

        dcb_reload = fdma->dcb_index;

        lan966x_fdma_tx_clear_buf(lan966x, weight);

        /* Get all received skb */
        while (counter < weight) {
                if (!fdma_has_frames(fdma))
                        break;

                counter++;

                switch (lan966x_fdma_rx_check_frame(rx, &src_port)) {
                case FDMA_PASS:
                        break;
                case FDMA_ERROR:
                        lan966x_fdma_rx_free_page(rx);
                        fdma_dcb_advance(fdma);
                        goto allocate_new;
                case FDMA_REDIRECT:
                        redirect = true;
                        fallthrough;
                case FDMA_TX:
                        fdma_dcb_advance(fdma);
                        continue;
                case FDMA_DROP:
                        lan966x_fdma_rx_free_page(rx);
                        fdma_dcb_advance(fdma);
                        continue;
                }

                skb = lan966x_fdma_rx_get_frame(rx, src_port);
                fdma_dcb_advance(fdma);
                if (!skb)
                        goto allocate_new;

                napi_gro_receive(&lan966x->napi, skb);
        }

allocate_new:
        /* Allocate new pages and map them */
        while (dcb_reload != fdma->dcb_index) {
                old_dcb = dcb_reload;
                dcb_reload++;
                dcb_reload &= fdma->n_dcbs - 1;

                fdma_dcb_add(fdma, old_dcb, FDMA_DCB_INFO_DATAL(fdma->db_size),
                             FDMA_DCB_STATUS_INTR);

                lan966x_fdma_rx_reload(rx);
        }

        if (redirect)
                xdp_do_flush();

        if (counter < weight && napi_complete_done(napi, counter))
                lan_wr(0xff, lan966x, FDMA_INTR_DB_ENA);

        return counter;
}
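
/* Top half: acknowledge DB interrupts, mask them and defer the work to
 * NAPI; decode, warn about and clear any reported FDMA error status.
 */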
irqreturn_t lan966x_fdma_irq_handler(int irq, void *args)
{
        struct lan966x *lan966x = args;
        u32 db, err, err_type;

        db = lan_rd(lan966x, FDMA_INTR_DB);
        err = lan_rd(lan966x, FDMA_INTR_ERR);

        if (db) {
                lan_wr(0, lan966x, FDMA_INTR_DB_ENA);
                lan_wr(db, lan966x, FDMA_INTR_DB);

                napi_schedule(&lan966x->napi);
        }

        if (err) {
                err_type = lan_rd(lan966x, FDMA_ERRORS);

                WARN(1, "Unexpected error: %d, error_type: %d\n", err, err_type);

                lan_wr(err, lan966x, FDMA_INTR_ERR);
                lan_wr(err_type, lan966x, FDMA_ERRORS);
        }

        return IRQ_HANDLED;
}

static int lan966x_fdma_get_next_dcb(struct lan966x_tx *tx)
{
        struct lan966x_tx_dcb_buf *dcb_buf;
        struct fdma *fdma = &tx->fdma;
        int i;

        for (i = 0; i < fdma->n_dcbs; ++i) {
                dcb_buf = &tx->dcbs_buf[i];
                if (!dcb_buf->used &&
                    !fdma_is_last(&tx->fdma, &tx->fdma.dcbs[i]))
                        return i;
        }

        return -1;
}
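
/* The first transmission has to activate the channel; after that it is
 * enough to reload it so the hardware picks up the newly chained DCBs.
 */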
static void lan966x_fdma_tx_start(struct lan966x_tx *tx)
{
        struct lan966x *lan966x = tx->lan966x;

        if (likely(lan966x->tx.activated)) {
                lan966x_fdma_tx_reload(tx);
        } else {
                /* This is the first frame, so just activate the channel */
                lan966x->tx.activated = true;
                lan966x_fdma_tx_activate(tx);
        }
}
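
/* Transmit an XDP buffer. A @len of zero means @ptr is an xdp_frame coming
 * from ndo_xdp_xmit and must be DMA-mapped here; otherwise @ptr is a
 * page-pool page doing XDP_TX, which only needs a sync for the device.
 * Either way an IFH is built in the headroom in front of the frame data.
 */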
int lan966x_fdma_xmit_xdpf(struct lan966x_port *port, void *ptr, u32 len)
{
        struct lan966x *lan966x = port->lan966x;
        struct lan966x_tx_dcb_buf *next_dcb_buf;
        struct lan966x_tx *tx = &lan966x->tx;
        struct xdp_frame *xdpf;
        dma_addr_t dma_addr;
        struct page *page;
        int next_to_use;
        __be32 *ifh;
        int ret = 0;

        spin_lock(&lan966x->tx_lock);

        /* Get next index */
        next_to_use = lan966x_fdma_get_next_dcb(tx);
        if (next_to_use < 0) {
                netif_stop_queue(port->dev);
                ret = NETDEV_TX_BUSY;
                goto out;
        }

        /* Get the next buffer */
        next_dcb_buf = &tx->dcbs_buf[next_to_use];

        /* Generate new IFH */
        if (!len) {
                xdpf = ptr;

                if (xdpf->headroom < IFH_LEN_BYTES) {
                        ret = NETDEV_TX_OK;
                        goto out;
                }

                ifh = xdpf->data - IFH_LEN_BYTES;
                memset(ifh, 0x0, sizeof(__be32) * IFH_LEN);
                lan966x_ifh_set_bypass(ifh, 1);
                lan966x_ifh_set_port(ifh, BIT_ULL(port->chip_port));

                dma_addr = dma_map_single(lan966x->dev,
                                          xdpf->data - IFH_LEN_BYTES,
                                          xdpf->len + IFH_LEN_BYTES,
                                          DMA_TO_DEVICE);
                if (dma_mapping_error(lan966x->dev, dma_addr)) {
                        ret = NETDEV_TX_OK;
                        goto out;
                }

                next_dcb_buf->data.xdpf = xdpf;
                next_dcb_buf->len = xdpf->len + IFH_LEN_BYTES;
        } else {
                page = ptr;

                ifh = page_address(page) + XDP_PACKET_HEADROOM;
                memset(ifh, 0x0, sizeof(__be32) * IFH_LEN);
                lan966x_ifh_set_bypass(ifh, 1);
                lan966x_ifh_set_port(ifh, BIT_ULL(port->chip_port));

                dma_addr = page_pool_get_dma_addr(page);
                dma_sync_single_for_device(lan966x->dev,
                                           dma_addr + XDP_PACKET_HEADROOM,
                                           len + IFH_LEN_BYTES,
                                           DMA_TO_DEVICE);

                next_dcb_buf->data.page = page;
                next_dcb_buf->len = len + IFH_LEN_BYTES;
        }

        /* Fill up the buffer */
        next_dcb_buf->use_skb = false;
        next_dcb_buf->xdp_ndo = !len;
        next_dcb_buf->dma_addr = dma_addr;
        next_dcb_buf->used = true;
        next_dcb_buf->ptp = false;
        next_dcb_buf->dev = port->dev;

        __fdma_dcb_add(&tx->fdma,
                       next_to_use,
                       0,
                       FDMA_DCB_STATUS_INTR |
                       FDMA_DCB_STATUS_SOF |
                       FDMA_DCB_STATUS_EOF |
                       FDMA_DCB_STATUS_BLOCKO(0) |
                       FDMA_DCB_STATUS_BLOCKL(next_dcb_buf->len),
                       &fdma_nextptr_cb,
                       &lan966x_fdma_xdp_tx_dataptr_cb);

        /* Start the transmission */
        lan966x_fdma_tx_start(tx);

out:
        spin_unlock(&lan966x->tx_lock);

        return ret;
}
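
/* ndo_start_xmit path: reserve a free DCB, pad the frame to the minimum
 * Ethernet size, make room for the IFH in front and the FCS at the end,
 * then map the buffer and chain it into the TX ring.
 */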
int lan966x_fdma_xmit(struct sk_buff *skb, __be32 *ifh, struct net_device *dev)
{
        struct lan966x_port *port = netdev_priv(dev);
        struct lan966x *lan966x = port->lan966x;
        struct lan966x_tx_dcb_buf *next_dcb_buf;
        struct lan966x_tx *tx = &lan966x->tx;
        int needed_headroom;
        int needed_tailroom;
        dma_addr_t dma_addr;
        int next_to_use;
        int err;

        /* Get next index */
        next_to_use = lan966x_fdma_get_next_dcb(tx);
        if (next_to_use < 0) {
                netif_stop_queue(dev);
                return NETDEV_TX_BUSY;
        }

        if (skb_put_padto(skb, ETH_ZLEN)) {
                dev->stats.tx_dropped++;
                return NETDEV_TX_OK;
        }

        /* skb processing */
        needed_headroom = max_t(int, IFH_LEN_BYTES - skb_headroom(skb), 0);
        needed_tailroom = max_t(int, ETH_FCS_LEN - skb_tailroom(skb), 0);
        if (needed_headroom || needed_tailroom || skb_header_cloned(skb)) {
                err = pskb_expand_head(skb, needed_headroom, needed_tailroom,
                                       GFP_ATOMIC);
                if (unlikely(err)) {
                        dev->stats.tx_dropped++;
                        err = NETDEV_TX_OK;
                        goto release;
                }
        }

        skb_tx_timestamp(skb);
        skb_push(skb, IFH_LEN_BYTES);
        memcpy(skb->data, ifh, IFH_LEN_BYTES);
        skb_put(skb, 4);

        dma_addr = dma_map_single(lan966x->dev, skb->data, skb->len,
                                  DMA_TO_DEVICE);
        if (dma_mapping_error(lan966x->dev, dma_addr)) {
                dev->stats.tx_dropped++;
                err = NETDEV_TX_OK;
                goto release;
        }

        /* Fill up the buffer */
        next_dcb_buf = &tx->dcbs_buf[next_to_use];
        next_dcb_buf->use_skb = true;
        next_dcb_buf->data.skb = skb;
        next_dcb_buf->xdp_ndo = false;
        next_dcb_buf->len = skb->len;
        next_dcb_buf->dma_addr = dma_addr;
        next_dcb_buf->used = true;
        next_dcb_buf->ptp = false;
        next_dcb_buf->dev = dev;

        fdma_dcb_add(&tx->fdma,
                     next_to_use,
                     0,
                     FDMA_DCB_STATUS_INTR |
                     FDMA_DCB_STATUS_SOF |
                     FDMA_DCB_STATUS_EOF |
                     FDMA_DCB_STATUS_BLOCKO(0) |
                     FDMA_DCB_STATUS_BLOCKL(skb->len));

        if (skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP &&
            LAN966X_SKB_CB(skb)->rew_op == IFH_REW_OP_TWO_STEP_PTP)
                next_dcb_buf->ptp = true;

        /* Start the transmission */
        lan966x_fdma_tx_start(tx);

        return NETDEV_TX_OK;

release:
        if (skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP &&
            LAN966X_SKB_CB(skb)->rew_op == IFH_REW_OP_TWO_STEP_PTP)
                lan966x_ptp_txtstamp_release(port, skb);

        dev_kfree_skb_any(skb);
        return err;
}

static int lan966x_fdma_get_max_mtu(struct lan966x *lan966x)
{
        int max_mtu = 0;
        int i;

        for (i = 0; i < lan966x->num_phys_ports; ++i) {
                struct lan966x_port *port;
                int mtu;

                port = lan966x->ports[i];
                if (!port)
                        continue;

                mtu = lan_rd(lan966x, DEV_MAC_MAXLEN_CFG(port->chip_port));
                if (mtu > max_mtu)
                        max_mtu = mtu;
        }

        return max_mtu;
}

static int lan966x_qsys_sw_status(struct lan966x *lan966x)
{
        return lan_rd(lan966x, QSYS_SW_STATUS(CPU_PORT));
}
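
/* Swap in an RX ring and page pool sized for @new_mtu. The old ring and
 * pool are freed only after the new ones are running, so the previous
 * state can be restored if allocation fails.
 */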
static int lan966x_fdma_reload(struct lan966x *lan966x, int new_mtu)
{
        struct page_pool *page_pool;
        struct fdma fdma_rx_old;
        int err;

        /* Store these for later to free them */
        memcpy(&fdma_rx_old, &lan966x->rx.fdma, sizeof(struct fdma));
        page_pool = lan966x->rx.page_pool;

        napi_synchronize(&lan966x->napi);
        napi_disable(&lan966x->napi);
        lan966x_fdma_stop_netdev(lan966x);

        lan966x_fdma_rx_disable(&lan966x->rx);
        lan966x_fdma_rx_free_pages(&lan966x->rx);
        lan966x->rx.page_order = round_up(new_mtu, PAGE_SIZE) / PAGE_SIZE - 1;
        lan966x->rx.max_mtu = new_mtu;
        err = lan966x_fdma_rx_alloc(&lan966x->rx);
        if (err)
                goto restore;
        lan966x_fdma_rx_start(&lan966x->rx);

        fdma_free_coherent(lan966x->dev, &fdma_rx_old);

        page_pool_destroy(page_pool);

        lan966x_fdma_wakeup_netdev(lan966x);
        napi_enable(&lan966x->napi);

        return err;
restore:
        lan966x->rx.page_pool = page_pool;
        memcpy(&lan966x->rx.fdma, &fdma_rx_old, sizeof(struct fdma));
        lan966x_fdma_rx_start(&lan966x->rx);

        return err;
}

static int lan966x_fdma_get_max_frame(struct lan966x *lan966x)
{
        return lan966x_fdma_get_max_mtu(lan966x) +
               IFH_LEN_BYTES +
               SKB_DATA_ALIGN(sizeof(struct skb_shared_info)) +
               VLAN_HLEN * 2;
}

static int __lan966x_fdma_reload(struct lan966x *lan966x, int max_mtu)
{
        int err;
        u32 val;

        /* Disable the CPU port */
        lan_rmw(QSYS_SW_PORT_MODE_PORT_ENA_SET(0),
                QSYS_SW_PORT_MODE_PORT_ENA,
                lan966x, QSYS_SW_PORT_MODE(CPU_PORT));

        /* Flush the CPU queues */
        readx_poll_timeout(lan966x_qsys_sw_status, lan966x,
                           val, !(QSYS_SW_STATUS_EQ_AVAIL_GET(val)),
                           READL_SLEEP_US, READL_TIMEOUT_US);

        /* Add a sleep in case there are frames between the queues and the CPU
         * port
         */
        usleep_range(1000, 2000);

        err = lan966x_fdma_reload(lan966x, max_mtu);

        /* Enable back the CPU port */
        lan_rmw(QSYS_SW_PORT_MODE_PORT_ENA_SET(1),
                QSYS_SW_PORT_MODE_PORT_ENA,
                lan966x, QSYS_SW_PORT_MODE(CPU_PORT));

        return err;
}
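
/* RX buffers must hold the worst-case frame: the largest port MTU plus the
 * IFH, the skb_shared_info tail and room for two VLAN tags. Only reload
 * when that size actually changes.
 */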
int lan966x_fdma_change_mtu(struct lan966x *lan966x)
{
        int max_mtu;

        max_mtu = lan966x_fdma_get_max_frame(lan966x);
        if (max_mtu == lan966x->rx.max_mtu)
                return 0;

        return __lan966x_fdma_reload(lan966x, max_mtu);
}

int lan966x_fdma_reload_page_pool(struct lan966x *lan966x)
{
        int max_mtu;

        max_mtu = lan966x_fdma_get_max_frame(lan966x);
        return __lan966x_fdma_reload(lan966x, max_mtu);
}

void lan966x_fdma_netdev_init(struct lan966x *lan966x, struct net_device *dev)
{
        if (lan966x->fdma_ndev)
                return;

        lan966x->fdma_ndev = dev;
        netif_napi_add(dev, &lan966x->napi, lan966x_fdma_napi_poll);
        napi_enable(&lan966x->napi);
}

void lan966x_fdma_netdev_deinit(struct lan966x *lan966x, struct net_device *dev)
{
        if (lan966x->fdma_ndev == dev) {
                netif_napi_del(&lan966x->napi);
                lan966x->fdma_ndev = NULL;
        }
}
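
/* One-time FDMA bring-up: describe both channels to the shared fdma library
 * (ring geometry, buffer size, dataptr callbacks), allocate the rings and
 * start RX. The TX channel is activated lazily on the first transmission.
 */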
int lan966x_fdma_init(struct lan966x *lan966x)
{
        int err;

        if (!lan966x->fdma)
                return 0;

        lan966x->rx.lan966x = lan966x;
        lan966x->rx.fdma.channel_id = FDMA_XTR_CHANNEL;
        lan966x->rx.fdma.n_dcbs = FDMA_DCB_MAX;
        lan966x->rx.fdma.n_dbs = FDMA_RX_DCB_MAX_DBS;
        lan966x->rx.fdma.priv = lan966x;
        lan966x->rx.fdma.size = fdma_get_size(&lan966x->rx.fdma);
        lan966x->rx.fdma.db_size = PAGE_SIZE << lan966x->rx.page_order;
        lan966x->rx.fdma.ops.nextptr_cb = &fdma_nextptr_cb;
        lan966x->rx.fdma.ops.dataptr_cb = &lan966x_fdma_rx_dataptr_cb;
        lan966x->rx.max_mtu = lan966x_fdma_get_max_frame(lan966x);
        lan966x->tx.lan966x = lan966x;
        lan966x->tx.fdma.channel_id = FDMA_INJ_CHANNEL;
        lan966x->tx.fdma.n_dcbs = FDMA_DCB_MAX;
        lan966x->tx.fdma.n_dbs = FDMA_TX_DCB_MAX_DBS;
        lan966x->tx.fdma.priv = lan966x;
        lan966x->tx.fdma.size = fdma_get_size(&lan966x->tx.fdma);
        lan966x->tx.fdma.db_size = PAGE_SIZE << lan966x->rx.page_order;
        lan966x->tx.fdma.ops.nextptr_cb = &fdma_nextptr_cb;
        lan966x->tx.fdma.ops.dataptr_cb = &lan966x_fdma_tx_dataptr_cb;

        err = lan966x_fdma_rx_alloc(&lan966x->rx);
        if (err)
                return err;

        err = lan966x_fdma_tx_alloc(&lan966x->tx);
        if (err) {
                fdma_free_coherent(lan966x->dev, &lan966x->rx.fdma);
                return err;
        }

        lan966x_fdma_rx_start(&lan966x->rx);

        return 0;
}

void lan966x_fdma_deinit(struct lan966x *lan966x)
{
        if (!lan966x->fdma)
                return;

        lan966x_fdma_rx_disable(&lan966x->rx);
        lan966x_fdma_tx_disable(&lan966x->tx);

        napi_synchronize(&lan966x->napi);
        napi_disable(&lan966x->napi);

        lan966x_fdma_rx_free_pages(&lan966x->rx);
        fdma_free_coherent(lan966x->dev, &lan966x->rx.fdma);
        page_pool_destroy(lan966x->rx.page_pool);
        lan966x_fdma_tx_free(&lan966x->tx);
}