// SPDX-License-Identifier: GPL-2.0+

#include <linux/bpf.h>
#include <linux/filter.h>
#include <net/page_pool/helpers.h>

#include "lan966x_main.h"

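/* FDMA dataptr callback for the RX path: take a fresh page from the page
 * pool and report its DMA address, offset by XDP_PACKET_HEADROOM so an
 * attached XDP program has headroom in front of the received frame.
 */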
static int lan966x_fdma_rx_dataptr_cb(struct fdma *fdma, int dcb, int db,
                                      u64 *dataptr)
{
        struct lan966x *lan966x = (struct lan966x *)fdma->priv;
        struct lan966x_rx *rx = &lan966x->rx;
        struct page *page;

        page = page_pool_dev_alloc_pages(rx->page_pool);
        if (unlikely(!page))
                return -ENOMEM;

        rx->page[dcb][db] = page;
        *dataptr = page_pool_get_dma_addr(page) + XDP_PACKET_HEADROOM;

        return 0;
}

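/* FDMA dataptr callback for the TX path: the buffer was already mapped by
 * the xmit path, so just report the DMA address stored in the DCB buffer.
 */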
static int lan966x_fdma_tx_dataptr_cb(struct fdma *fdma, int dcb, int db,
                                      u64 *dataptr)
{
        struct lan966x *lan966x = (struct lan966x *)fdma->priv;

        *dataptr = lan966x->tx.dcbs_buf[dcb].dma_addr;

        return 0;
}

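/* FDMA dataptr callback for the XDP TX path: the data is located
 * XDP_PACKET_HEADROOM bytes into the buffer whose DMA address was stored
 * at xmit time.
 */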
static int lan966x_fdma_xdp_tx_dataptr_cb(struct fdma *fdma, int dcb, int db,
                                          u64 *dataptr)
{
        struct lan966x *lan966x = (struct lan966x *)fdma->priv;

        *dataptr = lan966x->tx.dcbs_buf[dcb].dma_addr + XDP_PACKET_HEADROOM;

        return 0;
}

static int lan966x_fdma_channel_active(struct lan966x *lan966x)
{
        return lan_rd(lan966x, FDMA_CH_ACTIVE);
}

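/* Return every RX page to the page pool, used when tearing down or
 * reloading the RX channel.
 */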
static void lan966x_fdma_rx_free_pages(struct lan966x_rx *rx)
{
        struct fdma *fdma = &rx->fdma;
        int i, j;

        for (i = 0; i < fdma->n_dcbs; ++i) {
                for (j = 0; j < fdma->n_dbs; ++j)
                        page_pool_put_full_page(rx->page_pool,
                                                rx->page[i][j], false);
        }
}

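/* Recycle the page at the current DCB/DB position back into the page pool,
 * e.g. after an RX error or an XDP_DROP verdict.
 */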
static void lan966x_fdma_rx_free_page(struct lan966x_rx *rx)
{
        struct fdma *fdma = &rx->fdma;
        struct page *page;

        page = rx->page[fdma->dcb_index][fdma->db_index];
        if (unlikely(!page))
                return;

        page_pool_recycle_direct(rx->page_pool, page);
}

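/* Create the RX page pool and register it as the memory model of every
 * port's XDP RX queue. When an XDP program is attached, the pages are
 * mapped bidirectionally so XDP_TX can transmit straight out of them.
 */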
static int lan966x_fdma_rx_alloc_page_pool(struct lan966x_rx *rx)
{
        struct lan966x *lan966x = rx->lan966x;
        struct page_pool_params pp_params = {
                .order = rx->page_order,
                .flags = PP_FLAG_DMA_MAP | PP_FLAG_DMA_SYNC_DEV,
                .pool_size = rx->fdma.n_dcbs,
                .nid = NUMA_NO_NODE,
                .dev = lan966x->dev,
                .dma_dir = DMA_FROM_DEVICE,
                .offset = XDP_PACKET_HEADROOM,
                .max_len = rx->max_mtu -
                           SKB_DATA_ALIGN(sizeof(struct skb_shared_info)),
        };

        if (lan966x_xdp_present(lan966x))
                pp_params.dma_dir = DMA_BIDIRECTIONAL;

        rx->page_pool = page_pool_create(&pp_params);

        for (int i = 0; i < lan966x->num_phys_ports; ++i) {
                struct lan966x_port *port;

                if (!lan966x->ports[i])
                        continue;

                port = lan966x->ports[i];
                xdp_rxq_info_unreg_mem_model(&port->xdp_rxq);
                xdp_rxq_info_reg_mem_model(&port->xdp_rxq, MEM_TYPE_PAGE_POOL,
                                           rx->page_pool);
        }

        return PTR_ERR_OR_ZERO(rx->page_pool);
}

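/* Allocate the RX page pool and the coherent DCB ring, then initialize all
 * DCBs so the channel is ready to be started.
 */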
static int lan966x_fdma_rx_alloc(struct lan966x_rx *rx)
{
        struct lan966x *lan966x = rx->lan966x;
        struct fdma *fdma = &rx->fdma;
        int err;

        if (lan966x_fdma_rx_alloc_page_pool(rx))
                return PTR_ERR(rx->page_pool);

        err = fdma_alloc_coherent(lan966x->dev, fdma);
        if (err)
                return err;

        fdma_dcbs_init(fdma, FDMA_DCB_INFO_DATAL(fdma->db_size),
                       FDMA_DCB_STATUS_INTR);

        return 0;
}

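/* Program the DCB list address, the channel configuration and the interrupt
 * mask, then activate the RX (extraction) channel.
 */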
static void lan966x_fdma_rx_start(struct lan966x_rx *rx)
{
        struct lan966x *lan966x = rx->lan966x;
        struct fdma *fdma = &rx->fdma;
        u32 mask;

        /* When activating a channel, the address of the first DCB must be
         * written before the channel is activated
         */
        lan_wr(lower_32_bits((u64)fdma->dma), lan966x,
               FDMA_DCB_LLP(fdma->channel_id));
        lan_wr(upper_32_bits((u64)fdma->dma), lan966x,
               FDMA_DCB_LLP1(fdma->channel_id));

        lan_wr(FDMA_CH_CFG_CH_DCB_DB_CNT_SET(fdma->n_dbs) |
               FDMA_CH_CFG_CH_INTR_DB_EOF_ONLY_SET(1) |
               FDMA_CH_CFG_CH_INJ_PORT_SET(0) |
               FDMA_CH_CFG_CH_MEM_SET(1),
               lan966x, FDMA_CH_CFG(fdma->channel_id));

        /* Start fdma */
        lan_rmw(FDMA_PORT_CTRL_XTR_STOP_SET(0),
                FDMA_PORT_CTRL_XTR_STOP,
                lan966x, FDMA_PORT_CTRL(0));

        /* Enable interrupts */
        mask = lan_rd(lan966x, FDMA_INTR_DB_ENA);
        mask = FDMA_INTR_DB_ENA_INTR_DB_ENA_GET(mask);
        mask |= BIT(fdma->channel_id);
        lan_rmw(FDMA_INTR_DB_ENA_INTR_DB_ENA_SET(mask),
                FDMA_INTR_DB_ENA_INTR_DB_ENA,
                lan966x, FDMA_INTR_DB_ENA);

        /* Activate the channel */
        lan_rmw(FDMA_CH_ACTIVATE_CH_ACTIVATE_SET(BIT(fdma->channel_id)),
                FDMA_CH_ACTIVATE_CH_ACTIVATE,
                lan966x, FDMA_CH_ACTIVATE);
}

static void lan966x_fdma_rx_disable(struct lan966x_rx *rx)
{
        struct lan966x *lan966x = rx->lan966x;
        struct fdma *fdma = &rx->fdma;
        u32 val;

        /* Disable the channel */
        lan_rmw(FDMA_CH_DISABLE_CH_DISABLE_SET(BIT(fdma->channel_id)),
                FDMA_CH_DISABLE_CH_DISABLE,
                lan966x, FDMA_CH_DISABLE);

        readx_poll_timeout_atomic(lan966x_fdma_channel_active, lan966x,
                                  val, !(val & BIT(fdma->channel_id)),
                                  READL_SLEEP_US, READL_TIMEOUT_US);

        lan_rmw(FDMA_CH_DB_DISCARD_DB_DISCARD_SET(BIT(fdma->channel_id)),
                FDMA_CH_DB_DISCARD_DB_DISCARD,
                lan966x, FDMA_CH_DB_DISCARD);
}

static void lan966x_fdma_rx_reload(struct lan966x_rx *rx)
{
        struct lan966x *lan966x = rx->lan966x;

        lan_rmw(FDMA_CH_RELOAD_CH_RELOAD_SET(BIT(rx->fdma.channel_id)),
                FDMA_CH_RELOAD_CH_RELOAD,
                lan966x, FDMA_CH_RELOAD);
}

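/* Allocate the per-DCB bookkeeping array and the coherent TX DCB ring. */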
static int lan966x_fdma_tx_alloc(struct lan966x_tx *tx)
{
        struct lan966x *lan966x = tx->lan966x;
        struct fdma *fdma = &tx->fdma;
        int err;

        tx->dcbs_buf = kcalloc(fdma->n_dcbs, sizeof(struct lan966x_tx_dcb_buf),
                               GFP_KERNEL);
        if (!tx->dcbs_buf)
                return -ENOMEM;

        err = fdma_alloc_coherent(lan966x->dev, fdma);
        if (err)
                goto out;

        fdma_dcbs_init(fdma, 0, 0);

        return 0;

out:
        kfree(tx->dcbs_buf);
        return -ENOMEM;
}

static void lan966x_fdma_tx_free(struct lan966x_tx *tx)
{
        struct lan966x *lan966x = tx->lan966x;

        kfree(tx->dcbs_buf);
        fdma_free_coherent(lan966x->dev, &tx->fdma);
}

static void lan966x_fdma_tx_activate(struct lan966x_tx *tx)
{
        struct lan966x *lan966x = tx->lan966x;
        struct fdma *fdma = &tx->fdma;
        u32 mask;

        /* When activating a channel, the address of the first DCB must be
         * written before the channel is activated
         */
        lan_wr(lower_32_bits((u64)fdma->dma), lan966x,
               FDMA_DCB_LLP(fdma->channel_id));
        lan_wr(upper_32_bits((u64)fdma->dma), lan966x,
               FDMA_DCB_LLP1(fdma->channel_id));

        lan_wr(FDMA_CH_CFG_CH_DCB_DB_CNT_SET(fdma->n_dbs) |
               FDMA_CH_CFG_CH_INTR_DB_EOF_ONLY_SET(1) |
               FDMA_CH_CFG_CH_INJ_PORT_SET(0) |
               FDMA_CH_CFG_CH_MEM_SET(1),
               lan966x, FDMA_CH_CFG(fdma->channel_id));

        /* Start fdma */
        lan_rmw(FDMA_PORT_CTRL_INJ_STOP_SET(0),
                FDMA_PORT_CTRL_INJ_STOP,
                lan966x, FDMA_PORT_CTRL(0));

        /* Enable interrupts */
        mask = lan_rd(lan966x, FDMA_INTR_DB_ENA);
        mask = FDMA_INTR_DB_ENA_INTR_DB_ENA_GET(mask);
        mask |= BIT(fdma->channel_id);
        lan_rmw(FDMA_INTR_DB_ENA_INTR_DB_ENA_SET(mask),
                FDMA_INTR_DB_ENA_INTR_DB_ENA,
                lan966x, FDMA_INTR_DB_ENA);

        /* Activate the channel */
        lan_rmw(FDMA_CH_ACTIVATE_CH_ACTIVATE_SET(BIT(fdma->channel_id)),
                FDMA_CH_ACTIVATE_CH_ACTIVATE,
                lan966x, FDMA_CH_ACTIVATE);
}

static void lan966x_fdma_tx_disable(struct lan966x_tx *tx)
{
        struct lan966x *lan966x = tx->lan966x;
        struct fdma *fdma = &tx->fdma;
        u32 val;

        /* Disable the channel */
        lan_rmw(FDMA_CH_DISABLE_CH_DISABLE_SET(BIT(fdma->channel_id)),
                FDMA_CH_DISABLE_CH_DISABLE,
                lan966x, FDMA_CH_DISABLE);

        readx_poll_timeout_atomic(lan966x_fdma_channel_active, lan966x,
                                  val, !(val & BIT(fdma->channel_id)),
                                  READL_SLEEP_US, READL_TIMEOUT_US);

        lan_rmw(FDMA_CH_DB_DISCARD_DB_DISCARD_SET(BIT(fdma->channel_id)),
                FDMA_CH_DB_DISCARD_DB_DISCARD,
                lan966x, FDMA_CH_DB_DISCARD);

        tx->activated = false;
}

static void lan966x_fdma_tx_reload(struct lan966x_tx *tx)
{
        struct lan966x *lan966x = tx->lan966x;

        /* Write the registers to reload the channel */
        lan_rmw(FDMA_CH_RELOAD_CH_RELOAD_SET(BIT(tx->fdma.channel_id)),
                FDMA_CH_RELOAD_CH_RELOAD,
                lan966x, FDMA_CH_RELOAD);
}

static void lan966x_fdma_wakeup_netdev(struct lan966x *lan966x)
{
        struct lan966x_port *port;
        int i;

        for (i = 0; i < lan966x->num_phys_ports; ++i) {
                port = lan966x->ports[i];
                if (!port)
                        continue;

                if (netif_queue_stopped(port->dev))
                        netif_wake_queue(port->dev);
        }
}

static void lan966x_fdma_stop_netdev(struct lan966x *lan966x)
{
        struct lan966x_port *port;
        int i;

        for (i = 0; i < lan966x->num_phys_ports; ++i) {
                port = lan966x->ports[i];
                if (!port)
                        continue;

                netif_stop_queue(port->dev);
        }
}

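/* Reclaim completed TX DCBs: update the stats, unmap the buffer and free or
 * recycle the skb, XDP frame or page it carried, then wake the queues if
 * anything was reclaimed. Runs in NAPI context under the tx_lock.
 */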
static void lan966x_fdma_tx_clear_buf(struct lan966x *lan966x, int weight)
{
        struct lan966x_tx *tx = &lan966x->tx;
        struct lan966x_rx *rx = &lan966x->rx;
        struct lan966x_tx_dcb_buf *dcb_buf;
        struct fdma *fdma = &tx->fdma;
        struct xdp_frame_bulk bq;
        unsigned long flags;
        bool clear = false;
        struct fdma_db *db;
        int i;

        xdp_frame_bulk_init(&bq);

        spin_lock_irqsave(&lan966x->tx_lock, flags);
        for (i = 0; i < fdma->n_dcbs; ++i) {
                dcb_buf = &tx->dcbs_buf[i];

                if (!dcb_buf->used)
                        continue;

                db = fdma_db_get(fdma, i, 0);
                if (!fdma_db_is_done(db))
                        continue;

                dcb_buf->dev->stats.tx_packets++;
                dcb_buf->dev->stats.tx_bytes += dcb_buf->len;

                dcb_buf->used = false;
                if (dcb_buf->use_skb) {
                        dma_unmap_single(lan966x->dev,
                                         dcb_buf->dma_addr,
                                         dcb_buf->len,
                                         DMA_TO_DEVICE);

                        if (!dcb_buf->ptp)
                                napi_consume_skb(dcb_buf->data.skb, weight);
                } else {
                        if (dcb_buf->xdp_ndo)
                                dma_unmap_single(lan966x->dev,
                                                 dcb_buf->dma_addr,
                                                 dcb_buf->len,
                                                 DMA_TO_DEVICE);

                        if (dcb_buf->xdp_ndo)
                                xdp_return_frame_bulk(dcb_buf->data.xdpf, &bq);
                        else
                                page_pool_recycle_direct(rx->page_pool,
                                                         dcb_buf->data.page);
                }

                clear = true;
        }

        xdp_flush_frame_bulk(&bq);

        if (clear)
                lan966x_fdma_wakeup_netdev(lan966x);

        spin_unlock_irqrestore(&lan966x->tx_lock, flags);
}

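/* Inspect the next received data block: sync it for the CPU, extract the
 * source port from the IFH and, if an XDP program is attached on that port,
 * run it. Returns an FDMA_* verdict.
 */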
static int lan966x_fdma_rx_check_frame(struct lan966x_rx *rx, u64 *src_port)
{
        struct lan966x *lan966x = rx->lan966x;
        struct fdma *fdma = &rx->fdma;
        struct lan966x_port *port;
        struct fdma_db *db;
        struct page *page;

        db = fdma_db_next_get(fdma);
        page = rx->page[fdma->dcb_index][fdma->db_index];
        if (unlikely(!page))
                return FDMA_ERROR;

        dma_sync_single_for_cpu(lan966x->dev,
                                (dma_addr_t)db->dataptr + XDP_PACKET_HEADROOM,
                                FDMA_DCB_STATUS_BLOCKL(db->status),
                                DMA_FROM_DEVICE);

        lan966x_ifh_get_src_port(page_address(page) + XDP_PACKET_HEADROOM,
                                 src_port);
        if (WARN_ON(*src_port >= lan966x->num_phys_ports))
                return FDMA_ERROR;

        port = lan966x->ports[*src_port];
        if (!lan966x_xdp_port_present(port))
                return FDMA_PASS;

        return lan966x_xdp_run(port, page, FDMA_DCB_STATUS_BLOCKL(db->status));
}

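/* Build an skb around the received page, strip the IFH, apply timestamping
 * and bridge offload marks, and update the RX statistics. The page is
 * recycled if the skb cannot be built.
 */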
static struct sk_buff *lan966x_fdma_rx_get_frame(struct lan966x_rx *rx,
                                                 u64 src_port)
{
        struct lan966x *lan966x = rx->lan966x;
        struct fdma *fdma = &rx->fdma;
        struct sk_buff *skb;
        struct fdma_db *db;
        struct page *page;
        u64 timestamp;

        /* Get the received frame and unmap it */
        db = fdma_db_next_get(fdma);
        page = rx->page[fdma->dcb_index][fdma->db_index];

        skb = build_skb(page_address(page), fdma->db_size);
        if (unlikely(!skb))
                goto free_page;

        skb_mark_for_recycle(skb);

        skb_reserve(skb, XDP_PACKET_HEADROOM);
        skb_put(skb, FDMA_DCB_STATUS_BLOCKL(db->status));

        lan966x_ifh_get_timestamp(skb->data, &timestamp);

        skb->dev = lan966x->ports[src_port]->dev;
        skb_pull(skb, IFH_LEN_BYTES);

        if (likely(!(skb->dev->features & NETIF_F_RXFCS)))
                skb_trim(skb, skb->len - ETH_FCS_LEN);

        lan966x_ptp_rxtstamp(lan966x, skb, src_port, timestamp);
        skb->protocol = eth_type_trans(skb, skb->dev);

        if (lan966x->bridge_mask & BIT(src_port)) {
                skb->offload_fwd_mark = 1;

                skb_reset_network_header(skb);
                if (!lan966x_hw_offload(lan966x, src_port, skb))
                        skb->offload_fwd_mark = 0;
        }

        skb->dev->stats.rx_bytes += skb->len;
        skb->dev->stats.rx_packets++;

        return skb;

free_page:
        page_pool_recycle_direct(rx->page_pool, page);

        return NULL;
}

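/* NAPI poll: reclaim completed TX buffers, hand up to 'weight' received
 * frames to XDP/GRO, then refill the consumed DCBs with fresh pages and
 * reload the RX channel.
 */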
static int lan966x_fdma_napi_poll(struct napi_struct *napi, int weight)
{
        struct lan966x *lan966x = container_of(napi, struct lan966x, napi);
        struct lan966x_rx *rx = &lan966x->rx;
        int old_dcb, dcb_reload, counter = 0;
        struct fdma *fdma = &rx->fdma;
        bool redirect = false;
        struct sk_buff *skb;
        u64 src_port;

        dcb_reload = fdma->dcb_index;

        lan966x_fdma_tx_clear_buf(lan966x, weight);

        /* Get all the received skbs */
        while (counter < weight) {
                if (!fdma_has_frames(fdma))
                        break;

                counter++;

                switch (lan966x_fdma_rx_check_frame(rx, &src_port)) {
                case FDMA_PASS:
                        break;
                case FDMA_ERROR:
                        lan966x_fdma_rx_free_page(rx);
                        fdma_dcb_advance(fdma);
                        goto allocate_new;
                case FDMA_REDIRECT:
                        redirect = true;
                        fallthrough;
                case FDMA_TX:
                        fdma_dcb_advance(fdma);
                        continue;
                case FDMA_DROP:
                        lan966x_fdma_rx_free_page(rx);
                        fdma_dcb_advance(fdma);
                        continue;
                }

                skb = lan966x_fdma_rx_get_frame(rx, src_port);
                fdma_dcb_advance(fdma);
                if (!skb)
                        goto allocate_new;

                napi_gro_receive(&lan966x->napi, skb);
        }

allocate_new:
        /* Allocate new pages and map them */
        while (dcb_reload != fdma->dcb_index) {
                old_dcb = dcb_reload;
                dcb_reload++;
                dcb_reload &= fdma->n_dcbs - 1;

                fdma_dcb_add(fdma, old_dcb, FDMA_DCB_INFO_DATAL(fdma->db_size),
                             FDMA_DCB_STATUS_INTR);

                lan966x_fdma_rx_reload(rx);
        }

        if (redirect)
                xdp_do_flush();

        if (counter < weight && napi_complete_done(napi, counter))
                lan_wr(0xff, lan966x, FDMA_INTR_DB_ENA);

        return counter;
}

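/* FDMA interrupt handler: on a DB interrupt, mask further DB interrupts and
 * schedule NAPI; on an error interrupt, warn and acknowledge it.
 */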
irqreturn_t lan966x_fdma_irq_handler(int irq, void *args)
{
        struct lan966x *lan966x = args;
        u32 db, err, err_type;

        db = lan_rd(lan966x, FDMA_INTR_DB);
        err = lan_rd(lan966x, FDMA_INTR_ERR);

        if (db) {
                lan_wr(0, lan966x, FDMA_INTR_DB_ENA);
                lan_wr(db, lan966x, FDMA_INTR_DB);

                napi_schedule(&lan966x->napi);
        }

        if (err) {
                err_type = lan_rd(lan966x, FDMA_ERRORS);

                WARN(1, "Unexpected error: %d, error_type: %d\n", err, err_type);

                lan_wr(err, lan966x, FDMA_INTR_ERR);
                lan_wr(err_type, lan966x, FDMA_ERRORS);
        }

        return IRQ_HANDLED;
}

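/* Find a free TX DCB that is not the last one in the ring; return -1 when
 * the ring is full.
 */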
static int lan966x_fdma_get_next_dcb(struct lan966x_tx *tx)
{
        struct lan966x_tx_dcb_buf *dcb_buf;
        struct fdma *fdma = &tx->fdma;
        int i;

        for (i = 0; i < fdma->n_dcbs; ++i) {
                dcb_buf = &tx->dcbs_buf[i];
                if (!dcb_buf->used &&
                    !fdma_is_last(&tx->fdma, &tx->fdma.dcbs[i]))
                        return i;
        }

        return -1;
}

static void lan966x_fdma_tx_start(struct lan966x_tx *tx)
{
        struct lan966x *lan966x = tx->lan966x;

        if (likely(lan966x->tx.activated)) {
                lan966x_fdma_tx_reload(tx);
        } else {
                /* First use of the channel, so just activate it */
                lan966x->tx.activated = true;
                lan966x_fdma_tx_activate(tx);
        }
}

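/* Transmit an XDP frame. When len is zero, ptr is an xdp_frame coming from
 * ndo_xdp_xmit and is mapped with dma_map_single; otherwise ptr is a
 * page-pool page from an XDP_TX verdict and len is the frame length. In
 * both cases an IFH is placed in front of the frame before transmission.
 */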
int lan966x_fdma_xmit_xdpf(struct lan966x_port *port, void *ptr, u32 len)
{
        struct lan966x *lan966x = port->lan966x;
        struct lan966x_tx_dcb_buf *next_dcb_buf;
        struct lan966x_tx *tx = &lan966x->tx;
        struct xdp_frame *xdpf;
        dma_addr_t dma_addr;
        struct page *page;
        int next_to_use;
        __be32 *ifh;
        int ret = 0;

        spin_lock(&lan966x->tx_lock);

        /* Get next index */
        next_to_use = lan966x_fdma_get_next_dcb(tx);
        if (next_to_use < 0) {
                netif_stop_queue(port->dev);
                ret = NETDEV_TX_BUSY;
                goto out;
        }

        /* Get the next buffer */
        next_dcb_buf = &tx->dcbs_buf[next_to_use];

        /* Generate new IFH */
        if (!len) {
                xdpf = ptr;

                if (xdpf->headroom < IFH_LEN_BYTES) {
                        ret = NETDEV_TX_OK;
                        goto out;
                }

                ifh = xdpf->data - IFH_LEN_BYTES;
                memset(ifh, 0x0, sizeof(__be32) * IFH_LEN);
                lan966x_ifh_set_bypass(ifh, 1);
                lan966x_ifh_set_port(ifh, BIT_ULL(port->chip_port));

                dma_addr = dma_map_single(lan966x->dev,
                                          xdpf->data - IFH_LEN_BYTES,
                                          xdpf->len + IFH_LEN_BYTES,
                                          DMA_TO_DEVICE);
                if (dma_mapping_error(lan966x->dev, dma_addr)) {
                        ret = NETDEV_TX_OK;
                        goto out;
                }

                next_dcb_buf->data.xdpf = xdpf;
                next_dcb_buf->len = xdpf->len + IFH_LEN_BYTES;
        } else {
                page = ptr;

                ifh = page_address(page) + XDP_PACKET_HEADROOM;
                memset(ifh, 0x0, sizeof(__be32) * IFH_LEN);
                lan966x_ifh_set_bypass(ifh, 1);
                lan966x_ifh_set_port(ifh, BIT_ULL(port->chip_port));

                dma_addr = page_pool_get_dma_addr(page);
                dma_sync_single_for_device(lan966x->dev,
                                           dma_addr + XDP_PACKET_HEADROOM,
                                           len + IFH_LEN_BYTES,
                                           DMA_TO_DEVICE);

                next_dcb_buf->data.page = page;
                next_dcb_buf->len = len + IFH_LEN_BYTES;
        }

        /* Fill up the buffer */
        next_dcb_buf->use_skb = false;
        next_dcb_buf->xdp_ndo = !len;
        next_dcb_buf->dma_addr = dma_addr;
        next_dcb_buf->used = true;
        next_dcb_buf->ptp = false;
        next_dcb_buf->dev = port->dev;

        __fdma_dcb_add(&tx->fdma,
                       next_to_use,
                       0,
                       FDMA_DCB_STATUS_INTR |
                       FDMA_DCB_STATUS_SOF |
                       FDMA_DCB_STATUS_EOF |
                       FDMA_DCB_STATUS_BLOCKO(0) |
                       FDMA_DCB_STATUS_BLOCKL(next_dcb_buf->len),
                       &fdma_nextptr_cb,
                       &lan966x_fdma_xdp_tx_dataptr_cb);

        /* Start the transmission */
        lan966x_fdma_tx_start(tx);

out:
        spin_unlock(&lan966x->tx_lock);

        return ret;
}

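/* ndo_start_xmit path: pad the frame to the minimum size, make room for the
 * IFH and the FCS, map the skb and hand it to the TX channel.
 */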
int lan966x_fdma_xmit(struct sk_buff *skb, __be32 *ifh, struct net_device *dev)
{
        struct lan966x_port *port = netdev_priv(dev);
        struct lan966x *lan966x = port->lan966x;
        struct lan966x_tx_dcb_buf *next_dcb_buf;
        struct lan966x_tx *tx = &lan966x->tx;
        int needed_headroom;
        int needed_tailroom;
        dma_addr_t dma_addr;
        int next_to_use;
        int err;

        /* Get next index */
        next_to_use = lan966x_fdma_get_next_dcb(tx);
        if (next_to_use < 0) {
                netif_stop_queue(dev);
                return NETDEV_TX_BUSY;
        }

        if (skb_put_padto(skb, ETH_ZLEN)) {
                dev->stats.tx_dropped++;
                return NETDEV_TX_OK;
        }

        /* skb processing */
        needed_headroom = max_t(int, IFH_LEN_BYTES - skb_headroom(skb), 0);
        needed_tailroom = max_t(int, ETH_FCS_LEN - skb_tailroom(skb), 0);
        if (needed_headroom || needed_tailroom || skb_header_cloned(skb)) {
                err = pskb_expand_head(skb, needed_headroom, needed_tailroom,
                                       GFP_ATOMIC);
                if (unlikely(err)) {
                        dev->stats.tx_dropped++;
                        err = NETDEV_TX_OK;
                        goto release;
                }
        }

        skb_tx_timestamp(skb);
        skb_push(skb, IFH_LEN_BYTES);
        memcpy(skb->data, ifh, IFH_LEN_BYTES);
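        /* Make room for the FCS at the end of the frame */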
        skb_put(skb, 4);

        dma_addr = dma_map_single(lan966x->dev, skb->data, skb->len,
                                  DMA_TO_DEVICE);
        if (dma_mapping_error(lan966x->dev, dma_addr)) {
                dev->stats.tx_dropped++;
                err = NETDEV_TX_OK;
                goto release;
        }

        /* Fill up the buffer */
        next_dcb_buf = &tx->dcbs_buf[next_to_use];
        next_dcb_buf->use_skb = true;
        next_dcb_buf->data.skb = skb;
        next_dcb_buf->xdp_ndo = false;
        next_dcb_buf->len = skb->len;
        next_dcb_buf->dma_addr = dma_addr;
        next_dcb_buf->used = true;
        next_dcb_buf->ptp = false;
        next_dcb_buf->dev = dev;

        fdma_dcb_add(&tx->fdma,
                     next_to_use,
                     0,
                     FDMA_DCB_STATUS_INTR |
                     FDMA_DCB_STATUS_SOF |
                     FDMA_DCB_STATUS_EOF |
                     FDMA_DCB_STATUS_BLOCKO(0) |
                     FDMA_DCB_STATUS_BLOCKL(skb->len));

        if (skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP &&
            LAN966X_SKB_CB(skb)->rew_op == IFH_REW_OP_TWO_STEP_PTP)
                next_dcb_buf->ptp = true;

        /* Start the transmission */
        lan966x_fdma_tx_start(tx);

        return NETDEV_TX_OK;

release:
        if (skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP &&
            LAN966X_SKB_CB(skb)->rew_op == IFH_REW_OP_TWO_STEP_PTP)
                lan966x_ptp_txtstamp_release(port, skb);

        dev_kfree_skb_any(skb);
        return err;
}

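/* Return the largest frame length configured on any port's MAC. */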
static int lan966x_fdma_get_max_mtu(struct lan966x *lan966x)
{
        int max_mtu = 0;
        int i;

        for (i = 0; i < lan966x->num_phys_ports; ++i) {
                struct lan966x_port *port;
                int mtu;

                port = lan966x->ports[i];
                if (!port)
                        continue;

                mtu = lan_rd(lan966x, DEV_MAC_MAXLEN_CFG(port->chip_port));
                if (mtu > max_mtu)
                        max_mtu = mtu;
        }

        return max_mtu;
}

static int lan966x_qsys_sw_status(struct lan966x *lan966x)
{
        return lan_rd(lan966x, QSYS_SW_STATUS(CPU_PORT));
}

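/* Re-create the RX path for a new maximum frame size: stop NAPI and the
 * netdevs, swap in a freshly sized page pool and DCB ring, then free the
 * old resources. On allocation failure the old resources are restored.
 */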
static int lan966x_fdma_reload(struct lan966x *lan966x, int new_mtu)
{
        struct page_pool *page_pool;
        struct fdma fdma_rx_old;
        int err;

        /* Store these for later to free them */
        memcpy(&fdma_rx_old, &lan966x->rx.fdma, sizeof(struct fdma));
        page_pool = lan966x->rx.page_pool;

        napi_synchronize(&lan966x->napi);
        napi_disable(&lan966x->napi);
        lan966x_fdma_stop_netdev(lan966x);

        lan966x_fdma_rx_disable(&lan966x->rx);
        lan966x_fdma_rx_free_pages(&lan966x->rx);
        lan966x->rx.page_order = round_up(new_mtu, PAGE_SIZE) / PAGE_SIZE - 1;
        lan966x->rx.max_mtu = new_mtu;
        err = lan966x_fdma_rx_alloc(&lan966x->rx);
        if (err)
                goto restore;
        lan966x_fdma_rx_start(&lan966x->rx);

        fdma_free_coherent(lan966x->dev, &fdma_rx_old);

        page_pool_destroy(page_pool);

        lan966x_fdma_wakeup_netdev(lan966x);
        napi_enable(&lan966x->napi);

        return err;
restore:
        lan966x->rx.page_pool = page_pool;
        memcpy(&lan966x->rx.fdma, &fdma_rx_old, sizeof(struct fdma));
        lan966x_fdma_rx_start(&lan966x->rx);

        return err;
}

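/* Worst-case buffer size needed for one frame: the largest MTU plus room
 * for the IFH, the skb_shared_info, two VLAN tags and the XDP headroom.
 */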
static int lan966x_fdma_get_max_frame(struct lan966x *lan966x)
{
        return lan966x_fdma_get_max_mtu(lan966x) +
               IFH_LEN_BYTES +
               SKB_DATA_ALIGN(sizeof(struct skb_shared_info)) +
               VLAN_HLEN * 2 +
               XDP_PACKET_HEADROOM;
}

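/* Disable the CPU port and drain its queues before reloading the FDMA, then
 * enable it again.
 */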
static int __lan966x_fdma_reload(struct lan966x *lan966x, int max_mtu)
{
        int err;
        u32 val;

        /* Disable the CPU port */
        lan_rmw(QSYS_SW_PORT_MODE_PORT_ENA_SET(0),
                QSYS_SW_PORT_MODE_PORT_ENA,
                lan966x, QSYS_SW_PORT_MODE(CPU_PORT));

        /* Flush the CPU queues */
        readx_poll_timeout(lan966x_qsys_sw_status, lan966x,
                           val, !(QSYS_SW_STATUS_EQ_AVAIL_GET(val)),
                           READL_SLEEP_US, READL_TIMEOUT_US);

        /* Add a sleep in case there are frames between the queues and the CPU
         * port
         */
        usleep_range(1000, 2000);

        err = lan966x_fdma_reload(lan966x, max_mtu);

        /* Enable back the CPU port */
        lan_rmw(QSYS_SW_PORT_MODE_PORT_ENA_SET(1),
                QSYS_SW_PORT_MODE_PORT_ENA,
                lan966x, QSYS_SW_PORT_MODE(CPU_PORT));

        return err;
}

int lan966x_fdma_change_mtu(struct lan966x *lan966x)
{
        int max_mtu;

        max_mtu = lan966x_fdma_get_max_frame(lan966x);
        if (max_mtu == lan966x->rx.max_mtu)
                return 0;

        return __lan966x_fdma_reload(lan966x, max_mtu);
}

int lan966x_fdma_reload_page_pool(struct lan966x *lan966x)
{
        int max_mtu;

        max_mtu = lan966x_fdma_get_max_frame(lan966x);
        return __lan966x_fdma_reload(lan966x, max_mtu);
}

void lan966x_fdma_netdev_init(struct lan966x *lan966x, struct net_device *dev)
{
        if (lan966x->fdma_ndev)
                return;

        lan966x->fdma_ndev = dev;
        netif_napi_add(dev, &lan966x->napi, lan966x_fdma_napi_poll);
        napi_enable(&lan966x->napi);
}

void lan966x_fdma_netdev_deinit(struct lan966x *lan966x, struct net_device *dev)
{
        if (lan966x->fdma_ndev == dev) {
                netif_napi_del(&lan966x->napi);
                lan966x->fdma_ndev = NULL;
        }
}

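/* Configure the RX (extraction) and TX (injection) channels, allocate both
 * rings and start the RX channel. A no-op when the FDMA is not used.
 */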
int lan966x_fdma_init(struct lan966x *lan966x)
{
        int err;

        if (!lan966x->fdma)
                return 0;

        lan966x->rx.lan966x = lan966x;
        lan966x->rx.fdma.channel_id = FDMA_XTR_CHANNEL;
        lan966x->rx.fdma.n_dcbs = FDMA_DCB_MAX;
        lan966x->rx.fdma.n_dbs = FDMA_RX_DCB_MAX_DBS;
        lan966x->rx.fdma.priv = lan966x;
        lan966x->rx.fdma.size = fdma_get_size(&lan966x->rx.fdma);
        lan966x->rx.fdma.db_size = PAGE_SIZE << lan966x->rx.page_order;
        lan966x->rx.fdma.ops.nextptr_cb = &fdma_nextptr_cb;
        lan966x->rx.fdma.ops.dataptr_cb = &lan966x_fdma_rx_dataptr_cb;
        lan966x->rx.max_mtu = lan966x_fdma_get_max_frame(lan966x);
        lan966x->tx.lan966x = lan966x;
        lan966x->tx.fdma.channel_id = FDMA_INJ_CHANNEL;
        lan966x->tx.fdma.n_dcbs = FDMA_DCB_MAX;
        lan966x->tx.fdma.n_dbs = FDMA_TX_DCB_MAX_DBS;
        lan966x->tx.fdma.priv = lan966x;
        lan966x->tx.fdma.size = fdma_get_size(&lan966x->tx.fdma);
        lan966x->tx.fdma.db_size = PAGE_SIZE << lan966x->rx.page_order;
        lan966x->tx.fdma.ops.nextptr_cb = &fdma_nextptr_cb;
        lan966x->tx.fdma.ops.dataptr_cb = &lan966x_fdma_tx_dataptr_cb;

        err = lan966x_fdma_rx_alloc(&lan966x->rx);
        if (err)
                return err;

        err = lan966x_fdma_tx_alloc(&lan966x->tx);
        if (err) {
                fdma_free_coherent(lan966x->dev, &lan966x->rx.fdma);
                return err;
        }

        lan966x_fdma_rx_start(&lan966x->rx);

        return 0;
}

void lan966x_fdma_deinit(struct lan966x *lan966x)
{
        if (!lan966x->fdma)
                return;

        lan966x_fdma_rx_disable(&lan966x->rx);
        lan966x_fdma_tx_disable(&lan966x->tx);

        napi_synchronize(&lan966x->napi);
        napi_disable(&lan966x->napi);

        lan966x_fdma_rx_free_pages(&lan966x->rx);
        fdma_free_coherent(lan966x->dev, &lan966x->rx.fdma);
        page_pool_destroy(lan966x->rx.page_pool);
        lan966x_fdma_tx_free(&lan966x->tx);
}