drivers/net/ethernet/mediatek/mtk_eth_soc.c
1 // SPDX-License-Identifier: GPL-2.0-only
2 /*
3  *
4  *   Copyright (C) 2009-2016 John Crispin <[email protected]>
5  *   Copyright (C) 2009-2016 Felix Fietkau <[email protected]>
6  *   Copyright (C) 2013-2016 Michael Lee <[email protected]>
7  */
8
9 #include <linux/of.h>
10 #include <linux/of_mdio.h>
11 #include <linux/of_net.h>
12 #include <linux/of_address.h>
13 #include <linux/mfd/syscon.h>
14 #include <linux/platform_device.h>
15 #include <linux/regmap.h>
16 #include <linux/clk.h>
17 #include <linux/pm_runtime.h>
18 #include <linux/if_vlan.h>
19 #include <linux/reset.h>
20 #include <linux/tcp.h>
21 #include <linux/interrupt.h>
22 #include <linux/pinctrl/devinfo.h>
23 #include <linux/phylink.h>
24 #include <linux/pcs/pcs-mtk-lynxi.h>
25 #include <linux/jhash.h>
26 #include <linux/bitfield.h>
27 #include <net/dsa.h>
28 #include <net/dst_metadata.h>
29 #include <net/page_pool/helpers.h>
30
31 #include "mtk_eth_soc.h"
32 #include "mtk_wed.h"
33
34 static int mtk_msg_level = -1;
35 module_param_named(msg_level, mtk_msg_level, int, 0);
36 MODULE_PARM_DESC(msg_level, "Message level (-1=defaults,0=none,...,16=all)");
37
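/* The two macros below map an ethtool stat name to its u64 word offset inside
 * struct mtk_hw_stats (or its embedded xdp_stats), so the string table further
 * down stays in sync with the counter layout by construction.
 */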
38 #define MTK_ETHTOOL_STAT(x) { #x, \
39                               offsetof(struct mtk_hw_stats, x) / sizeof(u64) }
40
41 #define MTK_ETHTOOL_XDP_STAT(x) { #x, \
42                                   offsetof(struct mtk_hw_stats, xdp_stats.x) / \
43                                   sizeof(u64) }
44
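/* Per-SoC register maps: the same PDMA/QDMA/GDMA blocks sit at different
 * offsets on each NETSYS generation, so register accesses are indirected
 * through soc->reg_map instead of using fixed constants.
 */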
45 static const struct mtk_reg_map mtk_reg_map = {
46         .tx_irq_mask            = 0x1a1c,
47         .tx_irq_status          = 0x1a18,
48         .pdma = {
49                 .rx_ptr         = 0x0900,
50                 .rx_cnt_cfg     = 0x0904,
51                 .pcrx_ptr       = 0x0908,
52                 .glo_cfg        = 0x0a04,
53                 .rst_idx        = 0x0a08,
54                 .delay_irq      = 0x0a0c,
55                 .irq_status     = 0x0a20,
56                 .irq_mask       = 0x0a28,
57                 .adma_rx_dbg0   = 0x0a38,
58                 .int_grp        = 0x0a50,
59         },
60         .qdma = {
61                 .qtx_cfg        = 0x1800,
62                 .qtx_sch        = 0x1804,
63                 .rx_ptr         = 0x1900,
64                 .rx_cnt_cfg     = 0x1904,
65                 .qcrx_ptr       = 0x1908,
66                 .glo_cfg        = 0x1a04,
67                 .rst_idx        = 0x1a08,
68                 .delay_irq      = 0x1a0c,
69                 .fc_th          = 0x1a10,
70                 .tx_sch_rate    = 0x1a14,
71                 .int_grp        = 0x1a20,
72                 .hred           = 0x1a44,
73                 .ctx_ptr        = 0x1b00,
74                 .dtx_ptr        = 0x1b04,
75                 .crx_ptr        = 0x1b10,
76                 .drx_ptr        = 0x1b14,
77                 .fq_head        = 0x1b20,
78                 .fq_tail        = 0x1b24,
79                 .fq_count       = 0x1b28,
80                 .fq_blen        = 0x1b2c,
81         },
82         .gdm1_cnt               = 0x2400,
83         .gdma_to_ppe    = {
84                 [0]             = 0x4444,
85         },
86         .ppe_base               = 0x0c00,
87         .wdma_base = {
88                 [0]             = 0x2800,
89                 [1]             = 0x2c00,
90         },
91         .pse_iq_sta             = 0x0110,
92         .pse_oq_sta             = 0x0118,
93 };
94
95 static const struct mtk_reg_map mt7628_reg_map = {
96         .tx_irq_mask            = 0x0a28,
97         .tx_irq_status          = 0x0a20,
98         .pdma = {
99                 .rx_ptr         = 0x0900,
100                 .rx_cnt_cfg     = 0x0904,
101                 .pcrx_ptr       = 0x0908,
102                 .glo_cfg        = 0x0a04,
103                 .rst_idx        = 0x0a08,
104                 .delay_irq      = 0x0a0c,
105                 .irq_status     = 0x0a20,
106                 .irq_mask       = 0x0a28,
107                 .int_grp        = 0x0a50,
108         },
109 };
110
111 static const struct mtk_reg_map mt7986_reg_map = {
112         .tx_irq_mask            = 0x461c,
113         .tx_irq_status          = 0x4618,
114         .pdma = {
115                 .rx_ptr         = 0x4100,
116                 .rx_cnt_cfg     = 0x4104,
117                 .pcrx_ptr       = 0x4108,
118                 .glo_cfg        = 0x4204,
119                 .rst_idx        = 0x4208,
120                 .delay_irq      = 0x420c,
121                 .irq_status     = 0x4220,
122                 .irq_mask       = 0x4228,
123                 .adma_rx_dbg0   = 0x4238,
124                 .int_grp        = 0x4250,
125         },
126         .qdma = {
127                 .qtx_cfg        = 0x4400,
128                 .qtx_sch        = 0x4404,
129                 .rx_ptr         = 0x4500,
130                 .rx_cnt_cfg     = 0x4504,
131                 .qcrx_ptr       = 0x4508,
132                 .glo_cfg        = 0x4604,
133                 .rst_idx        = 0x4608,
134                 .delay_irq      = 0x460c,
135                 .fc_th          = 0x4610,
136                 .int_grp        = 0x4620,
137                 .hred           = 0x4644,
138                 .ctx_ptr        = 0x4700,
139                 .dtx_ptr        = 0x4704,
140                 .crx_ptr        = 0x4710,
141                 .drx_ptr        = 0x4714,
142                 .fq_head        = 0x4720,
143                 .fq_tail        = 0x4724,
144                 .fq_count       = 0x4728,
145                 .fq_blen        = 0x472c,
146                 .tx_sch_rate    = 0x4798,
147         },
148         .gdm1_cnt               = 0x1c00,
149         .gdma_to_ppe    = {
150                 [0]             = 0x3333,
151                 [1]             = 0x4444,
152         },
153         .ppe_base               = 0x2000,
154         .wdma_base = {
155                 [0]             = 0x4800,
156                 [1]             = 0x4c00,
157         },
158         .pse_iq_sta             = 0x0180,
159         .pse_oq_sta             = 0x01a0,
160 };
161
162 static const struct mtk_reg_map mt7988_reg_map = {
163         .tx_irq_mask            = 0x461c,
164         .tx_irq_status          = 0x4618,
165         .pdma = {
166                 .rx_ptr         = 0x6900,
167                 .rx_cnt_cfg     = 0x6904,
168                 .pcrx_ptr       = 0x6908,
169                 .glo_cfg        = 0x6a04,
170                 .rst_idx        = 0x6a08,
171                 .delay_irq      = 0x6a0c,
172                 .irq_status     = 0x6a20,
173                 .irq_mask       = 0x6a28,
174                 .adma_rx_dbg0   = 0x6a38,
175                 .int_grp        = 0x6a50,
176         },
177         .qdma = {
178                 .qtx_cfg        = 0x4400,
179                 .qtx_sch        = 0x4404,
180                 .rx_ptr         = 0x4500,
181                 .rx_cnt_cfg     = 0x4504,
182                 .qcrx_ptr       = 0x4508,
183                 .glo_cfg        = 0x4604,
184                 .rst_idx        = 0x4608,
185                 .delay_irq      = 0x460c,
186                 .fc_th          = 0x4610,
187                 .int_grp        = 0x4620,
188                 .hred           = 0x4644,
189                 .ctx_ptr        = 0x4700,
190                 .dtx_ptr        = 0x4704,
191                 .crx_ptr        = 0x4710,
192                 .drx_ptr        = 0x4714,
193                 .fq_head        = 0x4720,
194                 .fq_tail        = 0x4724,
195                 .fq_count       = 0x4728,
196                 .fq_blen        = 0x472c,
197                 .tx_sch_rate    = 0x4798,
198         },
199         .gdm1_cnt               = 0x1c00,
200         .gdma_to_ppe    = {
201                 [0]             = 0x3333,
202                 [1]             = 0x4444,
203                 [2]             = 0xcccc,
204         },
205         .ppe_base               = 0x2000,
206         .wdma_base = {
207                 [0]             = 0x4800,
208                 [1]             = 0x4c00,
209                 [2]             = 0x5000,
210         },
211         .pse_iq_sta             = 0x0180,
212         .pse_oq_sta             = 0x01a0,
213 };
214
215 /* strings used by ethtool */
216 static const struct mtk_ethtool_stats {
217         char str[ETH_GSTRING_LEN];
218         u32 offset;
219 } mtk_ethtool_stats[] = {
220         MTK_ETHTOOL_STAT(tx_bytes),
221         MTK_ETHTOOL_STAT(tx_packets),
222         MTK_ETHTOOL_STAT(tx_skip),
223         MTK_ETHTOOL_STAT(tx_collisions),
224         MTK_ETHTOOL_STAT(rx_bytes),
225         MTK_ETHTOOL_STAT(rx_packets),
226         MTK_ETHTOOL_STAT(rx_overflow),
227         MTK_ETHTOOL_STAT(rx_fcs_errors),
228         MTK_ETHTOOL_STAT(rx_short_errors),
229         MTK_ETHTOOL_STAT(rx_long_errors),
230         MTK_ETHTOOL_STAT(rx_checksum_errors),
231         MTK_ETHTOOL_STAT(rx_flow_control_packets),
232         MTK_ETHTOOL_XDP_STAT(rx_xdp_redirect),
233         MTK_ETHTOOL_XDP_STAT(rx_xdp_pass),
234         MTK_ETHTOOL_XDP_STAT(rx_xdp_drop),
235         MTK_ETHTOOL_XDP_STAT(rx_xdp_tx),
236         MTK_ETHTOOL_XDP_STAT(rx_xdp_tx_errors),
237         MTK_ETHTOOL_XDP_STAT(tx_xdp_xmit),
238         MTK_ETHTOOL_XDP_STAT(tx_xdp_xmit_errors),
239 };
240
241 static const char * const mtk_clks_source_name[] = {
242         "ethif",
243         "sgmiitop",
244         "esw",
245         "gp0",
246         "gp1",
247         "gp2",
248         "gp3",
249         "xgp1",
250         "xgp2",
251         "xgp3",
252         "crypto",
253         "fe",
254         "trgpll",
255         "sgmii_tx250m",
256         "sgmii_rx250m",
257         "sgmii_cdr_ref",
258         "sgmii_cdr_fb",
259         "sgmii2_tx250m",
260         "sgmii2_rx250m",
261         "sgmii2_cdr_ref",
262         "sgmii2_cdr_fb",
263         "sgmii_ck",
264         "eth2pll",
265         "wocpu0",
266         "wocpu1",
267         "netsys0",
268         "netsys1",
269         "ethwarp_wocpu2",
270         "ethwarp_wocpu1",
271         "ethwarp_wocpu0",
272         "top_usxgmii0_sel",
273         "top_usxgmii1_sel",
274         "top_sgm0_sel",
275         "top_sgm1_sel",
276         "top_xfi_phy0_xtal_sel",
277         "top_xfi_phy1_xtal_sel",
278         "top_eth_gmii_sel",
279         "top_eth_refck_50m_sel",
280         "top_eth_sys_200m_sel",
281         "top_eth_sys_sel",
282         "top_eth_xgmii_sel",
283         "top_eth_mii_sel",
284         "top_netsys_sel",
285         "top_netsys_500m_sel",
286         "top_netsys_pao_2x_sel",
287         "top_netsys_sync_250m_sel",
288         "top_netsys_ppefb_250m_sel",
289         "top_netsys_warp_sel",
290 };
291
292 void mtk_w32(struct mtk_eth *eth, u32 val, unsigned reg)
293 {
294         __raw_writel(val, eth->base + reg);
295 }
296
297 u32 mtk_r32(struct mtk_eth *eth, unsigned reg)
298 {
299         return __raw_readl(eth->base + reg);
300 }
301
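/* Read-modify-write helper: clear the bits in @mask, then set the bits in
 * @set within register @reg.
 */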
302 u32 mtk_m32(struct mtk_eth *eth, u32 mask, u32 set, unsigned int reg)
303 {
304         u32 val;
305
306         val = mtk_r32(eth, reg);
307         val &= ~mask;
308         val |= set;
309         mtk_w32(eth, val, reg);
310         return reg;
311 }
312
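/* Poll MTK_PHY_IAC until the PHY_IAC_ACCESS bit clears (controller idle),
 * giving up after PHY_IAC_TIMEOUT.
 */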
313 static int mtk_mdio_busy_wait(struct mtk_eth *eth)
314 {
315         unsigned long t_start = jiffies;
316
317         while (1) {
318                 if (!(mtk_r32(eth, MTK_PHY_IAC) & PHY_IAC_ACCESS))
319                         return 0;
320                 if (time_after(jiffies, t_start + PHY_IAC_TIMEOUT))
321                         break;
322                 cond_resched();
323         }
324
325         dev_err(eth->dev, "mdio: MDIO timeout\n");
326         return -ETIMEDOUT;
327 }
328
329 static int _mtk_mdio_write_c22(struct mtk_eth *eth, u32 phy_addr, u32 phy_reg,
330                                u32 write_data)
331 {
332         int ret;
333
334         ret = mtk_mdio_busy_wait(eth);
335         if (ret < 0)
336                 return ret;
337
338         mtk_w32(eth, PHY_IAC_ACCESS |
339                 PHY_IAC_START_C22 |
340                 PHY_IAC_CMD_WRITE |
341                 PHY_IAC_REG(phy_reg) |
342                 PHY_IAC_ADDR(phy_addr) |
343                 PHY_IAC_DATA(write_data),
344                 MTK_PHY_IAC);
345
346         ret = mtk_mdio_busy_wait(eth);
347         if (ret < 0)
348                 return ret;
349
350         return 0;
351 }
352
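/* Clause 45 accesses are a two-step sequence: an address cycle
 * (PHY_IAC_CMD_C45_ADDR, with the target register carried in the data field)
 * followed by the actual write or read cycle on the selected device.
 */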
353 static int _mtk_mdio_write_c45(struct mtk_eth *eth, u32 phy_addr,
354                                u32 devad, u32 phy_reg, u32 write_data)
355 {
356         int ret;
357
358         ret = mtk_mdio_busy_wait(eth);
359         if (ret < 0)
360                 return ret;
361
362         mtk_w32(eth, PHY_IAC_ACCESS |
363                 PHY_IAC_START_C45 |
364                 PHY_IAC_CMD_C45_ADDR |
365                 PHY_IAC_REG(devad) |
366                 PHY_IAC_ADDR(phy_addr) |
367                 PHY_IAC_DATA(phy_reg),
368                 MTK_PHY_IAC);
369
370         ret = mtk_mdio_busy_wait(eth);
371         if (ret < 0)
372                 return ret;
373
374         mtk_w32(eth, PHY_IAC_ACCESS |
375                 PHY_IAC_START_C45 |
376                 PHY_IAC_CMD_WRITE |
377                 PHY_IAC_REG(devad) |
378                 PHY_IAC_ADDR(phy_addr) |
379                 PHY_IAC_DATA(write_data),
380                 MTK_PHY_IAC);
381
382         ret = mtk_mdio_busy_wait(eth);
383         if (ret < 0)
384                 return ret;
385
386         return 0;
387 }
388
389 static int _mtk_mdio_read_c22(struct mtk_eth *eth, u32 phy_addr, u32 phy_reg)
390 {
391         int ret;
392
393         ret = mtk_mdio_busy_wait(eth);
394         if (ret < 0)
395                 return ret;
396
397         mtk_w32(eth, PHY_IAC_ACCESS |
398                 PHY_IAC_START_C22 |
399                 PHY_IAC_CMD_C22_READ |
400                 PHY_IAC_REG(phy_reg) |
401                 PHY_IAC_ADDR(phy_addr),
402                 MTK_PHY_IAC);
403
404         ret = mtk_mdio_busy_wait(eth);
405         if (ret < 0)
406                 return ret;
407
408         return mtk_r32(eth, MTK_PHY_IAC) & PHY_IAC_DATA_MASK;
409 }
410
411 static int _mtk_mdio_read_c45(struct mtk_eth *eth, u32 phy_addr,
412                               u32 devad, u32 phy_reg)
413 {
414         int ret;
415
416         ret = mtk_mdio_busy_wait(eth);
417         if (ret < 0)
418                 return ret;
419
420         mtk_w32(eth, PHY_IAC_ACCESS |
421                 PHY_IAC_START_C45 |
422                 PHY_IAC_CMD_C45_ADDR |
423                 PHY_IAC_REG(devad) |
424                 PHY_IAC_ADDR(phy_addr) |
425                 PHY_IAC_DATA(phy_reg),
426                 MTK_PHY_IAC);
427
428         ret = mtk_mdio_busy_wait(eth);
429         if (ret < 0)
430                 return ret;
431
432         mtk_w32(eth, PHY_IAC_ACCESS |
433                 PHY_IAC_START_C45 |
434                 PHY_IAC_CMD_C45_READ |
435                 PHY_IAC_REG(devad) |
436                 PHY_IAC_ADDR(phy_addr),
437                 MTK_PHY_IAC);
438
439         ret = mtk_mdio_busy_wait(eth);
440         if (ret < 0)
441                 return ret;
442
443         return mtk_r32(eth, MTK_PHY_IAC) & PHY_IAC_DATA_MASK;
444 }
445
446 static int mtk_mdio_write_c22(struct mii_bus *bus, int phy_addr,
447                               int phy_reg, u16 val)
448 {
449         struct mtk_eth *eth = bus->priv;
450
451         return _mtk_mdio_write_c22(eth, phy_addr, phy_reg, val);
452 }
453
454 static int mtk_mdio_write_c45(struct mii_bus *bus, int phy_addr,
455                               int devad, int phy_reg, u16 val)
456 {
457         struct mtk_eth *eth = bus->priv;
458
459         return _mtk_mdio_write_c45(eth, phy_addr, devad, phy_reg, val);
460 }
461
462 static int mtk_mdio_read_c22(struct mii_bus *bus, int phy_addr, int phy_reg)
463 {
464         struct mtk_eth *eth = bus->priv;
465
466         return _mtk_mdio_read_c22(eth, phy_addr, phy_reg);
467 }
468
469 static int mtk_mdio_read_c45(struct mii_bus *bus, int phy_addr, int devad,
470                              int phy_reg)
471 {
472         struct mtk_eth *eth = bus->priv;
473
474         return _mtk_mdio_read_c45(eth, phy_addr, devad, phy_reg);
475 }
476
477 static int mt7621_gmac0_rgmii_adjust(struct mtk_eth *eth,
478                                      phy_interface_t interface)
479 {
480         u32 val;
481
482         val = (interface == PHY_INTERFACE_MODE_TRGMII) ?
483                 ETHSYS_TRGMII_MT7621_DDR_PLL : 0;
484
485         regmap_update_bits(eth->ethsys, ETHSYS_CLKCFG0,
486                            ETHSYS_TRGMII_MT7621_MASK, val);
487
488         return 0;
489 }
490
491 static void mtk_gmac0_rgmii_adjust(struct mtk_eth *eth,
492                                    phy_interface_t interface)
493 {
494         int ret;
495
496         if (interface == PHY_INTERFACE_MODE_TRGMII) {
497                 mtk_w32(eth, TRGMII_MODE, INTF_MODE);
498                 ret = clk_set_rate(eth->clks[MTK_CLK_TRGPLL], 500000000);
499                 if (ret)
500                         dev_err(eth->dev, "Failed to set trgmii pll: %d\n", ret);
501                 return;
502         }
503
504         dev_err(eth->dev, "Missing PLL configuration, ethernet may not work\n");
505 }
506
507 static void mtk_setup_bridge_switch(struct mtk_eth *eth)
508 {
509         /* Force Port1 XGMAC Link Up */
510         mtk_m32(eth, 0, MTK_XGMAC_FORCE_LINK(MTK_GMAC1_ID),
511                 MTK_XGMAC_STS(MTK_GMAC1_ID));
512
513         /* Adjust GSW bridge IPG to 11 */
514         mtk_m32(eth, GSWTX_IPG_MASK | GSWRX_IPG_MASK,
515                 (GSW_IPG_11 << GSWTX_IPG_SHIFT) |
516                 (GSW_IPG_11 << GSWRX_IPG_SHIFT),
517                 MTK_GSW_CFG);
518 }
519
520 static struct phylink_pcs *mtk_mac_select_pcs(struct phylink_config *config,
521                                               phy_interface_t interface)
522 {
523         struct mtk_mac *mac = container_of(config, struct mtk_mac,
524                                            phylink_config);
525         struct mtk_eth *eth = mac->hw;
526         unsigned int sid;
527
528         if (interface == PHY_INTERFACE_MODE_SGMII ||
529             phy_interface_mode_is_8023z(interface)) {
530                 sid = (MTK_HAS_CAPS(eth->soc->caps, MTK_SHARED_SGMII)) ?
531                        0 : mac->id;
532
533                 return eth->sgmii_pcs[sid];
534         }
535
536         return NULL;
537 }
538
539 static void mtk_mac_config(struct phylink_config *config, unsigned int mode,
540                            const struct phylink_link_state *state)
541 {
542         struct mtk_mac *mac = container_of(config, struct mtk_mac,
543                                            phylink_config);
544         struct mtk_eth *eth = mac->hw;
545         int val, ge_mode, err = 0;
546         u32 i;
547
548         /* MT76x8 has no hardware settings for the MAC */
549         if (!MTK_HAS_CAPS(eth->soc->caps, MTK_SOC_MT7628) &&
550             mac->interface != state->interface) {
551                 /* Setup soc pin functions */
552                 switch (state->interface) {
553                 case PHY_INTERFACE_MODE_TRGMII:
554                 case PHY_INTERFACE_MODE_RGMII_TXID:
555                 case PHY_INTERFACE_MODE_RGMII_RXID:
556                 case PHY_INTERFACE_MODE_RGMII_ID:
557                 case PHY_INTERFACE_MODE_RGMII:
558                 case PHY_INTERFACE_MODE_MII:
559                         if (MTK_HAS_CAPS(eth->soc->caps, MTK_RGMII)) {
560                                 err = mtk_gmac_rgmii_path_setup(eth, mac->id);
561                                 if (err)
562                                         goto init_err;
563                         }
564                         break;
565                 case PHY_INTERFACE_MODE_1000BASEX:
566                 case PHY_INTERFACE_MODE_2500BASEX:
567                 case PHY_INTERFACE_MODE_SGMII:
568                         err = mtk_gmac_sgmii_path_setup(eth, mac->id);
569                         if (err)
570                                 goto init_err;
571                         break;
572                 case PHY_INTERFACE_MODE_GMII:
573                         if (MTK_HAS_CAPS(eth->soc->caps, MTK_GEPHY)) {
574                                 err = mtk_gmac_gephy_path_setup(eth, mac->id);
575                                 if (err)
576                                         goto init_err;
577                         }
578                         break;
579                 case PHY_INTERFACE_MODE_INTERNAL:
580                         break;
581                 default:
582                         goto err_phy;
583                 }
584
585                 /* Setup clock for 1st gmac */
586                 if (!mac->id && state->interface != PHY_INTERFACE_MODE_SGMII &&
587                     !phy_interface_mode_is_8023z(state->interface) &&
588                     MTK_HAS_CAPS(mac->hw->soc->caps, MTK_GMAC1_TRGMII)) {
589                         if (MTK_HAS_CAPS(mac->hw->soc->caps,
590                                          MTK_TRGMII_MT7621_CLK)) {
591                                 if (mt7621_gmac0_rgmii_adjust(mac->hw,
592                                                               state->interface))
593                                         goto err_phy;
594                         } else {
595                                 mtk_gmac0_rgmii_adjust(mac->hw,
596                                                        state->interface);
597
598                                 /* mt7623_pad_clk_setup */
599                                 for (i = 0 ; i < NUM_TRGMII_CTRL; i++)
600                                         mtk_w32(mac->hw,
601                                                 TD_DM_DRVP(8) | TD_DM_DRVN(8),
602                                                 TRGMII_TD_ODT(i));
603
604                                 /* Assert/release MT7623 RXC reset */
605                                 mtk_m32(mac->hw, 0, RXC_RST | RXC_DQSISEL,
606                                         TRGMII_RCK_CTRL);
607                                 mtk_m32(mac->hw, RXC_RST, 0, TRGMII_RCK_CTRL);
608                         }
609                 }
610
611                 switch (state->interface) {
612                 case PHY_INTERFACE_MODE_MII:
613                 case PHY_INTERFACE_MODE_GMII:
614                         ge_mode = 1;
615                         break;
616                 default:
617                         ge_mode = 0;
618                         break;
619                 }
620
621                 /* put the gmac into the right mode */
622                 regmap_read(eth->ethsys, ETHSYS_SYSCFG0, &val);
623                 val &= ~SYSCFG0_GE_MODE(SYSCFG0_GE_MASK, mac->id);
624                 val |= SYSCFG0_GE_MODE(ge_mode, mac->id);
625                 regmap_write(eth->ethsys, ETHSYS_SYSCFG0, val);
626
627                 mac->interface = state->interface;
628         }
629
630         /* SGMII */
631         if (state->interface == PHY_INTERFACE_MODE_SGMII ||
632             phy_interface_mode_is_8023z(state->interface)) {
633                 /* The GMAC-to-SGMII path will be enabled once the
634                  * SGMIISYS setup is done.
635                  */
636                 regmap_read(eth->ethsys, ETHSYS_SYSCFG0, &val);
637
638                 regmap_update_bits(eth->ethsys, ETHSYS_SYSCFG0,
639                                    SYSCFG0_SGMII_MASK,
640                                    ~(u32)SYSCFG0_SGMII_MASK);
641
642                 /* Save the syscfg0 value for mac_finish */
643                 mac->syscfg0 = val;
644         } else if (phylink_autoneg_inband(mode)) {
645                 dev_err(eth->dev,
646                         "In-band mode not supported in non SGMII mode!\n");
647                 return;
648         }
649
650         /* Setup gmac */
651         if (mtk_is_netsys_v3_or_greater(eth) &&
652             mac->interface == PHY_INTERFACE_MODE_INTERNAL) {
653                 mtk_w32(mac->hw, MTK_GDMA_XGDM_SEL, MTK_GDMA_EG_CTRL(mac->id));
654                 mtk_w32(mac->hw, MAC_MCR_FORCE_LINK_DOWN, MTK_MAC_MCR(mac->id));
655
656                 mtk_setup_bridge_switch(eth);
657         }
658
659         return;
660
661 err_phy:
662         dev_err(eth->dev, "%s: GMAC%d mode %s not supported!\n", __func__,
663                 mac->id, phy_modes(state->interface));
664         return;
665
666 init_err:
667         dev_err(eth->dev, "%s: GMAC%d mode %s err: %d!\n", __func__,
668                 mac->id, phy_modes(state->interface), err);
669 }
670
671 static int mtk_mac_finish(struct phylink_config *config, unsigned int mode,
672                           phy_interface_t interface)
673 {
674         struct mtk_mac *mac = container_of(config, struct mtk_mac,
675                                            phylink_config);
676         struct mtk_eth *eth = mac->hw;
677         u32 mcr_cur, mcr_new;
678
679         /* Enable SGMII */
680         if (interface == PHY_INTERFACE_MODE_SGMII ||
681             phy_interface_mode_is_8023z(interface))
682                 regmap_update_bits(eth->ethsys, ETHSYS_SYSCFG0,
683                                    SYSCFG0_SGMII_MASK, mac->syscfg0);
684
685         /* Setup gmac */
686         mcr_cur = mtk_r32(mac->hw, MTK_MAC_MCR(mac->id));
687         mcr_new = mcr_cur;
688         mcr_new |= MAC_MCR_IPG_CFG | MAC_MCR_FORCE_MODE |
689                    MAC_MCR_BACKOFF_EN | MAC_MCR_BACKPR_EN | MAC_MCR_RX_FIFO_CLR_DIS;
690
691         /* Only update control register when needed! */
692         if (mcr_new != mcr_cur)
693                 mtk_w32(mac->hw, mcr_new, MTK_MAC_MCR(mac->id));
694
695         return 0;
696 }
697
698 static void mtk_mac_link_down(struct phylink_config *config, unsigned int mode,
699                               phy_interface_t interface)
700 {
701         struct mtk_mac *mac = container_of(config, struct mtk_mac,
702                                            phylink_config);
703         u32 mcr = mtk_r32(mac->hw, MTK_MAC_MCR(mac->id));
704
705         mcr &= ~(MAC_MCR_TX_EN | MAC_MCR_RX_EN | MAC_MCR_FORCE_LINK);
706         mtk_w32(mac->hw, mcr, MTK_MAC_MCR(mac->id));
707 }
708
709 static void mtk_set_queue_speed(struct mtk_eth *eth, unsigned int idx,
710                                 int speed)
711 {
712         const struct mtk_soc_data *soc = eth->soc;
713         u32 ofs, val;
714
715         if (!MTK_HAS_CAPS(soc->caps, MTK_QDMA))
716                 return;
717
718         val = MTK_QTX_SCH_MIN_RATE_EN |
719               /* minimum: 10 Mbps */
720               FIELD_PREP(MTK_QTX_SCH_MIN_RATE_MAN, 1) |
721               FIELD_PREP(MTK_QTX_SCH_MIN_RATE_EXP, 4) |
722               MTK_QTX_SCH_LEAKY_BUCKET_SIZE;
723         if (mtk_is_netsys_v1(eth))
724                 val |= MTK_QTX_SCH_LEAKY_BUCKET_EN;
725
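        /* The MAN/EXP pairs below appear to encode the shaper rate as
         * mantissa * 10^exponent in kbit/s (e.g. 10 * 10^5 = 1 Gbit/s); the
         * MT7621 values are presumably tuned for that SoC's reference clock.
         */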
726         if (IS_ENABLED(CONFIG_SOC_MT7621)) {
727                 switch (speed) {
728                 case SPEED_10:
729                         val |= MTK_QTX_SCH_MAX_RATE_EN |
730                                FIELD_PREP(MTK_QTX_SCH_MAX_RATE_MAN, 103) |
731                                FIELD_PREP(MTK_QTX_SCH_MAX_RATE_EXP, 2) |
732                                FIELD_PREP(MTK_QTX_SCH_MAX_RATE_WEIGHT, 1);
733                         break;
734                 case SPEED_100:
735                         val |= MTK_QTX_SCH_MAX_RATE_EN |
736                                FIELD_PREP(MTK_QTX_SCH_MAX_RATE_MAN, 103) |
737                                FIELD_PREP(MTK_QTX_SCH_MAX_RATE_EXP, 3) |
738                                FIELD_PREP(MTK_QTX_SCH_MAX_RATE_WEIGHT, 1);
739                         break;
740                 case SPEED_1000:
741                         val |= MTK_QTX_SCH_MAX_RATE_EN |
742                                FIELD_PREP(MTK_QTX_SCH_MAX_RATE_MAN, 105) |
743                                FIELD_PREP(MTK_QTX_SCH_MAX_RATE_EXP, 4) |
744                                FIELD_PREP(MTK_QTX_SCH_MAX_RATE_WEIGHT, 10);
745                         break;
746                 default:
747                         break;
748                 }
749         } else {
750                 switch (speed) {
751                 case SPEED_10:
752                         val |= MTK_QTX_SCH_MAX_RATE_EN |
753                                FIELD_PREP(MTK_QTX_SCH_MAX_RATE_MAN, 1) |
754                                FIELD_PREP(MTK_QTX_SCH_MAX_RATE_EXP, 4) |
755                                FIELD_PREP(MTK_QTX_SCH_MAX_RATE_WEIGHT, 1);
756                         break;
757                 case SPEED_100:
758                         val |= MTK_QTX_SCH_MAX_RATE_EN |
759                                FIELD_PREP(MTK_QTX_SCH_MAX_RATE_MAN, 1) |
760                                FIELD_PREP(MTK_QTX_SCH_MAX_RATE_EXP, 5) |
761                                FIELD_PREP(MTK_QTX_SCH_MAX_RATE_WEIGHT, 1);
762                         break;
763                 case SPEED_1000:
764                         val |= MTK_QTX_SCH_MAX_RATE_EN |
765                                FIELD_PREP(MTK_QTX_SCH_MAX_RATE_MAN, 10) |
766                                FIELD_PREP(MTK_QTX_SCH_MAX_RATE_EXP, 5) |
767                                FIELD_PREP(MTK_QTX_SCH_MAX_RATE_WEIGHT, 10);
768                         break;
769                 default:
770                         break;
771                 }
772         }
773
774         ofs = MTK_QTX_OFFSET * idx;
775         mtk_w32(eth, val, soc->reg_map->qdma.qtx_sch + ofs);
776 }
777
778 static void mtk_mac_link_up(struct phylink_config *config,
779                             struct phy_device *phy,
780                             unsigned int mode, phy_interface_t interface,
781                             int speed, int duplex, bool tx_pause, bool rx_pause)
782 {
783         struct mtk_mac *mac = container_of(config, struct mtk_mac,
784                                            phylink_config);
785         u32 mcr;
786
787         mcr = mtk_r32(mac->hw, MTK_MAC_MCR(mac->id));
788         mcr &= ~(MAC_MCR_SPEED_100 | MAC_MCR_SPEED_1000 |
789                  MAC_MCR_FORCE_DPX | MAC_MCR_FORCE_TX_FC |
790                  MAC_MCR_FORCE_RX_FC);
791
792         /* Configure speed */
793         mac->speed = speed;
794         switch (speed) {
795         case SPEED_2500:
796         case SPEED_1000:
797                 mcr |= MAC_MCR_SPEED_1000;
798                 break;
799         case SPEED_100:
800                 mcr |= MAC_MCR_SPEED_100;
801                 break;
802         }
803
804         /* Configure duplex */
805         if (duplex == DUPLEX_FULL)
806                 mcr |= MAC_MCR_FORCE_DPX;
807
808         /* Configure pause modes - phylink will avoid these for half duplex */
809         if (tx_pause)
810                 mcr |= MAC_MCR_FORCE_TX_FC;
811         if (rx_pause)
812                 mcr |= MAC_MCR_FORCE_RX_FC;
813
814         mcr |= MAC_MCR_TX_EN | MAC_MCR_RX_EN | MAC_MCR_FORCE_LINK;
815         mtk_w32(mac->hw, mcr, MTK_MAC_MCR(mac->id));
816 }
817
818 static const struct phylink_mac_ops mtk_phylink_ops = {
819         .mac_select_pcs = mtk_mac_select_pcs,
820         .mac_config = mtk_mac_config,
821         .mac_finish = mtk_mac_finish,
822         .mac_link_down = mtk_mac_link_down,
823         .mac_link_up = mtk_mac_link_up,
824 };
825
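/* Register the MDIO bus described by the "mdio-bus" child node. The MDC
 * divider is chosen as the smallest value (capped at 63) for which
 * MDC_MAX_FREQ / divider does not exceed the requested clock-frequency.
 */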
826 static int mtk_mdio_init(struct mtk_eth *eth)
827 {
828         unsigned int max_clk = 2500000, divider;
829         struct device_node *mii_np;
830         int ret;
831         u32 val;
832
833         mii_np = of_get_child_by_name(eth->dev->of_node, "mdio-bus");
834         if (!mii_np) {
835                 dev_err(eth->dev, "no %s child node found", "mdio-bus");
836                 return -ENODEV;
837         }
838
839         if (!of_device_is_available(mii_np)) {
840                 ret = -ENODEV;
841                 goto err_put_node;
842         }
843
844         eth->mii_bus = devm_mdiobus_alloc(eth->dev);
845         if (!eth->mii_bus) {
846                 ret = -ENOMEM;
847                 goto err_put_node;
848         }
849
850         eth->mii_bus->name = "mdio";
851         eth->mii_bus->read = mtk_mdio_read_c22;
852         eth->mii_bus->write = mtk_mdio_write_c22;
853         eth->mii_bus->read_c45 = mtk_mdio_read_c45;
854         eth->mii_bus->write_c45 = mtk_mdio_write_c45;
855         eth->mii_bus->priv = eth;
856         eth->mii_bus->parent = eth->dev;
857
858         snprintf(eth->mii_bus->id, MII_BUS_ID_SIZE, "%pOFn", mii_np);
859
860         if (!of_property_read_u32(mii_np, "clock-frequency", &val)) {
861                 if (val > MDC_MAX_FREQ || val < MDC_MAX_FREQ / MDC_MAX_DIVIDER) {
862                         dev_err(eth->dev, "MDIO clock frequency out of range");
863                         ret = -EINVAL;
864                         goto err_put_node;
865                 }
866                 max_clk = val;
867         }
868         divider = min_t(unsigned int, DIV_ROUND_UP(MDC_MAX_FREQ, max_clk), 63);
869
870         /* Configure MDC Turbo Mode */
871         if (mtk_is_netsys_v3_or_greater(eth))
872                 mtk_m32(eth, 0, MISC_MDC_TURBO, MTK_MAC_MISC_V3);
873
874         /* Configure MDC Divider */
875         val = FIELD_PREP(PPSC_MDC_CFG, divider);
876         if (!mtk_is_netsys_v3_or_greater(eth))
877                 val |= PPSC_MDC_TURBO;
878         mtk_m32(eth, PPSC_MDC_CFG, val, MTK_PPSC);
879
880         dev_dbg(eth->dev, "MDC is running on %d Hz\n", MDC_MAX_FREQ / divider);
881
882         ret = of_mdiobus_register(eth->mii_bus, mii_np);
883
884 err_put_node:
885         of_node_put(mii_np);
886         return ret;
887 }
888
889 static void mtk_mdio_cleanup(struct mtk_eth *eth)
890 {
891         if (!eth->mii_bus)
892                 return;
893
894         mdiobus_unregister(eth->mii_bus);
895 }
896
897 static inline void mtk_tx_irq_disable(struct mtk_eth *eth, u32 mask)
898 {
899         unsigned long flags;
900         u32 val;
901
902         spin_lock_irqsave(&eth->tx_irq_lock, flags);
903         val = mtk_r32(eth, eth->soc->reg_map->tx_irq_mask);
904         mtk_w32(eth, val & ~mask, eth->soc->reg_map->tx_irq_mask);
905         spin_unlock_irqrestore(&eth->tx_irq_lock, flags);
906 }
907
908 static inline void mtk_tx_irq_enable(struct mtk_eth *eth, u32 mask)
909 {
910         unsigned long flags;
911         u32 val;
912
913         spin_lock_irqsave(&eth->tx_irq_lock, flags);
914         val = mtk_r32(eth, eth->soc->reg_map->tx_irq_mask);
915         mtk_w32(eth, val | mask, eth->soc->reg_map->tx_irq_mask);
916         spin_unlock_irqrestore(&eth->tx_irq_lock, flags);
917 }
918
919 static inline void mtk_rx_irq_disable(struct mtk_eth *eth, u32 mask)
920 {
921         unsigned long flags;
922         u32 val;
923
924         spin_lock_irqsave(&eth->rx_irq_lock, flags);
925         val = mtk_r32(eth, eth->soc->reg_map->pdma.irq_mask);
926         mtk_w32(eth, val & ~mask, eth->soc->reg_map->pdma.irq_mask);
927         spin_unlock_irqrestore(&eth->rx_irq_lock, flags);
928 }
929
930 static inline void mtk_rx_irq_enable(struct mtk_eth *eth, u32 mask)
931 {
932         unsigned long flags;
933         u32 val;
934
935         spin_lock_irqsave(&eth->rx_irq_lock, flags);
936         val = mtk_r32(eth, eth->soc->reg_map->pdma.irq_mask);
937         mtk_w32(eth, val | mask, eth->soc->reg_map->pdma.irq_mask);
938         spin_unlock_irqrestore(&eth->rx_irq_lock, flags);
939 }
940
941 static int mtk_set_mac_address(struct net_device *dev, void *p)
942 {
943         int ret = eth_mac_addr(dev, p);
944         struct mtk_mac *mac = netdev_priv(dev);
945         struct mtk_eth *eth = mac->hw;
946         const char *macaddr = dev->dev_addr;
947
948         if (ret)
949                 return ret;
950
951         if (unlikely(test_bit(MTK_RESETTING, &mac->hw->state)))
952                 return -EBUSY;
953
954         spin_lock_bh(&mac->hw->page_lock);
955         if (MTK_HAS_CAPS(eth->soc->caps, MTK_SOC_MT7628)) {
956                 mtk_w32(mac->hw, (macaddr[0] << 8) | macaddr[1],
957                         MT7628_SDM_MAC_ADRH);
958                 mtk_w32(mac->hw, (macaddr[2] << 24) | (macaddr[3] << 16) |
959                         (macaddr[4] << 8) | macaddr[5],
960                         MT7628_SDM_MAC_ADRL);
961         } else {
962                 mtk_w32(mac->hw, (macaddr[0] << 8) | macaddr[1],
963                         MTK_GDMA_MAC_ADRH(mac->id));
964                 mtk_w32(mac->hw, (macaddr[2] << 24) | (macaddr[3] << 16) |
965                         (macaddr[4] << 8) | macaddr[5],
966                         MTK_GDMA_MAC_ADRL(mac->id));
967         }
968         spin_unlock_bh(&mac->hw->page_lock);
969
970         return 0;
971 }
972
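/* The hardware byte counters are 64-bit wide and exposed as a low word
 * followed by a high word; the high word is folded in with a 32-bit shift
 * when it is non-zero.
 */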
973 void mtk_stats_update_mac(struct mtk_mac *mac)
974 {
975         struct mtk_hw_stats *hw_stats = mac->hw_stats;
976         struct mtk_eth *eth = mac->hw;
977
978         u64_stats_update_begin(&hw_stats->syncp);
979
980         if (MTK_HAS_CAPS(eth->soc->caps, MTK_SOC_MT7628)) {
981                 hw_stats->tx_packets += mtk_r32(mac->hw, MT7628_SDM_TPCNT);
982                 hw_stats->tx_bytes += mtk_r32(mac->hw, MT7628_SDM_TBCNT);
983                 hw_stats->rx_packets += mtk_r32(mac->hw, MT7628_SDM_RPCNT);
984                 hw_stats->rx_bytes += mtk_r32(mac->hw, MT7628_SDM_RBCNT);
985                 hw_stats->rx_checksum_errors +=
986                         mtk_r32(mac->hw, MT7628_SDM_CS_ERR);
987         } else {
988                 const struct mtk_reg_map *reg_map = eth->soc->reg_map;
989                 unsigned int offs = hw_stats->reg_offset;
990                 u64 stats;
991
992                 hw_stats->rx_bytes += mtk_r32(mac->hw, reg_map->gdm1_cnt + offs);
993                 stats = mtk_r32(mac->hw, reg_map->gdm1_cnt + 0x4 + offs);
994                 if (stats)
995                         hw_stats->rx_bytes += (stats << 32);
996                 hw_stats->rx_packets +=
997                         mtk_r32(mac->hw, reg_map->gdm1_cnt + 0x8 + offs);
998                 hw_stats->rx_overflow +=
999                         mtk_r32(mac->hw, reg_map->gdm1_cnt + 0x10 + offs);
1000                 hw_stats->rx_fcs_errors +=
1001                         mtk_r32(mac->hw, reg_map->gdm1_cnt + 0x14 + offs);
1002                 hw_stats->rx_short_errors +=
1003                         mtk_r32(mac->hw, reg_map->gdm1_cnt + 0x18 + offs);
1004                 hw_stats->rx_long_errors +=
1005                         mtk_r32(mac->hw, reg_map->gdm1_cnt + 0x1c + offs);
1006                 hw_stats->rx_checksum_errors +=
1007                         mtk_r32(mac->hw, reg_map->gdm1_cnt + 0x20 + offs);
1008                 hw_stats->rx_flow_control_packets +=
1009                         mtk_r32(mac->hw, reg_map->gdm1_cnt + 0x24 + offs);
1010
1011                 if (mtk_is_netsys_v3_or_greater(eth)) {
1012                         hw_stats->tx_skip +=
1013                                 mtk_r32(mac->hw, reg_map->gdm1_cnt + 0x50 + offs);
1014                         hw_stats->tx_collisions +=
1015                                 mtk_r32(mac->hw, reg_map->gdm1_cnt + 0x54 + offs);
1016                         hw_stats->tx_bytes +=
1017                                 mtk_r32(mac->hw, reg_map->gdm1_cnt + 0x40 + offs);
1018                         stats =  mtk_r32(mac->hw, reg_map->gdm1_cnt + 0x44 + offs);
1019                         if (stats)
1020                                 hw_stats->tx_bytes += (stats << 32);
1021                         hw_stats->tx_packets +=
1022                                 mtk_r32(mac->hw, reg_map->gdm1_cnt + 0x48 + offs);
1023                 } else {
1024                         hw_stats->tx_skip +=
1025                                 mtk_r32(mac->hw, reg_map->gdm1_cnt + 0x28 + offs);
1026                         hw_stats->tx_collisions +=
1027                                 mtk_r32(mac->hw, reg_map->gdm1_cnt + 0x2c + offs);
1028                         hw_stats->tx_bytes +=
1029                                 mtk_r32(mac->hw, reg_map->gdm1_cnt + 0x30 + offs);
1030                         stats =  mtk_r32(mac->hw, reg_map->gdm1_cnt + 0x34 + offs);
1031                         if (stats)
1032                                 hw_stats->tx_bytes += (stats << 32);
1033                         hw_stats->tx_packets +=
1034                                 mtk_r32(mac->hw, reg_map->gdm1_cnt + 0x38 + offs);
1035                 }
1036         }
1037
1038         u64_stats_update_end(&hw_stats->syncp);
1039 }
1040
1041 static void mtk_stats_update(struct mtk_eth *eth)
1042 {
1043         int i;
1044
1045         for (i = 0; i < MTK_MAX_DEVS; i++) {
1046                 if (!eth->mac[i] || !eth->mac[i]->hw_stats)
1047                         continue;
1048                 if (spin_trylock(&eth->mac[i]->hw_stats->stats_lock)) {
1049                         mtk_stats_update_mac(eth->mac[i]);
1050                         spin_unlock(&eth->mac[i]->hw_stats->stats_lock);
1051                 }
1052         }
1053 }
1054
1055 static void mtk_get_stats64(struct net_device *dev,
1056                             struct rtnl_link_stats64 *storage)
1057 {
1058         struct mtk_mac *mac = netdev_priv(dev);
1059         struct mtk_hw_stats *hw_stats = mac->hw_stats;
1060         unsigned int start;
1061
1062         if (netif_running(dev) && netif_device_present(dev)) {
1063                 if (spin_trylock_bh(&hw_stats->stats_lock)) {
1064                         mtk_stats_update_mac(mac);
1065                         spin_unlock_bh(&hw_stats->stats_lock);
1066                 }
1067         }
1068
1069         do {
1070                 start = u64_stats_fetch_begin(&hw_stats->syncp);
1071                 storage->rx_packets = hw_stats->rx_packets;
1072                 storage->tx_packets = hw_stats->tx_packets;
1073                 storage->rx_bytes = hw_stats->rx_bytes;
1074                 storage->tx_bytes = hw_stats->tx_bytes;
1075                 storage->collisions = hw_stats->tx_collisions;
1076                 storage->rx_length_errors = hw_stats->rx_short_errors +
1077                         hw_stats->rx_long_errors;
1078                 storage->rx_over_errors = hw_stats->rx_overflow;
1079                 storage->rx_crc_errors = hw_stats->rx_fcs_errors;
1080                 storage->rx_errors = hw_stats->rx_checksum_errors;
1081                 storage->tx_aborted_errors = hw_stats->tx_skip;
1082         } while (u64_stats_fetch_retry(&hw_stats->syncp, start));
1083
1084         storage->tx_errors = dev->stats.tx_errors;
1085         storage->rx_dropped = dev->stats.rx_dropped;
1086         storage->tx_dropped = dev->stats.tx_dropped;
1087 }
1088
1089 static inline int mtk_max_frag_size(int mtu)
1090 {
1091         /* make sure buf_size will be at least MTK_MAX_RX_LENGTH */
1092         if (mtu + MTK_RX_ETH_HLEN < MTK_MAX_RX_LENGTH_2K)
1093                 mtu = MTK_MAX_RX_LENGTH_2K - MTK_RX_ETH_HLEN;
1094
1095         return SKB_DATA_ALIGN(MTK_RX_HLEN + mtu) +
1096                 SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
1097 }
1098
1099 static inline int mtk_max_buf_size(int frag_size)
1100 {
1101         int buf_size = frag_size - NET_SKB_PAD - NET_IP_ALIGN -
1102                        SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
1103
1104         WARN_ON(buf_size < MTK_MAX_RX_LENGTH_2K);
1105
1106         return buf_size;
1107 }
1108
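/* Snapshot a RX descriptor, but only once RX_DMA_DONE is observed in rxd2;
 * the extended words rxd5/rxd6 exist only in the newer descriptor layout.
 */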
1109 static bool mtk_rx_get_desc(struct mtk_eth *eth, struct mtk_rx_dma_v2 *rxd,
1110                             struct mtk_rx_dma_v2 *dma_rxd)
1111 {
1112         rxd->rxd2 = READ_ONCE(dma_rxd->rxd2);
1113         if (!(rxd->rxd2 & RX_DMA_DONE))
1114                 return false;
1115
1116         rxd->rxd1 = READ_ONCE(dma_rxd->rxd1);
1117         rxd->rxd3 = READ_ONCE(dma_rxd->rxd3);
1118         rxd->rxd4 = READ_ONCE(dma_rxd->rxd4);
1119         if (mtk_is_netsys_v2_or_greater(eth)) {
1120                 rxd->rxd5 = READ_ONCE(dma_rxd->rxd5);
1121                 rxd->rxd6 = READ_ONCE(dma_rxd->rxd6);
1122         }
1123
1124         return true;
1125 }
1126
1127 static void *mtk_max_lro_buf_alloc(gfp_t gfp_mask)
1128 {
1129         unsigned int size = mtk_max_frag_size(MTK_MAX_LRO_RX_LENGTH);
1130         unsigned long data;
1131
1132         data = __get_free_pages(gfp_mask | __GFP_COMP | __GFP_NOWARN,
1133                                 get_order(size));
1134
1135         return (void *)data;
1136 }
1137
1138 /* the QDMA core needs scratch memory to be set up */
1139 static int mtk_init_fq_dma(struct mtk_eth *eth)
1140 {
1141         const struct mtk_soc_data *soc = eth->soc;
1142         dma_addr_t phy_ring_tail;
1143         int cnt = soc->tx.fq_dma_size;
1144         dma_addr_t dma_addr;
1145         int i, j, len;
1146
1147         if (MTK_HAS_CAPS(eth->soc->caps, MTK_SRAM))
1148                 eth->scratch_ring = eth->sram_base;
1149         else
1150                 eth->scratch_ring = dma_alloc_coherent(eth->dma_dev,
1151                                                        cnt * soc->tx.desc_size,
1152                                                        &eth->phy_scratch_ring,
1153                                                        GFP_KERNEL);
1154
1155         if (unlikely(!eth->scratch_ring))
1156                 return -ENOMEM;
1157
1158         phy_ring_tail = eth->phy_scratch_ring + soc->tx.desc_size * (cnt - 1);
1159
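        /* Populate the free queue: allocate scratch buffers in chunks of
         * MTK_FQ_DMA_LENGTH entries and chain each descriptor to the next one
         * via txd2 so the hardware can walk the ring from fq_head to fq_tail.
         */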
1160         for (j = 0; j < DIV_ROUND_UP(soc->tx.fq_dma_size, MTK_FQ_DMA_LENGTH); j++) {
1161                 len = min_t(int, cnt - j * MTK_FQ_DMA_LENGTH, MTK_FQ_DMA_LENGTH);
1162                 eth->scratch_head[j] = kcalloc(len, MTK_QDMA_PAGE_SIZE, GFP_KERNEL);
1163
1164                 if (unlikely(!eth->scratch_head[j]))
1165                         return -ENOMEM;
1166
1167                 dma_addr = dma_map_single(eth->dma_dev,
1168                                           eth->scratch_head[j], len * MTK_QDMA_PAGE_SIZE,
1169                                           DMA_FROM_DEVICE);
1170
1171                 if (unlikely(dma_mapping_error(eth->dma_dev, dma_addr)))
1172                         return -ENOMEM;
1173
1174                 for (i = 0; i < len; i++) {
1175                         struct mtk_tx_dma_v2 *txd;
1176
1177                         txd = eth->scratch_ring + (j * MTK_FQ_DMA_LENGTH + i) * soc->tx.desc_size;
1178                         txd->txd1 = dma_addr + i * MTK_QDMA_PAGE_SIZE;
1179                         if (j * MTK_FQ_DMA_LENGTH + i < cnt)
1180                                 txd->txd2 = eth->phy_scratch_ring +
1181                                             (j * MTK_FQ_DMA_LENGTH + i + 1) * soc->tx.desc_size;
1182
1183                         txd->txd3 = TX_DMA_PLEN0(MTK_QDMA_PAGE_SIZE);
1184                         if (MTK_HAS_CAPS(soc->caps, MTK_36BIT_DMA))
1185                                 txd->txd3 |= TX_DMA_PREP_ADDR64(dma_addr + i * MTK_QDMA_PAGE_SIZE);
1186
1187                         txd->txd4 = 0;
1188                         if (mtk_is_netsys_v2_or_greater(eth)) {
1189                                 txd->txd5 = 0;
1190                                 txd->txd6 = 0;
1191                                 txd->txd7 = 0;
1192                                 txd->txd8 = 0;
1193                         }
1194                 }
1195         }
1196
1197         mtk_w32(eth, eth->phy_scratch_ring, soc->reg_map->qdma.fq_head);
1198         mtk_w32(eth, phy_ring_tail, soc->reg_map->qdma.fq_tail);
1199         mtk_w32(eth, (cnt << 16) | cnt, soc->reg_map->qdma.fq_count);
1200         mtk_w32(eth, MTK_QDMA_PAGE_SIZE << 16, soc->reg_map->qdma.fq_blen);
1201
1202         return 0;
1203 }
1204
1205 static void *mtk_qdma_phys_to_virt(struct mtk_tx_ring *ring, u32 desc)
1206 {
1207         return ring->dma + (desc - ring->phys);
1208 }
1209
1210 static struct mtk_tx_buf *mtk_desc_to_tx_buf(struct mtk_tx_ring *ring,
1211                                              void *txd, u32 txd_size)
1212 {
1213         int idx = (txd - ring->dma) / txd_size;
1214
1215         return &ring->buf[idx];
1216 }
1217
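/* Return the PDMA descriptor sitting at the same ring index as the given
 * QDMA descriptor, i.e. ring->dma_pdma + (dma - ring->dma).
 */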
1218 static struct mtk_tx_dma *qdma_to_pdma(struct mtk_tx_ring *ring,
1219                                        struct mtk_tx_dma *dma)
1220 {
1221         return ring->dma_pdma - (struct mtk_tx_dma *)ring->dma + dma;
1222 }
1223
1224 static int txd_to_idx(struct mtk_tx_ring *ring, void *dma, u32 txd_size)
1225 {
1226         return (dma - ring->dma) / txd_size;
1227 }
1228
1229 static void mtk_tx_unmap(struct mtk_eth *eth, struct mtk_tx_buf *tx_buf,
1230                          struct xdp_frame_bulk *bq, bool napi)
1231 {
1232         if (MTK_HAS_CAPS(eth->soc->caps, MTK_QDMA)) {
1233                 if (tx_buf->flags & MTK_TX_FLAGS_SINGLE0) {
1234                         dma_unmap_single(eth->dma_dev,
1235                                          dma_unmap_addr(tx_buf, dma_addr0),
1236                                          dma_unmap_len(tx_buf, dma_len0),
1237                                          DMA_TO_DEVICE);
1238                 } else if (tx_buf->flags & MTK_TX_FLAGS_PAGE0) {
1239                         dma_unmap_page(eth->dma_dev,
1240                                        dma_unmap_addr(tx_buf, dma_addr0),
1241                                        dma_unmap_len(tx_buf, dma_len0),
1242                                        DMA_TO_DEVICE);
1243                 }
1244         } else {
1245                 if (dma_unmap_len(tx_buf, dma_len0)) {
1246                         dma_unmap_page(eth->dma_dev,
1247                                        dma_unmap_addr(tx_buf, dma_addr0),
1248                                        dma_unmap_len(tx_buf, dma_len0),
1249                                        DMA_TO_DEVICE);
1250                 }
1251
1252                 if (dma_unmap_len(tx_buf, dma_len1)) {
1253                         dma_unmap_page(eth->dma_dev,
1254                                        dma_unmap_addr(tx_buf, dma_addr1),
1255                                        dma_unmap_len(tx_buf, dma_len1),
1256                                        DMA_TO_DEVICE);
1257                 }
1258         }
1259
1260         if (tx_buf->data && tx_buf->data != (void *)MTK_DMA_DUMMY_DESC) {
1261                 if (tx_buf->type == MTK_TYPE_SKB) {
1262                         struct sk_buff *skb = tx_buf->data;
1263
1264                         if (napi)
1265                                 napi_consume_skb(skb, napi);
1266                         else
1267                                 dev_kfree_skb_any(skb);
1268                 } else {
1269                         struct xdp_frame *xdpf = tx_buf->data;
1270
1271                         if (napi && tx_buf->type == MTK_TYPE_XDP_TX)
1272                                 xdp_return_frame_rx_napi(xdpf);
1273                         else if (bq)
1274                                 xdp_return_frame_bulk(xdpf, bq);
1275                         else
1276                                 xdp_return_frame(xdpf);
1277                 }
1278         }
1279         tx_buf->flags = 0;
1280         tx_buf->data = NULL;
1281 }
1282
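/* Record DMA unmap info for a freshly mapped buffer. QDMA uses one buffer per
 * descriptor (tracked via dma_addr0/dma_len0); PDMA packs two buffers into a
 * descriptor (txd1/PLEN0 for even idx, txd3/PLEN1 for odd idx), hence the
 * second dma_addr1/dma_len1 slot.
 */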
1283 static void setup_tx_buf(struct mtk_eth *eth, struct mtk_tx_buf *tx_buf,
1284                          struct mtk_tx_dma *txd, dma_addr_t mapped_addr,
1285                          size_t size, int idx)
1286 {
1287         if (MTK_HAS_CAPS(eth->soc->caps, MTK_QDMA)) {
1288                 dma_unmap_addr_set(tx_buf, dma_addr0, mapped_addr);
1289                 dma_unmap_len_set(tx_buf, dma_len0, size);
1290         } else {
1291                 if (idx & 1) {
1292                         txd->txd3 = mapped_addr;
1293                         txd->txd2 |= TX_DMA_PLEN1(size);
1294                         dma_unmap_addr_set(tx_buf, dma_addr1, mapped_addr);
1295                         dma_unmap_len_set(tx_buf, dma_len1, size);
1296                 } else {
1297                         tx_buf->data = (void *)MTK_DMA_DUMMY_DESC;
1298                         txd->txd1 = mapped_addr;
1299                         txd->txd2 = TX_DMA_PLEN0(size);
1300                         dma_unmap_addr_set(tx_buf, dma_addr0, mapped_addr);
1301                         dma_unmap_len_set(tx_buf, dma_len0, size);
1302                 }
1303         }
1304 }
1305
1306 static void mtk_tx_set_dma_desc_v1(struct net_device *dev, void *txd,
1307                                    struct mtk_tx_dma_desc_info *info)
1308 {
1309         struct mtk_mac *mac = netdev_priv(dev);
1310         struct mtk_eth *eth = mac->hw;
1311         struct mtk_tx_dma *desc = txd;
1312         u32 data;
1313
1314         WRITE_ONCE(desc->txd1, info->addr);
1315
1316         data = TX_DMA_SWC | TX_DMA_PLEN0(info->size) |
1317                FIELD_PREP(TX_DMA_PQID, info->qid);
1318         if (info->last)
1319                 data |= TX_DMA_LS0;
1320         WRITE_ONCE(desc->txd3, data);
1321
1322         data = (mac->id + 1) << TX_DMA_FPORT_SHIFT; /* forward port */
1323         if (info->first) {
1324                 if (info->gso)
1325                         data |= TX_DMA_TSO;
1326                 /* tx checksum offload */
1327                 if (info->csum)
1328                         data |= TX_DMA_CHKSUM;
1329                 /* vlan header offload */
1330                 if (info->vlan)
1331                         data |= TX_DMA_INS_VLAN | info->vlan_tci;
1332         }
1333         WRITE_ONCE(desc->txd4, data);
1334 }
1335
1336 static void mtk_tx_set_dma_desc_v2(struct net_device *dev, void *txd,
1337                                    struct mtk_tx_dma_desc_info *info)
1338 {
1339         struct mtk_mac *mac = netdev_priv(dev);
1340         struct mtk_tx_dma_v2 *desc = txd;
1341         struct mtk_eth *eth = mac->hw;
1342         u32 data;
1343
1344         WRITE_ONCE(desc->txd1, info->addr);
1345
1346         data = TX_DMA_PLEN0(info->size);
1347         if (info->last)
1348                 data |= TX_DMA_LS0;
1349
1350         if (MTK_HAS_CAPS(eth->soc->caps, MTK_36BIT_DMA))
1351                 data |= TX_DMA_PREP_ADDR64(info->addr);
1352
1353         WRITE_ONCE(desc->txd3, data);
1354
1355          /* set forward port */
1356         switch (mac->id) {
1357         case MTK_GMAC1_ID:
1358                 data = PSE_GDM1_PORT << TX_DMA_FPORT_SHIFT_V2;
1359                 break;
1360         case MTK_GMAC2_ID:
1361                 data = PSE_GDM2_PORT << TX_DMA_FPORT_SHIFT_V2;
1362                 break;
1363         case MTK_GMAC3_ID:
1364                 data = PSE_GDM3_PORT << TX_DMA_FPORT_SHIFT_V2;
1365                 break;
1366         }
1367
1368         data |= TX_DMA_SWC_V2 | QID_BITS_V2(info->qid);
1369         WRITE_ONCE(desc->txd4, data);
1370
1371         data = 0;
1372         if (info->first) {
1373                 if (info->gso)
1374                         data |= TX_DMA_TSO_V2;
1375                 /* tx checksum offload */
1376                 if (info->csum)
1377                         data |= TX_DMA_CHKSUM_V2;
1378                 if (mtk_is_netsys_v3_or_greater(eth) && netdev_uses_dsa(dev))
1379                         data |= TX_DMA_SPTAG_V3;
1380         }
1381         WRITE_ONCE(desc->txd5, data);
1382
1383         data = 0;
1384         if (info->first && info->vlan)
1385                 data |= TX_DMA_INS_VLAN_V2 | info->vlan_tci;
1386         WRITE_ONCE(desc->txd6, data);
1387
1388         WRITE_ONCE(desc->txd7, 0);
1389         WRITE_ONCE(desc->txd8, 0);
1390 }
1391
1392 static void mtk_tx_set_dma_desc(struct net_device *dev, void *txd,
1393                                 struct mtk_tx_dma_desc_info *info)
1394 {
1395         struct mtk_mac *mac = netdev_priv(dev);
1396         struct mtk_eth *eth = mac->hw;
1397
1398         if (mtk_is_netsys_v2_or_greater(eth))
1399                 mtk_tx_set_dma_desc_v2(dev, txd, info);
1400         else
1401                 mtk_tx_set_dma_desc_v1(dev, txd, info);
1402 }
1403
1404 static int mtk_tx_map(struct sk_buff *skb, struct net_device *dev,
1405                       int tx_num, struct mtk_tx_ring *ring, bool gso)
1406 {
1407         struct mtk_tx_dma_desc_info txd_info = {
1408                 .size = skb_headlen(skb),
1409                 .gso = gso,
1410                 .csum = skb->ip_summed == CHECKSUM_PARTIAL,
1411                 .vlan = skb_vlan_tag_present(skb),
1412                 .qid = skb_get_queue_mapping(skb),
1413                 .vlan_tci = skb_vlan_tag_get(skb),
1414                 .first = true,
1415                 .last = !skb_is_nonlinear(skb),
1416         };
1417         struct netdev_queue *txq;
1418         struct mtk_mac *mac = netdev_priv(dev);
1419         struct mtk_eth *eth = mac->hw;
1420         const struct mtk_soc_data *soc = eth->soc;
1421         struct mtk_tx_dma *itxd, *txd;
1422         struct mtk_tx_dma *itxd_pdma, *txd_pdma;
1423         struct mtk_tx_buf *itx_buf, *tx_buf;
1424         int i, n_desc = 1;
1425         int queue = skb_get_queue_mapping(skb);
1426         int k = 0;
1427
1428         txq = netdev_get_tx_queue(dev, queue);
1429         itxd = ring->next_free;
1430         itxd_pdma = qdma_to_pdma(ring, itxd);
1431         if (itxd == ring->last_free)
1432                 return -ENOMEM;
1433
1434         itx_buf = mtk_desc_to_tx_buf(ring, itxd, soc->tx.desc_size);
1435         memset(itx_buf, 0, sizeof(*itx_buf));
1436
1437         txd_info.addr = dma_map_single(eth->dma_dev, skb->data, txd_info.size,
1438                                        DMA_TO_DEVICE);
1439         if (unlikely(dma_mapping_error(eth->dma_dev, txd_info.addr)))
1440                 return -ENOMEM;
1441
1442         mtk_tx_set_dma_desc(dev, itxd, &txd_info);
1443
1444         itx_buf->flags |= MTK_TX_FLAGS_SINGLE0;
1445         itx_buf->mac_id = mac->id;
1446         setup_tx_buf(eth, itx_buf, itxd_pdma, txd_info.addr, txd_info.size,
1447                      k++);
1448
1449         /* TX SG offload */
1450         txd = itxd;
1451         txd_pdma = qdma_to_pdma(ring, txd);
1452
1453         for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
1454                 skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
1455                 unsigned int offset = 0;
1456                 int frag_size = skb_frag_size(frag);
1457
1458                 while (frag_size) {
1459                         bool new_desc = true;
1460
1461                         if (MTK_HAS_CAPS(soc->caps, MTK_QDMA) ||
1462                             (i & 0x1)) {
1463                                 txd = mtk_qdma_phys_to_virt(ring, txd->txd2);
1464                                 txd_pdma = qdma_to_pdma(ring, txd);
1465                                 if (txd == ring->last_free)
1466                                         goto err_dma;
1467
1468                                 n_desc++;
1469                         } else {
1470                                 new_desc = false;
1471                         }
1472
1473                         memset(&txd_info, 0, sizeof(struct mtk_tx_dma_desc_info));
1474                         txd_info.size = min_t(unsigned int, frag_size,
1475                                               soc->tx.dma_max_len);
1476                         txd_info.qid = queue;
1477                         txd_info.last = i == skb_shinfo(skb)->nr_frags - 1 &&
1478                                         !(frag_size - txd_info.size);
1479                         txd_info.addr = skb_frag_dma_map(eth->dma_dev, frag,
1480                                                          offset, txd_info.size,
1481                                                          DMA_TO_DEVICE);
1482                         if (unlikely(dma_mapping_error(eth->dma_dev, txd_info.addr)))
1483                                 goto err_dma;
1484
1485                         mtk_tx_set_dma_desc(dev, txd, &txd_info);
1486
1487                         tx_buf = mtk_desc_to_tx_buf(ring, txd,
1488                                                     soc->tx.desc_size);
1489                         if (new_desc)
1490                                 memset(tx_buf, 0, sizeof(*tx_buf));
1491                         tx_buf->data = (void *)MTK_DMA_DUMMY_DESC;
1492                         tx_buf->flags |= MTK_TX_FLAGS_PAGE0;
1493                         tx_buf->mac_id = mac->id;
1494
1495                         setup_tx_buf(eth, tx_buf, txd_pdma, txd_info.addr,
1496                                      txd_info.size, k++);
1497
1498                         frag_size -= txd_info.size;
1499                         offset += txd_info.size;
1500                 }
1501         }
1502
1503         /* store skb for cleanup */
1504         itx_buf->type = MTK_TYPE_SKB;
1505         itx_buf->data = skb;
1506
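             /* On the PDMA-only path a descriptor appears to carry up to two
              * buffers: the last buffer queued sits in the first slot when k
              * is odd and in the second when it is even, hence LS0 vs. LS1
              * below.
              */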
1507         if (!MTK_HAS_CAPS(soc->caps, MTK_QDMA)) {
1508                 if (k & 0x1)
1509                         txd_pdma->txd2 |= TX_DMA_LS0;
1510                 else
1511                         txd_pdma->txd2 |= TX_DMA_LS1;
1512         }
1513
1514         netdev_tx_sent_queue(txq, skb->len);
1515         skb_tx_timestamp(skb);
1516
1517         ring->next_free = mtk_qdma_phys_to_virt(ring, txd->txd2);
1518         atomic_sub(n_desc, &ring->free_count);
1519
1520         /* make sure that all changes to the dma ring are flushed before we
1521          * continue
1522          */
1523         wmb();
1524
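             /* Kick the hardware: QDMA is pointed at the physical address of
              * the next descriptor via the CTX pointer (deferred while
              * netdev_xmit_more() reports pending frames and the queue is not
              * stopped), PDMA gets the next ring index instead.
              */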
1525         if (MTK_HAS_CAPS(soc->caps, MTK_QDMA)) {
1526                 if (netif_xmit_stopped(txq) || !netdev_xmit_more())
1527                         mtk_w32(eth, txd->txd2, soc->reg_map->qdma.ctx_ptr);
1528         } else {
1529                 int next_idx;
1530
1531                 next_idx = NEXT_DESP_IDX(txd_to_idx(ring, txd, soc->tx.desc_size),
1532                                          ring->dma_size);
1533                 mtk_w32(eth, next_idx, MT7628_TX_CTX_IDX0);
1534         }
1535
1536         return 0;
1537
1538 err_dma:
1539         do {
1540                 tx_buf = mtk_desc_to_tx_buf(ring, itxd, soc->tx.desc_size);
1541
1542                 /* unmap dma */
1543                 mtk_tx_unmap(eth, tx_buf, NULL, false);
1544
1545                 itxd->txd3 = TX_DMA_LS0 | TX_DMA_OWNER_CPU;
1546                 if (!MTK_HAS_CAPS(soc->caps, MTK_QDMA))
1547                         itxd_pdma->txd2 = TX_DMA_DESP2_DEF;
1548
1549                 itxd = mtk_qdma_phys_to_virt(ring, itxd->txd2);
1550                 itxd_pdma = qdma_to_pdma(ring, itxd);
1551         } while (itxd != txd);
1552
1553         return -ENOMEM;
1554 }
1555
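     /* Worst-case descriptor count for an skb: one for the linear head plus,
      * for GSO, enough dma_max_len sized chunks to cover each fragment, or
      * simply one descriptor per fragment otherwise.
      */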
1556 static int mtk_cal_txd_req(struct mtk_eth *eth, struct sk_buff *skb)
1557 {
1558         int i, nfrags = 1;
1559         skb_frag_t *frag;
1560
1561         if (skb_is_gso(skb)) {
1562                 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
1563                         frag = &skb_shinfo(skb)->frags[i];
1564                         nfrags += DIV_ROUND_UP(skb_frag_size(frag),
1565                                                eth->soc->tx.dma_max_len);
1566                 }
1567         } else {
1568                 nfrags += skb_shinfo(skb)->nr_frags;
1569         }
1570
1571         return nfrags;
1572 }
1573
1574 static int mtk_queue_stopped(struct mtk_eth *eth)
1575 {
1576         int i;
1577
1578         for (i = 0; i < MTK_MAX_DEVS; i++) {
1579                 if (!eth->netdev[i])
1580                         continue;
1581                 if (netif_queue_stopped(eth->netdev[i]))
1582                         return 1;
1583         }
1584
1585         return 0;
1586 }
1587
1588 static void mtk_wake_queue(struct mtk_eth *eth)
1589 {
1590         int i;
1591
1592         for (i = 0; i < MTK_MAX_DEVS; i++) {
1593                 if (!eth->netdev[i])
1594                         continue;
1595                 netif_tx_wake_all_queues(eth->netdev[i]);
1596         }
1597 }
1598
1599 static netdev_tx_t mtk_start_xmit(struct sk_buff *skb, struct net_device *dev)
1600 {
1601         struct mtk_mac *mac = netdev_priv(dev);
1602         struct mtk_eth *eth = mac->hw;
1603         struct mtk_tx_ring *ring = &eth->tx_ring;
1604         struct net_device_stats *stats = &dev->stats;
1605         bool gso = false;
1606         int tx_num;
1607
1608         /* normally we can rely on the stack not calling this more than once;
1609          * however, we have two queues running on the same ring, so we need to
1610          * lock the ring access
1611          */
1612         spin_lock(&eth->page_lock);
1613
1614         if (unlikely(test_bit(MTK_RESETTING, &eth->state)))
1615                 goto drop;
1616
1617         tx_num = mtk_cal_txd_req(eth, skb);
1618         if (unlikely(atomic_read(&ring->free_count) <= tx_num)) {
1619                 netif_tx_stop_all_queues(dev);
1620                 netif_err(eth, tx_queued, dev,
1621                           "Tx Ring full when queue awake!\n");
1622                 spin_unlock(&eth->page_lock);
1623                 return NETDEV_TX_BUSY;
1624         }
1625
1626         /* TSO: fill MSS info in tcp checksum field */
1627         if (skb_is_gso(skb)) {
1628                 if (skb_cow_head(skb, 0)) {
1629                         netif_warn(eth, tx_err, dev,
1630                                    "GSO expand head fail.\n");
1631                         goto drop;
1632                 }
1633
1634                 if (skb_shinfo(skb)->gso_type &
1635                                 (SKB_GSO_TCPV4 | SKB_GSO_TCPV6)) {
1636                         gso = true;
1637                         tcp_hdr(skb)->check = htons(skb_shinfo(skb)->gso_size);
1638                 }
1639         }
1640
1641         if (mtk_tx_map(skb, dev, tx_num, ring, gso) < 0)
1642                 goto drop;
1643
1644         if (unlikely(atomic_read(&ring->free_count) <= ring->thresh))
1645                 netif_tx_stop_all_queues(dev);
1646
1647         spin_unlock(&eth->page_lock);
1648
1649         return NETDEV_TX_OK;
1650
1651 drop:
1652         spin_unlock(&eth->page_lock);
1653         stats->tx_dropped++;
1654         dev_kfree_skb_any(skb);
1655         return NETDEV_TX_OK;
1656 }
1657
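     /* With hardware LRO the traffic is spread over several RX rings; return
      * the first ring whose next descriptor is already marked DMA-done, or
      * ring 0 when LRO is disabled.
      */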
1658 static struct mtk_rx_ring *mtk_get_rx_ring(struct mtk_eth *eth)
1659 {
1660         int i;
1661         struct mtk_rx_ring *ring;
1662         int idx;
1663
1664         if (!eth->hwlro)
1665                 return &eth->rx_ring[0];
1666
1667         for (i = 0; i < MTK_MAX_RX_RING_NUM; i++) {
1668                 struct mtk_rx_dma *rxd;
1669
1670                 ring = &eth->rx_ring[i];
1671                 idx = NEXT_DESP_IDX(ring->calc_idx, ring->dma_size);
1672                 rxd = ring->dma + idx * eth->soc->rx.desc_size;
1673                 if (rxd->rxd2 & RX_DMA_DONE) {
1674                         ring->calc_idx_update = true;
1675                         return ring;
1676                 }
1677         }
1678
1679         return NULL;
1680 }
1681
1682 static void mtk_update_rx_cpu_idx(struct mtk_eth *eth)
1683 {
1684         struct mtk_rx_ring *ring;
1685         int i;
1686
1687         if (!eth->hwlro) {
1688                 ring = &eth->rx_ring[0];
1689                 mtk_w32(eth, ring->calc_idx, ring->crx_idx_reg);
1690         } else {
1691                 for (i = 0; i < MTK_MAX_RX_RING_NUM; i++) {
1692                         ring = &eth->rx_ring[i];
1693                         if (ring->calc_idx_update) {
1694                                 ring->calc_idx_update = false;
1695                                 mtk_w32(eth, ring->calc_idx, ring->crx_idx_reg);
1696                         }
1697                 }
1698         }
1699 }
1700
1701 static bool mtk_page_pool_enabled(struct mtk_eth *eth)
1702 {
1703         return mtk_is_netsys_v2_or_greater(eth);
1704 }
1705
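     /* Create the page_pool backing one RX ring and register it as that
      * ring's XDP memory model. Pages are mapped bidirectionally only while
      * an XDP program is attached, so XDP_TX buffers can be handed back to
      * the device without remapping.
      */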
1706 static struct page_pool *mtk_create_page_pool(struct mtk_eth *eth,
1707                                               struct xdp_rxq_info *xdp_q,
1708                                               int id, int size)
1709 {
1710         struct page_pool_params pp_params = {
1711                 .order = 0,
1712                 .flags = PP_FLAG_DMA_MAP | PP_FLAG_DMA_SYNC_DEV,
1713                 .pool_size = size,
1714                 .nid = NUMA_NO_NODE,
1715                 .dev = eth->dma_dev,
1716                 .offset = MTK_PP_HEADROOM,
1717                 .max_len = MTK_PP_MAX_BUF_SIZE,
1718         };
1719         struct page_pool *pp;
1720         int err;
1721
1722         pp_params.dma_dir = rcu_access_pointer(eth->prog) ? DMA_BIDIRECTIONAL
1723                                                           : DMA_FROM_DEVICE;
1724         pp = page_pool_create(&pp_params);
1725         if (IS_ERR(pp))
1726                 return pp;
1727
1728         err = __xdp_rxq_info_reg(xdp_q, eth->dummy_dev, id,
1729                                  eth->rx_napi.napi_id, PAGE_SIZE);
1730         if (err < 0)
1731                 goto err_free_pp;
1732
1733         err = xdp_rxq_info_reg_mem_model(xdp_q, MEM_TYPE_PAGE_POOL, pp);
1734         if (err)
1735                 goto err_unregister_rxq;
1736
1737         return pp;
1738
1739 err_unregister_rxq:
1740         xdp_rxq_info_unreg(xdp_q);
1741 err_free_pp:
1742         page_pool_destroy(pp);
1743
1744         return ERR_PTR(err);
1745 }
1746
1747 static void *mtk_page_pool_get_buff(struct page_pool *pp, dma_addr_t *dma_addr,
1748                                     gfp_t gfp_mask)
1749 {
1750         struct page *page;
1751
1752         page = page_pool_alloc_pages(pp, gfp_mask | __GFP_NOWARN);
1753         if (!page)
1754                 return NULL;
1755
1756         *dma_addr = page_pool_get_dma_addr(page) + MTK_PP_HEADROOM;
1757         return page_address(page);
1758 }
1759
1760 static void mtk_rx_put_buff(struct mtk_rx_ring *ring, void *data, bool napi)
1761 {
1762         if (ring->page_pool)
1763                 page_pool_put_full_page(ring->page_pool,
1764                                         virt_to_head_page(data), napi);
1765         else
1766                 skb_free_frag(data);
1767 }
1768
1769 static int mtk_xdp_frame_map(struct mtk_eth *eth, struct net_device *dev,
1770                              struct mtk_tx_dma_desc_info *txd_info,
1771                              struct mtk_tx_dma *txd, struct mtk_tx_buf *tx_buf,
1772                              void *data, u16 headroom, int index, bool dma_map)
1773 {
1774         struct mtk_tx_ring *ring = &eth->tx_ring;
1775         struct mtk_mac *mac = netdev_priv(dev);
1776         struct mtk_tx_dma *txd_pdma;
1777
1778         if (dma_map) {  /* ndo_xdp_xmit */
1779                 txd_info->addr = dma_map_single(eth->dma_dev, data,
1780                                                 txd_info->size, DMA_TO_DEVICE);
1781                 if (unlikely(dma_mapping_error(eth->dma_dev, txd_info->addr)))
1782                         return -ENOMEM;
1783
1784                 tx_buf->flags |= MTK_TX_FLAGS_SINGLE0;
1785         } else {
1786                 struct page *page = virt_to_head_page(data);
1787
1788                 txd_info->addr = page_pool_get_dma_addr(page) +
1789                                  sizeof(struct xdp_frame) + headroom;
1790                 dma_sync_single_for_device(eth->dma_dev, txd_info->addr,
1791                                            txd_info->size, DMA_BIDIRECTIONAL);
1792         }
1793         mtk_tx_set_dma_desc(dev, txd, txd_info);
1794
1795         tx_buf->mac_id = mac->id;
1796         tx_buf->type = dma_map ? MTK_TYPE_XDP_NDO : MTK_TYPE_XDP_TX;
1797         tx_buf->data = (void *)MTK_DMA_DUMMY_DESC;
1798
1799         txd_pdma = qdma_to_pdma(ring, txd);
1800         setup_tx_buf(eth, tx_buf, txd_pdma, txd_info->addr, txd_info->size,
1801                      index);
1802
1803         return 0;
1804 }
1805
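     /* Queue an xdp_frame (and any fragments) on the TX ring. dma_map is
      * true for ndo_xdp_xmit frames, which need a fresh DMA mapping; for
      * XDP_TX the existing page_pool DMA address is reused and only synced
      * to the device.
      */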
1806 static int mtk_xdp_submit_frame(struct mtk_eth *eth, struct xdp_frame *xdpf,
1807                                 struct net_device *dev, bool dma_map)
1808 {
1809         struct skb_shared_info *sinfo = xdp_get_shared_info_from_frame(xdpf);
1810         const struct mtk_soc_data *soc = eth->soc;
1811         struct mtk_tx_ring *ring = &eth->tx_ring;
1812         struct mtk_mac *mac = netdev_priv(dev);
1813         struct mtk_tx_dma_desc_info txd_info = {
1814                 .size   = xdpf->len,
1815                 .first  = true,
1816                 .last   = !xdp_frame_has_frags(xdpf),
1817                 .qid    = mac->id,
1818         };
1819         int err, index = 0, n_desc = 1, nr_frags;
1820         struct mtk_tx_buf *htx_buf, *tx_buf;
1821         struct mtk_tx_dma *htxd, *txd;
1822         void *data = xdpf->data;
1823
1824         if (unlikely(test_bit(MTK_RESETTING, &eth->state)))
1825                 return -EBUSY;
1826
1827         nr_frags = unlikely(xdp_frame_has_frags(xdpf)) ? sinfo->nr_frags : 0;
1828         if (unlikely(atomic_read(&ring->free_count) <= 1 + nr_frags))
1829                 return -EBUSY;
1830
1831         spin_lock(&eth->page_lock);
1832
1833         txd = ring->next_free;
1834         if (txd == ring->last_free) {
1835                 spin_unlock(&eth->page_lock);
1836                 return -ENOMEM;
1837         }
1838         htxd = txd;
1839
1840         tx_buf = mtk_desc_to_tx_buf(ring, txd, soc->tx.desc_size);
1841         memset(tx_buf, 0, sizeof(*tx_buf));
1842         htx_buf = tx_buf;
1843
1844         for (;;) {
1845                 err = mtk_xdp_frame_map(eth, dev, &txd_info, txd, tx_buf,
1846                                         data, xdpf->headroom, index, dma_map);
1847                 if (err < 0)
1848                         goto unmap;
1849
1850                 if (txd_info.last)
1851                         break;
1852
1853                 if (MTK_HAS_CAPS(soc->caps, MTK_QDMA) || (index & 0x1)) {
1854                         txd = mtk_qdma_phys_to_virt(ring, txd->txd2);
1855                         if (txd == ring->last_free)
1856                                 goto unmap;
1857
1858                         tx_buf = mtk_desc_to_tx_buf(ring, txd,
1859                                                     soc->tx.desc_size);
1860                         memset(tx_buf, 0, sizeof(*tx_buf));
1861                         n_desc++;
1862                 }
1863
1864                 memset(&txd_info, 0, sizeof(struct mtk_tx_dma_desc_info));
1865                 txd_info.size = skb_frag_size(&sinfo->frags[index]);
1866                 txd_info.last = index + 1 == nr_frags;
1867                 txd_info.qid = mac->id;
1868                 data = skb_frag_address(&sinfo->frags[index]);
1869
1870                 index++;
1871         }
1872         /* store xdpf for cleanup */
1873         htx_buf->data = xdpf;
1874
1875         if (!MTK_HAS_CAPS(soc->caps, MTK_QDMA)) {
1876                 struct mtk_tx_dma *txd_pdma = qdma_to_pdma(ring, txd);
1877
1878                 if (index & 1)
1879                         txd_pdma->txd2 |= TX_DMA_LS0;
1880                 else
1881                         txd_pdma->txd2 |= TX_DMA_LS1;
1882         }
1883
1884         ring->next_free = mtk_qdma_phys_to_virt(ring, txd->txd2);
1885         atomic_sub(n_desc, &ring->free_count);
1886
1887         /* make sure that all changes to the dma ring are flushed before we
1888          * continue
1889          */
1890         wmb();
1891
1892         if (MTK_HAS_CAPS(soc->caps, MTK_QDMA)) {
1893                 mtk_w32(eth, txd->txd2, soc->reg_map->qdma.ctx_ptr);
1894         } else {
1895                 int idx;
1896
1897                 idx = txd_to_idx(ring, txd, soc->tx.desc_size);
1898                 mtk_w32(eth, NEXT_DESP_IDX(idx, ring->dma_size),
1899                         MT7628_TX_CTX_IDX0);
1900         }
1901
1902         spin_unlock(&eth->page_lock);
1903
1904         return 0;
1905
1906 unmap:
1907         while (htxd != txd) {
1908                 tx_buf = mtk_desc_to_tx_buf(ring, htxd, soc->tx.desc_size);
1909                 mtk_tx_unmap(eth, tx_buf, NULL, false);
1910
1911                 htxd->txd3 = TX_DMA_LS0 | TX_DMA_OWNER_CPU;
1912                 if (!MTK_HAS_CAPS(soc->caps, MTK_QDMA)) {
1913                         struct mtk_tx_dma *txd_pdma = qdma_to_pdma(ring, htxd);
1914
1915                         txd_pdma->txd2 = TX_DMA_DESP2_DEF;
1916                 }
1917
1918                 htxd = mtk_qdma_phys_to_virt(ring, htxd->txd2);
1919         }
1920
1921         spin_unlock(&eth->page_lock);
1922
1923         return err;
1924 }
1925
1926 static int mtk_xdp_xmit(struct net_device *dev, int num_frame,
1927                         struct xdp_frame **frames, u32 flags)
1928 {
1929         struct mtk_mac *mac = netdev_priv(dev);
1930         struct mtk_hw_stats *hw_stats = mac->hw_stats;
1931         struct mtk_eth *eth = mac->hw;
1932         int i, nxmit = 0;
1933
1934         if (unlikely(flags & ~XDP_XMIT_FLAGS_MASK))
1935                 return -EINVAL;
1936
1937         for (i = 0; i < num_frame; i++) {
1938                 if (mtk_xdp_submit_frame(eth, frames[i], dev, true))
1939                         break;
1940                 nxmit++;
1941         }
1942
1943         u64_stats_update_begin(&hw_stats->syncp);
1944         hw_stats->xdp_stats.tx_xdp_xmit += nxmit;
1945         hw_stats->xdp_stats.tx_xdp_xmit_errors += num_frame - nxmit;
1946         u64_stats_update_end(&hw_stats->syncp);
1947
1948         return nxmit;
1949 }
1950
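     /* Run the attached XDP program on one RX buffer and account the verdict
      * in the per-MAC XDP counters. Dropped and aborted buffers are returned
      * to the page pool here; XDP_TX frames are requeued on the TX ring and
      * redirects are flushed later by the caller.
      */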
1951 static u32 mtk_xdp_run(struct mtk_eth *eth, struct mtk_rx_ring *ring,
1952                        struct xdp_buff *xdp, struct net_device *dev)
1953 {
1954         struct mtk_mac *mac = netdev_priv(dev);
1955         struct mtk_hw_stats *hw_stats = mac->hw_stats;
1956         u64 *count = &hw_stats->xdp_stats.rx_xdp_drop;
1957         struct bpf_prog *prog;
1958         u32 act = XDP_PASS;
1959
1960         rcu_read_lock();
1961
1962         prog = rcu_dereference(eth->prog);
1963         if (!prog)
1964                 goto out;
1965
1966         act = bpf_prog_run_xdp(prog, xdp);
1967         switch (act) {
1968         case XDP_PASS:
1969                 count = &hw_stats->xdp_stats.rx_xdp_pass;
1970                 goto update_stats;
1971         case XDP_REDIRECT:
1972                 if (unlikely(xdp_do_redirect(dev, xdp, prog))) {
1973                         act = XDP_DROP;
1974                         break;
1975                 }
1976
1977                 count = &hw_stats->xdp_stats.rx_xdp_redirect;
1978                 goto update_stats;
1979         case XDP_TX: {
1980                 struct xdp_frame *xdpf = xdp_convert_buff_to_frame(xdp);
1981
1982                 if (!xdpf || mtk_xdp_submit_frame(eth, xdpf, dev, false)) {
1983                         count = &hw_stats->xdp_stats.rx_xdp_tx_errors;
1984                         act = XDP_DROP;
1985                         break;
1986                 }
1987
1988                 count = &hw_stats->xdp_stats.rx_xdp_tx;
1989                 goto update_stats;
1990         }
1991         default:
1992                 bpf_warn_invalid_xdp_action(dev, prog, act);
1993                 fallthrough;
1994         case XDP_ABORTED:
1995                 trace_xdp_exception(dev, prog, act);
1996                 fallthrough;
1997         case XDP_DROP:
1998                 break;
1999         }
2000
2001         page_pool_put_full_page(ring->page_pool,
2002                                 virt_to_head_page(xdp->data), true);
2003
2004 update_stats:
2005         u64_stats_update_begin(&hw_stats->syncp);
2006         *count = *count + 1;
2007         u64_stats_update_end(&hw_stats->syncp);
2008 out:
2009         rcu_read_unlock();
2010
2011         return act;
2012 }
2013
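     /* RX NAPI poll. page_pool rings run the XDP program and build skbs
      * straight from the pooled pages; legacy rings swap in a freshly mapped
      * frag buffer before handing the old one up the stack. Descriptors are
      * refilled in place and the CPU index register is updated once at the
      * end of the poll.
      */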
2014 static int mtk_poll_rx(struct napi_struct *napi, int budget,
2015                        struct mtk_eth *eth)
2016 {
2017         struct dim_sample dim_sample = {};
2018         struct mtk_rx_ring *ring;
2019         bool xdp_flush = false;
2020         int idx;
2021         struct sk_buff *skb;
2022         u64 addr64 = 0;
2023         u8 *data, *new_data;
2024         struct mtk_rx_dma_v2 *rxd, trxd;
2025         int done = 0, bytes = 0;
2026         dma_addr_t dma_addr = DMA_MAPPING_ERROR;
2027         int ppe_idx = 0;
2028
2029         while (done < budget) {
2030                 unsigned int pktlen, *rxdcsum;
2031                 struct net_device *netdev;
2032                 u32 hash, reason;
2033                 int mac = 0;
2034
2035                 ring = mtk_get_rx_ring(eth);
2036                 if (unlikely(!ring))
2037                         goto rx_done;
2038
2039                 idx = NEXT_DESP_IDX(ring->calc_idx, ring->dma_size);
2040                 rxd = ring->dma + idx * eth->soc->rx.desc_size;
2041                 data = ring->data[idx];
2042
2043                 if (!mtk_rx_get_desc(eth, &trxd, rxd))
2044                         break;
2045
2046                 /* find out which mac the packet comes from. values start at 1 */
2047                 if (mtk_is_netsys_v3_or_greater(eth)) {
2048                         u32 val = RX_DMA_GET_SPORT_V2(trxd.rxd5);
2049
2050                         switch (val) {
2051                         case PSE_GDM1_PORT:
2052                         case PSE_GDM2_PORT:
2053                                 mac = val - 1;
2054                                 break;
2055                         case PSE_GDM3_PORT:
2056                                 mac = MTK_GMAC3_ID;
2057                                 break;
2058                         default:
2059                                 break;
2060                         }
2061                 } else if (!MTK_HAS_CAPS(eth->soc->caps, MTK_SOC_MT7628) &&
2062                            !(trxd.rxd4 & RX_DMA_SPECIAL_TAG)) {
2063                         mac = RX_DMA_GET_SPORT(trxd.rxd4) - 1;
2064                 }
2065
2066                 if (unlikely(mac < 0 || mac >= MTK_MAX_DEVS ||
2067                              !eth->netdev[mac]))
2068                         goto release_desc;
2069
2070                 netdev = eth->netdev[mac];
2071                 ppe_idx = eth->mac[mac]->ppe_idx;
2072
2073                 if (unlikely(test_bit(MTK_RESETTING, &eth->state)))
2074                         goto release_desc;
2075
2076                 pktlen = RX_DMA_GET_PLEN0(trxd.rxd2);
2077
2078                 /* alloc new buffer */
2079                 if (ring->page_pool) {
2080                         struct page *page = virt_to_head_page(data);
2081                         struct xdp_buff xdp;
2082                         u32 ret;
2083
2084                         new_data = mtk_page_pool_get_buff(ring->page_pool,
2085                                                           &dma_addr,
2086                                                           GFP_ATOMIC);
2087                         if (unlikely(!new_data)) {
2088                                 netdev->stats.rx_dropped++;
2089                                 goto release_desc;
2090                         }
2091
2092                         dma_sync_single_for_cpu(eth->dma_dev,
2093                                 page_pool_get_dma_addr(page) + MTK_PP_HEADROOM,
2094                                 pktlen, page_pool_get_dma_dir(ring->page_pool));
2095
2096                         xdp_init_buff(&xdp, PAGE_SIZE, &ring->xdp_q);
2097                         xdp_prepare_buff(&xdp, data, MTK_PP_HEADROOM, pktlen,
2098                                          false);
2099                         xdp_buff_clear_frags_flag(&xdp);
2100
2101                         ret = mtk_xdp_run(eth, ring, &xdp, netdev);
2102                         if (ret == XDP_REDIRECT)
2103                                 xdp_flush = true;
2104
2105                         if (ret != XDP_PASS)
2106                                 goto skip_rx;
2107
2108                         skb = build_skb(data, PAGE_SIZE);
2109                         if (unlikely(!skb)) {
2110                                 page_pool_put_full_page(ring->page_pool,
2111                                                         page, true);
2112                                 netdev->stats.rx_dropped++;
2113                                 goto skip_rx;
2114                         }
2115
2116                         skb_reserve(skb, xdp.data - xdp.data_hard_start);
2117                         skb_put(skb, xdp.data_end - xdp.data);
2118                         skb_mark_for_recycle(skb);
2119                 } else {
2120                         if (ring->frag_size <= PAGE_SIZE)
2121                                 new_data = napi_alloc_frag(ring->frag_size);
2122                         else
2123                                 new_data = mtk_max_lro_buf_alloc(GFP_ATOMIC);
2124
2125                         if (unlikely(!new_data)) {
2126                                 netdev->stats.rx_dropped++;
2127                                 goto release_desc;
2128                         }
2129
2130                         dma_addr = dma_map_single(eth->dma_dev,
2131                                 new_data + NET_SKB_PAD + eth->ip_align,
2132                                 ring->buf_size, DMA_FROM_DEVICE);
2133                         if (unlikely(dma_mapping_error(eth->dma_dev,
2134                                                        dma_addr))) {
2135                                 skb_free_frag(new_data);
2136                                 netdev->stats.rx_dropped++;
2137                                 goto release_desc;
2138                         }
2139
2140                         if (MTK_HAS_CAPS(eth->soc->caps, MTK_36BIT_DMA))
2141                                 addr64 = RX_DMA_GET_ADDR64(trxd.rxd2);
2142
2143                         dma_unmap_single(eth->dma_dev, ((u64)trxd.rxd1 | addr64),
2144                                          ring->buf_size, DMA_FROM_DEVICE);
2145
2146                         skb = build_skb(data, ring->frag_size);
2147                         if (unlikely(!skb)) {
2148                                 netdev->stats.rx_dropped++;
2149                                 skb_free_frag(data);
2150                                 goto skip_rx;
2151                         }
2152
2153                         skb_reserve(skb, NET_SKB_PAD + NET_IP_ALIGN);
2154                         skb_put(skb, pktlen);
2155                 }
2156
2157                 skb->dev = netdev;
2158                 bytes += skb->len;
2159
2160                 if (mtk_is_netsys_v3_or_greater(eth)) {
2161                         reason = FIELD_GET(MTK_RXD5_PPE_CPU_REASON, trxd.rxd5);
2162                         hash = trxd.rxd5 & MTK_RXD5_FOE_ENTRY;
2163                         if (hash != MTK_RXD5_FOE_ENTRY)
2164                                 skb_set_hash(skb, jhash_1word(hash, 0),
2165                                              PKT_HASH_TYPE_L4);
2166                         rxdcsum = &trxd.rxd3;
2167                 } else {
2168                         reason = FIELD_GET(MTK_RXD4_PPE_CPU_REASON, trxd.rxd4);
2169                         hash = trxd.rxd4 & MTK_RXD4_FOE_ENTRY;
2170                         if (hash != MTK_RXD4_FOE_ENTRY)
2171                                 skb_set_hash(skb, jhash_1word(hash, 0),
2172                                              PKT_HASH_TYPE_L4);
2173                         rxdcsum = &trxd.rxd4;
2174                 }
2175
2176                 if (*rxdcsum & eth->soc->rx.dma_l4_valid)
2177                         skb->ip_summed = CHECKSUM_UNNECESSARY;
2178                 else
2179                         skb_checksum_none_assert(skb);
2180                 skb->protocol = eth_type_trans(skb, netdev);
2181
2182                 /* When using VLAN untagging in combination with DSA, the
2183                  * hardware treats the MTK special tag as a VLAN and untags it.
2184                  */
2185                 if (mtk_is_netsys_v1(eth) && (trxd.rxd2 & RX_DMA_VTAG) &&
2186                     netdev_uses_dsa(netdev)) {
2187                         unsigned int port = RX_DMA_VPID(trxd.rxd3) & GENMASK(2, 0);
2188
2189                         if (port < ARRAY_SIZE(eth->dsa_meta) &&
2190                             eth->dsa_meta[port])
2191                                 skb_dst_set_noref(skb, &eth->dsa_meta[port]->dst);
2192                 }
2193
2194                 if (reason == MTK_PPE_CPU_REASON_HIT_UNBIND_RATE_REACHED)
2195                         mtk_ppe_check_skb(eth->ppe[ppe_idx], skb, hash);
2196
2197                 skb_record_rx_queue(skb, 0);
2198                 napi_gro_receive(napi, skb);
2199
2200 skip_rx:
2201                 ring->data[idx] = new_data;
2202                 rxd->rxd1 = (unsigned int)dma_addr;
2203 release_desc:
2204                 if (MTK_HAS_CAPS(eth->soc->caps, MTK_SOC_MT7628))
2205                         rxd->rxd2 = RX_DMA_LSO;
2206                 else
2207                         rxd->rxd2 = RX_DMA_PREP_PLEN0(ring->buf_size);
2208
2209                 if (MTK_HAS_CAPS(eth->soc->caps, MTK_36BIT_DMA) &&
2210                     likely(dma_addr != DMA_MAPPING_ERROR))
2211                         rxd->rxd2 |= RX_DMA_PREP_ADDR64(dma_addr);
2212
2213                 ring->calc_idx = idx;
2214                 done++;
2215         }
2216
2217 rx_done:
2218         if (done) {
2219                 /* make sure that all changes to the dma ring are flushed before
2220                  * we continue
2221                  */
2222                 wmb();
2223                 mtk_update_rx_cpu_idx(eth);
2224         }
2225
2226         eth->rx_packets += done;
2227         eth->rx_bytes += bytes;
2228         dim_update_sample(eth->rx_events, eth->rx_packets, eth->rx_bytes,
2229                           &dim_sample);
2230         net_dim(&eth->rx_dim, &dim_sample);
2231
2232         if (xdp_flush)
2233                 xdp_do_flush();
2234
2235         return done;
2236 }
2237
2238 struct mtk_poll_state {
2239         struct netdev_queue *txq;
2240         unsigned int total;
2241         unsigned int done;
2242         unsigned int bytes;
2243 };
2244
2245 static void
2246 mtk_poll_tx_done(struct mtk_eth *eth, struct mtk_poll_state *state, u8 mac,
2247                  struct sk_buff *skb)
2248 {
2249         struct netdev_queue *txq;
2250         struct net_device *dev;
2251         unsigned int bytes = skb->len;
2252
2253         state->total++;
2254         eth->tx_packets++;
2255         eth->tx_bytes += bytes;
2256
2257         dev = eth->netdev[mac];
2258         if (!dev)
2259                 return;
2260
2261         txq = netdev_get_tx_queue(dev, skb_get_queue_mapping(skb));
2262         if (state->txq == txq) {
2263                 state->done++;
2264                 state->bytes += bytes;
2265                 return;
2266         }
2267
2268         if (state->txq)
2269                 netdev_tx_completed_queue(state->txq, state->done, state->bytes);
2270
2271         state->txq = txq;
2272         state->done = 1;
2273         state->bytes = bytes;
2274 }
2275
2276 static int mtk_poll_tx_qdma(struct mtk_eth *eth, int budget,
2277                             struct mtk_poll_state *state)
2278 {
2279         const struct mtk_reg_map *reg_map = eth->soc->reg_map;
2280         struct mtk_tx_ring *ring = &eth->tx_ring;
2281         struct mtk_tx_buf *tx_buf;
2282         struct xdp_frame_bulk bq;
2283         struct mtk_tx_dma *desc;
2284         u32 cpu, dma;
2285
2286         cpu = ring->last_free_ptr;
2287         dma = mtk_r32(eth, reg_map->qdma.drx_ptr);
2288
2289         desc = mtk_qdma_phys_to_virt(ring, cpu);
2290         xdp_frame_bulk_init(&bq);
2291
2292         while ((cpu != dma) && budget) {
2293                 u32 next_cpu = desc->txd2;
2294
2295                 desc = mtk_qdma_phys_to_virt(ring, desc->txd2);
2296                 if ((desc->txd3 & TX_DMA_OWNER_CPU) == 0)
2297                         break;
2298
2299                 tx_buf = mtk_desc_to_tx_buf(ring, desc,
2300                                             eth->soc->tx.desc_size);
2301                 if (!tx_buf->data)
2302                         break;
2303
2304                 if (tx_buf->data != (void *)MTK_DMA_DUMMY_DESC) {
2305                         if (tx_buf->type == MTK_TYPE_SKB)
2306                                 mtk_poll_tx_done(eth, state, tx_buf->mac_id,
2307                                                  tx_buf->data);
2308
2309                         budget--;
2310                 }
2311                 mtk_tx_unmap(eth, tx_buf, &bq, true);
2312
2313                 ring->last_free = desc;
2314                 atomic_inc(&ring->free_count);
2315
2316                 cpu = next_cpu;
2317         }
2318         xdp_flush_frame_bulk(&bq);
2319
2320         ring->last_free_ptr = cpu;
2321         mtk_w32(eth, cpu, reg_map->qdma.crx_ptr);
2322
2323         return budget;
2324 }
2325
2326 static int mtk_poll_tx_pdma(struct mtk_eth *eth, int budget,
2327                             struct mtk_poll_state *state)
2328 {
2329         struct mtk_tx_ring *ring = &eth->tx_ring;
2330         struct mtk_tx_buf *tx_buf;
2331         struct xdp_frame_bulk bq;
2332         struct mtk_tx_dma *desc;
2333         u32 cpu, dma;
2334
2335         cpu = ring->cpu_idx;
2336         dma = mtk_r32(eth, MT7628_TX_DTX_IDX0);
2337         xdp_frame_bulk_init(&bq);
2338
2339         while ((cpu != dma) && budget) {
2340                 tx_buf = &ring->buf[cpu];
2341                 if (!tx_buf->data)
2342                         break;
2343
2344                 if (tx_buf->data != (void *)MTK_DMA_DUMMY_DESC) {
2345                         if (tx_buf->type == MTK_TYPE_SKB)
2346                                 mtk_poll_tx_done(eth, state, 0, tx_buf->data);
2347                         budget--;
2348                 }
2349                 mtk_tx_unmap(eth, tx_buf, &bq, true);
2350
2351                 desc = ring->dma + cpu * eth->soc->tx.desc_size;
2352                 ring->last_free = desc;
2353                 atomic_inc(&ring->free_count);
2354
2355                 cpu = NEXT_DESP_IDX(cpu, ring->dma_size);
2356         }
2357         xdp_flush_frame_bulk(&bq);
2358
2359         ring->cpu_idx = cpu;
2360
2361         return budget;
2362 }
2363
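     /* Reclaim completed TX descriptors: QDMA follows the descriptor chain
      * up to the hardware DMA pointer, PDMA follows the ring index.
      * Completions are reported per TX queue for BQL and the queues are
      * woken up again once enough descriptors are free.
      */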
2364 static int mtk_poll_tx(struct mtk_eth *eth, int budget)
2365 {
2366         struct mtk_tx_ring *ring = &eth->tx_ring;
2367         struct dim_sample dim_sample = {};
2368         struct mtk_poll_state state = {};
2369
2370         if (MTK_HAS_CAPS(eth->soc->caps, MTK_QDMA))
2371                 budget = mtk_poll_tx_qdma(eth, budget, &state);
2372         else
2373                 budget = mtk_poll_tx_pdma(eth, budget, &state);
2374
2375         if (state.txq)
2376                 netdev_tx_completed_queue(state.txq, state.done, state.bytes);
2377
2378         dim_update_sample(eth->tx_events, eth->tx_packets, eth->tx_bytes,
2379                           &dim_sample);
2380         net_dim(&eth->tx_dim, &dim_sample);
2381
2382         if (mtk_queue_stopped(eth) &&
2383             (atomic_read(&ring->free_count) > ring->thresh))
2384                 mtk_wake_queue(eth);
2385
2386         return state.total;
2387 }
2388
2389 static void mtk_handle_status_irq(struct mtk_eth *eth)
2390 {
2391         u32 status2 = mtk_r32(eth, MTK_INT_STATUS2);
2392
2393         if (unlikely(status2 & (MTK_GDM1_AF | MTK_GDM2_AF))) {
2394                 mtk_stats_update(eth);
2395                 mtk_w32(eth, (MTK_GDM1_AF | MTK_GDM2_AF),
2396                         MTK_INT_STATUS2);
2397         }
2398 }
2399
2400 static int mtk_napi_tx(struct napi_struct *napi, int budget)
2401 {
2402         struct mtk_eth *eth = container_of(napi, struct mtk_eth, tx_napi);
2403         const struct mtk_reg_map *reg_map = eth->soc->reg_map;
2404         int tx_done = 0;
2405
2406         if (MTK_HAS_CAPS(eth->soc->caps, MTK_QDMA))
2407                 mtk_handle_status_irq(eth);
2408         mtk_w32(eth, MTK_TX_DONE_INT, reg_map->tx_irq_status);
2409         tx_done = mtk_poll_tx(eth, budget);
2410
2411         if (unlikely(netif_msg_intr(eth))) {
2412                 dev_info(eth->dev,
2413                          "done tx %d, intr 0x%08x/0x%x\n", tx_done,
2414                          mtk_r32(eth, reg_map->tx_irq_status),
2415                          mtk_r32(eth, reg_map->tx_irq_mask));
2416         }
2417
2418         if (tx_done == budget)
2419                 return budget;
2420
2421         if (mtk_r32(eth, reg_map->tx_irq_status) & MTK_TX_DONE_INT)
2422                 return budget;
2423
2424         if (napi_complete_done(napi, tx_done))
2425                 mtk_tx_irq_enable(eth, MTK_TX_DONE_INT);
2426
2427         return tx_done;
2428 }
2429
2430 static int mtk_napi_rx(struct napi_struct *napi, int budget)
2431 {
2432         struct mtk_eth *eth = container_of(napi, struct mtk_eth, rx_napi);
2433         const struct mtk_reg_map *reg_map = eth->soc->reg_map;
2434         int rx_done_total = 0;
2435
2436         mtk_handle_status_irq(eth);
2437
2438         do {
2439                 int rx_done;
2440
2441                 mtk_w32(eth, eth->soc->rx.irq_done_mask,
2442                         reg_map->pdma.irq_status);
2443                 rx_done = mtk_poll_rx(napi, budget - rx_done_total, eth);
2444                 rx_done_total += rx_done;
2445
2446                 if (unlikely(netif_msg_intr(eth))) {
2447                         dev_info(eth->dev,
2448                                  "done rx %d, intr 0x%08x/0x%x\n", rx_done,
2449                                  mtk_r32(eth, reg_map->pdma.irq_status),
2450                                  mtk_r32(eth, reg_map->pdma.irq_mask));
2451                 }
2452
2453                 if (rx_done_total == budget)
2454                         return budget;
2455
2456         } while (mtk_r32(eth, reg_map->pdma.irq_status) &
2457                  eth->soc->rx.irq_done_mask);
2458
2459         if (napi_complete_done(napi, rx_done_total))
2460                 mtk_rx_irq_enable(eth, eth->soc->rx.irq_done_mask);
2461
2462         return rx_done_total;
2463 }
2464
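     /* Allocate the TX ring. Descriptors live either in the SoC's SRAM right
      * behind the FQ DMA area or in coherent DMA memory and are pre-linked
      * into a circular list. PDMA-only SoCs additionally get the shadow
      * ring->dma_pdma ring holding the real hardware descriptors.
      */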
2465 static int mtk_tx_alloc(struct mtk_eth *eth)
2466 {
2467         const struct mtk_soc_data *soc = eth->soc;
2468         struct mtk_tx_ring *ring = &eth->tx_ring;
2469         int i, sz = soc->tx.desc_size;
2470         struct mtk_tx_dma_v2 *txd;
2471         int ring_size;
2472         u32 ofs, val;
2473
2474         if (MTK_HAS_CAPS(soc->caps, MTK_QDMA))
2475                 ring_size = MTK_QDMA_RING_SIZE;
2476         else
2477                 ring_size = soc->tx.dma_size;
2478
2479         ring->buf = kcalloc(ring_size, sizeof(*ring->buf),
2480                                GFP_KERNEL);
2481         if (!ring->buf)
2482                 goto no_tx_mem;
2483
2484         if (MTK_HAS_CAPS(soc->caps, MTK_SRAM)) {
2485                 ring->dma = eth->sram_base + soc->tx.fq_dma_size * sz;
2486                 ring->phys = eth->phy_scratch_ring + soc->tx.fq_dma_size * (dma_addr_t)sz;
2487         } else {
2488                 ring->dma = dma_alloc_coherent(eth->dma_dev, ring_size * sz,
2489                                                &ring->phys, GFP_KERNEL);
2490         }
2491
2492         if (!ring->dma)
2493                 goto no_tx_mem;
2494
2495         for (i = 0; i < ring_size; i++) {
2496                 int next = (i + 1) % ring_size;
2497                 u32 next_ptr = ring->phys + next * sz;
2498
2499                 txd = ring->dma + i * sz;
2500                 txd->txd2 = next_ptr;
2501                 txd->txd3 = TX_DMA_LS0 | TX_DMA_OWNER_CPU;
2502                 txd->txd4 = 0;
2503                 if (mtk_is_netsys_v2_or_greater(eth)) {
2504                         txd->txd5 = 0;
2505                         txd->txd6 = 0;
2506                         txd->txd7 = 0;
2507                         txd->txd8 = 0;
2508                 }
2509         }
2510
2511         /* On MT7688 (PDMA only) this driver uses the ring->dma structs
2512          * only as the framework. The real HW descriptors are the PDMA
2513          * descriptors in ring->dma_pdma.
2514          */
2515         if (!MTK_HAS_CAPS(soc->caps, MTK_QDMA)) {
2516                 ring->dma_pdma = dma_alloc_coherent(eth->dma_dev, ring_size * sz,
2517                                                     &ring->phys_pdma, GFP_KERNEL);
2518                 if (!ring->dma_pdma)
2519                         goto no_tx_mem;
2520
2521                 for (i = 0; i < ring_size; i++) {
2522                         ring->dma_pdma[i].txd2 = TX_DMA_DESP2_DEF;
2523                         ring->dma_pdma[i].txd4 = 0;
2524                 }
2525         }
2526
2527         ring->dma_size = ring_size;
2528         atomic_set(&ring->free_count, ring_size - 2);
2529         ring->next_free = ring->dma;
2530         ring->last_free = (void *)txd;
2531         ring->last_free_ptr = (u32)(ring->phys + ((ring_size - 1) * sz));
2532         ring->thresh = MAX_SKB_FRAGS;
2533
2534         /* make sure that all changes to the dma ring are flushed before we
2535          * continue
2536          */
2537         wmb();
2538
2539         if (MTK_HAS_CAPS(soc->caps, MTK_QDMA)) {
2540                 mtk_w32(eth, ring->phys, soc->reg_map->qdma.ctx_ptr);
2541                 mtk_w32(eth, ring->phys, soc->reg_map->qdma.dtx_ptr);
2542                 mtk_w32(eth,
2543                         ring->phys + ((ring_size - 1) * sz),
2544                         soc->reg_map->qdma.crx_ptr);
2545                 mtk_w32(eth, ring->last_free_ptr, soc->reg_map->qdma.drx_ptr);
2546
2547                 for (i = 0, ofs = 0; i < MTK_QDMA_NUM_QUEUES; i++) {
2548                         val = (QDMA_RES_THRES << 8) | QDMA_RES_THRES;
2549                         mtk_w32(eth, val, soc->reg_map->qdma.qtx_cfg + ofs);
2550
2551                         val = MTK_QTX_SCH_MIN_RATE_EN |
2552                               /* minimum: 10 Mbps */
2553                               FIELD_PREP(MTK_QTX_SCH_MIN_RATE_MAN, 1) |
2554                               FIELD_PREP(MTK_QTX_SCH_MIN_RATE_EXP, 4) |
2555                               MTK_QTX_SCH_LEAKY_BUCKET_SIZE;
2556                         if (mtk_is_netsys_v1(eth))
2557                                 val |= MTK_QTX_SCH_LEAKY_BUCKET_EN;
2558                         mtk_w32(eth, val, soc->reg_map->qdma.qtx_sch + ofs);
2559                         ofs += MTK_QTX_OFFSET;
2560                 }
2561                 val = MTK_QDMA_TX_SCH_MAX_WFQ | (MTK_QDMA_TX_SCH_MAX_WFQ << 16);
2562                 mtk_w32(eth, val, soc->reg_map->qdma.tx_sch_rate);
2563                 if (mtk_is_netsys_v2_or_greater(eth))
2564                         mtk_w32(eth, val, soc->reg_map->qdma.tx_sch_rate + 4);
2565         } else {
2566                 mtk_w32(eth, ring->phys_pdma, MT7628_TX_BASE_PTR0);
2567                 mtk_w32(eth, ring_size, MT7628_TX_MAX_CNT0);
2568                 mtk_w32(eth, 0, MT7628_TX_CTX_IDX0);
2569                 mtk_w32(eth, MT7628_PST_DTX_IDX0, soc->reg_map->pdma.rst_idx);
2570         }
2571
2572         return 0;
2573
2574 no_tx_mem:
2575         return -ENOMEM;
2576 }
2577
2578 static void mtk_tx_clean(struct mtk_eth *eth)
2579 {
2580         const struct mtk_soc_data *soc = eth->soc;
2581         struct mtk_tx_ring *ring = &eth->tx_ring;
2582         int i;
2583
2584         if (ring->buf) {
2585                 for (i = 0; i < ring->dma_size; i++)
2586                         mtk_tx_unmap(eth, &ring->buf[i], NULL, false);
2587                 kfree(ring->buf);
2588                 ring->buf = NULL;
2589         }
2590         if (!MTK_HAS_CAPS(soc->caps, MTK_SRAM) && ring->dma) {
2591                 dma_free_coherent(eth->dma_dev,
2592                                   ring->dma_size * soc->tx.desc_size,
2593                                   ring->dma, ring->phys);
2594                 ring->dma = NULL;
2595         }
2596
2597         if (ring->dma_pdma) {
2598                 dma_free_coherent(eth->dma_dev,
2599                                   ring->dma_size * soc->tx.desc_size,
2600                                   ring->dma_pdma, ring->phys_pdma);
2601                 ring->dma_pdma = NULL;
2602         }
2603 }
2604
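     /* Allocate one RX ring. Buffers come from the page_pool where it is
      * enabled (NETSYS v2+) or from page fragments otherwise. On SRAM-capable
      * SoCs the normal RX rings reuse the SRAM region right behind the TX
      * descriptors instead of allocating coherent memory.
      */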
2605 static int mtk_rx_alloc(struct mtk_eth *eth, int ring_no, int rx_flag)
2606 {
2607         const struct mtk_reg_map *reg_map = eth->soc->reg_map;
2608         const struct mtk_soc_data *soc = eth->soc;
2609         struct mtk_rx_ring *ring;
2610         int rx_data_len, rx_dma_size, tx_ring_size;
2611         int i;
2612
2613         if (MTK_HAS_CAPS(eth->soc->caps, MTK_QDMA))
2614                 tx_ring_size = MTK_QDMA_RING_SIZE;
2615         else
2616                 tx_ring_size = soc->tx.dma_size;
2617
2618         if (rx_flag == MTK_RX_FLAGS_QDMA) {
2619                 if (ring_no)
2620                         return -EINVAL;
2621                 ring = &eth->rx_ring_qdma;
2622         } else {
2623                 ring = &eth->rx_ring[ring_no];
2624         }
2625
2626         if (rx_flag == MTK_RX_FLAGS_HWLRO) {
2627                 rx_data_len = MTK_MAX_LRO_RX_LENGTH;
2628                 rx_dma_size = MTK_HW_LRO_DMA_SIZE;
2629         } else {
2630                 rx_data_len = ETH_DATA_LEN;
2631                 rx_dma_size = soc->rx.dma_size;
2632         }
2633
2634         ring->frag_size = mtk_max_frag_size(rx_data_len);
2635         ring->buf_size = mtk_max_buf_size(ring->frag_size);
2636         ring->data = kcalloc(rx_dma_size, sizeof(*ring->data),
2637                              GFP_KERNEL);
2638         if (!ring->data)
2639                 return -ENOMEM;
2640
2641         if (mtk_page_pool_enabled(eth)) {
2642                 struct page_pool *pp;
2643
2644                 pp = mtk_create_page_pool(eth, &ring->xdp_q, ring_no,
2645                                           rx_dma_size);
2646                 if (IS_ERR(pp))
2647                         return PTR_ERR(pp);
2648
2649                 ring->page_pool = pp;
2650         }
2651
2652         if (!MTK_HAS_CAPS(eth->soc->caps, MTK_SRAM) ||
2653             rx_flag != MTK_RX_FLAGS_NORMAL) {
2654                 ring->dma = dma_alloc_coherent(eth->dma_dev,
2655                                 rx_dma_size * eth->soc->rx.desc_size,
2656                                 &ring->phys, GFP_KERNEL);
2657         } else {
2658                 struct mtk_tx_ring *tx_ring = &eth->tx_ring;
2659
2660                 ring->dma = tx_ring->dma + tx_ring_size *
2661                             eth->soc->tx.desc_size * (ring_no + 1);
2662                 ring->phys = tx_ring->phys + tx_ring_size *
2663                              eth->soc->tx.desc_size * (ring_no + 1);
2664         }
2665
2666         if (!ring->dma)
2667                 return -ENOMEM;
2668
2669         for (i = 0; i < rx_dma_size; i++) {
2670                 struct mtk_rx_dma_v2 *rxd;
2671                 dma_addr_t dma_addr;
2672                 void *data;
2673
2674                 rxd = ring->dma + i * eth->soc->rx.desc_size;
2675                 if (ring->page_pool) {
2676                         data = mtk_page_pool_get_buff(ring->page_pool,
2677                                                       &dma_addr, GFP_KERNEL);
2678                         if (!data)
2679                                 return -ENOMEM;
2680                 } else {
2681                         if (ring->frag_size <= PAGE_SIZE)
2682                                 data = netdev_alloc_frag(ring->frag_size);
2683                         else
2684                                 data = mtk_max_lro_buf_alloc(GFP_KERNEL);
2685
2686                         if (!data)
2687                                 return -ENOMEM;
2688
2689                         dma_addr = dma_map_single(eth->dma_dev,
2690                                 data + NET_SKB_PAD + eth->ip_align,
2691                                 ring->buf_size, DMA_FROM_DEVICE);
2692                         if (unlikely(dma_mapping_error(eth->dma_dev,
2693                                                        dma_addr))) {
2694                                 skb_free_frag(data);
2695                                 return -ENOMEM;
2696                         }
2697                 }
2698                 rxd->rxd1 = (unsigned int)dma_addr;
2699                 ring->data[i] = data;
2700
2701                 if (MTK_HAS_CAPS(eth->soc->caps, MTK_SOC_MT7628))
2702                         rxd->rxd2 = RX_DMA_LSO;
2703                 else
2704                         rxd->rxd2 = RX_DMA_PREP_PLEN0(ring->buf_size);
2705
2706                 if (MTK_HAS_CAPS(eth->soc->caps, MTK_36BIT_DMA))
2707                         rxd->rxd2 |= RX_DMA_PREP_ADDR64(dma_addr);
2708
2709                 rxd->rxd3 = 0;
2710                 rxd->rxd4 = 0;
2711                 if (mtk_is_netsys_v3_or_greater(eth)) {
2712                         rxd->rxd5 = 0;
2713                         rxd->rxd6 = 0;
2714                         rxd->rxd7 = 0;
2715                         rxd->rxd8 = 0;
2716                 }
2717         }
2718
2719         ring->dma_size = rx_dma_size;
2720         ring->calc_idx_update = false;
2721         ring->calc_idx = rx_dma_size - 1;
2722         if (rx_flag == MTK_RX_FLAGS_QDMA)
2723                 ring->crx_idx_reg = reg_map->qdma.qcrx_ptr +
2724                                     ring_no * MTK_QRX_OFFSET;
2725         else
2726                 ring->crx_idx_reg = reg_map->pdma.pcrx_ptr +
2727                                     ring_no * MTK_QRX_OFFSET;
2728         /* make sure that all changes to the dma ring are flushed before we
2729          * continue
2730          */
2731         wmb();
2732
2733         if (rx_flag == MTK_RX_FLAGS_QDMA) {
2734                 mtk_w32(eth, ring->phys,
2735                         reg_map->qdma.rx_ptr + ring_no * MTK_QRX_OFFSET);
2736                 mtk_w32(eth, rx_dma_size,
2737                         reg_map->qdma.rx_cnt_cfg + ring_no * MTK_QRX_OFFSET);
2738                 mtk_w32(eth, MTK_PST_DRX_IDX_CFG(ring_no),
2739                         reg_map->qdma.rst_idx);
2740         } else {
2741                 mtk_w32(eth, ring->phys,
2742                         reg_map->pdma.rx_ptr + ring_no * MTK_QRX_OFFSET);
2743                 mtk_w32(eth, rx_dma_size,
2744                         reg_map->pdma.rx_cnt_cfg + ring_no * MTK_QRX_OFFSET);
2745                 mtk_w32(eth, MTK_PST_DRX_IDX_CFG(ring_no),
2746                         reg_map->pdma.rst_idx);
2747         }
2748         mtk_w32(eth, ring->calc_idx, ring->crx_idx_reg);
2749
2750         return 0;
2751 }
2752
2753 static void mtk_rx_clean(struct mtk_eth *eth, struct mtk_rx_ring *ring, bool in_sram)
2754 {
2755         u64 addr64 = 0;
2756         int i;
2757
2758         if (ring->data && ring->dma) {
2759                 for (i = 0; i < ring->dma_size; i++) {
2760                         struct mtk_rx_dma *rxd;
2761
2762                         if (!ring->data[i])
2763                                 continue;
2764
2765                         rxd = ring->dma + i * eth->soc->rx.desc_size;
2766                         if (!rxd->rxd1)
2767                                 continue;
2768
2769                         if (MTK_HAS_CAPS(eth->soc->caps, MTK_36BIT_DMA))
2770                                 addr64 = RX_DMA_GET_ADDR64(rxd->rxd2);
2771
2772                         dma_unmap_single(eth->dma_dev, ((u64)rxd->rxd1 | addr64),
2773                                          ring->buf_size, DMA_FROM_DEVICE);
2774                         mtk_rx_put_buff(ring, ring->data[i], false);
2775                 }
2776                 kfree(ring->data);
2777                 ring->data = NULL;
2778         }
2779
2780         if (!in_sram && ring->dma) {
2781                 dma_free_coherent(eth->dma_dev,
2782                                   ring->dma_size * eth->soc->rx.desc_size,
2783                                   ring->dma, ring->phys);
2784                 ring->dma = NULL;
2785         }
2786
2787         if (ring->page_pool) {
2788                 if (xdp_rxq_info_is_reg(&ring->xdp_q))
2789                         xdp_rxq_info_unreg(&ring->xdp_q);
2790                 page_pool_destroy(ring->page_pool);
2791                 ring->page_pool = NULL;
2792         }
2793 }
2794
2795 static int mtk_hwlro_rx_init(struct mtk_eth *eth)
2796 {
2797         int i;
2798         u32 ring_ctrl_dw1 = 0, ring_ctrl_dw2 = 0, ring_ctrl_dw3 = 0;
2799         u32 lro_ctrl_dw0 = 0, lro_ctrl_dw3 = 0;
2800
2801         /* set LRO rings to auto-learn mode */
2802         ring_ctrl_dw2 |= MTK_RING_AUTO_LERAN_MODE;
2803
2804         /* validate LRO ring */
2805         ring_ctrl_dw2 |= MTK_RING_VLD;
2806
2807         /* set AGE timer (unit: 20us) */
2808         ring_ctrl_dw2 |= MTK_RING_AGE_TIME_H;
2809         ring_ctrl_dw1 |= MTK_RING_AGE_TIME_L;
2810
2811         /* set max AGG timer (unit: 20us) */
2812         ring_ctrl_dw2 |= MTK_RING_MAX_AGG_TIME;
2813
2814         /* set max LRO AGG count */
2815         ring_ctrl_dw2 |= MTK_RING_MAX_AGG_CNT_L;
2816         ring_ctrl_dw3 |= MTK_RING_MAX_AGG_CNT_H;
2817
2818         for (i = 1; i < MTK_MAX_RX_RING_NUM; i++) {
2819                 mtk_w32(eth, ring_ctrl_dw1, MTK_LRO_CTRL_DW1_CFG(i));
2820                 mtk_w32(eth, ring_ctrl_dw2, MTK_LRO_CTRL_DW2_CFG(i));
2821                 mtk_w32(eth, ring_ctrl_dw3, MTK_LRO_CTRL_DW3_CFG(i));
2822         }
2823
2824         /* IPv4 checksum update enable */
2825         lro_ctrl_dw0 |= MTK_L3_CKS_UPD_EN;
2826
2827         /* switch priority comparison to packet count mode */
2828         lro_ctrl_dw0 |= MTK_LRO_ALT_PKT_CNT_MODE;
2829
2830         /* bandwidth threshold setting */
2831         mtk_w32(eth, MTK_HW_LRO_BW_THRE, MTK_PDMA_LRO_CTRL_DW2);
2832
2833         /* auto-learn score delta setting */
2834         mtk_w32(eth, MTK_HW_LRO_REPLACE_DELTA, MTK_PDMA_LRO_ALT_SCORE_DELTA);
2835
2836         /* set refresh timer for altering flows to 1 sec. (unit: 20us) */
2837         mtk_w32(eth, (MTK_HW_LRO_TIMER_UNIT << 16) | MTK_HW_LRO_REFRESH_TIME,
2838                 MTK_PDMA_LRO_ALT_REFRESH_TIMER);
2839
2840         /* set HW LRO mode & the max aggregation count for rx packets */
2841         lro_ctrl_dw3 |= MTK_ADMA_MODE | (MTK_HW_LRO_MAX_AGG_CNT & 0xff);
2842
2843         /* minimum remaining room of SDL0 in the RXD for LRO aggregation */
2844         lro_ctrl_dw3 |= MTK_LRO_MIN_RXD_SDL;
2845
2846         /* enable HW LRO */
2847         lro_ctrl_dw0 |= MTK_LRO_EN;
2848
2849         mtk_w32(eth, lro_ctrl_dw3, MTK_PDMA_LRO_CTRL_DW3);
2850         mtk_w32(eth, lro_ctrl_dw0, MTK_PDMA_LRO_CTRL_DW0);
2851
2852         return 0;
2853 }
2854
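/* Tear down hardware LRO: request that the rings be relinquished so any
 * aggregated packets are flushed, wait for the hardware to settle,
 * invalidate the LRO rings and finally disable the engine.
 */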
2855 static void mtk_hwlro_rx_uninit(struct mtk_eth *eth)
2856 {
2857         int i;
2858         u32 val;
2859
2860         /* relinquish lro rings, flush aggregated packets */
2861         mtk_w32(eth, MTK_LRO_RING_RELINQUISH_REQ, MTK_PDMA_LRO_CTRL_DW0);
2862
2863         /* wait for the relinquishment to complete */
2864         for (i = 0; i < 10; i++) {
2865                 val = mtk_r32(eth, MTK_PDMA_LRO_CTRL_DW0);
2866                 if (val & MTK_LRO_RING_RELINQUISH_DONE) {
2867                         msleep(20);
2868                         continue;
2869                 }
2870                 break;
2871         }
2872
2873         /* invalidate lro rings */
2874         for (i = 1; i < MTK_MAX_RX_RING_NUM; i++)
2875                 mtk_w32(eth, 0, MTK_LRO_CTRL_DW2_CFG(i));
2876
2877         /* disable HW LRO */
2878         mtk_w32(eth, 0, MTK_PDMA_LRO_CTRL_DW0);
2879 }
2880
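/* Program a destination IP address into an LRO ring. The ring's MYIP
 * valid bit is cleared while the address is updated and set again
 * afterwards.
 */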
2881 static void mtk_hwlro_val_ipaddr(struct mtk_eth *eth, int idx, __be32 ip)
2882 {
2883         u32 reg_val;
2884
2885         reg_val = mtk_r32(eth, MTK_LRO_CTRL_DW2_CFG(idx));
2886
2887         /* invalidate the IP setting */
2888         mtk_w32(eth, (reg_val & ~MTK_RING_MYIP_VLD), MTK_LRO_CTRL_DW2_CFG(idx));
2889
2890         mtk_w32(eth, ip, MTK_LRO_DIP_DW0_CFG(idx));
2891
2892         /* validate the IP setting */
2893         mtk_w32(eth, (reg_val | MTK_RING_MYIP_VLD), MTK_LRO_CTRL_DW2_CFG(idx));
2894 }
2895
2896 static void mtk_hwlro_inval_ipaddr(struct mtk_eth *eth, int idx)
2897 {
2898         u32 reg_val;
2899
2900         reg_val = mtk_r32(eth, MTK_LRO_CTRL_DW2_CFG(idx));
2901
2902         /* invalidate the IP setting */
2903         mtk_w32(eth, (reg_val & ~MTK_RING_MYIP_VLD), MTK_LRO_CTRL_DW2_CFG(idx));
2904
2905         mtk_w32(eth, 0, MTK_LRO_DIP_DW0_CFG(idx));
2906 }
2907
2908 static int mtk_hwlro_get_ip_cnt(struct mtk_mac *mac)
2909 {
2910         int cnt = 0;
2911         int i;
2912
2913         for (i = 0; i < MTK_MAX_LRO_IP_CNT; i++) {
2914                 if (mac->hwlro_ip[i])
2915                         cnt++;
2916         }
2917
2918         return cnt;
2919 }
2920
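/* Install a hardware LRO destination-IP rule via the ethtool rxnfc
 * interface. Only TCPv4 flows keyed on the destination address are
 * accepted, with at most two rule locations (0 and 1) per MAC.
 * For illustration only (the interface name and address below are
 * examples, not taken from this driver), such a rule is typically
 * added from userspace with:
 *   ethtool -N eth0 flow-type tcp4 dst-ip 192.168.1.1 loc 0
 */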
2921 static int mtk_hwlro_add_ipaddr(struct net_device *dev,
2922                                 struct ethtool_rxnfc *cmd)
2923 {
2924         struct ethtool_rx_flow_spec *fsp =
2925                 (struct ethtool_rx_flow_spec *)&cmd->fs;
2926         struct mtk_mac *mac = netdev_priv(dev);
2927         struct mtk_eth *eth = mac->hw;
2928         int hwlro_idx;
2929
2930         if ((fsp->flow_type != TCP_V4_FLOW) ||
2931             (!fsp->h_u.tcp_ip4_spec.ip4dst) ||
2932             (fsp->location > 1))
2933                 return -EINVAL;
2934
2935         mac->hwlro_ip[fsp->location] = htonl(fsp->h_u.tcp_ip4_spec.ip4dst);
2936         hwlro_idx = (mac->id * MTK_MAX_LRO_IP_CNT) + fsp->location;
2937
2938         mac->hwlro_ip_cnt = mtk_hwlro_get_ip_cnt(mac);
2939
2940         mtk_hwlro_val_ipaddr(eth, hwlro_idx, mac->hwlro_ip[fsp->location]);
2941
2942         return 0;
2943 }
2944
2945 static int mtk_hwlro_del_ipaddr(struct net_device *dev,
2946                                 struct ethtool_rxnfc *cmd)
2947 {
2948         struct ethtool_rx_flow_spec *fsp =
2949                 (struct ethtool_rx_flow_spec *)&cmd->fs;
2950         struct mtk_mac *mac = netdev_priv(dev);
2951         struct mtk_eth *eth = mac->hw;
2952         int hwlro_idx;
2953
2954         if (fsp->location > 1)
2955                 return -EINVAL;
2956
2957         mac->hwlro_ip[fsp->location] = 0;
2958         hwlro_idx = (mac->id * MTK_MAX_LRO_IP_CNT) + fsp->location;
2959
2960         mac->hwlro_ip_cnt = mtk_hwlro_get_ip_cnt(mac);
2961
2962         mtk_hwlro_inval_ipaddr(eth, hwlro_idx);
2963
2964         return 0;
2965 }
2966
2967 static void mtk_hwlro_netdev_disable(struct net_device *dev)
2968 {
2969         struct mtk_mac *mac = netdev_priv(dev);
2970         struct mtk_eth *eth = mac->hw;
2971         int i, hwlro_idx;
2972
2973         for (i = 0; i < MTK_MAX_LRO_IP_CNT; i++) {
2974                 mac->hwlro_ip[i] = 0;
2975                 hwlro_idx = (mac->id * MTK_MAX_LRO_IP_CNT) + i;
2976
2977                 mtk_hwlro_inval_ipaddr(eth, hwlro_idx);
2978         }
2979
2980         mac->hwlro_ip_cnt = 0;
2981 }
2982
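/* Report a programmed LRO rule back to ethtool as a TCP_V4_FLOW entry
 * that matches on the destination IPv4 address only.
 */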
2983 static int mtk_hwlro_get_fdir_entry(struct net_device *dev,
2984                                     struct ethtool_rxnfc *cmd)
2985 {
2986         struct mtk_mac *mac = netdev_priv(dev);
2987         struct ethtool_rx_flow_spec *fsp =
2988                 (struct ethtool_rx_flow_spec *)&cmd->fs;
2989
2990         if (fsp->location >= ARRAY_SIZE(mac->hwlro_ip))
2991                 return -EINVAL;
2992
2993         /* only the TCP IPv4 destination address is meaningful; other fields are ignored */
2994         fsp->flow_type = TCP_V4_FLOW;
2995         fsp->h_u.tcp_ip4_spec.ip4dst = ntohl(mac->hwlro_ip[fsp->location]);
2996         fsp->m_u.tcp_ip4_spec.ip4dst = 0;
2997
2998         fsp->h_u.tcp_ip4_spec.ip4src = 0;
2999         fsp->m_u.tcp_ip4_spec.ip4src = 0xffffffff;
3000         fsp->h_u.tcp_ip4_spec.psrc = 0;
3001         fsp->m_u.tcp_ip4_spec.psrc = 0xffff;
3002         fsp->h_u.tcp_ip4_spec.pdst = 0;
3003         fsp->m_u.tcp_ip4_spec.pdst = 0xffff;
3004         fsp->h_u.tcp_ip4_spec.tos = 0;
3005         fsp->m_u.tcp_ip4_spec.tos = 0xff;
3006
3007         return 0;
3008 }
3009
3010 static int mtk_hwlro_get_fdir_all(struct net_device *dev,
3011                                   struct ethtool_rxnfc *cmd,
3012                                   u32 *rule_locs)
3013 {
3014         struct mtk_mac *mac = netdev_priv(dev);
3015         int cnt = 0;
3016         int i;
3017
3018         for (i = 0; i < MTK_MAX_LRO_IP_CNT; i++) {
3019                 if (cnt == cmd->rule_cnt)
3020                         return -EMSGSIZE;
3021
3022                 if (mac->hwlro_ip[i]) {
3023                         rule_locs[cnt] = i;
3024                         cnt++;
3025                 }
3026         }
3027
3028         cmd->rule_cnt = cnt;
3029
3030         return 0;
3031 }
3032
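/* Keep NETIF_F_LRO set while hardware LRO IP rules are still
 * programmed, so the feature cannot be switched off underneath them.
 */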
3033 static netdev_features_t mtk_fix_features(struct net_device *dev,
3034                                           netdev_features_t features)
3035 {
3036         if (!(features & NETIF_F_LRO)) {
3037                 struct mtk_mac *mac = netdev_priv(dev);
3038                 int ip_cnt = mtk_hwlro_get_ip_cnt(mac);
3039
3040                 if (ip_cnt) {
3041                         netdev_info(dev, "RX flow rules are programmed, keeping LRO enabled\n");
3042
3043                         features |= NETIF_F_LRO;
3044                 }
3045         }
3046
3047         return features;
3048 }
3049
3050 static int mtk_set_features(struct net_device *dev, netdev_features_t features)
3051 {
3052         netdev_features_t diff = dev->features ^ features;
3053
3054         if ((diff & NETIF_F_LRO) && !(features & NETIF_F_LRO))
3055                 mtk_hwlro_netdev_disable(dev);
3056
3057         return 0;
3058 }
3059
3060 /* wait for DMA to finish whatever it is doing before we start using it again */
3061 static int mtk_dma_busy_wait(struct mtk_eth *eth)
3062 {
3063         unsigned int reg;
3064         int ret;
3065         u32 val;
3066
3067         if (MTK_HAS_CAPS(eth->soc->caps, MTK_QDMA))
3068                 reg = eth->soc->reg_map->qdma.glo_cfg;
3069         else
3070                 reg = eth->soc->reg_map->pdma.glo_cfg;
3071
3072         ret = readx_poll_timeout_atomic(__raw_readl, eth->base + reg, val,
3073                                         !(val & (MTK_RX_DMA_BUSY | MTK_TX_DMA_BUSY)),
3074                                         5, MTK_DMA_BUSY_TIMEOUT_US);
3075         if (ret)
3076                 dev_err(eth->dev, "DMA init timeout\n");
3077
3078         return ret;
3079 }
3080
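/* Allocate and program all DMA rings: wait for the engines to go idle,
 * set up the QDMA scratch FQ (if present), the TX ring, the QDMA and
 * PDMA RX rings, the optional HW LRO rings, and finally the QDMA flow
 * control thresholds.
 */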
3081 static int mtk_dma_init(struct mtk_eth *eth)
3082 {
3083         int err;
3084         u32 i;
3085
3086         if (mtk_dma_busy_wait(eth))
3087                 return -EBUSY;
3088
3089         if (MTK_HAS_CAPS(eth->soc->caps, MTK_QDMA)) {
3090                 /* QDMA needs scratch memory for internal reordering of the
3091                  * descriptors
3092                  */
3093                 err = mtk_init_fq_dma(eth);
3094                 if (err)
3095                         return err;
3096         }
3097
3098         err = mtk_tx_alloc(eth);
3099         if (err)
3100                 return err;
3101
3102         if (MTK_HAS_CAPS(eth->soc->caps, MTK_QDMA)) {
3103                 err = mtk_rx_alloc(eth, 0, MTK_RX_FLAGS_QDMA);
3104                 if (err)
3105                         return err;
3106         }
3107
3108         err = mtk_rx_alloc(eth, 0, MTK_RX_FLAGS_NORMAL);
3109         if (err)
3110                 return err;
3111
3112         if (eth->hwlro) {
3113                 for (i = 1; i < MTK_MAX_RX_RING_NUM; i++) {
3114                         err = mtk_rx_alloc(eth, i, MTK_RX_FLAGS_HWLRO);
3115                         if (err)
3116                                 return err;
3117                 }
3118                 err = mtk_hwlro_rx_init(eth);
3119                 if (err)
3120                         return err;
3121         }
3122
3123         if (MTK_HAS_CAPS(eth->soc->caps, MTK_QDMA)) {
3124                 /* Enable random early drop and set drop threshold
3125                  * automatically
3126                  */
3127                 mtk_w32(eth, FC_THRES_DROP_MODE | FC_THRES_DROP_EN |
3128                         FC_THRES_MIN, eth->soc->reg_map->qdma.fc_th);
3129                 mtk_w32(eth, 0x0, eth->soc->reg_map->qdma.hred);
3130         }
3131
3132         return 0;
3133 }
3134
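/* Release everything set up by mtk_dma_init(): reset the netdev queues,
 * free the scratch ring (unless it lives in SRAM), clean the TX and RX
 * rings including the HW LRO rings, and free the FQ scratch buffers.
 */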
3135 static void mtk_dma_free(struct mtk_eth *eth)
3136 {
3137         const struct mtk_soc_data *soc = eth->soc;
3138         int i;
3139
3140         for (i = 0; i < MTK_MAX_DEVS; i++)
3141                 if (eth->netdev[i])
3142                         netdev_reset_queue(eth->netdev[i]);
3143         if (!MTK_HAS_CAPS(soc->caps, MTK_SRAM) && eth->scratch_ring) {
3144                 dma_free_coherent(eth->dma_dev,
3145                                   MTK_QDMA_RING_SIZE * soc->tx.desc_size,
3146                                   eth->scratch_ring, eth->phy_scratch_ring);
3147                 eth->scratch_ring = NULL;
3148                 eth->phy_scratch_ring = 0;
3149         }
3150         mtk_tx_clean(eth);
3151         mtk_rx_clean(eth, &eth->rx_ring[0], MTK_HAS_CAPS(soc->caps, MTK_SRAM));
3152         mtk_rx_clean(eth, &eth->rx_ring_qdma, false);
3153
3154         if (eth->hwlro) {
3155                 mtk_hwlro_rx_uninit(eth);
3156                 for (i = 1; i < MTK_MAX_RX_RING_NUM; i++)
3157                         mtk_rx_clean(eth, &eth->rx_ring[i], false);
3158         }
3159
3160         for (i = 0; i < DIV_ROUND_UP(soc->tx.fq_dma_size, MTK_FQ_DMA_LENGTH); i++) {
3161                 kfree(eth->scratch_head[i]);
3162                 eth->scratch_head[i] = NULL;
3163         }
3164 }
3165
3166 static bool mtk_hw_reset_check(struct mtk_eth *eth)
3167 {
3168         u32 val = mtk_r32(eth, MTK_INT_STATUS2);
3169
3170         return (val & MTK_FE_INT_FQ_EMPTY) || (val & MTK_FE_INT_RFIFO_UF) ||
3171                (val & MTK_FE_INT_RFIFO_OV) || (val & MTK_FE_INT_TSO_FAIL) ||
3172                (val & MTK_FE_INT_TSO_ALIGN) || (val & MTK_FE_INT_TSO_ILLEGAL);
3173 }
3174
3175 static void mtk_tx_timeout(struct net_device *dev, unsigned int txqueue)
3176 {
3177         struct mtk_mac *mac = netdev_priv(dev);
3178         struct mtk_eth *eth = mac->hw;
3179
3180         if (test_bit(MTK_RESETTING, &eth->state))
3181                 return;
3182
3183         if (!mtk_hw_reset_check(eth))
3184                 return;
3185
3186         eth->netdev[mac->id]->stats.tx_errors++;
3187         netif_err(eth, tx_err, dev, "transmit timed out\n");
3188
3189         schedule_work(&eth->pending_work);
3190 }
3191
3192 static irqreturn_t mtk_handle_irq_rx(int irq, void *_eth)
3193 {
3194         struct mtk_eth *eth = _eth;
3195
3196         eth->rx_events++;
3197         if (likely(napi_schedule_prep(&eth->rx_napi))) {
3198                 mtk_rx_irq_disable(eth, eth->soc->rx.irq_done_mask);
3199                 __napi_schedule(&eth->rx_napi);
3200         }
3201
3202         return IRQ_HANDLED;
3203 }
3204
3205 static irqreturn_t mtk_handle_irq_tx(int irq, void *_eth)
3206 {
3207         struct mtk_eth *eth = _eth;
3208
3209         eth->tx_events++;
3210         if (likely(napi_schedule_prep(&eth->tx_napi))) {
3211                 mtk_tx_irq_disable(eth, MTK_TX_DONE_INT);
3212                 __napi_schedule(&eth->tx_napi);
3213         }
3214
3215         return IRQ_HANDLED;
3216 }
3217
3218 static irqreturn_t mtk_handle_irq(int irq, void *_eth)
3219 {
3220         struct mtk_eth *eth = _eth;
3221         const struct mtk_reg_map *reg_map = eth->soc->reg_map;
3222
3223         if (mtk_r32(eth, reg_map->pdma.irq_mask) &
3224             eth->soc->rx.irq_done_mask) {
3225                 if (mtk_r32(eth, reg_map->pdma.irq_status) &
3226                     eth->soc->rx.irq_done_mask)
3227                         mtk_handle_irq_rx(irq, _eth);
3228         }
3229         if (mtk_r32(eth, reg_map->tx_irq_mask) & MTK_TX_DONE_INT) {
3230                 if (mtk_r32(eth, reg_map->tx_irq_status) & MTK_TX_DONE_INT)
3231                         mtk_handle_irq_tx(irq, _eth);
3232         }
3233
3234         return IRQ_HANDLED;
3235 }
3236
3237 #ifdef CONFIG_NET_POLL_CONTROLLER
3238 static void mtk_poll_controller(struct net_device *dev)
3239 {
3240         struct mtk_mac *mac = netdev_priv(dev);
3241         struct mtk_eth *eth = mac->hw;
3242
3243         mtk_tx_irq_disable(eth, MTK_TX_DONE_INT);
3244         mtk_rx_irq_disable(eth, eth->soc->rx.irq_done_mask);
3245         mtk_handle_irq_rx(eth->irq[2], dev);
3246         mtk_tx_irq_enable(eth, MTK_TX_DONE_INT);
3247         mtk_rx_irq_enable(eth, eth->soc->rx.irq_done_mask);
3248 }
3249 #endif
3250
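/* Initialize the DMA rings and enable the QDMA and/or PDMA engines with
 * the global configuration bits appropriate for this SoC generation.
 */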
3251 static int mtk_start_dma(struct mtk_eth *eth)
3252 {
3253         u32 val, rx_2b_offset = (NET_IP_ALIGN == 2) ? MTK_RX_2B_OFFSET : 0;
3254         const struct mtk_reg_map *reg_map = eth->soc->reg_map;
3255         int err;
3256
3257         err = mtk_dma_init(eth);
3258         if (err) {
3259                 mtk_dma_free(eth);
3260                 return err;
3261         }
3262
3263         if (MTK_HAS_CAPS(eth->soc->caps, MTK_QDMA)) {
3264                 val = mtk_r32(eth, reg_map->qdma.glo_cfg);
3265                 val |= MTK_TX_DMA_EN | MTK_RX_DMA_EN |
3266                        MTK_TX_BT_32DWORDS | MTK_NDP_CO_PRO |
3267                        MTK_RX_2B_OFFSET | MTK_TX_WB_DDONE;
3268
3269                 if (mtk_is_netsys_v2_or_greater(eth))
3270                         val |= MTK_MUTLI_CNT | MTK_RESV_BUF |
3271                                MTK_WCOMP_EN | MTK_DMAD_WR_WDONE |
3272                                MTK_CHK_DDONE_EN | MTK_LEAKY_BUCKET_EN;
3273                 else
3274                         val |= MTK_RX_BT_32DWORDS;
3275                 mtk_w32(eth, val, reg_map->qdma.glo_cfg);
3276
3277                 mtk_w32(eth,
3278                         MTK_RX_DMA_EN | rx_2b_offset |
3279                         MTK_RX_BT_32DWORDS | MTK_MULTI_EN,
3280                         reg_map->pdma.glo_cfg);
3281         } else {
3282                 mtk_w32(eth, MTK_TX_WB_DDONE | MTK_TX_DMA_EN | MTK_RX_DMA_EN |
3283                         MTK_MULTI_EN | MTK_PDMA_SIZE_8DWORDS,
3284                         reg_map->pdma.glo_cfg);
3285         }
3286
3287         return 0;
3288 }
3289
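/* Program the GDMA forwarding configuration for one MAC: enable RX
 * checksum offload, select the forwarding target and, for DSA-backed
 * netdevs, the MTK special tag.
 */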
3290 static void mtk_gdm_config(struct mtk_eth *eth, u32 id, u32 config)
3291 {
3292         u32 val;
3293
3294         if (MTK_HAS_CAPS(eth->soc->caps, MTK_SOC_MT7628))
3295                 return;
3296
3297         val = mtk_r32(eth, MTK_GDMA_FWD_CFG(id));
3298
3299         /* reset the forward port; frames go to the PDMA by default */
3300         val &= ~0xffff;
3301
3302         /* Enable RX checksum */
3303         val |= MTK_GDMA_ICS_EN | MTK_GDMA_TCS_EN | MTK_GDMA_UCS_EN;
3304
3305         val |= config;
3306
3307         if (eth->netdev[id] && netdev_uses_dsa(eth->netdev[id]))
3308                 val |= MTK_GDMA_SPECIAL_TAG;
3309
3310         mtk_w32(eth, val, MTK_GDMA_FWD_CFG(id));
3311 }
3312
3314 static bool mtk_uses_dsa(struct net_device *dev)
3315 {
3316 #if IS_ENABLED(CONFIG_NET_DSA)
3317         return netdev_uses_dsa(dev) &&
3318                dev->dsa_ptr->tag_ops->proto == DSA_TAG_PROTO_MTK;
3319 #else
3320         return false;
3321 #endif
3322 }
3323
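/* Netdevice notifier used with QDMA: when a DSA user port stacked on
 * this MAC changes link state, update the rate shaping of the matching
 * QDMA TX queue.
 */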
3324 static int mtk_device_event(struct notifier_block *n, unsigned long event, void *ptr)
3325 {
3326         struct mtk_mac *mac = container_of(n, struct mtk_mac, device_notifier);
3327         struct mtk_eth *eth = mac->hw;
3328         struct net_device *dev = netdev_notifier_info_to_dev(ptr);
3329         struct ethtool_link_ksettings s;
3330         struct net_device *ldev;
3331         struct list_head *iter;
3332         struct dsa_port *dp;
3333
3334         if (event != NETDEV_CHANGE)
3335                 return NOTIFY_DONE;
3336
3337         netdev_for_each_lower_dev(dev, ldev, iter) {
3338                 if (netdev_priv(ldev) == mac)
3339                         goto found;
3340         }
3341
3342         return NOTIFY_DONE;
3343
3344 found:
3345         if (!dsa_user_dev_check(dev))
3346                 return NOTIFY_DONE;
3347
3348         if (__ethtool_get_link_ksettings(dev, &s))
3349                 return NOTIFY_DONE;
3350
3351         if (s.base.speed == 0 || s.base.speed == ((__u32)-1))
3352                 return NOTIFY_DONE;
3353
3354         dp = dsa_port_from_netdev(dev);
3355         if (dp->index >= MTK_QDMA_NUM_QUEUES)
3356                 return NOTIFY_DONE;
3357
3358         if (mac->speed > 0 && mac->speed <= s.base.speed)
3359                 s.base.speed = 0;
3360
3361         mtk_set_queue_speed(eth, dp->index + 3, s.base.speed);
3362
3363         return NOTIFY_DONE;
3364 }
3365
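/* ndo_open: DMA, the PPEs and the GDM forwarding setup are shared
 * across MACs and are only brought up on the first open (tracked by
 * dma_refcnt); later opens just take a reference.
 */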
3366 static int mtk_open(struct net_device *dev)
3367 {
3368         struct mtk_mac *mac = netdev_priv(dev);
3369         struct mtk_eth *eth = mac->hw;
3370         struct mtk_mac *target_mac;
3371         int i, err, ppe_num;
3372
3373         ppe_num = eth->soc->ppe_num;
3374
3375         err = phylink_of_phy_connect(mac->phylink, mac->of_node, 0);
3376         if (err) {
3377                 netdev_err(dev, "%s: could not attach PHY: %d\n", __func__,
3378                            err);
3379                 return err;
3380         }
3381
3382         /* all netdevs share the same DMA ring, so only bring it up once */
3383         if (!refcount_read(&eth->dma_refcnt)) {
3384                 const struct mtk_soc_data *soc = eth->soc;
3385                 u32 gdm_config;
3386                 int i;
3387
3388                 err = mtk_start_dma(eth);
3389                 if (err) {
3390                         phylink_disconnect_phy(mac->phylink);
3391                         return err;
3392                 }
3393
3394                 for (i = 0; i < ARRAY_SIZE(eth->ppe); i++)
3395                         mtk_ppe_start(eth->ppe[i]);
3396
3397                 for (i = 0; i < MTK_MAX_DEVS; i++) {
3398                         if (!eth->netdev[i])
3399                                 continue;
3400
3401                         target_mac = netdev_priv(eth->netdev[i]);
3402                         if (!soc->offload_version) {
3403                                 target_mac->ppe_idx = 0;
3404                                 gdm_config = MTK_GDMA_TO_PDMA;
3405                         } else if (ppe_num >= 3 && target_mac->id == 2) {
3406                                 target_mac->ppe_idx = 2;
3407                                 gdm_config = soc->reg_map->gdma_to_ppe[2];
3408                         } else if (ppe_num >= 2 && target_mac->id == 1) {
3409                                 target_mac->ppe_idx = 1;
3410                                 gdm_config = soc->reg_map->gdma_to_ppe[1];
3411                         } else {
3412                                 target_mac->ppe_idx = 0;
3413                                 gdm_config = soc->reg_map->gdma_to_ppe[0];
3414                         }
3415                         mtk_gdm_config(eth, target_mac->id, gdm_config);
3416                 }
3417                 /* Reset and enable PSE */
3418                 mtk_w32(eth, RST_GL_PSE, MTK_RST_GL);
3419                 mtk_w32(eth, 0, MTK_RST_GL);
3420
3421                 napi_enable(&eth->tx_napi);
3422                 napi_enable(&eth->rx_napi);
3423                 mtk_tx_irq_enable(eth, MTK_TX_DONE_INT);
3424                 mtk_rx_irq_enable(eth, soc->rx.irq_done_mask);
3425                 refcount_set(&eth->dma_refcnt, 1);
3426         } else {
3427                 refcount_inc(&eth->dma_refcnt);
3428         }
3429
3430         phylink_start(mac->phylink);
3431         netif_tx_start_all_queues(dev);
3432
3433         if (mtk_is_netsys_v2_or_greater(eth))
3434                 return 0;
3435
3436         if (mtk_uses_dsa(dev) && !eth->prog) {
3437                 for (i = 0; i < ARRAY_SIZE(eth->dsa_meta); i++) {
3438                         struct metadata_dst *md_dst = eth->dsa_meta[i];
3439
3440                         if (md_dst)
3441                                 continue;
3442
3443                         md_dst = metadata_dst_alloc(0, METADATA_HW_PORT_MUX,
3444                                                     GFP_KERNEL);
3445                         if (!md_dst)
3446                                 return -ENOMEM;
3447
3448                         md_dst->u.port_info.port_id = i;
3449                         eth->dsa_meta[i] = md_dst;
3450                 }
3451         } else {
3452                 /* Hardware DSA untagging and VLAN RX offloading need to be
3453                  * disabled if at least one MAC does not use DSA.
3454                  */
3455                 u32 val = mtk_r32(eth, MTK_CDMP_IG_CTRL);
3456
3457                 val &= ~MTK_CDMP_STAG_EN;
3458                 mtk_w32(eth, val, MTK_CDMP_IG_CTRL);
3459
3460                 mtk_w32(eth, 0, MTK_CDMP_EG_CTRL);
3461         }
3462
3463         return 0;
3464 }
3465
3466 static void mtk_stop_dma(struct mtk_eth *eth, u32 glo_cfg)
3467 {
3468         u32 val;
3469         int i;
3470
3471         /* stop the dma engine */
3472         spin_lock_bh(&eth->page_lock);
3473         val = mtk_r32(eth, glo_cfg);
3474         mtk_w32(eth, val & ~(MTK_TX_WB_DDONE | MTK_RX_DMA_EN | MTK_TX_DMA_EN),
3475                 glo_cfg);
3476         spin_unlock_bh(&eth->page_lock);
3477
3478         /* wait for dma stop */
3479         for (i = 0; i < 10; i++) {
3480                 val = mtk_r32(eth, glo_cfg);
3481                 if (val & (MTK_TX_DMA_BUSY | MTK_RX_DMA_BUSY)) {
3482                         msleep(20);
3483                         continue;
3484                 }
3485                 break;
3486         }
3487 }
3488
3489 static int mtk_stop(struct net_device *dev)
3490 {
3491         struct mtk_mac *mac = netdev_priv(dev);
3492         struct mtk_eth *eth = mac->hw;
3493         int i;
3494
3495         phylink_stop(mac->phylink);
3496
3497         netif_tx_disable(dev);
3498
3499         phylink_disconnect_phy(mac->phylink);
3500
3501         /* only shut down DMA if this is the last user */
3502         if (!refcount_dec_and_test(&eth->dma_refcnt))
3503                 return 0;
3504
3505         for (i = 0; i < MTK_MAX_DEVS; i++)
3506                 mtk_gdm_config(eth, i, MTK_GDMA_DROP_ALL);
3507
3508         mtk_tx_irq_disable(eth, MTK_TX_DONE_INT);
3509         mtk_rx_irq_disable(eth, eth->soc->rx.irq_done_mask);
3510         napi_disable(&eth->tx_napi);
3511         napi_disable(&eth->rx_napi);
3512
3513         cancel_work_sync(&eth->rx_dim.work);
3514         cancel_work_sync(&eth->tx_dim.work);
3515
3516         if (MTK_HAS_CAPS(eth->soc->caps, MTK_QDMA))
3517                 mtk_stop_dma(eth, eth->soc->reg_map->qdma.glo_cfg);
3518         mtk_stop_dma(eth, eth->soc->reg_map->pdma.glo_cfg);
3519
3520         mtk_dma_free(eth);
3521
3522         for (i = 0; i < ARRAY_SIZE(eth->ppe); i++)
3523                 mtk_ppe_stop(eth->ppe[i]);
3524
3525         return 0;
3526 }
3527
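/* Attach or detach an XDP program. XDP is mutually exclusive with
 * hardware LRO and with MTUs larger than the page_pool buffer size;
 * switching between XDP and non-XDP mode restarts the device.
 */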
3528 static int mtk_xdp_setup(struct net_device *dev, struct bpf_prog *prog,
3529                          struct netlink_ext_ack *extack)
3530 {
3531         struct mtk_mac *mac = netdev_priv(dev);
3532         struct mtk_eth *eth = mac->hw;
3533         struct bpf_prog *old_prog;
3534         bool need_update;
3535
3536         if (eth->hwlro) {
3537                 NL_SET_ERR_MSG_MOD(extack, "XDP not supported with HWLRO");
3538                 return -EOPNOTSUPP;
3539         }
3540
3541         if (dev->mtu > MTK_PP_MAX_BUF_SIZE) {
3542                 NL_SET_ERR_MSG_MOD(extack, "MTU too large for XDP");
3543                 return -EOPNOTSUPP;
3544         }
3545
3546         need_update = !!eth->prog != !!prog;
3547         if (netif_running(dev) && need_update)
3548                 mtk_stop(dev);
3549
3550         old_prog = rcu_replace_pointer(eth->prog, prog, lockdep_rtnl_is_held());
3551         if (old_prog)
3552                 bpf_prog_put(old_prog);
3553
3554         if (netif_running(dev) && need_update)
3555                 return mtk_open(dev);
3556
3557         return 0;
3558 }
3559
3560 static int mtk_xdp(struct net_device *dev, struct netdev_bpf *xdp)
3561 {
3562         switch (xdp->command) {
3563         case XDP_SETUP_PROG:
3564                 return mtk_xdp_setup(dev, xdp->prog, xdp->extack);
3565         default:
3566                 return -EINVAL;
3567         }
3568 }
3569
3570 static void ethsys_reset(struct mtk_eth *eth, u32 reset_bits)
3571 {
3572         regmap_update_bits(eth->ethsys, ETHSYS_RSTCTRL,
3573                            reset_bits,
3574                            reset_bits);
3575
3576         usleep_range(1000, 1100);
3577         regmap_update_bits(eth->ethsys, ETHSYS_RSTCTRL,
3578                            reset_bits,
3579                            ~reset_bits);
3580         mdelay(10);
3581 }
3582
3583 static void mtk_clk_disable(struct mtk_eth *eth)
3584 {
3585         int clk;
3586
3587         for (clk = MTK_CLK_MAX - 1; clk >= 0; clk--)
3588                 clk_disable_unprepare(eth->clks[clk]);
3589 }
3590
3591 static int mtk_clk_enable(struct mtk_eth *eth)
3592 {
3593         int clk, ret;
3594
3595         for (clk = 0; clk < MTK_CLK_MAX ; clk++) {
3596                 ret = clk_prepare_enable(eth->clks[clk]);
3597                 if (ret)
3598                         goto err_disable_clks;
3599         }
3600
3601         return 0;
3602
3603 err_disable_clks:
3604         while (--clk >= 0)
3605                 clk_disable_unprepare(eth->clks[clk]);
3606
3607         return ret;
3608 }
3609
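/* Net DIM worker for the RX path: translate the selected moderation
 * profile into the PDMA (and, with QDMA, the QDMA) delay-interrupt
 * register. The hardware time unit is 20us.
 */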
3610 static void mtk_dim_rx(struct work_struct *work)
3611 {
3612         struct dim *dim = container_of(work, struct dim, work);
3613         struct mtk_eth *eth = container_of(dim, struct mtk_eth, rx_dim);
3614         const struct mtk_reg_map *reg_map = eth->soc->reg_map;
3615         struct dim_cq_moder cur_profile;
3616         u32 val, cur;
3617
3618         cur_profile = net_dim_get_rx_moderation(eth->rx_dim.mode,
3619                                                 dim->profile_ix);
3620         spin_lock_bh(&eth->dim_lock);
3621
3622         val = mtk_r32(eth, reg_map->pdma.delay_irq);
3623         val &= MTK_PDMA_DELAY_TX_MASK;
3624         val |= MTK_PDMA_DELAY_RX_EN;
3625
3626         cur = min_t(u32, DIV_ROUND_UP(cur_profile.usec, 20), MTK_PDMA_DELAY_PTIME_MASK);
3627         val |= cur << MTK_PDMA_DELAY_RX_PTIME_SHIFT;
3628
3629         cur = min_t(u32, cur_profile.pkts, MTK_PDMA_DELAY_PINT_MASK);
3630         val |= cur << MTK_PDMA_DELAY_RX_PINT_SHIFT;
3631
3632         mtk_w32(eth, val, reg_map->pdma.delay_irq);
3633         if (MTK_HAS_CAPS(eth->soc->caps, MTK_QDMA))
3634                 mtk_w32(eth, val, reg_map->qdma.delay_irq);
3635
3636         spin_unlock_bh(&eth->dim_lock);
3637
3638         dim->state = DIM_START_MEASURE;
3639 }
3640
3641 static void mtk_dim_tx(struct work_struct *work)
3642 {
3643         struct dim *dim = container_of(work, struct dim, work);
3644         struct mtk_eth *eth = container_of(dim, struct mtk_eth, tx_dim);
3645         const struct mtk_reg_map *reg_map = eth->soc->reg_map;
3646         struct dim_cq_moder cur_profile;
3647         u32 val, cur;
3648
3649         cur_profile = net_dim_get_tx_moderation(eth->tx_dim.mode,
3650                                                 dim->profile_ix);
3651         spin_lock_bh(&eth->dim_lock);
3652
3653         val = mtk_r32(eth, reg_map->pdma.delay_irq);
3654         val &= MTK_PDMA_DELAY_RX_MASK;
3655         val |= MTK_PDMA_DELAY_TX_EN;
3656
3657         cur = min_t(u32, DIV_ROUND_UP(cur_profile.usec, 20), MTK_PDMA_DELAY_PTIME_MASK);
3658         val |= cur << MTK_PDMA_DELAY_TX_PTIME_SHIFT;
3659
3660         cur = min_t(u32, cur_profile.pkts, MTK_PDMA_DELAY_PINT_MASK);
3661         val |= cur << MTK_PDMA_DELAY_TX_PINT_SHIFT;
3662
3663         mtk_w32(eth, val, reg_map->pdma.delay_irq);
3664         if (MTK_HAS_CAPS(eth->soc->caps, MTK_QDMA))
3665                 mtk_w32(eth, val, reg_map->qdma.delay_irq);
3666
3667         spin_unlock_bh(&eth->dim_lock);
3668
3669         dim->state = DIM_START_MEASURE;
3670 }
3671
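/* Select the smallest MAC_MCR_MAX_RX bucket (1518/1536/1552/2048 bytes)
 * that can hold the requested maximum frame length.
 */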
3672 static void mtk_set_mcr_max_rx(struct mtk_mac *mac, u32 val)
3673 {
3674         struct mtk_eth *eth = mac->hw;
3675         u32 mcr_cur, mcr_new;
3676
3677         if (MTK_HAS_CAPS(eth->soc->caps, MTK_SOC_MT7628))
3678                 return;
3679
3680         mcr_cur = mtk_r32(mac->hw, MTK_MAC_MCR(mac->id));
3681         mcr_new = mcr_cur & ~MAC_MCR_MAX_RX_MASK;
3682
3683         if (val <= 1518)
3684                 mcr_new |= MAC_MCR_MAX_RX(MAC_MCR_MAX_RX_1518);
3685         else if (val <= 1536)
3686                 mcr_new |= MAC_MCR_MAX_RX(MAC_MCR_MAX_RX_1536);
3687         else if (val <= 1552)
3688                 mcr_new |= MAC_MCR_MAX_RX(MAC_MCR_MAX_RX_1552);
3689         else
3690                 mcr_new |= MAC_MCR_MAX_RX(MAC_MCR_MAX_RX_2048);
3691
3692         if (mcr_new != mcr_cur)
3693                 mtk_w32(mac->hw, mcr_new, MTK_MAC_MCR(mac->id));
3694 }
3695
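/* Cold reset: assert the FE/ETH reset bits in the ethsys RSTCTRL
 * register together with the PPE and WDMA bits required by this
 * NETSYS generation.
 */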
3696 static void mtk_hw_reset(struct mtk_eth *eth)
3697 {
3698         u32 val;
3699
3700         if (mtk_is_netsys_v2_or_greater(eth))
3701                 regmap_write(eth->ethsys, ETHSYS_FE_RST_CHK_IDLE_EN, 0);
3702
3703         if (mtk_is_netsys_v3_or_greater(eth)) {
3704                 val = RSTCTRL_PPE0_V3;
3705
3706                 if (MTK_HAS_CAPS(eth->soc->caps, MTK_RSTCTRL_PPE1))
3707                         val |= RSTCTRL_PPE1_V3;
3708
3709                 if (MTK_HAS_CAPS(eth->soc->caps, MTK_RSTCTRL_PPE2))
3710                         val |= RSTCTRL_PPE2;
3711
3712                 val |= RSTCTRL_WDMA0 | RSTCTRL_WDMA1 | RSTCTRL_WDMA2;
3713         } else if (mtk_is_netsys_v2_or_greater(eth)) {
3714                 val = RSTCTRL_PPE0_V2;
3715
3716                 if (MTK_HAS_CAPS(eth->soc->caps, MTK_RSTCTRL_PPE1))
3717                         val |= RSTCTRL_PPE1;
3718         } else {
3719                 val = RSTCTRL_PPE0;
3720         }
3721
3722         ethsys_reset(eth, RSTCTRL_ETH | RSTCTRL_FE | val);
3723
3724         if (mtk_is_netsys_v3_or_greater(eth))
3725                 regmap_write(eth->ethsys, ETHSYS_FE_RST_CHK_IDLE_EN,
3726                              0x6f8ff);
3727         else if (mtk_is_netsys_v2_or_greater(eth))
3728                 regmap_write(eth->ethsys, ETHSYS_FE_RST_CHK_IDLE_EN,
3729                              0x3ffffff);
3730 }
3731
3732 static u32 mtk_hw_reset_read(struct mtk_eth *eth)
3733 {
3734         u32 val;
3735
3736         regmap_read(eth->ethsys, ETHSYS_RSTCTRL, &val);
3737         return val;
3738 }
3739
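/* Staged warm reset of the frame engine. If the FE reset bit cannot be
 * asserted, fall back to a full cold reset.
 */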
3740 static void mtk_hw_warm_reset(struct mtk_eth *eth)
3741 {
3742         u32 rst_mask, val;
3743
3744         regmap_update_bits(eth->ethsys, ETHSYS_RSTCTRL, RSTCTRL_FE,
3745                            RSTCTRL_FE);
3746         if (readx_poll_timeout_atomic(mtk_hw_reset_read, eth, val,
3747                                       val & RSTCTRL_FE, 1, 1000)) {
3748                 dev_err(eth->dev, "warm reset failed\n");
3749                 mtk_hw_reset(eth);
3750                 return;
3751         }
3752
3753         if (mtk_is_netsys_v3_or_greater(eth)) {
3754                 rst_mask = RSTCTRL_ETH | RSTCTRL_PPE0_V3;
3755                 if (MTK_HAS_CAPS(eth->soc->caps, MTK_RSTCTRL_PPE1))
3756                         rst_mask |= RSTCTRL_PPE1_V3;
3757                 if (MTK_HAS_CAPS(eth->soc->caps, MTK_RSTCTRL_PPE2))
3758                         rst_mask |= RSTCTRL_PPE2;
3759
3760                 rst_mask |= RSTCTRL_WDMA0 | RSTCTRL_WDMA1 | RSTCTRL_WDMA2;
3761         } else if (mtk_is_netsys_v2_or_greater(eth)) {
3762                 rst_mask = RSTCTRL_ETH | RSTCTRL_PPE0_V2;
3763                 if (MTK_HAS_CAPS(eth->soc->caps, MTK_RSTCTRL_PPE1))
3764                         rst_mask |= RSTCTRL_PPE1;
3765         } else {
3766                 rst_mask = RSTCTRL_ETH | RSTCTRL_PPE0;
3767         }
3768
3769         regmap_update_bits(eth->ethsys, ETHSYS_RSTCTRL, rst_mask, rst_mask);
3770
3771         udelay(1);
3772         val = mtk_hw_reset_read(eth);
3773         if (!(val & rst_mask))
3774                 dev_err(eth->dev, "warm reset stage0 failed %08x (%08x)\n",
3775                         val, rst_mask);
3776
3777         rst_mask |= RSTCTRL_FE;
3778         regmap_update_bits(eth->ethsys, ETHSYS_RSTCTRL, rst_mask, ~rst_mask);
3779
3780         udelay(1);
3781         val = mtk_hw_reset_read(eth);
3782         if (val & rst_mask)
3783                 dev_err(eth->dev, "warm reset stage1 failed %08x (%08x)\n",
3784                         val, rst_mask);
3785 }
3786
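/* Heuristic DMA hang detection on the WDMA, QDMA and ADMA blocks. A
 * suspicious state must persist across three consecutive checks before
 * a reset is requested.
 */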
3787 static bool mtk_hw_check_dma_hang(struct mtk_eth *eth)
3788 {
3789         const struct mtk_reg_map *reg_map = eth->soc->reg_map;
3790         bool gmac1_tx, gmac2_tx, gdm1_tx, gdm2_tx;
3791         bool oq_hang, cdm1_busy, adma_busy;
3792         bool wtx_busy, cdm_full, oq_free;
3793         u32 wdidx, val, gdm1_fc, gdm2_fc;
3794         bool qfsm_hang, qfwd_hang;
3795         bool ret = false;
3796
3797         if (MTK_HAS_CAPS(eth->soc->caps, MTK_SOC_MT7628))
3798                 return false;
3799
3800         /* WDMA sanity checks */
3801         wdidx = mtk_r32(eth, reg_map->wdma_base[0] + 0xc);
3802
3803         val = mtk_r32(eth, reg_map->wdma_base[0] + 0x204);
3804         wtx_busy = FIELD_GET(MTK_TX_DMA_BUSY, val);
3805
3806         val = mtk_r32(eth, reg_map->wdma_base[0] + 0x230);
3807         cdm_full = !FIELD_GET(MTK_CDM_TXFIFO_RDY, val);
3808
3809         oq_free  = (!(mtk_r32(eth, reg_map->pse_oq_sta) & GENMASK(24, 16)) &&
3810                     !(mtk_r32(eth, reg_map->pse_oq_sta + 0x4) & GENMASK(8, 0)) &&
3811                     !(mtk_r32(eth, reg_map->pse_oq_sta + 0x10) & GENMASK(24, 16)));
3812
3813         if (wdidx == eth->reset.wdidx && wtx_busy && cdm_full && oq_free) {
3814                 if (++eth->reset.wdma_hang_count > 2) {
3815                         eth->reset.wdma_hang_count = 0;
3816                         ret = true;
3817                 }
3818                 goto out;
3819         }
3820
3821         /* QDMA sanity checks */
3822         qfsm_hang = !!mtk_r32(eth, reg_map->qdma.qtx_cfg + 0x234);
3823         qfwd_hang = !mtk_r32(eth, reg_map->qdma.qtx_cfg + 0x308);
3824
3825         gdm1_tx = FIELD_GET(GENMASK(31, 16), mtk_r32(eth, MTK_FE_GDM1_FSM)) > 0;
3826         gdm2_tx = FIELD_GET(GENMASK(31, 16), mtk_r32(eth, MTK_FE_GDM2_FSM)) > 0;
3827         gmac1_tx = FIELD_GET(GENMASK(31, 24), mtk_r32(eth, MTK_MAC_FSM(0))) != 1;
3828         gmac2_tx = FIELD_GET(GENMASK(31, 24), mtk_r32(eth, MTK_MAC_FSM(1))) != 1;
3829         gdm1_fc = mtk_r32(eth, reg_map->gdm1_cnt + 0x24);
3830         gdm2_fc = mtk_r32(eth, reg_map->gdm1_cnt + 0x64);
3831
3832         if (qfsm_hang && qfwd_hang &&
3833             ((gdm1_tx && gmac1_tx && gdm1_fc < 1) ||
3834              (gdm2_tx && gmac2_tx && gdm2_fc < 1))) {
3835                 if (++eth->reset.qdma_hang_count > 2) {
3836                         eth->reset.qdma_hang_count = 0;
3837                         ret = true;
3838                 }
3839                 goto out;
3840         }
3841
3842         /* ADMA sanity checks */
3843         oq_hang = !!(mtk_r32(eth, reg_map->pse_oq_sta) & GENMASK(8, 0));
3844         cdm1_busy = !!(mtk_r32(eth, MTK_FE_CDM1_FSM) & GENMASK(31, 16));
3845         adma_busy = !(mtk_r32(eth, reg_map->pdma.adma_rx_dbg0) & GENMASK(4, 0)) &&
3846                     !(mtk_r32(eth, reg_map->pdma.adma_rx_dbg0) & BIT(6));
3847
3848         if (oq_hang && cdm1_busy && adma_busy) {
3849                 if (++eth->reset.adma_hang_count > 2) {
3850                         eth->reset.adma_hang_count = 0;
3851                         ret = true;
3852                 }
3853                 goto out;
3854         }
3855
3856         eth->reset.wdma_hang_count = 0;
3857         eth->reset.qdma_hang_count = 0;
3858         eth->reset.adma_hang_count = 0;
3859 out:
3860         eth->reset.wdidx = wdidx;
3861
3862         return ret;
3863 }
3864
3865 static void mtk_hw_reset_monitor_work(struct work_struct *work)
3866 {
3867         struct delayed_work *del_work = to_delayed_work(work);
3868         struct mtk_eth *eth = container_of(del_work, struct mtk_eth,
3869                                            reset.monitor_work);
3870
3871         if (test_bit(MTK_RESETTING, &eth->state))
3872                 goto out;
3873
3874         /* DMA stuck checks */
3875         if (mtk_hw_check_dma_hang(eth))
3876                 schedule_work(&eth->pending_work);
3877
3878 out:
3879         schedule_delayed_work(&eth->reset.monitor_work,
3880                               MTK_DMA_MONITOR_TIMEOUT);
3881 }
3882
3883 static int mtk_hw_init(struct mtk_eth *eth, bool reset)
3884 {
3885         u32 dma_mask = ETHSYS_DMA_AG_MAP_PDMA | ETHSYS_DMA_AG_MAP_QDMA |
3886                        ETHSYS_DMA_AG_MAP_PPE;
3887         const struct mtk_reg_map *reg_map = eth->soc->reg_map;
3888         int i, val, ret;
3889
3890         if (!reset && test_and_set_bit(MTK_HW_INIT, &eth->state))
3891                 return 0;
3892
3893         if (!reset) {
3894                 pm_runtime_enable(eth->dev);
3895                 pm_runtime_get_sync(eth->dev);
3896
3897                 ret = mtk_clk_enable(eth);
3898                 if (ret)
3899                         goto err_disable_pm;
3900         }
3901
3902         if (eth->ethsys)
3903                 regmap_update_bits(eth->ethsys, ETHSYS_DMA_AG_MAP, dma_mask,
3904                                    of_dma_is_coherent(eth->dma_dev->of_node) * dma_mask);
3905
3906         if (MTK_HAS_CAPS(eth->soc->caps, MTK_SOC_MT7628)) {
3907                 ret = device_reset(eth->dev);
3908                 if (ret) {
3909                         dev_err(eth->dev, "MAC reset failed!\n");
3910                         goto err_disable_pm;
3911                 }
3912
3913                 /* set interrupt delays based on current Net DIM sample */
3914                 mtk_dim_rx(&eth->rx_dim.work);
3915                 mtk_dim_tx(&eth->tx_dim.work);
3916
3917                 /* disable delay and normal interrupt */
3918                 mtk_tx_irq_disable(eth, ~0);
3919                 mtk_rx_irq_disable(eth, ~0);
3920
3921                 return 0;
3922         }
3923
3924         msleep(100);
3925
3926         if (reset)
3927                 mtk_hw_warm_reset(eth);
3928         else
3929                 mtk_hw_reset(eth);
3930
3931         if (mtk_is_netsys_v3_or_greater(eth)) {
3932                 /* Set FE to PDMAv2 if necessary */
3933                 val = mtk_r32(eth, MTK_FE_GLO_MISC);
3934                 mtk_w32(eth, val | BIT(4), MTK_FE_GLO_MISC);
3935         }
3936
3937         if (eth->pctl) {
3938                 /* Set GE2 driving and slew rate */
3939                 regmap_write(eth->pctl, GPIO_DRV_SEL10, 0xa00);
3940
3941                 /* set GE2 TDSEL */
3942                 regmap_write(eth->pctl, GPIO_OD33_CTRL8, 0x5);
3943
3944                 /* set GE2 TUNE */
3945                 regmap_write(eth->pctl, GPIO_BIAS_CTRL, 0x0);
3946         }
3947
3948         /* Set link down as the default for each GMAC. Each MAC's MCR will be
3949          * set up with a more appropriate value when mtk_mac_config() is
3950          * invoked.
3951          */
3952         for (i = 0; i < MTK_MAX_DEVS; i++) {
3953                 struct net_device *dev = eth->netdev[i];
3954
3955                 if (!dev)
3956                         continue;
3957
3958                 mtk_w32(eth, MAC_MCR_FORCE_LINK_DOWN, MTK_MAC_MCR(i));
3959                 mtk_set_mcr_max_rx(netdev_priv(dev),
3960                                    dev->mtu + MTK_RX_ETH_HLEN);
3961         }
3962
3963         /* Instruct the CDM to parse the MTK special tag on frames from the
3964          * CPU; this also works for untagged packets.
3965          */
3966         val = mtk_r32(eth, MTK_CDMQ_IG_CTRL);
3967         mtk_w32(eth, val | MTK_CDMQ_STAG_EN, MTK_CDMQ_IG_CTRL);
3968         if (mtk_is_netsys_v1(eth)) {
3969                 val = mtk_r32(eth, MTK_CDMP_IG_CTRL);
3970                 mtk_w32(eth, val | MTK_CDMP_STAG_EN, MTK_CDMP_IG_CTRL);
3971
3972                 mtk_w32(eth, 1, MTK_CDMP_EG_CTRL);
3973         }
3974
3975         /* set interrupt delays based on current Net DIM sample */
3976         mtk_dim_rx(&eth->rx_dim.work);
3977         mtk_dim_tx(&eth->tx_dim.work);
3978
3979         /* disable delay and normal interrupt */
3980         mtk_tx_irq_disable(eth, ~0);
3981         mtk_rx_irq_disable(eth, ~0);
3982
3983         /* FE int grouping */
3984         mtk_w32(eth, MTK_TX_DONE_INT, reg_map->pdma.int_grp);
3985         mtk_w32(eth, eth->soc->rx.irq_done_mask, reg_map->pdma.int_grp + 4);
3986         mtk_w32(eth, MTK_TX_DONE_INT, reg_map->qdma.int_grp);
3987         mtk_w32(eth, eth->soc->rx.irq_done_mask, reg_map->qdma.int_grp + 4);
3988         mtk_w32(eth, 0x21021000, MTK_FE_INT_GRP);
3989
3990         if (mtk_is_netsys_v3_or_greater(eth)) {
3991                 /* PSE should not drop port1, port8 and port9 packets */
3992                 mtk_w32(eth, 0x00000302, PSE_DROP_CFG);
3993
3994                 /* GDM and CDM Threshold */
3995                 mtk_w32(eth, 0x00000707, MTK_CDMW0_THRES);
3996                 mtk_w32(eth, 0x00000077, MTK_CDMW1_THRES);
3997
3998                 /* Disable GDM1 RX CRC stripping */
3999                 mtk_m32(eth, MTK_GDMA_STRP_CRC, 0, MTK_GDMA_FWD_CFG(0));
4000
4001                 /* The PSE GDM3 MIB counters have incorrect hardware default
4002                  * values, so read-clear them beforehand to keep ethtool from
4003                  * reporting wrong MIB values.
4004                  */
4005                 for (i = 0; i < 0x80; i += 0x4)
4006                         mtk_r32(eth, reg_map->gdm1_cnt + 0x100 + i);
4007         } else if (!mtk_is_netsys_v1(eth)) {
4008                 /* PSE should not drop port8 and port9 packets from WDMA Tx */
4009                 mtk_w32(eth, 0x00000300, PSE_DROP_CFG);
4010
4011                 /* PSE should drop packets to port 8/9 on WDMA Rx ring full */
4012                 mtk_w32(eth, 0x00000300, PSE_PPE0_DROP);
4013
4014                 /* PSE Free Queue Flow Control  */
4015                 mtk_w32(eth, 0x01fa01f4, PSE_FQFC_CFG2);
4016
4017                 /* PSE config input queue threshold */
4018                 mtk_w32(eth, 0x001a000e, PSE_IQ_REV(1));
4019                 mtk_w32(eth, 0x01ff001a, PSE_IQ_REV(2));
4020                 mtk_w32(eth, 0x000e01ff, PSE_IQ_REV(3));
4021                 mtk_w32(eth, 0x000e000e, PSE_IQ_REV(4));
4022                 mtk_w32(eth, 0x000e000e, PSE_IQ_REV(5));
4023                 mtk_w32(eth, 0x000e000e, PSE_IQ_REV(6));
4024                 mtk_w32(eth, 0x000e000e, PSE_IQ_REV(7));
4025                 mtk_w32(eth, 0x000e000e, PSE_IQ_REV(8));
4026
4027                 /* PSE config output queue threshold */
4028                 mtk_w32(eth, 0x000f000a, PSE_OQ_TH(1));
4029                 mtk_w32(eth, 0x001a000f, PSE_OQ_TH(2));
4030                 mtk_w32(eth, 0x000f001a, PSE_OQ_TH(3));
4031                 mtk_w32(eth, 0x01ff000f, PSE_OQ_TH(4));
4032                 mtk_w32(eth, 0x000f000f, PSE_OQ_TH(5));
4033                 mtk_w32(eth, 0x0006000f, PSE_OQ_TH(6));
4034                 mtk_w32(eth, 0x00060006, PSE_OQ_TH(7));
4035                 mtk_w32(eth, 0x00060006, PSE_OQ_TH(8));
4036
4037                 /* GDM and CDM Threshold */
4038                 mtk_w32(eth, 0x00000004, MTK_GDM2_THRES);
4039                 mtk_w32(eth, 0x00000004, MTK_CDMW0_THRES);
4040                 mtk_w32(eth, 0x00000004, MTK_CDMW1_THRES);
4041                 mtk_w32(eth, 0x00000004, MTK_CDME0_THRES);
4042                 mtk_w32(eth, 0x00000004, MTK_CDME1_THRES);
4043                 mtk_w32(eth, 0x00000004, MTK_CDMM_THRES);
4044         }
4045
4046         return 0;
4047
4048 err_disable_pm:
4049         if (!reset) {
4050                 pm_runtime_put_sync(eth->dev);
4051                 pm_runtime_disable(eth->dev);
4052         }
4053
4054         return ret;
4055 }
4056
4057 static int mtk_hw_deinit(struct mtk_eth *eth)
4058 {
4059         if (!test_and_clear_bit(MTK_HW_INIT, &eth->state))
4060                 return 0;
4061
4062         mtk_clk_disable(eth);
4063
4064         pm_runtime_put_sync(eth->dev);
4065         pm_runtime_disable(eth->dev);
4066
4067         return 0;
4068 }
4069
4070 static void mtk_uninit(struct net_device *dev)
4071 {
4072         struct mtk_mac *mac = netdev_priv(dev);
4073         struct mtk_eth *eth = mac->hw;
4074
4075         phylink_disconnect_phy(mac->phylink);
4076         mtk_tx_irq_disable(eth, ~0);
4077         mtk_rx_irq_disable(eth, ~0);
4078 }
4079
4080 static int mtk_change_mtu(struct net_device *dev, int new_mtu)
4081 {
4082         int length = new_mtu + MTK_RX_ETH_HLEN;
4083         struct mtk_mac *mac = netdev_priv(dev);
4084         struct mtk_eth *eth = mac->hw;
4085
4086         if (rcu_access_pointer(eth->prog) &&
4087             length > MTK_PP_MAX_BUF_SIZE) {
4088                 netdev_err(dev, "Invalid MTU for XDP mode\n");
4089                 return -EINVAL;
4090         }
4091
4092         mtk_set_mcr_max_rx(mac, length);
4093         WRITE_ONCE(dev->mtu, new_mtu);
4094
4095         return 0;
4096 }
4097
4098 static int mtk_do_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
4099 {
4100         struct mtk_mac *mac = netdev_priv(dev);
4101
4102         switch (cmd) {
4103         case SIOCGMIIPHY:
4104         case SIOCGMIIREG:
4105         case SIOCSMIIREG:
4106                 return phylink_mii_ioctl(mac->phylink, ifr, cmd);
4107         default:
4108                 break;
4109         }
4110
4111         return -EOPNOTSUPP;
4112 }
4113
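/* Quiesce the hardware before a frame engine reset: force the FE PPE
 * ports and the GMACs link down, prepare the PPEs and mask the NETSYS
 * interrupts.
 */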
4114 static void mtk_prepare_for_reset(struct mtk_eth *eth)
4115 {
4116         u32 val;
4117         int i;
4118
4119         /* set FE PPE ports link down */
4120         for (i = MTK_GMAC1_ID;
4121              i <= (mtk_is_netsys_v3_or_greater(eth) ? MTK_GMAC3_ID : MTK_GMAC2_ID);
4122              i += 2) {
4123                 val = mtk_r32(eth, MTK_FE_GLO_CFG(i)) | MTK_FE_LINK_DOWN_P(PSE_PPE0_PORT);
4124                 if (MTK_HAS_CAPS(eth->soc->caps, MTK_RSTCTRL_PPE1))
4125                         val |= MTK_FE_LINK_DOWN_P(PSE_PPE1_PORT);
4126                 if (MTK_HAS_CAPS(eth->soc->caps, MTK_RSTCTRL_PPE2))
4127                         val |= MTK_FE_LINK_DOWN_P(PSE_PPE2_PORT);
4128                 mtk_w32(eth, val, MTK_FE_GLO_CFG(i));
4129         }
4130
4131         /* adjust PPE configurations to prepare for reset */
4132         for (i = 0; i < ARRAY_SIZE(eth->ppe); i++)
4133                 mtk_ppe_prepare_reset(eth->ppe[i]);
4134
4135         /* disable NETSYS interrupts */
4136         mtk_w32(eth, 0, MTK_FE_INT_ENABLE);
4137
4138         /* force link down GMAC */
4139         for (i = 0; i < 2; i++) {
4140                 val = mtk_r32(eth, MTK_MAC_MCR(i)) & ~MAC_MCR_FORCE_LINK;
4141                 mtk_w32(eth, val, MTK_MAC_MCR(i));
4142         }
4143 }
4144
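/* Frame engine reset worker: stop every running netdev, warm-reset the
 * hardware, then reopen the devices that were up and restore the PPE
 * port link state.
 */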
4145 static void mtk_pending_work(struct work_struct *work)
4146 {
4147         struct mtk_eth *eth = container_of(work, struct mtk_eth, pending_work);
4148         unsigned long restart = 0;
4149         u32 val;
4150         int i;
4151
4152         rtnl_lock();
4153         set_bit(MTK_RESETTING, &eth->state);
4154
4155         mtk_prepare_for_reset(eth);
4156         mtk_wed_fe_reset();
4157         /* Run the reset preliminary configuration again to avoid any
4158          * possible race during the FE reset, since it can run with the RTNL
4159          * lock released.
4160          */
4160         mtk_prepare_for_reset(eth);
4161
4162         /* stop all devices to make sure that dma is properly shut down */
4163         for (i = 0; i < MTK_MAX_DEVS; i++) {
4164                 if (!eth->netdev[i] || !netif_running(eth->netdev[i]))
4165                         continue;
4166
4167                 mtk_stop(eth->netdev[i]);
4168                 __set_bit(i, &restart);
4169         }
4170
4171         usleep_range(15000, 16000);
4172
4173         if (eth->dev->pins)
4174                 pinctrl_select_state(eth->dev->pins->p,
4175                                      eth->dev->pins->default_state);
4176         mtk_hw_init(eth, true);
4177
4178         /* restart DMA and enable IRQs */
4179         for (i = 0; i < MTK_MAX_DEVS; i++) {
4180                 if (!eth->netdev[i] || !test_bit(i, &restart))
4181                         continue;
4182
4183                 if (mtk_open(eth->netdev[i])) {
4184                         netif_alert(eth, ifup, eth->netdev[i],
4185                                     "Driver up/down cycle failed\n");
4186                         dev_close(eth->netdev[i]);
4187                 }
4188         }
4189
4190         /* set FE PPE ports link up */
4191         for (i = MTK_GMAC1_ID;
4192              i <= (mtk_is_netsys_v3_or_greater(eth) ? MTK_GMAC3_ID : MTK_GMAC2_ID);
4193              i += 2) {
4194                 val = mtk_r32(eth, MTK_FE_GLO_CFG(i)) & ~MTK_FE_LINK_DOWN_P(PSE_PPE0_PORT);
4195                 if (MTK_HAS_CAPS(eth->soc->caps, MTK_RSTCTRL_PPE1))
4196                         val &= ~MTK_FE_LINK_DOWN_P(PSE_PPE1_PORT);
4197                 if (MTK_HAS_CAPS(eth->soc->caps, MTK_RSTCTRL_PPE2))
4198                         val &= ~MTK_FE_LINK_DOWN_P(PSE_PPE2_PORT);
4199
4200                 mtk_w32(eth, val, MTK_FE_GLO_CFG(i));
4201         }
4202
4203         clear_bit(MTK_RESETTING, &eth->state);
4204
4205         mtk_wed_fe_reset_complete();
4206
4207         rtnl_unlock();
4208 }
4209
4210 static int mtk_free_dev(struct mtk_eth *eth)
4211 {
4212         int i;
4213
4214         for (i = 0; i < MTK_MAX_DEVS; i++) {
4215                 if (!eth->netdev[i])
4216                         continue;
4217                 free_netdev(eth->netdev[i]);
4218         }
4219
4220         for (i = 0; i < ARRAY_SIZE(eth->dsa_meta); i++) {
4221                 if (!eth->dsa_meta[i])
4222                         break;
4223                 metadata_dst_free(eth->dsa_meta[i]);
4224         }
4225
4226         return 0;
4227 }
4228
4229 static int mtk_unreg_dev(struct mtk_eth *eth)
4230 {
4231         int i;
4232
4233         for (i = 0; i < MTK_MAX_DEVS; i++) {
4234                 struct mtk_mac *mac;
4235                 if (!eth->netdev[i])
4236                         continue;
4237                 mac = netdev_priv(eth->netdev[i]);
4238                 if (MTK_HAS_CAPS(eth->soc->caps, MTK_QDMA))
4239                         unregister_netdevice_notifier(&mac->device_notifier);
4240                 unregister_netdev(eth->netdev[i]);
4241         }
4242
4243         return 0;
4244 }
4245
4246 static void mtk_sgmii_destroy(struct mtk_eth *eth)
4247 {
4248         int i;
4249
4250         for (i = 0; i < MTK_MAX_DEVS; i++)
4251                 mtk_pcs_lynxi_destroy(eth->sgmii_pcs[i]);
4252 }
4253
4254 static int mtk_cleanup(struct mtk_eth *eth)
4255 {
4256         mtk_sgmii_destroy(eth);
4257         mtk_unreg_dev(eth);
4258         mtk_free_dev(eth);
4259         cancel_work_sync(&eth->pending_work);
4260         cancel_delayed_work_sync(&eth->reset.monitor_work);
4261
4262         return 0;
4263 }
4264
4265 static int mtk_get_link_ksettings(struct net_device *ndev,
4266                                   struct ethtool_link_ksettings *cmd)
4267 {
4268         struct mtk_mac *mac = netdev_priv(ndev);
4269
4270         if (unlikely(test_bit(MTK_RESETTING, &mac->hw->state)))
4271                 return -EBUSY;
4272
4273         return phylink_ethtool_ksettings_get(mac->phylink, cmd);
4274 }
4275
4276 static int mtk_set_link_ksettings(struct net_device *ndev,
4277                                   const struct ethtool_link_ksettings *cmd)
4278 {
4279         struct mtk_mac *mac = netdev_priv(ndev);
4280
4281         if (unlikely(test_bit(MTK_RESETTING, &mac->hw->state)))
4282                 return -EBUSY;
4283
4284         return phylink_ethtool_ksettings_set(mac->phylink, cmd);
4285 }
4286
4287 static void mtk_get_drvinfo(struct net_device *dev,
4288                             struct ethtool_drvinfo *info)
4289 {
4290         struct mtk_mac *mac = netdev_priv(dev);
4291
4292         strscpy(info->driver, mac->hw->dev->driver->name, sizeof(info->driver));
4293         strscpy(info->bus_info, dev_name(mac->hw->dev), sizeof(info->bus_info));
4294         info->n_stats = ARRAY_SIZE(mtk_ethtool_stats);
4295 }
4296
4297 static u32 mtk_get_msglevel(struct net_device *dev)
4298 {
4299         struct mtk_mac *mac = netdev_priv(dev);
4300
4301         return mac->hw->msg_enable;
4302 }
4303
4304 static void mtk_set_msglevel(struct net_device *dev, u32 value)
4305 {
4306         struct mtk_mac *mac = netdev_priv(dev);
4307
4308         mac->hw->msg_enable = value;
4309 }
4310
4311 static int mtk_nway_reset(struct net_device *dev)
4312 {
4313         struct mtk_mac *mac = netdev_priv(dev);
4314
4315         if (unlikely(test_bit(MTK_RESETTING, &mac->hw->state)))
4316                 return -EBUSY;
4317
4318         if (!mac->phylink)
4319                 return -EOPNOTSUPP;
4320
4321         return phylink_ethtool_nway_reset(mac->phylink);
4322 }
4323
4324 static void mtk_get_strings(struct net_device *dev, u32 stringset, u8 *data)
4325 {
4326         int i;
4327
4328         switch (stringset) {
4329         case ETH_SS_STATS: {
4330                 struct mtk_mac *mac = netdev_priv(dev);
4331
4332                 for (i = 0; i < ARRAY_SIZE(mtk_ethtool_stats); i++)
4333                         ethtool_puts(&data, mtk_ethtool_stats[i].str);
4334                 if (mtk_page_pool_enabled(mac->hw))
4335                         page_pool_ethtool_stats_get_strings(data);
4336                 break;
4337         }
4338         default:
4339                 break;
4340         }
4341 }
4342
4343 static int mtk_get_sset_count(struct net_device *dev, int sset)
4344 {
4345         switch (sset) {
4346         case ETH_SS_STATS: {
4347                 int count = ARRAY_SIZE(mtk_ethtool_stats);
4348                 struct mtk_mac *mac = netdev_priv(dev);
4349
4350                 if (mtk_page_pool_enabled(mac->hw))
4351                         count += page_pool_ethtool_stats_get_count();
4352                 return count;
4353         }
4354         default:
4355                 return -EOPNOTSUPP;
4356         }
4357 }
4358
4359 static void mtk_ethtool_pp_stats(struct mtk_eth *eth, u64 *data)
4360 {
4361         struct page_pool_stats stats = {};
4362         int i;
4363
4364         for (i = 0; i < ARRAY_SIZE(eth->rx_ring); i++) {
4365                 struct mtk_rx_ring *ring = &eth->rx_ring[i];
4366
4367                 if (!ring->page_pool)
4368                         continue;
4369
4370                 page_pool_get_stats(ring->page_pool, &stats);
4371         }
4372         page_pool_ethtool_stats_get(data, &stats);
4373 }
4374
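/* Copy the MAC counters into the ethtool data array. If the interface is up,
 * opportunistically refresh the hardware counters first (trylock only, so
 * this never blocks), then use the u64_stats seqcount to retry the copy until
 * a snapshot is obtained that was not concurrently updated.
 */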
4375 static void mtk_get_ethtool_stats(struct net_device *dev,
4376                                   struct ethtool_stats *stats, u64 *data)
4377 {
4378         struct mtk_mac *mac = netdev_priv(dev);
4379         struct mtk_hw_stats *hwstats = mac->hw_stats;
4380         u64 *data_src, *data_dst;
4381         unsigned int start;
4382         int i;
4383
4384         if (unlikely(test_bit(MTK_RESETTING, &mac->hw->state)))
4385                 return;
4386
4387         if (netif_running(dev) && netif_device_present(dev)) {
4388                 if (spin_trylock_bh(&hwstats->stats_lock)) {
4389                         mtk_stats_update_mac(mac);
4390                         spin_unlock_bh(&hwstats->stats_lock);
4391                 }
4392         }
4393
4394         data_src = (u64 *)hwstats;
4395
4396         do {
4397                 data_dst = data;
4398                 start = u64_stats_fetch_begin(&hwstats->syncp);
4399
4400                 for (i = 0; i < ARRAY_SIZE(mtk_ethtool_stats); i++)
4401                         *data_dst++ = *(data_src + mtk_ethtool_stats[i].offset);
4402                 if (mtk_page_pool_enabled(mac->hw))
4403                         mtk_ethtool_pp_stats(mac->hw, data_dst);
4404         } while (u64_stats_fetch_retry(&hwstats->syncp, start));
4405 }
4406
4407 static int mtk_get_rxnfc(struct net_device *dev, struct ethtool_rxnfc *cmd,
4408                          u32 *rule_locs)
4409 {
4410         int ret = -EOPNOTSUPP;
4411
4412         switch (cmd->cmd) {
4413         case ETHTOOL_GRXRINGS:
4414                 if (dev->hw_features & NETIF_F_LRO) {
4415                         cmd->data = MTK_MAX_RX_RING_NUM;
4416                         ret = 0;
4417                 }
4418                 break;
4419         case ETHTOOL_GRXCLSRLCNT:
4420                 if (dev->hw_features & NETIF_F_LRO) {
4421                         struct mtk_mac *mac = netdev_priv(dev);
4422
4423                         cmd->rule_cnt = mac->hwlro_ip_cnt;
4424                         ret = 0;
4425                 }
4426                 break;
4427         case ETHTOOL_GRXCLSRULE:
4428                 if (dev->hw_features & NETIF_F_LRO)
4429                         ret = mtk_hwlro_get_fdir_entry(dev, cmd);
4430                 break;
4431         case ETHTOOL_GRXCLSRLALL:
4432                 if (dev->hw_features & NETIF_F_LRO)
4433                         ret = mtk_hwlro_get_fdir_all(dev, cmd,
4434                                                      rule_locs);
4435                 break;
4436         default:
4437                 break;
4438         }
4439
4440         return ret;
4441 }
4442
4443 static int mtk_set_rxnfc(struct net_device *dev, struct ethtool_rxnfc *cmd)
4444 {
4445         int ret = -EOPNOTSUPP;
4446
4447         switch (cmd->cmd) {
4448         case ETHTOOL_SRXCLSRLINS:
4449                 if (dev->hw_features & NETIF_F_LRO)
4450                         ret = mtk_hwlro_add_ipaddr(dev, cmd);
4451                 break;
4452         case ETHTOOL_SRXCLSRLDEL:
4453                 if (dev->hw_features & NETIF_F_LRO)
4454                         ret = mtk_hwlro_del_ipaddr(dev, cmd);
4455                 break;
4456         default:
4457                 break;
4458         }
4459
4460         return ret;
4461 }
4462
4463 static void mtk_get_pauseparam(struct net_device *dev, struct ethtool_pauseparam *pause)
4464 {
4465         struct mtk_mac *mac = netdev_priv(dev);
4466
4467         phylink_ethtool_get_pauseparam(mac->phylink, pause);
4468 }
4469
4470 static int mtk_set_pauseparam(struct net_device *dev, struct ethtool_pauseparam *pause)
4471 {
4472         struct mtk_mac *mac = netdev_priv(dev);
4473
4474         return phylink_ethtool_set_pauseparam(mac->phylink, pause);
4475 }
4476
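/* TX queue selection: frames egressing through a DSA switch keep their
 * per-port queue mapping, offset by three so they do not collide with the
 * queues used for non-DSA traffic (which simply uses the MAC id). Anything
 * out of range falls back to queue 0.
 */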
4477 static u16 mtk_select_queue(struct net_device *dev, struct sk_buff *skb,
4478                             struct net_device *sb_dev)
4479 {
4480         struct mtk_mac *mac = netdev_priv(dev);
4481         unsigned int queue = 0;
4482
4483         if (netdev_uses_dsa(dev))
4484                 queue = skb_get_queue_mapping(skb) + 3;
4485         else
4486                 queue = mac->id;
4487
4488         if (queue >= dev->num_tx_queues)
4489                 queue = 0;
4490
4491         return queue;
4492 }
4493
4494 static const struct ethtool_ops mtk_ethtool_ops = {
4495         .get_link_ksettings     = mtk_get_link_ksettings,
4496         .set_link_ksettings     = mtk_set_link_ksettings,
4497         .get_drvinfo            = mtk_get_drvinfo,
4498         .get_msglevel           = mtk_get_msglevel,
4499         .set_msglevel           = mtk_set_msglevel,
4500         .nway_reset             = mtk_nway_reset,
4501         .get_link               = ethtool_op_get_link,
4502         .get_strings            = mtk_get_strings,
4503         .get_sset_count         = mtk_get_sset_count,
4504         .get_ethtool_stats      = mtk_get_ethtool_stats,
4505         .get_pauseparam         = mtk_get_pauseparam,
4506         .set_pauseparam         = mtk_set_pauseparam,
4507         .get_rxnfc              = mtk_get_rxnfc,
4508         .set_rxnfc              = mtk_set_rxnfc,
4509 };
4510
4511 static const struct net_device_ops mtk_netdev_ops = {
4512         .ndo_uninit             = mtk_uninit,
4513         .ndo_open               = mtk_open,
4514         .ndo_stop               = mtk_stop,
4515         .ndo_start_xmit         = mtk_start_xmit,
4516         .ndo_set_mac_address    = mtk_set_mac_address,
4517         .ndo_validate_addr      = eth_validate_addr,
4518         .ndo_eth_ioctl          = mtk_do_ioctl,
4519         .ndo_change_mtu         = mtk_change_mtu,
4520         .ndo_tx_timeout         = mtk_tx_timeout,
4521         .ndo_get_stats64        = mtk_get_stats64,
4522         .ndo_fix_features       = mtk_fix_features,
4523         .ndo_set_features       = mtk_set_features,
4524 #ifdef CONFIG_NET_POLL_CONTROLLER
4525         .ndo_poll_controller    = mtk_poll_controller,
4526 #endif
4527         .ndo_setup_tc           = mtk_eth_setup_tc,
4528         .ndo_bpf                = mtk_xdp,
4529         .ndo_xdp_xmit           = mtk_xdp_xmit,
4530         .ndo_select_queue       = mtk_select_queue,
4531 };
4532
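/* Create one MAC/netdev from a "mediatek,eth-mac" child node: read the MAC
 * id from "reg", the PHY mode via of_get_phy_mode() and the MAC address from
 * the usual DT properties, then allocate the netdev, per-MAC counters and the
 * phylink instance. An illustrative (not authoritative) child node looks
 * roughly like:
 *
 *	mac@0 {
 *		compatible = "mediatek,eth-mac";
 *		reg = <0>;
 *		phy-mode = "2500base-x";
 *	};
 */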
4533 static int mtk_add_mac(struct mtk_eth *eth, struct device_node *np)
4534 {
4535         const __be32 *_id = of_get_property(np, "reg", NULL);
4536         phy_interface_t phy_mode;
4537         struct phylink *phylink;
4538         struct mtk_mac *mac;
4539         int id, err;
4540         int txqs = 1;
4541         u32 val;
4542
4543         if (!_id) {
4544                 dev_err(eth->dev, "missing mac id\n");
4545                 return -EINVAL;
4546         }
4547
4548         id = be32_to_cpup(_id);
4549         if (id >= MTK_MAX_DEVS) {
4550                 dev_err(eth->dev, "%d is not a valid mac id\n", id);
4551                 return -EINVAL;
4552         }
4553
4554         if (eth->netdev[id]) {
4555                 dev_err(eth->dev, "duplicate mac id found: %d\n", id);
4556                 return -EINVAL;
4557         }
4558
4559         if (MTK_HAS_CAPS(eth->soc->caps, MTK_QDMA))
4560                 txqs = MTK_QDMA_NUM_QUEUES;
4561
4562         eth->netdev[id] = alloc_etherdev_mqs(sizeof(*mac), txqs, 1);
4563         if (!eth->netdev[id]) {
4564                 dev_err(eth->dev, "alloc_etherdev failed\n");
4565                 return -ENOMEM;
4566         }
4567         mac = netdev_priv(eth->netdev[id]);
4568         eth->mac[id] = mac;
4569         mac->id = id;
4570         mac->hw = eth;
4571         mac->of_node = np;
4572
4573         err = of_get_ethdev_address(mac->of_node, eth->netdev[id]);
4574         if (err == -EPROBE_DEFER)
4575                 return err;
4576
4577         if (err) {
4578                 /* If the MAC address is invalid, use a random MAC address */
4579                 eth_hw_addr_random(eth->netdev[id]);
4580                 dev_err(eth->dev, "generated random MAC address %pM\n",
4581                         eth->netdev[id]->dev_addr);
4582         }
4583
4584         memset(mac->hwlro_ip, 0, sizeof(mac->hwlro_ip));
4585         mac->hwlro_ip_cnt = 0;
4586
4587         mac->hw_stats = devm_kzalloc(eth->dev,
4588                                      sizeof(*mac->hw_stats),
4589                                      GFP_KERNEL);
4590         if (!mac->hw_stats) {
4591                 dev_err(eth->dev, "failed to allocate counter memory\n");
4592                 err = -ENOMEM;
4593                 goto free_netdev;
4594         }
4595         spin_lock_init(&mac->hw_stats->stats_lock);
4596         u64_stats_init(&mac->hw_stats->syncp);
4597
4598         if (mtk_is_netsys_v3_or_greater(eth))
4599                 mac->hw_stats->reg_offset = id * 0x80;
4600         else
4601                 mac->hw_stats->reg_offset = id * 0x40;
4602
4603         /* phylink create */
4604         err = of_get_phy_mode(np, &phy_mode);
4605         if (err) {
4606                 dev_err(eth->dev, "incorrect phy-mode\n");
4607                 goto free_netdev;
4608         }
4609
4610         /* mac config is not set */
4611         mac->interface = PHY_INTERFACE_MODE_NA;
4612         mac->speed = SPEED_UNKNOWN;
4613
4614         mac->phylink_config.dev = &eth->netdev[id]->dev;
4615         mac->phylink_config.type = PHYLINK_NETDEV;
4616         mac->phylink_config.mac_capabilities = MAC_ASYM_PAUSE | MAC_SYM_PAUSE |
4617                 MAC_10 | MAC_100 | MAC_1000 | MAC_2500FD;
4618
4619         /* MT7623 gmac0 is now missing its speed-specific PLL configuration
4620          * in its .mac_config method (since state->speed is not valid there).
4621          * Disable support for MII, GMII and RGMII.
4622          */
4623         if (!mac->hw->soc->disable_pll_modes || mac->id != 0) {
4624                 __set_bit(PHY_INTERFACE_MODE_MII,
4625                           mac->phylink_config.supported_interfaces);
4626                 __set_bit(PHY_INTERFACE_MODE_GMII,
4627                           mac->phylink_config.supported_interfaces);
4628
4629                 if (MTK_HAS_CAPS(mac->hw->soc->caps, MTK_RGMII))
4630                         phy_interface_set_rgmii(mac->phylink_config.supported_interfaces);
4631         }
4632
4633         if (MTK_HAS_CAPS(mac->hw->soc->caps, MTK_TRGMII) && !mac->id)
4634                 __set_bit(PHY_INTERFACE_MODE_TRGMII,
4635                           mac->phylink_config.supported_interfaces);
4636
4637         /* TRGMII is not permitted on MT7621 if using DDR2 */
4638         if (MTK_HAS_CAPS(mac->hw->soc->caps, MTK_GMAC1_TRGMII) &&
4639             MTK_HAS_CAPS(mac->hw->soc->caps, MTK_TRGMII_MT7621_CLK)) {
4640                 regmap_read(eth->ethsys, ETHSYS_SYSCFG, &val);
4641                 if (val & SYSCFG_DRAM_TYPE_DDR2)
4642                         __clear_bit(PHY_INTERFACE_MODE_TRGMII,
4643                                     mac->phylink_config.supported_interfaces);
4644         }
4645
4646         if (MTK_HAS_CAPS(mac->hw->soc->caps, MTK_SGMII)) {
4647                 __set_bit(PHY_INTERFACE_MODE_SGMII,
4648                           mac->phylink_config.supported_interfaces);
4649                 __set_bit(PHY_INTERFACE_MODE_1000BASEX,
4650                           mac->phylink_config.supported_interfaces);
4651                 __set_bit(PHY_INTERFACE_MODE_2500BASEX,
4652                           mac->phylink_config.supported_interfaces);
4653         }
4654
4655         if (mtk_is_netsys_v3_or_greater(mac->hw) &&
4656             MTK_HAS_CAPS(mac->hw->soc->caps, MTK_ESW_BIT) &&
4657             id == MTK_GMAC1_ID) {
4658                 mac->phylink_config.mac_capabilities = MAC_ASYM_PAUSE |
4659                                                        MAC_SYM_PAUSE |
4660                                                        MAC_10000FD;
4661                 phy_interface_zero(mac->phylink_config.supported_interfaces);
4662                 __set_bit(PHY_INTERFACE_MODE_INTERNAL,
4663                           mac->phylink_config.supported_interfaces);
4664         }
4665
4666         phylink = phylink_create(&mac->phylink_config,
4667                                  of_fwnode_handle(mac->of_node),
4668                                  phy_mode, &mtk_phylink_ops);
4669         if (IS_ERR(phylink)) {
4670                 err = PTR_ERR(phylink);
4671                 goto free_netdev;
4672         }
4673
4674         mac->phylink = phylink;
4675
4676         SET_NETDEV_DEV(eth->netdev[id], eth->dev);
4677         eth->netdev[id]->watchdog_timeo = 5 * HZ;
4678         eth->netdev[id]->netdev_ops = &mtk_netdev_ops;
4679         eth->netdev[id]->base_addr = (unsigned long)eth->base;
4680
4681         eth->netdev[id]->hw_features = eth->soc->hw_features;
4682         if (eth->hwlro)
4683                 eth->netdev[id]->hw_features |= NETIF_F_LRO;
4684
4685         eth->netdev[id]->vlan_features = eth->soc->hw_features &
4686                 ~NETIF_F_HW_VLAN_CTAG_TX;
4687         eth->netdev[id]->features |= eth->soc->hw_features;
4688         eth->netdev[id]->ethtool_ops = &mtk_ethtool_ops;
4689
4690         eth->netdev[id]->irq = eth->irq[0];
4691         eth->netdev[id]->dev.of_node = np;
4692
4693         if (MTK_HAS_CAPS(eth->soc->caps, MTK_SOC_MT7628))
4694                 eth->netdev[id]->max_mtu = MTK_MAX_RX_LENGTH - MTK_RX_ETH_HLEN;
4695         else
4696                 eth->netdev[id]->max_mtu = MTK_MAX_RX_LENGTH_2K - MTK_RX_ETH_HLEN;
4697
4698         if (MTK_HAS_CAPS(eth->soc->caps, MTK_QDMA)) {
4699                 mac->device_notifier.notifier_call = mtk_device_event;
4700                 register_netdevice_notifier(&mac->device_notifier);
4701         }
4702
4703         if (mtk_page_pool_enabled(eth))
4704                 eth->netdev[id]->xdp_features = NETDEV_XDP_ACT_BASIC |
4705                                                 NETDEV_XDP_ACT_REDIRECT |
4706                                                 NETDEV_XDP_ACT_NDO_XMIT |
4707                                                 NETDEV_XDP_ACT_NDO_XMIT_SG;
4708
4709         return 0;
4710
4711 free_netdev:
4712         free_netdev(eth->netdev[id]);
4713         return err;
4714 }
4715
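/* Switch the device used for DMA mappings (e.g. when WED takes over the
 * rings). All running netdevs are closed under rtnl, eth->dma_dev is swapped,
 * and the interfaces are reopened so their rings get remapped against the new
 * device.
 */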
4716 void mtk_eth_set_dma_device(struct mtk_eth *eth, struct device *dma_dev)
4717 {
4718         struct net_device *dev, *tmp;
4719         LIST_HEAD(dev_list);
4720         int i;
4721
4722         rtnl_lock();
4723
4724         for (i = 0; i < MTK_MAX_DEVS; i++) {
4725                 dev = eth->netdev[i];
4726
4727                 if (!dev || !(dev->flags & IFF_UP))
4728                         continue;
4729
4730                 list_add_tail(&dev->close_list, &dev_list);
4731         }
4732
4733         dev_close_many(&dev_list, false);
4734
4735         eth->dma_dev = dma_dev;
4736
4737         list_for_each_entry_safe(dev, tmp, &dev_list, close_list) {
4738                 list_del_init(&dev->close_list);
4739                 dev_open(dev, NULL);
4740         }
4741
4742         rtnl_unlock();
4743 }
4744
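/* Create a LynxI PCS instance for every "mediatek,sgmiisys" phandle found in
 * the ethernet node. The optional "mediatek,pnswap" property translates into
 * MTK_SGMII_FLAG_PN_SWAP for that PCS.
 */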
4745 static int mtk_sgmii_init(struct mtk_eth *eth)
4746 {
4747         struct device_node *np;
4748         struct regmap *regmap;
4749         u32 flags;
4750         int i;
4751
4752         for (i = 0; i < MTK_MAX_DEVS; i++) {
4753                 np = of_parse_phandle(eth->dev->of_node, "mediatek,sgmiisys", i);
4754                 if (!np)
4755                         break;
4756
4757                 regmap = syscon_node_to_regmap(np);
4758                 flags = 0;
4759                 if (of_property_read_bool(np, "mediatek,pnswap"))
4760                         flags |= MTK_SGMII_FLAG_PN_SWAP;
4761
4762                 of_node_put(np);
4763
4764                 if (IS_ERR(regmap))
4765                         return PTR_ERR(regmap);
4766
4767                 eth->sgmii_pcs[i] = mtk_pcs_lynxi_create(eth->dev, regmap,
4768                                                          eth->soc->ana_rgc3,
4769                                                          flags);
4770         }
4771
4772         return 0;
4773 }
4774
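/* Probe: map the register space (and SRAM where present), look up the syscon
 * regmaps, clocks and interrupts, bring up the frame engine, create one
 * netdev per "mediatek,eth-mac" child node and finally set up the PPE and
 * NAPI contexts. Error paths unwind in reverse order via the labels at the
 * bottom of the function.
 */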
4775 static int mtk_probe(struct platform_device *pdev)
4776 {
4777         struct resource *res = NULL, *res_sram;
4778         struct device_node *mac_np;
4779         struct mtk_eth *eth;
4780         int err, i;
4781
4782         eth = devm_kzalloc(&pdev->dev, sizeof(*eth), GFP_KERNEL);
4783         if (!eth)
4784                 return -ENOMEM;
4785
4786         eth->soc = of_device_get_match_data(&pdev->dev);
4787
4788         eth->dev = &pdev->dev;
4789         eth->dma_dev = &pdev->dev;
4790         eth->base = devm_platform_ioremap_resource(pdev, 0);
4791         if (IS_ERR(eth->base))
4792                 return PTR_ERR(eth->base);
4793
4794         if (MTK_HAS_CAPS(eth->soc->caps, MTK_SOC_MT7628))
4795                 eth->ip_align = NET_IP_ALIGN;
4796
4797         if (MTK_HAS_CAPS(eth->soc->caps, MTK_SRAM)) {
4798                 /* SRAM is actual memory and supports transparent access just like DRAM.
4799                  * Hence we don't require the __iomem annotation and don't need to use
4800                  * accessor functions to read from or write to SRAM.
4801                  */
4802                 if (mtk_is_netsys_v3_or_greater(eth)) {
4803                         eth->sram_base = (void __force *)devm_platform_ioremap_resource(pdev, 1);
4804                         if (IS_ERR(eth->sram_base))
4805                                 return PTR_ERR(eth->sram_base);
4806                 } else {
4807                         eth->sram_base = (void __force *)eth->base + MTK_ETH_SRAM_OFFSET;
4808                 }
4809         }
4810
4811         if (MTK_HAS_CAPS(eth->soc->caps, MTK_36BIT_DMA)) {
4812                 err = dma_set_mask(&pdev->dev, DMA_BIT_MASK(36));
4813                 if (!err)
4814                         err = dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(32));
4815
4816                 if (err) {
4817                         dev_err(&pdev->dev, "Wrong DMA config\n");
4818                         return -EINVAL;
4819                 }
4820         }
4821
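        /* Software-only state: locks, dynamic interrupt moderation (DIM)
         * contexts and the delayed reset monitor work.
         */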
4822         spin_lock_init(&eth->page_lock);
4823         spin_lock_init(&eth->tx_irq_lock);
4824         spin_lock_init(&eth->rx_irq_lock);
4825         spin_lock_init(&eth->dim_lock);
4826
4827         eth->rx_dim.mode = DIM_CQ_PERIOD_MODE_START_FROM_EQE;
4828         INIT_WORK(&eth->rx_dim.work, mtk_dim_rx);
4829         INIT_DELAYED_WORK(&eth->reset.monitor_work, mtk_hw_reset_monitor_work);
4830
4831         eth->tx_dim.mode = DIM_CQ_PERIOD_MODE_START_FROM_EQE;
4832         INIT_WORK(&eth->tx_dim.work, mtk_dim_tx);
4833
4834         if (!MTK_HAS_CAPS(eth->soc->caps, MTK_SOC_MT7628)) {
4835                 eth->ethsys = syscon_regmap_lookup_by_phandle(pdev->dev.of_node,
4836                                                               "mediatek,ethsys");
4837                 if (IS_ERR(eth->ethsys)) {
4838                         dev_err(&pdev->dev, "no ethsys regmap found\n");
4839                         return PTR_ERR(eth->ethsys);
4840                 }
4841         }
4842
4843         if (MTK_HAS_CAPS(eth->soc->caps, MTK_INFRA)) {
4844                 eth->infra = syscon_regmap_lookup_by_phandle(pdev->dev.of_node,
4845                                                              "mediatek,infracfg");
4846                 if (IS_ERR(eth->infra)) {
4847                         dev_err(&pdev->dev, "no infracfg regmap found\n");
4848                         return PTR_ERR(eth->infra);
4849                 }
4850         }
4851
4852         if (of_dma_is_coherent(pdev->dev.of_node)) {
4853                 struct regmap *cci;
4854
4855                 cci = syscon_regmap_lookup_by_phandle(pdev->dev.of_node,
4856                                                       "cci-control-port");
4857                 /* enable CPU/bus coherency */
4858                 if (!IS_ERR(cci))
4859                         regmap_write(cci, 0, 3);
4860         }
4861
4862         if (MTK_HAS_CAPS(eth->soc->caps, MTK_SGMII)) {
4863                 err = mtk_sgmii_init(eth);
4864
4865                 if (err)
4866                         return err;
4867         }
4868
4869         if (eth->soc->required_pctl) {
4870                 eth->pctl = syscon_regmap_lookup_by_phandle(pdev->dev.of_node,
4871                                                             "mediatek,pctl");
4872                 if (IS_ERR(eth->pctl)) {
4873                         dev_err(&pdev->dev, "no pctl regmap found\n");
4874                         err = PTR_ERR(eth->pctl);
4875                         goto err_destroy_sgmii;
4876                 }
4877         }
4878
4879         if (mtk_is_netsys_v2_or_greater(eth)) {
4880                 res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
4881                 if (!res) {
4882                         err = -EINVAL;
4883                         goto err_destroy_sgmii;
4884                 }
4885                 if (MTK_HAS_CAPS(eth->soc->caps, MTK_SRAM)) {
4886                         if (mtk_is_netsys_v3_or_greater(eth)) {
4887                                 res_sram = platform_get_resource(pdev, IORESOURCE_MEM, 1);
4888                                 if (!res_sram) {
4889                                         err = -EINVAL;
4890                                         goto err_destroy_sgmii;
4891                                 }
4892                                 eth->phy_scratch_ring = res_sram->start;
4893                         } else {
4894                                 eth->phy_scratch_ring = res->start + MTK_ETH_SRAM_OFFSET;
4895                         }
4896                 }
4897         }
4898
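        /* Register every WDMA block referenced by a "mediatek,wed" phandle
         * with the WED driver so that Wi-Fi offload can be wired up later.
         */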
4899         if (eth->soc->offload_version) {
4900                 for (i = 0;; i++) {
4901                         struct device_node *np;
4902                         phys_addr_t wdma_phy;
4903                         u32 wdma_base;
4904
4905                         if (i >= ARRAY_SIZE(eth->soc->reg_map->wdma_base))
4906                                 break;
4907
4908                         np = of_parse_phandle(pdev->dev.of_node,
4909                                               "mediatek,wed", i);
4910                         if (!np)
4911                                 break;
4912
4913                         wdma_base = eth->soc->reg_map->wdma_base[i];
4914                         wdma_phy = res ? res->start + wdma_base : 0;
4915                         mtk_wed_add_hw(np, eth, eth->base + wdma_base,
4916                                        wdma_phy, i);
4917                 }
4918         }
4919
4920         for (i = 0; i < 3; i++) {
4921                 if (MTK_HAS_CAPS(eth->soc->caps, MTK_SHARED_INT) && i > 0)
4922                         eth->irq[i] = eth->irq[0];
4923                 else
4924                         eth->irq[i] = platform_get_irq(pdev, i);
4925                 if (eth->irq[i] < 0) {
4926                         dev_err(&pdev->dev, "no IRQ%d resource found\n", i);
4927                         err = -ENXIO;
4928                         goto err_wed_exit;
4929                 }
4930         }
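        /* Acquire all known clocks. Missing optional clocks are tolerated and
         * left as NULL, missing required ones fail the probe, and
         * -EPROBE_DEFER is propagated so probing can be retried once the
         * clock provider shows up.
         */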
4931         for (i = 0; i < ARRAY_SIZE(eth->clks); i++) {
4932                 eth->clks[i] = devm_clk_get(eth->dev,
4933                                             mtk_clks_source_name[i]);
4934                 if (IS_ERR(eth->clks[i])) {
4935                         if (PTR_ERR(eth->clks[i]) == -EPROBE_DEFER) {
4936                                 err = -EPROBE_DEFER;
4937                                 goto err_wed_exit;
4938                         }
4939                         if (eth->soc->required_clks & BIT(i)) {
4940                                 dev_err(&pdev->dev, "clock %s not found\n",
4941                                         mtk_clks_source_name[i]);
4942                                 err = -EINVAL;
4943                                 goto err_wed_exit;
4944                         }
4945                         eth->clks[i] = NULL;
4946                 }
4947         }
4948
4949         eth->msg_enable = netif_msg_init(mtk_msg_level, MTK_DEFAULT_MSG_ENABLE);
4950         INIT_WORK(&eth->pending_work, mtk_pending_work);
4951
4952         err = mtk_hw_init(eth, false);
4953         if (err)
4954                 goto err_wed_exit;
4955
4956         eth->hwlro = MTK_HAS_CAPS(eth->soc->caps, MTK_HWLRO);
4957
4958         for_each_child_of_node(pdev->dev.of_node, mac_np) {
4959                 if (!of_device_is_compatible(mac_np,
4960                                              "mediatek,eth-mac"))
4961                         continue;
4962
4963                 if (!of_device_is_available(mac_np))
4964                         continue;
4965
4966                 err = mtk_add_mac(eth, mac_np);
4967                 if (err) {
4968                         of_node_put(mac_np);
4969                         goto err_deinit_hw;
4970                 }
4971         }
4972
4973         if (MTK_HAS_CAPS(eth->soc->caps, MTK_SHARED_INT)) {
4974                 err = devm_request_irq(eth->dev, eth->irq[0],
4975                                        mtk_handle_irq, 0,
4976                                        dev_name(eth->dev), eth);
4977         } else {
4978                 err = devm_request_irq(eth->dev, eth->irq[1],
4979                                        mtk_handle_irq_tx, 0,
4980                                        dev_name(eth->dev), eth);
4981                 if (err)
4982                         goto err_free_dev;
4983
4984                 err = devm_request_irq(eth->dev, eth->irq[2],
4985                                        mtk_handle_irq_rx, 0,
4986                                        dev_name(eth->dev), eth);
4987         }
4988         if (err)
4989                 goto err_free_dev;
4990
4991         /* No MT7628/88 support yet */
4992         if (!MTK_HAS_CAPS(eth->soc->caps, MTK_SOC_MT7628)) {
4993                 err = mtk_mdio_init(eth);
4994                 if (err)
4995                         goto err_free_dev;
4996         }
4997
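        /* Initialise one PPE (packet processing engine) instance per unit the
         * SoC provides; each instance sits at ppe_base plus a fixed per-index
         * offset (0x400 apart, with the third unit at 0xc00) and gets its own
         * flow offload state.
         */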
4998         if (eth->soc->offload_version) {
4999                 u8 ppe_num = eth->soc->ppe_num;
5000
5001                 ppe_num = min_t(u8, ARRAY_SIZE(eth->ppe), ppe_num);
5002                 for (i = 0; i < ppe_num; i++) {
5003                         u32 ppe_addr = eth->soc->reg_map->ppe_base;
5004
5005                         ppe_addr += (i == 2 ? 0xc00 : i * 0x400);
5006                         eth->ppe[i] = mtk_ppe_init(eth, eth->base + ppe_addr, i);
5007
5008                         if (!eth->ppe[i]) {
5009                                 err = -ENOMEM;
5010                                 goto err_deinit_ppe;
5011                         }
5012                         err = mtk_eth_offload_init(eth, i);
5013
5014                         if (err)
5015                                 goto err_deinit_ppe;
5016                 }
5017         }
5018
5019         for (i = 0; i < MTK_MAX_DEVS; i++) {
5020                 if (!eth->netdev[i])
5021                         continue;
5022
5023                 err = register_netdev(eth->netdev[i]);
5024                 if (err) {
5025                         dev_err(eth->dev, "error bringing up device\n");
5026                         goto err_deinit_ppe;
5027                 }
5028                 netif_info(eth, probe, eth->netdev[i],
5029                            "mediatek frame engine at 0x%08lx, irq %d\n",
5030                            eth->netdev[i]->base_addr, eth->irq[0]);
5031         }
5032
5033         /* We run 2 devices on the same DMA ring, so we need a dummy device
5034          * for NAPI to work.
5035          */
5036         eth->dummy_dev = alloc_netdev_dummy(0);
5037         if (!eth->dummy_dev) {
5038                 err = -ENOMEM;
5039                 dev_err(eth->dev, "failed to allocate dummy device\n");
5040                 goto err_unreg_netdev;
5041         }
5042         netif_napi_add(eth->dummy_dev, &eth->tx_napi, mtk_napi_tx);
5043         netif_napi_add(eth->dummy_dev, &eth->rx_napi, mtk_napi_rx);
5044
5045         platform_set_drvdata(pdev, eth);
5046         schedule_delayed_work(&eth->reset.monitor_work,
5047                               MTK_DMA_MONITOR_TIMEOUT);
5048
5049         return 0;
5050
5051 err_unreg_netdev:
5052         mtk_unreg_dev(eth);
5053 err_deinit_ppe:
5054         mtk_ppe_deinit(eth);
5055         mtk_mdio_cleanup(eth);
5056 err_free_dev:
5057         mtk_free_dev(eth);
5058 err_deinit_hw:
5059         mtk_hw_deinit(eth);
5060 err_wed_exit:
5061         mtk_wed_exit();
5062 err_destroy_sgmii:
5063         mtk_sgmii_destroy(eth);
5064
5065         return err;
5066 }
5067
5068 static void mtk_remove(struct platform_device *pdev)
5069 {
5070         struct mtk_eth *eth = platform_get_drvdata(pdev);
5071         struct mtk_mac *mac;
5072         int i;
5073
5074         /* stop all devices to make sure that dma is properly shut down */
5075         for (i = 0; i < MTK_MAX_DEVS; i++) {
5076                 if (!eth->netdev[i])
5077                         continue;
5078                 mtk_stop(eth->netdev[i]);
5079                 mac = netdev_priv(eth->netdev[i]);
5080                 phylink_disconnect_phy(mac->phylink);
5081         }
5082
5083         mtk_wed_exit();
5084         mtk_hw_deinit(eth);
5085
5086         netif_napi_del(&eth->tx_napi);
5087         netif_napi_del(&eth->rx_napi);
5088         mtk_cleanup(eth);
5089         free_netdev(eth->dummy_dev);
5090         mtk_mdio_cleanup(eth);
5091 }
5092
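/* Per-SoC configuration: register map, capability flags, required clocks and
 * the TX/RX descriptor geometry. These tables are matched against the DT
 * compatible strings in of_mtk_match[] at the end of the file.
 */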
5093 static const struct mtk_soc_data mt2701_data = {
5094         .reg_map = &mtk_reg_map,
5095         .caps = MT7623_CAPS | MTK_HWLRO,
5096         .hw_features = MTK_HW_FEATURES,
5097         .required_clks = MT7623_CLKS_BITMAP,
5098         .required_pctl = true,
5099         .version = 1,
5100         .tx = {
5101                 .desc_size = sizeof(struct mtk_tx_dma),
5102                 .dma_max_len = MTK_TX_DMA_BUF_LEN,
5103                 .dma_len_offset = 16,
5104                 .dma_size = MTK_DMA_SIZE(2K),
5105                 .fq_dma_size = MTK_DMA_SIZE(2K),
5106         },
5107         .rx = {
5108                 .desc_size = sizeof(struct mtk_rx_dma),
5109                 .irq_done_mask = MTK_RX_DONE_INT,
5110                 .dma_l4_valid = RX_DMA_L4_VALID,
5111                 .dma_size = MTK_DMA_SIZE(2K),
5112                 .dma_max_len = MTK_TX_DMA_BUF_LEN,
5113                 .dma_len_offset = 16,
5114         },
5115 };
5116
5117 static const struct mtk_soc_data mt7621_data = {
5118         .reg_map = &mtk_reg_map,
5119         .caps = MT7621_CAPS,
5120         .hw_features = MTK_HW_FEATURES,
5121         .required_clks = MT7621_CLKS_BITMAP,
5122         .required_pctl = false,
5123         .version = 1,
5124         .offload_version = 1,
5125         .ppe_num = 1,
5126         .hash_offset = 2,
5127         .foe_entry_size = MTK_FOE_ENTRY_V1_SIZE,
5128         .tx = {
5129                 .desc_size = sizeof(struct mtk_tx_dma),
5130                 .dma_max_len = MTK_TX_DMA_BUF_LEN,
5131                 .dma_len_offset = 16,
5132                 .dma_size = MTK_DMA_SIZE(2K),
5133                 .fq_dma_size = MTK_DMA_SIZE(2K),
5134         },
5135         .rx = {
5136                 .desc_size = sizeof(struct mtk_rx_dma),
5137                 .irq_done_mask = MTK_RX_DONE_INT,
5138                 .dma_l4_valid = RX_DMA_L4_VALID,
5139                 .dma_size = MTK_DMA_SIZE(2K),
5140                 .dma_max_len = MTK_TX_DMA_BUF_LEN,
5141                 .dma_len_offset = 16,
5142         },
5143 };
5144
5145 static const struct mtk_soc_data mt7622_data = {
5146         .reg_map = &mtk_reg_map,
5147         .ana_rgc3 = 0x2028,
5148         .caps = MT7622_CAPS | MTK_HWLRO,
5149         .hw_features = MTK_HW_FEATURES,
5150         .required_clks = MT7622_CLKS_BITMAP,
5151         .required_pctl = false,
5152         .version = 1,
5153         .offload_version = 2,
5154         .ppe_num = 1,
5155         .hash_offset = 2,
5156         .has_accounting = true,
5157         .foe_entry_size = MTK_FOE_ENTRY_V1_SIZE,
5158         .tx = {
5159                 .desc_size = sizeof(struct mtk_tx_dma),
5160                 .dma_max_len = MTK_TX_DMA_BUF_LEN,
5161                 .dma_len_offset = 16,
5162                 .dma_size = MTK_DMA_SIZE(2K),
5163                 .fq_dma_size = MTK_DMA_SIZE(2K),
5164         },
5165         .rx = {
5166                 .desc_size = sizeof(struct mtk_rx_dma),
5167                 .irq_done_mask = MTK_RX_DONE_INT,
5168                 .dma_l4_valid = RX_DMA_L4_VALID,
5169                 .dma_size = MTK_DMA_SIZE(2K),
5170                 .dma_max_len = MTK_TX_DMA_BUF_LEN,
5171                 .dma_len_offset = 16,
5172         },
5173 };
5174
5175 static const struct mtk_soc_data mt7623_data = {
5176         .reg_map = &mtk_reg_map,
5177         .caps = MT7623_CAPS | MTK_HWLRO,
5178         .hw_features = MTK_HW_FEATURES,
5179         .required_clks = MT7623_CLKS_BITMAP,
5180         .required_pctl = true,
5181         .version = 1,
5182         .offload_version = 1,
5183         .ppe_num = 1,
5184         .hash_offset = 2,
5185         .foe_entry_size = MTK_FOE_ENTRY_V1_SIZE,
5186         .disable_pll_modes = true,
5187         .tx = {
5188                 .desc_size = sizeof(struct mtk_tx_dma),
5189                 .dma_max_len = MTK_TX_DMA_BUF_LEN,
5190                 .dma_len_offset = 16,
5191                 .dma_size = MTK_DMA_SIZE(2K),
5192                 .fq_dma_size = MTK_DMA_SIZE(2K),
5193         },
5194         .rx = {
5195                 .desc_size = sizeof(struct mtk_rx_dma),
5196                 .irq_done_mask = MTK_RX_DONE_INT,
5197                 .dma_l4_valid = RX_DMA_L4_VALID,
5198                 .dma_size = MTK_DMA_SIZE(2K),
5199                 .dma_max_len = MTK_TX_DMA_BUF_LEN,
5200                 .dma_len_offset = 16,
5201         },
5202 };
5203
5204 static const struct mtk_soc_data mt7629_data = {
5205         .reg_map = &mtk_reg_map,
5206         .ana_rgc3 = 0x128,
5207         .caps = MT7629_CAPS | MTK_HWLRO,
5208         .hw_features = MTK_HW_FEATURES,
5209         .required_clks = MT7629_CLKS_BITMAP,
5210         .required_pctl = false,
5211         .has_accounting = true,
5212         .version = 1,
5213         .tx = {
5214                 .desc_size = sizeof(struct mtk_tx_dma),
5215                 .dma_max_len = MTK_TX_DMA_BUF_LEN,
5216                 .dma_len_offset = 16,
5217                 .dma_size = MTK_DMA_SIZE(2K),
5218                 .fq_dma_size = MTK_DMA_SIZE(2K),
5219         },
5220         .rx = {
5221                 .desc_size = sizeof(struct mtk_rx_dma),
5222                 .irq_done_mask = MTK_RX_DONE_INT,
5223                 .dma_l4_valid = RX_DMA_L4_VALID,
5224                 .dma_size = MTK_DMA_SIZE(2K),
5225                 .dma_max_len = MTK_TX_DMA_BUF_LEN,
5226                 .dma_len_offset = 16,
5227         },
5228 };
5229
5230 static const struct mtk_soc_data mt7981_data = {
5231         .reg_map = &mt7986_reg_map,
5232         .ana_rgc3 = 0x128,
5233         .caps = MT7981_CAPS,
5234         .hw_features = MTK_HW_FEATURES,
5235         .required_clks = MT7981_CLKS_BITMAP,
5236         .required_pctl = false,
5237         .version = 2,
5238         .offload_version = 2,
5239         .ppe_num = 2,
5240         .hash_offset = 4,
5241         .has_accounting = true,
5242         .foe_entry_size = MTK_FOE_ENTRY_V2_SIZE,
5243         .tx = {
5244                 .desc_size = sizeof(struct mtk_tx_dma_v2),
5245                 .dma_max_len = MTK_TX_DMA_BUF_LEN_V2,
5246                 .dma_len_offset = 8,
5247                 .dma_size = MTK_DMA_SIZE(2K),
5248                 .fq_dma_size = MTK_DMA_SIZE(2K),
5249         },
5250         .rx = {
5251                 .desc_size = sizeof(struct mtk_rx_dma),
5252                 .irq_done_mask = MTK_RX_DONE_INT,
5253                 .dma_l4_valid = RX_DMA_L4_VALID_V2,
5254                 .dma_max_len = MTK_TX_DMA_BUF_LEN,
5255                 .dma_len_offset = 16,
5256                 .dma_size = MTK_DMA_SIZE(2K),
5257         },
5258 };
5259
5260 static const struct mtk_soc_data mt7986_data = {
5261         .reg_map = &mt7986_reg_map,
5262         .ana_rgc3 = 0x128,
5263         .caps = MT7986_CAPS,
5264         .hw_features = MTK_HW_FEATURES,
5265         .required_clks = MT7986_CLKS_BITMAP,
5266         .required_pctl = false,
5267         .version = 2,
5268         .offload_version = 2,
5269         .ppe_num = 2,
5270         .hash_offset = 4,
5271         .has_accounting = true,
5272         .foe_entry_size = MTK_FOE_ENTRY_V2_SIZE,
5273         .tx = {
5274                 .desc_size = sizeof(struct mtk_tx_dma_v2),
5275                 .dma_max_len = MTK_TX_DMA_BUF_LEN_V2,
5276                 .dma_len_offset = 8,
5277                 .dma_size = MTK_DMA_SIZE(2K),
5278                 .fq_dma_size = MTK_DMA_SIZE(2K),
5279         },
5280         .rx = {
5281                 .desc_size = sizeof(struct mtk_rx_dma),
5282                 .irq_done_mask = MTK_RX_DONE_INT,
5283                 .dma_l4_valid = RX_DMA_L4_VALID_V2,
5284                 .dma_max_len = MTK_TX_DMA_BUF_LEN,
5285                 .dma_len_offset = 16,
5286                 .dma_size = MTK_DMA_SIZE(2K),
5287         },
5288 };
5289
5290 static const struct mtk_soc_data mt7988_data = {
5291         .reg_map = &mt7988_reg_map,
5292         .ana_rgc3 = 0x128,
5293         .caps = MT7988_CAPS,
5294         .hw_features = MTK_HW_FEATURES,
5295         .required_clks = MT7988_CLKS_BITMAP,
5296         .required_pctl = false,
5297         .version = 3,
5298         .offload_version = 2,
5299         .ppe_num = 3,
5300         .hash_offset = 4,
5301         .has_accounting = true,
5302         .foe_entry_size = MTK_FOE_ENTRY_V3_SIZE,
5303         .tx = {
5304                 .desc_size = sizeof(struct mtk_tx_dma_v2),
5305                 .dma_max_len = MTK_TX_DMA_BUF_LEN_V2,
5306                 .dma_len_offset = 8,
5307                 .dma_size = MTK_DMA_SIZE(2K),
5308                 .fq_dma_size = MTK_DMA_SIZE(4K),
5309         },
5310         .rx = {
5311                 .desc_size = sizeof(struct mtk_rx_dma_v2),
5312                 .irq_done_mask = MTK_RX_DONE_INT_V2,
5313                 .dma_l4_valid = RX_DMA_L4_VALID_V2,
5314                 .dma_max_len = MTK_TX_DMA_BUF_LEN_V2,
5315                 .dma_len_offset = 8,
5316                 .dma_size = MTK_DMA_SIZE(2K),
5317         },
5318 };
5319
5320 static const struct mtk_soc_data rt5350_data = {
5321         .reg_map = &mt7628_reg_map,
5322         .caps = MT7628_CAPS,
5323         .hw_features = MTK_HW_FEATURES_MT7628,
5324         .required_clks = MT7628_CLKS_BITMAP,
5325         .required_pctl = false,
5326         .version = 1,
5327         .tx = {
5328                 .desc_size = sizeof(struct mtk_tx_dma),
5329                 .dma_max_len = MTK_TX_DMA_BUF_LEN,
5330                 .dma_len_offset = 16,
5331                 .dma_size = MTK_DMA_SIZE(2K),
5332         },
5333         .rx = {
5334                 .desc_size = sizeof(struct mtk_rx_dma),
5335                 .irq_done_mask = MTK_RX_DONE_INT,
5336                 .dma_l4_valid = RX_DMA_L4_VALID_PDMA,
5337                 .dma_max_len = MTK_TX_DMA_BUF_LEN,
5338                 .dma_len_offset = 16,
5339                 .dma_size = MTK_DMA_SIZE(2K),
5340         },
5341 };
5342
5343 const struct of_device_id of_mtk_match[] = {
5344         { .compatible = "mediatek,mt2701-eth", .data = &mt2701_data },
5345         { .compatible = "mediatek,mt7621-eth", .data = &mt7621_data },
5346         { .compatible = "mediatek,mt7622-eth", .data = &mt7622_data },
5347         { .compatible = "mediatek,mt7623-eth", .data = &mt7623_data },
5348         { .compatible = "mediatek,mt7629-eth", .data = &mt7629_data },
5349         { .compatible = "mediatek,mt7981-eth", .data = &mt7981_data },
5350         { .compatible = "mediatek,mt7986-eth", .data = &mt7986_data },
5351         { .compatible = "mediatek,mt7988-eth", .data = &mt7988_data },
5352         { .compatible = "ralink,rt5350-eth", .data = &rt5350_data },
5353         {},
5354 };
5355 MODULE_DEVICE_TABLE(of, of_mtk_match);
5356
5357 static struct platform_driver mtk_driver = {
5358         .probe = mtk_probe,
5359         .remove = mtk_remove,
5360         .driver = {
5361                 .name = "mtk_soc_eth",
5362                 .of_match_table = of_mtk_match,
5363         },
5364 };
5365
5366 module_platform_driver(mtk_driver);
5367
5368 MODULE_LICENSE("GPL");
5369 MODULE_AUTHOR("John Crispin <[email protected]>");
5370 MODULE_DESCRIPTION("Ethernet driver for MediaTek SoC");