drivers/net/ethernet/microchip/lan966x/lan966x_main.c
1 // SPDX-License-Identifier: GPL-2.0+
2
3 #include <linux/module.h>
4 #include <linux/if_bridge.h>
5 #include <linux/if_vlan.h>
6 #include <linux/iopoll.h>
7 #include <linux/ip.h>
8 #include <linux/of.h>
9 #include <linux/of_net.h>
10 #include <linux/phy/phy.h>
11 #include <linux/platform_device.h>
12 #include <linux/reset.h>
13 #include <net/addrconf.h>
14
15 #include "lan966x_main.h"
16
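/* Status words that the manual extraction (XTR) FIFO can return in place of
 * frame data. Each code carries 0x80 in its low byte; the EOF/PRUNED codes
 * additionally encode, via XTR_VALID_BYTES(), how many bytes (1-4) of the
 * last data word are valid. XTR_ESCAPE marks that the following word is real
 * frame data which happens to collide with one of these codes.
 */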
17 #define XTR_EOF_0                       0x00000080U
18 #define XTR_EOF_1                       0x01000080U
19 #define XTR_EOF_2                       0x02000080U
20 #define XTR_EOF_3                       0x03000080U
21 #define XTR_PRUNED                      0x04000080U
22 #define XTR_ABORT                       0x05000080U
23 #define XTR_ESCAPE                      0x06000080U
24 #define XTR_NOT_READY                   0x07000080U
25 #define XTR_VALID_BYTES(x)              (4 - (((x) >> 24) & 3))
26
27 #define IO_RANGES 2
28
29 static const struct of_device_id lan966x_match[] = {
30         { .compatible = "microchip,lan966x-switch" },
31         { }
32 };
33 MODULE_DEVICE_TABLE(of, lan966x_match);
34
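/* Each register target is an offset into one of the IO_RANGES memory
 * resources that get ioremapped in lan966x_create_targets().
 */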
35 struct lan966x_main_io_resource {
36         enum lan966x_target id;
37         phys_addr_t offset;
38         int range;
39 };
40
41 static const struct lan966x_main_io_resource lan966x_main_iomap[] =  {
42         { TARGET_CPU,                   0xc0000, 0 }, /* 0xe00c0000 */
43         { TARGET_FDMA,                  0xc0400, 0 }, /* 0xe00c0400 */
44         { TARGET_ORG,                         0, 1 }, /* 0xe2000000 */
45         { TARGET_GCB,                    0x4000, 1 }, /* 0xe2004000 */
46         { TARGET_QS,                     0x8000, 1 }, /* 0xe2008000 */
47         { TARGET_PTP,                    0xc000, 1 }, /* 0xe200c000 */
48         { TARGET_CHIP_TOP,              0x10000, 1 }, /* 0xe2010000 */
49         { TARGET_REW,                   0x14000, 1 }, /* 0xe2014000 */
50         { TARGET_VCAP,                  0x18000, 1 }, /* 0xe2018000 */
51         { TARGET_VCAP + 1,              0x20000, 1 }, /* 0xe2020000 */
52         { TARGET_VCAP + 2,              0x24000, 1 }, /* 0xe2024000 */
53         { TARGET_SYS,                   0x28000, 1 }, /* 0xe2028000 */
54         { TARGET_DEV,                   0x34000, 1 }, /* 0xe2034000 */
55         { TARGET_DEV +  1,              0x38000, 1 }, /* 0xe2038000 */
56         { TARGET_DEV +  2,              0x3c000, 1 }, /* 0xe203c000 */
57         { TARGET_DEV +  3,              0x40000, 1 }, /* 0xe2040000 */
58         { TARGET_DEV +  4,              0x44000, 1 }, /* 0xe2044000 */
59         { TARGET_DEV +  5,              0x48000, 1 }, /* 0xe2048000 */
60         { TARGET_DEV +  6,              0x4c000, 1 }, /* 0xe204c000 */
61         { TARGET_DEV +  7,              0x50000, 1 }, /* 0xe2050000 */
62         { TARGET_QSYS,                 0x100000, 1 }, /* 0xe2100000 */
63         { TARGET_AFI,                  0x120000, 1 }, /* 0xe2120000 */
64         { TARGET_ANA,                  0x140000, 1 }, /* 0xe2140000 */
65 };
66
67 static int lan966x_create_targets(struct platform_device *pdev,
68                                   struct lan966x *lan966x)
69 {
70         struct resource *iores[IO_RANGES];
71         void __iomem *begin[IO_RANGES];
72         int idx;
73
74         /* Initially map the entire range and after that update each target to
75          * point inside the region at the correct offset. It is possible that
76          * other devices access the same region, so don't add any exclusivity
77          * checks here.
78          */
79         for (idx = 0; idx < IO_RANGES; idx++) {
80                 iores[idx] = platform_get_resource(pdev, IORESOURCE_MEM,
81                                                    idx);
82                 if (!iores[idx]) {
83                         dev_err(&pdev->dev, "Invalid resource\n");
84                         return -EINVAL;
85                 }
86
87                 begin[idx] = devm_ioremap(&pdev->dev,
88                                           iores[idx]->start,
89                                           resource_size(iores[idx]));
90                 if (!begin[idx]) {
91                         dev_err(&pdev->dev, "Unable to get registers: %s\n",
92                                 iores[idx]->name);
93                         return -ENOMEM;
94                 }
95         }
96
97         for (idx = 0; idx < ARRAY_SIZE(lan966x_main_iomap); idx++) {
98                 const struct lan966x_main_io_resource *iomap =
99                         &lan966x_main_iomap[idx];
100
101                 lan966x->regs[iomap->id] = begin[iomap->range] + iomap->offset;
102         }
103
104         return 0;
105 }
106
107 static bool lan966x_port_unique_address(struct net_device *dev)
108 {
109         struct lan966x_port *port = netdev_priv(dev);
110         struct lan966x *lan966x = port->lan966x;
111         int p;
112
113         for (p = 0; p < lan966x->num_phys_ports; ++p) {
114                 port = lan966x->ports[p];
115                 if (!port || port->dev == dev)
116                         continue;
117
118                 if (ether_addr_equal(dev->dev_addr, port->dev->dev_addr))
119                         return false;
120         }
121
122         return true;
123 }
124
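/* Change the port MAC address: learn the new address in the MAC table before
 * forgetting the old one, so frames to at least one of the two addresses keep
 * reaching the CPU during the switch-over. The old address is kept in the
 * table if another port still uses it.
 */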
125 static int lan966x_port_set_mac_address(struct net_device *dev, void *p)
126 {
127         struct lan966x_port *port = netdev_priv(dev);
128         struct lan966x *lan966x = port->lan966x;
129         const struct sockaddr *addr = p;
130         int ret;
131
132         if (ether_addr_equal(addr->sa_data, dev->dev_addr))
133                 return 0;
134
135         /* Learn the new net device MAC address in the mac table. */
136         ret = lan966x_mac_cpu_learn(lan966x, addr->sa_data, HOST_PVID);
137         if (ret)
138                 return ret;
139
140         /* If there is another port with the same address as the dev, then don't
141          * delete it from the MAC table
142          */
143         if (!lan966x_port_unique_address(dev))
144                 goto out;
145
146         /* Then forget the previous one. */
147         ret = lan966x_mac_cpu_forget(lan966x, dev->dev_addr, HOST_PVID);
148         if (ret)
149                 return ret;
150
151 out:
152         eth_hw_addr_set(dev, addr->sa_data);
153         return ret;
154 }
155
156 static int lan966x_port_get_phys_port_name(struct net_device *dev,
157                                            char *buf, size_t len)
158 {
159         struct lan966x_port *port = netdev_priv(dev);
160         int ret;
161
162         ret = snprintf(buf, len, "p%d", port->chip_port);
163         if (ret >= len)
164                 return -EINVAL;
165
166         return 0;
167 }
168
169 static int lan966x_port_open(struct net_device *dev)
170 {
171         struct lan966x_port *port = netdev_priv(dev);
172         struct lan966x *lan966x = port->lan966x;
173         int err;
174
175         /* Enable receiving frames on the port, and activate auto-learning of
176          * MAC addresses.
177          */
178         lan_rmw(ANA_PORT_CFG_LEARNAUTO_SET(1) |
179                 ANA_PORT_CFG_RECV_ENA_SET(1) |
180                 ANA_PORT_CFG_PORTID_VAL_SET(port->chip_port),
181                 ANA_PORT_CFG_LEARNAUTO |
182                 ANA_PORT_CFG_RECV_ENA |
183                 ANA_PORT_CFG_PORTID_VAL,
184                 lan966x, ANA_PORT_CFG(port->chip_port));
185
186         err = phylink_fwnode_phy_connect(port->phylink, port->fwnode, 0);
187         if (err) {
188                 netdev_err(dev, "Could not attach to PHY\n");
189                 return err;
190         }
191
192         phylink_start(port->phylink);
193
194         return 0;
195 }
196
197 static int lan966x_port_stop(struct net_device *dev)
198 {
199         struct lan966x_port *port = netdev_priv(dev);
200
201         lan966x_port_config_down(port);
202         phylink_stop(port->phylink);
203         phylink_disconnect_phy(port->phylink);
204
205         return 0;
206 }
207
208 static int lan966x_port_inj_status(struct lan966x *lan966x)
209 {
210         return lan_rd(lan966x, QS_INJ_STATUS);
211 }
212
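/* Busy-wait for the injection FIFO of group @grp to become ready for the
 * next word; returns 0 when ready or -ETIMEDOUT after READL_TIMEOUT_US.
 */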
213 static int lan966x_port_inj_ready(struct lan966x *lan966x, u8 grp)
214 {
215         u32 val;
216
217         if (lan_rd(lan966x, QS_INJ_STATUS) & QS_INJ_STATUS_FIFO_RDY_SET(BIT(grp)))
218                 return 0;
219
220         return readx_poll_timeout_atomic(lan966x_port_inj_status, lan966x, val,
221                                          QS_INJ_STATUS_FIFO_RDY_GET(val) & BIT(grp),
222                                          READL_SLEEP_US, READL_TIMEOUT_US);
223 }
224
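/* Register-based (manual) frame injection, used when FDMA is not available:
 * signal SOF, push the IFH and the frame data word by word through
 * QS_INJ_WR, pad short frames up to the minimum buffer size, then signal EOF
 * with the number of valid bytes in the last word and add a dummy FCS word.
 */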
225 static int lan966x_port_ifh_xmit(struct sk_buff *skb,
226                                  __be32 *ifh,
227                                  struct net_device *dev)
228 {
229         struct lan966x_port *port = netdev_priv(dev);
230         struct lan966x *lan966x = port->lan966x;
231         u32 i, count, last;
232         u8 grp = 0;
233         u32 val;
234         int err;
235
236         val = lan_rd(lan966x, QS_INJ_STATUS);
237         if (!(QS_INJ_STATUS_FIFO_RDY_GET(val) & BIT(grp)) ||
238             (QS_INJ_STATUS_WMARK_REACHED_GET(val) & BIT(grp)))
239                 goto err;
240
241         /* Write start of frame */
242         lan_wr(QS_INJ_CTRL_GAP_SIZE_SET(1) |
243                QS_INJ_CTRL_SOF_SET(1),
244                lan966x, QS_INJ_CTRL(grp));
245
246         /* Write IFH header */
247         for (i = 0; i < IFH_LEN; ++i) {
248                 /* Wait until the fifo is ready */
249                 err = lan966x_port_inj_ready(lan966x, grp);
250                 if (err)
251                         goto err;
252
253                 lan_wr((__force u32)ifh[i], lan966x, QS_INJ_WR(grp));
254         }
255
256         /* Write frame */
257         count = DIV_ROUND_UP(skb->len, 4);
258         last = skb->len % 4;
259         for (i = 0; i < count; ++i) {
260                 /* Wait until the fifo is ready */
261                 err = lan966x_port_inj_ready(lan966x, grp);
262                 if (err)
263                         goto err;
264
265                 lan_wr(((u32 *)skb->data)[i], lan966x, QS_INJ_WR(grp));
266         }
267
268         /* Add padding */
269         while (i < (LAN966X_BUFFER_MIN_SZ / 4)) {
270                 /* Wait until the fifo is ready */
271                 err = lan966x_port_inj_ready(lan966x, grp);
272                 if (err)
273                         goto err;
274
275                 lan_wr(0, lan966x, QS_INJ_WR(grp));
276                 ++i;
277         }
278
279         /* Indicate EOF and valid bytes in the last word */
280         lan_wr(QS_INJ_CTRL_GAP_SIZE_SET(1) |
281                QS_INJ_CTRL_VLD_BYTES_SET(skb->len < LAN966X_BUFFER_MIN_SZ ?
282                                      0 : last) |
283                QS_INJ_CTRL_EOF_SET(1),
284                lan966x, QS_INJ_CTRL(grp));
285
286         /* Add dummy CRC */
287         lan_wr(0, lan966x, QS_INJ_WR(grp));
288         skb_tx_timestamp(skb);
289
290         dev->stats.tx_packets++;
291         dev->stats.tx_bytes += skb->len;
292
293         if (skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP &&
294             LAN966X_SKB_CB(skb)->rew_op == IFH_REW_OP_TWO_STEP_PTP)
295                 return NETDEV_TX_OK;
296
297         dev_consume_skb_any(skb);
298         return NETDEV_TX_OK;
299
300 err:
301         if (skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP &&
302             LAN966X_SKB_CB(skb)->rew_op == IFH_REW_OP_TWO_STEP_PTP)
303                 lan966x_ptp_txtstamp_release(port, skb);
304
305         return NETDEV_TX_BUSY;
306 }
307
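/* Pack @val into the injection frame header at bit position @pos, @length
 * bits wide. Bit 0 lives in the last IFH byte (ifh[IFH_LEN_BYTES - 1]), and
 * a byte of the value may straddle two IFH bytes, hence the pair of
 * OR-writes per iteration.
 */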
308 static void lan966x_ifh_set(u8 *ifh, size_t val, size_t pos, size_t length)
309 {
310         int i = 0;
311
312         do {
313                 u8 p = IFH_LEN_BYTES - (pos + i) / 8 - 1;
314                 u8 v = val >> i & 0xff;
315
316                 /* There is no need to check the limits of the array, as these
317                  * positions will never be written
318                  */
319                 ifh[p] |= v << ((pos + i) % 8);
320                 ifh[p - 1] |= v >> (8 - (pos + i) % 8);
321
322                 i += 8;
323         } while (i < length);
324 }
325
326 void lan966x_ifh_set_bypass(void *ifh, u64 bypass)
327 {
328         lan966x_ifh_set(ifh, bypass, IFH_POS_BYPASS, IFH_WID_BYPASS);
329 }
330
331 void lan966x_ifh_set_port(void *ifh, u64 port)
332 {
333         lan966x_ifh_set(ifh, port, IFH_POS_DSTS, IFH_WID_DSTS);
334 }
335
336 static void lan966x_ifh_set_qos_class(void *ifh, u64 qos)
337 {
338         lan966x_ifh_set(ifh, qos, IFH_POS_QOS_CLASS, IFH_WID_QOS_CLASS);
339 }
340
341 static void lan966x_ifh_set_ipv(void *ifh, u64 ipv)
342 {
343         lan966x_ifh_set(ifh, ipv, IFH_POS_IPV, IFH_WID_IPV);
344 }
345
346 static void lan966x_ifh_set_vid(void *ifh, u64 vid)
347 {
348         lan966x_ifh_set(ifh, vid, IFH_POS_TCI, IFH_WID_TCI);
349 }
350
351 static void lan966x_ifh_set_rew_op(void *ifh, u64 rew_op)
352 {
353         lan966x_ifh_set(ifh, rew_op, IFH_POS_REW_CMD, IFH_WID_REW_CMD);
354 }
355
356 static void lan966x_ifh_set_timestamp(void *ifh, u64 timestamp)
357 {
358         lan966x_ifh_set(ifh, timestamp, IFH_POS_TIMESTAMP, IFH_WID_TIMESTAMP);
359 }
360
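/* Build the injection frame header (destination port mask, QoS class, VLAN
 * tag and optional PTP rewrite command/timestamp id) and hand the skb to
 * either the FDMA engine or the manual injection path under tx_lock.
 */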
361 static netdev_tx_t lan966x_port_xmit(struct sk_buff *skb,
362                                      struct net_device *dev)
363 {
364         struct lan966x_port *port = netdev_priv(dev);
365         struct lan966x *lan966x = port->lan966x;
366         __be32 ifh[IFH_LEN];
367         int err;
368
369         memset(ifh, 0x0, sizeof(__be32) * IFH_LEN);
370
371         lan966x_ifh_set_bypass(ifh, 1);
372         lan966x_ifh_set_port(ifh, BIT_ULL(port->chip_port));
373         lan966x_ifh_set_qos_class(ifh, skb->priority >= 7 ? 0x7 : skb->priority);
374         lan966x_ifh_set_ipv(ifh, skb->priority >= 7 ? 0x7 : skb->priority);
375         lan966x_ifh_set_vid(ifh, skb_vlan_tag_get(skb));
376
377         if (port->lan966x->ptp && skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) {
378                 err = lan966x_ptp_txtstamp_request(port, skb);
379                 if (err)
380                         return err;
381
382                 lan966x_ifh_set_rew_op(ifh, LAN966X_SKB_CB(skb)->rew_op);
383                 lan966x_ifh_set_timestamp(ifh, LAN966X_SKB_CB(skb)->ts_id);
384         }
385
386         spin_lock(&lan966x->tx_lock);
387         if (port->lan966x->fdma)
388                 err = lan966x_fdma_xmit(skb, ifh, dev);
389         else
390                 err = lan966x_port_ifh_xmit(skb, ifh, dev);
391         spin_unlock(&lan966x->tx_lock);
392
393         return err;
394 }
395
396 static int lan966x_port_change_mtu(struct net_device *dev, int new_mtu)
397 {
398         struct lan966x_port *port = netdev_priv(dev);
399         struct lan966x *lan966x = port->lan966x;
400         int old_mtu = dev->mtu;
401         int err;
402
403         lan_wr(DEV_MAC_MAXLEN_CFG_MAX_LEN_SET(LAN966X_HW_MTU(new_mtu)),
404                lan966x, DEV_MAC_MAXLEN_CFG(port->chip_port));
405         WRITE_ONCE(dev->mtu, new_mtu);
406
407         if (!lan966x->fdma)
408                 return 0;
409
410         err = lan966x_fdma_change_mtu(lan966x);
411         if (err) {
412                 lan_wr(DEV_MAC_MAXLEN_CFG_MAX_LEN_SET(LAN966X_HW_MTU(old_mtu)),
413                        lan966x, DEV_MAC_MAXLEN_CFG(port->chip_port));
414                 dev->mtu = old_mtu;
415         }
416
417         return err;
418 }
419
420 static int lan966x_mc_unsync(struct net_device *dev, const unsigned char *addr)
421 {
422         struct lan966x_port *port = netdev_priv(dev);
423         struct lan966x *lan966x = port->lan966x;
424
425         return lan966x_mac_forget(lan966x, addr, HOST_PVID, ENTRYTYPE_LOCKED);
426 }
427
428 static int lan966x_mc_sync(struct net_device *dev, const unsigned char *addr)
429 {
430         struct lan966x_port *port = netdev_priv(dev);
431         struct lan966x *lan966x = port->lan966x;
432
433         return lan966x_mac_cpu_learn(lan966x, addr, HOST_PVID);
434 }
435
436 static void lan966x_port_set_rx_mode(struct net_device *dev)
437 {
438         __dev_mc_sync(dev, lan966x_mc_sync, lan966x_mc_unsync);
439 }
440
441 static int lan966x_port_get_parent_id(struct net_device *dev,
442                                       struct netdev_phys_item_id *ppid)
443 {
444         struct lan966x_port *port = netdev_priv(dev);
445         struct lan966x *lan966x = port->lan966x;
446
447         ppid->id_len = sizeof(lan966x->base_mac);
448         memcpy(&ppid->id, &lan966x->base_mac, ppid->id_len);
449
450         return 0;
451 }
452
453 static int lan966x_port_hwtstamp_get(struct net_device *dev,
454                                      struct kernel_hwtstamp_config *cfg)
455 {
456         struct lan966x_port *port = netdev_priv(dev);
457
458         if (!port->lan966x->ptp)
459                 return -EOPNOTSUPP;
460
461         lan966x_ptp_hwtstamp_get(port, cfg);
462
463         return 0;
464 }
465
466 static int lan966x_port_hwtstamp_set(struct net_device *dev,
467                                      struct kernel_hwtstamp_config *cfg,
468                                      struct netlink_ext_ack *extack)
469 {
470         struct lan966x_port *port = netdev_priv(dev);
471         int err;
472
473         if (cfg->source != HWTSTAMP_SOURCE_NETDEV &&
474             cfg->source != HWTSTAMP_SOURCE_PHYLIB)
475                 return -EOPNOTSUPP;
476
477         if (cfg->source == HWTSTAMP_SOURCE_NETDEV && !port->lan966x->ptp)
478                 return -EOPNOTSUPP;
479
480         err = lan966x_ptp_setup_traps(port, cfg);
481         if (err)
482                 return err;
483
484         if (cfg->source == HWTSTAMP_SOURCE_NETDEV) {
485                 err = lan966x_ptp_hwtstamp_set(port, cfg, extack);
486                 if (err) {
487                         lan966x_ptp_del_traps(port);
488                         return err;
489                 }
490         }
491
492         return 0;
493 }
494
495 static const struct net_device_ops lan966x_port_netdev_ops = {
496         .ndo_open                       = lan966x_port_open,
497         .ndo_stop                       = lan966x_port_stop,
498         .ndo_start_xmit                 = lan966x_port_xmit,
499         .ndo_change_mtu                 = lan966x_port_change_mtu,
500         .ndo_set_rx_mode                = lan966x_port_set_rx_mode,
501         .ndo_get_phys_port_name         = lan966x_port_get_phys_port_name,
502         .ndo_get_stats64                = lan966x_stats_get,
503         .ndo_set_mac_address            = lan966x_port_set_mac_address,
504         .ndo_get_port_parent_id         = lan966x_port_get_parent_id,
505         .ndo_eth_ioctl                  = phy_do_ioctl,
506         .ndo_setup_tc                   = lan966x_tc_setup,
507         .ndo_bpf                        = lan966x_xdp,
508         .ndo_xdp_xmit                   = lan966x_xdp_xmit,
509         .ndo_hwtstamp_get               = lan966x_port_hwtstamp_get,
510         .ndo_hwtstamp_set               = lan966x_port_hwtstamp_set,
511 };
512
513 bool lan966x_netdevice_check(const struct net_device *dev)
514 {
515         return dev->netdev_ops == &lan966x_port_netdev_ops;
516 }
517
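/* Decide whether a frame received on @port may keep its offload_fwd_mark,
 * i.e. whether the HW has already forwarded it. IGMP and MLD frames must be
 * handled in SW when the respective CPU redirection is enabled.
 */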
518 bool lan966x_hw_offload(struct lan966x *lan966x, u32 port, struct sk_buff *skb)
519 {
520         u32 val;
521
522         /* The IGMP and MLD frames are not forwarded by the HW if
523          * multicast snooping is enabled, therefore don't mark them as
524          * offloaded, to allow the SW to forward the frames accordingly.
525          */
526         val = lan_rd(lan966x, ANA_CPU_FWD_CFG(port));
527         if (!(val & (ANA_CPU_FWD_CFG_IGMP_REDIR_ENA |
528                      ANA_CPU_FWD_CFG_MLD_REDIR_ENA)))
529                 return true;
530
531         if (eth_type_vlan(skb->protocol)) {
532                 skb = skb_vlan_untag(skb);
533                 if (unlikely(!skb))
534                         return false;
535         }
536
537         if (skb->protocol == htons(ETH_P_IP) &&
538             ip_hdr(skb)->protocol == IPPROTO_IGMP)
539                 return false;
540
541         if (IS_ENABLED(CONFIG_IPV6) &&
542             skb->protocol == htons(ETH_P_IPV6) &&
543             ipv6_addr_is_multicast(&ipv6_hdr(skb)->daddr) &&
544             !ipv6_mc_check_mld(skb))
545                 return false;
546
547         return true;
548 }
549
550 static int lan966x_port_xtr_status(struct lan966x *lan966x, u8 grp)
551 {
552         return lan_rd(lan966x, QS_XTR_RD(grp));
553 }
554
555 static int lan966x_port_xtr_ready(struct lan966x *lan966x, u8 grp)
556 {
557         u32 val;
558
559         return read_poll_timeout(lan966x_port_xtr_status, val,
560                                  val != XTR_NOT_READY,
561                                  READL_SLEEP_US, READL_TIMEOUT_US, false,
562                                  lan966x, grp);
563 }
564
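/* Read the next word of a frame from the manual extraction FIFO. Returns the
 * number of bytes placed in @rval (4 for a full word, 1-4 at end of frame),
 * or a negative error on abort or timeout.
 */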
565 static int lan966x_rx_frame_word(struct lan966x *lan966x, u8 grp, u32 *rval)
566 {
567         u32 bytes_valid;
568         u32 val;
569         int err;
570
571         val = lan_rd(lan966x, QS_XTR_RD(grp));
572         if (val == XTR_NOT_READY) {
573                 err = lan966x_port_xtr_ready(lan966x, grp);
574                 if (err)
575                         return -EIO;
576         }
577
578         switch (val) {
579         case XTR_ABORT:
580                 return -EIO;
581         case XTR_EOF_0:
582         case XTR_EOF_1:
583         case XTR_EOF_2:
584         case XTR_EOF_3:
585         case XTR_PRUNED:
586                 bytes_valid = XTR_VALID_BYTES(val);
587                 val = lan_rd(lan966x, QS_XTR_RD(grp));
588                 if (val == XTR_ESCAPE)
589                         *rval = lan_rd(lan966x, QS_XTR_RD(grp));
590                 else
591                         *rval = val;
592
593                 return bytes_valid;
594         case XTR_ESCAPE:
595                 *rval = lan_rd(lan966x, QS_XTR_RD(grp));
596
597                 return 4;
598         default:
599                 *rval = val;
600
601                 return 4;
602         }
603 }
604
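/* Extract a @length bit wide field starting at bit position @pos from the
 * extraction frame header; the bit numbering mirrors lan966x_ifh_set().
 */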
605 static u64 lan966x_ifh_get(u8 *ifh, size_t pos, size_t length)
606 {
607         u64 val = 0;
608         u8 v;
609
610         for (int i = 0; i < length ; i++) {
611                 int j = pos + i;
612                 int k = j % 8;
613
614                 if (i == 0 || k == 0)
615                         v = ifh[IFH_LEN_BYTES - (j / 8) - 1];
616
617                 if (v & (1 << k))
618                         val |= (1ULL << i);
619         }
620
621         return val;
622 }
623
624 void lan966x_ifh_get_src_port(void *ifh, u64 *src_port)
625 {
626         *src_port = lan966x_ifh_get(ifh, IFH_POS_SRCPORT, IFH_WID_SRCPORT);
627 }
628
629 static void lan966x_ifh_get_len(void *ifh, u64 *len)
630 {
631         *len = lan966x_ifh_get(ifh, IFH_POS_LEN, IFH_WID_LEN);
632 }
633
634 void lan966x_ifh_get_timestamp(void *ifh, u64 *timestamp)
635 {
636         *timestamp = lan966x_ifh_get(ifh, IFH_POS_TIMESTAMP, IFH_WID_TIMESTAMP);
637 }
638
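/* Manual extraction interrupt handler, used when FDMA is not available: for
 * each pending frame read the IFH, then the payload and FCS word by word,
 * attach the RX timestamp and forwarding metadata, and pass the skb up the
 * stack. On a read error the frame is dropped.
 */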
639 static irqreturn_t lan966x_xtr_irq_handler(int irq, void *args)
640 {
641         struct lan966x *lan966x = args;
642         int i, grp = 0, err = 0;
643
644         if (!(lan_rd(lan966x, QS_XTR_DATA_PRESENT) & BIT(grp)))
645                 return IRQ_NONE;
646
647         do {
648                 u64 src_port, len, timestamp;
649                 struct net_device *dev;
650                 struct sk_buff *skb;
651                 int sz = 0, buf_len;
652                 u32 ifh[IFH_LEN];
653                 u32 *buf;
654                 u32 val;
655
656                 for (i = 0; i < IFH_LEN; i++) {
657                         err = lan966x_rx_frame_word(lan966x, grp, &ifh[i]);
658                         if (err != 4)
659                                 goto recover;
660                 }
661
662                 err = 0;
663
664                 lan966x_ifh_get_src_port(ifh, &src_port);
665                 lan966x_ifh_get_len(ifh, &len);
666                 lan966x_ifh_get_timestamp(ifh, &timestamp);
667
668                 WARN_ON(src_port >= lan966x->num_phys_ports);
669
670                 dev = lan966x->ports[src_port]->dev;
671                 skb = netdev_alloc_skb(dev, len);
672                 if (unlikely(!skb)) {
673                         netdev_err(dev, "Unable to allocate sk_buff\n");
674                         break;
675                 }
676                 buf_len = len - ETH_FCS_LEN;
677                 buf = (u32 *)skb_put(skb, buf_len);
678
679                 len = 0;
680                 do {
681                         sz = lan966x_rx_frame_word(lan966x, grp, &val);
682                         if (sz < 0) {
683                                 kfree_skb(skb);
684                                 goto recover;
685                         }
686
687                         *buf++ = val;
688                         len += sz;
689                 } while (len < buf_len);
690
691                 /* Read the FCS */
692                 sz = lan966x_rx_frame_word(lan966x, grp, &val);
693                 if (sz < 0) {
694                         kfree_skb(skb);
695                         goto recover;
696                 }
697
698                 /* Update the statistics if part of the FCS was read before */
699                 len -= ETH_FCS_LEN - sz;
700
701                 if (unlikely(dev->features & NETIF_F_RXFCS)) {
702                         buf = (u32 *)skb_put(skb, ETH_FCS_LEN);
703                         *buf = val;
704                 }
705
706                 lan966x_ptp_rxtstamp(lan966x, skb, src_port, timestamp);
707                 skb->protocol = eth_type_trans(skb, dev);
708
709                 if (lan966x->bridge_mask & BIT(src_port)) {
710                         skb->offload_fwd_mark = 1;
711
712                         skb_reset_network_header(skb);
713                         if (!lan966x_hw_offload(lan966x, src_port, skb))
714                                 skb->offload_fwd_mark = 0;
715                 }
716
717                 if (!skb_defer_rx_timestamp(skb))
718                         netif_rx(skb);
719
720                 dev->stats.rx_bytes += len;
721                 dev->stats.rx_packets++;
722
723 recover:
724                 if (sz < 0 || err)
725                         lan_rd(lan966x, QS_XTR_RD(grp));
726
727         } while (lan_rd(lan966x, QS_XTR_DATA_PRESENT) & BIT(grp));
728
729         return IRQ_HANDLED;
730 }
731
732 static irqreturn_t lan966x_ana_irq_handler(int irq, void *args)
733 {
734         struct lan966x *lan966x = args;
735
736         return lan966x_mac_irq_handler(lan966x);
737 }
738
739 static void lan966x_cleanup_ports(struct lan966x *lan966x)
740 {
741         struct lan966x_port *port;
742         int p;
743
744         for (p = 0; p < lan966x->num_phys_ports; p++) {
745                 port = lan966x->ports[p];
746                 if (!port)
747                         continue;
748
749                 if (port->dev)
750                         unregister_netdev(port->dev);
751
752                 lan966x_xdp_port_deinit(port);
753                 if (lan966x->fdma && lan966x->fdma_ndev == port->dev)
754                         lan966x_fdma_netdev_deinit(lan966x, port->dev);
755
756                 if (port->phylink) {
757                         rtnl_lock();
758                         lan966x_port_stop(port->dev);
759                         rtnl_unlock();
760                         phylink_destroy(port->phylink);
761                         port->phylink = NULL;
762                 }
763
764                 if (port->fwnode)
765                         fwnode_handle_put(port->fwnode);
766         }
767
768         disable_irq(lan966x->xtr_irq);
769         lan966x->xtr_irq = -ENXIO;
770
771         if (lan966x->ana_irq > 0) {
772                 disable_irq(lan966x->ana_irq);
773                 lan966x->ana_irq = -ENXIO;
774         }
775
776         if (lan966x->fdma)
777                 devm_free_irq(lan966x->dev, lan966x->fdma_irq, lan966x);
778
779         if (lan966x->ptp_irq > 0)
780                 devm_free_irq(lan966x->dev, lan966x->ptp_irq, lan966x);
781
782         if (lan966x->ptp_ext_irq > 0)
783                 devm_free_irq(lan966x->dev, lan966x->ptp_ext_irq, lan966x);
784 }
785
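/* Allocate and register the net_device for physical port @p, set up its
 * netdev ops, phylink/PCS instance and supported interfaces, and apply the
 * default VLAN configuration.
 */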
786 static int lan966x_probe_port(struct lan966x *lan966x, u32 p,
787                               phy_interface_t phy_mode,
788                               struct fwnode_handle *portnp)
789 {
790         struct lan966x_port *port;
791         struct phylink *phylink;
792         struct net_device *dev;
793         int err;
794
795         if (p >= lan966x->num_phys_ports)
796                 return -EINVAL;
797
798         dev = devm_alloc_etherdev_mqs(lan966x->dev,
799                                       sizeof(struct lan966x_port),
800                                       NUM_PRIO_QUEUES, 1);
801         if (!dev)
802                 return -ENOMEM;
803
804         SET_NETDEV_DEV(dev, lan966x->dev);
805         port = netdev_priv(dev);
806         port->dev = dev;
807         port->lan966x = lan966x;
808         port->chip_port = p;
809         lan966x->ports[p] = port;
810
811         dev->max_mtu = ETH_MAX_MTU;
812
813         dev->netdev_ops = &lan966x_port_netdev_ops;
814         dev->ethtool_ops = &lan966x_ethtool_ops;
815         dev->features |= NETIF_F_HW_VLAN_CTAG_TX |
816                          NETIF_F_HW_VLAN_STAG_TX |
817                          NETIF_F_HW_TC;
818         dev->hw_features |= NETIF_F_HW_TC;
819         dev->see_all_hwtstamp_requests = true;
820         dev->needed_headroom = IFH_LEN_BYTES;
821
822         eth_hw_addr_gen(dev, lan966x->base_mac, p + 1);
823
824         lan966x_mac_learn(lan966x, PGID_CPU, dev->dev_addr, HOST_PVID,
825                           ENTRYTYPE_LOCKED);
826
827         port->phylink_config.dev = &port->dev->dev;
828         port->phylink_config.type = PHYLINK_NETDEV;
829         port->phylink_pcs.poll = true;
830         port->phylink_pcs.ops = &lan966x_phylink_pcs_ops;
831         port->phylink_pcs.neg_mode = true;
832
833         port->phylink_config.mac_capabilities = MAC_ASYM_PAUSE | MAC_SYM_PAUSE |
834                 MAC_10 | MAC_100 | MAC_1000FD | MAC_2500FD;
835
836         phy_interface_set_rgmii(port->phylink_config.supported_interfaces);
837         __set_bit(PHY_INTERFACE_MODE_MII,
838                   port->phylink_config.supported_interfaces);
839         __set_bit(PHY_INTERFACE_MODE_GMII,
840                   port->phylink_config.supported_interfaces);
841         __set_bit(PHY_INTERFACE_MODE_SGMII,
842                   port->phylink_config.supported_interfaces);
843         __set_bit(PHY_INTERFACE_MODE_QSGMII,
844                   port->phylink_config.supported_interfaces);
845         __set_bit(PHY_INTERFACE_MODE_QUSGMII,
846                   port->phylink_config.supported_interfaces);
847         __set_bit(PHY_INTERFACE_MODE_1000BASEX,
848                   port->phylink_config.supported_interfaces);
849         __set_bit(PHY_INTERFACE_MODE_2500BASEX,
850                   port->phylink_config.supported_interfaces);
851
852         phylink = phylink_create(&port->phylink_config,
853                                  portnp,
854                                  phy_mode,
855                                  &lan966x_phylink_mac_ops);
856         if (IS_ERR(phylink)) {
857                 port->dev = NULL;
858                 return PTR_ERR(phylink);
859         }
860
861         port->phylink = phylink;
862
863         if (lan966x->fdma)
864                 dev->xdp_features = NETDEV_XDP_ACT_BASIC |
865                                     NETDEV_XDP_ACT_REDIRECT |
866                                     NETDEV_XDP_ACT_NDO_XMIT;
867
868         err = register_netdev(dev);
869         if (err) {
870                 dev_err(lan966x->dev, "register_netdev failed\n");
871                 return err;
872         }
873
874         lan966x_vlan_port_set_vlan_aware(port, 0);
875         lan966x_vlan_port_set_vid(port, HOST_PVID, false, false);
876         lan966x_vlan_port_apply(port);
877
878         return 0;
879 }
880
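/* One-time switch core setup: MAC and VLAN tables, extraction/injection
 * queues, ageing timers, flooding PGIDs and the CPU port configuration.
 */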
881 static void lan966x_init(struct lan966x *lan966x)
882 {
883         u32 p, i;
884
885         /* MAC table initialization */
886         lan966x_mac_init(lan966x);
887
888         lan966x_vlan_init(lan966x);
889
890         /* Flush queues */
891         lan_wr(lan_rd(lan966x, QS_XTR_FLUSH) |
892                GENMASK(1, 0),
893                lan966x, QS_XTR_FLUSH);
894
895         /* Allow the queues to drain */
896         mdelay(1);
897
898         /* All Queues normal */
899         lan_wr(lan_rd(lan966x, QS_XTR_FLUSH) &
900                ~(GENMASK(1, 0)),
901                lan966x, QS_XTR_FLUSH);
902
903         /* Set MAC age time to the default value; an entry is aged out after
904          * 2 * AGE_PERIOD
905          */
906         lan_wr(ANA_AUTOAGE_AGE_PERIOD_SET(BR_DEFAULT_AGEING_TIME / 2 / HZ),
907                lan966x, ANA_AUTOAGE);
908
909         /* Disable learning for frames discarded by VLAN ingress filtering */
910         lan_rmw(ANA_ADVLEARN_VLAN_CHK_SET(1),
911                 ANA_ADVLEARN_VLAN_CHK,
912                 lan966x, ANA_ADVLEARN);
913
914         /* Setup frame ageing - "2 sec" - The unit is 6.5 us on lan966x */
915         lan_wr(SYS_FRM_AGING_AGE_TX_ENA_SET(1) |
916                (20000000 / 65),
917                lan966x,  SYS_FRM_AGING);
918
919         /* Map the 8 CPU extraction queues to CPU port */
920         lan_wr(0, lan966x, QSYS_CPU_GROUP_MAP);
921
922         /* Do byte-swap and expect status after last data word.
923          * Extraction: Mode: manual extraction | Byte_swap
924          */
925         lan_wr(QS_XTR_GRP_CFG_MODE_SET(lan966x->fdma ? 2 : 1) |
926                QS_XTR_GRP_CFG_BYTE_SWAP_SET(1),
927                lan966x, QS_XTR_GRP_CFG(0));
928
929         /* Injection: Mode: manual injection | Byte_swap */
930         lan_wr(QS_INJ_GRP_CFG_MODE_SET(lan966x->fdma ? 2 : 1) |
931                QS_INJ_GRP_CFG_BYTE_SWAP_SET(1),
932                lan966x, QS_INJ_GRP_CFG(0));
933
934         lan_rmw(QS_INJ_CTRL_GAP_SIZE_SET(0),
935                 QS_INJ_CTRL_GAP_SIZE,
936                 lan966x, QS_INJ_CTRL(0));
937
938         /* Enable IFH insertion/parsing on CPU ports */
939         lan_wr(SYS_PORT_MODE_INCL_INJ_HDR_SET(1) |
940                SYS_PORT_MODE_INCL_XTR_HDR_SET(1),
941                lan966x, SYS_PORT_MODE(CPU_PORT));
942
943         /* Setup flooding PGIDs */
944         lan_wr(ANA_FLOODING_IPMC_FLD_MC4_DATA_SET(PGID_MCIPV4) |
945                ANA_FLOODING_IPMC_FLD_MC4_CTRL_SET(PGID_MC) |
946                ANA_FLOODING_IPMC_FLD_MC6_DATA_SET(PGID_MCIPV6) |
947                ANA_FLOODING_IPMC_FLD_MC6_CTRL_SET(PGID_MC),
948                lan966x, ANA_FLOODING_IPMC);
949
950         /* There are 8 priorities */
951         for (i = 0; i < 8; ++i)
952                 lan_rmw(ANA_FLOODING_FLD_MULTICAST_SET(PGID_MC) |
953                         ANA_FLOODING_FLD_UNICAST_SET(PGID_UC) |
954                         ANA_FLOODING_FLD_BROADCAST_SET(PGID_BC),
955                         ANA_FLOODING_FLD_MULTICAST |
956                         ANA_FLOODING_FLD_UNICAST |
957                         ANA_FLOODING_FLD_BROADCAST,
958                         lan966x, ANA_FLOODING(i));
959
960         for (i = 0; i < PGID_ENTRIES; ++i)
961                 /* Set all the entries to obey VLAN_VLAN */
962                 lan_rmw(ANA_PGID_CFG_OBEY_VLAN_SET(1),
963                         ANA_PGID_CFG_OBEY_VLAN,
964                         lan966x, ANA_PGID_CFG(i));
965
966         for (p = 0; p < lan966x->num_phys_ports; p++) {
967                 /* Disable bridging by default */
968                 lan_rmw(ANA_PGID_PGID_SET(0x0),
969                         ANA_PGID_PGID,
970                         lan966x, ANA_PGID(p + PGID_SRC));
971
972                 /* Do not forward BPDU frames to the front ports and copy them
973                  * to CPU
974                  */
975                 lan_wr(0xffff, lan966x, ANA_CPU_FWD_BPDU_CFG(p));
976         }
977
978         /* Set source buffer size for each priority and each port to 1500 bytes */
979         for (i = 0; i <= QSYS_Q_RSRV; ++i) {
980                 lan_wr(1500 / 64, lan966x, QSYS_RES_CFG(i));
981                 lan_wr(1500 / 64, lan966x, QSYS_RES_CFG(512 + i));
982         }
983
984         /* Enable switching to/from cpu port */
985         lan_wr(QSYS_SW_PORT_MODE_PORT_ENA_SET(1) |
986                QSYS_SW_PORT_MODE_SCH_NEXT_CFG_SET(1) |
987                QSYS_SW_PORT_MODE_INGRESS_DROP_MODE_SET(1),
988                lan966x,  QSYS_SW_PORT_MODE(CPU_PORT));
989
990         /* Configure and enable the CPU port */
991         lan_rmw(ANA_PGID_PGID_SET(0),
992                 ANA_PGID_PGID,
993                 lan966x, ANA_PGID(CPU_PORT));
994         lan_rmw(ANA_PGID_PGID_SET(BIT(CPU_PORT)),
995                 ANA_PGID_PGID,
996                 lan966x, ANA_PGID(PGID_CPU));
997
998         /* Multicast to all other ports */
999         lan_rmw(GENMASK(lan966x->num_phys_ports - 1, 0),
1000                 ANA_PGID_PGID,
1001                 lan966x, ANA_PGID(PGID_MC));
1002
1003         /* This will be controlled by mrouter ports */
1004         lan_rmw(GENMASK(lan966x->num_phys_ports - 1, 0),
1005                 ANA_PGID_PGID,
1006                 lan966x, ANA_PGID(PGID_MCIPV4));
1007
1008         lan_rmw(GENMASK(lan966x->num_phys_ports - 1, 0),
1009                 ANA_PGID_PGID,
1010                 lan966x, ANA_PGID(PGID_MCIPV6));
1011
1012         /* Unicast to all other ports */
1013         lan_rmw(GENMASK(lan966x->num_phys_ports - 1, 0),
1014                 ANA_PGID_PGID,
1015                 lan966x, ANA_PGID(PGID_UC));
1016
1017         /* Broadcast to the CPU port and to other ports */
1018         lan_rmw(ANA_PGID_PGID_SET(BIT(CPU_PORT) | GENMASK(lan966x->num_phys_ports - 1, 0)),
1019                 ANA_PGID_PGID,
1020                 lan966x, ANA_PGID(PGID_BC));
1021
1022         lan_wr(REW_PORT_CFG_NO_REWRITE_SET(1),
1023                lan966x, REW_PORT_CFG(CPU_PORT));
1024
1025         lan_rmw(ANA_ANAINTR_INTR_ENA_SET(1),
1026                 ANA_ANAINTR_INTR_ENA,
1027                 lan966x, ANA_ANAINTR);
1028
1029         spin_lock_init(&lan966x->tx_lock);
1030
1031         lan966x_taprio_init(lan966x);
1032 }
1033
1034 static int lan966x_ram_init(struct lan966x *lan966x)
1035 {
1036         return lan_rd(lan966x, SYS_RAM_INIT);
1037 }
1038
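/* Trigger the optional "switch" reset line and, unless the switch core is
 * already enabled from a previous run, initialize the core RAM and enable
 * the core.
 */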
1039 static int lan966x_reset_switch(struct lan966x *lan966x)
1040 {
1041         struct reset_control *switch_reset;
1042         int val = 0;
1043         int ret;
1044
1045         switch_reset = devm_reset_control_get_optional_shared(lan966x->dev,
1046                                                               "switch");
1047         if (IS_ERR(switch_reset))
1048                 return dev_err_probe(lan966x->dev, PTR_ERR(switch_reset),
1049                                      "Could not obtain switch reset");
1050
1051         reset_control_reset(switch_reset);
1052
1053         /* Don't reinitialize the switch core, if it is already initialized. In
1054          * case it is initialized twice, some pointers inside the queue system
1055          * in HW will get corrupted and then after a while the queue system gets
1056          * full and no traffic is passing through the switch. The issue is seen
1057          * when loading and unloading the driver and sending traffic through the
1058          * switch.
1059          */
1060         if (lan_rd(lan966x, SYS_RESET_CFG) & SYS_RESET_CFG_CORE_ENA)
1061                 return 0;
1062
1063         lan_wr(SYS_RESET_CFG_CORE_ENA_SET(0), lan966x, SYS_RESET_CFG);
1064         lan_wr(SYS_RAM_INIT_RAM_INIT_SET(1), lan966x, SYS_RAM_INIT);
1065         ret = readx_poll_timeout(lan966x_ram_init, lan966x,
1066                                  val, (val & BIT(1)) == 0, READL_SLEEP_US,
1067                                  READL_TIMEOUT_US);
1068         if (ret)
1069                 return ret;
1070
1071         lan_wr(SYS_RESET_CFG_CORE_ENA_SET(1), lan966x, SYS_RESET_CFG);
1072
1073         return 0;
1074 }
1075
1076 static int lan966x_probe(struct platform_device *pdev)
1077 {
1078         struct fwnode_handle *ports, *portnp;
1079         struct lan966x *lan966x;
1080         u8 mac_addr[ETH_ALEN];
1081         int err;
1082
1083         lan966x = devm_kzalloc(&pdev->dev, sizeof(*lan966x), GFP_KERNEL);
1084         if (!lan966x)
1085                 return -ENOMEM;
1086
1087         platform_set_drvdata(pdev, lan966x);
1088         lan966x->dev = &pdev->dev;
1089
1090         if (!device_get_mac_address(&pdev->dev, mac_addr)) {
1091                 ether_addr_copy(lan966x->base_mac, mac_addr);
1092         } else {
1093                 pr_info("MAC addr was not set, use random MAC\n");
1094                 eth_random_addr(lan966x->base_mac);
1095                 lan966x->base_mac[5] &= 0xf0;
1096         }
1097
1098         err = lan966x_create_targets(pdev, lan966x);
1099         if (err)
1100                 return dev_err_probe(&pdev->dev, err,
1101                                      "Failed to create targets");
1102
1103         err = lan966x_reset_switch(lan966x);
1104         if (err)
1105                 return dev_err_probe(&pdev->dev, err, "Reset failed");
1106
1107         lan966x->num_phys_ports = NUM_PHYS_PORTS;
1108         lan966x->ports = devm_kcalloc(&pdev->dev, lan966x->num_phys_ports,
1109                                       sizeof(struct lan966x_port *),
1110                                       GFP_KERNEL);
1111         if (!lan966x->ports)
1112                 return -ENOMEM;
1113
1114         /* The QS system has 32KB of memory */
1115         lan966x->shared_queue_sz = LAN966X_BUFFER_MEMORY;
1116
1117         /* set irq */
1118         lan966x->xtr_irq = platform_get_irq_byname(pdev, "xtr");
1119         if (lan966x->xtr_irq < 0)
1120                 return lan966x->xtr_irq;
1121
1122         err = devm_request_threaded_irq(&pdev->dev, lan966x->xtr_irq, NULL,
1123                                         lan966x_xtr_irq_handler, IRQF_ONESHOT,
1124                                         "frame extraction", lan966x);
1125         if (err) {
1126                 pr_err("Unable to use xtr irq");
1127                 return -ENODEV;
1128         }
1129
1130         lan966x->ana_irq = platform_get_irq_byname(pdev, "ana");
1131         if (lan966x->ana_irq > 0) {
1132                 err = devm_request_threaded_irq(&pdev->dev, lan966x->ana_irq, NULL,
1133                                                 lan966x_ana_irq_handler, IRQF_ONESHOT,
1134                                                 "ana irq", lan966x);
1135                 if (err)
1136                         return dev_err_probe(&pdev->dev, err, "Unable to use ana irq");
1137         }
1138
1139         lan966x->ptp_irq = platform_get_irq_byname(pdev, "ptp");
1140         if (lan966x->ptp_irq > 0) {
1141                 err = devm_request_threaded_irq(&pdev->dev, lan966x->ptp_irq, NULL,
1142                                                 lan966x_ptp_irq_handler, IRQF_ONESHOT,
1143                                                 "ptp irq", lan966x);
1144                 if (err)
1145                         return dev_err_probe(&pdev->dev, err, "Unable to use ptp irq");
1146
1147                 lan966x->ptp = 1;
1148         }
1149
1150         lan966x->fdma_irq = platform_get_irq_byname(pdev, "fdma");
1151         if (lan966x->fdma_irq > 0) {
1152                 err = devm_request_irq(&pdev->dev, lan966x->fdma_irq,
1153                                        lan966x_fdma_irq_handler, 0,
1154                                        "fdma irq", lan966x);
1155                 if (err)
1156                         return dev_err_probe(&pdev->dev, err, "Unable to use fdma irq");
1157
1158                 lan966x->fdma = true;
1159         }
1160
1161         if (lan966x->ptp) {
1162                 lan966x->ptp_ext_irq = platform_get_irq_byname(pdev, "ptp-ext");
1163                 if (lan966x->ptp_ext_irq > 0) {
1164                         err = devm_request_threaded_irq(&pdev->dev,
1165                                                         lan966x->ptp_ext_irq, NULL,
1166                                                         lan966x_ptp_ext_irq_handler,
1167                                                         IRQF_ONESHOT,
1168                                                         "ptp-ext irq", lan966x);
1169                         if (err)
1170                                 return dev_err_probe(&pdev->dev, err,
1171                                                      "Unable to use ptp-ext irq");
1172                 }
1173         }
1174
1175         ports = device_get_named_child_node(&pdev->dev, "ethernet-ports");
1176         if (!ports)
1177                 return dev_err_probe(&pdev->dev, -ENODEV,
1178                                      "no ethernet-ports child found\n");
1179
1180         lan966x->debugfs_root = debugfs_create_dir("lan966x", NULL);
1181
1182         /* init switch */
1183         lan966x_init(lan966x);
1184         lan966x_stats_init(lan966x);
1185
1186         /* go over the child nodes */
1187         fwnode_for_each_available_child_node(ports, portnp) {
1188                 phy_interface_t phy_mode;
1189                 struct phy *serdes;
1190                 u32 p;
1191
1192                 if (fwnode_property_read_u32(portnp, "reg", &p))
1193                         continue;
1194
1195                 phy_mode = fwnode_get_phy_mode(portnp);
1196                 err = lan966x_probe_port(lan966x, p, phy_mode, portnp);
1197                 if (err)
1198                         goto cleanup_ports;
1199
1200                 /* Read needed configuration */
1201                 lan966x->ports[p]->config.portmode = phy_mode;
1202                 lan966x->ports[p]->fwnode = fwnode_handle_get(portnp);
1203
1204                 serdes = devm_of_phy_optional_get(lan966x->dev,
1205                                                   to_of_node(portnp), NULL);
1206                 if (IS_ERR(serdes)) {
1207                         err = PTR_ERR(serdes);
1208                         goto cleanup_ports;
1209                 }
1210                 lan966x->ports[p]->serdes = serdes;
1211
1212                 lan966x_port_init(lan966x->ports[p]);
1213                 err = lan966x_xdp_port_init(lan966x->ports[p]);
1214                 if (err)
1215                         goto cleanup_ports;
1216         }
1217
1218         fwnode_handle_put(ports);
1219
1220         lan966x_mdb_init(lan966x);
1221         err = lan966x_fdb_init(lan966x);
1222         if (err)
1223                 goto cleanup_ports;
1224
1225         err = lan966x_ptp_init(lan966x);
1226         if (err)
1227                 goto cleanup_fdb;
1228
1229         err = lan966x_fdma_init(lan966x);
1230         if (err)
1231                 goto cleanup_ptp;
1232
1233         err = lan966x_vcap_init(lan966x);
1234         if (err)
1235                 goto cleanup_fdma;
1236
1237         lan966x_dcb_init(lan966x);
1238
1239         return 0;
1240
1241 cleanup_fdma:
1242         lan966x_fdma_deinit(lan966x);
1243
1244 cleanup_ptp:
1245         lan966x_ptp_deinit(lan966x);
1246
1247 cleanup_fdb:
1248         lan966x_fdb_deinit(lan966x);
1249
1250 cleanup_ports:
1251         fwnode_handle_put(ports);
1252         fwnode_handle_put(portnp);
1253
1254         lan966x_cleanup_ports(lan966x);
1255
1256         cancel_delayed_work_sync(&lan966x->stats_work);
1257         destroy_workqueue(lan966x->stats_queue);
1258         mutex_destroy(&lan966x->stats_lock);
1259
1260         debugfs_remove_recursive(lan966x->debugfs_root);
1261
1262         return err;
1263 }
1264
1265 static void lan966x_remove(struct platform_device *pdev)
1266 {
1267         struct lan966x *lan966x = platform_get_drvdata(pdev);
1268
1269         lan966x_taprio_deinit(lan966x);
1270         lan966x_vcap_deinit(lan966x);
1271         lan966x_fdma_deinit(lan966x);
1272         lan966x_cleanup_ports(lan966x);
1273
1274         cancel_delayed_work_sync(&lan966x->stats_work);
1275         destroy_workqueue(lan966x->stats_queue);
1276         mutex_destroy(&lan966x->stats_lock);
1277
1278         lan966x_mac_purge_entries(lan966x);
1279         lan966x_mdb_deinit(lan966x);
1280         lan966x_fdb_deinit(lan966x);
1281         lan966x_ptp_deinit(lan966x);
1282
1283         debugfs_remove_recursive(lan966x->debugfs_root);
1284 }
1285
1286 static struct platform_driver lan966x_driver = {
1287         .probe = lan966x_probe,
1288         .remove = lan966x_remove,
1289         .driver = {
1290                 .name = "lan966x-switch",
1291                 .of_match_table = lan966x_match,
1292         },
1293 };
1294
1295 static int __init lan966x_switch_driver_init(void)
1296 {
1297         int ret;
1298
1299         lan966x_register_notifier_blocks();
1300
1301         ret = platform_driver_register(&lan966x_driver);
1302         if (ret)
1303                 goto err;
1304
1305         return 0;
1306
1307 err:
1308         lan966x_unregister_notifier_blocks();
1309         return ret;
1310 }
1311
1312 static void __exit lan966x_switch_driver_exit(void)
1313 {
1314         platform_driver_unregister(&lan966x_driver);
1315         lan966x_unregister_notifier_blocks();
1316 }
1317
1318 module_init(lan966x_switch_driver_init);
1319 module_exit(lan966x_switch_driver_exit);
1320
1321 MODULE_DESCRIPTION("Microchip LAN966X switch driver");
1322 MODULE_AUTHOR("Horatiu Vultur <[email protected]>");
1323 MODULE_LICENSE("Dual MIT/GPL");