// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (c) 2015, 2019, The Linux Foundation. All rights reserved.
 */
9 #include <linux/module.h>
10 #include <linux/phy.h>
11 #include <linux/netdevice.h>
12 #include <linux/bitfield.h>
13 #include <linux/regmap.h>
15 #include <linux/of_net.h>
16 #include <linux/of_mdio.h>
17 #include <linux/of_platform.h>
18 #include <linux/if_bridge.h>
19 #include <linux/mdio.h>
20 #include <linux/phylink.h>
21 #include <linux/gpio/consumer.h>
22 #include <linux/etherdevice.h>
23 #include <linux/dsa/tag_qca.h>
#define MIB_DESC(_s, _o, _n)	\
	{			\
		.size = (_s),	\
		.offset = (_o),	\
		.name = (_n),	\
	}
34 static const struct qca8k_mib_desc ar8327_mib[] = {
35 MIB_DESC(1, 0x00, "RxBroad"),
36 MIB_DESC(1, 0x04, "RxPause"),
37 MIB_DESC(1, 0x08, "RxMulti"),
38 MIB_DESC(1, 0x0c, "RxFcsErr"),
39 MIB_DESC(1, 0x10, "RxAlignErr"),
40 MIB_DESC(1, 0x14, "RxRunt"),
41 MIB_DESC(1, 0x18, "RxFragment"),
42 MIB_DESC(1, 0x1c, "Rx64Byte"),
43 MIB_DESC(1, 0x20, "Rx128Byte"),
44 MIB_DESC(1, 0x24, "Rx256Byte"),
45 MIB_DESC(1, 0x28, "Rx512Byte"),
46 MIB_DESC(1, 0x2c, "Rx1024Byte"),
47 MIB_DESC(1, 0x30, "Rx1518Byte"),
48 MIB_DESC(1, 0x34, "RxMaxByte"),
49 MIB_DESC(1, 0x38, "RxTooLong"),
50 MIB_DESC(2, 0x3c, "RxGoodByte"),
51 MIB_DESC(2, 0x44, "RxBadByte"),
52 MIB_DESC(1, 0x4c, "RxOverFlow"),
53 MIB_DESC(1, 0x50, "Filtered"),
54 MIB_DESC(1, 0x54, "TxBroad"),
55 MIB_DESC(1, 0x58, "TxPause"),
56 MIB_DESC(1, 0x5c, "TxMulti"),
57 MIB_DESC(1, 0x60, "TxUnderRun"),
58 MIB_DESC(1, 0x64, "Tx64Byte"),
59 MIB_DESC(1, 0x68, "Tx128Byte"),
60 MIB_DESC(1, 0x6c, "Tx256Byte"),
61 MIB_DESC(1, 0x70, "Tx512Byte"),
62 MIB_DESC(1, 0x74, "Tx1024Byte"),
63 MIB_DESC(1, 0x78, "Tx1518Byte"),
64 MIB_DESC(1, 0x7c, "TxMaxByte"),
65 MIB_DESC(1, 0x80, "TxOverSize"),
66 MIB_DESC(2, 0x84, "TxByte"),
67 MIB_DESC(1, 0x8c, "TxCollision"),
68 MIB_DESC(1, 0x90, "TxAbortCol"),
69 MIB_DESC(1, 0x94, "TxMultiCol"),
70 MIB_DESC(1, 0x98, "TxSingleCol"),
71 MIB_DESC(1, 0x9c, "TxExcDefer"),
72 MIB_DESC(1, 0xa0, "TxDefer"),
73 MIB_DESC(1, 0xa4, "TxLateCol"),
74 MIB_DESC(1, 0xa8, "RXUnicast"),
75 MIB_DESC(1, 0xac, "TXUnicast"),
qca8k_split_addr(u32 regaddr, u16 *r1, u16 *r2, u16 *page)
{
	*r1 = (regaddr >> 1) & 0x1e;
	*r2 = (regaddr >> 6) & 0x7;
	*page = (regaddr >> 9) & 0x3ff;
}
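/* Worked example (illustrative): with the split above, regaddr 0x1234
 * yields r1 = 0x1a, r2 = 0x0 and page = 0x9. The MDIO accessors below
 * then address the switch at PHY address (0x10 | r2), register r1,
 * after selecting the page with qca8k_set_page().
 */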
92 qca8k_set_lo(struct qca8k_priv *priv, int phy_id, u32 regnum, u16 lo)
94 u16 *cached_lo = &priv->mdio_cache.lo;
95 struct mii_bus *bus = priv->bus;
	if (lo == *cached_lo)
		return 0;

	ret = bus->write(bus, phy_id, regnum, lo);
	if (ret < 0)
		dev_err_ratelimited(&bus->dev,
				    "failed to write qca8k 32bit lo register\n");

	*cached_lo = lo;
111 qca8k_set_hi(struct qca8k_priv *priv, int phy_id, u32 regnum, u16 hi)
113 u16 *cached_hi = &priv->mdio_cache.hi;
114 struct mii_bus *bus = priv->bus;
	if (hi == *cached_hi)
		return 0;

	ret = bus->write(bus, phy_id, regnum, hi);
	if (ret < 0)
		dev_err_ratelimited(&bus->dev,
				    "failed to write qca8k 32bit hi register\n");

	*cached_hi = hi;
130 qca8k_mii_read32(struct mii_bus *bus, int phy_id, u32 regnum, u32 *val)
	ret = bus->read(bus, phy_id, regnum);
	if (ret >= 0) {
		*val = ret & 0xffff;
		ret = bus->read(bus, phy_id, regnum + 1);
		if (ret >= 0)
			*val |= ret << 16;
	}
	if (ret < 0)
		dev_err_ratelimited(&bus->dev,
				    "failed to read qca8k 32bit register\n");
152 qca8k_mii_write32(struct qca8k_priv *priv, int phy_id, u32 regnum, u32 val)
	lo = val & 0xffff;
	hi = (u16)(val >> 16);

	ret = qca8k_set_lo(priv, phy_id, regnum, lo);
	if (ret >= 0)
		ret = qca8k_set_hi(priv, phy_id, regnum + 1, hi);
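/* Example (illustrative): a 32-bit write of 0xdeadbeef is split into
 * lo = 0xbeef and hi = 0xdead. The mdio_cache kept above lets either
 * half be skipped entirely when it matches the value written last
 * time, halving the MDIO traffic for repeated accesses.
 */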
166 qca8k_set_page(struct qca8k_priv *priv, u16 page)
168 u16 *cached_page = &priv->mdio_cache.page;
169 struct mii_bus *bus = priv->bus;
	if (page == *cached_page)
		return 0;

	ret = bus->write(bus, 0x18, 0, page);
	if (ret < 0) {
		dev_err_ratelimited(&bus->dev,
				    "failed to set qca8k page\n");
		return ret;
	}

	*cached_page = page;
	usleep_range(1000, 2000);

	return 0;
188 qca8k_read(struct qca8k_priv *priv, u32 reg, u32 *val)
190 return regmap_read(priv->regmap, reg, val);
194 qca8k_write(struct qca8k_priv *priv, u32 reg, u32 val)
196 return regmap_write(priv->regmap, reg, val);
200 qca8k_rmw(struct qca8k_priv *priv, u32 reg, u32 mask, u32 write_val)
202 return regmap_update_bits(priv->regmap, reg, mask, write_val);
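/* Usage sketch: the rmw helper mirrors how the lookup registers are
 * updated later in this file, e.g. changing only the STP state field
 * while leaving the rest of the register intact:
 *
 *	qca8k_rmw(priv, QCA8K_PORT_LOOKUP_CTRL(port),
 *		  QCA8K_PORT_LOOKUP_STATE_MASK, stp_state);
 */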
205 static void qca8k_rw_reg_ack_handler(struct dsa_switch *ds, struct sk_buff *skb)
207 struct qca8k_mgmt_eth_data *mgmt_eth_data;
208 struct qca8k_priv *priv = ds->priv;
209 struct qca_mgmt_ethhdr *mgmt_ethhdr;
212 mgmt_ethhdr = (struct qca_mgmt_ethhdr *)skb_mac_header(skb);
213 mgmt_eth_data = &priv->mgmt_eth_data;
215 cmd = FIELD_GET(QCA_HDR_MGMT_CMD, mgmt_ethhdr->command);
216 len = FIELD_GET(QCA_HDR_MGMT_LENGTH, mgmt_ethhdr->command);
	/* Make sure the seq matches the requested packet */
219 if (mgmt_ethhdr->seq == mgmt_eth_data->seq)
220 mgmt_eth_data->ack = true;
222 if (cmd == MDIO_READ) {
223 mgmt_eth_data->data[0] = mgmt_ethhdr->mdio_data;
		/* Get the rest of the 12 bytes of data.
		 * The read/write function will extract the requested data.
		 */
228 if (len > QCA_HDR_MGMT_DATA1_LEN)
229 memcpy(mgmt_eth_data->data + 1, skb->data,
230 QCA_HDR_MGMT_DATA2_LEN);
233 complete(&mgmt_eth_data->rw_done);
236 static struct sk_buff *qca8k_alloc_mdio_header(enum mdio_cmd cmd, u32 reg, u32 *val,
237 int priority, unsigned int len)
239 struct qca_mgmt_ethhdr *mgmt_ethhdr;
240 unsigned int real_len;
245 skb = dev_alloc_skb(QCA_HDR_MGMT_PKT_LEN);
	/* Max value for the len reg is 15 (0xf) but the switch actually returns
	 * 16 bytes. For some reason the steps are:
	 * 0: nothing
	 * 1-4: first 4 bytes
	 * 5-6: first 12 bytes
	 * 7-15: all 16 bytes
	 */
	if (len == 16)
		real_len = 15;
	else
		real_len = len;
261 skb_reset_mac_header(skb);
262 skb_set_network_header(skb, skb->len);
264 mgmt_ethhdr = skb_push(skb, QCA_HDR_MGMT_HEADER_LEN + QCA_HDR_LEN);
266 hdr = FIELD_PREP(QCA_HDR_XMIT_VERSION, QCA_HDR_VERSION);
267 hdr |= FIELD_PREP(QCA_HDR_XMIT_PRIORITY, priority);
268 hdr |= QCA_HDR_XMIT_FROM_CPU;
269 hdr |= FIELD_PREP(QCA_HDR_XMIT_DP_BIT, BIT(0));
270 hdr |= FIELD_PREP(QCA_HDR_XMIT_CONTROL, QCA_HDR_XMIT_TYPE_RW_REG);
272 mgmt_ethhdr->command = FIELD_PREP(QCA_HDR_MGMT_ADDR, reg);
273 mgmt_ethhdr->command |= FIELD_PREP(QCA_HDR_MGMT_LENGTH, real_len);
274 mgmt_ethhdr->command |= FIELD_PREP(QCA_HDR_MGMT_CMD, cmd);
275 mgmt_ethhdr->command |= FIELD_PREP(QCA_HDR_MGMT_CHECK_CODE,
276 QCA_HDR_MGMT_CHECK_CODE_VAL);
278 if (cmd == MDIO_WRITE)
279 mgmt_ethhdr->mdio_data = *val;
281 mgmt_ethhdr->hdr = htons(hdr);
283 data2 = skb_put_zero(skb, QCA_HDR_MGMT_DATA2_LEN + QCA_HDR_MGMT_PADDING_LEN);
284 if (cmd == MDIO_WRITE && len > QCA_HDR_MGMT_DATA1_LEN)
285 memcpy(data2, val + 1, len - QCA_HDR_MGMT_DATA1_LEN);
290 static void qca8k_mdio_header_fill_seq_num(struct sk_buff *skb, u32 seq_num)
292 struct qca_mgmt_ethhdr *mgmt_ethhdr;
294 mgmt_ethhdr = (struct qca_mgmt_ethhdr *)skb->data;
295 mgmt_ethhdr->seq = FIELD_PREP(QCA_HDR_MGMT_SEQ_NUM, seq_num);
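/* Handshake sketch: every request bumps mgmt_eth_data->seq, stamps it
 * into the frame with the helper above and then waits on rw_done. The
 * ack handler only sets ->ack when the sequence number of the reply
 * matches, so stale or foreign replies are ignored.
 */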
298 static int qca8k_read_eth(struct qca8k_priv *priv, u32 reg, u32 *val, int len)
300 struct qca8k_mgmt_eth_data *mgmt_eth_data = &priv->mgmt_eth_data;
305 skb = qca8k_alloc_mdio_header(MDIO_READ, reg, NULL,
306 QCA8K_ETHERNET_MDIO_PRIORITY, len);
310 mutex_lock(&mgmt_eth_data->mutex);
	/* Check if the mgmt_master is operational */
	if (!priv->mgmt_master) {
		kfree_skb(skb);
		mutex_unlock(&mgmt_eth_data->mutex);
		return -EINVAL;
	}
319 skb->dev = priv->mgmt_master;
321 reinit_completion(&mgmt_eth_data->rw_done);
323 /* Increment seq_num and set it in the mdio pkt */
324 mgmt_eth_data->seq++;
325 qca8k_mdio_header_fill_seq_num(skb, mgmt_eth_data->seq);
326 mgmt_eth_data->ack = false;
330 ret = wait_for_completion_timeout(&mgmt_eth_data->rw_done,
331 msecs_to_jiffies(QCA8K_ETHERNET_TIMEOUT));
333 *val = mgmt_eth_data->data[0];
334 if (len > QCA_HDR_MGMT_DATA1_LEN)
335 memcpy(val + 1, mgmt_eth_data->data + 1, len - QCA_HDR_MGMT_DATA1_LEN);
337 ack = mgmt_eth_data->ack;
339 mutex_unlock(&mgmt_eth_data->mutex);
350 static int qca8k_write_eth(struct qca8k_priv *priv, u32 reg, u32 *val, int len)
352 struct qca8k_mgmt_eth_data *mgmt_eth_data = &priv->mgmt_eth_data;
357 skb = qca8k_alloc_mdio_header(MDIO_WRITE, reg, val,
358 QCA8K_ETHERNET_MDIO_PRIORITY, len);
362 mutex_lock(&mgmt_eth_data->mutex);
	/* Check if the mgmt_master is operational */
	if (!priv->mgmt_master) {
		kfree_skb(skb);
		mutex_unlock(&mgmt_eth_data->mutex);
		return -EINVAL;
	}
371 skb->dev = priv->mgmt_master;
373 reinit_completion(&mgmt_eth_data->rw_done);
375 /* Increment seq_num and set it in the mdio pkt */
376 mgmt_eth_data->seq++;
377 qca8k_mdio_header_fill_seq_num(skb, mgmt_eth_data->seq);
378 mgmt_eth_data->ack = false;
382 ret = wait_for_completion_timeout(&mgmt_eth_data->rw_done,
383 msecs_to_jiffies(QCA8K_ETHERNET_TIMEOUT));
385 ack = mgmt_eth_data->ack;
387 mutex_unlock(&mgmt_eth_data->mutex);
399 qca8k_regmap_update_bits_eth(struct qca8k_priv *priv, u32 reg, u32 mask, u32 write_val)
404 ret = qca8k_read_eth(priv, reg, &val, sizeof(val));
411 return qca8k_write_eth(priv, reg, &val, sizeof(val));
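/* Strategy note for the bulk helpers below: the management Ethernet
 * path is tried first and moves up to 12 bytes in a single frame
 * (e.g. a whole FDB entry); only on failure do they fall back to one
 * regmap access per 32-bit word over MDIO.
 */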
415 qca8k_bulk_read(struct qca8k_priv *priv, u32 reg, u32 *val, int len)
417 int i, count = len / sizeof(u32), ret;
419 if (priv->mgmt_master && !qca8k_read_eth(priv, reg, val, len))
422 for (i = 0; i < count; i++) {
423 ret = regmap_read(priv->regmap, reg + (i * 4), val + i);
432 qca8k_bulk_write(struct qca8k_priv *priv, u32 reg, u32 *val, int len)
434 int i, count = len / sizeof(u32), ret;
437 if (priv->mgmt_master && !qca8k_write_eth(priv, reg, val, len))
440 for (i = 0; i < count; i++) {
443 ret = regmap_write(priv->regmap, reg + (i * 4), tmp);
452 qca8k_regmap_read(void *ctx, uint32_t reg, uint32_t *val)
454 struct qca8k_priv *priv = (struct qca8k_priv *)ctx;
455 struct mii_bus *bus = priv->bus;
459 if (!qca8k_read_eth(priv, reg, val, sizeof(*val)))
462 qca8k_split_addr(reg, &r1, &r2, &page);
464 mutex_lock_nested(&bus->mdio_lock, MDIO_MUTEX_NESTED);
466 ret = qca8k_set_page(priv, page);
470 ret = qca8k_mii_read32(bus, 0x10 | r2, r1, val);
473 mutex_unlock(&bus->mdio_lock);
478 qca8k_regmap_write(void *ctx, uint32_t reg, uint32_t val)
480 struct qca8k_priv *priv = (struct qca8k_priv *)ctx;
481 struct mii_bus *bus = priv->bus;
485 if (!qca8k_write_eth(priv, reg, &val, sizeof(val)))
488 qca8k_split_addr(reg, &r1, &r2, &page);
490 mutex_lock_nested(&bus->mdio_lock, MDIO_MUTEX_NESTED);
492 ret = qca8k_set_page(priv, page);
496 qca8k_mii_write32(priv, 0x10 | r2, r1, val);
499 mutex_unlock(&bus->mdio_lock);
504 qca8k_regmap_update_bits(void *ctx, uint32_t reg, uint32_t mask, uint32_t write_val)
506 struct qca8k_priv *priv = (struct qca8k_priv *)ctx;
507 struct mii_bus *bus = priv->bus;
512 if (!qca8k_regmap_update_bits_eth(priv, reg, mask, write_val))
515 qca8k_split_addr(reg, &r1, &r2, &page);
517 mutex_lock_nested(&bus->mdio_lock, MDIO_MUTEX_NESTED);
519 ret = qca8k_set_page(priv, page);
523 ret = qca8k_mii_read32(bus, 0x10 | r2, r1, &val);
529 qca8k_mii_write32(priv, 0x10 | r2, r1, val);
532 mutex_unlock(&bus->mdio_lock);
537 static const struct regmap_range qca8k_readable_ranges[] = {
538 regmap_reg_range(0x0000, 0x00e4), /* Global control */
539 regmap_reg_range(0x0100, 0x0168), /* EEE control */
540 regmap_reg_range(0x0200, 0x0270), /* Parser control */
541 regmap_reg_range(0x0400, 0x0454), /* ACL */
542 regmap_reg_range(0x0600, 0x0718), /* Lookup */
543 regmap_reg_range(0x0800, 0x0b70), /* QM */
544 regmap_reg_range(0x0c00, 0x0c80), /* PKT */
545 regmap_reg_range(0x0e00, 0x0e98), /* L3 */
546 regmap_reg_range(0x1000, 0x10ac), /* MIB - Port0 */
547 regmap_reg_range(0x1100, 0x11ac), /* MIB - Port1 */
548 regmap_reg_range(0x1200, 0x12ac), /* MIB - Port2 */
549 regmap_reg_range(0x1300, 0x13ac), /* MIB - Port3 */
550 regmap_reg_range(0x1400, 0x14ac), /* MIB - Port4 */
551 regmap_reg_range(0x1500, 0x15ac), /* MIB - Port5 */
552 regmap_reg_range(0x1600, 0x16ac), /* MIB - Port6 */
556 static const struct regmap_access_table qca8k_readable_table = {
557 .yes_ranges = qca8k_readable_ranges,
558 .n_yes_ranges = ARRAY_SIZE(qca8k_readable_ranges),
561 static struct regmap_config qca8k_regmap_config = {
565 .max_register = 0x16ac, /* end MIB - Port6 range */
566 .reg_read = qca8k_regmap_read,
567 .reg_write = qca8k_regmap_write,
568 .reg_update_bits = qca8k_regmap_update_bits,
569 .rd_table = &qca8k_readable_table,
570 .disable_locking = true, /* Locking is handled by qca8k read/write */
571 .cache_type = REGCACHE_NONE, /* Explicitly disable CACHE */
575 qca8k_busy_wait(struct qca8k_priv *priv, u32 reg, u32 mask)
579 return regmap_read_poll_timeout(priv->regmap, reg, val, !(val & mask), 0,
580 QCA8K_BUSY_WAIT_TIMEOUT * USEC_PER_MSEC);
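/* Usage example: the table accessors below wait for the hardware to
 * complete a triggered operation, e.g.
 *
 *	ret = qca8k_busy_wait(priv, QCA8K_REG_ATU_FUNC, QCA8K_ATU_FUNC_BUSY);
 *
 * which returns 0 once the BUSY bit clears, or -ETIMEDOUT after
 * QCA8K_BUSY_WAIT_TIMEOUT ms.
 */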
584 qca8k_fdb_read(struct qca8k_priv *priv, struct qca8k_fdb *fdb)
589 /* load the ARL table into an array */
590 ret = qca8k_bulk_read(priv, QCA8K_REG_ATU_DATA0, reg, sizeof(reg));
595 fdb->vid = FIELD_GET(QCA8K_ATU_VID_MASK, reg[2]);
597 fdb->aging = FIELD_GET(QCA8K_ATU_STATUS_MASK, reg[2]);
598 /* portmask - 54:48 */
599 fdb->port_mask = FIELD_GET(QCA8K_ATU_PORT_MASK, reg[1]);
601 fdb->mac[0] = FIELD_GET(QCA8K_ATU_ADDR0_MASK, reg[1]);
602 fdb->mac[1] = FIELD_GET(QCA8K_ATU_ADDR1_MASK, reg[1]);
603 fdb->mac[2] = FIELD_GET(QCA8K_ATU_ADDR2_MASK, reg[0]);
604 fdb->mac[3] = FIELD_GET(QCA8K_ATU_ADDR3_MASK, reg[0]);
605 fdb->mac[4] = FIELD_GET(QCA8K_ATU_ADDR4_MASK, reg[0]);
606 fdb->mac[5] = FIELD_GET(QCA8K_ATU_ADDR5_MASK, reg[0]);
612 qca8k_fdb_write(struct qca8k_priv *priv, u16 vid, u8 port_mask, const u8 *mac,
618 reg[2] = FIELD_PREP(QCA8K_ATU_VID_MASK, vid);
620 reg[2] |= FIELD_PREP(QCA8K_ATU_STATUS_MASK, aging);
621 /* portmask - 54:48 */
622 reg[1] = FIELD_PREP(QCA8K_ATU_PORT_MASK, port_mask);
624 reg[1] |= FIELD_PREP(QCA8K_ATU_ADDR0_MASK, mac[0]);
625 reg[1] |= FIELD_PREP(QCA8K_ATU_ADDR1_MASK, mac[1]);
626 reg[0] |= FIELD_PREP(QCA8K_ATU_ADDR2_MASK, mac[2]);
627 reg[0] |= FIELD_PREP(QCA8K_ATU_ADDR3_MASK, mac[3]);
628 reg[0] |= FIELD_PREP(QCA8K_ATU_ADDR4_MASK, mac[4]);
629 reg[0] |= FIELD_PREP(QCA8K_ATU_ADDR5_MASK, mac[5]);
631 /* load the array into the ARL table */
632 qca8k_bulk_write(priv, QCA8K_REG_ATU_DATA0, reg, sizeof(reg));
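/* Packing example (illustrative): for MAC 00:11:22:33:44:55 the bytes
 * land as ADDR0/ADDR1 (0x00, 0x11) in reg[1] and ADDR2..ADDR5 (0x22,
 * 0x33, 0x44, 0x55) in reg[0], with the vid and aging in reg[2] and
 * the portmask in reg[1], matching the bit ranges noted in the
 * comments above.
 */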
636 qca8k_fdb_access(struct qca8k_priv *priv, enum qca8k_fdb_cmd cmd, int port)
641 /* Set the command and FDB index */
642 reg = QCA8K_ATU_FUNC_BUSY;
645 reg |= QCA8K_ATU_FUNC_PORT_EN;
646 reg |= FIELD_PREP(QCA8K_ATU_FUNC_PORT_MASK, port);
649 /* Write the function register triggering the table access */
650 ret = qca8k_write(priv, QCA8K_REG_ATU_FUNC, reg);
654 /* wait for completion */
655 ret = qca8k_busy_wait(priv, QCA8K_REG_ATU_FUNC, QCA8K_ATU_FUNC_BUSY);
659 /* Check for table full violation when adding an entry */
660 if (cmd == QCA8K_FDB_LOAD) {
		ret = qca8k_read(priv, QCA8K_REG_ATU_FUNC, &reg);
664 if (reg & QCA8K_ATU_FUNC_FULL)
672 qca8k_fdb_next(struct qca8k_priv *priv, struct qca8k_fdb *fdb, int port)
676 qca8k_fdb_write(priv, fdb->vid, fdb->port_mask, fdb->mac, fdb->aging);
677 ret = qca8k_fdb_access(priv, QCA8K_FDB_NEXT, port);
681 return qca8k_fdb_read(priv, fdb);
685 qca8k_fdb_add(struct qca8k_priv *priv, const u8 *mac, u16 port_mask,
690 mutex_lock(&priv->reg_mutex);
691 qca8k_fdb_write(priv, vid, port_mask, mac, aging);
692 ret = qca8k_fdb_access(priv, QCA8K_FDB_LOAD, -1);
693 mutex_unlock(&priv->reg_mutex);
699 qca8k_fdb_del(struct qca8k_priv *priv, const u8 *mac, u16 port_mask, u16 vid)
703 mutex_lock(&priv->reg_mutex);
704 qca8k_fdb_write(priv, vid, port_mask, mac, 0);
705 ret = qca8k_fdb_access(priv, QCA8K_FDB_PURGE, -1);
706 mutex_unlock(&priv->reg_mutex);
712 qca8k_fdb_flush(struct qca8k_priv *priv)
714 mutex_lock(&priv->reg_mutex);
715 qca8k_fdb_access(priv, QCA8K_FDB_FLUSH, -1);
716 mutex_unlock(&priv->reg_mutex);
720 qca8k_fdb_search_and_insert(struct qca8k_priv *priv, u8 port_mask,
721 const u8 *mac, u16 vid)
723 struct qca8k_fdb fdb = { 0 };
726 mutex_lock(&priv->reg_mutex);
728 qca8k_fdb_write(priv, vid, 0, mac, 0);
729 ret = qca8k_fdb_access(priv, QCA8K_FDB_SEARCH, -1);
733 ret = qca8k_fdb_read(priv, &fdb);
	/* Rule exists. Delete it first. */
739 ret = qca8k_fdb_access(priv, QCA8K_FDB_PURGE, -1);
744 /* Add port to fdb portmask */
745 fdb.port_mask |= port_mask;
747 qca8k_fdb_write(priv, vid, fdb.port_mask, mac, fdb.aging);
748 ret = qca8k_fdb_access(priv, QCA8K_FDB_LOAD, -1);
751 mutex_unlock(&priv->reg_mutex);
756 qca8k_fdb_search_and_del(struct qca8k_priv *priv, u8 port_mask,
757 const u8 *mac, u16 vid)
759 struct qca8k_fdb fdb = { 0 };
762 mutex_lock(&priv->reg_mutex);
764 qca8k_fdb_write(priv, vid, 0, mac, 0);
765 ret = qca8k_fdb_access(priv, QCA8K_FDB_SEARCH, -1);
769 /* Rule doesn't exist. Why delete? */
775 ret = qca8k_fdb_access(priv, QCA8K_FDB_PURGE, -1);
	/* The only port in the rule is this port. Don't re-insert. */
780 if (fdb.port_mask == port_mask)
783 /* Remove port from port mask */
784 fdb.port_mask &= ~port_mask;
786 qca8k_fdb_write(priv, vid, fdb.port_mask, mac, fdb.aging);
787 ret = qca8k_fdb_access(priv, QCA8K_FDB_LOAD, -1);
790 mutex_unlock(&priv->reg_mutex);
795 qca8k_vlan_access(struct qca8k_priv *priv, enum qca8k_vlan_cmd cmd, u16 vid)
800 /* Set the command and VLAN index */
801 reg = QCA8K_VTU_FUNC1_BUSY;
803 reg |= FIELD_PREP(QCA8K_VTU_FUNC1_VID_MASK, vid);
805 /* Write the function register triggering the table access */
806 ret = qca8k_write(priv, QCA8K_REG_VTU_FUNC1, reg);
810 /* wait for completion */
811 ret = qca8k_busy_wait(priv, QCA8K_REG_VTU_FUNC1, QCA8K_VTU_FUNC1_BUSY);
815 /* Check for table full violation when adding an entry */
816 if (cmd == QCA8K_VLAN_LOAD) {
		ret = qca8k_read(priv, QCA8K_REG_VTU_FUNC1, &reg);
820 if (reg & QCA8K_VTU_FUNC1_FULL)
828 qca8k_vlan_add(struct qca8k_priv *priv, u8 port, u16 vid, bool untagged)
	/* We do the right thing with VLAN 0 and treat it as untagged while
	 * preserving the tag on egress.
	 */
840 mutex_lock(&priv->reg_mutex);
841 ret = qca8k_vlan_access(priv, QCA8K_VLAN_READ, vid);
	ret = qca8k_read(priv, QCA8K_REG_VTU_FUNC0, &reg);
848 reg |= QCA8K_VTU_FUNC0_VALID | QCA8K_VTU_FUNC0_IVL_EN;
849 reg &= ~QCA8K_VTU_FUNC0_EG_MODE_PORT_MASK(port);
851 reg |= QCA8K_VTU_FUNC0_EG_MODE_PORT_UNTAG(port);
853 reg |= QCA8K_VTU_FUNC0_EG_MODE_PORT_TAG(port);
855 ret = qca8k_write(priv, QCA8K_REG_VTU_FUNC0, reg);
858 ret = qca8k_vlan_access(priv, QCA8K_VLAN_LOAD, vid);
861 mutex_unlock(&priv->reg_mutex);
867 qca8k_vlan_del(struct qca8k_priv *priv, u8 port, u16 vid)
873 mutex_lock(&priv->reg_mutex);
874 ret = qca8k_vlan_access(priv, QCA8K_VLAN_READ, vid);
	ret = qca8k_read(priv, QCA8K_REG_VTU_FUNC0, &reg);
881 reg &= ~QCA8K_VTU_FUNC0_EG_MODE_PORT_MASK(port);
882 reg |= QCA8K_VTU_FUNC0_EG_MODE_PORT_NOT(port);
884 /* Check if we're the last member to be removed */
886 for (i = 0; i < QCA8K_NUM_PORTS; i++) {
887 mask = QCA8K_VTU_FUNC0_EG_MODE_PORT_NOT(i);
889 if ((reg & mask) != mask) {
896 ret = qca8k_vlan_access(priv, QCA8K_VLAN_PURGE, vid);
898 ret = qca8k_write(priv, QCA8K_REG_VTU_FUNC0, reg);
901 ret = qca8k_vlan_access(priv, QCA8K_VLAN_LOAD, vid);
905 mutex_unlock(&priv->reg_mutex);
911 qca8k_mib_init(struct qca8k_priv *priv)
915 mutex_lock(&priv->reg_mutex);
916 ret = regmap_update_bits(priv->regmap, QCA8K_REG_MIB,
917 QCA8K_MIB_FUNC | QCA8K_MIB_BUSY,
918 FIELD_PREP(QCA8K_MIB_FUNC, QCA8K_MIB_FLUSH) |
923 ret = qca8k_busy_wait(priv, QCA8K_REG_MIB, QCA8K_MIB_BUSY);
927 ret = regmap_set_bits(priv->regmap, QCA8K_REG_MIB, QCA8K_MIB_CPU_KEEP);
931 ret = qca8k_write(priv, QCA8K_REG_MODULE_EN, QCA8K_MODULE_EN_MIB);
934 mutex_unlock(&priv->reg_mutex);
939 qca8k_port_set_status(struct qca8k_priv *priv, int port, int enable)
941 u32 mask = QCA8K_PORT_STATUS_TXMAC | QCA8K_PORT_STATUS_RXMAC;
	/* Ports 0 and 6 have no internal PHY */
944 if (port > 0 && port < 6)
945 mask |= QCA8K_PORT_STATUS_LINK_AUTO;
948 regmap_set_bits(priv->regmap, QCA8K_REG_PORT_STATUS(port), mask);
950 regmap_clear_bits(priv->regmap, QCA8K_REG_PORT_STATUS(port), mask);
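/* Example: qca8k_port_set_status(priv, 3, 1) sets TXMAC/RXMAC and,
 * since port 3 has an internal PHY, also LINK_AUTO so the MAC tracks
 * the PHY link state. For ports 0 and 6 LINK_AUTO is left clear and
 * the link parameters are programmed explicitly instead (see
 * qca8k_phylink_mac_link_up()).
 */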
954 qca8k_phy_eth_busy_wait(struct qca8k_mgmt_eth_data *mgmt_eth_data,
955 struct sk_buff *read_skb, u32 *val)
957 struct sk_buff *skb = skb_copy(read_skb, GFP_KERNEL);
961 reinit_completion(&mgmt_eth_data->rw_done);
963 /* Increment seq_num and set it in the copy pkt */
964 mgmt_eth_data->seq++;
965 qca8k_mdio_header_fill_seq_num(skb, mgmt_eth_data->seq);
966 mgmt_eth_data->ack = false;
970 ret = wait_for_completion_timeout(&mgmt_eth_data->rw_done,
971 QCA8K_ETHERNET_TIMEOUT);
973 ack = mgmt_eth_data->ack;
981 *val = mgmt_eth_data->data[0];
987 qca8k_phy_eth_command(struct qca8k_priv *priv, bool read, int phy,
988 int regnum, u16 data)
990 struct sk_buff *write_skb, *clear_skb, *read_skb;
991 struct qca8k_mgmt_eth_data *mgmt_eth_data;
992 u32 write_val, clear_val = 0, val;
993 struct net_device *mgmt_master;
997 if (regnum >= QCA8K_MDIO_MASTER_MAX_REG)
1000 mgmt_eth_data = &priv->mgmt_eth_data;
1002 write_val = QCA8K_MDIO_MASTER_BUSY | QCA8K_MDIO_MASTER_EN |
1003 QCA8K_MDIO_MASTER_PHY_ADDR(phy) |
1004 QCA8K_MDIO_MASTER_REG_ADDR(regnum);
1007 write_val |= QCA8K_MDIO_MASTER_READ;
1009 write_val |= QCA8K_MDIO_MASTER_WRITE;
1010 write_val |= QCA8K_MDIO_MASTER_DATA(data);
	/* Prealloc all the needed skbs before taking the lock */
1014 write_skb = qca8k_alloc_mdio_header(MDIO_WRITE, QCA8K_MDIO_MASTER_CTRL, &write_val,
1015 QCA8K_ETHERNET_PHY_PRIORITY, sizeof(write_val));
1019 clear_skb = qca8k_alloc_mdio_header(MDIO_WRITE, QCA8K_MDIO_MASTER_CTRL, &clear_val,
1020 QCA8K_ETHERNET_PHY_PRIORITY, sizeof(clear_val));
1026 read_skb = qca8k_alloc_mdio_header(MDIO_READ, QCA8K_MDIO_MASTER_CTRL, &clear_val,
1027 QCA8K_ETHERNET_PHY_PRIORITY, sizeof(clear_val));
	/* Actually start the request:
	 * 1. Send mdio master packet
	 * 2. Busy Wait for mdio master command
	 * 3. Get the data if we are reading
	 * 4. Reset the mdio master (even with error)
	 */
1039 mutex_lock(&mgmt_eth_data->mutex);
	/* Check if the mgmt_master is operational */
	mgmt_master = priv->mgmt_master;
	if (!mgmt_master) {
		mutex_unlock(&mgmt_eth_data->mutex);
		ret = -EINVAL;
		goto err_mgmt_master;
	}
1049 read_skb->dev = mgmt_master;
1050 clear_skb->dev = mgmt_master;
1051 write_skb->dev = mgmt_master;
1053 reinit_completion(&mgmt_eth_data->rw_done);
1055 /* Increment seq_num and set it in the write pkt */
1056 mgmt_eth_data->seq++;
1057 qca8k_mdio_header_fill_seq_num(write_skb, mgmt_eth_data->seq);
1058 mgmt_eth_data->ack = false;
1060 dev_queue_xmit(write_skb);
1062 ret = wait_for_completion_timeout(&mgmt_eth_data->rw_done,
1063 QCA8K_ETHERNET_TIMEOUT);
1065 ack = mgmt_eth_data->ack;
1069 kfree_skb(read_skb);
1075 kfree_skb(read_skb);
1079 ret = read_poll_timeout(qca8k_phy_eth_busy_wait, ret1,
1080 !(val & QCA8K_MDIO_MASTER_BUSY), 0,
1081 QCA8K_BUSY_WAIT_TIMEOUT * USEC_PER_MSEC, false,
1082 mgmt_eth_data, read_skb, &val);
1084 if (ret < 0 && ret1 < 0) {
1090 reinit_completion(&mgmt_eth_data->rw_done);
1092 /* Increment seq_num and set it in the read pkt */
1093 mgmt_eth_data->seq++;
1094 qca8k_mdio_header_fill_seq_num(read_skb, mgmt_eth_data->seq);
1095 mgmt_eth_data->ack = false;
1097 dev_queue_xmit(read_skb);
1099 ret = wait_for_completion_timeout(&mgmt_eth_data->rw_done,
1100 QCA8K_ETHERNET_TIMEOUT);
1102 ack = mgmt_eth_data->ack;
1114 ret = mgmt_eth_data->data[0] & QCA8K_MDIO_MASTER_DATA_MASK;
1116 kfree_skb(read_skb);
1119 reinit_completion(&mgmt_eth_data->rw_done);
1121 /* Increment seq_num and set it in the clear pkt */
1122 mgmt_eth_data->seq++;
1123 qca8k_mdio_header_fill_seq_num(clear_skb, mgmt_eth_data->seq);
1124 mgmt_eth_data->ack = false;
1126 dev_queue_xmit(clear_skb);
1128 wait_for_completion_timeout(&mgmt_eth_data->rw_done,
1129 QCA8K_ETHERNET_TIMEOUT);
1131 mutex_unlock(&mgmt_eth_data->mutex);
1135 /* Error handling before lock */
1137 kfree_skb(read_skb);
1139 kfree_skb(clear_skb);
1141 kfree_skb(write_skb);
1147 qca8k_port_to_phy(int port)
1149 /* From Andrew Lunn:
1150 * Port 0 has no internal phy.
1151 * Port 1 has an internal PHY at MDIO address 0.
	 * Port 2 has an internal PHY at MDIO address 1.
	 * ...
	 * Port 5 has an internal PHY at MDIO address 4.
	 * Port 6 has no internal PHY.
	 */

	return port - 1;
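/* Example: with the mapping above, qca8k_port_to_phy(3) == 2, i.e.
 * user port 3 is reached at MDIO address 2 on the internal bus.
 */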
1162 qca8k_mdio_busy_wait(struct mii_bus *bus, u32 reg, u32 mask)
1168 qca8k_split_addr(reg, &r1, &r2, &page);
1170 ret = read_poll_timeout(qca8k_mii_read32, ret1, !(val & mask), 0,
1171 QCA8K_BUSY_WAIT_TIMEOUT * USEC_PER_MSEC, false,
1172 bus, 0x10 | r2, r1, &val);
	/* Check if qca8k_read has failed for a different reason
	 * before returning -ETIMEDOUT
	 */
1177 if (ret < 0 && ret1 < 0)
1184 qca8k_mdio_write(struct qca8k_priv *priv, int phy, int regnum, u16 data)
1186 struct mii_bus *bus = priv->bus;
1191 if (regnum >= QCA8K_MDIO_MASTER_MAX_REG)
1194 val = QCA8K_MDIO_MASTER_BUSY | QCA8K_MDIO_MASTER_EN |
1195 QCA8K_MDIO_MASTER_WRITE | QCA8K_MDIO_MASTER_PHY_ADDR(phy) |
1196 QCA8K_MDIO_MASTER_REG_ADDR(regnum) |
1197 QCA8K_MDIO_MASTER_DATA(data);
1199 qca8k_split_addr(QCA8K_MDIO_MASTER_CTRL, &r1, &r2, &page);
1201 mutex_lock_nested(&bus->mdio_lock, MDIO_MUTEX_NESTED);
1203 ret = qca8k_set_page(priv, page);
1207 qca8k_mii_write32(priv, 0x10 | r2, r1, val);
1209 ret = qca8k_mdio_busy_wait(bus, QCA8K_MDIO_MASTER_CTRL,
1210 QCA8K_MDIO_MASTER_BUSY);
	/* Even if the busy_wait times out, try to clear the MASTER_EN */
1214 qca8k_mii_write32(priv, 0x10 | r2, r1, 0);
1216 mutex_unlock(&bus->mdio_lock);
1222 qca8k_mdio_read(struct qca8k_priv *priv, int phy, int regnum)
1224 struct mii_bus *bus = priv->bus;
1229 if (regnum >= QCA8K_MDIO_MASTER_MAX_REG)
1232 val = QCA8K_MDIO_MASTER_BUSY | QCA8K_MDIO_MASTER_EN |
1233 QCA8K_MDIO_MASTER_READ | QCA8K_MDIO_MASTER_PHY_ADDR(phy) |
1234 QCA8K_MDIO_MASTER_REG_ADDR(regnum);
1236 qca8k_split_addr(QCA8K_MDIO_MASTER_CTRL, &r1, &r2, &page);
1238 mutex_lock_nested(&bus->mdio_lock, MDIO_MUTEX_NESTED);
1240 ret = qca8k_set_page(priv, page);
1244 qca8k_mii_write32(priv, 0x10 | r2, r1, val);
1246 ret = qca8k_mdio_busy_wait(bus, QCA8K_MDIO_MASTER_CTRL,
1247 QCA8K_MDIO_MASTER_BUSY);
1251 ret = qca8k_mii_read32(bus, 0x10 | r2, r1, &val);
	/* Even if the busy_wait times out, try to clear the MASTER_EN */
1255 qca8k_mii_write32(priv, 0x10 | r2, r1, 0);
1257 mutex_unlock(&bus->mdio_lock);
1260 ret = val & QCA8K_MDIO_MASTER_DATA_MASK;
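/* Access pattern sketch: an MDIO-master read programs MASTER_CTRL with
 * BUSY | EN | READ plus the PHY/reg address, polls until BUSY clears,
 * reads the data back from the same register and finally clears
 * MASTER_EN even on timeout, so the external MDC passthrough is
 * restored.
 */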
1266 qca8k_internal_mdio_write(struct mii_bus *slave_bus, int phy, int regnum, u16 data)
1268 struct qca8k_priv *priv = slave_bus->priv;
	/* Use mdio Ethernet when available, fall back to the legacy one on error */
1272 ret = qca8k_phy_eth_command(priv, false, phy, regnum, data);
1276 return qca8k_mdio_write(priv, phy, regnum, data);
1280 qca8k_internal_mdio_read(struct mii_bus *slave_bus, int phy, int regnum)
1282 struct qca8k_priv *priv = slave_bus->priv;
	/* Use mdio Ethernet when available, fall back to the legacy one on error */
1286 ret = qca8k_phy_eth_command(priv, true, phy, regnum, 0);
1290 return qca8k_mdio_read(priv, phy, regnum);
1294 qca8k_phy_write(struct dsa_switch *ds, int port, int regnum, u16 data)
1296 struct qca8k_priv *priv = ds->priv;
	/* Check if the legacy mapping should be used and the
	 * port is not correctly mapped to the right PHY in the
	 * devicetree
	 */
1303 if (priv->legacy_phy_port_mapping)
1304 port = qca8k_port_to_phy(port) % PHY_MAX_ADDR;
	/* Use mdio Ethernet when available, fall back to the legacy one on error */
1307 ret = qca8k_phy_eth_command(priv, false, port, regnum, 0);
1311 return qca8k_mdio_write(priv, port, regnum, data);
1315 qca8k_phy_read(struct dsa_switch *ds, int port, int regnum)
1317 struct qca8k_priv *priv = ds->priv;
	/* Check if the legacy mapping should be used and the
	 * port is not correctly mapped to the right PHY in the
	 * devicetree
	 */
1324 if (priv->legacy_phy_port_mapping)
1325 port = qca8k_port_to_phy(port) % PHY_MAX_ADDR;
	/* Use mdio Ethernet when available, fall back to the legacy one on error */
1328 ret = qca8k_phy_eth_command(priv, true, port, regnum, 0);
1332 ret = qca8k_mdio_read(priv, port, regnum);
1341 qca8k_mdio_register(struct qca8k_priv *priv, struct device_node *mdio)
1343 struct dsa_switch *ds = priv->ds;
1344 struct mii_bus *bus;
1346 bus = devm_mdiobus_alloc(ds->dev);
1351 bus->priv = (void *)priv;
1352 bus->name = "qca8k slave mii";
1353 bus->read = qca8k_internal_mdio_read;
1354 bus->write = qca8k_internal_mdio_write;
1355 snprintf(bus->id, MII_BUS_ID_SIZE, "qca8k-%d",
1358 bus->parent = ds->dev;
1359 bus->phy_mask = ~ds->phys_mii_mask;
1361 ds->slave_mii_bus = bus;
1363 return devm_of_mdiobus_register(priv->dev, bus, mdio);
1367 qca8k_setup_mdio_bus(struct qca8k_priv *priv)
1369 u32 internal_mdio_mask = 0, external_mdio_mask = 0, reg;
1370 struct device_node *ports, *port, *mdio;
1371 phy_interface_t mode;
1374 ports = of_get_child_by_name(priv->dev->of_node, "ports");
1376 ports = of_get_child_by_name(priv->dev->of_node, "ethernet-ports");
1381 for_each_available_child_of_node(ports, port) {
		err = of_property_read_u32(port, "reg", &reg);
1389 if (!dsa_is_user_port(priv->ds, reg))
1392 of_get_phy_mode(port, &mode);
1394 if (of_property_read_bool(port, "phy-handle") &&
1395 mode != PHY_INTERFACE_MODE_INTERNAL)
1396 external_mdio_mask |= BIT(reg);
1398 internal_mdio_mask |= BIT(reg);
1402 if (!external_mdio_mask && !internal_mdio_mask) {
1403 dev_err(priv->dev, "no PHYs are defined.\n");
1407 /* The QCA8K_MDIO_MASTER_EN Bit, which grants access to PHYs through
1408 * the MDIO_MASTER register also _disconnects_ the external MDC
1409 * passthrough to the internal PHYs. It's not possible to use both
1410 * configurations at the same time!
1412 * Because this came up during the review process:
	 * If the external mdio-bus driver is capable of magically disabling
	 * the QCA8K_MDIO_MASTER_EN and mutex/spin-locking out the qca8k's
	 * accessors for the time being, it would be possible to pull this
	 * off.
	 */
1418 if (!!external_mdio_mask && !!internal_mdio_mask) {
1419 dev_err(priv->dev, "either internal or external mdio bus configuration is supported.\n");
1423 if (external_mdio_mask) {
		/* Make sure to disable the internal mdio bus in case
		 * a dt-overlay and driver reload changed the configuration
		 */
1428 return regmap_clear_bits(priv->regmap, QCA8K_MDIO_MASTER_CTRL,
1429 QCA8K_MDIO_MASTER_EN);
	/* Check if the devicetree declares the port:phy mapping */
1433 mdio = of_get_child_by_name(priv->dev->of_node, "mdio");
1434 if (of_device_is_available(mdio)) {
1435 err = qca8k_mdio_register(priv, mdio);
	/* If a mapping can't be found, the legacy mapping is used,
	 * relying on the qca8k_port_to_phy function
	 */
1445 priv->legacy_phy_port_mapping = true;
1446 priv->ops.phy_read = qca8k_phy_read;
1447 priv->ops.phy_write = qca8k_phy_write;
1453 qca8k_setup_mac_pwr_sel(struct qca8k_priv *priv)
	/* SoC specific settings for ipq8064.
	 * If more devices require this, consider adding
	 * a dedicated binding.
	 */
1462 if (of_machine_is_compatible("qcom,ipq8064"))
1463 mask |= QCA8K_MAC_PWR_RGMII0_1_8V;
1465 /* SoC specific settings for ipq8065 */
1466 if (of_machine_is_compatible("qcom,ipq8065"))
1467 mask |= QCA8K_MAC_PWR_RGMII1_1_8V;
1470 ret = qca8k_rmw(priv, QCA8K_REG_MAC_PWR_SEL,
1471 QCA8K_MAC_PWR_RGMII0_1_8V |
1472 QCA8K_MAC_PWR_RGMII1_1_8V,
1479 static int qca8k_find_cpu_port(struct dsa_switch *ds)
1481 struct qca8k_priv *priv = ds->priv;
	/* Find the connected cpu port. Valid ports are 0 or 6 */
1484 if (dsa_is_cpu_port(ds, 0))
1487 dev_dbg(priv->dev, "port 0 is not the CPU port. Checking port 6");
1489 if (dsa_is_cpu_port(ds, 6))
1496 qca8k_setup_of_pws_reg(struct qca8k_priv *priv)
1498 struct device_node *node = priv->dev->of_node;
1499 const struct qca8k_match_data *data;
	/* The QCA8327 requires the correct package mode to be set.
	 * Its bigger brother, the QCA8328, has the 172 pin layout.
	 * This should be applied by default, but we set it here just to make sure.
	 */
1507 if (priv->switch_id == QCA8K_ID_QCA8327) {
1508 data = of_device_get_match_data(priv->dev);
1510 /* Set the correct package of 148 pin for QCA8327 */
1511 if (data->reduced_package)
1512 val |= QCA8327_PWS_PACKAGE148_EN;
1514 ret = qca8k_rmw(priv, QCA8K_REG_PWS, QCA8327_PWS_PACKAGE148_EN,
1520 if (of_property_read_bool(node, "qca,ignore-power-on-sel"))
1521 val |= QCA8K_PWS_POWER_ON_SEL;
1523 if (of_property_read_bool(node, "qca,led-open-drain")) {
1524 if (!(val & QCA8K_PWS_POWER_ON_SEL)) {
			dev_err(priv->dev, "qca,led-open-drain requires qca,ignore-power-on-sel to be set.");
1529 val |= QCA8K_PWS_LED_OPEN_EN_CSR;
1532 return qca8k_rmw(priv, QCA8K_REG_PWS,
1533 QCA8K_PWS_LED_OPEN_EN_CSR | QCA8K_PWS_POWER_ON_SEL,
1538 qca8k_parse_port_config(struct qca8k_priv *priv)
1540 int port, cpu_port_index = -1, ret;
1541 struct device_node *port_dn;
1542 phy_interface_t mode;
1543 struct dsa_port *dp;
	/* We have 2 CPU ports. Check them both. */
1547 for (port = 0; port < QCA8K_NUM_PORTS; port++) {
		/* Skip ports that are not one of the two CPU ports (0 and 6) */
1549 if (port != 0 && port != 6)
1552 dp = dsa_to_port(priv->ds, port);
1556 if (!of_device_is_available(port_dn))
1559 ret = of_get_phy_mode(port_dn, &mode);
1564 case PHY_INTERFACE_MODE_RGMII:
1565 case PHY_INTERFACE_MODE_RGMII_ID:
1566 case PHY_INTERFACE_MODE_RGMII_TXID:
1567 case PHY_INTERFACE_MODE_RGMII_RXID:
1568 case PHY_INTERFACE_MODE_SGMII:
1571 if (!of_property_read_u32(port_dn, "tx-internal-delay-ps", &delay))
1572 /* Switch regs accept value in ns, convert ps to ns */
1573 delay = delay / 1000;
1574 else if (mode == PHY_INTERFACE_MODE_RGMII_ID ||
1575 mode == PHY_INTERFACE_MODE_RGMII_TXID)
1578 if (!FIELD_FIT(QCA8K_PORT_PAD_RGMII_TX_DELAY_MASK, delay)) {
1579 dev_err(priv->dev, "rgmii tx delay is limited to a max value of 3ns, setting to the max value");
1583 priv->ports_config.rgmii_tx_delay[cpu_port_index] = delay;
1587 if (!of_property_read_u32(port_dn, "rx-internal-delay-ps", &delay))
1588 /* Switch regs accept value in ns, convert ps to ns */
1589 delay = delay / 1000;
1590 else if (mode == PHY_INTERFACE_MODE_RGMII_ID ||
1591 mode == PHY_INTERFACE_MODE_RGMII_RXID)
1594 if (!FIELD_FIT(QCA8K_PORT_PAD_RGMII_RX_DELAY_MASK, delay)) {
1595 dev_err(priv->dev, "rgmii rx delay is limited to a max value of 3ns, setting to the max value");
1599 priv->ports_config.rgmii_rx_delay[cpu_port_index] = delay;
1601 /* Skip sgmii parsing for rgmii* mode */
1602 if (mode == PHY_INTERFACE_MODE_RGMII ||
1603 mode == PHY_INTERFACE_MODE_RGMII_ID ||
1604 mode == PHY_INTERFACE_MODE_RGMII_TXID ||
1605 mode == PHY_INTERFACE_MODE_RGMII_RXID)
1608 if (of_property_read_bool(port_dn, "qca,sgmii-txclk-falling-edge"))
1609 priv->ports_config.sgmii_tx_clk_falling_edge = true;
1611 if (of_property_read_bool(port_dn, "qca,sgmii-rxclk-falling-edge"))
1612 priv->ports_config.sgmii_rx_clk_falling_edge = true;
1614 if (of_property_read_bool(port_dn, "qca,sgmii-enable-pll")) {
1615 priv->ports_config.sgmii_enable_pll = true;
1617 if (priv->switch_id == QCA8K_ID_QCA8327) {
1618 dev_err(priv->dev, "SGMII PLL should NOT be enabled for qca8327. Aborting enabling");
1619 priv->ports_config.sgmii_enable_pll = false;
1622 if (priv->switch_revision < 2)
1623 dev_warn(priv->dev, "SGMII PLL should NOT be enabled for qca8337 with revision 2 or more.");
1636 qca8k_mac_config_setup_internal_delay(struct qca8k_priv *priv, int cpu_port_index,
	/* Delay can be declared in 3 different ways:
	 * the standard "rgmii" mode together with the internal-delay
	 * bindings, or an rgmii-id / rgmii-txid / rgmii-rxid phy mode.
	 * The parse logic sets a delay different from 0 only when one
	 * of the 3 ways is used. In all other cases the delay is not
	 * enabled. With ID or TX/RXID the delay is enabled and set to
	 * the default and recommended value.
	 */
1650 if (priv->ports_config.rgmii_tx_delay[cpu_port_index]) {
1651 delay = priv->ports_config.rgmii_tx_delay[cpu_port_index];
1653 val |= QCA8K_PORT_PAD_RGMII_TX_DELAY(delay) |
1654 QCA8K_PORT_PAD_RGMII_TX_DELAY_EN;
1657 if (priv->ports_config.rgmii_rx_delay[cpu_port_index]) {
1658 delay = priv->ports_config.rgmii_rx_delay[cpu_port_index];
1660 val |= QCA8K_PORT_PAD_RGMII_RX_DELAY(delay) |
1661 QCA8K_PORT_PAD_RGMII_RX_DELAY_EN;
1664 /* Set RGMII delay based on the selected values */
1665 ret = qca8k_rmw(priv, reg,
1666 QCA8K_PORT_PAD_RGMII_TX_DELAY_MASK |
1667 QCA8K_PORT_PAD_RGMII_RX_DELAY_MASK |
1668 QCA8K_PORT_PAD_RGMII_TX_DELAY_EN |
1669 QCA8K_PORT_PAD_RGMII_RX_DELAY_EN,
1672 dev_err(priv->dev, "Failed to set internal delay for CPU port%d",
1673 cpu_port_index == QCA8K_CPU_PORT0 ? 0 : 6);
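/* Worked example (illustrative): a devicetree property
 * "tx-internal-delay-ps = <2000>" is parsed to 2 ns above and ends up
 * here as QCA8K_PORT_PAD_RGMII_TX_DELAY(2) |
 * QCA8K_PORT_PAD_RGMII_TX_DELAY_EN in the CPU port pad control
 * register.
 */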
1676 static struct phylink_pcs *
1677 qca8k_phylink_mac_select_pcs(struct dsa_switch *ds, int port,
1678 phy_interface_t interface)
1680 struct qca8k_priv *priv = ds->priv;
1681 struct phylink_pcs *pcs = NULL;
1683 switch (interface) {
1684 case PHY_INTERFACE_MODE_SGMII:
1685 case PHY_INTERFACE_MODE_1000BASEX:
1688 pcs = &priv->pcs_port_0.pcs;
1692 pcs = &priv->pcs_port_6.pcs;
1705 qca8k_phylink_mac_config(struct dsa_switch *ds, int port, unsigned int mode,
1706 const struct phylink_link_state *state)
1708 struct qca8k_priv *priv = ds->priv;
1713 case 0: /* 1st CPU port */
1714 if (state->interface != PHY_INTERFACE_MODE_RGMII &&
1715 state->interface != PHY_INTERFACE_MODE_RGMII_ID &&
1716 state->interface != PHY_INTERFACE_MODE_RGMII_TXID &&
1717 state->interface != PHY_INTERFACE_MODE_RGMII_RXID &&
1718 state->interface != PHY_INTERFACE_MODE_SGMII)
1721 reg = QCA8K_REG_PORT0_PAD_CTRL;
1722 cpu_port_index = QCA8K_CPU_PORT0;
1729 /* Internal PHY, nothing to do */
1731 case 6: /* 2nd CPU port / external PHY */
1732 if (state->interface != PHY_INTERFACE_MODE_RGMII &&
1733 state->interface != PHY_INTERFACE_MODE_RGMII_ID &&
1734 state->interface != PHY_INTERFACE_MODE_RGMII_TXID &&
1735 state->interface != PHY_INTERFACE_MODE_RGMII_RXID &&
1736 state->interface != PHY_INTERFACE_MODE_SGMII &&
1737 state->interface != PHY_INTERFACE_MODE_1000BASEX)
1740 reg = QCA8K_REG_PORT6_PAD_CTRL;
1741 cpu_port_index = QCA8K_CPU_PORT6;
1744 dev_err(ds->dev, "%s: unsupported port: %i\n", __func__, port);
1748 if (port != 6 && phylink_autoneg_inband(mode)) {
1749 dev_err(ds->dev, "%s: in-band negotiation unsupported\n",
1754 switch (state->interface) {
1755 case PHY_INTERFACE_MODE_RGMII:
1756 case PHY_INTERFACE_MODE_RGMII_ID:
1757 case PHY_INTERFACE_MODE_RGMII_TXID:
1758 case PHY_INTERFACE_MODE_RGMII_RXID:
1759 qca8k_write(priv, reg, QCA8K_PORT_PAD_RGMII_EN);
1761 /* Configure rgmii delay */
1762 qca8k_mac_config_setup_internal_delay(priv, cpu_port_index, reg);
		/* The QCA8337 requires the rgmii rx delay to be set for all
		 * ports. This is enabled through PORT5_PAD_CTRL for all
		 * ports, rather than individual port registers.
		 */
1768 if (priv->switch_id == QCA8K_ID_QCA8337)
1769 qca8k_write(priv, QCA8K_REG_PORT5_PAD_CTRL,
1770 QCA8K_PORT_PAD_RGMII_RX_DELAY_EN);
1772 case PHY_INTERFACE_MODE_SGMII:
1773 case PHY_INTERFACE_MODE_1000BASEX:
1774 /* Enable SGMII on the port */
1775 qca8k_write(priv, reg, QCA8K_PORT_PAD_SGMII_EN);
1778 dev_err(ds->dev, "xMII mode %s not supported for port %d\n",
1779 phy_modes(state->interface), port);
1784 static void qca8k_phylink_get_caps(struct dsa_switch *ds, int port,
1785 struct phylink_config *config)
1788 case 0: /* 1st CPU port */
1789 phy_interface_set_rgmii(config->supported_interfaces);
1790 __set_bit(PHY_INTERFACE_MODE_SGMII,
1791 config->supported_interfaces);
1800 __set_bit(PHY_INTERFACE_MODE_GMII,
1801 config->supported_interfaces);
1802 __set_bit(PHY_INTERFACE_MODE_INTERNAL,
1803 config->supported_interfaces);
1806 case 6: /* 2nd CPU port / external PHY */
1807 phy_interface_set_rgmii(config->supported_interfaces);
1808 __set_bit(PHY_INTERFACE_MODE_SGMII,
1809 config->supported_interfaces);
1810 __set_bit(PHY_INTERFACE_MODE_1000BASEX,
1811 config->supported_interfaces);
1815 config->mac_capabilities = MAC_ASYM_PAUSE | MAC_SYM_PAUSE |
1816 MAC_10 | MAC_100 | MAC_1000FD;
1818 config->legacy_pre_march2020 = false;
1822 qca8k_phylink_mac_link_down(struct dsa_switch *ds, int port, unsigned int mode,
1823 phy_interface_t interface)
1825 struct qca8k_priv *priv = ds->priv;
1827 qca8k_port_set_status(priv, port, 0);
1831 qca8k_phylink_mac_link_up(struct dsa_switch *ds, int port, unsigned int mode,
1832 phy_interface_t interface, struct phy_device *phydev,
1833 int speed, int duplex, bool tx_pause, bool rx_pause)
1835 struct qca8k_priv *priv = ds->priv;
1838 if (phylink_autoneg_inband(mode)) {
1839 reg = QCA8K_PORT_STATUS_LINK_AUTO;
1843 reg = QCA8K_PORT_STATUS_SPEED_10;
1846 reg = QCA8K_PORT_STATUS_SPEED_100;
1849 reg = QCA8K_PORT_STATUS_SPEED_1000;
1852 reg = QCA8K_PORT_STATUS_LINK_AUTO;
1856 if (duplex == DUPLEX_FULL)
1857 reg |= QCA8K_PORT_STATUS_DUPLEX;
1859 if (rx_pause || dsa_is_cpu_port(ds, port))
1860 reg |= QCA8K_PORT_STATUS_RXFLOW;
1862 if (tx_pause || dsa_is_cpu_port(ds, port))
1863 reg |= QCA8K_PORT_STATUS_TXFLOW;
1866 reg |= QCA8K_PORT_STATUS_TXMAC | QCA8K_PORT_STATUS_RXMAC;
1868 qca8k_write(priv, QCA8K_REG_PORT_STATUS(port), reg);
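/* Example (illustrative): a forced 1000 Mbit/s full-duplex link with
 * pause in both directions resolves to SPEED_1000 | DUPLEX | RXFLOW |
 * TXFLOW | TXMAC | RXMAC in PORT_STATUS, while an in-band mode simply
 * writes LINK_AUTO and lets the hardware track the PHY.
 */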
1871 static struct qca8k_pcs *pcs_to_qca8k_pcs(struct phylink_pcs *pcs)
1873 return container_of(pcs, struct qca8k_pcs, pcs);
1876 static void qca8k_pcs_get_state(struct phylink_pcs *pcs,
1877 struct phylink_link_state *state)
1879 struct qca8k_priv *priv = pcs_to_qca8k_pcs(pcs)->priv;
1880 int port = pcs_to_qca8k_pcs(pcs)->port;
	ret = qca8k_read(priv, QCA8K_REG_PORT_STATUS(port), &reg);
1886 state->link = false;
1890 state->link = !!(reg & QCA8K_PORT_STATUS_LINK_UP);
1891 state->an_complete = state->link;
1892 state->an_enabled = !!(reg & QCA8K_PORT_STATUS_LINK_AUTO);
1893 state->duplex = (reg & QCA8K_PORT_STATUS_DUPLEX) ? DUPLEX_FULL :
1896 switch (reg & QCA8K_PORT_STATUS_SPEED) {
1897 case QCA8K_PORT_STATUS_SPEED_10:
1898 state->speed = SPEED_10;
1900 case QCA8K_PORT_STATUS_SPEED_100:
1901 state->speed = SPEED_100;
1903 case QCA8K_PORT_STATUS_SPEED_1000:
1904 state->speed = SPEED_1000;
1907 state->speed = SPEED_UNKNOWN;
1911 if (reg & QCA8K_PORT_STATUS_RXFLOW)
1912 state->pause |= MLO_PAUSE_RX;
1913 if (reg & QCA8K_PORT_STATUS_TXFLOW)
1914 state->pause |= MLO_PAUSE_TX;
1917 static int qca8k_pcs_config(struct phylink_pcs *pcs, unsigned int mode,
1918 phy_interface_t interface,
1919 const unsigned long *advertising,
1920 bool permit_pause_to_mac)
1922 struct qca8k_priv *priv = pcs_to_qca8k_pcs(pcs)->priv;
1923 int cpu_port_index, ret, port;
1926 port = pcs_to_qca8k_pcs(pcs)->port;
1929 reg = QCA8K_REG_PORT0_PAD_CTRL;
1930 cpu_port_index = QCA8K_CPU_PORT0;
1934 reg = QCA8K_REG_PORT6_PAD_CTRL;
1935 cpu_port_index = QCA8K_CPU_PORT6;
1942 /* Enable/disable SerDes auto-negotiation as necessary */
1943 ret = qca8k_read(priv, QCA8K_REG_PWS, &val);
1946 if (phylink_autoneg_inband(mode))
1947 val &= ~QCA8K_PWS_SERDES_AEN_DIS;
1949 val |= QCA8K_PWS_SERDES_AEN_DIS;
1950 qca8k_write(priv, QCA8K_REG_PWS, val);
1952 /* Configure the SGMII parameters */
1953 ret = qca8k_read(priv, QCA8K_REG_SGMII_CTRL, &val);
1957 val |= QCA8K_SGMII_EN_SD;
1959 if (priv->ports_config.sgmii_enable_pll)
1960 val |= QCA8K_SGMII_EN_PLL | QCA8K_SGMII_EN_RX |
1963 if (dsa_is_cpu_port(priv->ds, port)) {
1964 /* CPU port, we're talking to the CPU MAC, be a PHY */
1965 val &= ~QCA8K_SGMII_MODE_CTRL_MASK;
1966 val |= QCA8K_SGMII_MODE_CTRL_PHY;
1967 } else if (interface == PHY_INTERFACE_MODE_SGMII) {
1968 val &= ~QCA8K_SGMII_MODE_CTRL_MASK;
1969 val |= QCA8K_SGMII_MODE_CTRL_MAC;
1970 } else if (interface == PHY_INTERFACE_MODE_1000BASEX) {
1971 val &= ~QCA8K_SGMII_MODE_CTRL_MASK;
1972 val |= QCA8K_SGMII_MODE_CTRL_BASEX;
1975 qca8k_write(priv, QCA8K_REG_SGMII_CTRL, val);
	/* The original code reports port instability, as SGMII also
	 * requires the delay to be set. Apply the advised values here
	 * or take them from DT.
	 */
1980 if (interface == PHY_INTERFACE_MODE_SGMII)
1981 qca8k_mac_config_setup_internal_delay(priv, cpu_port_index, reg);
	/* For qca8327/qca8328/qca8334/qca8338 the sgmii setting is unique
	 * and the falling edge is set by writing to the PORT0 PAD reg.
	 */
1985 if (priv->switch_id == QCA8K_ID_QCA8327 ||
1986 priv->switch_id == QCA8K_ID_QCA8337)
1987 reg = QCA8K_REG_PORT0_PAD_CTRL;
1991 /* SGMII Clock phase configuration */
1992 if (priv->ports_config.sgmii_rx_clk_falling_edge)
1993 val |= QCA8K_PORT0_PAD_SGMII_RXCLK_FALLING_EDGE;
1995 if (priv->ports_config.sgmii_tx_clk_falling_edge)
1996 val |= QCA8K_PORT0_PAD_SGMII_TXCLK_FALLING_EDGE;
1999 ret = qca8k_rmw(priv, reg,
2000 QCA8K_PORT0_PAD_SGMII_RXCLK_FALLING_EDGE |
2001 QCA8K_PORT0_PAD_SGMII_TXCLK_FALLING_EDGE,
2007 static void qca8k_pcs_an_restart(struct phylink_pcs *pcs)
2011 static const struct phylink_pcs_ops qca8k_pcs_ops = {
2012 .pcs_get_state = qca8k_pcs_get_state,
2013 .pcs_config = qca8k_pcs_config,
2014 .pcs_an_restart = qca8k_pcs_an_restart,
2017 static void qca8k_setup_pcs(struct qca8k_priv *priv, struct qca8k_pcs *qpcs,
2020 qpcs->pcs.ops = &qca8k_pcs_ops;
2022 /* We don't have interrupts for link changes, so we need to poll */
2023 qpcs->pcs.poll = true;
2029 qca8k_get_strings(struct dsa_switch *ds, int port, u32 stringset, uint8_t *data)
2031 const struct qca8k_match_data *match_data;
2032 struct qca8k_priv *priv = ds->priv;
2035 if (stringset != ETH_SS_STATS)
2038 match_data = of_device_get_match_data(priv->dev);
2040 for (i = 0; i < match_data->mib_count; i++)
2041 strncpy(data + i * ETH_GSTRING_LEN, ar8327_mib[i].name,
2045 static void qca8k_mib_autocast_handler(struct dsa_switch *ds, struct sk_buff *skb)
2047 const struct qca8k_match_data *match_data;
2048 struct qca8k_mib_eth_data *mib_eth_data;
2049 struct qca8k_priv *priv = ds->priv;
2050 const struct qca8k_mib_desc *mib;
2051 struct mib_ethhdr *mib_ethhdr;
2052 int i, mib_len, offset = 0;
2056 mib_ethhdr = (struct mib_ethhdr *)skb_mac_header(skb);
2057 mib_eth_data = &priv->mib_eth_data;
	/* The switch autocasts for every port. Ignore other packets and
	 * parse only the requested one.
	 */
2062 port = FIELD_GET(QCA_HDR_RECV_SOURCE_PORT, ntohs(mib_ethhdr->hdr));
2063 if (port != mib_eth_data->req_port)
2066 match_data = device_get_match_data(priv->dev);
2067 data = mib_eth_data->data;
2069 for (i = 0; i < match_data->mib_count; i++) {
2070 mib = &ar8327_mib[i];
		/* The first 3 MIBs are present in the skb head */
2074 data[i] = mib_ethhdr->data[i];
2078 mib_len = sizeof(uint32_t);
		/* Some MIBs are 64 bits wide */
2082 mib_len = sizeof(uint64_t);
		/* Copy the mib value from the packet to the buffer */
2085 memcpy(data + i, skb->data + offset, mib_len);
		/* Set the offset for the next mib */
		offset += mib_len;
	/* Complete on receiving all the MIB packets */
2093 if (refcount_dec_and_test(&mib_eth_data->port_parsed))
2094 complete(&mib_eth_data->rw_done);
2098 qca8k_get_ethtool_stats_eth(struct dsa_switch *ds, int port, u64 *data)
2100 struct dsa_port *dp = dsa_to_port(ds, port);
2101 struct qca8k_mib_eth_data *mib_eth_data;
2102 struct qca8k_priv *priv = ds->priv;
2105 mib_eth_data = &priv->mib_eth_data;
2107 mutex_lock(&mib_eth_data->mutex);
2109 reinit_completion(&mib_eth_data->rw_done);
2111 mib_eth_data->req_port = dp->index;
2112 mib_eth_data->data = data;
2113 refcount_set(&mib_eth_data->port_parsed, QCA8K_NUM_PORTS);
2115 mutex_lock(&priv->reg_mutex);
2117 /* Send mib autocast request */
2118 ret = regmap_update_bits(priv->regmap, QCA8K_REG_MIB,
2119 QCA8K_MIB_FUNC | QCA8K_MIB_BUSY,
2120 FIELD_PREP(QCA8K_MIB_FUNC, QCA8K_MIB_CAST) |
2123 mutex_unlock(&priv->reg_mutex);
2128 ret = wait_for_completion_timeout(&mib_eth_data->rw_done, QCA8K_ETHERNET_TIMEOUT);
2131 mutex_unlock(&mib_eth_data->mutex);
2137 qca8k_get_ethtool_stats(struct dsa_switch *ds, int port,
2140 struct qca8k_priv *priv = (struct qca8k_priv *)ds->priv;
2141 const struct qca8k_match_data *match_data;
2142 const struct qca8k_mib_desc *mib;
2147 if (priv->mgmt_master &&
2148 qca8k_get_ethtool_stats_eth(ds, port, data) > 0)
2151 match_data = of_device_get_match_data(priv->dev);
2153 for (i = 0; i < match_data->mib_count; i++) {
2154 mib = &ar8327_mib[i];
2155 reg = QCA8K_PORT_MIB_COUNTER(port) + mib->offset;
2157 ret = qca8k_read(priv, reg, &val);
2161 if (mib->size == 2) {
2162 ret = qca8k_read(priv, reg + 4, &hi);
2169 data[i] |= (u64)hi << 32;
2174 qca8k_get_sset_count(struct dsa_switch *ds, int port, int sset)
2176 const struct qca8k_match_data *match_data;
2177 struct qca8k_priv *priv = ds->priv;
2179 if (sset != ETH_SS_STATS)
2182 match_data = of_device_get_match_data(priv->dev);
2184 return match_data->mib_count;
2188 qca8k_set_mac_eee(struct dsa_switch *ds, int port, struct ethtool_eee *eee)
2190 struct qca8k_priv *priv = (struct qca8k_priv *)ds->priv;
2191 u32 lpi_en = QCA8K_REG_EEE_CTRL_LPI_EN(port);
2195 mutex_lock(&priv->reg_mutex);
	ret = qca8k_read(priv, QCA8K_REG_EEE_CTRL, &reg);
2200 if (eee->eee_enabled)
2204 ret = qca8k_write(priv, QCA8K_REG_EEE_CTRL, reg);
2207 mutex_unlock(&priv->reg_mutex);
2212 qca8k_get_mac_eee(struct dsa_switch *ds, int port, struct ethtool_eee *e)
2214 /* Nothing to do on the port's MAC */
2219 qca8k_port_stp_state_set(struct dsa_switch *ds, int port, u8 state)
2221 struct qca8k_priv *priv = (struct qca8k_priv *)ds->priv;
2225 case BR_STATE_DISABLED:
2226 stp_state = QCA8K_PORT_LOOKUP_STATE_DISABLED;
2228 case BR_STATE_BLOCKING:
2229 stp_state = QCA8K_PORT_LOOKUP_STATE_BLOCKING;
2231 case BR_STATE_LISTENING:
2232 stp_state = QCA8K_PORT_LOOKUP_STATE_LISTENING;
2234 case BR_STATE_LEARNING:
2235 stp_state = QCA8K_PORT_LOOKUP_STATE_LEARNING;
2237 case BR_STATE_FORWARDING:
2239 stp_state = QCA8K_PORT_LOOKUP_STATE_FORWARD;
2243 qca8k_rmw(priv, QCA8K_PORT_LOOKUP_CTRL(port),
2244 QCA8K_PORT_LOOKUP_STATE_MASK, stp_state);
2247 static int qca8k_port_bridge_join(struct dsa_switch *ds, int port,
2248 struct dsa_bridge bridge,
2249 bool *tx_fwd_offload)
2251 struct qca8k_priv *priv = (struct qca8k_priv *)ds->priv;
2252 int port_mask, cpu_port;
2255 cpu_port = dsa_to_port(ds, port)->cpu_dp->index;
2256 port_mask = BIT(cpu_port);
2258 for (i = 0; i < QCA8K_NUM_PORTS; i++) {
2259 if (dsa_is_cpu_port(ds, i))
2261 if (!dsa_port_offloads_bridge(dsa_to_port(ds, i), &bridge))
		/* Add this port to the portvlan mask of the other ports
		 * in the bridge
		 */
2266 ret = regmap_set_bits(priv->regmap,
2267 QCA8K_PORT_LOOKUP_CTRL(i),
2272 port_mask |= BIT(i);
2275 /* Add all other ports to this ports portvlan mask */
2276 ret = qca8k_rmw(priv, QCA8K_PORT_LOOKUP_CTRL(port),
2277 QCA8K_PORT_LOOKUP_MEMBER, port_mask);
2282 static void qca8k_port_bridge_leave(struct dsa_switch *ds, int port,
2283 struct dsa_bridge bridge)
2285 struct qca8k_priv *priv = (struct qca8k_priv *)ds->priv;
2288 cpu_port = dsa_to_port(ds, port)->cpu_dp->index;
2290 for (i = 0; i < QCA8K_NUM_PORTS; i++) {
2291 if (dsa_is_cpu_port(ds, i))
2293 if (!dsa_port_offloads_bridge(dsa_to_port(ds, i), &bridge))
		/* Remove this port from the portvlan mask of the other ports
		 * in the bridge
		 */
2298 regmap_clear_bits(priv->regmap,
2299 QCA8K_PORT_LOOKUP_CTRL(i),
	/* Set the cpu port to be the only one in the portvlan mask of
	 * this port
	 */
2306 qca8k_rmw(priv, QCA8K_PORT_LOOKUP_CTRL(port),
2307 QCA8K_PORT_LOOKUP_MEMBER, BIT(cpu_port));
2311 qca8k_port_fast_age(struct dsa_switch *ds, int port)
2313 struct qca8k_priv *priv = ds->priv;
2315 mutex_lock(&priv->reg_mutex);
2316 qca8k_fdb_access(priv, QCA8K_FDB_FLUSH_PORT, port);
2317 mutex_unlock(&priv->reg_mutex);
2321 qca8k_set_ageing_time(struct dsa_switch *ds, unsigned int msecs)
2323 struct qca8k_priv *priv = ds->priv;
2324 unsigned int secs = msecs / 1000;
	/* AGE_TIME reg is set in 7s steps */
	val = secs / 7;

	/* Handle case with 0 as val to NOT disable
	 * learning
	 */
	if (!val)
		val = 1;
2336 return regmap_update_bits(priv->regmap, QCA8K_REG_ATU_CTRL, QCA8K_ATU_AGE_TIME_MASK,
2337 QCA8K_ATU_AGE_TIME(val));
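/* Example (assuming the 7 s granularity above): msecs = 30000 gives
 * secs = 30 and val = 4, i.e. an effective ageing time of 28 s; the
 * requested value is always rounded down to the 7 s step, with a
 * minimum of one step.
 */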
2341 qca8k_port_enable(struct dsa_switch *ds, int port,
2342 struct phy_device *phy)
2344 struct qca8k_priv *priv = (struct qca8k_priv *)ds->priv;
2346 qca8k_port_set_status(priv, port, 1);
2347 priv->port_sts[port].enabled = 1;
2349 if (dsa_is_user_port(ds, port))
2350 phy_support_asym_pause(phy);
2356 qca8k_port_disable(struct dsa_switch *ds, int port)
2358 struct qca8k_priv *priv = (struct qca8k_priv *)ds->priv;
2360 qca8k_port_set_status(priv, port, 0);
2361 priv->port_sts[port].enabled = 0;
2365 qca8k_port_change_mtu(struct dsa_switch *ds, int port, int new_mtu)
2367 struct qca8k_priv *priv = ds->priv;
2370 priv->port_mtu[port] = new_mtu;
2372 for (i = 0; i < QCA8K_NUM_PORTS; i++)
2373 if (priv->port_mtu[i] > mtu)
2374 mtu = priv->port_mtu[i];
2376 /* Include L2 header / FCS length */
2377 return qca8k_write(priv, QCA8K_MAX_FRAME_SIZE, mtu + ETH_HLEN + ETH_FCS_LEN);
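/* Example: with one port at MTU 9000 and the rest at 1500, the value
 * written to QCA8K_MAX_FRAME_SIZE is 9000 + ETH_HLEN + ETH_FCS_LEN =
 * 9018, since the frame size register is shared by all ports.
 */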
2381 qca8k_port_max_mtu(struct dsa_switch *ds, int port)
2383 return QCA8K_MAX_MTU;
2387 qca8k_port_fdb_insert(struct qca8k_priv *priv, const u8 *addr,
2388 u16 port_mask, u16 vid)
	/* Set the vid to the port vlan id if no vid is set */
	if (!vid)
		vid = QCA8K_PORT_VID_DEF;
2394 return qca8k_fdb_add(priv, addr, port_mask, vid,
2395 QCA8K_ATU_STATUS_STATIC);
2399 qca8k_port_fdb_add(struct dsa_switch *ds, int port,
2400 const unsigned char *addr, u16 vid)
2402 struct qca8k_priv *priv = (struct qca8k_priv *)ds->priv;
2403 u16 port_mask = BIT(port);
2405 return qca8k_port_fdb_insert(priv, addr, port_mask, vid);
2409 qca8k_port_fdb_del(struct dsa_switch *ds, int port,
2410 const unsigned char *addr, u16 vid)
2412 struct qca8k_priv *priv = (struct qca8k_priv *)ds->priv;
2413 u16 port_mask = BIT(port);
	if (!vid)
		vid = QCA8K_PORT_VID_DEF;
2418 return qca8k_fdb_del(priv, addr, port_mask, vid);
2422 qca8k_port_fdb_dump(struct dsa_switch *ds, int port,
2423 dsa_fdb_dump_cb_t *cb, void *data)
2425 struct qca8k_priv *priv = (struct qca8k_priv *)ds->priv;
2426 struct qca8k_fdb _fdb = { 0 };
2427 int cnt = QCA8K_NUM_FDB_RECORDS;
2431 mutex_lock(&priv->reg_mutex);
2432 while (cnt-- && !qca8k_fdb_next(priv, &_fdb, port)) {
2435 is_static = (_fdb.aging == QCA8K_ATU_STATUS_STATIC);
2436 ret = cb(_fdb.mac, _fdb.vid, is_static, data);
2440 mutex_unlock(&priv->reg_mutex);
2446 qca8k_port_mdb_add(struct dsa_switch *ds, int port,
2447 const struct switchdev_obj_port_mdb *mdb)
2449 struct qca8k_priv *priv = ds->priv;
2450 const u8 *addr = mdb->addr;
2453 return qca8k_fdb_search_and_insert(priv, BIT(port), addr, vid);
2457 qca8k_port_mdb_del(struct dsa_switch *ds, int port,
2458 const struct switchdev_obj_port_mdb *mdb)
2460 struct qca8k_priv *priv = ds->priv;
2461 const u8 *addr = mdb->addr;
2464 return qca8k_fdb_search_and_del(priv, BIT(port), addr, vid);
2468 qca8k_port_mirror_add(struct dsa_switch *ds, int port,
2469 struct dsa_mall_mirror_tc_entry *mirror,
2472 struct qca8k_priv *priv = ds->priv;
2473 int monitor_port, ret;
	/* Check for an existing entry */
2477 if ((ingress ? priv->mirror_rx : priv->mirror_tx) & BIT(port))
2480 ret = regmap_read(priv->regmap, QCA8K_REG_GLOBAL_FW_CTRL0, &val);
	/* QCA83xx can have only one port set to mirror mode.
	 * Check that the correct port is requested and return error otherwise.
	 * When no mirror port is set, the value is set to 0xF.
	 */
2488 monitor_port = FIELD_GET(QCA8K_GLOBAL_FW_CTRL0_MIRROR_PORT_NUM, val);
2489 if (monitor_port != 0xF && monitor_port != mirror->to_local_port)
2492 /* Set the monitor port */
2493 val = FIELD_PREP(QCA8K_GLOBAL_FW_CTRL0_MIRROR_PORT_NUM,
2494 mirror->to_local_port);
2495 ret = regmap_update_bits(priv->regmap, QCA8K_REG_GLOBAL_FW_CTRL0,
2496 QCA8K_GLOBAL_FW_CTRL0_MIRROR_PORT_NUM, val);
2501 reg = QCA8K_PORT_LOOKUP_CTRL(port);
2502 val = QCA8K_PORT_LOOKUP_ING_MIRROR_EN;
2504 reg = QCA8K_REG_PORT_HOL_CTRL1(port);
2505 val = QCA8K_PORT_HOL_CTRL1_EG_MIRROR_EN;
2508 ret = regmap_update_bits(priv->regmap, reg, val, val);
	/* Track mirror port for tx and rx to decide when the
	 * mirror port has to be disabled.
	 */
2516 priv->mirror_rx |= BIT(port);
2518 priv->mirror_tx |= BIT(port);
2524 qca8k_port_mirror_del(struct dsa_switch *ds, int port,
2525 struct dsa_mall_mirror_tc_entry *mirror)
2527 struct qca8k_priv *priv = ds->priv;
2531 if (mirror->ingress) {
2532 reg = QCA8K_PORT_LOOKUP_CTRL(port);
2533 val = QCA8K_PORT_LOOKUP_ING_MIRROR_EN;
2535 reg = QCA8K_REG_PORT_HOL_CTRL1(port);
2536 val = QCA8K_PORT_HOL_CTRL1_EG_MIRROR_EN;
2539 ret = regmap_clear_bits(priv->regmap, reg, val);
2543 if (mirror->ingress)
2544 priv->mirror_rx &= ~BIT(port);
2546 priv->mirror_tx &= ~BIT(port);
	/* No port is set to send packets to the mirror port. Disable the mirror port. */
2549 if (!priv->mirror_rx && !priv->mirror_tx) {
2550 val = FIELD_PREP(QCA8K_GLOBAL_FW_CTRL0_MIRROR_PORT_NUM, 0xF);
2551 ret = regmap_update_bits(priv->regmap, QCA8K_REG_GLOBAL_FW_CTRL0,
2552 QCA8K_GLOBAL_FW_CTRL0_MIRROR_PORT_NUM, val);
2557 dev_err(priv->dev, "Failed to del mirror port from %d", port);
2561 qca8k_port_vlan_filtering(struct dsa_switch *ds, int port, bool vlan_filtering,
2562 struct netlink_ext_ack *extack)
2564 struct qca8k_priv *priv = ds->priv;
2567 if (vlan_filtering) {
2568 ret = qca8k_rmw(priv, QCA8K_PORT_LOOKUP_CTRL(port),
2569 QCA8K_PORT_LOOKUP_VLAN_MODE_MASK,
2570 QCA8K_PORT_LOOKUP_VLAN_MODE_SECURE);
2572 ret = qca8k_rmw(priv, QCA8K_PORT_LOOKUP_CTRL(port),
2573 QCA8K_PORT_LOOKUP_VLAN_MODE_MASK,
2574 QCA8K_PORT_LOOKUP_VLAN_MODE_NONE);
2581 qca8k_port_vlan_add(struct dsa_switch *ds, int port,
2582 const struct switchdev_obj_port_vlan *vlan,
2583 struct netlink_ext_ack *extack)
2585 bool untagged = vlan->flags & BRIDGE_VLAN_INFO_UNTAGGED;
2586 bool pvid = vlan->flags & BRIDGE_VLAN_INFO_PVID;
2587 struct qca8k_priv *priv = ds->priv;
2590 ret = qca8k_vlan_add(priv, port, vlan->vid, untagged);
2592 dev_err(priv->dev, "Failed to add VLAN to port %d (%d)", port, ret);
2597 ret = qca8k_rmw(priv, QCA8K_EGRESS_VLAN(port),
2598 QCA8K_EGREES_VLAN_PORT_MASK(port),
2599 QCA8K_EGREES_VLAN_PORT(port, vlan->vid));
2603 ret = qca8k_write(priv, QCA8K_REG_PORT_VLAN_CTRL0(port),
2604 QCA8K_PORT_VLAN_CVID(vlan->vid) |
2605 QCA8K_PORT_VLAN_SVID(vlan->vid));
2612 qca8k_port_vlan_del(struct dsa_switch *ds, int port,
2613 const struct switchdev_obj_port_vlan *vlan)
2615 struct qca8k_priv *priv = ds->priv;
2618 ret = qca8k_vlan_del(priv, port, vlan->vid);
2620 dev_err(priv->dev, "Failed to delete VLAN from port %d (%d)", port, ret);
2625 static u32 qca8k_get_phy_flags(struct dsa_switch *ds, int port)
2627 struct qca8k_priv *priv = ds->priv;
2629 /* Communicate the switch revision to the internal PHY driver.
2630 * Based on the switch revision, different values need to be
2631 * set in the dbg and mmd registers of the PHY.
2632 * The first two bits are used to pass the switch revision
2633 * to the PHY driver.
2634 */
2635 if (port > 0 && port < 6)
2636 return priv->switch_revision;
2638 return 0;
2641 static enum dsa_tag_protocol
2642 qca8k_get_tag_protocol(struct dsa_switch *ds, int port,
2643 enum dsa_tag_protocol mp)
2645 return DSA_TAG_PROTO_QCA;
2649 qca8k_lag_can_offload(struct dsa_switch *ds, struct dsa_lag lag,
2650 struct netdev_lag_upper_info *info)
2652 struct dsa_port *dp;
2653 int members = 0;
2655 if (!lag.id)
2656 return false;
2658 dsa_lag_foreach_port(dp, ds->dst, &lag)
2659 /* Includes the port joining the LAG */
2660 members++;
2662 if (members > QCA8K_NUM_PORTS_FOR_LAG)
2663 return false;
2665 if (info->tx_type != NETDEV_LAG_TX_TYPE_HASH)
2666 return false;
2668 if (info->hash_type != NETDEV_LAG_HASH_L2 &&
2669 info->hash_type != NETDEV_LAG_HASH_L23)
2670 return false;
2672 return true;
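/* Usage sketch (illustrative): a LAG that passes these checks can be
 * created from userspace with, for example:
 *
 *   ip link add bond0 type bond mode 802.3ad xmit_hash_policy layer2+3
 *   ip link set dev lan1 master bond0
 *   ip link set dev lan2 master bond0
 *
 * balance-xor also reports NETDEV_LAG_TX_TYPE_HASH; active-backup does
 * not and is refused above.
 */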
2676 qca8k_lag_setup_hash(struct dsa_switch *ds, struct dsa_lag lag,
2677 struct netdev_lag_upper_info *info)
2679 struct net_device *lag_dev = lag.dev;
2680 struct qca8k_priv *priv = ds->priv;
2681 bool unique_lag = true;
2682 unsigned int i;
2683 u32 hash = 0;
2685 switch (info->hash_type) {
2686 case NETDEV_LAG_HASH_L23:
2687 hash |= QCA8K_TRUNK_HASH_SIP_EN;
2688 hash |= QCA8K_TRUNK_HASH_DIP_EN;
2689 fallthrough;
2690 case NETDEV_LAG_HASH_L2:
2691 hash |= QCA8K_TRUNK_HASH_SA_EN;
2692 hash |= QCA8K_TRUNK_HASH_DA_EN;
2693 break;
2694 default: /* We should NEVER reach this */
2695 return -EOPNOTSUPP;
2696 }
2698 /* Check if we are the unique configured LAG */
2699 dsa_lags_foreach_id(i, ds->dst)
2700 if (i != lag.id && dsa_lag_by_id(ds->dst, i)) {
2701 unique_lag = false;
2702 break;
2703 }
2705 /* The hash mode is global. Make sure the same hash mode
2706 * is set for all four possible LAGs.
2707 * If we are the only configured LAG we can set whatever
2708 * hash mode we want.
2709 * To change the hash mode, all LAGs must be removed and
2710 * recreated with the new mode.
2711 */
2712 if (unique_lag) {
2713 priv->lag_hash_mode = hash;
2714 } else if (priv->lag_hash_mode != hash) {
2715 netdev_err(lag_dev, "Error: Mismatched hash mode across different LAGs is not supported\n");
2716 return -EOPNOTSUPP;
2717 }
2719 return regmap_update_bits(priv->regmap, QCA8K_TRUNK_HASH_EN_CTRL,
2720 QCA8K_TRUNK_HASH_MASK, hash);
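/* Worked example: for NETDEV_LAG_HASH_L23 the switch statement above
 * falls through from the L23 case into the L2 case, so the programmed
 * value is SIP | DIP | SA | DA, i.e. both IP and MAC addresses feed
 * the trunk hash; plain L2 programs only SA | DA.
 */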
2724 qca8k_lag_refresh_portmap(struct dsa_switch *ds, int port,
2725 struct dsa_lag lag, bool delete)
2727 struct qca8k_priv *priv = ds->priv;
2728 int ret, id, i;
2729 u32 val;
2731 /* DSA LAG IDs are one-based, hardware is zero-based */
2732 id = lag.id - 1;
2734 /* Read the current port members */
2735 ret = regmap_read(priv->regmap, QCA8K_REG_GOL_TRUNK_CTRL0, &val);
2736 if (ret)
2737 return ret;
2739 /* Shift val to the correct trunk */
2740 val >>= QCA8K_REG_GOL_TRUNK_SHIFT(id);
2741 val &= QCA8K_REG_GOL_TRUNK_MEMBER_MASK;
2742 if (delete)
2743 val &= ~BIT(port);
2744 else
2745 val |= BIT(port);
2747 /* Update the port members. With an empty portmap, disable the trunk */
2748 ret = regmap_update_bits(priv->regmap, QCA8K_REG_GOL_TRUNK_CTRL0,
2749 QCA8K_REG_GOL_TRUNK_MEMBER(id) |
2750 QCA8K_REG_GOL_TRUNK_EN(id),
2751 !val << QCA8K_REG_GOL_TRUNK_SHIFT(id) |
2752 val << QCA8K_REG_GOL_TRUNK_SHIFT(id));
2754 /* Search for an empty member slot when adding, or for the port when deleting */
2755 for (i = 0; i < QCA8K_NUM_PORTS_FOR_LAG; i++) {
2756 ret = regmap_read(priv->regmap, QCA8K_REG_GOL_TRUNK_CTRL(id), &val);
2757 if (ret)
2758 return ret;
2760 val >>= QCA8K_REG_GOL_TRUNK_ID_MEM_ID_SHIFT(id, i);
2761 val &= QCA8K_REG_GOL_TRUNK_ID_MEM_ID_MASK;
2763 if (delete) {
2764 /* If the port is flagged to be disabled, assume this
2765 * member slot is empty
2766 */
2767 if (val != QCA8K_REG_GOL_TRUNK_ID_MEM_ID_EN_MASK)
2768 continue;
2770 val &= QCA8K_REG_GOL_TRUNK_ID_MEM_ID_PORT_MASK;
2771 if (val != port)
2772 continue;
2773 } else {
2774 /* If the port is flagged to be enabled, assume this
2775 * member slot is already in use
2776 */
2777 if (val == QCA8K_REG_GOL_TRUNK_ID_MEM_ID_EN_MASK)
2778 continue;
2779 }
2781 /* We have found the member to add/remove */
2782 break;
2783 }
2785 /* Set port in the correct port mask or disable port if in delete mode */
2786 return regmap_update_bits(priv->regmap, QCA8K_REG_GOL_TRUNK_CTRL(id),
2787 QCA8K_REG_GOL_TRUNK_ID_MEM_ID_EN(id, i) |
2788 QCA8K_REG_GOL_TRUNK_ID_MEM_ID_PORT(id, i),
2789 !delete << QCA8K_REG_GOL_TRUNK_ID_MEM_ID_SHIFT(id, i) |
2790 port << QCA8K_REG_GOL_TRUNK_ID_MEM_ID_SHIFT(id, i));
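/* Worked example (illustrative): the first userspace LAG has lag.id 1,
 * which maps to hardware trunk 0. Adding ports 1 and 2 sets bits 1 and
 * 2 in trunk 0's member map in GOL_TRUNK_CTRL0 and records each port
 * in a free member slot of GOL_TRUNK_CTRL(0); deleting both ports
 * empties the map again, which also clears the trunk enable bit.
 */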
2794 qca8k_port_lag_join(struct dsa_switch *ds, int port, struct dsa_lag lag,
2795 struct netdev_lag_upper_info *info)
2797 int ret;
2799 if (!qca8k_lag_can_offload(ds, lag, info))
2800 return -EOPNOTSUPP;
2802 ret = qca8k_lag_setup_hash(ds, lag, info);
2803 if (ret)
2804 return ret;
2806 return qca8k_lag_refresh_portmap(ds, port, lag, false);
2810 qca8k_port_lag_leave(struct dsa_switch *ds, int port,
2811 struct dsa_lag lag)
2813 return qca8k_lag_refresh_portmap(ds, port, lag, true);
2817 qca8k_master_change(struct dsa_switch *ds, const struct net_device *master,
2818 bool operational)
2820 struct dsa_port *dp = master->dsa_ptr;
2821 struct qca8k_priv *priv = ds->priv;
2823 /* Ethernet MIB/MDIO is only supported for CPU port 0 */
2824 if (dp->index != 0)
2825 return;
2827 mutex_lock(&priv->mgmt_eth_data.mutex);
2828 mutex_lock(&priv->mib_eth_data.mutex);
2830 priv->mgmt_master = operational ? (struct net_device *)master : NULL;
2832 mutex_unlock(&priv->mib_eth_data.mutex);
2833 mutex_unlock(&priv->mgmt_eth_data.mutex);
2836 static int qca8k_connect_tag_protocol(struct dsa_switch *ds,
2837 enum dsa_tag_protocol proto)
2839 struct qca_tagger_data *tagger_data;
2841 switch (proto) {
2842 case DSA_TAG_PROTO_QCA:
2843 tagger_data = ds->tagger_data;
2845 tagger_data->rw_reg_ack_handler = qca8k_rw_reg_ack_handler;
2846 tagger_data->mib_autocast_handler = qca8k_mib_autocast_handler;
2848 break;
2849 default:
2850 return -EOPNOTSUPP;
2851 }
2853 return 0;
2857 qca8k_setup(struct dsa_switch *ds)
2859 struct qca8k_priv *priv = (struct qca8k_priv *)ds->priv;
2860 int cpu_port, ret, i;
2861 u32 mask;
2863 cpu_port = qca8k_find_cpu_port(ds);
2864 if (cpu_port < 0) {
2865 dev_err(priv->dev, "No CPU port configured on either port 0 or port 6");
2866 return cpu_port;
2867 }
2869 /* Parse the CPU port config to be used later in phylink mac_config */
2870 ret = qca8k_parse_port_config(priv);
2871 if (ret)
2872 return ret;
2874 ret = qca8k_setup_mdio_bus(priv);
2875 if (ret)
2876 return ret;
2878 ret = qca8k_setup_of_pws_reg(priv);
2879 if (ret)
2880 return ret;
2882 ret = qca8k_setup_mac_pwr_sel(priv);
2883 if (ret)
2884 return ret;
2886 qca8k_setup_pcs(priv, &priv->pcs_port_0, 0);
2887 qca8k_setup_pcs(priv, &priv->pcs_port_6, 6);
2889 /* Make sure MAC06 is disabled */
2890 ret = regmap_clear_bits(priv->regmap, QCA8K_REG_PORT0_PAD_CTRL,
2891 QCA8K_PORT0_PAD_MAC06_EXCHANGE_EN);
2892 if (ret) {
2893 dev_err(priv->dev, "failed disabling MAC06 exchange");
2894 return ret;
2895 }
2897 /* Enable CPU Port */
2898 ret = regmap_set_bits(priv->regmap, QCA8K_REG_GLOBAL_FW_CTRL0,
2899 QCA8K_GLOBAL_FW_CTRL0_CPU_PORT_EN);
2900 if (ret) {
2901 dev_err(priv->dev, "failed enabling CPU port");
2902 return ret;
2903 }
2905 /* Enable MIB counters */
2906 ret = qca8k_mib_init(priv);
2907 if (ret)
2908 dev_warn(priv->dev, "mib init failed");
2910 /* Initial setup of all ports */
2911 for (i = 0; i < QCA8K_NUM_PORTS; i++) {
2912 /* Disable forwarding by default on all ports */
2913 ret = qca8k_rmw(priv, QCA8K_PORT_LOOKUP_CTRL(i),
2914 QCA8K_PORT_LOOKUP_MEMBER, 0);
2915 if (ret)
2916 return ret;
2918 /* Enable QCA header mode on all cpu ports */
2919 if (dsa_is_cpu_port(ds, i)) {
2920 ret = qca8k_write(priv, QCA8K_REG_PORT_HDR_CTRL(i),
2921 FIELD_PREP(QCA8K_PORT_HDR_CTRL_TX_MASK, QCA8K_PORT_HDR_CTRL_ALL) |
2922 FIELD_PREP(QCA8K_PORT_HDR_CTRL_RX_MASK, QCA8K_PORT_HDR_CTRL_ALL));
2923 if (ret) {
2924 dev_err(priv->dev, "failed enabling QCA header mode");
2925 return ret;
2926 }
2927 }
2929 /* Disable MAC by default on all user ports */
2930 if (dsa_is_user_port(ds, i))
2931 qca8k_port_set_status(priv, i, 0);
2934 /* Forward all unknown frames to the CPU port for Linux processing.
2935 * Note that in a multi-CPU config only one port should be set
2936 * for IGMP, unknown, multicast and broadcast packets.
2937 */
2938 ret = qca8k_write(priv, QCA8K_REG_GLOBAL_FW_CTRL1,
2939 FIELD_PREP(QCA8K_GLOBAL_FW_CTRL1_IGMP_DP_MASK, BIT(cpu_port)) |
2940 FIELD_PREP(QCA8K_GLOBAL_FW_CTRL1_BC_DP_MASK, BIT(cpu_port)) |
2941 FIELD_PREP(QCA8K_GLOBAL_FW_CTRL1_MC_DP_MASK, BIT(cpu_port)) |
2942 FIELD_PREP(QCA8K_GLOBAL_FW_CTRL1_UC_DP_MASK, BIT(cpu_port)));
2943 if (ret)
2944 return ret;
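/* Worked example: with cpu_port == 0, BIT(cpu_port) is 0x1, so each
 * FIELD_PREP() above places 0x1 in its destination-port mask and only
 * port 0 receives IGMP, broadcast, multicast and unknown-unicast
 * frames.
 */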
2946 /* Set up the connection between the CPU port and the user ports,
2947 * and apply port-specific switch configuration.
2948 */
2949 for (i = 0; i < QCA8K_NUM_PORTS; i++) {
2950 /* CPU port gets connected to all user ports of the switch */
2951 if (dsa_is_cpu_port(ds, i)) {
2952 ret = qca8k_rmw(priv, QCA8K_PORT_LOOKUP_CTRL(i),
2953 QCA8K_PORT_LOOKUP_MEMBER, dsa_user_ports(ds));
2954 if (ret)
2955 return ret;
2956 }
2958 /* Individual user ports get connected to CPU port only */
2959 if (dsa_is_user_port(ds, i)) {
2960 ret = qca8k_rmw(priv, QCA8K_PORT_LOOKUP_CTRL(i),
2961 QCA8K_PORT_LOOKUP_MEMBER,
2962 BIT(cpu_port));
2963 if (ret)
2964 return ret;
2966 /* Enable ARP auto-learning by default */
2967 ret = regmap_set_bits(priv->regmap, QCA8K_PORT_LOOKUP_CTRL(i),
2968 QCA8K_PORT_LOOKUP_LEARN);
2969 if (ret)
2970 return ret;
2972 /* For port-based VLANs to work we need to set the
2973 * default egress VID
2974 */
2975 ret = qca8k_rmw(priv, QCA8K_EGRESS_VLAN(i),
2976 QCA8K_EGREES_VLAN_PORT_MASK(i),
2977 QCA8K_EGREES_VLAN_PORT(i, QCA8K_PORT_VID_DEF));
2978 if (ret)
2979 return ret;
2981 ret = qca8k_write(priv, QCA8K_REG_PORT_VLAN_CTRL0(i),
2982 QCA8K_PORT_VLAN_CVID(QCA8K_PORT_VID_DEF) |
2983 QCA8K_PORT_VLAN_SVID(QCA8K_PORT_VID_DEF));
2984 if (ret)
2985 return ret;
2986 }
2988 /* Port 5 of the qca8337 has some problems under flood conditions. The
2989 * original legacy driver had some specific buffer and priority settings
2990 * for the different ports, suggested by the QCA switch team. Add these
2991 * missing settings to improve switch stability under load. This
2992 * problem is limited to the qca8337; other qca8k switches are not affected.
2993 */
2994 if (priv->switch_id == QCA8K_ID_QCA8337) {
2995 switch (i) {
2996 /* The two CPU ports and port 5 require different
2997 * priorities than any other port.
2998 */
2999 case 0:
3000 case 5:
3001 case 6:
3002 mask = QCA8K_PORT_HOL_CTRL0_EG_PRI0(0x3) |
3003 QCA8K_PORT_HOL_CTRL0_EG_PRI1(0x4) |
3004 QCA8K_PORT_HOL_CTRL0_EG_PRI2(0x4) |
3005 QCA8K_PORT_HOL_CTRL0_EG_PRI3(0x4) |
3006 QCA8K_PORT_HOL_CTRL0_EG_PRI4(0x6) |
3007 QCA8K_PORT_HOL_CTRL0_EG_PRI5(0x8) |
3008 QCA8K_PORT_HOL_CTRL0_EG_PORT(0x1e);
3009 break;
3010 default:
3011 mask = QCA8K_PORT_HOL_CTRL0_EG_PRI0(0x3) |
3012 QCA8K_PORT_HOL_CTRL0_EG_PRI1(0x4) |
3013 QCA8K_PORT_HOL_CTRL0_EG_PRI2(0x6) |
3014 QCA8K_PORT_HOL_CTRL0_EG_PRI3(0x8) |
3015 QCA8K_PORT_HOL_CTRL0_EG_PORT(0x19);
3016 }
3017 qca8k_write(priv, QCA8K_REG_PORT_HOL_CTRL0(i), mask);
3019 mask = QCA8K_PORT_HOL_CTRL1_ING(0x6) |
3020 QCA8K_PORT_HOL_CTRL1_EG_PRI_BUF_EN |
3021 QCA8K_PORT_HOL_CTRL1_EG_PORT_BUF_EN |
3022 QCA8K_PORT_HOL_CTRL1_WRED_EN;
3023 qca8k_rmw(priv, QCA8K_REG_PORT_HOL_CTRL1(i),
3024 QCA8K_PORT_HOL_CTRL1_ING_BUF_MASK |
3025 QCA8K_PORT_HOL_CTRL1_EG_PRI_BUF_EN |
3026 QCA8K_PORT_HOL_CTRL1_EG_PORT_BUF_EN |
3027 QCA8K_PORT_HOL_CTRL1_WRED_EN,
3028 mask);
3029 }
3031 /* Set the initial MTU for every port.
3032 * We only have a global MTU setting, so track
3033 * every port and apply the maximum across all ports.
3034 * Set the per-port MTU to 1500 (ETH_DATA_LEN): the MTU change
3035 * function adds the Ethernet overhead (14-byte header plus
3036 * 4-byte FCS) itself, and if it were set to 1518 the overhead
3037 * would be added again, giving 1518 + 14 + 4 = 1536 instead of 1518.
3038 */
3039 priv->port_mtu[i] = ETH_DATA_LEN;
3040 }
3042 /* Special GLOBAL_FC_THRESH values are needed for the ar8327 switch */
3043 if (priv->switch_id == QCA8K_ID_QCA8327) {
3044 mask = QCA8K_GLOBAL_FC_GOL_XON_THRES(288) |
3045 QCA8K_GLOBAL_FC_GOL_XOFF_THRES(496);
3046 qca8k_rmw(priv, QCA8K_REG_GLOBAL_FC_THRESH,
3047 QCA8K_GLOBAL_FC_GOL_XON_THRES_MASK |
3048 QCA8K_GLOBAL_FC_GOL_XOFF_THRES_MASK,
3049 mask);
3050 }
3052 /* Set up our port MTUs to match the power-on default */
3053 ret = qca8k_write(priv, QCA8K_MAX_FRAME_SIZE, ETH_FRAME_LEN + ETH_FCS_LEN);
3054 if (ret)
3055 dev_warn(priv->dev, "failed setting max frame size");
3057 /* Flush the FDB table */
3058 qca8k_fdb_flush(priv);
3060 /* Set the min and max ageing values supported */
3061 ds->ageing_time_min = 7000;
3062 ds->ageing_time_max = 458745000;
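/* Worked arithmetic: assuming the 7-second ageing granularity used by
 * qca8k_set_ageing_time(), the minimum is one hardware step (7000 ms)
 * and the maximum is the largest 16-bit step count:
 * 65535 * 7000 ms = 458745000 ms.
 */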
3064 /* Set max number of LAGs supported */
3065 ds->num_lag_ids = QCA8K_NUM_LAGS;
3067 return 0;
3070 static const struct dsa_switch_ops qca8k_switch_ops = {
3071 .get_tag_protocol = qca8k_get_tag_protocol,
3072 .setup = qca8k_setup,
3073 .get_strings = qca8k_get_strings,
3074 .get_ethtool_stats = qca8k_get_ethtool_stats,
3075 .get_sset_count = qca8k_get_sset_count,
3076 .set_ageing_time = qca8k_set_ageing_time,
3077 .get_mac_eee = qca8k_get_mac_eee,
3078 .set_mac_eee = qca8k_set_mac_eee,
3079 .port_enable = qca8k_port_enable,
3080 .port_disable = qca8k_port_disable,
3081 .port_change_mtu = qca8k_port_change_mtu,
3082 .port_max_mtu = qca8k_port_max_mtu,
3083 .port_stp_state_set = qca8k_port_stp_state_set,
3084 .port_bridge_join = qca8k_port_bridge_join,
3085 .port_bridge_leave = qca8k_port_bridge_leave,
3086 .port_fast_age = qca8k_port_fast_age,
3087 .port_fdb_add = qca8k_port_fdb_add,
3088 .port_fdb_del = qca8k_port_fdb_del,
3089 .port_fdb_dump = qca8k_port_fdb_dump,
3090 .port_mdb_add = qca8k_port_mdb_add,
3091 .port_mdb_del = qca8k_port_mdb_del,
3092 .port_mirror_add = qca8k_port_mirror_add,
3093 .port_mirror_del = qca8k_port_mirror_del,
3094 .port_vlan_filtering = qca8k_port_vlan_filtering,
3095 .port_vlan_add = qca8k_port_vlan_add,
3096 .port_vlan_del = qca8k_port_vlan_del,
3097 .phylink_get_caps = qca8k_phylink_get_caps,
3098 .phylink_mac_select_pcs = qca8k_phylink_mac_select_pcs,
3099 .phylink_mac_config = qca8k_phylink_mac_config,
3100 .phylink_mac_link_down = qca8k_phylink_mac_link_down,
3101 .phylink_mac_link_up = qca8k_phylink_mac_link_up,
3102 .get_phy_flags = qca8k_get_phy_flags,
3103 .port_lag_join = qca8k_port_lag_join,
3104 .port_lag_leave = qca8k_port_lag_leave,
3105 .master_state_change = qca8k_master_change,
3106 .connect_tag_protocol = qca8k_connect_tag_protocol,
3109 static int qca8k_read_switch_id(struct qca8k_priv *priv)
3111 const struct qca8k_match_data *data;
3112 u32 val;
3113 u8 id;
3114 int ret;
3116 /* Get the switch ID from the compatible string */
3117 data = of_device_get_match_data(priv->dev);
3118 if (!data)
3119 return -ENODEV;
3121 ret = qca8k_read(priv, QCA8K_REG_MASK_CTRL, &val);
3122 if (ret < 0)
3123 return -ENODEV;
3125 id = QCA8K_MASK_CTRL_DEVICE_ID(val);
3126 if (id != data->id) {
3127 dev_err(priv->dev, "Switch id detected %x but expected %x", id, data->id);
3128 return -ENODEV;
3129 }
3131 priv->switch_id = id;
3133 /* Save revision to communicate to the internal PHY driver */
3134 priv->switch_revision = QCA8K_MASK_CTRL_REV_ID(val);
3136 return 0;
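/* Worked example (assuming the usual qca8k.h layout of
 * QCA8K_REG_MASK_CTRL: device id in bits 15:8, revision in bits 7:0):
 * a readback of val == 0x1302 decodes to device id 0x13
 * (QCA8K_ID_QCA8337) and revision 2.
 */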
3140 qca8k_sw_probe(struct mdio_device *mdiodev)
3142 struct qca8k_priv *priv;
3143 int ret;
3145 /* Allocate the private data struct so that we can probe the switch's
3146 * ID register
3147 */
3148 priv = devm_kzalloc(&mdiodev->dev, sizeof(*priv), GFP_KERNEL);
3149 if (!priv)
3150 return -ENOMEM;
3152 priv->bus = mdiodev->bus;
3153 priv->dev = &mdiodev->dev;
3155 priv->reset_gpio = devm_gpiod_get_optional(priv->dev, "reset",
3157 if (IS_ERR(priv->reset_gpio))
3158 return PTR_ERR(priv->reset_gpio);
3160 if (priv->reset_gpio) {
3161 gpiod_set_value_cansleep(priv->reset_gpio, 1);
3162 /* The active-low duration must be greater than 10 ms
3163 * and checkpatch.pl wants 20 ms.
3164 */
3165 msleep(20);
3166 gpiod_set_value_cansleep(priv->reset_gpio, 0);
3167 }
3169 /* Start by setting up the register mapping */
3170 priv->regmap = devm_regmap_init(&mdiodev->dev, NULL, priv,
3171 &qca8k_regmap_config);
3172 if (IS_ERR(priv->regmap)) {
3173 dev_err(priv->dev, "regmap initialization failed");
3174 return PTR_ERR(priv->regmap);
3175 }
3177 priv->mdio_cache.page = 0xffff;
3178 priv->mdio_cache.lo = 0xffff;
3179 priv->mdio_cache.hi = 0xffff;
3181 /* Check the detected switch id */
3182 ret = qca8k_read_switch_id(priv);
3183 if (ret)
3184 return ret;
3186 priv->ds = devm_kzalloc(&mdiodev->dev, sizeof(*priv->ds), GFP_KERNEL);
3187 if (!priv->ds)
3188 return -ENOMEM;
3190 mutex_init(&priv->mgmt_eth_data.mutex);
3191 init_completion(&priv->mgmt_eth_data.rw_done);
3193 mutex_init(&priv->mib_eth_data.mutex);
3194 init_completion(&priv->mib_eth_data.rw_done);
3196 priv->ds->dev = &mdiodev->dev;
3197 priv->ds->num_ports = QCA8K_NUM_PORTS;
3198 priv->ds->priv = priv;
3199 priv->ops = qca8k_switch_ops;
3200 priv->ds->ops = &priv->ops;
3201 mutex_init(&priv->reg_mutex);
3202 dev_set_drvdata(&mdiodev->dev, priv);
3204 return dsa_register_switch(priv->ds);
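/* Usage sketch (illustrative): a minimal device-tree fragment that
 * binds this driver; the MDIO address and node name are hypothetical:
 *
 *   &mdio {
 *           switch@10 {
 *                   compatible = "qca,qca8337";
 *                   reg = <0x10>;
 *           };
 *   };
 *
 * See the qca8k devicetree binding documentation for the port layout.
 */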
3208 qca8k_sw_remove(struct mdio_device *mdiodev)
3210 struct qca8k_priv *priv = dev_get_drvdata(&mdiodev->dev);
3211 int i;
3213 if (!priv)
3214 return;
3216 for (i = 0; i < QCA8K_NUM_PORTS; i++)
3217 qca8k_port_set_status(priv, i, 0);
3219 dsa_unregister_switch(priv->ds);
3221 dev_set_drvdata(&mdiodev->dev, NULL);
3224 static void qca8k_sw_shutdown(struct mdio_device *mdiodev)
3226 struct qca8k_priv *priv = dev_get_drvdata(&mdiodev->dev);
3228 if (!priv)
3229 return;
3231 dsa_switch_shutdown(priv->ds);
3233 dev_set_drvdata(&mdiodev->dev, NULL);
3236 #ifdef CONFIG_PM_SLEEP
3238 qca8k_set_pm(struct qca8k_priv *priv, int enable)
3240 int i;
3242 for (i = 0; i < QCA8K_NUM_PORTS; i++) {
3243 if (!priv->port_sts[i].enabled)
3244 continue;
3246 qca8k_port_set_status(priv, i, enable);
3247 }
3250 static int qca8k_suspend(struct device *dev)
3252 struct qca8k_priv *priv = dev_get_drvdata(dev);
3254 qca8k_set_pm(priv, 0);
3256 return dsa_switch_suspend(priv->ds);
3259 static int qca8k_resume(struct device *dev)
3261 struct qca8k_priv *priv = dev_get_drvdata(dev);
3263 qca8k_set_pm(priv, 1);
3265 return dsa_switch_resume(priv->ds);
3267 #endif /* CONFIG_PM_SLEEP */
3269 static SIMPLE_DEV_PM_OPS(qca8k_pm_ops,
3270 qca8k_suspend, qca8k_resume);
3272 static const struct qca8k_match_data qca8327 = {
3273 .id = QCA8K_ID_QCA8327,
3274 .reduced_package = true,
3275 .mib_count = QCA8K_QCA832X_MIB_COUNT,
3278 static const struct qca8k_match_data qca8328 = {
3279 .id = QCA8K_ID_QCA8327,
3280 .mib_count = QCA8K_QCA832X_MIB_COUNT,
3283 static const struct qca8k_match_data qca833x = {
3284 .id = QCA8K_ID_QCA8337,
3285 .mib_count = QCA8K_QCA833X_MIB_COUNT,
3288 static const struct of_device_id qca8k_of_match[] = {
3289 { .compatible = "qca,qca8327", .data = &qca8327 },
3290 { .compatible = "qca,qca8328", .data = &qca8328 },
3291 { .compatible = "qca,qca8334", .data = &qca833x },
3292 { .compatible = "qca,qca8337", .data = &qca833x },
3293 { /* sentinel */ },
3294 };
3296 static struct mdio_driver qca8kmdio_driver = {
3297 .probe = qca8k_sw_probe,
3298 .remove = qca8k_sw_remove,
3299 .shutdown = qca8k_sw_shutdown,
3300 .mdiodrv.driver = {
3301 .name = "qca8k",
3302 .of_match_table = qca8k_of_match,
3303 .pm = &qca8k_pm_ops,
3304 },
3305 };
3307 mdio_module_driver(qca8kmdio_driver);
3310 MODULE_DESCRIPTION("Driver for QCA8K ethernet switch family");
3311 MODULE_LICENSE("GPL v2");
3312 MODULE_ALIAS("platform:qca8k");