1 // SPDX-License-Identifier: GPL-2.0
5 * Copyright (c) 2015, 2019, The Linux Foundation. All rights reserved.
9 #include <linux/netdevice.h>
11 #include <linux/if_bridge.h>
/*
 * MIB_DESC(_s, _o, _n): initializer for one qca8k_mib_desc entry —
 * _s is the counter width in 32-bit words (2 => 64-bit lo/hi pair),
 * _o is the offset from the per-port MIB base, _n the ethtool name.
 * NOTE(review): the macro's continuation lines are not visible in this
 * extract — confirm field order against struct qca8k_mib_desc.
 */
15 #define MIB_DESC(_s, _o, _n) \

/*
 * Hardware MIB counter layout, shared across qca8k variants.  Offsets
 * advance 4 bytes per word; the three byte counters (RxGoodByte,
 * RxBadByte, TxByte) are 64-bit and occupy two words each.
 */
22 const struct qca8k_mib_desc ar8327_mib[] = {
23 MIB_DESC(1, 0x00, "RxBroad"),
24 MIB_DESC(1, 0x04, "RxPause"),
25 MIB_DESC(1, 0x08, "RxMulti"),
26 MIB_DESC(1, 0x0c, "RxFcsErr"),
27 MIB_DESC(1, 0x10, "RxAlignErr"),
28 MIB_DESC(1, 0x14, "RxRunt"),
29 MIB_DESC(1, 0x18, "RxFragment"),
30 MIB_DESC(1, 0x1c, "Rx64Byte"),
31 MIB_DESC(1, 0x20, "Rx128Byte"),
32 MIB_DESC(1, 0x24, "Rx256Byte"),
33 MIB_DESC(1, 0x28, "Rx512Byte"),
34 MIB_DESC(1, 0x2c, "Rx1024Byte"),
35 MIB_DESC(1, 0x30, "Rx1518Byte"),
36 MIB_DESC(1, 0x34, "RxMaxByte"),
37 MIB_DESC(1, 0x38, "RxTooLong"),
38 MIB_DESC(2, 0x3c, "RxGoodByte"),
39 MIB_DESC(2, 0x44, "RxBadByte"),
40 MIB_DESC(1, 0x4c, "RxOverFlow"),
41 MIB_DESC(1, 0x50, "Filtered"),
42 MIB_DESC(1, 0x54, "TxBroad"),
43 MIB_DESC(1, 0x58, "TxPause"),
44 MIB_DESC(1, 0x5c, "TxMulti"),
45 MIB_DESC(1, 0x60, "TxUnderRun"),
46 MIB_DESC(1, 0x64, "Tx64Byte"),
47 MIB_DESC(1, 0x68, "Tx128Byte"),
48 MIB_DESC(1, 0x6c, "Tx256Byte"),
49 MIB_DESC(1, 0x70, "Tx512Byte"),
50 MIB_DESC(1, 0x74, "Tx1024Byte"),
51 MIB_DESC(1, 0x78, "Tx1518Byte"),
52 MIB_DESC(1, 0x7c, "TxMaxByte"),
53 MIB_DESC(1, 0x80, "TxOverSize"),
54 MIB_DESC(2, 0x84, "TxByte"),
55 MIB_DESC(1, 0x8c, "TxCollision"),
56 MIB_DESC(1, 0x90, "TxAbortCol"),
57 MIB_DESC(1, 0x94, "TxMultiCol"),
58 MIB_DESC(1, 0x98, "TxSingleCol"),
59 MIB_DESC(1, 0x9c, "TxExcDefer"),
60 MIB_DESC(1, 0xa0, "TxDefer"),
61 MIB_DESC(1, 0xa4, "TxLateCol"),
62 MIB_DESC(1, 0xa8, "RXUnicast"),
63 MIB_DESC(1, 0xac, "TXUnicast"),
/*
 * Thin wrappers over the regmap API for 32-bit switch register access.
 * All three return 0 on success or a negative errno from regmap.
 */

/* Read register @reg into *@val. */
66 int qca8k_read(struct qca8k_priv *priv, u32 reg, u32 *val)
68 return regmap_read(priv->regmap, reg, val);

/* Write @val to register @reg. */
71 int qca8k_write(struct qca8k_priv *priv, u32 reg, u32 val)
73 return regmap_write(priv->regmap, reg, val);

/* Read-modify-write: replace only the bits selected by @mask. */
76 int qca8k_rmw(struct qca8k_priv *priv, u32 reg, u32 mask, u32 write_val)
78 return regmap_update_bits(priv->regmap, reg, mask, write_val);
/*
 * Register windows that may be read through the regmap.  Each range is
 * inclusive; anything outside these windows is rejected by regmap.
 */
81 static const struct regmap_range qca8k_readable_ranges[] = {
82 regmap_reg_range(0x0000, 0x00e4), /* Global control */
83 regmap_reg_range(0x0100, 0x0168), /* EEE control */
84 regmap_reg_range(0x0200, 0x0270), /* Parser control */
85 regmap_reg_range(0x0400, 0x0454), /* ACL */
86 regmap_reg_range(0x0600, 0x0718), /* Lookup */
87 regmap_reg_range(0x0800, 0x0b70), /* QM */
88 regmap_reg_range(0x0c00, 0x0c80), /* PKT */
89 regmap_reg_range(0x0e00, 0x0e98), /* L3 */
90 regmap_reg_range(0x1000, 0x10ac), /* MIB - Port0 */
91 regmap_reg_range(0x1100, 0x11ac), /* MIB - Port1 */
92 regmap_reg_range(0x1200, 0x12ac), /* MIB - Port2 */
93 regmap_reg_range(0x1300, 0x13ac), /* MIB - Port3 */
94 regmap_reg_range(0x1400, 0x14ac), /* MIB - Port4 */
95 regmap_reg_range(0x1500, 0x15ac), /* MIB - Port5 */
96 regmap_reg_range(0x1600, 0x16ac), /* MIB - Port6 */

/* Access table handed to regmap as the "readable" whitelist. */
99 const struct regmap_access_table qca8k_readable_table = {
100 .yes_ranges = qca8k_readable_ranges,
101 .n_yes_ranges = ARRAY_SIZE(qca8k_readable_ranges),
/*
 * Poll @reg until all bits in @mask clear, giving up after
 * QCA8K_BUSY_WAIT_TIMEOUT milliseconds.  Used to wait for the
 * self-clearing BUSY flags of the ATU/VTU/MIB table-access registers.
 * Returns 0 on success or -ETIMEDOUT from regmap_read_poll_timeout().
 */
104 static int qca8k_busy_wait(struct qca8k_priv *priv, u32 reg, u32 mask)
108 return regmap_read_poll_timeout(priv->regmap, reg, val, !(val & mask), 0,
109 QCA8K_BUSY_WAIT_TIMEOUT * USEC_PER_MSEC);
/*
 * Decode the ATU data registers (bulk-read into reg[0..2]) into @fdb:
 * vid and aging live in reg[2], the port mask and the two high MAC
 * bytes in reg[1], the low four MAC bytes in reg[0].
 */
112 static int qca8k_fdb_read(struct qca8k_priv *priv, struct qca8k_fdb *fdb)
114 u32 reg[QCA8K_ATU_TABLE_SIZE];
117 /* load the ARL table into an array */
118 ret = regmap_bulk_read(priv->regmap, QCA8K_REG_ATU_DATA0, reg,
119 QCA8K_ATU_TABLE_SIZE);
124 fdb->vid = FIELD_GET(QCA8K_ATU_VID_MASK, reg[2]);
126 fdb->aging = FIELD_GET(QCA8K_ATU_STATUS_MASK, reg[2]);
127 /* portmask - 54:48 */
128 fdb->port_mask = FIELD_GET(QCA8K_ATU_PORT_MASK, reg[1]);
130 fdb->mac[0] = FIELD_GET(QCA8K_ATU_ADDR0_MASK, reg[1]);
131 fdb->mac[1] = FIELD_GET(QCA8K_ATU_ADDR1_MASK, reg[1]);
132 fdb->mac[2] = FIELD_GET(QCA8K_ATU_ADDR2_MASK, reg[0]);
133 fdb->mac[3] = FIELD_GET(QCA8K_ATU_ADDR3_MASK, reg[0]);
134 fdb->mac[4] = FIELD_GET(QCA8K_ATU_ADDR4_MASK, reg[0]);
135 fdb->mac[5] = FIELD_GET(QCA8K_ATU_ADDR5_MASK, reg[0]);

/*
 * Inverse of qca8k_fdb_read(): encode vid/port_mask/mac/aging into the
 * ATU data registers and bulk-write them, ready for a table command.
 * Field placement mirrors the decode above.
 */
140 static void qca8k_fdb_write(struct qca8k_priv *priv, u16 vid, u8 port_mask,
141 const u8 *mac, u8 aging)
143 u32 reg[QCA8K_ATU_TABLE_SIZE] = { 0 };
146 reg[2] = FIELD_PREP(QCA8K_ATU_VID_MASK, vid);
148 reg[2] |= FIELD_PREP(QCA8K_ATU_STATUS_MASK, aging);
149 /* portmask - 54:48 */
150 reg[1] = FIELD_PREP(QCA8K_ATU_PORT_MASK, port_mask);
152 reg[1] |= FIELD_PREP(QCA8K_ATU_ADDR0_MASK, mac[0]);
153 reg[1] |= FIELD_PREP(QCA8K_ATU_ADDR1_MASK, mac[1]);
154 reg[0] |= FIELD_PREP(QCA8K_ATU_ADDR2_MASK, mac[2]);
155 reg[0] |= FIELD_PREP(QCA8K_ATU_ADDR3_MASK, mac[3]);
156 reg[0] |= FIELD_PREP(QCA8K_ATU_ADDR4_MASK, mac[4]);
157 reg[0] |= FIELD_PREP(QCA8K_ATU_ADDR5_MASK, mac[5]);
159 /* load the array into the ARL table */
160 regmap_bulk_write(priv->regmap, QCA8K_REG_ATU_DATA0, reg,
161 QCA8K_ATU_TABLE_SIZE);
/*
 * Issue ATU command @cmd (load/purge/flush/next/search...) and wait for
 * the BUSY bit to self-clear.  A non-negative @port scopes the command
 * to that port via PORT_EN+PORT_MASK.  For LOAD, re-read the function
 * register to detect a table-full condition.
 * NOTE(review): "®" below is mojibake for "&reg" — fix file encoding.
 */
164 static int qca8k_fdb_access(struct qca8k_priv *priv, enum qca8k_fdb_cmd cmd,
170 /* Set the command and FDB index */
171 reg = QCA8K_ATU_FUNC_BUSY;
174 reg |= QCA8K_ATU_FUNC_PORT_EN;
175 reg |= FIELD_PREP(QCA8K_ATU_FUNC_PORT_MASK, port);
178 /* Write the function register triggering the table access */
179 ret = qca8k_write(priv, QCA8K_REG_ATU_FUNC, reg);
183 /* wait for completion */
184 ret = qca8k_busy_wait(priv, QCA8K_REG_ATU_FUNC, QCA8K_ATU_FUNC_BUSY);
188 /* Check for table full violation when adding an entry */
189 if (cmd == QCA8K_FDB_LOAD) {
190 ret = qca8k_read(priv, QCA8K_REG_ATU_FUNC, ®);
193 if (reg & QCA8K_ATU_FUNC_FULL)

/*
 * Iterator step for FDB dumps: write the current entry as the search
 * key, issue NEXT scoped to @port, then read back the entry found.
 */
200 static int qca8k_fdb_next(struct qca8k_priv *priv, struct qca8k_fdb *fdb,
205 qca8k_fdb_write(priv, fdb->vid, fdb->port_mask, fdb->mac, fdb->aging);
206 ret = qca8k_fdb_access(priv, QCA8K_FDB_NEXT, port);
210 return qca8k_fdb_read(priv, fdb);
/*
 * Add an FDB entry: stage vid/port_mask/mac/aging in the ATU data
 * registers, then issue LOAD.  reg_mutex serializes the shared
 * data/function registers.
 */
213 static int qca8k_fdb_add(struct qca8k_priv *priv, const u8 *mac,
214 u16 port_mask, u16 vid, u8 aging)
218 mutex_lock(&priv->reg_mutex);
219 qca8k_fdb_write(priv, vid, port_mask, mac, aging);
220 ret = qca8k_fdb_access(priv, QCA8K_FDB_LOAD, -1);
221 mutex_unlock(&priv->reg_mutex);

/* Delete an FDB entry: stage the key (aging 0), then issue PURGE. */
226 static int qca8k_fdb_del(struct qca8k_priv *priv, const u8 *mac,
227 u16 port_mask, u16 vid)
231 mutex_lock(&priv->reg_mutex);
232 qca8k_fdb_write(priv, vid, port_mask, mac, 0);
233 ret = qca8k_fdb_access(priv, QCA8K_FDB_PURGE, -1);
234 mutex_unlock(&priv->reg_mutex);

/* Flush the entire FDB table (all ports); result is ignored. */
239 void qca8k_fdb_flush(struct qca8k_priv *priv)
241 mutex_lock(&priv->reg_mutex);
242 qca8k_fdb_access(priv, QCA8K_FDB_FLUSH, -1);
243 mutex_unlock(&priv->reg_mutex);
/*
 * Merge @port_mask into an (mdb) entry for @mac/@vid: SEARCH for an
 * existing entry, PURGE it if found, then re-LOAD with the union of
 * the old and new port masks.  Holds reg_mutex across the sequence.
 */
246 static int qca8k_fdb_search_and_insert(struct qca8k_priv *priv, u8 port_mask,
247 const u8 *mac, u16 vid, u8 aging)
249 struct qca8k_fdb fdb = { 0 };
252 mutex_lock(&priv->reg_mutex);
254 qca8k_fdb_write(priv, vid, 0, mac, 0);
255 ret = qca8k_fdb_access(priv, QCA8K_FDB_SEARCH, -1);
259 ret = qca8k_fdb_read(priv, &fdb);
263 /* Rule exist. Delete first */
265 ret = qca8k_fdb_access(priv, QCA8K_FDB_PURGE, -1);
272 /* Add port to fdb portmask */
273 fdb.port_mask |= port_mask;
275 qca8k_fdb_write(priv, vid, fdb.port_mask, mac, fdb.aging);
276 ret = qca8k_fdb_access(priv, QCA8K_FDB_LOAD, -1);
279 mutex_unlock(&priv->reg_mutex);

/*
 * Remove @port_mask from an entry for @mac/@vid: SEARCH, PURGE, and
 * re-LOAD with the remaining ports — unless this port was the only
 * member, in which case the entry stays deleted.
 */
283 static int qca8k_fdb_search_and_del(struct qca8k_priv *priv, u8 port_mask,
284 const u8 *mac, u16 vid)
286 struct qca8k_fdb fdb = { 0 };
289 mutex_lock(&priv->reg_mutex);
291 qca8k_fdb_write(priv, vid, 0, mac, 0);
292 ret = qca8k_fdb_access(priv, QCA8K_FDB_SEARCH, -1);
296 ret = qca8k_fdb_read(priv, &fdb);
300 /* Rule doesn't exist. Why delete? */
306 ret = qca8k_fdb_access(priv, QCA8K_FDB_PURGE, -1);
310 /* Only port in the rule is this port. Don't re insert */
311 if (fdb.port_mask == port_mask)
314 /* Remove port from port mask */
315 fdb.port_mask &= ~port_mask;
317 qca8k_fdb_write(priv, vid, fdb.port_mask, mac, fdb.aging);
318 ret = qca8k_fdb_access(priv, QCA8K_FDB_LOAD, -1);
321 mutex_unlock(&priv->reg_mutex);
/*
 * Issue VTU command @cmd for @vid via the VTU_FUNC1 register, wait for
 * BUSY to self-clear, and (for LOAD) detect a table-full condition —
 * the VLAN analogue of qca8k_fdb_access().
 * NOTE(review): "®" below is mojibake for "&reg" — fix file encoding.
 */
325 static int qca8k_vlan_access(struct qca8k_priv *priv,
326 enum qca8k_vlan_cmd cmd, u16 vid)
331 /* Set the command and VLAN index */
332 reg = QCA8K_VTU_FUNC1_BUSY;
334 reg |= FIELD_PREP(QCA8K_VTU_FUNC1_VID_MASK, vid);
336 /* Write the function register triggering the table access */
337 ret = qca8k_write(priv, QCA8K_REG_VTU_FUNC1, reg);
341 /* wait for completion */
342 ret = qca8k_busy_wait(priv, QCA8K_REG_VTU_FUNC1, QCA8K_VTU_FUNC1_BUSY);
346 /* Check for table full violation when adding an entry */
347 if (cmd == QCA8K_VLAN_LOAD) {
348 ret = qca8k_read(priv, QCA8K_REG_VTU_FUNC1, ®);
351 if (reg & QCA8K_VTU_FUNC1_FULL)

/*
 * Add @port to VLAN @vid: READ the current VTU entry, mark it valid
 * with independent VLAN learning, set the port's egress mode to
 * untagged or tagged, then LOAD it back.  Serialized by reg_mutex.
 */
358 static int qca8k_vlan_add(struct qca8k_priv *priv, u8 port, u16 vid,
364 /* We do the right thing with VLAN 0 and treat it as untagged while
365 * preserving the tag on egress.
370 mutex_lock(&priv->reg_mutex);
371 ret = qca8k_vlan_access(priv, QCA8K_VLAN_READ, vid);
375 ret = qca8k_read(priv, QCA8K_REG_VTU_FUNC0, ®);
378 reg |= QCA8K_VTU_FUNC0_VALID | QCA8K_VTU_FUNC0_IVL_EN;
379 reg &= ~QCA8K_VTU_FUNC0_EG_MODE_PORT_MASK(port);
381 reg |= QCA8K_VTU_FUNC0_EG_MODE_PORT_UNTAG(port);
383 reg |= QCA8K_VTU_FUNC0_EG_MODE_PORT_TAG(port);
385 ret = qca8k_write(priv, QCA8K_REG_VTU_FUNC0, reg);
388 ret = qca8k_vlan_access(priv, QCA8K_VLAN_LOAD, vid);
391 mutex_unlock(&priv->reg_mutex);

/*
 * Remove @port from VLAN @vid: mark the port NOT_MEMBER in the VTU
 * entry; if every port is now NOT_MEMBER, PURGE the whole entry,
 * otherwise write the entry back and re-LOAD it.
 */
396 static int qca8k_vlan_del(struct qca8k_priv *priv, u8 port, u16 vid)
402 mutex_lock(&priv->reg_mutex);
403 ret = qca8k_vlan_access(priv, QCA8K_VLAN_READ, vid);
407 ret = qca8k_read(priv, QCA8K_REG_VTU_FUNC0, ®);
410 reg &= ~QCA8K_VTU_FUNC0_EG_MODE_PORT_MASK(port);
411 reg |= QCA8K_VTU_FUNC0_EG_MODE_PORT_NOT(port);
413 /* Check if we're the last member to be removed */
415 for (i = 0; i < QCA8K_NUM_PORTS; i++) {
416 mask = QCA8K_VTU_FUNC0_EG_MODE_PORT_NOT(i);
418 if ((reg & mask) != mask) {
425 ret = qca8k_vlan_access(priv, QCA8K_VLAN_PURGE, vid);
427 ret = qca8k_write(priv, QCA8K_REG_VTU_FUNC0, reg);
430 ret = qca8k_vlan_access(priv, QCA8K_VLAN_LOAD, vid);
434 mutex_unlock(&priv->reg_mutex);
/*
 * Initialize the hardware MIB engine: FLUSH all counters (setting BUSY
 * and waiting for it to clear), enable counting of CPU-directed frames
 * (CPU_KEEP), then enable the MIB module.  Serialized by reg_mutex.
 */
439 int qca8k_mib_init(struct qca8k_priv *priv)
443 mutex_lock(&priv->reg_mutex);
444 ret = regmap_update_bits(priv->regmap, QCA8K_REG_MIB,
445 QCA8K_MIB_FUNC | QCA8K_MIB_BUSY,
446 FIELD_PREP(QCA8K_MIB_FUNC, QCA8K_MIB_FLUSH) |
451 ret = qca8k_busy_wait(priv, QCA8K_REG_MIB, QCA8K_MIB_BUSY);
455 ret = regmap_set_bits(priv->regmap, QCA8K_REG_MIB, QCA8K_MIB_CPU_KEEP);
459 ret = qca8k_write(priv, QCA8K_REG_MODULE_EN, QCA8K_MODULE_EN_MIB);
462 mutex_unlock(&priv->reg_mutex);
/*
 * Enable or disable a port's MACs.  TX/RX MAC bits are always toggled;
 * LINK_AUTO is added only for ports 1-5, which have internal PHYs
 * (ports 0 and 6 do not, so their link state is driven elsewhere).
 */
466 void qca8k_port_set_status(struct qca8k_priv *priv, int port, int enable)
468 u32 mask = QCA8K_PORT_STATUS_TXMAC | QCA8K_PORT_STATUS_RXMAC;
470 /* Port 0 and 6 have no internal PHY */
471 if (port > 0 && port < 6)
472 mask |= QCA8K_PORT_STATUS_LINK_AUTO;
475 regmap_set_bits(priv->regmap, QCA8K_REG_PORT_STATUS(port), mask);
477 regmap_clear_bits(priv->regmap, QCA8K_REG_PORT_STATUS(port), mask);
/*
 * ethtool .get_strings: emit one stat name per supported MIB counter
 * (names come from ar8327_mib[]).  Only ETH_SS_STATS is handled.
 */
480 void qca8k_get_strings(struct dsa_switch *ds, int port, u32 stringset,
483 struct qca8k_priv *priv = ds->priv;
486 if (stringset != ETH_SS_STATS)
489 for (i = 0; i < priv->info->mib_count; i++)
490 ethtool_sprintf(&data, "%s", ar8327_mib[i].name);

/*
 * ethtool .get_ethtool_stats: prefer the chip's Ethernet-frame MIB
 * autocast path when available on the management conduit; otherwise
 * read each counter register directly, combining lo/hi words for the
 * 64-bit (size == 2) counters.
 */
493 void qca8k_get_ethtool_stats(struct dsa_switch *ds, int port,
496 struct qca8k_priv *priv = ds->priv;
497 const struct qca8k_mib_desc *mib;
502 if (priv->mgmt_conduit && priv->info->ops->autocast_mib &&
503 priv->info->ops->autocast_mib(ds, port, data) > 0)
506 for (i = 0; i < priv->info->mib_count; i++) {
507 mib = &ar8327_mib[i];
508 reg = QCA8K_PORT_MIB_COUNTER(port) + mib->offset;
510 ret = qca8k_read(priv, reg, &val);
514 if (mib->size == 2) {
515 ret = qca8k_read(priv, reg + 4, &hi);
522 data[i] |= (u64)hi << 32;

/* ethtool .get_sset_count: number of MIB stats for ETH_SS_STATS. */
526 int qca8k_get_sset_count(struct dsa_switch *ds, int port, int sset)
528 struct qca8k_priv *priv = ds->priv;
530 if (sset != ETH_SS_STATS)
533 return priv->info->mib_count;
/*
 * DSA .set_mac_eee: read-modify-write the per-port LPI enable bit in
 * the global EEE control register according to eee->eee_enabled.
 * NOTE(review): "®" below is mojibake for "&reg" — fix file encoding.
 */
536 int qca8k_set_mac_eee(struct dsa_switch *ds, int port,
537 struct ethtool_eee *eee)
539 u32 lpi_en = QCA8K_REG_EEE_CTRL_LPI_EN(port);
540 struct qca8k_priv *priv = ds->priv;
544 mutex_lock(&priv->reg_mutex);
545 ret = qca8k_read(priv, QCA8K_REG_EEE_CTRL, ®);
549 if (eee->eee_enabled)
553 ret = qca8k_write(priv, QCA8K_REG_EEE_CTRL, reg);
556 mutex_unlock(&priv->reg_mutex);

/* DSA .get_mac_eee: no per-MAC EEE state to report on this hardware. */
560 int qca8k_get_mac_eee(struct dsa_switch *ds, int port,
561 struct ethtool_eee *e)
563 /* Nothing to do on the port's MAC */
/*
 * Set or clear the address-learning bit in the port's lookup-control
 * register.  Returns the regmap result.
 */
567 static int qca8k_port_configure_learning(struct dsa_switch *ds, int port,
570 struct qca8k_priv *priv = ds->priv;
573 return regmap_set_bits(priv->regmap,
574 QCA8K_PORT_LOOKUP_CTRL(port),
575 QCA8K_PORT_LOOKUP_LEARN);
577 return regmap_clear_bits(priv->regmap,
578 QCA8K_PORT_LOOKUP_CTRL(port),
579 QCA8K_PORT_LOOKUP_LEARN);

/*
 * DSA .port_stp_state_set: map the bridge STP state to the hardware
 * lookup state, then apply learning only in LEARNING/FORWARDING and
 * only when the bridge port has learning enabled (dp->learning).
 */
582 void qca8k_port_stp_state_set(struct dsa_switch *ds, int port, u8 state)
584 struct dsa_port *dp = dsa_to_port(ds, port);
585 struct qca8k_priv *priv = ds->priv;
586 bool learning = false;
590 case BR_STATE_DISABLED:
591 stp_state = QCA8K_PORT_LOOKUP_STATE_DISABLED;
593 case BR_STATE_BLOCKING:
594 stp_state = QCA8K_PORT_LOOKUP_STATE_BLOCKING;
596 case BR_STATE_LISTENING:
597 stp_state = QCA8K_PORT_LOOKUP_STATE_LISTENING;
599 case BR_STATE_LEARNING:
600 stp_state = QCA8K_PORT_LOOKUP_STATE_LEARNING;
601 learning = dp->learning;
603 case BR_STATE_FORWARDING:
604 learning = dp->learning;
607 stp_state = QCA8K_PORT_LOOKUP_STATE_FORWARD;
611 qca8k_rmw(priv, QCA8K_PORT_LOOKUP_CTRL(port),
612 QCA8K_PORT_LOOKUP_STATE_MASK, stp_state);
614 qca8k_port_configure_learning(ds, port, learning);
/*
 * DSA .port_pre_bridge_flags: only BR_LEARNING is offloadable; reject
 * any other requested flag bit.
 */
617 int qca8k_port_pre_bridge_flags(struct dsa_switch *ds, int port,
618 struct switchdev_brport_flags flags,
619 struct netlink_ext_ack *extack)
621 if (flags.mask & ~BR_LEARNING)

/*
 * DSA .port_bridge_flags: apply the BR_LEARNING flag by toggling the
 * port's hardware learning bit.
 */
627 int qca8k_port_bridge_flags(struct dsa_switch *ds, int port,
628 struct switchdev_brport_flags flags,
629 struct netlink_ext_ack *extack)
633 if (flags.mask & BR_LEARNING) {
634 ret = qca8k_port_configure_learning(ds, port,
635 flags.val & BR_LEARNING);
/*
 * DSA .port_bridge_join: build the port-VLAN member mask.  For every
 * other (non-CPU) port already offloading this bridge, add the joining
 * port to its member mask and collect it into port_mask; finally set
 * the joining port's member mask to CPU port + collected peers.
 */
643 int qca8k_port_bridge_join(struct dsa_switch *ds, int port,
644 struct dsa_bridge bridge,
645 bool *tx_fwd_offload,
646 struct netlink_ext_ack *extack)
648 struct qca8k_priv *priv = ds->priv;
649 int port_mask, cpu_port;
652 cpu_port = dsa_to_port(ds, port)->cpu_dp->index;
653 port_mask = BIT(cpu_port);
655 for (i = 0; i < QCA8K_NUM_PORTS; i++) {
656 if (dsa_is_cpu_port(ds, i))
658 if (!dsa_port_offloads_bridge(dsa_to_port(ds, i), &bridge))
660 /* Add this port to the portvlan mask of the other ports
663 ret = regmap_set_bits(priv->regmap,
664 QCA8K_PORT_LOOKUP_CTRL(i),
672 /* Add all other ports to this ports portvlan mask */
673 ret = qca8k_rmw(priv, QCA8K_PORT_LOOKUP_CTRL(port),
674 QCA8K_PORT_LOOKUP_MEMBER, port_mask);

/*
 * DSA .port_bridge_leave: inverse of join — drop the leaving port from
 * each peer's member mask, then isolate it so only the CPU port
 * remains in its own member mask.
 */
679 void qca8k_port_bridge_leave(struct dsa_switch *ds, int port,
680 struct dsa_bridge bridge)
682 struct qca8k_priv *priv = ds->priv;
685 cpu_port = dsa_to_port(ds, port)->cpu_dp->index;
687 for (i = 0; i < QCA8K_NUM_PORTS; i++) {
688 if (dsa_is_cpu_port(ds, i))
690 if (!dsa_port_offloads_bridge(dsa_to_port(ds, i), &bridge))
692 /* Remove this port to the portvlan mask of the other ports
695 regmap_clear_bits(priv->regmap,
696 QCA8K_PORT_LOOKUP_CTRL(i),
700 /* Set the cpu port to be the only one in the portvlan mask of
703 qca8k_rmw(priv, QCA8K_PORT_LOOKUP_CTRL(port),
704 QCA8K_PORT_LOOKUP_MEMBER, BIT(cpu_port));
/*
 * DSA .port_fast_age: flush the FDB entries learned on @port only
 * (FLUSH_PORT command scoped to the port).
 */
707 void qca8k_port_fast_age(struct dsa_switch *ds, int port)
709 struct qca8k_priv *priv = ds->priv;
711 mutex_lock(&priv->reg_mutex);
712 qca8k_fdb_access(priv, QCA8K_FDB_FLUSH_PORT, port);
713 mutex_unlock(&priv->reg_mutex);

/*
 * DSA .set_ageing_time: convert @msecs to the hardware AGE_TIME unit
 * (7-second steps per the comment below) and program ATU_CTRL.
 * NOTE(review): the conversion lines between 725 and 731 are not
 * visible in this extract.
 */
716 int qca8k_set_ageing_time(struct dsa_switch *ds, unsigned int msecs)
718 struct qca8k_priv *priv = ds->priv;
719 unsigned int secs = msecs / 1000;
722 /* AGE_TIME reg is set in 7s step */
725 /* Handle case with 0 as val to NOT disable
731 return regmap_update_bits(priv->regmap, QCA8K_REG_ATU_CTRL,
732 QCA8K_ATU_AGE_TIME_MASK,
733 QCA8K_ATU_AGE_TIME(val));
/*
 * DSA .port_enable: bring the port's MACs up, record it in
 * port_enabled_map (consulted by the MTU path), and advertise
 * asymmetric pause on user ports' PHYs.
 */
736 int qca8k_port_enable(struct dsa_switch *ds, int port,
737 struct phy_device *phy)
739 struct qca8k_priv *priv = ds->priv;
741 qca8k_port_set_status(priv, port, 1);
742 priv->port_enabled_map |= BIT(port);
744 if (dsa_is_user_port(ds, port))
745 phy_support_asym_pause(phy);

/* DSA .port_disable: MACs down and clear the enabled-map bit. */
750 void qca8k_port_disable(struct dsa_switch *ds, int port)
752 struct qca8k_priv *priv = ds->priv;
754 qca8k_port_set_status(priv, port, 0);
755 priv->port_enabled_map &= ~BIT(port);
/*
 * DSA .port_change_mtu: the switch has one global MAX_FRAME_SIZE, so
 * only CPU-port requests are honored.  CPU ports 0 and 6 are taken
 * down (if currently enabled) around the write to avoid disrupting
 * traffic with a transiently wrong frame limit, then restored.
 */
758 int qca8k_port_change_mtu(struct dsa_switch *ds, int port, int new_mtu)
760 struct qca8k_priv *priv = ds->priv;
763 /* We have only have a general MTU setting.
764 * DSA always set the CPU port's MTU to the largest MTU of the user
766 * Setting MTU just for the CPU port is sufficient to correctly set a
767 * value for every port.
769 if (!dsa_is_cpu_port(ds, port))
772 /* To change the MAX_FRAME_SIZE the cpu ports must be off or
774 * Turn off both cpu ports before applying the new value to prevent
777 if (priv->port_enabled_map & BIT(0))
778 qca8k_port_set_status(priv, 0, 0);
780 if (priv->port_enabled_map & BIT(6))
781 qca8k_port_set_status(priv, 6, 0);
783 /* Include L2 header / FCS length */
784 ret = qca8k_write(priv, QCA8K_MAX_FRAME_SIZE, new_mtu +
785 ETH_HLEN + ETH_FCS_LEN);
787 if (priv->port_enabled_map & BIT(0))
788 qca8k_port_set_status(priv, 0, 1);
790 if (priv->port_enabled_map & BIT(6))
791 qca8k_port_set_status(priv, 6, 1);

/* DSA .port_max_mtu: fixed hardware limit, same for every port. */
796 int qca8k_port_max_mtu(struct dsa_switch *ds, int port)
798 return QCA8K_MAX_MTU;
/*
 * Insert a static FDB entry, defaulting vid to the port-default VLAN
 * when the caller passes no vid.
 */
801 int qca8k_port_fdb_insert(struct qca8k_priv *priv, const u8 *addr,
802 u16 port_mask, u16 vid)
804 /* Set the vid to the port vlan id if no vid is set */
806 vid = QCA8K_PORT_VID_DEF;
808 return qca8k_fdb_add(priv, addr, port_mask, vid,
809 QCA8K_ATU_STATUS_STATIC);

/* DSA .port_fdb_add: static entry for this single port. */
812 int qca8k_port_fdb_add(struct dsa_switch *ds, int port,
813 const unsigned char *addr, u16 vid,
816 struct qca8k_priv *priv = ds->priv;
817 u16 port_mask = BIT(port);
819 return qca8k_port_fdb_insert(priv, addr, port_mask, vid);

/* DSA .port_fdb_del: remove the entry, defaulting vid like add. */
822 int qca8k_port_fdb_del(struct dsa_switch *ds, int port,
823 const unsigned char *addr, u16 vid,
826 struct qca8k_priv *priv = ds->priv;
827 u16 port_mask = BIT(port);
830 vid = QCA8K_PORT_VID_DEF;
832 return qca8k_fdb_del(priv, addr, port_mask, vid);

/*
 * DSA .port_fdb_dump: iterate the ATU with NEXT (bounded by the record
 * count) and report each entry through @cb, flagging static entries.
 */
835 int qca8k_port_fdb_dump(struct dsa_switch *ds, int port,
836 dsa_fdb_dump_cb_t *cb, void *data)
838 struct qca8k_priv *priv = ds->priv;
839 struct qca8k_fdb _fdb = { 0 };
840 int cnt = QCA8K_NUM_FDB_RECORDS;
844 mutex_lock(&priv->reg_mutex);
845 while (cnt-- && !qca8k_fdb_next(priv, &_fdb, port)) {
848 is_static = (_fdb.aging == QCA8K_ATU_STATUS_STATIC);
849 ret = cb(_fdb.mac, _fdb.vid, is_static, data);
853 mutex_unlock(&priv->reg_mutex);
/*
 * DSA .port_mdb_add: multicast entries share the FDB, so merge this
 * port into any existing static entry for the group address/vid.
 */
858 int qca8k_port_mdb_add(struct dsa_switch *ds, int port,
859 const struct switchdev_obj_port_mdb *mdb,
862 struct qca8k_priv *priv = ds->priv;
863 const u8 *addr = mdb->addr;
867 vid = QCA8K_PORT_VID_DEF;
869 return qca8k_fdb_search_and_insert(priv, BIT(port), addr, vid,
870 QCA8K_ATU_STATUS_STATIC);

/*
 * DSA .port_mdb_del: remove this port from the group's entry, deleting
 * the entry entirely when it was the last member.
 */
873 int qca8k_port_mdb_del(struct dsa_switch *ds, int port,
874 const struct switchdev_obj_port_mdb *mdb,
877 struct qca8k_priv *priv = ds->priv;
878 const u8 *addr = mdb->addr;
882 vid = QCA8K_PORT_VID_DEF;
884 return qca8k_fdb_search_and_del(priv, BIT(port), addr, vid);
/*
 * DSA .port_mirror_add: the chip supports a single global monitor
 * port.  Reject a second mirror rule on the same port/direction, and
 * reject a different destination while one is already programmed
 * (0xF in MIRROR_PORT_NUM means "no monitor port set").  Then enable
 * ingress mirroring via lookup-control or egress mirroring via
 * HOL_CTRL1, and track the rule in mirror_rx/mirror_tx bitmaps.
 */
887 int qca8k_port_mirror_add(struct dsa_switch *ds, int port,
888 struct dsa_mall_mirror_tc_entry *mirror,
889 bool ingress, struct netlink_ext_ack *extack)
891 struct qca8k_priv *priv = ds->priv;
892 int monitor_port, ret;
895 /* Check for existent entry */
896 if ((ingress ? priv->mirror_rx : priv->mirror_tx) & BIT(port))
899 ret = regmap_read(priv->regmap, QCA8K_REG_GLOBAL_FW_CTRL0, &val);
903 /* QCA83xx can have only one port set to mirror mode.
904 * Check that the correct port is requested and return error otherwise.
905 * When no mirror port is set, the values is set to 0xF
907 monitor_port = FIELD_GET(QCA8K_GLOBAL_FW_CTRL0_MIRROR_PORT_NUM, val);
908 if (monitor_port != 0xF && monitor_port != mirror->to_local_port)
911 /* Set the monitor port */
912 val = FIELD_PREP(QCA8K_GLOBAL_FW_CTRL0_MIRROR_PORT_NUM,
913 mirror->to_local_port);
914 ret = regmap_update_bits(priv->regmap, QCA8K_REG_GLOBAL_FW_CTRL0,
915 QCA8K_GLOBAL_FW_CTRL0_MIRROR_PORT_NUM, val);
920 reg = QCA8K_PORT_LOOKUP_CTRL(port);
921 val = QCA8K_PORT_LOOKUP_ING_MIRROR_EN;
923 reg = QCA8K_REG_PORT_HOL_CTRL1(port);
924 val = QCA8K_PORT_HOL_CTRL1_EG_MIRROR_EN;
927 ret = regmap_update_bits(priv->regmap, reg, val, val);
931 /* Track mirror port for tx and rx to decide when the
932 * mirror port has to be disabled.
935 priv->mirror_rx |= BIT(port);
937 priv->mirror_tx |= BIT(port);

/*
 * DSA .port_mirror_del: clear the per-port mirror-enable bit for the
 * rule's direction, drop it from the tracking bitmaps, and when no
 * rule remains in either direction, reset MIRROR_PORT_NUM to 0xF to
 * disable the monitor port.
 */
942 void qca8k_port_mirror_del(struct dsa_switch *ds, int port,
943 struct dsa_mall_mirror_tc_entry *mirror)
945 struct qca8k_priv *priv = ds->priv;
949 if (mirror->ingress) {
950 reg = QCA8K_PORT_LOOKUP_CTRL(port);
951 val = QCA8K_PORT_LOOKUP_ING_MIRROR_EN;
953 reg = QCA8K_REG_PORT_HOL_CTRL1(port);
954 val = QCA8K_PORT_HOL_CTRL1_EG_MIRROR_EN;
957 ret = regmap_clear_bits(priv->regmap, reg, val);
962 priv->mirror_rx &= ~BIT(port);
964 priv->mirror_tx &= ~BIT(port);
966 /* No port set to send packet to mirror port. Disable mirror port */
967 if (!priv->mirror_rx && !priv->mirror_tx) {
968 val = FIELD_PREP(QCA8K_GLOBAL_FW_CTRL0_MIRROR_PORT_NUM, 0xF);
969 ret = regmap_update_bits(priv->regmap, QCA8K_REG_GLOBAL_FW_CTRL0,
970 QCA8K_GLOBAL_FW_CTRL0_MIRROR_PORT_NUM, val);
975 dev_err(priv->dev, "Failed to del mirror port from %d", port);
/*
 * DSA .port_vlan_filtering: select the port's 802.1Q lookup mode —
 * SECURE when VLAN filtering is on, NONE (VLAN-unaware) otherwise.
 */
978 int qca8k_port_vlan_filtering(struct dsa_switch *ds, int port,
980 struct netlink_ext_ack *extack)
982 struct qca8k_priv *priv = ds->priv;
985 if (vlan_filtering) {
986 ret = qca8k_rmw(priv, QCA8K_PORT_LOOKUP_CTRL(port),
987 QCA8K_PORT_LOOKUP_VLAN_MODE_MASK,
988 QCA8K_PORT_LOOKUP_VLAN_MODE_SECURE);
990 ret = qca8k_rmw(priv, QCA8K_PORT_LOOKUP_CTRL(port),
991 QCA8K_PORT_LOOKUP_VLAN_MODE_MASK,
992 QCA8K_PORT_LOOKUP_VLAN_MODE_NONE);

/*
 * DSA .port_vlan_add: program the VTU entry for the vid (tagged or
 * untagged per the bridge flag); when the VLAN is the PVID, also set
 * the port's default egress vid and CVID/SVID defaults.
 */
998 int qca8k_port_vlan_add(struct dsa_switch *ds, int port,
999 const struct switchdev_obj_port_vlan *vlan,
1000 struct netlink_ext_ack *extack)
1002 bool untagged = vlan->flags & BRIDGE_VLAN_INFO_UNTAGGED;
1003 bool pvid = vlan->flags & BRIDGE_VLAN_INFO_PVID;
1004 struct qca8k_priv *priv = ds->priv;
1007 ret = qca8k_vlan_add(priv, port, vlan->vid, untagged);
1009 dev_err(priv->dev, "Failed to add VLAN to port %d (%d)", port, ret);
1014 ret = qca8k_rmw(priv, QCA8K_EGRESS_VLAN(port),
1015 QCA8K_EGREES_VLAN_PORT_MASK(port),
1016 QCA8K_EGREES_VLAN_PORT(port, vlan->vid));
1020 ret = qca8k_write(priv, QCA8K_REG_PORT_VLAN_CTRL0(port),
1021 QCA8K_PORT_VLAN_CVID(vlan->vid) |
1022 QCA8K_PORT_VLAN_SVID(vlan->vid));

/* DSA .port_vlan_del: drop the port from the vid's VTU entry. */
1028 int qca8k_port_vlan_del(struct dsa_switch *ds, int port,
1029 const struct switchdev_obj_port_vlan *vlan)
1031 struct qca8k_priv *priv = ds->priv;
1034 ret = qca8k_vlan_del(priv, port, vlan->vid);
1036 dev_err(priv->dev, "Failed to delete VLAN from port %d (%d)", port, ret);
/*
 * Validate a LAG join request against hardware limits: at most
 * QCA8K_NUM_PORTS_FOR_LAG members (the count includes the joining
 * port), hash TX policy only, and only L2 or L2+L3 hashing.  Reasons
 * for rejection are reported via @extack.
 */
1041 static bool qca8k_lag_can_offload(struct dsa_switch *ds,
1043 struct netdev_lag_upper_info *info,
1044 struct netlink_ext_ack *extack)
1046 struct dsa_port *dp;
1052 dsa_lag_foreach_port(dp, ds->dst, &lag)
1053 /* Includes the port joining the LAG */
1056 if (members > QCA8K_NUM_PORTS_FOR_LAG) {
1057 NL_SET_ERR_MSG_MOD(extack,
1058 "Cannot offload more than 4 LAG ports");
1062 if (info->tx_type != NETDEV_LAG_TX_TYPE_HASH) {
1063 NL_SET_ERR_MSG_MOD(extack,
1064 "Can only offload LAG using hash TX type");
1068 if (info->hash_type != NETDEV_LAG_HASH_L2 &&
1069 info->hash_type != NETDEV_LAG_HASH_L23) {
1070 NL_SET_ERR_MSG_MOD(extack,
1071 "Can only offload L2 or L2+L3 TX hash");

/*
 * Program the global trunk hash mode.  L23 accumulates the IP bits on
 * top of the L2 SA/DA bits (fallthrough into the L2 case).  The hash
 * mode is chip-global: if other LAGs already exist, the requested mode
 * must match the cached lag_hash_mode or the setup is refused.
 */
1078 static int qca8k_lag_setup_hash(struct dsa_switch *ds,
1080 struct netdev_lag_upper_info *info)
1082 struct net_device *lag_dev = lag.dev;
1083 struct qca8k_priv *priv = ds->priv;
1084 bool unique_lag = true;
1088 switch (info->hash_type) {
1089 case NETDEV_LAG_HASH_L23:
1090 hash |= QCA8K_TRUNK_HASH_SIP_EN;
1091 hash |= QCA8K_TRUNK_HASH_DIP_EN;
1093 case NETDEV_LAG_HASH_L2:
1094 hash |= QCA8K_TRUNK_HASH_SA_EN;
1095 hash |= QCA8K_TRUNK_HASH_DA_EN;
1097 default: /* We should NEVER reach this */
1101 /* Check if we are the unique configured LAG */
1102 dsa_lags_foreach_id(i, ds->dst)
1103 if (i != lag.id && dsa_lag_by_id(ds->dst, i)) {
1108 /* Hash Mode is global. Make sure the same Hash Mode
1109 * is set to all the 4 possible lag.
1110 * If we are the unique LAG we can set whatever hash
1112 * To change hash mode it's needed to remove all LAG
1113 * and change the mode with the latest.
1116 priv->lag_hash_mode = hash;
1117 } else if (priv->lag_hash_mode != hash) {
1118 netdev_err(lag_dev, "Error: Mismatched Hash Mode across different lag is not supported\n");
1122 return regmap_update_bits(priv->regmap, QCA8K_TRUNK_HASH_EN_CTRL,
1123 QCA8K_TRUNK_HASH_MASK, hash);
/*
 * Add or remove @port in the trunk member registers for @lag.
 * First updates the trunk-wide member bitmap and enable bit in
 * GOL_TRUNK_CTRL0 (a trunk with an empty member map is disabled),
 * then scans the trunk's member slots: on add, the first slot whose
 * enable bit is clear is claimed; on delete, the slot holding @port
 * is located and disabled.  DSA LAG IDs are one-based while the
 * hardware trunk index is zero-based (converted near line 1136,
 * not fully visible in this extract).
 */
1126 static int qca8k_lag_refresh_portmap(struct dsa_switch *ds, int port,
1127 struct dsa_lag lag, bool delete)
1129 struct qca8k_priv *priv = ds->priv;
1133 /* DSA LAG IDs are one-based, hardware is zero-based */
1136 /* Read current port member */
1137 ret = regmap_read(priv->regmap, QCA8K_REG_GOL_TRUNK_CTRL0, &val);
1141 /* Shift val to the correct trunk */
1142 val >>= QCA8K_REG_GOL_TRUNK_SHIFT(id);
1143 val &= QCA8K_REG_GOL_TRUNK_MEMBER_MASK;
1149 /* Update port member. With empty portmap disable trunk */
1150 ret = regmap_update_bits(priv->regmap, QCA8K_REG_GOL_TRUNK_CTRL0,
1151 QCA8K_REG_GOL_TRUNK_MEMBER(id) |
1152 QCA8K_REG_GOL_TRUNK_EN(id),
1153 !val << QCA8K_REG_GOL_TRUNK_SHIFT(id) |
1154 val << QCA8K_REG_GOL_TRUNK_SHIFT(id));
1156 /* Search empty member if adding or port on deleting */
1157 for (i = 0; i < QCA8K_NUM_PORTS_FOR_LAG; i++) {
1158 ret = regmap_read(priv->regmap, QCA8K_REG_GOL_TRUNK_CTRL(id), &val);
1162 val >>= QCA8K_REG_GOL_TRUNK_ID_MEM_ID_SHIFT(id, i);
1163 val &= QCA8K_REG_GOL_TRUNK_ID_MEM_ID_MASK;
1166 /* If port flagged to be disabled assume this member is
1169 if (val != QCA8K_REG_GOL_TRUNK_ID_MEM_ID_EN_MASK)
1172 val &= QCA8K_REG_GOL_TRUNK_ID_MEM_ID_PORT_MASK;
1176 /* If port flagged to be enabled assume this member is
1179 if (val == QCA8K_REG_GOL_TRUNK_ID_MEM_ID_EN_MASK)
1183 /* We have found the member to add/remove */
1187 /* Set port in the correct port mask or disable port if in delete mode */
1188 return regmap_update_bits(priv->regmap, QCA8K_REG_GOL_TRUNK_CTRL(id),
1189 QCA8K_REG_GOL_TRUNK_ID_MEM_ID_EN(id, i) |
1190 QCA8K_REG_GOL_TRUNK_ID_MEM_ID_PORT(id, i),
1191 !delete << QCA8K_REG_GOL_TRUNK_ID_MEM_ID_SHIFT(id, i) |
1192 port << QCA8K_REG_GOL_TRUNK_ID_MEM_ID_SHIFT(id, i));

/*
 * DSA .port_lag_join: validate the request, program the global hash
 * mode, then add the port to the trunk member map.
 */
1195 int qca8k_port_lag_join(struct dsa_switch *ds, int port, struct dsa_lag lag,
1196 struct netdev_lag_upper_info *info,
1197 struct netlink_ext_ack *extack)
1201 if (!qca8k_lag_can_offload(ds, lag, info, extack))
1204 ret = qca8k_lag_setup_hash(ds, lag, info);
1208 return qca8k_lag_refresh_portmap(ds, port, lag, false);

/* DSA .port_lag_leave: remove the port from the trunk member map. */
1211 int qca8k_port_lag_leave(struct dsa_switch *ds, int port,
1214 return qca8k_lag_refresh_portmap(ds, port, lag, true);
1217 int qca8k_read_switch_id(struct qca8k_priv *priv)
1226 ret = qca8k_read(priv, QCA8K_REG_MASK_CTRL, &val);
1230 id = QCA8K_MASK_CTRL_DEVICE_ID(val);
1231 if (id != priv->info->id) {
1233 "Switch id detected %x but expected %x",
1234 id, priv->info->id);
1238 priv->switch_id = id;
1240 /* Save revision to communicate to the internal PHY driver */
1241 priv->switch_revision = QCA8K_MASK_CTRL_REV_ID(val);