// SPDX-License-Identifier: GPL-2.0+

#include <linux/if_bridge.h>

#include "lan966x_main.h"
7 static void lan966x_lag_set_aggr_pgids(struct lan966x *lan966x)
9 u32 visited = GENMASK(lan966x->num_phys_ports - 1, 0);
12 /* Reset destination and aggregation PGIDS */
13 for (p = 0; p < lan966x->num_phys_ports; ++p)
14 lan_wr(ANA_PGID_PGID_SET(BIT(p)),
15 lan966x, ANA_PGID(p));
17 for (p = PGID_AGGR; p < PGID_SRC; ++p)
18 lan_wr(ANA_PGID_PGID_SET(visited),
19 lan966x, ANA_PGID(p));
21 /* The visited ports bitmask holds the list of ports offloading any
22 * bonding interface. Initially we mark all these ports as unvisited,
23 * then every time we visit a port in this bitmask, we know that it is
24 * the lowest numbered port, i.e. the one whose logical ID == physical
25 * port ID == LAG ID. So we mark as visited all further ports in the
26 * bitmask that are offloading the same bonding interface. This way,
27 * we set up the aggregation PGIDs only once per bonding interface.
29 for (p = 0; p < lan966x->num_phys_ports; ++p) {
30 struct lan966x_port *port = lan966x->ports[p];
32 if (!port || !port->bond)
38 /* Now, set PGIDs for each active LAG */
39 for (lag = 0; lag < lan966x->num_phys_ports; ++lag) {
40 struct lan966x_port *port = lan966x->ports[lag];
41 int num_active_ports = 0;
42 struct net_device *bond;
43 unsigned long bond_mask;
46 if (!port || !port->bond || (visited & BIT(lag)))
50 bond_mask = lan966x_lag_get_mask(lan966x, bond);
52 for_each_set_bit(p, &bond_mask, lan966x->num_phys_ports) {
53 struct lan966x_port *port = lan966x->ports[p];
58 lan_wr(ANA_PGID_PGID_SET(bond_mask),
59 lan966x, ANA_PGID(p));
60 if (port->lag_tx_active)
61 aggr_idx[num_active_ports++] = p;
64 for (i = PGID_AGGR; i < PGID_SRC; ++i) {
67 ac = lan_rd(lan966x, ANA_PGID(i));
69 /* Don't do division by zero if there was no active
70 * port. Just make all aggregation codes zero.
73 ac |= BIT(aggr_idx[i % num_active_ports]);
74 lan_wr(ANA_PGID_PGID_SET(ac),
75 lan966x, ANA_PGID(i));
78 /* Mark all ports in the same LAG as visited to avoid applying
79 * the same config again.
81 for (p = lag; p < lan966x->num_phys_ports; p++) {
82 struct lan966x_port *port = lan966x->ports[p];
87 if (port->bond == bond)
93 static void lan966x_lag_set_port_ids(struct lan966x *lan966x)
95 struct lan966x_port *port;
100 for (p = 0; p < lan966x->num_phys_ports; ++p) {
101 port = lan966x->ports[p];
105 lag_id = port->chip_port;
107 bond_mask = lan966x_lag_get_mask(lan966x, port->bond);
109 lag_id = __ffs(bond_mask);
111 lan_rmw(ANA_PORT_CFG_PORTID_VAL_SET(lag_id),
112 ANA_PORT_CFG_PORTID_VAL,
113 lan966x, ANA_PORT_CFG(port->chip_port));
/* Refresh all LAG-derived hardware state: logical port IDs first (the
 * forwarding mask and PGIDs are computed from them), then the forwarding
 * mask, then the aggregation PGIDs.
 */
static void lan966x_lag_update_ids(struct lan966x *lan966x)
{
	lan966x_lag_set_port_ids(lan966x);
	lan966x_update_fwd_mask(lan966x);
	lan966x_lag_set_aggr_pgids(lan966x);
}
124 int lan966x_lag_port_join(struct lan966x_port *port,
125 struct net_device *brport_dev,
126 struct net_device *bond,
127 struct netlink_ext_ack *extack)
129 struct lan966x *lan966x = port->lan966x;
130 struct net_device *dev = port->dev;
135 bond_mask = lan966x_lag_get_mask(lan966x, bond);
137 lag_id = __ffs(bond_mask);
140 lan966x_lag_update_ids(lan966x);
142 err = switchdev_bridge_port_offload(brport_dev, dev, port,
143 &lan966x_switchdev_nb,
144 &lan966x_switchdev_blocking_nb,
149 lan966x_port_stp_state_set(port, br_port_get_stp_state(brport_dev));
151 if (lan966x_lag_first_port(port->bond, port->dev) &&
153 lan966x_mac_lag_replace_port_entry(lan966x,
154 lan966x->ports[lag_id],
161 lan966x_lag_update_ids(lan966x);
166 void lan966x_lag_port_leave(struct lan966x_port *port, struct net_device *bond)
168 struct lan966x *lan966x = port->lan966x;
172 if (lan966x_lag_first_port(port->bond, port->dev)) {
173 bond_mask = lan966x_lag_get_mask(lan966x, port->bond);
174 bond_mask &= ~BIT(port->chip_port);
176 lag_id = __ffs(bond_mask);
177 lan966x_mac_lag_replace_port_entry(lan966x, port,
178 lan966x->ports[lag_id]);
180 lan966x_mac_lag_remove_port_entry(lan966x, port);
185 lan966x_lag_update_ids(lan966x);
186 lan966x_port_stp_state_set(port, BR_STATE_FORWARDING);
189 static bool lan966x_lag_port_check_hash_types(struct lan966x *lan966x,
190 enum netdev_lag_hash hash_type)
194 for (p = 0; p < lan966x->num_phys_ports; ++p) {
195 struct lan966x_port *port = lan966x->ports[p];
197 if (!port || !port->bond)
200 if (port->hash_type != hash_type)
207 int lan966x_lag_port_prechangeupper(struct net_device *dev,
208 struct netdev_notifier_changeupper_info *info)
210 struct lan966x_port *port = netdev_priv(dev);
211 struct lan966x *lan966x = port->lan966x;
212 struct netdev_lag_upper_info *lui;
213 struct netlink_ext_ack *extack;
215 extack = netdev_notifier_info_to_extack(&info->info);
216 lui = info->upper_info;
218 port->hash_type = NETDEV_LAG_HASH_NONE;
222 if (lui->tx_type != NETDEV_LAG_TX_TYPE_HASH) {
223 NL_SET_ERR_MSG_MOD(extack,
224 "LAG device using unsupported Tx type");
228 if (!lan966x_lag_port_check_hash_types(lan966x, lui->hash_type)) {
229 NL_SET_ERR_MSG_MOD(extack,
230 "LAG devices can have only the same hash_type");
234 switch (lui->hash_type) {
235 case NETDEV_LAG_HASH_L2:
236 lan_wr(ANA_AGGR_CFG_AC_DMAC_ENA_SET(1) |
237 ANA_AGGR_CFG_AC_SMAC_ENA_SET(1),
238 lan966x, ANA_AGGR_CFG);
240 case NETDEV_LAG_HASH_L34:
241 lan_wr(ANA_AGGR_CFG_AC_IP6_TCPUDP_ENA_SET(1) |
242 ANA_AGGR_CFG_AC_IP4_TCPUDP_ENA_SET(1) |
243 ANA_AGGR_CFG_AC_IP4_SIPDIP_ENA_SET(1),
244 lan966x, ANA_AGGR_CFG);
246 case NETDEV_LAG_HASH_L23:
247 lan_wr(ANA_AGGR_CFG_AC_DMAC_ENA_SET(1) |
248 ANA_AGGR_CFG_AC_SMAC_ENA_SET(1) |
249 ANA_AGGR_CFG_AC_IP6_TCPUDP_ENA_SET(1) |
250 ANA_AGGR_CFG_AC_IP4_TCPUDP_ENA_SET(1),
251 lan966x, ANA_AGGR_CFG);
254 NL_SET_ERR_MSG_MOD(extack,
255 "LAG device using unsupported hash type");
259 port->hash_type = lui->hash_type;
264 int lan966x_lag_port_changelowerstate(struct net_device *dev,
265 struct netdev_notifier_changelowerstate_info *info)
267 struct netdev_lag_lower_state_info *lag = info->lower_state_info;
268 struct lan966x_port *port = netdev_priv(dev);
269 struct lan966x *lan966x = port->lan966x;
275 is_active = lag->link_up && lag->tx_enabled;
276 if (port->lag_tx_active == is_active)
279 port->lag_tx_active = is_active;
280 lan966x_lag_set_aggr_pgids(lan966x);
285 int lan966x_lag_netdev_prechangeupper(struct net_device *dev,
286 struct netdev_notifier_changeupper_info *info)
288 struct lan966x_port *port;
289 struct net_device *lower;
290 struct list_head *iter;
293 netdev_for_each_lower_dev(dev, lower, iter) {
294 if (!lan966x_netdevice_check(lower))
297 port = netdev_priv(lower);
298 if (port->bond != dev)
301 err = lan966x_port_prechangeupper(lower, dev, info);
309 int lan966x_lag_netdev_changeupper(struct net_device *dev,
310 struct netdev_notifier_changeupper_info *info)
312 struct lan966x_port *port;
313 struct net_device *lower;
314 struct list_head *iter;
317 netdev_for_each_lower_dev(dev, lower, iter) {
318 if (!lan966x_netdevice_check(lower))
321 port = netdev_priv(lower);
322 if (port->bond != dev)
325 err = lan966x_port_changeupper(lower, dev, info);
333 bool lan966x_lag_first_port(struct net_device *lag, struct net_device *dev)
335 struct lan966x_port *port = netdev_priv(dev);
336 struct lan966x *lan966x = port->lan966x;
337 unsigned long bond_mask;
339 if (port->bond != lag)
342 bond_mask = lan966x_lag_get_mask(lan966x, lag);
343 if (bond_mask && port->chip_port == __ffs(bond_mask))
349 u32 lan966x_lag_get_mask(struct lan966x *lan966x, struct net_device *bond)
351 struct lan966x_port *port;
358 for (p = 0; p < lan966x->num_phys_ports; p++) {
359 port = lan966x->ports[p];
363 if (port->bond == bond)