1 // SPDX-License-Identifier: GPL-2.0-or-later
3 * Handling of a single switch chip, part of a switch fabric
5 * Copyright (c) 2017 Savoir-faire Linux Inc.
9 #include <linux/if_bridge.h>
10 #include <linux/netdevice.h>
11 #include <linux/notifier.h>
12 #include <linux/if_vlan.h>
13 #include <net/switchdev.h>
/* Return the smallest non-zero per-port ageing time configured on any port of
 * @ds, starting from @ageing_time as the upper bound.
 * NOTE(review): listing is elided — the declaration of dp and the final
 * return statement are not visible here; confirm against the full source.
 */
17 static unsigned int dsa_switch_fastest_ageing_time(struct dsa_switch *ds,
18 unsigned int ageing_time)
22 dsa_switch_for_each_port(dp, ds)
23 if (dp->ageing_time && dp->ageing_time < ageing_time)
24 ageing_time = dp->ageing_time;
/* Handle a DSA_NOTIFIER_AGEING_TIME event: validate the requested time
 * against the driver-advertised min/max bounds (the error-return lines are
 * elided from this listing), pick the fastest ageing time across all ports,
 * and program it through the driver's set_ageing_time op if one exists.
 */
29 static int dsa_switch_ageing_time(struct dsa_switch *ds,
30 struct dsa_notifier_ageing_time_info *info)
32 unsigned int ageing_time = info->ageing_time;
/* Zero min/max means "no bound advertised by the driver" */
34 if (ds->ageing_time_min && ageing_time < ds->ageing_time_min)
37 if (ds->ageing_time_max && ageing_time > ds->ageing_time_max)
40 /* Program the fastest ageing time in case of multiple bridges */
41 ageing_time = dsa_switch_fastest_ageing_time(ds, ageing_time);
43 if (ds->ops->set_ageing_time)
44 return ds->ops->set_ageing_time(ds, ageing_time);
/* Decide whether @dp should be reprogrammed for an MTU notifier: the exact
 * targeted port matches; shared (DSA link / CPU) ports also match unless
 * info->targeted_match restricts the event to a single port.
 * NOTE(review): the return statements for each branch are elided here.
 */
49 static bool dsa_port_mtu_match(struct dsa_port *dp,
50 struct dsa_notifier_mtu_info *info)
52 if (dp->ds->index == info->sw_index && dp->index == info->port)
55 /* Do not propagate to other switches in the tree if the notifier was
56 * targeted for a single switch.
58 if (info->targeted_match)
61 if (dsa_port_is_dsa(dp) || dsa_port_is_cpu(dp))
/* Handle DSA_NOTIFIER_MTU: apply the driver's port_change_mtu op to every
 * port of @ds that dsa_port_mtu_match() selects. Returns -EOPNOTSUPP-style
 * bail-out when the op is absent (elided in this listing).
 */
67 static int dsa_switch_mtu(struct dsa_switch *ds,
68 struct dsa_notifier_mtu_info *info)
73 if (!ds->ops->port_change_mtu)
76 dsa_switch_for_each_port(dp, ds) {
77 if (dsa_port_mtu_match(dp, info)) {
78 ret = ds->ops->port_change_mtu(ds, dp->index,
/* Handle DSA_NOTIFIER_BRIDGE_JOIN. If the event targets this very switch
 * (same tree and switch index), call port_bridge_join; otherwise, for other
 * switches in the same fabric, call crosschip_bridge_join so they can learn
 * about the foreign port joining the bridge.
 */
88 static int dsa_switch_bridge_join(struct dsa_switch *ds,
89 struct dsa_notifier_bridge_info *info)
91 struct dsa_switch_tree *dst = ds->dst;
94 if (dst->index == info->tree_index && ds->index == info->sw_index) {
95 if (!ds->ops->port_bridge_join)
98 err = ds->ops->port_bridge_join(ds, info->port, info->bridge,
99 &info->tx_fwd_offload,
105 if ((dst->index != info->tree_index || ds->index != info->sw_index) &&
106 ds->ops->crosschip_bridge_join) {
107 err = ds->ops->crosschip_bridge_join(ds, info->tree_index,
109 info->port, info->bridge,
/* On bridge leave, resynchronize the port's VLAN filtering setting with what
 * standalone operation requires: enable it when the hardware needs VLAN
 * filtering while standalone and the bridge had it off, or disable it in the
 * symmetric case. For switches with a global (not per-port) vlan_filtering
 * knob, only flip it once no port remains in a VLAN-aware bridge.
 */
118 static int dsa_switch_sync_vlan_filtering(struct dsa_switch *ds,
119 struct dsa_notifier_bridge_info *info)
121 struct netlink_ext_ack extack = {0};
122 bool change_vlan_filtering = false;
127 if (ds->needs_standalone_vlan_filtering &&
128 !br_vlan_enabled(info->bridge.dev)) {
129 change_vlan_filtering = true;
130 vlan_filtering = true;
131 } else if (!ds->needs_standalone_vlan_filtering &&
132 br_vlan_enabled(info->bridge.dev)) {
133 change_vlan_filtering = true;
134 vlan_filtering = false;
137 /* If the bridge was vlan_filtering, the bridge core doesn't trigger an
138 * event for changing vlan_filtering setting upon slave ports leaving
139 * it. That is a good thing, because that lets us handle it and also
140 * handle the case where the switch's vlan_filtering setting is global
141 * (not per port). When that happens, the correct moment to trigger the
142 * vlan_filtering callback is only when the last port leaves the last
145 if (change_vlan_filtering && ds->vlan_filtering_is_global) {
146 dsa_switch_for_each_port(dp, ds) {
147 struct net_device *br = dsa_port_bridge_dev_get(dp);
/* Another port is still in a VLAN-aware bridge: keep the global setting */
149 if (br && br_vlan_enabled(br)) {
150 change_vlan_filtering = false;
156 if (change_vlan_filtering) {
157 err = dsa_port_vlan_filtering(dsa_to_port(ds, info->port),
158 vlan_filtering, &extack);
160 dev_err(ds->dev, "port %d: %s\n", info->port,
/* -EOPNOTSUPP from the driver is tolerated; real errors propagate */
162 if (err && err != -EOPNOTSUPP)
/* Handle DSA_NOTIFIER_BRIDGE_LEAVE: notify the targeted switch via
 * port_bridge_leave, other fabric members via crosschip_bridge_leave, then
 * resync VLAN filtering on the targeted switch for standalone operation.
 */
169 static int dsa_switch_bridge_leave(struct dsa_switch *ds,
170 struct dsa_notifier_bridge_info *info)
172 struct dsa_switch_tree *dst = ds->dst;
175 if (dst->index == info->tree_index && ds->index == info->sw_index &&
176 ds->ops->port_bridge_leave)
177 ds->ops->port_bridge_leave(ds, info->port, info->bridge);
179 if ((dst->index != info->tree_index || ds->index != info->sw_index) &&
180 ds->ops->crosschip_bridge_leave)
181 ds->ops->crosschip_bridge_leave(ds, info->tree_index,
182 info->sw_index, info->port,
185 if (ds->dst->index == info->tree_index && ds->index == info->sw_index) {
186 err = dsa_switch_sync_vlan_filtering(ds, info);
194 /* Matches for all upstream-facing ports (the CPU port and all upstream-facing
195 * DSA links) that sit between the targeted port on which the notifier was
196 * emitted and its dedicated CPU port.
198 static bool dsa_port_host_address_match(struct dsa_port *dp,
199 int info_sw_index, int info_port)
201 struct dsa_port *targeted_dp, *cpu_dp;
202 struct dsa_switch *targeted_ds;
/* Resolve the (switch index, port) pair from the notifier into a dsa_port */
204 targeted_ds = dsa_switch_find(dp->ds->dst->index, info_sw_index);
205 targeted_dp = dsa_to_port(targeted_ds, info_port);
206 cpu_dp = targeted_dp->cpu_dp;
/* Only switches on the path towards the host can match; dp matches when it
 * is the egress port leading towards the targeted port's CPU port.
 */
208 if (dsa_switch_is_upstream_of(dp->ds, targeted_ds))
209 return dp->index == dsa_towards_port(dp->ds, cpu_dp->ds->index,
/* Linear search of @addr_list for an entry matching (addr, vid, db).
 * Returns the entry, or NULL when absent (return lines elided in listing).
 * Caller must hold the lock protecting @addr_list.
 */
215 static struct dsa_mac_addr *dsa_mac_addr_find(struct list_head *addr_list,
216 const unsigned char *addr, u16 vid,
219 struct dsa_mac_addr *a;
221 list_for_each_entry(a, addr_list, list)
222 if (ether_addr_equal(a->addr, addr) && a->vid == vid &&
223 dsa_db_equal(&a->db, &db))
/* Add an MDB entry on @dp. User ports program the hardware directly; shared
 * (CPU/DSA) ports are refcounted via dp->mdbs so that multiple requesters of
 * the same (addr, vid, db) tuple only program the hardware once.
 */
229 static int dsa_port_do_mdb_add(struct dsa_port *dp,
230 const struct switchdev_obj_port_mdb *mdb,
233 struct dsa_switch *ds = dp->ds;
234 struct dsa_mac_addr *a;
235 int port = dp->index;
238 /* No need to bother with refcounting for user ports */
239 if (!(dsa_port_is_cpu(dp) || dsa_port_is_dsa(dp)))
240 return ds->ops->port_mdb_add(ds, port, mdb, db);
242 mutex_lock(&dp->addr_lists_lock);
/* Already present: just take another reference */
244 a = dsa_mac_addr_find(&dp->mdbs, mdb->addr, mdb->vid, db);
246 refcount_inc(&a->refcount);
250 a = kzalloc(sizeof(*a), GFP_KERNEL);
/* First reference: program the hardware, then track the entry */
256 err = ds->ops->port_mdb_add(ds, port, mdb, db);
262 ether_addr_copy(a->addr, mdb->addr);
265 refcount_set(&a->refcount, 1);
266 list_add_tail(&a->list, &dp->mdbs);
269 mutex_unlock(&dp->addr_lists_lock);
/* Delete an MDB entry from @dp, the refcounted counterpart of
 * dsa_port_do_mdb_add(). Hardware is touched only when the last reference
 * drops; on hardware failure the refcount is restored to keep state sane.
 */
274 static int dsa_port_do_mdb_del(struct dsa_port *dp,
275 const struct switchdev_obj_port_mdb *mdb,
278 struct dsa_switch *ds = dp->ds;
279 struct dsa_mac_addr *a;
280 int port = dp->index;
283 /* No need to bother with refcounting for user ports */
284 if (!(dsa_port_is_cpu(dp) || dsa_port_is_dsa(dp)))
285 return ds->ops->port_mdb_del(ds, port, mdb, db);
287 mutex_lock(&dp->addr_lists_lock);
289 a = dsa_mac_addr_find(&dp->mdbs, mdb->addr, mdb->vid, db);
/* Other users remain: drop one reference, keep the hardware entry */
295 if (!refcount_dec_and_test(&a->refcount))
298 err = ds->ops->port_mdb_del(ds, port, mdb, db);
/* Hardware delete failed: restore the reference we just dropped */
300 refcount_set(&a->refcount, 1);
308 mutex_unlock(&dp->addr_lists_lock);
/* Add an FDB entry on @dp. Mirrors dsa_port_do_mdb_add(): user ports go
 * straight to hardware, shared ports refcount (addr, vid, db) in dp->fdbs.
 */
313 static int dsa_port_do_fdb_add(struct dsa_port *dp, const unsigned char *addr,
314 u16 vid, struct dsa_db db)
316 struct dsa_switch *ds = dp->ds;
317 struct dsa_mac_addr *a;
318 int port = dp->index;
321 /* No need to bother with refcounting for user ports */
322 if (!(dsa_port_is_cpu(dp) || dsa_port_is_dsa(dp)))
323 return ds->ops->port_fdb_add(ds, port, addr, vid, db);
325 mutex_lock(&dp->addr_lists_lock);
/* Already tracked: just bump the refcount */
327 a = dsa_mac_addr_find(&dp->fdbs, addr, vid, db);
329 refcount_inc(&a->refcount);
333 a = kzalloc(sizeof(*a), GFP_KERNEL);
/* First reference: program hardware, then record the entry */
339 err = ds->ops->port_fdb_add(ds, port, addr, vid, db);
345 ether_addr_copy(a->addr, addr);
348 refcount_set(&a->refcount, 1);
349 list_add_tail(&a->list, &dp->fdbs);
352 mutex_unlock(&dp->addr_lists_lock);
/* Delete an FDB entry from @dp; refcounted counterpart of
 * dsa_port_do_fdb_add(). Hardware is only touched on the last reference,
 * and the refcount is restored if the hardware delete fails.
 */
357 static int dsa_port_do_fdb_del(struct dsa_port *dp, const unsigned char *addr,
358 u16 vid, struct dsa_db db)
360 struct dsa_switch *ds = dp->ds;
361 struct dsa_mac_addr *a;
362 int port = dp->index;
365 /* No need to bother with refcounting for user ports */
366 if (!(dsa_port_is_cpu(dp) || dsa_port_is_dsa(dp)))
367 return ds->ops->port_fdb_del(ds, port, addr, vid, db);
369 mutex_lock(&dp->addr_lists_lock);
371 a = dsa_mac_addr_find(&dp->fdbs, addr, vid, db);
377 if (!refcount_dec_and_test(&a->refcount))
380 err = ds->ops->port_fdb_del(ds, port, addr, vid, db);
/* Hardware delete failed: restore the dropped reference */
382 refcount_set(&a->refcount, 1);
390 mutex_unlock(&dp->addr_lists_lock);
/* Add an FDB entry on a LAG, refcounted in lag->fdbs under lag->fdb_lock,
 * same scheme as the per-port helpers but keyed on the LAG instead.
 */
395 static int dsa_switch_do_lag_fdb_add(struct dsa_switch *ds, struct dsa_lag *lag,
396 const unsigned char *addr, u16 vid,
399 struct dsa_mac_addr *a;
402 mutex_lock(&lag->fdb_lock);
404 a = dsa_mac_addr_find(&lag->fdbs, addr, vid, db);
406 refcount_inc(&a->refcount);
410 a = kzalloc(sizeof(*a), GFP_KERNEL);
/* First reference: program the hardware via the LAG-specific op */
416 err = ds->ops->lag_fdb_add(ds, *lag, addr, vid, db);
422 ether_addr_copy(a->addr, addr);
424 refcount_set(&a->refcount, 1);
425 list_add_tail(&a->list, &lag->fdbs);
428 mutex_unlock(&lag->fdb_lock);
/* Delete a refcounted LAG FDB entry; hardware is only touched on the last
 * reference and the refcount is restored if the driver op fails.
 */
433 static int dsa_switch_do_lag_fdb_del(struct dsa_switch *ds, struct dsa_lag *lag,
434 const unsigned char *addr, u16 vid,
437 struct dsa_mac_addr *a;
440 mutex_lock(&lag->fdb_lock);
442 a = dsa_mac_addr_find(&lag->fdbs, addr, vid, db);
448 if (!refcount_dec_and_test(&a->refcount))
451 err = ds->ops->lag_fdb_del(ds, *lag, addr, vid, db);
/* Hardware delete failed: restore the dropped reference */
453 refcount_set(&a->refcount, 1);
461 mutex_unlock(&lag->fdb_lock);
/* Handle DSA_NOTIFIER_HOST_FDB_ADD: install the address on every
 * upstream-facing port of @ds selected by dsa_port_host_address_match().
 */
466 static int dsa_switch_host_fdb_add(struct dsa_switch *ds,
467 struct dsa_notifier_fdb_info *info)
472 if (!ds->ops->port_fdb_add)
475 dsa_switch_for_each_port(dp, ds) {
476 if (dsa_port_host_address_match(dp, info->sw_index,
478 err = dsa_port_do_fdb_add(dp, info->addr, info->vid,
/* Handle DSA_NOTIFIER_HOST_FDB_DEL: remove the address from every
 * upstream-facing port of @ds selected by dsa_port_host_address_match().
 */
488 static int dsa_switch_host_fdb_del(struct dsa_switch *ds,
489 struct dsa_notifier_fdb_info *info)
494 if (!ds->ops->port_fdb_del)
497 dsa_switch_for_each_port(dp, ds) {
498 if (dsa_port_host_address_match(dp, info->sw_index,
500 err = dsa_port_do_fdb_del(dp, info->addr, info->vid,
/* Handle DSA_NOTIFIER_FDB_ADD: resolve the local port that leads towards the
 * targeted (sw_index, port) pair and add the FDB entry there.
 */
510 static int dsa_switch_fdb_add(struct dsa_switch *ds,
511 struct dsa_notifier_fdb_info *info)
513 int port = dsa_towards_port(ds, info->sw_index, info->port);
514 struct dsa_port *dp = dsa_to_port(ds, port);
516 if (!ds->ops->port_fdb_add)
519 return dsa_port_do_fdb_add(dp, info->addr, info->vid, info->db);
/* Handle DSA_NOTIFIER_FDB_DEL: resolve the local port that leads towards the
 * targeted (sw_index, port) pair and delete the FDB entry there.
 */
522 static int dsa_switch_fdb_del(struct dsa_switch *ds,
523 struct dsa_notifier_fdb_info *info)
525 int port = dsa_towards_port(ds, info->sw_index, info->port);
526 struct dsa_port *dp = dsa_to_port(ds, port);
528 if (!ds->ops->port_fdb_del)
531 return dsa_port_do_fdb_del(dp, info->addr, info->vid, info->db);
/* Handle DSA_NOTIFIER_LAG_FDB_ADD: only act if this switch offloads a port
 * that is a member of the LAG; the first match ends the search by returning.
 */
534 static int dsa_switch_lag_fdb_add(struct dsa_switch *ds,
535 struct dsa_notifier_lag_fdb_info *info)
539 if (!ds->ops->lag_fdb_add)
542 /* Notify switch only if it has a port in this LAG */
543 dsa_switch_for_each_port(dp, ds)
544 if (dsa_port_offloads_lag(dp, info->lag))
545 return dsa_switch_do_lag_fdb_add(ds, info->lag,
546 info->addr, info->vid,
/* Handle DSA_NOTIFIER_LAG_FDB_DEL: counterpart of dsa_switch_lag_fdb_add();
 * only act if this switch has a port participating in the LAG.
 */
552 static int dsa_switch_lag_fdb_del(struct dsa_switch *ds,
553 struct dsa_notifier_lag_fdb_info *info)
557 if (!ds->ops->lag_fdb_del)
560 /* Notify switch only if it has a port in this LAG */
561 dsa_switch_for_each_port(dp, ds)
562 if (dsa_port_offloads_lag(dp, info->lag))
563 return dsa_switch_do_lag_fdb_del(ds, info->lag,
564 info->addr, info->vid,
/* Handle DSA_NOTIFIER_LAG_CHANGE: dispatch to the local op when this switch
 * is the target, or to the crosschip op for other fabric members.
 */
570 static int dsa_switch_lag_change(struct dsa_switch *ds,
571 struct dsa_notifier_lag_info *info)
573 if (ds->index == info->sw_index && ds->ops->port_lag_change)
574 return ds->ops->port_lag_change(ds, info->port);
576 if (ds->index != info->sw_index && ds->ops->crosschip_lag_change)
577 return ds->ops->crosschip_lag_change(ds, info->sw_index,
/* Handle DSA_NOTIFIER_LAG_JOIN: local op for the targeted switch, crosschip
 * op for the other switches in the fabric.
 */
583 static int dsa_switch_lag_join(struct dsa_switch *ds,
584 struct dsa_notifier_lag_info *info)
586 if (ds->index == info->sw_index && ds->ops->port_lag_join)
587 return ds->ops->port_lag_join(ds, info->port, info->lag,
590 if (ds->index != info->sw_index && ds->ops->crosschip_lag_join)
591 return ds->ops->crosschip_lag_join(ds, info->sw_index,
592 info->port, info->lag,
/* Handle DSA_NOTIFIER_LAG_LEAVE: local op for the targeted switch, crosschip
 * op for the other switches in the fabric.
 */
598 static int dsa_switch_lag_leave(struct dsa_switch *ds,
599 struct dsa_notifier_lag_info *info)
601 if (ds->index == info->sw_index && ds->ops->port_lag_leave)
602 return ds->ops->port_lag_leave(ds, info->port, info->lag);
604 if (ds->index != info->sw_index && ds->ops->crosschip_lag_leave)
605 return ds->ops->crosschip_lag_leave(ds, info->sw_index,
606 info->port, info->lag);
/* Handle DSA_NOTIFIER_MDB_ADD: resolve the local port leading towards the
 * targeted port and add the MDB entry there via the refcounted helper.
 */
611 static int dsa_switch_mdb_add(struct dsa_switch *ds,
612 struct dsa_notifier_mdb_info *info)
614 int port = dsa_towards_port(ds, info->sw_index, info->port);
615 struct dsa_port *dp = dsa_to_port(ds, port);
617 if (!ds->ops->port_mdb_add)
620 return dsa_port_do_mdb_add(dp, info->mdb, info->db);
/* Handle DSA_NOTIFIER_MDB_DEL: resolve the local port leading towards the
 * targeted port and delete the MDB entry there via the refcounted helper.
 */
623 static int dsa_switch_mdb_del(struct dsa_switch *ds,
624 struct dsa_notifier_mdb_info *info)
626 int port = dsa_towards_port(ds, info->sw_index, info->port);
627 struct dsa_port *dp = dsa_to_port(ds, port);
629 if (!ds->ops->port_mdb_del)
632 return dsa_port_do_mdb_del(dp, info->mdb, info->db);
/* Handle DSA_NOTIFIER_HOST_MDB_ADD: install the MDB entry on every
 * upstream-facing port selected by dsa_port_host_address_match().
 */
635 static int dsa_switch_host_mdb_add(struct dsa_switch *ds,
636 struct dsa_notifier_mdb_info *info)
641 if (!ds->ops->port_mdb_add)
644 dsa_switch_for_each_port(dp, ds) {
645 if (dsa_port_host_address_match(dp, info->sw_index,
647 err = dsa_port_do_mdb_add(dp, info->mdb, info->db);
/* Handle DSA_NOTIFIER_HOST_MDB_DEL: remove the MDB entry from every
 * upstream-facing port selected by dsa_port_host_address_match().
 */
656 static int dsa_switch_host_mdb_del(struct dsa_switch *ds,
657 struct dsa_notifier_mdb_info *info)
662 if (!ds->ops->port_mdb_del)
665 dsa_switch_for_each_port(dp, ds) {
666 if (dsa_port_host_address_match(dp, info->sw_index,
668 err = dsa_port_do_mdb_del(dp, info->mdb, info->db);
677 /* Port VLANs match on the targeted port and on all DSA ports */
678 static bool dsa_port_vlan_match(struct dsa_port *dp,
679 struct dsa_notifier_vlan_info *info)
/* NOTE(review): return statements for both branches are elided in listing */
681 if (dp->ds->index == info->sw_index && dp->index == info->port)
684 if (dsa_port_is_dsa(dp))
690 /* Host VLANs match on the targeted port's CPU port, and on all DSA ports
691 * (upstream and downstream) of that switch and its upstream switches.
693 static bool dsa_port_host_vlan_match(struct dsa_port *dp,
694 struct dsa_notifier_vlan_info *info)
696 struct dsa_port *targeted_dp, *cpu_dp;
697 struct dsa_switch *targeted_ds;
/* Resolve the notifier's (switch, port) into the targeted dsa_port */
699 targeted_ds = dsa_switch_find(dp->ds->dst->index, info->sw_index);
700 targeted_dp = dsa_to_port(targeted_ds, info->port);
701 cpu_dp = targeted_dp->cpu_dp;
703 if (dsa_switch_is_upstream_of(dp->ds, targeted_ds))
704 return dsa_port_is_dsa(dp) || dp == cpu_dp;
/* Linear search of @vlan_list for an entry with the same VID as @vlan.
 * Returns the entry or NULL (return lines elided in this listing).
 * Caller must hold the lock protecting @vlan_list.
 */
709 static struct dsa_vlan *dsa_vlan_find(struct list_head *vlan_list,
710 const struct switchdev_obj_port_vlan *vlan)
714 list_for_each_entry(v, vlan_list, list)
715 if (v->vid == vlan->vid)
/* Add a VLAN on @dp. User ports program the hardware directly; shared
 * (CPU/DSA) ports refcount the VID in dp->vlans so the hardware entry is
 * only created once no matter how many user ports require it.
 */
721 static int dsa_port_do_vlan_add(struct dsa_port *dp,
722 const struct switchdev_obj_port_vlan *vlan,
723 struct netlink_ext_ack *extack)
725 struct dsa_switch *ds = dp->ds;
726 int port = dp->index;
730 /* No need to bother with refcounting for user ports. */
731 if (!(dsa_port_is_cpu(dp) || dsa_port_is_dsa(dp)))
732 return ds->ops->port_vlan_add(ds, port, vlan, extack);
734 /* No need to propagate on shared ports the existing VLANs that were
735 * re-notified after just the flags have changed. This would cause a
736 * refcount bump which we need to avoid, since it unbalances the
737 * additions with the deletions.
742 mutex_lock(&dp->vlans_lock);
/* VID already tracked: take another reference instead of re-programming */
744 v = dsa_vlan_find(&dp->vlans, vlan);
746 refcount_inc(&v->refcount);
750 v = kzalloc(sizeof(*v), GFP_KERNEL);
/* First reference: program the hardware, then record the VLAN */
756 err = ds->ops->port_vlan_add(ds, port, vlan, extack);
763 refcount_set(&v->refcount, 1);
764 list_add_tail(&v->list, &dp->vlans);
767 mutex_unlock(&dp->vlans_lock);
/* Delete a VLAN from @dp; refcounted counterpart of dsa_port_do_vlan_add().
 * The hardware entry is removed only when the last reference drops, and the
 * refcount is restored if the driver op fails.
 */
772 static int dsa_port_do_vlan_del(struct dsa_port *dp,
773 const struct switchdev_obj_port_vlan *vlan)
775 struct dsa_switch *ds = dp->ds;
776 int port = dp->index;
780 /* No need to bother with refcounting for user ports */
781 if (!(dsa_port_is_cpu(dp) || dsa_port_is_dsa(dp)))
782 return ds->ops->port_vlan_del(ds, port, vlan);
784 mutex_lock(&dp->vlans_lock);
786 v = dsa_vlan_find(&dp->vlans, vlan);
792 if (!refcount_dec_and_test(&v->refcount))
795 err = ds->ops->port_vlan_del(ds, port, vlan);
/* Hardware delete failed: restore the dropped reference */
797 refcount_set(&v->refcount, 1);
805 mutex_unlock(&dp->vlans_lock);
/* Handle DSA_NOTIFIER_VLAN_ADD: add the VLAN on every port of @ds that
 * dsa_port_vlan_match() selects (targeted port plus DSA links).
 */
810 static int dsa_switch_vlan_add(struct dsa_switch *ds,
811 struct dsa_notifier_vlan_info *info)
816 if (!ds->ops->port_vlan_add)
819 dsa_switch_for_each_port(dp, ds) {
820 if (dsa_port_vlan_match(dp, info)) {
821 err = dsa_port_do_vlan_add(dp, info->vlan,
/* Handle DSA_NOTIFIER_VLAN_DEL: delete the VLAN from every port of @ds that
 * dsa_port_vlan_match() selects.
 */
831 static int dsa_switch_vlan_del(struct dsa_switch *ds,
832 struct dsa_notifier_vlan_info *info)
837 if (!ds->ops->port_vlan_del)
840 dsa_switch_for_each_port(dp, ds) {
841 if (dsa_port_vlan_match(dp, info)) {
842 err = dsa_port_do_vlan_del(dp, info->vlan);
/* Handle DSA_NOTIFIER_HOST_VLAN_ADD: add the VLAN on every host-facing port
 * of @ds that dsa_port_host_vlan_match() selects (CPU port and DSA links).
 */
851 static int dsa_switch_host_vlan_add(struct dsa_switch *ds,
852 struct dsa_notifier_vlan_info *info)
857 if (!ds->ops->port_vlan_add)
860 dsa_switch_for_each_port(dp, ds) {
861 if (dsa_port_host_vlan_match(dp, info)) {
862 err = dsa_port_do_vlan_add(dp, info->vlan,
/* Handle DSA_NOTIFIER_HOST_VLAN_DEL: delete the VLAN from every host-facing
 * port of @ds that dsa_port_host_vlan_match() selects.
 */
872 static int dsa_switch_host_vlan_del(struct dsa_switch *ds,
873 struct dsa_notifier_vlan_info *info)
878 if (!ds->ops->port_vlan_del)
881 dsa_switch_for_each_port(dp, ds) {
882 if (dsa_port_host_vlan_match(dp, info)) {
883 err = dsa_port_do_vlan_del(dp, info->vlan);
/* Handle DSA_NOTIFIER_TAG_PROTO: switch all CPU ports of @ds to the new
 * tagging protocol, then refresh per-slave state (tagger setup and MTU)
 * that depends on the tagger. Caller holds rtnl_lock (see comment below).
 */
892 static int dsa_switch_change_tag_proto(struct dsa_switch *ds,
893 struct dsa_notifier_tag_proto_info *info)
895 const struct dsa_device_ops *tag_ops = info->tag_ops;
896 struct dsa_port *dp, *cpu_dp;
899 if (!ds->ops->change_tag_protocol)
/* Program the new protocol on each CPU port first; record it on success */
904 dsa_switch_for_each_cpu_port(cpu_dp, ds) {
905 err = ds->ops->change_tag_protocol(ds, cpu_dp->index,
910 dsa_port_set_tag_protocol(cpu_dp, tag_ops);
913 /* Now that changing the tag protocol can no longer fail, let's update
914 * the remaining bits which are "duplicated for faster access", and the
915 * bits that depend on the tagger, such as the MTU.
917 dsa_switch_for_each_user_port(dp, ds) {
918 struct net_device *slave = dp->slave;
920 dsa_slave_setup_tagger(slave);
922 /* rtnl_mutex is held in dsa_tree_change_tag_proto */
923 dsa_slave_change_mtu(slave, slave->mtu);
929 /* We use the same cross-chip notifiers to inform both the tagger side, as well
930 * as the switch side, of connection and disconnection events.
931 * Since ds->tagger_data is owned by the tagger, it isn't a hard error if the
932 * switch side doesn't support connecting to this tagger, and therefore, the
933 * fact that we don't disconnect the tagger side doesn't constitute a memory
934 * leak: the tagger will still operate with persistent per-switch memory, just
935 * with the switch side unconnected to it. What does constitute a hard error is
936 * when the switch side supports connecting but fails.
939 dsa_switch_connect_tag_proto(struct dsa_switch *ds,
940 struct dsa_notifier_tag_proto_info *info)
942 const struct dsa_device_ops *tag_ops = info->tag_ops;
945 /* Notify the new tagger about the connection to this switch */
946 if (tag_ops->connect) {
947 err = tag_ops->connect(ds);
/* Switch side not supporting the connection is tolerated (see above) */
952 if (!ds->ops->connect_tag_protocol)
955 /* Notify the switch about the connection to the new tagger */
956 err = ds->ops->connect_tag_protocol(ds, tag_ops->proto);
958 /* Revert the new tagger's connection to this tree */
959 if (tag_ops->disconnect)
960 tag_ops->disconnect(ds);
/* Handle DSA_NOTIFIER_TAG_PROTO_DISCONNECT: tell the tagger to release its
 * per-switch state, but only if it ever allocated any (ds->tagger_data).
 */
968 dsa_switch_disconnect_tag_proto(struct dsa_switch *ds,
969 struct dsa_notifier_tag_proto_info *info)
971 const struct dsa_device_ops *tag_ops = info->tag_ops;
973 /* Notify the tagger about the disconnection from this switch */
974 if (tag_ops->disconnect && ds->tagger_data)
975 tag_ops->disconnect(ds);
977 /* No need to notify the switch, since it shouldn't have any
978 * resources to tear down
/* Handle DSA_NOTIFIER_MASTER_STATE_CHANGE: forward the master netdev's
 * operational-state change to the driver, if it cares.
 */
984 dsa_switch_master_state_change(struct dsa_switch *ds,
985 struct dsa_notifier_master_state_info *info)
987 if (!ds->ops->master_state_change)
990 ds->ops->master_state_change(ds, info->master, info->operational);
/* Notifier-chain callback for each switch in the tree: demultiplex the
 * cross-chip event to the matching handler above and translate the result
 * into a notifier return value. A failing handler "breaks the chain" — the
 * dev_dbg below logs that case.
 * NOTE(review): the break statements and default case are elided from this
 * listing; the visible structure is a standard switch on @event.
 */
995 static int dsa_switch_event(struct notifier_block *nb,
996 unsigned long event, void *info)
998 struct dsa_switch *ds = container_of(nb, struct dsa_switch, nb);
1002 case DSA_NOTIFIER_AGEING_TIME:
1003 err = dsa_switch_ageing_time(ds, info);
1005 case DSA_NOTIFIER_BRIDGE_JOIN:
1006 err = dsa_switch_bridge_join(ds, info);
1008 case DSA_NOTIFIER_BRIDGE_LEAVE:
1009 err = dsa_switch_bridge_leave(ds, info);
1011 case DSA_NOTIFIER_FDB_ADD:
1012 err = dsa_switch_fdb_add(ds, info);
1014 case DSA_NOTIFIER_FDB_DEL:
1015 err = dsa_switch_fdb_del(ds, info);
1017 case DSA_NOTIFIER_HOST_FDB_ADD:
1018 err = dsa_switch_host_fdb_add(ds, info);
1020 case DSA_NOTIFIER_HOST_FDB_DEL:
1021 err = dsa_switch_host_fdb_del(ds, info);
1023 case DSA_NOTIFIER_LAG_FDB_ADD:
1024 err = dsa_switch_lag_fdb_add(ds, info);
1026 case DSA_NOTIFIER_LAG_FDB_DEL:
1027 err = dsa_switch_lag_fdb_del(ds, info);
1029 case DSA_NOTIFIER_LAG_CHANGE:
1030 err = dsa_switch_lag_change(ds, info);
1032 case DSA_NOTIFIER_LAG_JOIN:
1033 err = dsa_switch_lag_join(ds, info);
1035 case DSA_NOTIFIER_LAG_LEAVE:
1036 err = dsa_switch_lag_leave(ds, info);
1038 case DSA_NOTIFIER_MDB_ADD:
1039 err = dsa_switch_mdb_add(ds, info);
1041 case DSA_NOTIFIER_MDB_DEL:
1042 err = dsa_switch_mdb_del(ds, info);
1044 case DSA_NOTIFIER_HOST_MDB_ADD:
1045 err = dsa_switch_host_mdb_add(ds, info);
1047 case DSA_NOTIFIER_HOST_MDB_DEL:
1048 err = dsa_switch_host_mdb_del(ds, info);
1050 case DSA_NOTIFIER_VLAN_ADD:
1051 err = dsa_switch_vlan_add(ds, info);
1053 case DSA_NOTIFIER_VLAN_DEL:
1054 err = dsa_switch_vlan_del(ds, info);
1056 case DSA_NOTIFIER_HOST_VLAN_ADD:
1057 err = dsa_switch_host_vlan_add(ds, info);
1059 case DSA_NOTIFIER_HOST_VLAN_DEL:
1060 err = dsa_switch_host_vlan_del(ds, info);
1062 case DSA_NOTIFIER_MTU:
1063 err = dsa_switch_mtu(ds, info);
1065 case DSA_NOTIFIER_TAG_PROTO:
1066 err = dsa_switch_change_tag_proto(ds, info);
1068 case DSA_NOTIFIER_TAG_PROTO_CONNECT:
1069 err = dsa_switch_connect_tag_proto(ds, info);
1071 case DSA_NOTIFIER_TAG_PROTO_DISCONNECT:
1072 err = dsa_switch_disconnect_tag_proto(ds, info);
1074 case DSA_NOTIFIER_TAG_8021Q_VLAN_ADD:
1075 err = dsa_switch_tag_8021q_vlan_add(ds, info);
1077 case DSA_NOTIFIER_TAG_8021Q_VLAN_DEL:
1078 err = dsa_switch_tag_8021q_vlan_del(ds, info);
1080 case DSA_NOTIFIER_MASTER_STATE_CHANGE:
1081 err = dsa_switch_master_state_change(ds, info);
/* Any handler error stops propagation of this event along the chain */
1089 dev_dbg(ds->dev, "breaking chain for DSA event %lu (%d)\n",
1092 return notifier_from_errno(err);
/* Register @ds on its tree's raw notifier chain so it receives the
 * cross-chip events dispatched by dsa_switch_event().
 */
1095 int dsa_switch_register_notifier(struct dsa_switch *ds)
1097 ds->nb.notifier_call = dsa_switch_event;
1099 return raw_notifier_chain_register(&ds->dst->nh, &ds->nb);
/* Unregister @ds from its tree's notifier chain; failure is only logged
 * since there is no caller-visible way to recover at teardown.
 */
1102 void dsa_switch_unregister_notifier(struct dsa_switch *ds)
1106 err = raw_notifier_chain_unregister(&ds->dst->nh, &ds->nb);
1108 dev_err(ds->dev, "failed to unregister notifier (%d)\n", err);