[linux.git] / net / dsa / port.c
1 // SPDX-License-Identifier: GPL-2.0-or-later
2 /*
3  * Handling of a single switch port
4  *
5  * Copyright (c) 2017 Savoir-faire Linux Inc.
6  *      Vivien Didelot <[email protected]>
7  */
8
9 #include <linux/if_bridge.h>
10 #include <linux/notifier.h>
11 #include <linux/of_mdio.h>
12 #include <linux/of_net.h>
13
14 #include "dsa_priv.h"
15
16 /**
17  * dsa_port_notify - Notify the switching fabric of changes to a port
18  * @dp: port on which change occurred
19  * @e: event, must be of type DSA_NOTIFIER_*
20  * @v: event-specific value.
21  *
22  * Notify all switches in the DSA tree that this port's switch belongs to,
23  * including this switch itself, of an event. Allows the other switches to
24  * reconfigure themselves for cross-chip operations. Can also be used to
25  * reconfigure ports without net_devices (CPU ports, DSA links) whenever
26  * a user port's state changes.
27  */
28 static int dsa_port_notify(const struct dsa_port *dp, unsigned long e, void *v)
29 {
30         return dsa_tree_notify(dp->ds->dst, e, v);
31 }
32
33 static void dsa_port_notify_bridge_fdb_flush(const struct dsa_port *dp)
34 {
35         struct net_device *brport_dev = dsa_port_to_bridge_port(dp);
36         struct switchdev_notifier_fdb_info info = {
37                 /* flush all VLANs */
38                 .vid = 0,
39         };
40
41         /* When the port becomes standalone it has already left the bridge.
42          * Don't notify the bridge in that case.
43          */
44         if (!brport_dev)
45                 return;
46
47         call_switchdev_notifiers(SWITCHDEV_FDB_FLUSH_TO_BRIDGE,
48                                  brport_dev, &info.info, NULL);
49 }
50
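/* Flush the dynamically learned FDB entries of this port in hardware, then
 * let the bridge (if any) know so that it drops the corresponding entries
 * from its own software FDB as well.
 */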
51 static void dsa_port_fast_age(const struct dsa_port *dp)
52 {
53         struct dsa_switch *ds = dp->ds;
54
55         if (!ds->ops->port_fast_age)
56                 return;
57
58         ds->ops->port_fast_age(ds, dp->index);
59
60         dsa_port_notify_bridge_fdb_flush(dp);
61 }
62
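/* Address learning is only under our control if the driver implements both
 * .port_bridge_flags and .port_pre_bridge_flags, and the latter accepts a
 * request for BR_LEARNING.
 */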
63 static bool dsa_port_can_configure_learning(struct dsa_port *dp)
64 {
65         struct switchdev_brport_flags flags = {
66                 .mask = BR_LEARNING,
67         };
68         struct dsa_switch *ds = dp->ds;
69         int err;
70
71         if (!ds->ops->port_bridge_flags || !ds->ops->port_pre_bridge_flags)
72                 return false;
73
74         err = ds->ops->port_pre_bridge_flags(ds, dp->index, flags, NULL);
75         return !err;
76 }
77
78 int dsa_port_set_state(struct dsa_port *dp, u8 state, bool do_fast_age)
79 {
80         struct dsa_switch *ds = dp->ds;
81         int port = dp->index;
82
83         if (!ds->ops->port_stp_state_set)
84                 return -EOPNOTSUPP;
85
86         ds->ops->port_stp_state_set(ds, port, state);
87
88         if (!dsa_port_can_configure_learning(dp) ||
89             (do_fast_age && dp->learning)) {
90                 /* Fast age FDB entries or flush appropriate forwarding database
91                  * for the given port, if we are moving it from Learning or
92                  * Forwarding state to Disabled, Blocking or Listening state.
93                  * Ports that were standalone before the STP state change don't
94                  * need to fast age the FDB, since address learning is off in
95                  * standalone mode.
96                  */
97
98                 if ((dp->stp_state == BR_STATE_LEARNING ||
99                      dp->stp_state == BR_STATE_FORWARDING) &&
100                     (state == BR_STATE_DISABLED ||
101                      state == BR_STATE_BLOCKING ||
102                      state == BR_STATE_LISTENING))
103                         dsa_port_fast_age(dp);
104         }
105
106         dp->stp_state = state;
107
108         return 0;
109 }
110
111 static void dsa_port_set_state_now(struct dsa_port *dp, u8 state,
112                                    bool do_fast_age)
113 {
114         int err;
115
116         err = dsa_port_set_state(dp, state, do_fast_age);
117         if (err)
118                 pr_err("DSA: failed to set STP state %u (%d)\n", state, err);
119 }
120
121 int dsa_port_enable_rt(struct dsa_port *dp, struct phy_device *phy)
122 {
123         struct dsa_switch *ds = dp->ds;
124         int port = dp->index;
125         int err;
126
127         if (ds->ops->port_enable) {
128                 err = ds->ops->port_enable(ds, port, phy);
129                 if (err)
130                         return err;
131         }
132
133         if (!dp->bridge)
134                 dsa_port_set_state_now(dp, BR_STATE_FORWARDING, false);
135
136         if (dp->pl)
137                 phylink_start(dp->pl);
138
139         return 0;
140 }
141
142 int dsa_port_enable(struct dsa_port *dp, struct phy_device *phy)
143 {
144         int err;
145
146         rtnl_lock();
147         err = dsa_port_enable_rt(dp, phy);
148         rtnl_unlock();
149
150         return err;
151 }
152
153 void dsa_port_disable_rt(struct dsa_port *dp)
154 {
155         struct dsa_switch *ds = dp->ds;
156         int port = dp->index;
157
158         if (dp->pl)
159                 phylink_stop(dp->pl);
160
161         if (!dp->bridge)
162                 dsa_port_set_state_now(dp, BR_STATE_DISABLED, false);
163
164         if (ds->ops->port_disable)
165                 ds->ops->port_disable(ds, port);
166 }
167
168 void dsa_port_disable(struct dsa_port *dp)
169 {
170         rtnl_lock();
171         dsa_port_disable_rt(dp);
172         rtnl_unlock();
173 }
174
175 static int dsa_port_inherit_brport_flags(struct dsa_port *dp,
176                                          struct netlink_ext_ack *extack)
177 {
178         const unsigned long mask = BR_LEARNING | BR_FLOOD | BR_MCAST_FLOOD |
179                                    BR_BCAST_FLOOD | BR_PORT_LOCKED;
180         struct net_device *brport_dev = dsa_port_to_bridge_port(dp);
181         int flag, err;
182
183         for_each_set_bit(flag, &mask, 32) {
184                 struct switchdev_brport_flags flags = {0};
185
186                 flags.mask = BIT(flag);
187
188                 if (br_port_flag_is_set(brport_dev, BIT(flag)))
189                         flags.val = BIT(flag);
190
191                 err = dsa_port_bridge_flags(dp, flags, extack);
192                 if (err && err != -EOPNOTSUPP)
193                         return err;
194         }
195
196         return 0;
197 }
198
199 static void dsa_port_clear_brport_flags(struct dsa_port *dp)
200 {
201         const unsigned long val = BR_FLOOD | BR_MCAST_FLOOD | BR_BCAST_FLOOD;
202         const unsigned long mask = BR_LEARNING | BR_FLOOD | BR_MCAST_FLOOD |
203                                    BR_BCAST_FLOOD | BR_PORT_LOCKED;
204         int flag, err;
205
206         for_each_set_bit(flag, &mask, 32) {
207                 struct switchdev_brport_flags flags = {0};
208
209                 flags.mask = BIT(flag);
210                 flags.val = val & BIT(flag);
211
212                 err = dsa_port_bridge_flags(dp, flags, NULL);
213                 if (err && err != -EOPNOTSUPP)
214                         dev_err(dp->ds->dev,
215                                 "failed to clear bridge port flag %lu: %pe\n",
216                                 flags.val, ERR_PTR(err));
217         }
218 }
219
220 static int dsa_port_switchdev_sync_attrs(struct dsa_port *dp,
221                                          struct netlink_ext_ack *extack)
222 {
223         struct net_device *brport_dev = dsa_port_to_bridge_port(dp);
224         struct net_device *br = dsa_port_bridge_dev_get(dp);
225         int err;
226
227         err = dsa_port_inherit_brport_flags(dp, extack);
228         if (err)
229                 return err;
230
231         err = dsa_port_set_state(dp, br_port_get_stp_state(brport_dev), false);
232         if (err && err != -EOPNOTSUPP)
233                 return err;
234
235         err = dsa_port_vlan_filtering(dp, br_vlan_enabled(br), extack);
236         if (err && err != -EOPNOTSUPP)
237                 return err;
238
239         err = dsa_port_ageing_time(dp, br_get_ageing_time(br));
240         if (err && err != -EOPNOTSUPP)
241                 return err;
242
243         return 0;
244 }
245
246 static void dsa_port_switchdev_unsync_attrs(struct dsa_port *dp)
247 {
248         /* Configure the port for standalone mode (no address learning,
249          * flood everything).
250          * The bridge only emits SWITCHDEV_ATTR_ID_PORT_BRIDGE_FLAGS events
251          * when the user requests it through netlink or sysfs, but not
252          * automatically at port join or leave, so we need to handle resetting
253          * the brport flags ourselves. We even prefer it that way, because
254          * otherwise some setups might never get the notification they need;
255          * for example, when a port leaves a LAG that offloads the bridge,
256          * it becomes standalone, but as far as the bridge is concerned, no
257          * port ever left.
258          */
259         dsa_port_clear_brport_flags(dp);
260
261         /* The port left the bridge and was put in BR_STATE_DISABLED by the
262          * bridge layer; put it back into BR_STATE_FORWARDING to keep it functional.
263          */
264         dsa_port_set_state_now(dp, BR_STATE_FORWARDING, true);
265
266         /* VLAN filtering is handled by dsa_switch_bridge_leave */
267
268         /* Ageing time may be global to the switch chip, so don't change it
269          * here because we have no good reason (or value) to change it to.
270          */
271 }
272
273 static int dsa_port_bridge_create(struct dsa_port *dp,
274                                   struct net_device *br,
275                                   struct netlink_ext_ack *extack)
276 {
277         struct dsa_switch *ds = dp->ds;
278         struct dsa_bridge *bridge;
279
280         bridge = dsa_tree_bridge_find(ds->dst, br);
281         if (bridge) {
282                 refcount_inc(&bridge->refcount);
283                 dp->bridge = bridge;
284                 return 0;
285         }
286
287         bridge = kzalloc(sizeof(*bridge), GFP_KERNEL);
288         if (!bridge)
289                 return -ENOMEM;
290
291         refcount_set(&bridge->refcount, 1);
292
293         bridge->dev = br;
294
295         bridge->num = dsa_bridge_num_get(br, ds->max_num_bridges);
296         if (ds->max_num_bridges && !bridge->num) {
297                 NL_SET_ERR_MSG_MOD(extack,
298                                    "Range of offloadable bridges exceeded");
299                 kfree(bridge);
300                 return -EOPNOTSUPP;
301         }
302
303         dp->bridge = bridge;
304
305         return 0;
306 }
307
308 static void dsa_port_bridge_destroy(struct dsa_port *dp,
309                                     const struct net_device *br)
310 {
311         struct dsa_bridge *bridge = dp->bridge;
312
313         dp->bridge = NULL;
314
315         if (!refcount_dec_and_test(&bridge->refcount))
316                 return;
317
318         if (bridge->num)
319                 dsa_bridge_num_put(br, bridge->num);
320
321         kfree(bridge);
322 }
323
324 int dsa_port_bridge_join(struct dsa_port *dp, struct net_device *br,
325                          struct netlink_ext_ack *extack)
326 {
327         struct dsa_notifier_bridge_info info = {
328                 .tree_index = dp->ds->dst->index,
329                 .sw_index = dp->ds->index,
330                 .port = dp->index,
331                 .extack = extack,
332         };
333         struct net_device *dev = dp->slave;
334         struct net_device *brport_dev;
335         int err;
336
337         /* Here the interface is already bridged. Reflect the current
338          * configuration so that drivers can program their chips accordingly.
339          */
340         err = dsa_port_bridge_create(dp, br, extack);
341         if (err)
342                 return err;
343
344         brport_dev = dsa_port_to_bridge_port(dp);
345
346         info.bridge = *dp->bridge;
347         err = dsa_broadcast(DSA_NOTIFIER_BRIDGE_JOIN, &info);
348         if (err)
349                 goto out_rollback;
350
351         /* Drivers which support bridge TX forwarding should set this */
352         dp->bridge->tx_fwd_offload = info.tx_fwd_offload;
353
354         err = switchdev_bridge_port_offload(brport_dev, dev, dp,
355                                             &dsa_slave_switchdev_notifier,
356                                             &dsa_slave_switchdev_blocking_notifier,
357                                             dp->bridge->tx_fwd_offload, extack);
358         if (err)
359                 goto out_rollback_unbridge;
360
361         err = dsa_port_switchdev_sync_attrs(dp, extack);
362         if (err)
363                 goto out_rollback_unoffload;
364
365         return 0;
366
367 out_rollback_unoffload:
368         switchdev_bridge_port_unoffload(brport_dev, dp,
369                                         &dsa_slave_switchdev_notifier,
370                                         &dsa_slave_switchdev_blocking_notifier);
371 out_rollback_unbridge:
372         dsa_broadcast(DSA_NOTIFIER_BRIDGE_LEAVE, &info);
373 out_rollback:
374         dsa_port_bridge_destroy(dp, br);
375         return err;
376 }
377
378 void dsa_port_pre_bridge_leave(struct dsa_port *dp, struct net_device *br)
379 {
380         struct net_device *brport_dev = dsa_port_to_bridge_port(dp);
381
382         /* Don't try to unoffload something that is not offloaded */
383         if (!brport_dev)
384                 return;
385
386         switchdev_bridge_port_unoffload(brport_dev, dp,
387                                         &dsa_slave_switchdev_notifier,
388                                         &dsa_slave_switchdev_blocking_notifier);
389
390         dsa_flush_workqueue();
391 }
392
393 void dsa_port_bridge_leave(struct dsa_port *dp, struct net_device *br)
394 {
395         struct dsa_notifier_bridge_info info = {
396                 .tree_index = dp->ds->dst->index,
397                 .sw_index = dp->ds->index,
398                 .port = dp->index,
399         };
400         int err;
401
402         /* If the port could not be offloaded to begin with, then
403          * there is nothing to do.
404          */
405         if (!dp->bridge)
406                 return;
407
408         info.bridge = *dp->bridge;
409
410         /* Here the port is already unbridged. Reflect the current configuration
411          * so that drivers can program their chips accordingly.
412          */
413         dsa_port_bridge_destroy(dp, br);
414
415         err = dsa_broadcast(DSA_NOTIFIER_BRIDGE_LEAVE, &info);
416         if (err)
417                 dev_err(dp->ds->dev,
418                         "port %d failed to notify DSA_NOTIFIER_BRIDGE_LEAVE: %pe\n",
419                         dp->index, ERR_PTR(err));
420
421         dsa_port_switchdev_unsync_attrs(dp);
422 }
423
424 int dsa_port_lag_change(struct dsa_port *dp,
425                         struct netdev_lag_lower_state_info *linfo)
426 {
427         struct dsa_notifier_lag_info info = {
428                 .sw_index = dp->ds->index,
429                 .port = dp->index,
430         };
431         bool tx_enabled;
432
433         if (!dp->lag)
434                 return 0;
435
436         /* On statically configured aggregates (e.g. loadbalance
437          * without LACP) ports will always be tx_enabled, even if the
438          * link is down. Thus we require both link_up and tx_enabled
439          * in order to include it in the tx set.
440          */
441         tx_enabled = linfo->link_up && linfo->tx_enabled;
442
443         if (tx_enabled == dp->lag_tx_enabled)
444                 return 0;
445
446         dp->lag_tx_enabled = tx_enabled;
447
448         return dsa_port_notify(dp, DSA_NOTIFIER_LAG_CHANGE, &info);
449 }
450
451 static int dsa_port_lag_create(struct dsa_port *dp,
452                                struct net_device *lag_dev)
453 {
454         struct dsa_switch *ds = dp->ds;
455         struct dsa_lag *lag;
456
457         lag = dsa_tree_lag_find(ds->dst, lag_dev);
458         if (lag) {
459                 refcount_inc(&lag->refcount);
460                 dp->lag = lag;
461                 return 0;
462         }
463
464         lag = kzalloc(sizeof(*lag), GFP_KERNEL);
465         if (!lag)
466                 return -ENOMEM;
467
468         refcount_set(&lag->refcount, 1);
469         mutex_init(&lag->fdb_lock);
470         INIT_LIST_HEAD(&lag->fdbs);
471         lag->dev = lag_dev;
472         dsa_lag_map(ds->dst, lag);
473         dp->lag = lag;
474
475         return 0;
476 }
477
478 static void dsa_port_lag_destroy(struct dsa_port *dp)
479 {
480         struct dsa_lag *lag = dp->lag;
481
482         dp->lag = NULL;
483         dp->lag_tx_enabled = false;
484
485         if (!refcount_dec_and_test(&lag->refcount))
486                 return;
487
488         WARN_ON(!list_empty(&lag->fdbs));
489         dsa_lag_unmap(dp->ds->dst, lag);
490         kfree(lag);
491 }
492
493 int dsa_port_lag_join(struct dsa_port *dp, struct net_device *lag_dev,
494                       struct netdev_lag_upper_info *uinfo,
495                       struct netlink_ext_ack *extack)
496 {
497         struct dsa_notifier_lag_info info = {
498                 .sw_index = dp->ds->index,
499                 .port = dp->index,
500                 .info = uinfo,
501         };
502         struct net_device *bridge_dev;
503         int err;
504
505         err = dsa_port_lag_create(dp, lag_dev);
506         if (err)
507                 goto err_lag_create;
508
509         info.lag = *dp->lag;
510         err = dsa_port_notify(dp, DSA_NOTIFIER_LAG_JOIN, &info);
511         if (err)
512                 goto err_lag_join;
513
514         bridge_dev = netdev_master_upper_dev_get(lag_dev);
515         if (!bridge_dev || !netif_is_bridge_master(bridge_dev))
516                 return 0;
517
518         err = dsa_port_bridge_join(dp, bridge_dev, extack);
519         if (err)
520                 goto err_bridge_join;
521
522         return 0;
523
524 err_bridge_join:
525         dsa_port_notify(dp, DSA_NOTIFIER_LAG_LEAVE, &info);
526 err_lag_join:
527         dsa_port_lag_destroy(dp);
528 err_lag_create:
529         return err;
530 }
531
532 void dsa_port_pre_lag_leave(struct dsa_port *dp, struct net_device *lag_dev)
533 {
534         struct net_device *br = dsa_port_bridge_dev_get(dp);
535
536         if (br)
537                 dsa_port_pre_bridge_leave(dp, br);
538 }
539
540 void dsa_port_lag_leave(struct dsa_port *dp, struct net_device *lag_dev)
541 {
542         struct net_device *br = dsa_port_bridge_dev_get(dp);
543         struct dsa_notifier_lag_info info = {
544                 .sw_index = dp->ds->index,
545                 .port = dp->index,
546         };
547         int err;
548
549         if (!dp->lag)
550                 return;
551
552         /* Port might have been part of a LAG that in turn was
553          * attached to a bridge.
554          */
555         if (br)
556                 dsa_port_bridge_leave(dp, br);
557
558         info.lag = *dp->lag;
559
560         dsa_port_lag_destroy(dp);
561
562         err = dsa_port_notify(dp, DSA_NOTIFIER_LAG_LEAVE, &info);
563         if (err)
564                 dev_err(dp->ds->dev,
565                         "port %d failed to notify DSA_NOTIFIER_LAG_LEAVE: %pe\n",
566                         dp->index, ERR_PTR(err));
567 }
568
569 /* Must be called under rcu_read_lock() */
570 static bool dsa_port_can_apply_vlan_filtering(struct dsa_port *dp,
571                                               bool vlan_filtering,
572                                               struct netlink_ext_ack *extack)
573 {
574         struct dsa_switch *ds = dp->ds;
575         struct dsa_port *other_dp;
576         int err;
577
578         /* VLAN awareness was off, so the question is "can we turn it on".
579          * We may have had 8021q uppers, those need to go. Make sure we don't
580          * enter an inconsistent state: deny changing the VLAN awareness state
581          * as long as we have 8021q uppers.
582          */
583         if (vlan_filtering && dsa_port_is_user(dp)) {
584                 struct net_device *br = dsa_port_bridge_dev_get(dp);
585                 struct net_device *upper_dev, *slave = dp->slave;
586                 struct list_head *iter;
587
588                 netdev_for_each_upper_dev_rcu(slave, upper_dev, iter) {
589                         struct bridge_vlan_info br_info;
590                         u16 vid;
591
592                         if (!is_vlan_dev(upper_dev))
593                                 continue;
594
595                         vid = vlan_dev_vlan_id(upper_dev);
596
597                         /* br_vlan_get_info() returns -EINVAL or -ENOENT if the
598                          * device or the VID is not found. A return value of 0
599                          * means success, which is a failure for us here.
600                          */
601                         err = br_vlan_get_info(br, vid, &br_info);
602                         if (err == 0) {
603                                 NL_SET_ERR_MSG_MOD(extack,
604                                                    "Must first remove VLAN uppers having VIDs also present in bridge");
605                                 return false;
606                         }
607                 }
608         }
609
610         if (!ds->vlan_filtering_is_global)
611                 return true;
612
613         /* For cases where enabling/disabling VLAN awareness is global to the
614          * switch, we need to handle the case where multiple bridges span
615          * different ports of the same switch device and one of them has a
616          * different setting than what is being requested.
617          */
618         dsa_switch_for_each_port(other_dp, ds) {
619                 struct net_device *other_br = dsa_port_bridge_dev_get(other_dp);
620
621                 /* If it's the same bridge, it also has same
622                  * vlan_filtering setting => no need to check
623                  */
624                 if (!other_br || other_br == dsa_port_bridge_dev_get(dp))
625                         continue;
626
627                 if (br_vlan_enabled(other_br) != vlan_filtering) {
628                         NL_SET_ERR_MSG_MOD(extack,
629                                            "VLAN filtering is a global setting");
630                         return false;
631                 }
632         }
633         return true;
634 }
635
636 int dsa_port_vlan_filtering(struct dsa_port *dp, bool vlan_filtering,
637                             struct netlink_ext_ack *extack)
638 {
639         bool old_vlan_filtering = dsa_port_is_vlan_filtering(dp);
640         struct dsa_switch *ds = dp->ds;
641         bool apply;
642         int err;
643
644         if (!ds->ops->port_vlan_filtering)
645                 return -EOPNOTSUPP;
646
647         /* We are called from dsa_slave_switchdev_blocking_event(),
648          * which is not under rcu_read_lock(), unlike
649          * dsa_slave_switchdev_event().
650          */
651         rcu_read_lock();
652         apply = dsa_port_can_apply_vlan_filtering(dp, vlan_filtering, extack);
653         rcu_read_unlock();
654         if (!apply)
655                 return -EINVAL;
656
657         if (dsa_port_is_vlan_filtering(dp) == vlan_filtering)
658                 return 0;
659
660         err = ds->ops->port_vlan_filtering(ds, dp->index, vlan_filtering,
661                                            extack);
662         if (err)
663                 return err;
664
665         if (ds->vlan_filtering_is_global) {
666                 struct dsa_port *other_dp;
667
668                 ds->vlan_filtering = vlan_filtering;
669
670                 dsa_switch_for_each_user_port(other_dp, ds) {
671                         struct net_device *slave = other_dp->slave;
672
673                         /* We might be called in the unbind path, so not
674                          * all slave devices might still be registered.
675                          */
676                         if (!slave)
677                                 continue;
678
679                         err = dsa_slave_manage_vlan_filtering(slave,
680                                                               vlan_filtering);
681                         if (err)
682                                 goto restore;
683                 }
684         } else {
685                 dp->vlan_filtering = vlan_filtering;
686
687                 err = dsa_slave_manage_vlan_filtering(dp->slave,
688                                                       vlan_filtering);
689                 if (err)
690                         goto restore;
691         }
692
693         return 0;
694
695 restore:
696         ds->ops->port_vlan_filtering(ds, dp->index, old_vlan_filtering, NULL);
697
698         if (ds->vlan_filtering_is_global)
699                 ds->vlan_filtering = old_vlan_filtering;
700         else
701                 dp->vlan_filtering = old_vlan_filtering;
702
703         return err;
704 }
705
706 /* This enforces legacy behavior for switch drivers which assume they can't
707  * receive VLAN configuration when enslaved to a bridge with vlan_filtering=0
708  */
709 bool dsa_port_skip_vlan_configuration(struct dsa_port *dp)
710 {
711         struct net_device *br = dsa_port_bridge_dev_get(dp);
712         struct dsa_switch *ds = dp->ds;
713
714         if (!br)
715                 return false;
716
717         return !ds->configure_vlan_while_not_filtering && !br_vlan_enabled(br);
718 }
719
720 int dsa_port_ageing_time(struct dsa_port *dp, clock_t ageing_clock)
721 {
722         unsigned long ageing_jiffies = clock_t_to_jiffies(ageing_clock);
723         unsigned int ageing_time = jiffies_to_msecs(ageing_jiffies);
724         struct dsa_notifier_ageing_time_info info;
725         int err;
726
727         info.ageing_time = ageing_time;
728
729         err = dsa_port_notify(dp, DSA_NOTIFIER_AGEING_TIME, &info);
730         if (err)
731                 return err;
732
733         dp->ageing_time = ageing_time;
734
735         return 0;
736 }
737
738 int dsa_port_pre_bridge_flags(const struct dsa_port *dp,
739                               struct switchdev_brport_flags flags,
740                               struct netlink_ext_ack *extack)
741 {
742         struct dsa_switch *ds = dp->ds;
743
744         if (!ds->ops->port_pre_bridge_flags)
745                 return -EINVAL;
746
747         return ds->ops->port_pre_bridge_flags(ds, dp->index, flags, extack);
748 }
749
750 int dsa_port_bridge_flags(struct dsa_port *dp,
751                           struct switchdev_brport_flags flags,
752                           struct netlink_ext_ack *extack)
753 {
754         struct dsa_switch *ds = dp->ds;
755         int err;
756
757         if (!ds->ops->port_bridge_flags)
758                 return -EOPNOTSUPP;
759
760         err = ds->ops->port_bridge_flags(ds, dp->index, flags, extack);
761         if (err)
762                 return err;
763
764         if (flags.mask & BR_LEARNING) {
765                 bool learning = flags.val & BR_LEARNING;
766
767                 if (learning == dp->learning)
768                         return 0;
769
770                 if ((dp->learning && !learning) &&
771                     (dp->stp_state == BR_STATE_LEARNING ||
772                      dp->stp_state == BR_STATE_FORWARDING))
773                         dsa_port_fast_age(dp);
774
775                 dp->learning = learning;
776         }
777
778         return 0;
779 }
780
781 int dsa_port_mtu_change(struct dsa_port *dp, int new_mtu,
782                         bool targeted_match)
783 {
784         struct dsa_notifier_mtu_info info = {
785                 .sw_index = dp->ds->index,
786                 .targeted_match = targeted_match,
787                 .port = dp->index,
788                 .mtu = new_mtu,
789         };
790
791         return dsa_port_notify(dp, DSA_NOTIFIER_MTU, &info);
792 }
793
794 int dsa_port_fdb_add(struct dsa_port *dp, const unsigned char *addr,
795                      u16 vid)
796 {
797         struct dsa_notifier_fdb_info info = {
798                 .sw_index = dp->ds->index,
799                 .port = dp->index,
800                 .addr = addr,
801                 .vid = vid,
802                 .db = {
803                         .type = DSA_DB_BRIDGE,
804                         .bridge = *dp->bridge,
805                 },
806         };
807
808         /* Refcounting takes bridge.num as a key, and should be global for all
809          * bridges in the absence of FDB isolation, and per bridge otherwise.
810          * Force the bridge.num to zero here in the absence of FDB isolation.
811          */
812         if (!dp->ds->fdb_isolation)
813                 info.db.bridge.num = 0;
814
815         return dsa_port_notify(dp, DSA_NOTIFIER_FDB_ADD, &info);
816 }
817
818 int dsa_port_fdb_del(struct dsa_port *dp, const unsigned char *addr,
819                      u16 vid)
820 {
821         struct dsa_notifier_fdb_info info = {
822                 .sw_index = dp->ds->index,
823                 .port = dp->index,
824                 .addr = addr,
825                 .vid = vid,
826                 .db = {
827                         .type = DSA_DB_BRIDGE,
828                         .bridge = *dp->bridge,
829                 },
830         };
831
832         if (!dp->ds->fdb_isolation)
833                 info.db.bridge.num = 0;
834
835         return dsa_port_notify(dp, DSA_NOTIFIER_FDB_DEL, &info);
836 }
837
838 int dsa_port_host_fdb_add(struct dsa_port *dp, const unsigned char *addr,
839                           u16 vid)
840 {
841         struct dsa_notifier_fdb_info info = {
842                 .sw_index = dp->ds->index,
843                 .port = dp->index,
844                 .addr = addr,
845                 .vid = vid,
846                 .db = {
847                         .type = DSA_DB_BRIDGE,
848                         .bridge = *dp->bridge,
849                 },
850         };
851         struct dsa_port *cpu_dp = dp->cpu_dp;
852         int err;
853
854         /* Avoid a call to __dev_set_promiscuity() on the master, which
855          * requires rtnl_lock(), since we can't guarantee that is held here,
856          * and we can't take it either.
857          */
858         if (cpu_dp->master->priv_flags & IFF_UNICAST_FLT) {
859                 err = dev_uc_add(cpu_dp->master, addr);
860                 if (err)
861                         return err;
862         }
863
864         if (!dp->ds->fdb_isolation)
865                 info.db.bridge.num = 0;
866
867         return dsa_port_notify(dp, DSA_NOTIFIER_HOST_FDB_ADD, &info);
868 }
869
870 int dsa_port_host_fdb_del(struct dsa_port *dp, const unsigned char *addr,
871                           u16 vid)
872 {
873         struct dsa_notifier_fdb_info info = {
874                 .sw_index = dp->ds->index,
875                 .port = dp->index,
876                 .addr = addr,
877                 .vid = vid,
878                 .db = {
879                         .type = DSA_DB_BRIDGE,
880                         .bridge = *dp->bridge,
881                 },
882         };
883         struct dsa_port *cpu_dp = dp->cpu_dp;
884         int err;
885
886         if (cpu_dp->master->priv_flags & IFF_UNICAST_FLT) {
887                 err = dev_uc_del(cpu_dp->master, addr);
888                 if (err)
889                         return err;
890         }
891
892         if (!dp->ds->fdb_isolation)
893                 info.db.bridge.num = 0;
894
895         return dsa_port_notify(dp, DSA_NOTIFIER_HOST_FDB_DEL, &info);
896 }
897
898 int dsa_port_lag_fdb_add(struct dsa_port *dp, const unsigned char *addr,
899                          u16 vid)
900 {
901         struct dsa_notifier_lag_fdb_info info = {
902                 .lag = dp->lag,
903                 .addr = addr,
904                 .vid = vid,
905                 .db = {
906                         .type = DSA_DB_BRIDGE,
907                         .bridge = *dp->bridge,
908                 },
909         };
910
911         if (!dp->ds->fdb_isolation)
912                 info.db.bridge.num = 0;
913
914         return dsa_port_notify(dp, DSA_NOTIFIER_LAG_FDB_ADD, &info);
915 }
916
917 int dsa_port_lag_fdb_del(struct dsa_port *dp, const unsigned char *addr,
918                          u16 vid)
919 {
920         struct dsa_notifier_lag_fdb_info info = {
921                 .lag = dp->lag,
922                 .addr = addr,
923                 .vid = vid,
924                 .db = {
925                         .type = DSA_DB_BRIDGE,
926                         .bridge = *dp->bridge,
927                 },
928         };
929
930         if (!dp->ds->fdb_isolation)
931                 info.db.bridge.num = 0;
932
933         return dsa_port_notify(dp, DSA_NOTIFIER_LAG_FDB_DEL, &info);
934 }
935
936 int dsa_port_fdb_dump(struct dsa_port *dp, dsa_fdb_dump_cb_t *cb, void *data)
937 {
938         struct dsa_switch *ds = dp->ds;
939         int port = dp->index;
940
941         if (!ds->ops->port_fdb_dump)
942                 return -EOPNOTSUPP;
943
944         return ds->ops->port_fdb_dump(ds, port, cb, data);
945 }
946
947 int dsa_port_mdb_add(const struct dsa_port *dp,
948                      const struct switchdev_obj_port_mdb *mdb)
949 {
950         struct dsa_notifier_mdb_info info = {
951                 .sw_index = dp->ds->index,
952                 .port = dp->index,
953                 .mdb = mdb,
954                 .db = {
955                         .type = DSA_DB_BRIDGE,
956                         .bridge = *dp->bridge,
957                 },
958         };
959
960         if (!dp->ds->fdb_isolation)
961                 info.db.bridge.num = 0;
962
963         return dsa_port_notify(dp, DSA_NOTIFIER_MDB_ADD, &info);
964 }
965
966 int dsa_port_mdb_del(const struct dsa_port *dp,
967                      const struct switchdev_obj_port_mdb *mdb)
968 {
969         struct dsa_notifier_mdb_info info = {
970                 .sw_index = dp->ds->index,
971                 .port = dp->index,
972                 .mdb = mdb,
973                 .db = {
974                         .type = DSA_DB_BRIDGE,
975                         .bridge = *dp->bridge,
976                 },
977         };
978
979         if (!dp->ds->fdb_isolation)
980                 info.db.bridge.num = 0;
981
982         return dsa_port_notify(dp, DSA_NOTIFIER_MDB_DEL, &info);
983 }
984
985 int dsa_port_host_mdb_add(const struct dsa_port *dp,
986                           const struct switchdev_obj_port_mdb *mdb)
987 {
988         struct dsa_notifier_mdb_info info = {
989                 .sw_index = dp->ds->index,
990                 .port = dp->index,
991                 .mdb = mdb,
992                 .db = {
993                         .type = DSA_DB_BRIDGE,
994                         .bridge = *dp->bridge,
995                 },
996         };
997         struct dsa_port *cpu_dp = dp->cpu_dp;
998         int err;
999
1000         err = dev_mc_add(cpu_dp->master, mdb->addr);
1001         if (err)
1002                 return err;
1003
1004         if (!dp->ds->fdb_isolation)
1005                 info.db.bridge.num = 0;
1006
1007         return dsa_port_notify(dp, DSA_NOTIFIER_HOST_MDB_ADD, &info);
1008 }
1009
1010 int dsa_port_host_mdb_del(const struct dsa_port *dp,
1011                           const struct switchdev_obj_port_mdb *mdb)
1012 {
1013         struct dsa_notifier_mdb_info info = {
1014                 .sw_index = dp->ds->index,
1015                 .port = dp->index,
1016                 .mdb = mdb,
1017                 .db = {
1018                         .type = DSA_DB_BRIDGE,
1019                         .bridge = *dp->bridge,
1020                 },
1021         };
1022         struct dsa_port *cpu_dp = dp->cpu_dp;
1023         int err;
1024
1025         err = dev_mc_del(cpu_dp->master, mdb->addr);
1026         if (err)
1027                 return err;
1028
1029         if (!dp->ds->fdb_isolation)
1030                 info.db.bridge.num = 0;
1031
1032         return dsa_port_notify(dp, DSA_NOTIFIER_HOST_MDB_DEL, &info);
1033 }
1034
1035 int dsa_port_vlan_add(struct dsa_port *dp,
1036                       const struct switchdev_obj_port_vlan *vlan,
1037                       struct netlink_ext_ack *extack)
1038 {
1039         struct dsa_notifier_vlan_info info = {
1040                 .sw_index = dp->ds->index,
1041                 .port = dp->index,
1042                 .vlan = vlan,
1043                 .extack = extack,
1044         };
1045
1046         return dsa_port_notify(dp, DSA_NOTIFIER_VLAN_ADD, &info);
1047 }
1048
1049 int dsa_port_vlan_del(struct dsa_port *dp,
1050                       const struct switchdev_obj_port_vlan *vlan)
1051 {
1052         struct dsa_notifier_vlan_info info = {
1053                 .sw_index = dp->ds->index,
1054                 .port = dp->index,
1055                 .vlan = vlan,
1056         };
1057
1058         return dsa_port_notify(dp, DSA_NOTIFIER_VLAN_DEL, &info);
1059 }
1060
1061 int dsa_port_host_vlan_add(struct dsa_port *dp,
1062                            const struct switchdev_obj_port_vlan *vlan,
1063                            struct netlink_ext_ack *extack)
1064 {
1065         struct dsa_notifier_vlan_info info = {
1066                 .sw_index = dp->ds->index,
1067                 .port = dp->index,
1068                 .vlan = vlan,
1069                 .extack = extack,
1070         };
1071         struct dsa_port *cpu_dp = dp->cpu_dp;
1072         int err;
1073
1074         err = dsa_port_notify(dp, DSA_NOTIFIER_HOST_VLAN_ADD, &info);
1075         if (err && err != -EOPNOTSUPP)
1076                 return err;
1077
1078         vlan_vid_add(cpu_dp->master, htons(ETH_P_8021Q), vlan->vid);
1079
1080         return err;
1081 }
1082
1083 int dsa_port_host_vlan_del(struct dsa_port *dp,
1084                            const struct switchdev_obj_port_vlan *vlan)
1085 {
1086         struct dsa_notifier_vlan_info info = {
1087                 .sw_index = dp->ds->index,
1088                 .port = dp->index,
1089                 .vlan = vlan,
1090         };
1091         struct dsa_port *cpu_dp = dp->cpu_dp;
1092         int err;
1093
1094         err = dsa_port_notify(dp, DSA_NOTIFIER_HOST_VLAN_DEL, &info);
1095         if (err && err != -EOPNOTSUPP)
1096                 return err;
1097
1098         vlan_vid_del(cpu_dp->master, htons(ETH_P_8021Q), vlan->vid);
1099
1100         return err;
1101 }
1102
1103 int dsa_port_mrp_add(const struct dsa_port *dp,
1104                      const struct switchdev_obj_mrp *mrp)
1105 {
1106         struct dsa_switch *ds = dp->ds;
1107
1108         if (!ds->ops->port_mrp_add)
1109                 return -EOPNOTSUPP;
1110
1111         return ds->ops->port_mrp_add(ds, dp->index, mrp);
1112 }
1113
1114 int dsa_port_mrp_del(const struct dsa_port *dp,
1115                      const struct switchdev_obj_mrp *mrp)
1116 {
1117         struct dsa_switch *ds = dp->ds;
1118
1119         if (!ds->ops->port_mrp_del)
1120                 return -EOPNOTSUPP;
1121
1122         return ds->ops->port_mrp_del(ds, dp->index, mrp);
1123 }
1124
1125 int dsa_port_mrp_add_ring_role(const struct dsa_port *dp,
1126                                const struct switchdev_obj_ring_role_mrp *mrp)
1127 {
1128         struct dsa_switch *ds = dp->ds;
1129
1130         if (!ds->ops->port_mrp_add_ring_role)
1131                 return -EOPNOTSUPP;
1132
1133         return ds->ops->port_mrp_add_ring_role(ds, dp->index, mrp);
1134 }
1135
1136 int dsa_port_mrp_del_ring_role(const struct dsa_port *dp,
1137                                const struct switchdev_obj_ring_role_mrp *mrp)
1138 {
1139         struct dsa_switch *ds = dp->ds;
1140
1141         if (!ds->ops->port_mrp_del_ring_role)
1142                 return -EOPNOTSUPP;
1143
1144         return ds->ops->port_mrp_del_ring_role(ds, dp->index, mrp);
1145 }
1146
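/* Bind the tagging protocol operations to the CPU port. The ->rcv hook is
 * cached separately so that the RX hot path can use it directly.
 */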
1147 void dsa_port_set_tag_protocol(struct dsa_port *cpu_dp,
1148                                const struct dsa_device_ops *tag_ops)
1149 {
1150         cpu_dp->rcv = tag_ops->rcv;
1151         cpu_dp->tag_ops = tag_ops;
1152 }
1153
1154 static struct phy_device *dsa_port_get_phy_device(struct dsa_port *dp)
1155 {
1156         struct device_node *phy_dn;
1157         struct phy_device *phydev;
1158
1159         phy_dn = of_parse_phandle(dp->dn, "phy-handle", 0);
1160         if (!phy_dn)
1161                 return NULL;
1162
1163         phydev = of_phy_find_device(phy_dn);
1164         if (!phydev) {
1165                 of_node_put(phy_dn);
1166                 return ERR_PTR(-EPROBE_DEFER);
1167         }
1168
1169         of_node_put(phy_dn);
1170         return phydev;
1171 }
1172
1173 static void dsa_port_phylink_validate(struct phylink_config *config,
1174                                       unsigned long *supported,
1175                                       struct phylink_link_state *state)
1176 {
1177         struct dsa_port *dp = container_of(config, struct dsa_port, pl_config);
1178         struct dsa_switch *ds = dp->ds;
1179
1180         if (!ds->ops->phylink_validate) {
1181                 if (config->mac_capabilities)
1182                         phylink_generic_validate(config, supported, state);
1183                 return;
1184         }
1185
1186         ds->ops->phylink_validate(ds, dp->index, supported, state);
1187 }
1188
1189 static void dsa_port_phylink_mac_pcs_get_state(struct phylink_config *config,
1190                                                struct phylink_link_state *state)
1191 {
1192         struct dsa_port *dp = container_of(config, struct dsa_port, pl_config);
1193         struct dsa_switch *ds = dp->ds;
1194         int err;
1195
1196         /* Only called for inband modes */
1197         if (!ds->ops->phylink_mac_link_state) {
1198                 state->link = 0;
1199                 return;
1200         }
1201
1202         err = ds->ops->phylink_mac_link_state(ds, dp->index, state);
1203         if (err < 0) {
1204                 dev_err(ds->dev, "p%d: phylink_mac_link_state() failed: %d\n",
1205                         dp->index, err);
1206                 state->link = 0;
1207         }
1208 }
1209
1210 static struct phylink_pcs *
1211 dsa_port_phylink_mac_select_pcs(struct phylink_config *config,
1212                                 phy_interface_t interface)
1213 {
1214         struct dsa_port *dp = container_of(config, struct dsa_port, pl_config);
1215         struct phylink_pcs *pcs = ERR_PTR(-EOPNOTSUPP);
1216         struct dsa_switch *ds = dp->ds;
1217
1218         if (ds->ops->phylink_mac_select_pcs)
1219                 pcs = ds->ops->phylink_mac_select_pcs(ds, dp->index, interface);
1220
1221         return pcs;
1222 }
1223
1224 static void dsa_port_phylink_mac_config(struct phylink_config *config,
1225                                         unsigned int mode,
1226                                         const struct phylink_link_state *state)
1227 {
1228         struct dsa_port *dp = container_of(config, struct dsa_port, pl_config);
1229         struct dsa_switch *ds = dp->ds;
1230
1231         if (!ds->ops->phylink_mac_config)
1232                 return;
1233
1234         ds->ops->phylink_mac_config(ds, dp->index, mode, state);
1235 }
1236
1237 static void dsa_port_phylink_mac_an_restart(struct phylink_config *config)
1238 {
1239         struct dsa_port *dp = container_of(config, struct dsa_port, pl_config);
1240         struct dsa_switch *ds = dp->ds;
1241
1242         if (!ds->ops->phylink_mac_an_restart)
1243                 return;
1244
1245         ds->ops->phylink_mac_an_restart(ds, dp->index);
1246 }
1247
1248 static void dsa_port_phylink_mac_link_down(struct phylink_config *config,
1249                                            unsigned int mode,
1250                                            phy_interface_t interface)
1251 {
1252         struct dsa_port *dp = container_of(config, struct dsa_port, pl_config);
1253         struct phy_device *phydev = NULL;
1254         struct dsa_switch *ds = dp->ds;
1255
1256         if (dsa_port_is_user(dp))
1257                 phydev = dp->slave->phydev;
1258
1259         if (!ds->ops->phylink_mac_link_down) {
1260                 if (ds->ops->adjust_link && phydev)
1261                         ds->ops->adjust_link(ds, dp->index, phydev);
1262                 return;
1263         }
1264
1265         ds->ops->phylink_mac_link_down(ds, dp->index, mode, interface);
1266 }
1267
1268 static void dsa_port_phylink_mac_link_up(struct phylink_config *config,
1269                                          struct phy_device *phydev,
1270                                          unsigned int mode,
1271                                          phy_interface_t interface,
1272                                          int speed, int duplex,
1273                                          bool tx_pause, bool rx_pause)
1274 {
1275         struct dsa_port *dp = container_of(config, struct dsa_port, pl_config);
1276         struct dsa_switch *ds = dp->ds;
1277
1278         if (!ds->ops->phylink_mac_link_up) {
1279                 if (ds->ops->adjust_link && phydev)
1280                         ds->ops->adjust_link(ds, dp->index, phydev);
1281                 return;
1282         }
1283
1284         ds->ops->phylink_mac_link_up(ds, dp->index, mode, interface, phydev,
1285                                      speed, duplex, tx_pause, rx_pause);
1286 }
1287
1288 static const struct phylink_mac_ops dsa_port_phylink_mac_ops = {
1289         .validate = dsa_port_phylink_validate,
1290         .mac_select_pcs = dsa_port_phylink_mac_select_pcs,
1291         .mac_pcs_get_state = dsa_port_phylink_mac_pcs_get_state,
1292         .mac_config = dsa_port_phylink_mac_config,
1293         .mac_an_restart = dsa_port_phylink_mac_an_restart,
1294         .mac_link_down = dsa_port_phylink_mac_link_down,
1295         .mac_link_up = dsa_port_phylink_mac_link_up,
1296 };
1297
1298 int dsa_port_phylink_create(struct dsa_port *dp)
1299 {
1300         struct dsa_switch *ds = dp->ds;
1301         phy_interface_t mode;
1302         int err;
1303
1304         err = of_get_phy_mode(dp->dn, &mode);
1305         if (err)
1306                 mode = PHY_INTERFACE_MODE_NA;
1307
1308         /* Presence of phylink_mac_link_state or phylink_mac_an_restart is
1309          * an indicator of a legacy phylink driver.
1310          */
1311         if (ds->ops->phylink_mac_link_state ||
1312             ds->ops->phylink_mac_an_restart)
1313                 dp->pl_config.legacy_pre_march2020 = true;
1314
1315         if (ds->ops->phylink_get_caps)
1316                 ds->ops->phylink_get_caps(ds, dp->index, &dp->pl_config);
1317
1318         dp->pl = phylink_create(&dp->pl_config, of_fwnode_handle(dp->dn),
1319                                 mode, &dsa_port_phylink_mac_ops);
1320         if (IS_ERR(dp->pl)) {
1321                 pr_err("error creating PHYLINK: %ld\n", PTR_ERR(dp->pl));
1322                 return PTR_ERR(dp->pl);
1323         }
1324
1325         return 0;
1326 }
1327
1328 static int dsa_port_setup_phy_of(struct dsa_port *dp, bool enable)
1329 {
1330         struct dsa_switch *ds = dp->ds;
1331         struct phy_device *phydev;
1332         int port = dp->index;
1333         int err = 0;
1334
1335         phydev = dsa_port_get_phy_device(dp);
1336         if (!phydev)
1337                 return 0;
1338
1339         if (IS_ERR(phydev))
1340                 return PTR_ERR(phydev);
1341
1342         if (enable) {
1343                 err = genphy_resume(phydev);
1344                 if (err < 0)
1345                         goto err_put_dev;
1346
1347                 err = genphy_read_status(phydev);
1348                 if (err < 0)
1349                         goto err_put_dev;
1350         } else {
1351                 err = genphy_suspend(phydev);
1352                 if (err < 0)
1353                         goto err_put_dev;
1354         }
1355
1356         if (ds->ops->adjust_link)
1357                 ds->ops->adjust_link(ds, port, phydev);
1358
1359         dev_dbg(ds->dev, "enabled port's phy: %s", phydev_name(phydev));
1360
1361 err_put_dev:
1362         put_device(&phydev->mdio.dev);
1363         return err;
1364 }
1365
1366 static int dsa_port_fixed_link_register_of(struct dsa_port *dp)
1367 {
1368         struct device_node *dn = dp->dn;
1369         struct dsa_switch *ds = dp->ds;
1370         struct phy_device *phydev;
1371         int port = dp->index;
1372         phy_interface_t mode;
1373         int err;
1374
1375         err = of_phy_register_fixed_link(dn);
1376         if (err) {
1377                 dev_err(ds->dev,
1378                         "failed to register the fixed PHY of port %d\n",
1379                         port);
1380                 return err;
1381         }
1382
1383         phydev = of_phy_find_device(dn);
1384
1385         err = of_get_phy_mode(dn, &mode);
1386         if (err)
1387                 mode = PHY_INTERFACE_MODE_NA;
1388         phydev->interface = mode;
1389
1390         genphy_read_status(phydev);
1391
1392         if (ds->ops->adjust_link)
1393                 ds->ops->adjust_link(ds, port, phydev);
1394
1395         put_device(&phydev->mdio.dev);
1396
1397         return 0;
1398 }
1399
1400 static int dsa_port_phylink_register(struct dsa_port *dp)
1401 {
1402         struct dsa_switch *ds = dp->ds;
1403         struct device_node *port_dn = dp->dn;
1404         int err;
1405
1406         dp->pl_config.dev = ds->dev;
1407         dp->pl_config.type = PHYLINK_DEV;
1408
1409         err = dsa_port_phylink_create(dp);
1410         if (err)
1411                 return err;
1412
1413         err = phylink_of_phy_connect(dp->pl, port_dn, 0);
1414         if (err && err != -ENODEV) {
1415                 pr_err("could not attach to PHY: %d\n", err);
1416                 goto err_phy_connect;
1417         }
1418
1419         return 0;
1420
1421 err_phy_connect:
1422         phylink_destroy(dp->pl);
1423         return err;
1424 }
1425
1426 int dsa_port_link_register_of(struct dsa_port *dp)
1427 {
1428         struct dsa_switch *ds = dp->ds;
1429         struct device_node *phy_np;
1430         int port = dp->index;
1431
1432         if (!ds->ops->adjust_link) {
1433                 phy_np = of_parse_phandle(dp->dn, "phy-handle", 0);
1434                 if (of_phy_is_fixed_link(dp->dn) || phy_np) {
1435                         if (ds->ops->phylink_mac_link_down)
1436                                 ds->ops->phylink_mac_link_down(ds, port,
1437                                         MLO_AN_FIXED, PHY_INTERFACE_MODE_NA);
1438                         return dsa_port_phylink_register(dp);
1439                 }
1440                 return 0;
1441         }
1442
1443         dev_warn(ds->dev,
1444                  "Using legacy PHYLIB callbacks. Please migrate to PHYLINK!\n");
1445
1446         if (of_phy_is_fixed_link(dp->dn))
1447                 return dsa_port_fixed_link_register_of(dp);
1448         else
1449                 return dsa_port_setup_phy_of(dp, true);
1450 }
1451
1452 void dsa_port_link_unregister_of(struct dsa_port *dp)
1453 {
1454         struct dsa_switch *ds = dp->ds;
1455
1456         if (!ds->ops->adjust_link && dp->pl) {
1457                 rtnl_lock();
1458                 phylink_disconnect_phy(dp->pl);
1459                 rtnl_unlock();
1460                 phylink_destroy(dp->pl);
1461                 dp->pl = NULL;
1462                 return;
1463         }
1464
1465         if (of_phy_is_fixed_link(dp->dn))
1466                 of_phy_deregister_fixed_link(dp->dn);
1467         else
1468                 dsa_port_setup_phy_of(dp, false);
1469 }
1470
1471 int dsa_port_hsr_join(struct dsa_port *dp, struct net_device *hsr)
1472 {
1473         struct dsa_switch *ds = dp->ds;
1474         int err;
1475
1476         if (!ds->ops->port_hsr_join)
1477                 return -EOPNOTSUPP;
1478
1479         dp->hsr_dev = hsr;
1480
1481         err = ds->ops->port_hsr_join(ds, dp->index, hsr);
1482         if (err)
1483                 dp->hsr_dev = NULL;
1484
1485         return err;
1486 }
1487
1488 void dsa_port_hsr_leave(struct dsa_port *dp, struct net_device *hsr)
1489 {
1490         struct dsa_switch *ds = dp->ds;
1491         int err;
1492
1493         dp->hsr_dev = NULL;
1494
1495         if (ds->ops->port_hsr_leave) {
1496                 err = ds->ops->port_hsr_leave(ds, dp->index, hsr);
1497                 if (err)
1498                         dev_err(dp->ds->dev,
1499                                 "port %d failed to leave HSR %s: %pe\n",
1500                                 dp->index, hsr->name, ERR_PTR(err));
1501         }
1502 }
1503
1504 int dsa_port_tag_8021q_vlan_add(struct dsa_port *dp, u16 vid, bool broadcast)
1505 {
1506         struct dsa_notifier_tag_8021q_vlan_info info = {
1507                 .tree_index = dp->ds->dst->index,
1508                 .sw_index = dp->ds->index,
1509                 .port = dp->index,
1510                 .vid = vid,
1511         };
1512
1513         if (broadcast)
1514                 return dsa_broadcast(DSA_NOTIFIER_TAG_8021Q_VLAN_ADD, &info);
1515
1516         return dsa_port_notify(dp, DSA_NOTIFIER_TAG_8021Q_VLAN_ADD, &info);
1517 }
1518
1519 void dsa_port_tag_8021q_vlan_del(struct dsa_port *dp, u16 vid, bool broadcast)
1520 {
1521         struct dsa_notifier_tag_8021q_vlan_info info = {
1522                 .tree_index = dp->ds->dst->index,
1523                 .sw_index = dp->ds->index,
1524                 .port = dp->index,
1525                 .vid = vid,
1526         };
1527         int err;
1528
1529         if (broadcast)
1530                 err = dsa_broadcast(DSA_NOTIFIER_TAG_8021Q_VLAN_DEL, &info);
1531         else
1532                 err = dsa_port_notify(dp, DSA_NOTIFIER_TAG_8021Q_VLAN_DEL, &info);
1533         if (err)
1534                 dev_err(dp->ds->dev,
1535                         "port %d failed to notify tag_8021q VLAN %d deletion: %pe\n",
1536                         dp->index, vid, ERR_PTR(err));
1537 }