[linux.git] / net/dsa/port.c
1 // SPDX-License-Identifier: GPL-2.0-or-later
2 /*
3  * Handling of a single switch port
4  *
5  * Copyright (c) 2017 Savoir-faire Linux Inc.
6  *      Vivien Didelot <[email protected]>
7  */
8
9 #include <linux/if_bridge.h>
10 #include <linux/notifier.h>
11 #include <linux/of_mdio.h>
12 #include <linux/of_net.h>
13
14 #include "dsa_priv.h"
15
16 /**
17  * dsa_port_notify - Notify the switching fabric of changes to a port
18  * @dp: port on which change occurred
19  * @e: event, must be of type DSA_NOTIFIER_*
20  * @v: event-specific value.
21  *
22  * Notify all switches in the DSA tree that this port's switch belongs to,
23  * including this switch itself, of an event. Allows the other switches to
24  * reconfigure themselves for cross-chip operations. Can also be used to
25  * reconfigure ports without net_devices (CPU ports, DSA links) whenever
26  * a user port's state changes.
27  */
28 static int dsa_port_notify(const struct dsa_port *dp, unsigned long e, void *v)
29 {
30         return dsa_tree_notify(dp->ds->dst, e, v);
31 }
32
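/* Ask the bridge layer to flush its software FDB entries for this bridge
 * port and VID (a VID of 0 means all VLANs). Called after the hardware
 * FDB has been fast-aged so that the software bridge stays in sync.
 */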
33 static void dsa_port_notify_bridge_fdb_flush(const struct dsa_port *dp, u16 vid)
34 {
35         struct net_device *brport_dev = dsa_port_to_bridge_port(dp);
36         struct switchdev_notifier_fdb_info info = {
37                 .vid = vid,
38         };
39
40         /* When the port becomes standalone it has already left the bridge.
41          * Don't notify the bridge in that case.
42          */
43         if (!brport_dev)
44                 return;
45
46         call_switchdev_notifiers(SWITCHDEV_FDB_FLUSH_TO_BRIDGE,
47                                  brport_dev, &info.info, NULL);
48 }
49
50 static void dsa_port_fast_age(const struct dsa_port *dp)
51 {
52         struct dsa_switch *ds = dp->ds;
53
54         if (!ds->ops->port_fast_age)
55                 return;
56
57         ds->ops->port_fast_age(ds, dp->index);
58
59         /* flush all VLANs */
60         dsa_port_notify_bridge_fdb_flush(dp, 0);
61 }
62
63 static int dsa_port_vlan_fast_age(const struct dsa_port *dp, u16 vid)
64 {
65         struct dsa_switch *ds = dp->ds;
66         int err;
67
68         if (!ds->ops->port_vlan_fast_age)
69                 return -EOPNOTSUPP;
70
71         err = ds->ops->port_vlan_fast_age(ds, dp->index, vid);
72
73         if (!err)
74                 dsa_port_notify_bridge_fdb_flush(dp, vid);
75
76         return err;
77 }
78
79 static int dsa_port_msti_fast_age(const struct dsa_port *dp, u16 msti)
80 {
81         DECLARE_BITMAP(vids, VLAN_N_VID) = { 0 };
82         int err, vid;
83
84         err = br_mst_get_info(dsa_port_bridge_dev_get(dp), msti, vids);
85         if (err)
86                 return err;
87
88         for_each_set_bit(vid, vids, VLAN_N_VID) {
89                 err = dsa_port_vlan_fast_age(dp, vid);
90                 if (err)
91                         return err;
92         }
93
94         return 0;
95 }
96
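/* Learning can only be offloaded if the driver implements both the
 * port_pre_bridge_flags() and port_bridge_flags() hooks and accepts a
 * BR_LEARNING change for this port. dsa_port_set_state() relies on this
 * to decide whether FDB fast ageing is needed on STP transitions, and
 * MST support depends on it as well.
 */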
97 static bool dsa_port_can_configure_learning(struct dsa_port *dp)
98 {
99         struct switchdev_brport_flags flags = {
100                 .mask = BR_LEARNING,
101         };
102         struct dsa_switch *ds = dp->ds;
103         int err;
104
105         if (!ds->ops->port_bridge_flags || !ds->ops->port_pre_bridge_flags)
106                 return false;
107
108         err = ds->ops->port_pre_bridge_flags(ds, dp->index, flags, NULL);
109         return !err;
110 }
111
112 int dsa_port_set_state(struct dsa_port *dp, u8 state, bool do_fast_age)
113 {
114         struct dsa_switch *ds = dp->ds;
115         int port = dp->index;
116
117         if (!ds->ops->port_stp_state_set)
118                 return -EOPNOTSUPP;
119
120         ds->ops->port_stp_state_set(ds, port, state);
121
122         if (!dsa_port_can_configure_learning(dp) ||
123             (do_fast_age && dp->learning)) {
124                 /* Fast age FDB entries or flush appropriate forwarding database
125                  * for the given port, if we are moving it from Learning or
126                  * Forwarding state, to Disabled or Blocking or Listening state.
127                  * Ports that were standalone before the STP state change don't
128                  * need to fast age the FDB, since address learning is off in
129                  * standalone mode.
130                  */
131
132                 if ((dp->stp_state == BR_STATE_LEARNING ||
133                      dp->stp_state == BR_STATE_FORWARDING) &&
134                     (state == BR_STATE_DISABLED ||
135                      state == BR_STATE_BLOCKING ||
136                      state == BR_STATE_LISTENING))
137                         dsa_port_fast_age(dp);
138         }
139
140         dp->stp_state = state;
141
142         return 0;
143 }
144
145 static void dsa_port_set_state_now(struct dsa_port *dp, u8 state,
146                                    bool do_fast_age)
147 {
148         int err;
149
150         err = dsa_port_set_state(dp, state, do_fast_age);
151         if (err)
152                 pr_err("DSA: failed to set STP state %u (%d)\n", state, err);
153 }
154
155 int dsa_port_set_mst_state(struct dsa_port *dp,
156                            const struct switchdev_mst_state *state,
157                            struct netlink_ext_ack *extack)
158 {
159         struct dsa_switch *ds = dp->ds;
160         u8 prev_state;
161         int err;
162
163         if (!ds->ops->port_mst_state_set)
164                 return -EOPNOTSUPP;
165
166         err = br_mst_get_state(dsa_port_to_bridge_port(dp), state->msti,
167                                &prev_state);
168         if (err)
169                 return err;
170
171         err = ds->ops->port_mst_state_set(ds, dp->index, state);
172         if (err)
173                 return err;
174
175         if (!(dp->learning &&
176               (prev_state == BR_STATE_LEARNING ||
177                prev_state == BR_STATE_FORWARDING) &&
178               (state->state == BR_STATE_DISABLED ||
179                state->state == BR_STATE_BLOCKING ||
180                state->state == BR_STATE_LISTENING)))
181                 return 0;
182
183         err = dsa_port_msti_fast_age(dp, state->msti);
184         if (err)
185                 NL_SET_ERR_MSG_MOD(extack,
186                                    "Unable to flush associated VLANs");
187
188         return 0;
189 }
190
191 int dsa_port_enable_rt(struct dsa_port *dp, struct phy_device *phy)
192 {
193         struct dsa_switch *ds = dp->ds;
194         int port = dp->index;
195         int err;
196
197         if (ds->ops->port_enable) {
198                 err = ds->ops->port_enable(ds, port, phy);
199                 if (err)
200                         return err;
201         }
202
203         if (!dp->bridge)
204                 dsa_port_set_state_now(dp, BR_STATE_FORWARDING, false);
205
206         if (dp->pl)
207                 phylink_start(dp->pl);
208
209         return 0;
210 }
211
212 int dsa_port_enable(struct dsa_port *dp, struct phy_device *phy)
213 {
214         int err;
215
216         rtnl_lock();
217         err = dsa_port_enable_rt(dp, phy);
218         rtnl_unlock();
219
220         return err;
221 }
222
223 void dsa_port_disable_rt(struct dsa_port *dp)
224 {
225         struct dsa_switch *ds = dp->ds;
226         int port = dp->index;
227
228         if (dp->pl)
229                 phylink_stop(dp->pl);
230
231         if (!dp->bridge)
232                 dsa_port_set_state_now(dp, BR_STATE_DISABLED, false);
233
234         if (ds->ops->port_disable)
235                 ds->ops->port_disable(ds, port);
236 }
237
238 void dsa_port_disable(struct dsa_port *dp)
239 {
240         rtnl_lock();
241         dsa_port_disable_rt(dp);
242         rtnl_unlock();
243 }
244
245 static int dsa_port_inherit_brport_flags(struct dsa_port *dp,
246                                          struct netlink_ext_ack *extack)
247 {
248         const unsigned long mask = BR_LEARNING | BR_FLOOD | BR_MCAST_FLOOD |
249                                    BR_BCAST_FLOOD | BR_PORT_LOCKED;
250         struct net_device *brport_dev = dsa_port_to_bridge_port(dp);
251         int flag, err;
252
253         for_each_set_bit(flag, &mask, 32) {
254                 struct switchdev_brport_flags flags = {0};
255
256                 flags.mask = BIT(flag);
257
258                 if (br_port_flag_is_set(brport_dev, BIT(flag)))
259                         flags.val = BIT(flag);
260
261                 err = dsa_port_bridge_flags(dp, flags, extack);
262                 if (err && err != -EOPNOTSUPP)
263                         return err;
264         }
265
266         return 0;
267 }
268
269 static void dsa_port_clear_brport_flags(struct dsa_port *dp)
270 {
271         const unsigned long val = BR_FLOOD | BR_MCAST_FLOOD | BR_BCAST_FLOOD;
272         const unsigned long mask = BR_LEARNING | BR_FLOOD | BR_MCAST_FLOOD |
273                                    BR_BCAST_FLOOD | BR_PORT_LOCKED;
274         int flag, err;
275
276         for_each_set_bit(flag, &mask, 32) {
277                 struct switchdev_brport_flags flags = {0};
278
279                 flags.mask = BIT(flag);
280                 flags.val = val & BIT(flag);
281
282                 err = dsa_port_bridge_flags(dp, flags, NULL);
283                 if (err && err != -EOPNOTSUPP)
284                         dev_err(dp->ds->dev,
285                                 "failed to clear bridge port flag %lu: %pe\n",
286                                 flags.val, ERR_PTR(err));
287         }
288 }
289
290 static int dsa_port_switchdev_sync_attrs(struct dsa_port *dp,
291                                          struct netlink_ext_ack *extack)
292 {
293         struct net_device *brport_dev = dsa_port_to_bridge_port(dp);
294         struct net_device *br = dsa_port_bridge_dev_get(dp);
295         int err;
296
297         err = dsa_port_inherit_brport_flags(dp, extack);
298         if (err)
299                 return err;
300
301         err = dsa_port_set_state(dp, br_port_get_stp_state(brport_dev), false);
302         if (err && err != -EOPNOTSUPP)
303                 return err;
304
305         err = dsa_port_vlan_filtering(dp, br_vlan_enabled(br), extack);
306         if (err && err != -EOPNOTSUPP)
307                 return err;
308
309         err = dsa_port_ageing_time(dp, br_get_ageing_time(br));
310         if (err && err != -EOPNOTSUPP)
311                 return err;
312
313         return 0;
314 }
315
316 static void dsa_port_switchdev_unsync_attrs(struct dsa_port *dp)
317 {
318         /* Configure the port for standalone mode (no address learning,
319          * flood everything).
320          * The bridge only emits SWITCHDEV_ATTR_ID_PORT_BRIDGE_FLAGS events
321          * when the user requests it through netlink or sysfs, but not
322          * automatically at port join or leave, so we need to handle resetting
323          * the brport flags ourselves. But we even prefer it that way, because
324          * otherwise, some setups might never get the notification they need,
325          * for example, when a port leaves a LAG that offloads the bridge,
326          * it becomes standalone, but as far as the bridge is concerned, no
327          * port ever left.
328          */
329         dsa_port_clear_brport_flags(dp);
330
331         /* Port left the bridge, put in BR_STATE_DISABLED by the bridge layer,
332          * so allow it to be in BR_STATE_FORWARDING to be kept functional
333          */
334         dsa_port_set_state_now(dp, BR_STATE_FORWARDING, true);
335
336         /* VLAN filtering is handled by dsa_switch_bridge_leave */
337
338         /* Ageing time may be global to the switch chip, so don't change it
339          * here because we have no good reason (or value) to change it to.
340          */
341 }
342
343 static int dsa_port_bridge_create(struct dsa_port *dp,
344                                   struct net_device *br,
345                                   struct netlink_ext_ack *extack)
346 {
347         struct dsa_switch *ds = dp->ds;
348         struct dsa_bridge *bridge;
349
350         bridge = dsa_tree_bridge_find(ds->dst, br);
351         if (bridge) {
352                 refcount_inc(&bridge->refcount);
353                 dp->bridge = bridge;
354                 return 0;
355         }
356
357         bridge = kzalloc(sizeof(*bridge), GFP_KERNEL);
358         if (!bridge)
359                 return -ENOMEM;
360
361         refcount_set(&bridge->refcount, 1);
362
363         bridge->dev = br;
364
365         bridge->num = dsa_bridge_num_get(br, ds->max_num_bridges);
366         if (ds->max_num_bridges && !bridge->num) {
367                 NL_SET_ERR_MSG_MOD(extack,
368                                    "Range of offloadable bridges exceeded");
369                 kfree(bridge);
370                 return -EOPNOTSUPP;
371         }
372
373         dp->bridge = bridge;
374
375         return 0;
376 }
377
378 static void dsa_port_bridge_destroy(struct dsa_port *dp,
379                                     const struct net_device *br)
380 {
381         struct dsa_bridge *bridge = dp->bridge;
382
383         dp->bridge = NULL;
384
385         if (!refcount_dec_and_test(&bridge->refcount))
386                 return;
387
388         if (bridge->num)
389                 dsa_bridge_num_put(br, bridge->num);
390
391         kfree(bridge);
392 }
393
394 static bool dsa_port_supports_mst(struct dsa_port *dp)
395 {
396         struct dsa_switch *ds = dp->ds;
397
398         return ds->ops->vlan_msti_set &&
399                 ds->ops->port_mst_state_set &&
400                 ds->ops->port_vlan_fast_age &&
401                 dsa_port_can_configure_learning(dp);
402 }
403
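/* Joining a bridge: take a reference on (or allocate) the dsa_bridge,
 * broadcast DSA_NOTIFIER_BRIDGE_JOIN to the whole tree, offload the
 * bridge port through switchdev, then sync the bridge port attributes
 * (flags, STP state, VLAN filtering, ageing time). Each step is rolled
 * back if a later one fails.
 */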
404 int dsa_port_bridge_join(struct dsa_port *dp, struct net_device *br,
405                          struct netlink_ext_ack *extack)
406 {
407         struct dsa_notifier_bridge_info info = {
408                 .tree_index = dp->ds->dst->index,
409                 .sw_index = dp->ds->index,
410                 .port = dp->index,
411                 .extack = extack,
412         };
413         struct net_device *dev = dp->slave;
414         struct net_device *brport_dev;
415         int err;
416
417         if (br_mst_enabled(br) && !dsa_port_supports_mst(dp))
418                 return -EOPNOTSUPP;
419
420         /* Here the interface is already bridged. Reflect the current
421          * configuration so that drivers can program their chips accordingly.
422          */
423         err = dsa_port_bridge_create(dp, br, extack);
424         if (err)
425                 return err;
426
427         brport_dev = dsa_port_to_bridge_port(dp);
428
429         info.bridge = *dp->bridge;
430         err = dsa_broadcast(DSA_NOTIFIER_BRIDGE_JOIN, &info);
431         if (err)
432                 goto out_rollback;
433
434         /* Drivers which support bridge TX forwarding should set this */
435         dp->bridge->tx_fwd_offload = info.tx_fwd_offload;
436
437         err = switchdev_bridge_port_offload(brport_dev, dev, dp,
438                                             &dsa_slave_switchdev_notifier,
439                                             &dsa_slave_switchdev_blocking_notifier,
440                                             dp->bridge->tx_fwd_offload, extack);
441         if (err)
442                 goto out_rollback_unbridge;
443
444         err = dsa_port_switchdev_sync_attrs(dp, extack);
445         if (err)
446                 goto out_rollback_unoffload;
447
448         return 0;
449
450 out_rollback_unoffload:
451         switchdev_bridge_port_unoffload(brport_dev, dp,
452                                         &dsa_slave_switchdev_notifier,
453                                         &dsa_slave_switchdev_blocking_notifier);
454 out_rollback_unbridge:
455         dsa_broadcast(DSA_NOTIFIER_BRIDGE_LEAVE, &info);
456 out_rollback:
457         dsa_port_bridge_destroy(dp, br);
458         return err;
459 }
460
461 void dsa_port_pre_bridge_leave(struct dsa_port *dp, struct net_device *br)
462 {
463         struct net_device *brport_dev = dsa_port_to_bridge_port(dp);
464
465         /* Don't try to unoffload something that is not offloaded */
466         if (!brport_dev)
467                 return;
468
469         switchdev_bridge_port_unoffload(brport_dev, dp,
470                                         &dsa_slave_switchdev_notifier,
471                                         &dsa_slave_switchdev_blocking_notifier);
472
473         dsa_flush_workqueue();
474 }
475
476 void dsa_port_bridge_leave(struct dsa_port *dp, struct net_device *br)
477 {
478         struct dsa_notifier_bridge_info info = {
479                 .tree_index = dp->ds->dst->index,
480                 .sw_index = dp->ds->index,
481                 .port = dp->index,
482         };
483         int err;
484
485         /* If the port could not be offloaded to begin with, then
486          * there is nothing to do.
487          */
488         if (!dp->bridge)
489                 return;
490
491         info.bridge = *dp->bridge;
492
493         /* Here the port is already unbridged. Reflect the current configuration
494          * so that drivers can program their chips accordingly.
495          */
496         dsa_port_bridge_destroy(dp, br);
497
498         err = dsa_broadcast(DSA_NOTIFIER_BRIDGE_LEAVE, &info);
499         if (err)
500                 dev_err(dp->ds->dev,
501                         "port %d failed to notify DSA_NOTIFIER_BRIDGE_LEAVE: %pe\n",
502                         dp->index, ERR_PTR(err));
503
504         dsa_port_switchdev_unsync_attrs(dp);
505 }
506
507 int dsa_port_lag_change(struct dsa_port *dp,
508                         struct netdev_lag_lower_state_info *linfo)
509 {
510         struct dsa_notifier_lag_info info = {
511                 .sw_index = dp->ds->index,
512                 .port = dp->index,
513         };
514         bool tx_enabled;
515
516         if (!dp->lag)
517                 return 0;
518
519         /* On statically configured aggregates (e.g. loadbalance
520          * without LACP) ports will always be tx_enabled, even if the
521          * link is down. Thus we require both link_up and tx_enabled
522          * in order to include it in the tx set.
523          */
524         tx_enabled = linfo->link_up && linfo->tx_enabled;
525
526         if (tx_enabled == dp->lag_tx_enabled)
527                 return 0;
528
529         dp->lag_tx_enabled = tx_enabled;
530
531         return dsa_port_notify(dp, DSA_NOTIFIER_LAG_CHANGE, &info);
532 }
533
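/* Look up the dsa_lag already associated with this LAG netdev in the tree
 * and take a reference on it, or allocate a new one, initialize its FDB
 * list and register it with the tree through dsa_lag_map().
 */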
534 static int dsa_port_lag_create(struct dsa_port *dp,
535                                struct net_device *lag_dev)
536 {
537         struct dsa_switch *ds = dp->ds;
538         struct dsa_lag *lag;
539
540         lag = dsa_tree_lag_find(ds->dst, lag_dev);
541         if (lag) {
542                 refcount_inc(&lag->refcount);
543                 dp->lag = lag;
544                 return 0;
545         }
546
547         lag = kzalloc(sizeof(*lag), GFP_KERNEL);
548         if (!lag)
549                 return -ENOMEM;
550
551         refcount_set(&lag->refcount, 1);
552         mutex_init(&lag->fdb_lock);
553         INIT_LIST_HEAD(&lag->fdbs);
554         lag->dev = lag_dev;
555         dsa_lag_map(ds->dst, lag);
556         dp->lag = lag;
557
558         return 0;
559 }
560
561 static void dsa_port_lag_destroy(struct dsa_port *dp)
562 {
563         struct dsa_lag *lag = dp->lag;
564
565         dp->lag = NULL;
566         dp->lag_tx_enabled = false;
567
568         if (!refcount_dec_and_test(&lag->refcount))
569                 return;
570
571         WARN_ON(!list_empty(&lag->fdbs));
572         dsa_lag_unmap(dp->ds->dst, lag);
573         kfree(lag);
574 }
575
576 int dsa_port_lag_join(struct dsa_port *dp, struct net_device *lag_dev,
577                       struct netdev_lag_upper_info *uinfo,
578                       struct netlink_ext_ack *extack)
579 {
580         struct dsa_notifier_lag_info info = {
581                 .sw_index = dp->ds->index,
582                 .port = dp->index,
583                 .info = uinfo,
584         };
585         struct net_device *bridge_dev;
586         int err;
587
588         err = dsa_port_lag_create(dp, lag_dev);
589         if (err)
590                 goto err_lag_create;
591
592         info.lag = *dp->lag;
593         err = dsa_port_notify(dp, DSA_NOTIFIER_LAG_JOIN, &info);
594         if (err)
595                 goto err_lag_join;
596
597         bridge_dev = netdev_master_upper_dev_get(lag_dev);
598         if (!bridge_dev || !netif_is_bridge_master(bridge_dev))
599                 return 0;
600
601         err = dsa_port_bridge_join(dp, bridge_dev, extack);
602         if (err)
603                 goto err_bridge_join;
604
605         return 0;
606
607 err_bridge_join:
608         dsa_port_notify(dp, DSA_NOTIFIER_LAG_LEAVE, &info);
609 err_lag_join:
610         dsa_port_lag_destroy(dp);
611 err_lag_create:
612         return err;
613 }
614
615 void dsa_port_pre_lag_leave(struct dsa_port *dp, struct net_device *lag_dev)
616 {
617         struct net_device *br = dsa_port_bridge_dev_get(dp);
618
619         if (br)
620                 dsa_port_pre_bridge_leave(dp, br);
621 }
622
623 void dsa_port_lag_leave(struct dsa_port *dp, struct net_device *lag_dev)
624 {
625         struct net_device *br = dsa_port_bridge_dev_get(dp);
626         struct dsa_notifier_lag_info info = {
627                 .sw_index = dp->ds->index,
628                 .port = dp->index,
629         };
630         int err;
631
632         if (!dp->lag)
633                 return;
634
635         /* Port might have been part of a LAG that in turn was
636          * attached to a bridge.
637          */
638         if (br)
639                 dsa_port_bridge_leave(dp, br);
640
641         info.lag = *dp->lag;
642
643         dsa_port_lag_destroy(dp);
644
645         err = dsa_port_notify(dp, DSA_NOTIFIER_LAG_LEAVE, &info);
646         if (err)
647                 dev_err(dp->ds->dev,
648                         "port %d failed to notify DSA_NOTIFIER_LAG_LEAVE: %pe\n",
649                         dp->index, ERR_PTR(err));
650 }
651
652 /* Must be called under rcu_read_lock() */
653 static bool dsa_port_can_apply_vlan_filtering(struct dsa_port *dp,
654                                               bool vlan_filtering,
655                                               struct netlink_ext_ack *extack)
656 {
657         struct dsa_switch *ds = dp->ds;
658         struct dsa_port *other_dp;
659         int err;
660
661         /* VLAN awareness was off, so the question is "can we turn it on".
662          * We may have had 8021q uppers, those need to go. Make sure we don't
663          * enter an inconsistent state: deny changing the VLAN awareness state
664          * as long as we have 8021q uppers.
665          */
666         if (vlan_filtering && dsa_port_is_user(dp)) {
667                 struct net_device *br = dsa_port_bridge_dev_get(dp);
668                 struct net_device *upper_dev, *slave = dp->slave;
669                 struct list_head *iter;
670
671                 netdev_for_each_upper_dev_rcu(slave, upper_dev, iter) {
672                         struct bridge_vlan_info br_info;
673                         u16 vid;
674
675                         if (!is_vlan_dev(upper_dev))
676                                 continue;
677
678                         vid = vlan_dev_vlan_id(upper_dev);
679
680                         /* br_vlan_get_info() returns -EINVAL or -ENOENT if the device
681                          * or the VID, respectively, is not found. A return of 0 means
682                          * the VID exists in the bridge, which is a failure for us here.
683                          */
684                         err = br_vlan_get_info(br, vid, &br_info);
685                         if (err == 0) {
686                                 NL_SET_ERR_MSG_MOD(extack,
687                                                    "Must first remove VLAN uppers having VIDs also present in bridge");
688                                 return false;
689                         }
690                 }
691         }
692
693         if (!ds->vlan_filtering_is_global)
694                 return true;
695
696         /* For cases where enabling/disabling VLAN awareness is global to the
697          * switch, we need to handle the case where multiple bridges span
698          * different ports of the same switch device and one of them has a
699          * different setting than what is being requested.
700          */
701         dsa_switch_for_each_port(other_dp, ds) {
702                 struct net_device *other_br = dsa_port_bridge_dev_get(other_dp);
703
704                 /* If it's the same bridge, it also has same
705                  * vlan_filtering setting => no need to check
706                  */
707                 if (!other_br || other_br == dsa_port_bridge_dev_get(dp))
708                         continue;
709
710                 if (br_vlan_enabled(other_br) != vlan_filtering) {
711                         NL_SET_ERR_MSG_MOD(extack,
712                                            "VLAN filtering is a global setting");
713                         return false;
714                 }
715         }
716         return true;
717 }
718
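/* Change the VLAN awareness of a port: verify under RCU that the change
 * is acceptable, program the switch, then update the affected user ports
 * through dsa_slave_manage_vlan_filtering(). When vlan_filtering_is_global,
 * the new setting applies to every user port of the switch; any failure
 * restores the previous setting in hardware and software.
 */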
719 int dsa_port_vlan_filtering(struct dsa_port *dp, bool vlan_filtering,
720                             struct netlink_ext_ack *extack)
721 {
722         bool old_vlan_filtering = dsa_port_is_vlan_filtering(dp);
723         struct dsa_switch *ds = dp->ds;
724         bool apply;
725         int err;
726
727         if (!ds->ops->port_vlan_filtering)
728                 return -EOPNOTSUPP;
729
730         /* We are called from dsa_slave_switchdev_blocking_event(),
731          * which is not under rcu_read_lock(), unlike
732          * dsa_slave_switchdev_event().
733          */
734         rcu_read_lock();
735         apply = dsa_port_can_apply_vlan_filtering(dp, vlan_filtering, extack);
736         rcu_read_unlock();
737         if (!apply)
738                 return -EINVAL;
739
740         if (dsa_port_is_vlan_filtering(dp) == vlan_filtering)
741                 return 0;
742
743         err = ds->ops->port_vlan_filtering(ds, dp->index, vlan_filtering,
744                                            extack);
745         if (err)
746                 return err;
747
748         if (ds->vlan_filtering_is_global) {
749                 struct dsa_port *other_dp;
750
751                 ds->vlan_filtering = vlan_filtering;
752
753                 dsa_switch_for_each_user_port(other_dp, ds) {
754                 struct net_device *slave = other_dp->slave;
755
756                         /* We might be called in the unbind path, so not
757                          * all slave devices might still be registered.
758                          */
759                         if (!slave)
760                                 continue;
761
762                         err = dsa_slave_manage_vlan_filtering(slave,
763                                                               vlan_filtering);
764                         if (err)
765                                 goto restore;
766                 }
767         } else {
768                 dp->vlan_filtering = vlan_filtering;
769
770                 err = dsa_slave_manage_vlan_filtering(dp->slave,
771                                                       vlan_filtering);
772                 if (err)
773                         goto restore;
774         }
775
776         return 0;
777
778 restore:
779         ds->ops->port_vlan_filtering(ds, dp->index, old_vlan_filtering, NULL);
780
781         if (ds->vlan_filtering_is_global)
782                 ds->vlan_filtering = old_vlan_filtering;
783         else
784                 dp->vlan_filtering = old_vlan_filtering;
785
786         return err;
787 }
788
789 /* This enforces legacy behavior for switch drivers which assume they can't
790  * receive VLAN configuration when enslaved to a bridge with vlan_filtering=0
791  */
792 bool dsa_port_skip_vlan_configuration(struct dsa_port *dp)
793 {
794         struct net_device *br = dsa_port_bridge_dev_get(dp);
795         struct dsa_switch *ds = dp->ds;
796
797         if (!br)
798                 return false;
799
800         return !ds->configure_vlan_while_not_filtering && !br_vlan_enabled(br);
801 }
802
803 int dsa_port_ageing_time(struct dsa_port *dp, clock_t ageing_clock)
804 {
805         unsigned long ageing_jiffies = clock_t_to_jiffies(ageing_clock);
806         unsigned int ageing_time = jiffies_to_msecs(ageing_jiffies);
807         struct dsa_notifier_ageing_time_info info;
808         int err;
809
810         info.ageing_time = ageing_time;
811
812         err = dsa_port_notify(dp, DSA_NOTIFIER_AGEING_TIME, &info);
813         if (err)
814                 return err;
815
816         dp->ageing_time = ageing_time;
817
818         return 0;
819 }
820
821 int dsa_port_mst_enable(struct dsa_port *dp, bool on,
822                         struct netlink_ext_ack *extack)
823 {
824         if (on && !dsa_port_supports_mst(dp)) {
825                 NL_SET_ERR_MSG_MOD(extack, "Hardware does not support MST");
826                 return -EINVAL;
827         }
828
829         return 0;
830 }
831
832 int dsa_port_pre_bridge_flags(const struct dsa_port *dp,
833                               struct switchdev_brport_flags flags,
834                               struct netlink_ext_ack *extack)
835 {
836         struct dsa_switch *ds = dp->ds;
837
838         if (!ds->ops->port_pre_bridge_flags)
839                 return -EINVAL;
840
841         return ds->ops->port_pre_bridge_flags(ds, dp->index, flags, extack);
842 }
843
844 int dsa_port_bridge_flags(struct dsa_port *dp,
845                           struct switchdev_brport_flags flags,
846                           struct netlink_ext_ack *extack)
847 {
848         struct dsa_switch *ds = dp->ds;
849         int err;
850
851         if (!ds->ops->port_bridge_flags)
852                 return -EOPNOTSUPP;
853
854         err = ds->ops->port_bridge_flags(ds, dp->index, flags, extack);
855         if (err)
856                 return err;
857
858         if (flags.mask & BR_LEARNING) {
859                 bool learning = flags.val & BR_LEARNING;
860
861                 if (learning == dp->learning)
862                         return 0;
863
864                 if ((dp->learning && !learning) &&
865                     (dp->stp_state == BR_STATE_LEARNING ||
866                      dp->stp_state == BR_STATE_FORWARDING))
867                         dsa_port_fast_age(dp);
868
869                 dp->learning = learning;
870         }
871
872         return 0;
873 }
874
875 int dsa_port_vlan_msti(struct dsa_port *dp,
876                        const struct switchdev_vlan_msti *msti)
877 {
878         struct dsa_switch *ds = dp->ds;
879
880         if (!ds->ops->vlan_msti_set)
881                 return -EOPNOTSUPP;
882
883         return ds->ops->vlan_msti_set(ds, *dp->bridge, msti);
884 }
885
886 int dsa_port_mtu_change(struct dsa_port *dp, int new_mtu,
887                         bool targeted_match)
888 {
889         struct dsa_notifier_mtu_info info = {
890                 .sw_index = dp->ds->index,
891                 .targeted_match = targeted_match,
892                 .port = dp->index,
893                 .mtu = new_mtu,
894         };
895
896         return dsa_port_notify(dp, DSA_NOTIFIER_MTU, &info);
897 }
898
899 int dsa_port_fdb_add(struct dsa_port *dp, const unsigned char *addr,
900                      u16 vid)
901 {
902         struct dsa_notifier_fdb_info info = {
903                 .sw_index = dp->ds->index,
904                 .port = dp->index,
905                 .addr = addr,
906                 .vid = vid,
907                 .db = {
908                         .type = DSA_DB_BRIDGE,
909                         .bridge = *dp->bridge,
910                 },
911         };
912
913         /* Refcounting takes bridge.num as a key, and should be global for all
914          * bridges in the absence of FDB isolation, and per bridge otherwise.
915          * Force the bridge.num to zero here in the absence of FDB isolation.
916          */
917         if (!dp->ds->fdb_isolation)
918                 info.db.bridge.num = 0;
919
920         return dsa_port_notify(dp, DSA_NOTIFIER_FDB_ADD, &info);
921 }
922
923 int dsa_port_fdb_del(struct dsa_port *dp, const unsigned char *addr,
924                      u16 vid)
925 {
926         struct dsa_notifier_fdb_info info = {
927                 .sw_index = dp->ds->index,
928                 .port = dp->index,
929                 .addr = addr,
930                 .vid = vid,
931                 .db = {
932                         .type = DSA_DB_BRIDGE,
933                         .bridge = *dp->bridge,
934                 },
935         };
936
937         if (!dp->ds->fdb_isolation)
938                 info.db.bridge.num = 0;
939
940         return dsa_port_notify(dp, DSA_NOTIFIER_FDB_DEL, &info);
941 }
942
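/* Host FDB entries describe addresses that must reach the host rather
 * than be forwarded out of a user port; they are propagated with the
 * DSA_NOTIFIER_HOST_FDB_* events. The bridge variants below also keep the
 * DSA master's unicast filter in sync so the master accepts those frames.
 */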
943 static int dsa_port_host_fdb_add(struct dsa_port *dp,
944                                  const unsigned char *addr, u16 vid,
945                                  struct dsa_db db)
946 {
947         struct dsa_notifier_fdb_info info = {
948                 .sw_index = dp->ds->index,
949                 .port = dp->index,
950                 .addr = addr,
951                 .vid = vid,
952                 .db = db,
953         };
954
955         if (!dp->ds->fdb_isolation)
956                 info.db.bridge.num = 0;
957
958         return dsa_port_notify(dp, DSA_NOTIFIER_HOST_FDB_ADD, &info);
959 }
960
961 int dsa_port_standalone_host_fdb_add(struct dsa_port *dp,
962                                      const unsigned char *addr, u16 vid)
963 {
964         struct dsa_db db = {
965                 .type = DSA_DB_PORT,
966                 .dp = dp,
967         };
968
969         return dsa_port_host_fdb_add(dp, addr, vid, db);
970 }
971
972 int dsa_port_bridge_host_fdb_add(struct dsa_port *dp,
973                                  const unsigned char *addr, u16 vid)
974 {
975         struct dsa_port *cpu_dp = dp->cpu_dp;
976         struct dsa_db db = {
977                 .type = DSA_DB_BRIDGE,
978                 .bridge = *dp->bridge,
979         };
980         int err;
981
982         /* Avoid a call to __dev_set_promiscuity() on the master, which
983          * requires rtnl_lock(), since we can't guarantee that is held here,
984          * and we can't take it either.
985          */
986         if (cpu_dp->master->priv_flags & IFF_UNICAST_FLT) {
987                 err = dev_uc_add(cpu_dp->master, addr);
988                 if (err)
989                         return err;
990         }
991
992         return dsa_port_host_fdb_add(dp, addr, vid, db);
993 }
994
995 static int dsa_port_host_fdb_del(struct dsa_port *dp,
996                                  const unsigned char *addr, u16 vid,
997                                  struct dsa_db db)
998 {
999         struct dsa_notifier_fdb_info info = {
1000                 .sw_index = dp->ds->index,
1001                 .port = dp->index,
1002                 .addr = addr,
1003                 .vid = vid,
1004                 .db = db,
1005         };
1006
1007         if (!dp->ds->fdb_isolation)
1008                 info.db.bridge.num = 0;
1009
1010         return dsa_port_notify(dp, DSA_NOTIFIER_HOST_FDB_DEL, &info);
1011 }
1012
1013 int dsa_port_standalone_host_fdb_del(struct dsa_port *dp,
1014                                      const unsigned char *addr, u16 vid)
1015 {
1016         struct dsa_db db = {
1017                 .type = DSA_DB_PORT,
1018                 .dp = dp,
1019         };
1020
1021         return dsa_port_host_fdb_del(dp, addr, vid, db);
1022 }
1023
1024 int dsa_port_bridge_host_fdb_del(struct dsa_port *dp,
1025                                  const unsigned char *addr, u16 vid)
1026 {
1027         struct dsa_port *cpu_dp = dp->cpu_dp;
1028         struct dsa_db db = {
1029                 .type = DSA_DB_BRIDGE,
1030                 .bridge = *dp->bridge,
1031         };
1032         int err;
1033
1034         if (cpu_dp->master->priv_flags & IFF_UNICAST_FLT) {
1035                 err = dev_uc_del(cpu_dp->master, addr);
1036                 if (err)
1037                         return err;
1038         }
1039
1040         return dsa_port_host_fdb_del(dp, addr, vid, db);
1041 }
1042
1043 int dsa_port_lag_fdb_add(struct dsa_port *dp, const unsigned char *addr,
1044                          u16 vid)
1045 {
1046         struct dsa_notifier_lag_fdb_info info = {
1047                 .lag = dp->lag,
1048                 .addr = addr,
1049                 .vid = vid,
1050                 .db = {
1051                         .type = DSA_DB_BRIDGE,
1052                         .bridge = *dp->bridge,
1053                 },
1054         };
1055
1056         if (!dp->ds->fdb_isolation)
1057                 info.db.bridge.num = 0;
1058
1059         return dsa_port_notify(dp, DSA_NOTIFIER_LAG_FDB_ADD, &info);
1060 }
1061
1062 int dsa_port_lag_fdb_del(struct dsa_port *dp, const unsigned char *addr,
1063                          u16 vid)
1064 {
1065         struct dsa_notifier_lag_fdb_info info = {
1066                 .lag = dp->lag,
1067                 .addr = addr,
1068                 .vid = vid,
1069                 .db = {
1070                         .type = DSA_DB_BRIDGE,
1071                         .bridge = *dp->bridge,
1072                 },
1073         };
1074
1075         if (!dp->ds->fdb_isolation)
1076                 info.db.bridge.num = 0;
1077
1078         return dsa_port_notify(dp, DSA_NOTIFIER_LAG_FDB_DEL, &info);
1079 }
1080
1081 int dsa_port_fdb_dump(struct dsa_port *dp, dsa_fdb_dump_cb_t *cb, void *data)
1082 {
1083         struct dsa_switch *ds = dp->ds;
1084         int port = dp->index;
1085
1086         if (!ds->ops->port_fdb_dump)
1087                 return -EOPNOTSUPP;
1088
1089         return ds->ops->port_fdb_dump(ds, port, cb, data);
1090 }
1091
1092 int dsa_port_mdb_add(const struct dsa_port *dp,
1093                      const struct switchdev_obj_port_mdb *mdb)
1094 {
1095         struct dsa_notifier_mdb_info info = {
1096                 .sw_index = dp->ds->index,
1097                 .port = dp->index,
1098                 .mdb = mdb,
1099                 .db = {
1100                         .type = DSA_DB_BRIDGE,
1101                         .bridge = *dp->bridge,
1102                 },
1103         };
1104
1105         if (!dp->ds->fdb_isolation)
1106                 info.db.bridge.num = 0;
1107
1108         return dsa_port_notify(dp, DSA_NOTIFIER_MDB_ADD, &info);
1109 }
1110
1111 int dsa_port_mdb_del(const struct dsa_port *dp,
1112                      const struct switchdev_obj_port_mdb *mdb)
1113 {
1114         struct dsa_notifier_mdb_info info = {
1115                 .sw_index = dp->ds->index,
1116                 .port = dp->index,
1117                 .mdb = mdb,
1118                 .db = {
1119                         .type = DSA_DB_BRIDGE,
1120                         .bridge = *dp->bridge,
1121                 },
1122         };
1123
1124         if (!dp->ds->fdb_isolation)
1125                 info.db.bridge.num = 0;
1126
1127         return dsa_port_notify(dp, DSA_NOTIFIER_MDB_DEL, &info);
1128 }
1129
1130 static int dsa_port_host_mdb_add(const struct dsa_port *dp,
1131                                  const struct switchdev_obj_port_mdb *mdb,
1132                                  struct dsa_db db)
1133 {
1134         struct dsa_notifier_mdb_info info = {
1135                 .sw_index = dp->ds->index,
1136                 .port = dp->index,
1137                 .mdb = mdb,
1138                 .db = db,
1139         };
1140
1141         if (!dp->ds->fdb_isolation)
1142                 info.db.bridge.num = 0;
1143
1144         return dsa_port_notify(dp, DSA_NOTIFIER_HOST_MDB_ADD, &info);
1145 }
1146
1147 int dsa_port_standalone_host_mdb_add(const struct dsa_port *dp,
1148                                      const struct switchdev_obj_port_mdb *mdb)
1149 {
1150         struct dsa_db db = {
1151                 .type = DSA_DB_PORT,
1152                 .dp = dp,
1153         };
1154
1155         return dsa_port_host_mdb_add(dp, mdb, db);
1156 }
1157
1158 int dsa_port_bridge_host_mdb_add(const struct dsa_port *dp,
1159                                  const struct switchdev_obj_port_mdb *mdb)
1160 {
1161         struct dsa_port *cpu_dp = dp->cpu_dp;
1162         struct dsa_db db = {
1163                 .type = DSA_DB_BRIDGE,
1164                 .bridge = *dp->bridge,
1165         };
1166         int err;
1167
1168         err = dev_mc_add(cpu_dp->master, mdb->addr);
1169         if (err)
1170                 return err;
1171
1172         return dsa_port_host_mdb_add(dp, mdb, db);
1173 }
1174
1175 static int dsa_port_host_mdb_del(const struct dsa_port *dp,
1176                                  const struct switchdev_obj_port_mdb *mdb,
1177                                  struct dsa_db db)
1178 {
1179         struct dsa_notifier_mdb_info info = {
1180                 .sw_index = dp->ds->index,
1181                 .port = dp->index,
1182                 .mdb = mdb,
1183                 .db = db,
1184         };
1185
1186         if (!dp->ds->fdb_isolation)
1187                 info.db.bridge.num = 0;
1188
1189         return dsa_port_notify(dp, DSA_NOTIFIER_HOST_MDB_DEL, &info);
1190 }
1191
1192 int dsa_port_standalone_host_mdb_del(const struct dsa_port *dp,
1193                                      const struct switchdev_obj_port_mdb *mdb)
1194 {
1195         struct dsa_db db = {
1196                 .type = DSA_DB_PORT,
1197                 .dp = dp,
1198         };
1199
1200         return dsa_port_host_mdb_del(dp, mdb, db);
1201 }
1202
1203 int dsa_port_bridge_host_mdb_del(const struct dsa_port *dp,
1204                                  const struct switchdev_obj_port_mdb *mdb)
1205 {
1206         struct dsa_port *cpu_dp = dp->cpu_dp;
1207         struct dsa_db db = {
1208                 .type = DSA_DB_BRIDGE,
1209                 .bridge = *dp->bridge,
1210         };
1211         int err;
1212
1213         err = dev_mc_del(cpu_dp->master, mdb->addr);
1214         if (err)
1215                 return err;
1216
1217         return dsa_port_host_mdb_del(dp, mdb, db);
1218 }
1219
1220 int dsa_port_vlan_add(struct dsa_port *dp,
1221                       const struct switchdev_obj_port_vlan *vlan,
1222                       struct netlink_ext_ack *extack)
1223 {
1224         struct dsa_notifier_vlan_info info = {
1225                 .sw_index = dp->ds->index,
1226                 .port = dp->index,
1227                 .vlan = vlan,
1228                 .extack = extack,
1229         };
1230
1231         return dsa_port_notify(dp, DSA_NOTIFIER_VLAN_ADD, &info);
1232 }
1233
1234 int dsa_port_vlan_del(struct dsa_port *dp,
1235                       const struct switchdev_obj_port_vlan *vlan)
1236 {
1237         struct dsa_notifier_vlan_info info = {
1238                 .sw_index = dp->ds->index,
1239                 .port = dp->index,
1240                 .vlan = vlan,
1241         };
1242
1243         return dsa_port_notify(dp, DSA_NOTIFIER_VLAN_DEL, &info);
1244 }
1245
1246 int dsa_port_host_vlan_add(struct dsa_port *dp,
1247                            const struct switchdev_obj_port_vlan *vlan,
1248                            struct netlink_ext_ack *extack)
1249 {
1250         struct dsa_notifier_vlan_info info = {
1251                 .sw_index = dp->ds->index,
1252                 .port = dp->index,
1253                 .vlan = vlan,
1254                 .extack = extack,
1255         };
1256         struct dsa_port *cpu_dp = dp->cpu_dp;
1257         int err;
1258
1259         err = dsa_port_notify(dp, DSA_NOTIFIER_HOST_VLAN_ADD, &info);
1260         if (err && err != -EOPNOTSUPP)
1261                 return err;
1262
1263         vlan_vid_add(cpu_dp->master, htons(ETH_P_8021Q), vlan->vid);
1264
1265         return err;
1266 }
1267
1268 int dsa_port_host_vlan_del(struct dsa_port *dp,
1269                            const struct switchdev_obj_port_vlan *vlan)
1270 {
1271         struct dsa_notifier_vlan_info info = {
1272                 .sw_index = dp->ds->index,
1273                 .port = dp->index,
1274                 .vlan = vlan,
1275         };
1276         struct dsa_port *cpu_dp = dp->cpu_dp;
1277         int err;
1278
1279         err = dsa_port_notify(dp, DSA_NOTIFIER_HOST_VLAN_DEL, &info);
1280         if (err && err != -EOPNOTSUPP)
1281                 return err;
1282
1283         vlan_vid_del(cpu_dp->master, htons(ETH_P_8021Q), vlan->vid);
1284
1285         return err;
1286 }
1287
1288 int dsa_port_mrp_add(const struct dsa_port *dp,
1289                      const struct switchdev_obj_mrp *mrp)
1290 {
1291         struct dsa_switch *ds = dp->ds;
1292
1293         if (!ds->ops->port_mrp_add)
1294                 return -EOPNOTSUPP;
1295
1296         return ds->ops->port_mrp_add(ds, dp->index, mrp);
1297 }
1298
1299 int dsa_port_mrp_del(const struct dsa_port *dp,
1300                      const struct switchdev_obj_mrp *mrp)
1301 {
1302         struct dsa_switch *ds = dp->ds;
1303
1304         if (!ds->ops->port_mrp_del)
1305                 return -EOPNOTSUPP;
1306
1307         return ds->ops->port_mrp_del(ds, dp->index, mrp);
1308 }
1309
1310 int dsa_port_mrp_add_ring_role(const struct dsa_port *dp,
1311                                const struct switchdev_obj_ring_role_mrp *mrp)
1312 {
1313         struct dsa_switch *ds = dp->ds;
1314
1315         if (!ds->ops->port_mrp_add_ring_role)
1316                 return -EOPNOTSUPP;
1317
1318         return ds->ops->port_mrp_add_ring_role(ds, dp->index, mrp);
1319 }
1320
1321 int dsa_port_mrp_del_ring_role(const struct dsa_port *dp,
1322                                const struct switchdev_obj_ring_role_mrp *mrp)
1323 {
1324         struct dsa_switch *ds = dp->ds;
1325
1326         if (!ds->ops->port_mrp_del_ring_role)
1327                 return -EOPNOTSUPP;
1328
1329         return ds->ops->port_mrp_del_ring_role(ds, dp->index, mrp);
1330 }
1331
1332 void dsa_port_set_tag_protocol(struct dsa_port *cpu_dp,
1333                                const struct dsa_device_ops *tag_ops)
1334 {
1335         cpu_dp->rcv = tag_ops->rcv;
1336         cpu_dp->tag_ops = tag_ops;
1337 }
1338
1339 static struct phy_device *dsa_port_get_phy_device(struct dsa_port *dp)
1340 {
1341         struct device_node *phy_dn;
1342         struct phy_device *phydev;
1343
1344         phy_dn = of_parse_phandle(dp->dn, "phy-handle", 0);
1345         if (!phy_dn)
1346                 return NULL;
1347
1348         phydev = of_phy_find_device(phy_dn);
1349         if (!phydev) {
1350                 of_node_put(phy_dn);
1351                 return ERR_PTR(-EPROBE_DEFER);
1352         }
1353
1354         of_node_put(phy_dn);
1355         return phydev;
1356 }
1357
1358 static void dsa_port_phylink_validate(struct phylink_config *config,
1359                                       unsigned long *supported,
1360                                       struct phylink_link_state *state)
1361 {
1362         struct dsa_port *dp = container_of(config, struct dsa_port, pl_config);
1363         struct dsa_switch *ds = dp->ds;
1364
1365         if (!ds->ops->phylink_validate) {
1366                 if (config->mac_capabilities)
1367                         phylink_generic_validate(config, supported, state);
1368                 return;
1369         }
1370
1371         ds->ops->phylink_validate(ds, dp->index, supported, state);
1372 }
1373
1374 static void dsa_port_phylink_mac_pcs_get_state(struct phylink_config *config,
1375                                                struct phylink_link_state *state)
1376 {
1377         struct dsa_port *dp = container_of(config, struct dsa_port, pl_config);
1378         struct dsa_switch *ds = dp->ds;
1379         int err;
1380
1381         /* Only called for inband modes */
1382         if (!ds->ops->phylink_mac_link_state) {
1383                 state->link = 0;
1384                 return;
1385         }
1386
1387         err = ds->ops->phylink_mac_link_state(ds, dp->index, state);
1388         if (err < 0) {
1389                 dev_err(ds->dev, "p%d: phylink_mac_link_state() failed: %d\n",
1390                         dp->index, err);
1391                 state->link = 0;
1392         }
1393 }
1394
1395 static struct phylink_pcs *
1396 dsa_port_phylink_mac_select_pcs(struct phylink_config *config,
1397                                 phy_interface_t interface)
1398 {
1399         struct dsa_port *dp = container_of(config, struct dsa_port, pl_config);
1400         struct phylink_pcs *pcs = ERR_PTR(-EOPNOTSUPP);
1401         struct dsa_switch *ds = dp->ds;
1402
1403         if (ds->ops->phylink_mac_select_pcs)
1404                 pcs = ds->ops->phylink_mac_select_pcs(ds, dp->index, interface);
1405
1406         return pcs;
1407 }
1408
1409 static void dsa_port_phylink_mac_config(struct phylink_config *config,
1410                                         unsigned int mode,
1411                                         const struct phylink_link_state *state)
1412 {
1413         struct dsa_port *dp = container_of(config, struct dsa_port, pl_config);
1414         struct dsa_switch *ds = dp->ds;
1415
1416         if (!ds->ops->phylink_mac_config)
1417                 return;
1418
1419         ds->ops->phylink_mac_config(ds, dp->index, mode, state);
1420 }
1421
1422 static void dsa_port_phylink_mac_an_restart(struct phylink_config *config)
1423 {
1424         struct dsa_port *dp = container_of(config, struct dsa_port, pl_config);
1425         struct dsa_switch *ds = dp->ds;
1426
1427         if (!ds->ops->phylink_mac_an_restart)
1428                 return;
1429
1430         ds->ops->phylink_mac_an_restart(ds, dp->index);
1431 }
1432
1433 static void dsa_port_phylink_mac_link_down(struct phylink_config *config,
1434                                            unsigned int mode,
1435                                            phy_interface_t interface)
1436 {
1437         struct dsa_port *dp = container_of(config, struct dsa_port, pl_config);
1438         struct phy_device *phydev = NULL;
1439         struct dsa_switch *ds = dp->ds;
1440
1441         if (dsa_port_is_user(dp))
1442                 phydev = dp->slave->phydev;
1443
1444         if (!ds->ops->phylink_mac_link_down) {
1445                 if (ds->ops->adjust_link && phydev)
1446                         ds->ops->adjust_link(ds, dp->index, phydev);
1447                 return;
1448         }
1449
1450         ds->ops->phylink_mac_link_down(ds, dp->index, mode, interface);
1451 }
1452
1453 static void dsa_port_phylink_mac_link_up(struct phylink_config *config,
1454                                          struct phy_device *phydev,
1455                                          unsigned int mode,
1456                                          phy_interface_t interface,
1457                                          int speed, int duplex,
1458                                          bool tx_pause, bool rx_pause)
1459 {
1460         struct dsa_port *dp = container_of(config, struct dsa_port, pl_config);
1461         struct dsa_switch *ds = dp->ds;
1462
1463         if (!ds->ops->phylink_mac_link_up) {
1464                 if (ds->ops->adjust_link && phydev)
1465                         ds->ops->adjust_link(ds, dp->index, phydev);
1466                 return;
1467         }
1468
1469         ds->ops->phylink_mac_link_up(ds, dp->index, mode, interface, phydev,
1470                                      speed, duplex, tx_pause, rx_pause);
1471 }
1472
1473 static const struct phylink_mac_ops dsa_port_phylink_mac_ops = {
1474         .validate = dsa_port_phylink_validate,
1475         .mac_select_pcs = dsa_port_phylink_mac_select_pcs,
1476         .mac_pcs_get_state = dsa_port_phylink_mac_pcs_get_state,
1477         .mac_config = dsa_port_phylink_mac_config,
1478         .mac_an_restart = dsa_port_phylink_mac_an_restart,
1479         .mac_link_down = dsa_port_phylink_mac_link_down,
1480         .mac_link_up = dsa_port_phylink_mac_link_up,
1481 };
1482
1483 int dsa_port_phylink_create(struct dsa_port *dp)
1484 {
1485         struct dsa_switch *ds = dp->ds;
1486         phy_interface_t mode;
1487         int err;
1488
1489         err = of_get_phy_mode(dp->dn, &mode);
1490         if (err)
1491                 mode = PHY_INTERFACE_MODE_NA;
1492
1493         /* Presence of phylink_mac_link_state or phylink_mac_an_restart is
1494          * an indicator of a legacy phylink driver.
1495          */
1496         if (ds->ops->phylink_mac_link_state ||
1497             ds->ops->phylink_mac_an_restart)
1498                 dp->pl_config.legacy_pre_march2020 = true;
1499
1500         if (ds->ops->phylink_get_caps)
1501                 ds->ops->phylink_get_caps(ds, dp->index, &dp->pl_config);
1502
1503         dp->pl = phylink_create(&dp->pl_config, of_fwnode_handle(dp->dn),
1504                                 mode, &dsa_port_phylink_mac_ops);
1505         if (IS_ERR(dp->pl)) {
1506                 pr_err("error creating PHYLINK: %ld\n", PTR_ERR(dp->pl));
1507                 return PTR_ERR(dp->pl);
1508         }
1509
1510         return 0;
1511 }
1512
1513 static int dsa_port_setup_phy_of(struct dsa_port *dp, bool enable)
1514 {
1515         struct dsa_switch *ds = dp->ds;
1516         struct phy_device *phydev;
1517         int port = dp->index;
1518         int err = 0;
1519
1520         phydev = dsa_port_get_phy_device(dp);
1521         if (!phydev)
1522                 return 0;
1523
1524         if (IS_ERR(phydev))
1525                 return PTR_ERR(phydev);
1526
1527         if (enable) {
1528                 err = genphy_resume(phydev);
1529                 if (err < 0)
1530                         goto err_put_dev;
1531
1532                 err = genphy_read_status(phydev);
1533                 if (err < 0)
1534                         goto err_put_dev;
1535         } else {
1536                 err = genphy_suspend(phydev);
1537                 if (err < 0)
1538                         goto err_put_dev;
1539         }
1540
1541         if (ds->ops->adjust_link)
1542                 ds->ops->adjust_link(ds, port, phydev);
1543
1544         dev_dbg(ds->dev, "enabled port's phy: %s", phydev_name(phydev));
1545
1546 err_put_dev:
1547         put_device(&phydev->mdio.dev);
1548         return err;
1549 }
1550
1551 static int dsa_port_fixed_link_register_of(struct dsa_port *dp)
1552 {
1553         struct device_node *dn = dp->dn;
1554         struct dsa_switch *ds = dp->ds;
1555         struct phy_device *phydev;
1556         int port = dp->index;
1557         phy_interface_t mode;
1558         int err;
1559
1560         err = of_phy_register_fixed_link(dn);
1561         if (err) {
1562                 dev_err(ds->dev,
1563                         "failed to register the fixed PHY of port %d\n",
1564                         port);
1565                 return err;
1566         }
1567
1568         phydev = of_phy_find_device(dn);
1569
1570         err = of_get_phy_mode(dn, &mode);
1571         if (err)
1572                 mode = PHY_INTERFACE_MODE_NA;
1573         phydev->interface = mode;
1574
1575         genphy_read_status(phydev);
1576
1577         if (ds->ops->adjust_link)
1578                 ds->ops->adjust_link(ds, port, phydev);
1579
1580         put_device(&phydev->mdio.dev);
1581
1582         return 0;
1583 }
1584
1585 static int dsa_port_phylink_register(struct dsa_port *dp)
1586 {
1587         struct dsa_switch *ds = dp->ds;
1588         struct device_node *port_dn = dp->dn;
1589         int err;
1590
1591         dp->pl_config.dev = ds->dev;
1592         dp->pl_config.type = PHYLINK_DEV;
1593
1594         err = dsa_port_phylink_create(dp);
1595         if (err)
1596                 return err;
1597
1598         err = phylink_of_phy_connect(dp->pl, port_dn, 0);
1599         if (err && err != -ENODEV) {
1600                 pr_err("could not attach to PHY: %d\n", err);
1601                 goto err_phy_connect;
1602         }
1603
1604         return 0;
1605
1606 err_phy_connect:
1607         phylink_destroy(dp->pl);
1608         return err;
1609 }
1610
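/* Decide how the link of this port is driven: drivers without an
 * adjust_link() op use phylink, registered only when the device tree
 * describes a fixed-link or a phy-handle; drivers still implementing
 * adjust_link() fall back to the legacy PHYLIB paths below.
 */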
1611 int dsa_port_link_register_of(struct dsa_port *dp)
1612 {
1613         struct dsa_switch *ds = dp->ds;
1614         struct device_node *phy_np;
1615         int port = dp->index;
1616
1617         if (!ds->ops->adjust_link) {
1618                 phy_np = of_parse_phandle(dp->dn, "phy-handle", 0);
1619                 if (of_phy_is_fixed_link(dp->dn) || phy_np) {
1620                         if (ds->ops->phylink_mac_link_down)
1621                                 ds->ops->phylink_mac_link_down(ds, port,
1622                                         MLO_AN_FIXED, PHY_INTERFACE_MODE_NA);
1623                         return dsa_port_phylink_register(dp);
1624                 }
1625                 return 0;
1626         }
1627
1628         dev_warn(ds->dev,
1629                  "Using legacy PHYLIB callbacks. Please migrate to PHYLINK!\n");
1630
1631         if (of_phy_is_fixed_link(dp->dn))
1632                 return dsa_port_fixed_link_register_of(dp);
1633         else
1634                 return dsa_port_setup_phy_of(dp, true);
1635 }
1636
1637 void dsa_port_link_unregister_of(struct dsa_port *dp)
1638 {
1639         struct dsa_switch *ds = dp->ds;
1640
1641         if (!ds->ops->adjust_link && dp->pl) {
1642                 rtnl_lock();
1643                 phylink_disconnect_phy(dp->pl);
1644                 rtnl_unlock();
1645                 phylink_destroy(dp->pl);
1646                 dp->pl = NULL;
1647                 return;
1648         }
1649
1650         if (of_phy_is_fixed_link(dp->dn))
1651                 of_phy_deregister_fixed_link(dp->dn);
1652         else
1653                 dsa_port_setup_phy_of(dp, false);
1654 }
1655
1656 int dsa_port_hsr_join(struct dsa_port *dp, struct net_device *hsr)
1657 {
1658         struct dsa_switch *ds = dp->ds;
1659         int err;
1660
1661         if (!ds->ops->port_hsr_join)
1662                 return -EOPNOTSUPP;
1663
1664         dp->hsr_dev = hsr;
1665
1666         err = ds->ops->port_hsr_join(ds, dp->index, hsr);
1667         if (err)
1668                 dp->hsr_dev = NULL;
1669
1670         return err;
1671 }
1672
1673 void dsa_port_hsr_leave(struct dsa_port *dp, struct net_device *hsr)
1674 {
1675         struct dsa_switch *ds = dp->ds;
1676         int err;
1677
1678         dp->hsr_dev = NULL;
1679
1680         if (ds->ops->port_hsr_leave) {
1681                 err = ds->ops->port_hsr_leave(ds, dp->index, hsr);
1682                 if (err)
1683                         dev_err(dp->ds->dev,
1684                                 "port %d failed to leave HSR %s: %pe\n",
1685                                 dp->index, hsr->name, ERR_PTR(err));
1686         }
1687 }
1688
1689 int dsa_port_tag_8021q_vlan_add(struct dsa_port *dp, u16 vid, bool broadcast)
1690 {
1691         struct dsa_notifier_tag_8021q_vlan_info info = {
1692                 .tree_index = dp->ds->dst->index,
1693                 .sw_index = dp->ds->index,
1694                 .port = dp->index,
1695                 .vid = vid,
1696         };
1697
1698         if (broadcast)
1699                 return dsa_broadcast(DSA_NOTIFIER_TAG_8021Q_VLAN_ADD, &info);
1700
1701         return dsa_port_notify(dp, DSA_NOTIFIER_TAG_8021Q_VLAN_ADD, &info);
1702 }
1703
1704 void dsa_port_tag_8021q_vlan_del(struct dsa_port *dp, u16 vid, bool broadcast)
1705 {
1706         struct dsa_notifier_tag_8021q_vlan_info info = {
1707                 .tree_index = dp->ds->dst->index,
1708                 .sw_index = dp->ds->index,
1709                 .port = dp->index,
1710                 .vid = vid,
1711         };
1712         int err;
1713
1714         if (broadcast)
1715                 err = dsa_broadcast(DSA_NOTIFIER_TAG_8021Q_VLAN_DEL, &info);
1716         else
1717                 err = dsa_port_notify(dp, DSA_NOTIFIER_TAG_8021Q_VLAN_DEL, &info);
1718         if (err)
1719                 dev_err(dp->ds->dev,
1720                         "port %d failed to notify tag_8021q VLAN %d deletion: %pe\n",
1721                         dp->index, vid, ERR_PTR(err));
1722 }