]> Git Repo - linux.git/blob - net/dsa/dsa2.c
selftests/harness: Run TEARDOWN for ASSERT failures
[linux.git] / net / dsa / dsa2.c
1 // SPDX-License-Identifier: GPL-2.0-or-later
2 /*
3  * net/dsa/dsa2.c - Hardware switch handling, binding version 2
4  * Copyright (c) 2008-2009 Marvell Semiconductor
5  * Copyright (c) 2013 Florian Fainelli <[email protected]>
6  * Copyright (c) 2016 Andrew Lunn <[email protected]>
7  */
8
9 #include <linux/device.h>
10 #include <linux/err.h>
11 #include <linux/list.h>
12 #include <linux/netdevice.h>
13 #include <linux/slab.h>
14 #include <linux/rtnetlink.h>
15 #include <linux/of.h>
16 #include <linux/of_net.h>
17 #include <net/devlink.h>
18 #include <net/sch_generic.h>
19
20 #include "dsa_priv.h"
21
22 static DEFINE_MUTEX(dsa2_mutex);
23 LIST_HEAD(dsa_tree_list);
24
25 /* Track the bridges with forwarding offload enabled */
26 static unsigned long dsa_fwd_offloading_bridges;
27
28 /**
29  * dsa_tree_notify - Execute code for all switches in a DSA switch tree.
30  * @dst: collection of struct dsa_switch devices to notify.
31  * @e: event, must be of type DSA_NOTIFIER_*
32  * @v: event-specific value.
33  *
34  * Given a struct dsa_switch_tree, this can be used to run a function once for
35  * each member DSA switch. The other alternative of traversing the tree is only
36  * through its ports list, which does not uniquely list the switches.
37  */
38 int dsa_tree_notify(struct dsa_switch_tree *dst, unsigned long e, void *v)
39 {
40         struct raw_notifier_head *nh = &dst->nh;
41         int err;
42
43         err = raw_notifier_call_chain(nh, e, v);
44
45         return notifier_to_errno(err);
46 }
47
48 /**
49  * dsa_broadcast - Notify all DSA trees in the system.
50  * @e: event, must be of type DSA_NOTIFIER_*
51  * @v: event-specific value.
52  *
53  * Can be used to notify the switching fabric of events such as cross-chip
54  * bridging between disjoint trees (such as islands of tagger-compatible
55  * switches bridged by an incompatible middle switch).
56  *
57  * WARNING: this function is not reliable during probe time, because probing
58  * between trees is asynchronous and not all DSA trees might have probed.
59  */
60 int dsa_broadcast(unsigned long e, void *v)
61 {
62         struct dsa_switch_tree *dst;
63         int err = 0;
64
65         list_for_each_entry(dst, &dsa_tree_list, list) {
66                 err = dsa_tree_notify(dst, e, v);
67                 if (err)
68                         break;
69         }
70
71         return err;
72 }
73
74 /**
75  * dsa_lag_map() - Map LAG structure to a linear LAG array
76  * @dst: Tree in which to record the mapping.
77  * @lag: LAG structure that is to be mapped to the tree's array.
78  *
79  * dsa_lag_id/dsa_lag_by_id can then be used to translate between the
80  * two spaces. The size of the mapping space is determined by the
81  * driver by setting ds->num_lag_ids. It is perfectly legal to leave
82  * it unset if it is not needed, in which case these functions become
83  * no-ops.
84  */
85 void dsa_lag_map(struct dsa_switch_tree *dst, struct dsa_lag *lag)
86 {
87         unsigned int id;
88
89         for (id = 1; id <= dst->lags_len; id++) {
90                 if (!dsa_lag_by_id(dst, id)) {
91                         dst->lags[id - 1] = lag;
92                         lag->id = id;
93                         return;
94                 }
95         }
96
97         /* No IDs left, which is OK. Some drivers do not need it. The
98          * ones that do, e.g. mv88e6xxx, will discover that dsa_lag_id
99          * returns an error for this device when joining the LAG. The
100          * driver can then return -EOPNOTSUPP back to DSA, which will
101          * fall back to a software LAG.
102          */
103 }
104
105 /**
106  * dsa_lag_unmap() - Remove a LAG ID mapping
107  * @dst: Tree in which the mapping is recorded.
108  * @lag: LAG structure that was mapped.
109  *
110  * As there may be multiple users of the mapping, it is only removed
111  * if there are no other references to it.
112  */
113 void dsa_lag_unmap(struct dsa_switch_tree *dst, struct dsa_lag *lag)
114 {
115         unsigned int id;
116
117         dsa_lags_foreach_id(id, dst) {
118                 if (dsa_lag_by_id(dst, id) == lag) {
119                         dst->lags[id - 1] = NULL;
120                         lag->id = 0;
121                         break;
122                 }
123         }
124 }
125
126 struct dsa_lag *dsa_tree_lag_find(struct dsa_switch_tree *dst,
127                                   const struct net_device *lag_dev)
128 {
129         struct dsa_port *dp;
130
131         list_for_each_entry(dp, &dst->ports, list)
132                 if (dsa_port_lag_dev_get(dp) == lag_dev)
133                         return dp->lag;
134
135         return NULL;
136 }
137
138 struct dsa_bridge *dsa_tree_bridge_find(struct dsa_switch_tree *dst,
139                                         const struct net_device *br)
140 {
141         struct dsa_port *dp;
142
143         list_for_each_entry(dp, &dst->ports, list)
144                 if (dsa_port_bridge_dev_get(dp) == br)
145                         return dp->bridge;
146
147         return NULL;
148 }
149
150 static int dsa_bridge_num_find(const struct net_device *bridge_dev)
151 {
152         struct dsa_switch_tree *dst;
153
154         list_for_each_entry(dst, &dsa_tree_list, list) {
155                 struct dsa_bridge *bridge;
156
157                 bridge = dsa_tree_bridge_find(dst, bridge_dev);
158                 if (bridge)
159                         return bridge->num;
160         }
161
162         return 0;
163 }
164
165 unsigned int dsa_bridge_num_get(const struct net_device *bridge_dev, int max)
166 {
167         unsigned int bridge_num = dsa_bridge_num_find(bridge_dev);
168
169         /* Switches without FDB isolation support don't get unique
170          * bridge numbering
171          */
172         if (!max)
173                 return 0;
174
175         if (!bridge_num) {
176                 /* First port that requests FDB isolation or TX forwarding
177                  * offload for this bridge
178                  */
179                 bridge_num = find_next_zero_bit(&dsa_fwd_offloading_bridges,
180                                                 DSA_MAX_NUM_OFFLOADING_BRIDGES,
181                                                 1);
182                 if (bridge_num >= max)
183                         return 0;
184
185                 set_bit(bridge_num, &dsa_fwd_offloading_bridges);
186         }
187
188         return bridge_num;
189 }
190
191 void dsa_bridge_num_put(const struct net_device *bridge_dev,
192                         unsigned int bridge_num)
193 {
194         /* Since we refcount bridges, we know that when we call this function
195          * it is no longer in use, so we can just go ahead and remove it from
196          * the bit mask.
197          */
198         clear_bit(bridge_num, &dsa_fwd_offloading_bridges);
199 }
200
201 struct dsa_switch *dsa_switch_find(int tree_index, int sw_index)
202 {
203         struct dsa_switch_tree *dst;
204         struct dsa_port *dp;
205
206         list_for_each_entry(dst, &dsa_tree_list, list) {
207                 if (dst->index != tree_index)
208                         continue;
209
210                 list_for_each_entry(dp, &dst->ports, list) {
211                         if (dp->ds->index != sw_index)
212                                 continue;
213
214                         return dp->ds;
215                 }
216         }
217
218         return NULL;
219 }
220 EXPORT_SYMBOL_GPL(dsa_switch_find);
221
222 static struct dsa_switch_tree *dsa_tree_find(int index)
223 {
224         struct dsa_switch_tree *dst;
225
226         list_for_each_entry(dst, &dsa_tree_list, list)
227                 if (dst->index == index)
228                         return dst;
229
230         return NULL;
231 }
232
233 static struct dsa_switch_tree *dsa_tree_alloc(int index)
234 {
235         struct dsa_switch_tree *dst;
236
237         dst = kzalloc(sizeof(*dst), GFP_KERNEL);
238         if (!dst)
239                 return NULL;
240
241         dst->index = index;
242
243         INIT_LIST_HEAD(&dst->rtable);
244
245         INIT_LIST_HEAD(&dst->ports);
246
247         INIT_LIST_HEAD(&dst->list);
248         list_add_tail(&dst->list, &dsa_tree_list);
249
250         kref_init(&dst->refcount);
251
252         return dst;
253 }
254
255 static void dsa_tree_free(struct dsa_switch_tree *dst)
256 {
257         if (dst->tag_ops)
258                 dsa_tag_driver_put(dst->tag_ops);
259         list_del(&dst->list);
260         kfree(dst);
261 }
262
263 static struct dsa_switch_tree *dsa_tree_get(struct dsa_switch_tree *dst)
264 {
265         if (dst)
266                 kref_get(&dst->refcount);
267
268         return dst;
269 }
270
/* Return a referenced tree with the given index, creating it on first use */
static struct dsa_switch_tree *dsa_tree_touch(int index)
{
	struct dsa_switch_tree *dst = dsa_tree_find(index);

	/* A freshly allocated tree already starts with one reference */
	return dst ? dsa_tree_get(dst) : dsa_tree_alloc(index);
}
281
282 static void dsa_tree_release(struct kref *ref)
283 {
284         struct dsa_switch_tree *dst;
285
286         dst = container_of(ref, struct dsa_switch_tree, refcount);
287
288         dsa_tree_free(dst);
289 }
290
291 static void dsa_tree_put(struct dsa_switch_tree *dst)
292 {
293         if (dst)
294                 kref_put(&dst->refcount, dsa_tree_release);
295 }
296
297 static struct dsa_port *dsa_tree_find_port_by_node(struct dsa_switch_tree *dst,
298                                                    struct device_node *dn)
299 {
300         struct dsa_port *dp;
301
302         list_for_each_entry(dp, &dst->ports, list)
303                 if (dp->dn == dn)
304                         return dp;
305
306         return NULL;
307 }
308
309 static struct dsa_link *dsa_link_touch(struct dsa_port *dp,
310                                        struct dsa_port *link_dp)
311 {
312         struct dsa_switch *ds = dp->ds;
313         struct dsa_switch_tree *dst;
314         struct dsa_link *dl;
315
316         dst = ds->dst;
317
318         list_for_each_entry(dl, &dst->rtable, list)
319                 if (dl->dp == dp && dl->link_dp == link_dp)
320                         return dl;
321
322         dl = kzalloc(sizeof(*dl), GFP_KERNEL);
323         if (!dl)
324                 return NULL;
325
326         dl->dp = dp;
327         dl->link_dp = link_dp;
328
329         INIT_LIST_HEAD(&dl->list);
330         list_add_tail(&dl->list, &dst->rtable);
331
332         return dl;
333 }
334
/* Resolve this DSA port's "link" OF phandles into dsa_link entries in the
 * tree's routing table. Returns false either when a linked port has not been
 * enumerated into the tree yet (routing table still incomplete) or on
 * allocation failure.
 */
static bool dsa_port_setup_routing_table(struct dsa_port *dp)
{
	struct dsa_switch *ds = dp->ds;
	struct dsa_switch_tree *dst = ds->dst;
	struct device_node *dn = dp->dn;
	struct of_phandle_iterator it;
	struct dsa_port *link_dp;
	struct dsa_link *dl;
	int err;

	/* Walk each phandle listed in the port's "link" property */
	of_for_each_phandle(&it, err, dn, "link", NULL, 0) {
		link_dp = dsa_tree_find_port_by_node(dst, it.node);
		if (!link_dp) {
			/* Bailing out of the iterator early: drop the node
			 * reference the iterator is currently holding.
			 */
			of_node_put(it.node);
			return false;
		}

		dl = dsa_link_touch(dp, link_dp);
		if (!dl) {
			of_node_put(it.node);
			return false;
		}
	}

	return true;
}
361
362 static bool dsa_tree_setup_routing_table(struct dsa_switch_tree *dst)
363 {
364         bool complete = true;
365         struct dsa_port *dp;
366
367         list_for_each_entry(dp, &dst->ports, list) {
368                 if (dsa_port_is_dsa(dp)) {
369                         complete = dsa_port_setup_routing_table(dp);
370                         if (!complete)
371                                 break;
372                 }
373         }
374
375         return complete;
376 }
377
378 static struct dsa_port *dsa_tree_find_first_cpu(struct dsa_switch_tree *dst)
379 {
380         struct dsa_port *dp;
381
382         list_for_each_entry(dp, &dst->ports, list)
383                 if (dsa_port_is_cpu(dp))
384                         return dp;
385
386         return NULL;
387 }
388
389 /* Assign the default CPU port (the first one in the tree) to all ports of the
390  * fabric which don't already have one as part of their own switch.
391  */
392 static int dsa_tree_setup_default_cpu(struct dsa_switch_tree *dst)
393 {
394         struct dsa_port *cpu_dp, *dp;
395
396         cpu_dp = dsa_tree_find_first_cpu(dst);
397         if (!cpu_dp) {
398                 pr_err("DSA: tree %d has no CPU port\n", dst->index);
399                 return -EINVAL;
400         }
401
402         list_for_each_entry(dp, &dst->ports, list) {
403                 if (dp->cpu_dp)
404                         continue;
405
406                 if (dsa_port_is_user(dp) || dsa_port_is_dsa(dp))
407                         dp->cpu_dp = cpu_dp;
408         }
409
410         return 0;
411 }
412
413 /* Perform initial assignment of CPU ports to user ports and DSA links in the
414  * fabric, giving preference to CPU ports local to each switch. Default to
415  * using the first CPU port in the switch tree if the port does not have a CPU
416  * port local to this switch.
417  */
418 static int dsa_tree_setup_cpu_ports(struct dsa_switch_tree *dst)
419 {
420         struct dsa_port *cpu_dp, *dp;
421
422         list_for_each_entry(cpu_dp, &dst->ports, list) {
423                 if (!dsa_port_is_cpu(cpu_dp))
424                         continue;
425
426                 /* Prefer a local CPU port */
427                 dsa_switch_for_each_port(dp, cpu_dp->ds) {
428                         /* Prefer the first local CPU port found */
429                         if (dp->cpu_dp)
430                                 continue;
431
432                         if (dsa_port_is_user(dp) || dsa_port_is_dsa(dp))
433                                 dp->cpu_dp = cpu_dp;
434                 }
435         }
436
437         return dsa_tree_setup_default_cpu(dst);
438 }
439
440 static void dsa_tree_teardown_cpu_ports(struct dsa_switch_tree *dst)
441 {
442         struct dsa_port *dp;
443
444         list_for_each_entry(dp, &dst->ports, list)
445                 if (dsa_port_is_user(dp) || dsa_port_is_dsa(dp))
446                         dp->cpu_dp = NULL;
447 }
448
449 static int dsa_port_setup(struct dsa_port *dp)
450 {
451         struct devlink_port *dlp = &dp->devlink_port;
452         bool dsa_port_link_registered = false;
453         struct dsa_switch *ds = dp->ds;
454         bool dsa_port_enabled = false;
455         int err = 0;
456
457         if (dp->setup)
458                 return 0;
459
460         if (ds->ops->port_setup) {
461                 err = ds->ops->port_setup(ds, dp->index);
462                 if (err)
463                         return err;
464         }
465
466         switch (dp->type) {
467         case DSA_PORT_TYPE_UNUSED:
468                 dsa_port_disable(dp);
469                 break;
470         case DSA_PORT_TYPE_CPU:
471                 err = dsa_port_link_register_of(dp);
472                 if (err)
473                         break;
474                 dsa_port_link_registered = true;
475
476                 err = dsa_port_enable(dp, NULL);
477                 if (err)
478                         break;
479                 dsa_port_enabled = true;
480
481                 break;
482         case DSA_PORT_TYPE_DSA:
483                 err = dsa_port_link_register_of(dp);
484                 if (err)
485                         break;
486                 dsa_port_link_registered = true;
487
488                 err = dsa_port_enable(dp, NULL);
489                 if (err)
490                         break;
491                 dsa_port_enabled = true;
492
493                 break;
494         case DSA_PORT_TYPE_USER:
495                 of_get_mac_address(dp->dn, dp->mac);
496                 err = dsa_slave_create(dp);
497                 if (err)
498                         break;
499
500                 devlink_port_type_eth_set(dlp, dp->slave);
501                 break;
502         }
503
504         if (err && dsa_port_enabled)
505                 dsa_port_disable(dp);
506         if (err && dsa_port_link_registered)
507                 dsa_port_link_unregister_of(dp);
508         if (err) {
509                 if (ds->ops->port_teardown)
510                         ds->ops->port_teardown(ds, dp->index);
511                 return err;
512         }
513
514         dp->setup = true;
515
516         return 0;
517 }
518
519 static int dsa_port_devlink_setup(struct dsa_port *dp)
520 {
521         struct devlink_port *dlp = &dp->devlink_port;
522         struct dsa_switch_tree *dst = dp->ds->dst;
523         struct devlink_port_attrs attrs = {};
524         struct devlink *dl = dp->ds->devlink;
525         const unsigned char *id;
526         unsigned char len;
527         int err;
528
529         id = (const unsigned char *)&dst->index;
530         len = sizeof(dst->index);
531
532         attrs.phys.port_number = dp->index;
533         memcpy(attrs.switch_id.id, id, len);
534         attrs.switch_id.id_len = len;
535         memset(dlp, 0, sizeof(*dlp));
536
537         switch (dp->type) {
538         case DSA_PORT_TYPE_UNUSED:
539                 attrs.flavour = DEVLINK_PORT_FLAVOUR_UNUSED;
540                 break;
541         case DSA_PORT_TYPE_CPU:
542                 attrs.flavour = DEVLINK_PORT_FLAVOUR_CPU;
543                 break;
544         case DSA_PORT_TYPE_DSA:
545                 attrs.flavour = DEVLINK_PORT_FLAVOUR_DSA;
546                 break;
547         case DSA_PORT_TYPE_USER:
548                 attrs.flavour = DEVLINK_PORT_FLAVOUR_PHYSICAL;
549                 break;
550         }
551
552         devlink_port_attrs_set(dlp, &attrs);
553         err = devlink_port_register(dl, dlp, dp->index);
554
555         if (!err)
556                 dp->devlink_port_setup = true;
557
558         return err;
559 }
560
561 static void dsa_port_teardown(struct dsa_port *dp)
562 {
563         struct devlink_port *dlp = &dp->devlink_port;
564         struct dsa_switch *ds = dp->ds;
565         struct net_device *slave;
566
567         if (!dp->setup)
568                 return;
569
570         if (ds->ops->port_teardown)
571                 ds->ops->port_teardown(ds, dp->index);
572
573         devlink_port_type_clear(dlp);
574
575         switch (dp->type) {
576         case DSA_PORT_TYPE_UNUSED:
577                 break;
578         case DSA_PORT_TYPE_CPU:
579                 dsa_port_disable(dp);
580                 dsa_port_link_unregister_of(dp);
581                 break;
582         case DSA_PORT_TYPE_DSA:
583                 dsa_port_disable(dp);
584                 dsa_port_link_unregister_of(dp);
585                 break;
586         case DSA_PORT_TYPE_USER:
587                 slave = dp->slave;
588
589                 if (slave) {
590                         dp->slave = NULL;
591                         dsa_slave_destroy(slave);
592                 }
593                 break;
594         }
595
596         dp->setup = false;
597 }
598
599 static void dsa_port_devlink_teardown(struct dsa_port *dp)
600 {
601         struct devlink_port *dlp = &dp->devlink_port;
602
603         if (dp->devlink_port_setup)
604                 devlink_port_unregister(dlp);
605         dp->devlink_port_setup = false;
606 }
607
608 /* Destroy the current devlink port, and create a new one which has the UNUSED
609  * flavour. At this point, any call to ds->ops->port_setup has been already
610  * balanced out by a call to ds->ops->port_teardown, so we know that any
611  * devlink port regions the driver had are now unregistered. We then call its
612  * ds->ops->port_setup again, in order for the driver to re-create them on the
613  * new devlink port.
614  */
615 static int dsa_port_reinit_as_unused(struct dsa_port *dp)
616 {
617         struct dsa_switch *ds = dp->ds;
618         int err;
619
620         dsa_port_devlink_teardown(dp);
621         dp->type = DSA_PORT_TYPE_UNUSED;
622         err = dsa_port_devlink_setup(dp);
623         if (err)
624                 return err;
625
626         if (ds->ops->port_setup) {
627                 /* On error, leave the devlink port registered,
628                  * dsa_switch_teardown will clean it up later.
629                  */
630                 err = ds->ops->port_setup(ds, dp->index);
631                 if (err)
632                         return err;
633         }
634
635         return 0;
636 }
637
638 static int dsa_devlink_info_get(struct devlink *dl,
639                                 struct devlink_info_req *req,
640                                 struct netlink_ext_ack *extack)
641 {
642         struct dsa_switch *ds = dsa_devlink_to_ds(dl);
643
644         if (ds->ops->devlink_info_get)
645                 return ds->ops->devlink_info_get(ds, req, extack);
646
647         return -EOPNOTSUPP;
648 }
649
650 static int dsa_devlink_sb_pool_get(struct devlink *dl,
651                                    unsigned int sb_index, u16 pool_index,
652                                    struct devlink_sb_pool_info *pool_info)
653 {
654         struct dsa_switch *ds = dsa_devlink_to_ds(dl);
655
656         if (!ds->ops->devlink_sb_pool_get)
657                 return -EOPNOTSUPP;
658
659         return ds->ops->devlink_sb_pool_get(ds, sb_index, pool_index,
660                                             pool_info);
661 }
662
663 static int dsa_devlink_sb_pool_set(struct devlink *dl, unsigned int sb_index,
664                                    u16 pool_index, u32 size,
665                                    enum devlink_sb_threshold_type threshold_type,
666                                    struct netlink_ext_ack *extack)
667 {
668         struct dsa_switch *ds = dsa_devlink_to_ds(dl);
669
670         if (!ds->ops->devlink_sb_pool_set)
671                 return -EOPNOTSUPP;
672
673         return ds->ops->devlink_sb_pool_set(ds, sb_index, pool_index, size,
674                                             threshold_type, extack);
675 }
676
677 static int dsa_devlink_sb_port_pool_get(struct devlink_port *dlp,
678                                         unsigned int sb_index, u16 pool_index,
679                                         u32 *p_threshold)
680 {
681         struct dsa_switch *ds = dsa_devlink_port_to_ds(dlp);
682         int port = dsa_devlink_port_to_port(dlp);
683
684         if (!ds->ops->devlink_sb_port_pool_get)
685                 return -EOPNOTSUPP;
686
687         return ds->ops->devlink_sb_port_pool_get(ds, port, sb_index,
688                                                  pool_index, p_threshold);
689 }
690
691 static int dsa_devlink_sb_port_pool_set(struct devlink_port *dlp,
692                                         unsigned int sb_index, u16 pool_index,
693                                         u32 threshold,
694                                         struct netlink_ext_ack *extack)
695 {
696         struct dsa_switch *ds = dsa_devlink_port_to_ds(dlp);
697         int port = dsa_devlink_port_to_port(dlp);
698
699         if (!ds->ops->devlink_sb_port_pool_set)
700                 return -EOPNOTSUPP;
701
702         return ds->ops->devlink_sb_port_pool_set(ds, port, sb_index,
703                                                  pool_index, threshold, extack);
704 }
705
706 static int
707 dsa_devlink_sb_tc_pool_bind_get(struct devlink_port *dlp,
708                                 unsigned int sb_index, u16 tc_index,
709                                 enum devlink_sb_pool_type pool_type,
710                                 u16 *p_pool_index, u32 *p_threshold)
711 {
712         struct dsa_switch *ds = dsa_devlink_port_to_ds(dlp);
713         int port = dsa_devlink_port_to_port(dlp);
714
715         if (!ds->ops->devlink_sb_tc_pool_bind_get)
716                 return -EOPNOTSUPP;
717
718         return ds->ops->devlink_sb_tc_pool_bind_get(ds, port, sb_index,
719                                                     tc_index, pool_type,
720                                                     p_pool_index, p_threshold);
721 }
722
723 static int
724 dsa_devlink_sb_tc_pool_bind_set(struct devlink_port *dlp,
725                                 unsigned int sb_index, u16 tc_index,
726                                 enum devlink_sb_pool_type pool_type,
727                                 u16 pool_index, u32 threshold,
728                                 struct netlink_ext_ack *extack)
729 {
730         struct dsa_switch *ds = dsa_devlink_port_to_ds(dlp);
731         int port = dsa_devlink_port_to_port(dlp);
732
733         if (!ds->ops->devlink_sb_tc_pool_bind_set)
734                 return -EOPNOTSUPP;
735
736         return ds->ops->devlink_sb_tc_pool_bind_set(ds, port, sb_index,
737                                                     tc_index, pool_type,
738                                                     pool_index, threshold,
739                                                     extack);
740 }
741
742 static int dsa_devlink_sb_occ_snapshot(struct devlink *dl,
743                                        unsigned int sb_index)
744 {
745         struct dsa_switch *ds = dsa_devlink_to_ds(dl);
746
747         if (!ds->ops->devlink_sb_occ_snapshot)
748                 return -EOPNOTSUPP;
749
750         return ds->ops->devlink_sb_occ_snapshot(ds, sb_index);
751 }
752
753 static int dsa_devlink_sb_occ_max_clear(struct devlink *dl,
754                                         unsigned int sb_index)
755 {
756         struct dsa_switch *ds = dsa_devlink_to_ds(dl);
757
758         if (!ds->ops->devlink_sb_occ_max_clear)
759                 return -EOPNOTSUPP;
760
761         return ds->ops->devlink_sb_occ_max_clear(ds, sb_index);
762 }
763
764 static int dsa_devlink_sb_occ_port_pool_get(struct devlink_port *dlp,
765                                             unsigned int sb_index,
766                                             u16 pool_index, u32 *p_cur,
767                                             u32 *p_max)
768 {
769         struct dsa_switch *ds = dsa_devlink_port_to_ds(dlp);
770         int port = dsa_devlink_port_to_port(dlp);
771
772         if (!ds->ops->devlink_sb_occ_port_pool_get)
773                 return -EOPNOTSUPP;
774
775         return ds->ops->devlink_sb_occ_port_pool_get(ds, port, sb_index,
776                                                      pool_index, p_cur, p_max);
777 }
778
779 static int
780 dsa_devlink_sb_occ_tc_port_bind_get(struct devlink_port *dlp,
781                                     unsigned int sb_index, u16 tc_index,
782                                     enum devlink_sb_pool_type pool_type,
783                                     u32 *p_cur, u32 *p_max)
784 {
785         struct dsa_switch *ds = dsa_devlink_port_to_ds(dlp);
786         int port = dsa_devlink_port_to_port(dlp);
787
788         if (!ds->ops->devlink_sb_occ_tc_port_bind_get)
789                 return -EOPNOTSUPP;
790
791         return ds->ops->devlink_sb_occ_tc_port_bind_get(ds, port,
792                                                         sb_index, tc_index,
793                                                         pool_type, p_cur,
794                                                         p_max);
795 }
796
/* devlink operations common to all DSA switches. Every callback is a thin
 * trampoline into the corresponding dsa_switch_ops hook, returning
 * -EOPNOTSUPP when the driver does not implement it.
 */
static const struct devlink_ops dsa_devlink_ops = {
	.info_get			= dsa_devlink_info_get,
	.sb_pool_get			= dsa_devlink_sb_pool_get,
	.sb_pool_set			= dsa_devlink_sb_pool_set,
	.sb_port_pool_get		= dsa_devlink_sb_port_pool_get,
	.sb_port_pool_set		= dsa_devlink_sb_port_pool_set,
	.sb_tc_pool_bind_get		= dsa_devlink_sb_tc_pool_bind_get,
	.sb_tc_pool_bind_set		= dsa_devlink_sb_tc_pool_bind_set,
	.sb_occ_snapshot		= dsa_devlink_sb_occ_snapshot,
	.sb_occ_max_clear		= dsa_devlink_sb_occ_max_clear,
	.sb_occ_port_pool_get		= dsa_devlink_sb_occ_port_pool_get,
	.sb_occ_tc_port_bind_get	= dsa_devlink_sb_occ_tc_port_bind_get,
};
810
/* Apply the tree's tagging protocol to this switch's CPU ports, then let both
 * the tagger and the driver establish any protocol-specific state. Returns 0
 * on success or a negative errno.
 */
static int dsa_switch_setup_tag_protocol(struct dsa_switch *ds)
{
	const struct dsa_device_ops *tag_ops = ds->dst->tag_ops;
	struct dsa_switch_tree *dst = ds->dst;
	struct dsa_port *cpu_dp;
	int err;

	/* No hardware reconfiguration needed if the tree already uses the
	 * switch's default protocol.
	 */
	if (tag_ops->proto == dst->default_proto)
		goto connect;

	dsa_switch_for_each_cpu_port(cpu_dp, ds) {
		/* change_tag_protocol is called under rtnl_lock here */
		rtnl_lock();
		err = ds->ops->change_tag_protocol(ds, cpu_dp->index,
						   tag_ops->proto);
		rtnl_unlock();
		if (err) {
			dev_err(ds->dev, "Unable to use tag protocol \"%s\": %pe\n",
				tag_ops->name, ERR_PTR(err));
			return err;
		}
	}

connect:
	/* Let the tagger allocate its per-switch private state, if any */
	if (tag_ops->connect) {
		err = tag_ops->connect(ds);
		if (err)
			return err;
	}

	/* Let the driver pair up with the tagger; on failure, undo the
	 * tagger connection made just above.
	 */
	if (ds->ops->connect_tag_protocol) {
		err = ds->ops->connect_tag_protocol(ds, tag_ops->proto);
		if (err) {
			dev_err(ds->dev,
				"Unable to connect to tag protocol \"%s\": %pe\n",
				tag_ops->name, ERR_PTR(err));
			goto disconnect;
		}
	}

	return 0;

disconnect:
	if (tag_ops->disconnect)
		tag_ops->disconnect(ds);

	return err;
}
858
/* Bring up one member switch of the tree: allocate and populate its devlink
 * instance and per-port devlink ports, register its notifier, run the
 * driver's setup, apply the tagging protocol, and optionally create a slave
 * MII bus. Errors unwind everything in reverse order via the goto ladder.
 */
static int dsa_switch_setup(struct dsa_switch *ds)
{
	struct dsa_devlink_priv *dl_priv;
	struct dsa_port *dp;
	int err;

	/* Idempotent: a switch already set up is left alone */
	if (ds->setup)
		return 0;

	/* Initialize ds->phys_mii_mask before registering the slave MDIO bus
	 * driver and before ops->setup() has run, since the switch drivers and
	 * the slave MDIO bus driver rely on these values for probing PHY
	 * devices or not
	 */
	ds->phys_mii_mask |= dsa_user_ports(ds);

	/* Add the switch to devlink before calling setup, so that setup can
	 * add dpipe tables
	 */
	ds->devlink =
		devlink_alloc(&dsa_devlink_ops, sizeof(*dl_priv), ds->dev);
	if (!ds->devlink)
		return -ENOMEM;
	dl_priv = devlink_priv(ds->devlink);
	dl_priv->ds = ds;

	/* Setup devlink port instances now, so that the switch
	 * setup() can register regions etc, against the ports
	 */
	dsa_switch_for_each_port(dp, ds) {
		err = dsa_port_devlink_setup(dp);
		if (err)
			goto unregister_devlink_ports;
	}

	err = dsa_switch_register_notifier(ds);
	if (err)
		goto unregister_devlink_ports;

	ds->configure_vlan_while_not_filtering = true;

	err = ds->ops->setup(ds);
	if (err < 0)
		goto unregister_notifier;

	err = dsa_switch_setup_tag_protocol(ds);
	if (err)
		goto teardown;

	/* Drivers with a phy_read op but no MDIO bus of their own get a
	 * DSA-provided slave MII bus.
	 */
	if (!ds->slave_mii_bus && ds->ops->phy_read) {
		ds->slave_mii_bus = mdiobus_alloc();
		if (!ds->slave_mii_bus) {
			err = -ENOMEM;
			goto teardown;
		}

		dsa_slave_mii_bus_init(ds);

		err = mdiobus_register(ds->slave_mii_bus);
		if (err < 0)
			goto free_slave_mii_bus;
	}

	ds->setup = true;
	/* Publish to devlink last, once the switch is fully operational */
	devlink_register(ds->devlink);
	return 0;

free_slave_mii_bus:
	if (ds->slave_mii_bus && ds->ops->phy_read)
		mdiobus_free(ds->slave_mii_bus);
teardown:
	if (ds->ops->teardown)
		ds->ops->teardown(ds);
unregister_notifier:
	dsa_switch_unregister_notifier(ds);
unregister_devlink_ports:
	/* dsa_port_devlink_teardown is a no-op for ports never set up */
	dsa_switch_for_each_port(dp, ds)
		dsa_port_devlink_teardown(dp);
	devlink_free(ds->devlink);
	ds->devlink = NULL;
	return err;
}
941
/* Reverse of dsa_switch_setup(): unregister devlink, the ad-hoc slave
 * MDIO bus (if one was created here), run the driver's ->teardown(),
 * unregister the switch notifier and the per-port devlink ports.
 * Idempotent via ds->setup.
 */
static void dsa_switch_teardown(struct dsa_switch *ds)
{
        struct dsa_port *dp;

        if (!ds->setup)
                return;

        if (ds->devlink)
                devlink_unregister(ds->devlink);

        /* Only buses allocated by dsa_switch_setup() are freed here */
        if (ds->slave_mii_bus && ds->ops->phy_read) {
                mdiobus_unregister(ds->slave_mii_bus);
                mdiobus_free(ds->slave_mii_bus);
                ds->slave_mii_bus = NULL;
        }

        if (ds->ops->teardown)
                ds->ops->teardown(ds);

        dsa_switch_unregister_notifier(ds);

        if (ds->devlink) {
                dsa_switch_for_each_port(dp, ds)
                        dsa_port_devlink_teardown(dp);
                devlink_free(ds->devlink);
                ds->devlink = NULL;
        }

        ds->setup = false;
}
972
973 /* First tear down the non-shared, then the shared ports. This ensures that
974  * all work items scheduled by our switchdev handlers for user ports have
975  * completed before we destroy the refcounting kept on the shared ports.
976  */
static void dsa_tree_teardown_ports(struct dsa_switch_tree *dst)
{
        struct dsa_port *dp;

        /* Non-shared (user/unused) ports first, per the ordering rule above */
        list_for_each_entry(dp, &dst->ports, list)
                if (dsa_port_is_user(dp) || dsa_port_is_unused(dp))
                        dsa_port_teardown(dp);

        /* Wait for pending switchdev work items scheduled on behalf of the
         * user ports before the shared (DSA/CPU) ports go away.
         */
        dsa_flush_workqueue();

        list_for_each_entry(dp, &dst->ports, list)
                if (dsa_port_is_dsa(dp) || dsa_port_is_cpu(dp))
                        dsa_port_teardown(dp);
}
991
992 static void dsa_tree_teardown_switches(struct dsa_switch_tree *dst)
993 {
994         struct dsa_port *dp;
995
996         list_for_each_entry(dp, &dst->ports, list)
997                 dsa_switch_teardown(dp->ds);
998 }
999
1000 /* Bring shared ports up first, then non-shared ports */
static int dsa_tree_setup_ports(struct dsa_switch_tree *dst)
{
        struct dsa_port *dp;
        int err = 0;

        /* Shared (DSA and CPU) ports are set up before user/unused ports */
        list_for_each_entry(dp, &dst->ports, list) {
                if (dsa_port_is_dsa(dp) || dsa_port_is_cpu(dp)) {
                        err = dsa_port_setup(dp);
                        if (err)
                                goto teardown;
                }
        }

        list_for_each_entry(dp, &dst->ports, list) {
                if (dsa_port_is_user(dp) || dsa_port_is_unused(dp)) {
                        err = dsa_port_setup(dp);
                        if (err) {
                                /* A failing user port is not fatal for the
                                 * tree: demote it to unused and carry on,
                                 * only bailing out if even that fails.
                                 */
                                err = dsa_port_reinit_as_unused(dp);
                                if (err)
                                        goto teardown;
                        }
                }
        }

        return 0;

teardown:
        dsa_tree_teardown_ports(dst);

        return err;
}
1032
1033 static int dsa_tree_setup_switches(struct dsa_switch_tree *dst)
1034 {
1035         struct dsa_port *dp;
1036         int err = 0;
1037
1038         list_for_each_entry(dp, &dst->ports, list) {
1039                 err = dsa_switch_setup(dp->ds);
1040                 if (err) {
1041                         dsa_tree_teardown_switches(dst);
1042                         break;
1043                 }
1044         }
1045
1046         return err;
1047 }
1048
/* Set up the DSA master device of each CPU port in the tree, then
 * replay the master's current administrative and operational state
 * towards the switches, since the master may already be up by the time
 * we attach to it.
 */
static int dsa_tree_setup_master(struct dsa_switch_tree *dst)
{
        struct dsa_port *dp;
        int err = 0;

        rtnl_lock();

        list_for_each_entry(dp, &dst->ports, list) {
                if (dsa_port_is_cpu(dp)) {
                        struct net_device *master = dp->master;
                        /* "Admin up" here means IFF_UP with a real
                         * (non-noop) qdisc attached to the device.
                         */
                        bool admin_up = (master->flags & IFF_UP) &&
                                        !qdisc_tx_is_noop(master);

                        err = dsa_master_setup(master, dp);
                        if (err)
                                break;

                        /* Replay master state event */
                        dsa_tree_master_admin_state_change(dst, master, admin_up);
                        dsa_tree_master_oper_state_change(dst, master,
                                                          netif_oper_up(master));
                }
        }

        rtnl_unlock();

        return err;
}
1077
1078 static void dsa_tree_teardown_master(struct dsa_switch_tree *dst)
1079 {
1080         struct dsa_port *dp;
1081
1082         rtnl_lock();
1083
1084         list_for_each_entry(dp, &dst->ports, list) {
1085                 if (dsa_port_is_cpu(dp)) {
1086                         struct net_device *master = dp->master;
1087
1088                         /* Synthesizing an "admin down" state is sufficient for
1089                          * the switches to get a notification if the master is
1090                          * currently up and running.
1091                          */
1092                         dsa_tree_master_admin_state_change(dst, master, false);
1093
1094                         dsa_master_teardown(master);
1095                 }
1096         }
1097
1098         rtnl_unlock();
1099 }
1100
1101 static int dsa_tree_setup_lags(struct dsa_switch_tree *dst)
1102 {
1103         unsigned int len = 0;
1104         struct dsa_port *dp;
1105
1106         list_for_each_entry(dp, &dst->ports, list) {
1107                 if (dp->ds->num_lag_ids > len)
1108                         len = dp->ds->num_lag_ids;
1109         }
1110
1111         if (!len)
1112                 return 0;
1113
1114         dst->lags = kcalloc(len, sizeof(*dst->lags), GFP_KERNEL);
1115         if (!dst->lags)
1116                 return -ENOMEM;
1117
1118         dst->lags_len = len;
1119         return 0;
1120 }
1121
/* Free the tree-level LAG array; kfree(NULL) is a no-op, so this is
 * safe even when dsa_tree_setup_lags() never allocated one.
 */
static void dsa_tree_teardown_lags(struct dsa_switch_tree *dst)
{
        kfree(dst->lags);
}
1126
/* Set up a switch tree: CPU ports, switches, masters, ports, then LAG
 * bookkeeping. Returns 0 without doing anything while the routing table
 * is still incomplete, -EEXIST if the tree is already set up, or a
 * negative errno with everything unwound on failure.
 */
static int dsa_tree_setup(struct dsa_switch_tree *dst)
{
        bool complete;
        int err;

        if (dst->setup) {
                pr_err("DSA: tree %d already setup! Disjoint trees?\n",
                       dst->index);
                return -EEXIST;
        }

        complete = dsa_tree_setup_routing_table(dst);
        if (!complete)
                return 0;

        err = dsa_tree_setup_cpu_ports(dst);
        if (err)
                return err;

        err = dsa_tree_setup_switches(dst);
        if (err)
                goto teardown_cpu_ports;

        err = dsa_tree_setup_master(dst);
        if (err)
                goto teardown_switches;

        err = dsa_tree_setup_ports(dst);
        if (err)
                goto teardown_master;

        err = dsa_tree_setup_lags(dst);
        if (err)
                goto teardown_ports;

        dst->setup = true;

        pr_info("DSA: tree %d setup\n", dst->index);

        return 0;

teardown_ports:
        dsa_tree_teardown_ports(dst);
teardown_master:
        dsa_tree_teardown_master(dst);
teardown_switches:
        dsa_tree_teardown_switches(dst);
teardown_cpu_ports:
        dsa_tree_teardown_cpu_ports(dst);

        return err;
}
1179
/* Reverse of dsa_tree_setup(), in reverse order of setup: LAGs, ports,
 * masters, switches, CPU ports, then the routing table links.
 * Idempotent via dst->setup.
 */
static void dsa_tree_teardown(struct dsa_switch_tree *dst)
{
        struct dsa_link *dl, *next;

        if (!dst->setup)
                return;

        dsa_tree_teardown_lags(dst);

        dsa_tree_teardown_ports(dst);

        dsa_tree_teardown_master(dst);

        dsa_tree_teardown_switches(dst);

        dsa_tree_teardown_cpu_ports(dst);

        /* Free the routing table links built during tree setup */
        list_for_each_entry_safe(dl, next, &dst->rtable, list) {
                list_del(&dl->list);
                kfree(dl);
        }

        pr_info("DSA: tree %d torn down\n", dst->index);

        dst->setup = false;
}
1206
/* Make @tag_ops the tree's tagger: notify the member switches of the
 * connection to the new tagger (tolerating -EOPNOTSUPP), then notify
 * them of the disconnection from the old one. On failure, the new
 * tagger is disconnected again and dst->tag_ops is restored.
 */
static int dsa_tree_bind_tag_proto(struct dsa_switch_tree *dst,
                                   const struct dsa_device_ops *tag_ops)
{
        const struct dsa_device_ops *old_tag_ops = dst->tag_ops;
        struct dsa_notifier_tag_proto_info info;
        int err;

        dst->tag_ops = tag_ops;

        /* Notify the switches from this tree about the connection
         * to the new tagger
         */
        info.tag_ops = tag_ops;
        err = dsa_tree_notify(dst, DSA_NOTIFIER_TAG_PROTO_CONNECT, &info);
        if (err && err != -EOPNOTSUPP)
                goto out_disconnect;

        /* Notify the old tagger about the disconnection from this tree */
        info.tag_ops = old_tag_ops;
        dsa_tree_notify(dst, DSA_NOTIFIER_TAG_PROTO_DISCONNECT, &info);

        return 0;

out_disconnect:
        info.tag_ops = tag_ops;
        dsa_tree_notify(dst, DSA_NOTIFIER_TAG_PROTO_DISCONNECT, &info);
        dst->tag_ops = old_tag_ops;

        return err;
}
1237
1238 /* Since the dsa/tagging sysfs device attribute is per master, the assumption
1239  * is that all DSA switches within a tree share the same tagger, otherwise
1240  * they would have formed disjoint trees (different "dsa,member" values).
1241  */
int dsa_tree_change_tag_proto(struct dsa_switch_tree *dst,
                              struct net_device *master,
                              const struct dsa_device_ops *tag_ops,
                              const struct dsa_device_ops *old_tag_ops)
{
        struct dsa_notifier_tag_proto_info info;
        struct dsa_port *dp;
        int err = -EBUSY;

        /* If rtnl is contended, have the caller restart the syscall
         * rather than blocking on the lock here.
         */
        if (!rtnl_trylock())
                return restart_syscall();

        /* At the moment we don't allow changing the tag protocol under
         * traffic. The rtnl_mutex also happens to serialize concurrent
         * attempts to change the tagging protocol. If we ever lift the IFF_UP
         * restriction, there needs to be another mutex which serializes this.
         */
        if (master->flags & IFF_UP)
                goto out_unlock;

        /* All user ports must be down as well, -EBUSY otherwise */
        list_for_each_entry(dp, &dst->ports, list) {
                if (!dsa_port_is_user(dp))
                        continue;

                if (dp->slave->flags & IFF_UP)
                        goto out_unlock;
        }

        /* Notify the tag protocol change */
        info.tag_ops = tag_ops;
        err = dsa_tree_notify(dst, DSA_NOTIFIER_TAG_PROTO, &info);
        if (err)
                goto out_unwind_tagger;

        err = dsa_tree_bind_tag_proto(dst, tag_ops);
        if (err)
                goto out_unwind_tagger;

        rtnl_unlock();

        return 0;

out_unwind_tagger:
        /* Roll the switches back to the old protocol */
        info.tag_ops = old_tag_ops;
        dsa_tree_notify(dst, DSA_NOTIFIER_TAG_PROTO, &info);
out_unlock:
        rtnl_unlock();
        return err;
}
1291
1292 static void dsa_tree_master_state_change(struct dsa_switch_tree *dst,
1293                                          struct net_device *master)
1294 {
1295         struct dsa_notifier_master_state_info info;
1296         struct dsa_port *cpu_dp = master->dsa_ptr;
1297
1298         info.master = master;
1299         info.operational = dsa_port_master_is_operational(cpu_dp);
1300
1301         dsa_tree_notify(dst, DSA_NOTIFIER_MASTER_STATE_CHANGE, &info);
1302 }
1303
1304 void dsa_tree_master_admin_state_change(struct dsa_switch_tree *dst,
1305                                         struct net_device *master,
1306                                         bool up)
1307 {
1308         struct dsa_port *cpu_dp = master->dsa_ptr;
1309         bool notify = false;
1310
1311         if ((dsa_port_master_is_operational(cpu_dp)) !=
1312             (up && cpu_dp->master_oper_up))
1313                 notify = true;
1314
1315         cpu_dp->master_admin_up = up;
1316
1317         if (notify)
1318                 dsa_tree_master_state_change(dst, master);
1319 }
1320
1321 void dsa_tree_master_oper_state_change(struct dsa_switch_tree *dst,
1322                                        struct net_device *master,
1323                                        bool up)
1324 {
1325         struct dsa_port *cpu_dp = master->dsa_ptr;
1326         bool notify = false;
1327
1328         if ((dsa_port_master_is_operational(cpu_dp)) !=
1329             (cpu_dp->master_admin_up && up))
1330                 notify = true;
1331
1332         cpu_dp->master_oper_up = up;
1333
1334         if (notify)
1335                 dsa_tree_master_state_change(dst, master);
1336 }
1337
/* Return the dsa_port with index @index of switch @ds, allocating it,
 * initializing its locks/lists and linking it into the tree's port list
 * on first use. Returns NULL on allocation failure.
 */
static struct dsa_port *dsa_port_touch(struct dsa_switch *ds, int index)
{
        struct dsa_switch_tree *dst = ds->dst;
        struct dsa_port *dp;

        /* Reuse the port if it was already touched */
        dsa_switch_for_each_port(dp, ds)
                if (dp->index == index)
                        return dp;

        dp = kzalloc(sizeof(*dp), GFP_KERNEL);
        if (!dp)
                return NULL;

        dp->ds = ds;
        dp->index = index;

        mutex_init(&dp->addr_lists_lock);
        mutex_init(&dp->vlans_lock);
        INIT_LIST_HEAD(&dp->fdbs);
        INIT_LIST_HEAD(&dp->mdbs);
        INIT_LIST_HEAD(&dp->vlans);
        INIT_LIST_HEAD(&dp->list);
        list_add_tail(&dp->list, &dst->ports);

        return dp;
}
1364
1365 static int dsa_port_parse_user(struct dsa_port *dp, const char *name)
1366 {
1367         if (!name)
1368                 name = "eth%d";
1369
1370         dp->type = DSA_PORT_TYPE_USER;
1371         dp->name = name;
1372
1373         return 0;
1374 }
1375
/* Mark @dp as a DSA (switch-to-switch) link port */
static int dsa_port_parse_dsa(struct dsa_port *dp)
{
        dp->type = DSA_PORT_TYPE_DSA;

        return 0;
}
1382
/* Query the switch driver's preferred tagging protocol for port @dp.
 * When @master is itself a DSA user port (stacked switches), the
 * upstream switch's own preference is queried first and passed down
 * as a hint to the driver.
 */
static enum dsa_tag_protocol dsa_get_tag_protocol(struct dsa_port *dp,
                                                  struct net_device *master)
{
        enum dsa_tag_protocol tag_protocol = DSA_TAG_PROTO_NONE;
        struct dsa_switch *mds, *ds = dp->ds;
        unsigned int mdp_upstream;
        struct dsa_port *mdp;

        /* It is possible to stack DSA switches onto one another when that
         * happens the switch driver may want to know if its tagging protocol
         * is going to work in such a configuration.
         */
        if (dsa_slave_dev_check(master)) {
                mdp = dsa_slave_to_port(master);
                mds = mdp->ds;
                mdp_upstream = dsa_upstream_port(mds, mdp->index);
                tag_protocol = mds->ops->get_tag_protocol(mds, mdp_upstream,
                                                          DSA_TAG_PROTO_NONE);
        }

        /* If the master device is not itself a DSA slave in a disjoint DSA
         * tree, then return immediately.
         */
        return ds->ops->get_tag_protocol(ds, dp->index, tag_protocol);
}
1408
/* Parse @dp as a CPU port connected to DSA master @master. Resolve the
 * tree's tagging protocol from the switch driver's preference, with an
 * optional user override (@user_protocol, from the device tree), and
 * enforce that all switches of a tree agree on a single tagging
 * protocol and a single default protocol.
 */
static int dsa_port_parse_cpu(struct dsa_port *dp, struct net_device *master,
                              const char *user_protocol)
{
        struct dsa_switch *ds = dp->ds;
        struct dsa_switch_tree *dst = ds->dst;
        const struct dsa_device_ops *tag_ops;
        enum dsa_tag_protocol default_proto;

        /* Find out which protocol the switch would prefer. */
        default_proto = dsa_get_tag_protocol(dp, master);
        if (dst->default_proto) {
                if (dst->default_proto != default_proto) {
                        dev_err(ds->dev,
                                "A DSA switch tree can have only one tagging protocol\n");
                        return -EINVAL;
                }
        } else {
                dst->default_proto = default_proto;
        }

        /* See if the user wants to override that preference. */
        if (user_protocol) {
                if (!ds->ops->change_tag_protocol) {
                        dev_err(ds->dev, "Tag protocol cannot be modified\n");
                        return -EINVAL;
                }

                tag_ops = dsa_find_tagger_by_name(user_protocol);
        } else {
                tag_ops = dsa_tag_driver_get(default_proto);
        }

        if (IS_ERR(tag_ops)) {
                /* The tagger module may simply not be loaded yet */
                if (PTR_ERR(tag_ops) == -ENOPROTOOPT)
                        return -EPROBE_DEFER;

                dev_warn(ds->dev, "No tagger for this switch\n");
                return PTR_ERR(tag_ops);
        }

        if (dst->tag_ops) {
                if (dst->tag_ops != tag_ops) {
                        dev_err(ds->dev,
                                "A DSA switch tree can have only one tagging protocol\n");

                        dsa_tag_driver_put(tag_ops);
                        return -EINVAL;
                }

                /* In the case of multiple CPU ports per switch, the tagging
                 * protocol is still reference-counted only per switch tree.
                 */
                dsa_tag_driver_put(tag_ops);
        } else {
                dst->tag_ops = tag_ops;
        }

        dp->master = master;
        dp->type = DSA_PORT_TYPE_CPU;
        dsa_port_set_tag_protocol(dp, dst->tag_ops);
        dp->dst = dst;

        /* At this point, the tree may be configured to use a different
         * tagger than the one chosen by the switch driver during
         * .setup, in the case when a user selects a custom protocol
         * through the DT.
         *
         * This is resolved by syncing the driver with the tree in
         * dsa_switch_setup_tag_protocol once .setup has run and the
         * driver is ready to accept calls to .change_tag_protocol. If
         * the driver does not support the custom protocol at that
         * point, the tree is wholly rejected, thereby ensuring that the
         * tree and driver are always in agreement on the protocol to
         * use.
         */
        return 0;
}
1486
/* Classify a port from its device tree node: an "ethernet" phandle
 * makes it a CPU port, a "link" property makes it a DSA link, anything
 * else is a user port, optionally named by its "label" property.
 */
static int dsa_port_parse_of(struct dsa_port *dp, struct device_node *dn)
{
        struct device_node *ethernet = of_parse_phandle(dn, "ethernet", 0);
        const char *name = of_get_property(dn, "label", NULL);
        bool link = of_property_read_bool(dn, "link");

        dp->dn = dn;

        if (ethernet) {
                struct net_device *master;
                const char *user_protocol;

                master = of_find_net_device_by_node(ethernet);
                of_node_put(ethernet);
                /* The master may not have probed yet; retry later */
                if (!master)
                        return -EPROBE_DEFER;

                user_protocol = of_get_property(dn, "dsa-tag-protocol", NULL);
                return dsa_port_parse_cpu(dp, master, user_protocol);
        }

        if (link)
                return dsa_port_parse_dsa(dp);

        return dsa_port_parse_user(dp, name);
}
1513
/* Parse all available children of the "ports" (or, alternatively,
 * "ethernet-ports") node of @dn, validating each child's "reg" index
 * against ds->num_ports before classifying the port.
 */
static int dsa_switch_parse_ports_of(struct dsa_switch *ds,
                                     struct device_node *dn)
{
        struct device_node *ports, *port;
        struct dsa_port *dp;
        int err = 0;
        u32 reg;

        ports = of_get_child_by_name(dn, "ports");
        if (!ports) {
                /* The second possibility is "ethernet-ports" */
                ports = of_get_child_by_name(dn, "ethernet-ports");
                if (!ports) {
                        dev_err(ds->dev, "no ports child node found\n");
                        return -EINVAL;
                }
        }

        for_each_available_child_of_node(ports, port) {
                err = of_property_read_u32(port, "reg", &reg);
                if (err) {
                        /* Drop the iterator's reference on early exit */
                        of_node_put(port);
                        goto out_put_node;
                }

                if (reg >= ds->num_ports) {
                        dev_err(ds->dev, "port %pOF index %u exceeds num_ports (%u)\n",
                                port, reg, ds->num_ports);
                        of_node_put(port);
                        err = -EINVAL;
                        goto out_put_node;
                }

                dp = dsa_to_port(ds, reg);

                err = dsa_port_parse_of(dp, port);
                if (err) {
                        of_node_put(port);
                        goto out_put_node;
                }
        }

out_put_node:
        of_node_put(ports);
        return err;
}
1560
/* Read the optional "dsa,member" property (<tree index, switch index>)
 * of @dn, defaulting to <0, 0> when absent, and attach @ds to its tree.
 * Fails with -EEXIST when a switch with the same index already exists
 * in that tree.
 */
static int dsa_switch_parse_member_of(struct dsa_switch *ds,
                                      struct device_node *dn)
{
        u32 m[2] = { 0, 0 };
        int sz;

        /* Don't error out if this optional property isn't found */
        sz = of_property_read_variable_u32_array(dn, "dsa,member", m, 2, 2);
        if (sz < 0 && sz != -EINVAL)
                return sz;

        ds->index = m[1];

        ds->dst = dsa_tree_touch(m[0]);
        if (!ds->dst)
                return -ENOMEM;

        if (dsa_switch_find(ds->dst->index, ds->index)) {
                dev_err(ds->dev,
                        "A DSA switch with index %d already exists in tree %d\n",
                        ds->index, ds->dst->index);
                return -EEXIST;
        }

        /* Remember the highest switch index seen in this tree */
        if (ds->dst->last_switch < ds->index)
                ds->dst->last_switch = ds->index;

        return 0;
}
1590
1591 static int dsa_switch_touch_ports(struct dsa_switch *ds)
1592 {
1593         struct dsa_port *dp;
1594         int port;
1595
1596         for (port = 0; port < ds->num_ports; port++) {
1597                 dp = dsa_port_touch(ds, port);
1598                 if (!dp)
1599                         return -ENOMEM;
1600         }
1601
1602         return 0;
1603 }
1604
/* Parse a switch from its device tree node: tree membership first,
 * then port allocation, then per-port classification.
 */
static int dsa_switch_parse_of(struct dsa_switch *ds, struct device_node *dn)
{
        int err;

        err = dsa_switch_parse_member_of(ds, dn);
        if (err)
                return err;

        err = dsa_switch_touch_ports(ds);
        if (err)
                return err;

        return dsa_switch_parse_ports_of(ds, dn);
}
1619
/* Platform-data counterpart of dsa_port_parse_of(): classify a port by
 * its name, with "cpu" and "dsa" being reserved names.
 */
static int dsa_port_parse(struct dsa_port *dp, const char *name,
                          struct device *dev)
{
        if (!strcmp(name, "cpu")) {
                struct net_device *master;

                master = dsa_dev_to_net_device(dev);
                if (!master)
                        return -EPROBE_DEFER;

                /* NOTE(review): @master keeps being used after the
                 * reference taken by dsa_dev_to_net_device() is dropped
                 * here — presumably relying on the master not being able
                 * to unregister at this point; confirm.
                 */
                dev_put(master);

                return dsa_port_parse_cpu(dp, master, NULL);
        }

        if (!strcmp(name, "dsa"))
                return dsa_port_parse_dsa(dp);

        return dsa_port_parse_user(dp, name);
}
1640
1641 static int dsa_switch_parse_ports(struct dsa_switch *ds,
1642                                   struct dsa_chip_data *cd)
1643 {
1644         bool valid_name_found = false;
1645         struct dsa_port *dp;
1646         struct device *dev;
1647         const char *name;
1648         unsigned int i;
1649         int err;
1650
1651         for (i = 0; i < DSA_MAX_PORTS; i++) {
1652                 name = cd->port_names[i];
1653                 dev = cd->netdev[i];
1654                 dp = dsa_to_port(ds, i);
1655
1656                 if (!name)
1657                         continue;
1658
1659                 err = dsa_port_parse(dp, name, dev);
1660                 if (err)
1661                         return err;
1662
1663                 valid_name_found = true;
1664         }
1665
1666         if (!valid_name_found && i == DSA_MAX_PORTS)
1667                 return -EINVAL;
1668
1669         return 0;
1670 }
1671
/* Platform-data counterpart of dsa_switch_parse_of() */
static int dsa_switch_parse(struct dsa_switch *ds, struct dsa_chip_data *cd)
{
        int err;

        ds->cd = cd;

        /* We don't support interconnected switches nor multiple trees via
         * platform data, so this is the unique switch of the tree.
         */
        ds->index = 0;
        ds->dst = dsa_tree_touch(0);
        if (!ds->dst)
                return -ENOMEM;

        err = dsa_switch_touch_ports(ds);
        if (err)
                return err;

        return dsa_switch_parse_ports(ds, cd);
}
1692
/* Free every port of @ds and unlink it from the tree's port list. The
 * WARN_ONs flag FDB/MDB/VLAN entries that leaked through teardown.
 */
static void dsa_switch_release_ports(struct dsa_switch *ds)
{
        struct dsa_port *dp, *next;

        dsa_switch_for_each_port_safe(dp, next, ds) {
                WARN_ON(!list_empty(&dp->fdbs));
                WARN_ON(!list_empty(&dp->mdbs));
                WARN_ON(!list_empty(&dp->vlans));
                list_del(&dp->list);
                kfree(dp);
        }
}
1705
/* Probe one switch: parse its ports from the device tree or from
 * platform data, then take a reference on its tree and attempt to set
 * the tree up. Ports allocated during parsing are released again on
 * any failure.
 */
static int dsa_switch_probe(struct dsa_switch *ds)
{
        struct dsa_switch_tree *dst;
        struct dsa_chip_data *pdata;
        struct device_node *np;
        int err;

        if (!ds->dev)
                return -ENODEV;

        pdata = ds->dev->platform_data;
        np = ds->dev->of_node;

        if (!ds->num_ports)
                return -EINVAL;

        /* Device tree takes precedence over platform data */
        if (np) {
                err = dsa_switch_parse_of(ds, np);
                if (err)
                        dsa_switch_release_ports(ds);
        } else if (pdata) {
                err = dsa_switch_parse(ds, pdata);
                if (err)
                        dsa_switch_release_ports(ds);
        } else {
                err = -ENODEV;
        }

        if (err)
                return err;

        dst = ds->dst;
        dsa_tree_get(dst);
        err = dsa_tree_setup(dst);
        if (err) {
                dsa_switch_release_ports(ds);
                dsa_tree_put(dst);
        }

        return err;
}
1747
/**
 * dsa_register_switch - register a switch with the DSA core
 * @ds: switch to register, with ds->dev and ds->num_ports filled in
 *
 * Serialized against other registrations and unregistrations by
 * dsa2_mutex. Returns 0 or a negative errno from dsa_switch_probe().
 */
int dsa_register_switch(struct dsa_switch *ds)
{
        int err;

        mutex_lock(&dsa2_mutex);
        err = dsa_switch_probe(ds);
        dsa_tree_put(ds->dst);
        mutex_unlock(&dsa2_mutex);

        return err;
}
EXPORT_SYMBOL_GPL(dsa_register_switch);
1760
/* Tear down the tree of @ds, release its ports and drop the tree
 * reference. Called with dsa2_mutex held.
 */
static void dsa_switch_remove(struct dsa_switch *ds)
{
        struct dsa_switch_tree *dst = ds->dst;

        dsa_tree_teardown(dst);
        dsa_switch_release_ports(ds);
        dsa_tree_put(dst);
}
1769
/**
 * dsa_unregister_switch - unregister a switch from the DSA core
 * @ds: switch to unregister
 *
 * Counterpart of dsa_register_switch(), serialized by dsa2_mutex.
 */
void dsa_unregister_switch(struct dsa_switch *ds)
{
        mutex_lock(&dsa2_mutex);
        dsa_switch_remove(ds);
        mutex_unlock(&dsa2_mutex);
}
EXPORT_SYMBOL_GPL(dsa_unregister_switch);
1777
1778 /* If the DSA master chooses to unregister its net_device on .shutdown, DSA is
1779  * blocking that operation from completion, due to the dev_hold taken inside
1780  * netdev_upper_dev_link. Unlink the DSA slave interfaces from being uppers of
1781  * the DSA master, so that the system can reboot successfully.
1782  */
void dsa_switch_shutdown(struct dsa_switch *ds)
{
        struct net_device *master, *slave_dev;
        struct dsa_port *dp;

        mutex_lock(&dsa2_mutex);

        /* Nothing to undo if the switch never finished setting up */
        if (!ds->setup)
                goto out;

        rtnl_lock();

        /* Drop the upper/lower links so the dev_hold taken by
         * netdev_upper_dev_link no longer blocks the master's
         * unregistration.
         */
        dsa_switch_for_each_user_port(dp, ds) {
                master = dp->cpu_dp->master;
                slave_dev = dp->slave;

                netdev_upper_dev_unlink(master, slave_dev);
        }

        /* Disconnect from further netdevice notifiers on the master,
         * since netdev_uses_dsa() will now return false.
         */
        dsa_switch_for_each_cpu_port(dp, ds)
                dp->master->dsa_ptr = NULL;

        rtnl_unlock();
out:
        mutex_unlock(&dsa2_mutex);
}
EXPORT_SYMBOL_GPL(dsa_switch_shutdown);
This page took 0.134917 seconds and 4 git commands to generate.