/* Source: linux.git — net/dsa/dsa2.c (gitweb extraction header repaired) */
1 // SPDX-License-Identifier: GPL-2.0-or-later
2 /*
3  * net/dsa/dsa2.c - Hardware switch handling, binding version 2
4  * Copyright (c) 2008-2009 Marvell Semiconductor
5  * Copyright (c) 2013 Florian Fainelli <[email protected]>
6  * Copyright (c) 2016 Andrew Lunn <[email protected]>
7  */
8
9 #include <linux/device.h>
10 #include <linux/err.h>
11 #include <linux/list.h>
12 #include <linux/netdevice.h>
13 #include <linux/slab.h>
14 #include <linux/rtnetlink.h>
15 #include <linux/of.h>
16 #include <linux/of_mdio.h>
17 #include <linux/of_net.h>
18 #include <net/devlink.h>
19 #include <net/sch_generic.h>
20
21 #include "dsa_priv.h"
22
23 static DEFINE_MUTEX(dsa2_mutex);
24 LIST_HEAD(dsa_tree_list);
25
26 /* Track the bridges with forwarding offload enabled */
27 static unsigned long dsa_fwd_offloading_bridges;
28
29 /**
30  * dsa_tree_notify - Execute code for all switches in a DSA switch tree.
31  * @dst: collection of struct dsa_switch devices to notify.
32  * @e: event, must be of type DSA_NOTIFIER_*
33  * @v: event-specific value.
34  *
35  * Given a struct dsa_switch_tree, this can be used to run a function once for
36  * each member DSA switch. The other alternative of traversing the tree is only
37  * through its ports list, which does not uniquely list the switches.
38  */
39 int dsa_tree_notify(struct dsa_switch_tree *dst, unsigned long e, void *v)
40 {
41         struct raw_notifier_head *nh = &dst->nh;
42         int err;
43
44         err = raw_notifier_call_chain(nh, e, v);
45
46         return notifier_to_errno(err);
47 }
48
49 /**
50  * dsa_broadcast - Notify all DSA trees in the system.
51  * @e: event, must be of type DSA_NOTIFIER_*
52  * @v: event-specific value.
53  *
54  * Can be used to notify the switching fabric of events such as cross-chip
55  * bridging between disjoint trees (such as islands of tagger-compatible
56  * switches bridged by an incompatible middle switch).
57  *
58  * WARNING: this function is not reliable during probe time, because probing
59  * between trees is asynchronous and not all DSA trees might have probed.
60  */
61 int dsa_broadcast(unsigned long e, void *v)
62 {
63         struct dsa_switch_tree *dst;
64         int err = 0;
65
66         list_for_each_entry(dst, &dsa_tree_list, list) {
67                 err = dsa_tree_notify(dst, e, v);
68                 if (err)
69                         break;
70         }
71
72         return err;
73 }
74
75 /**
76  * dsa_lag_map() - Map LAG structure to a linear LAG array
77  * @dst: Tree in which to record the mapping.
78  * @lag: LAG structure that is to be mapped to the tree's array.
79  *
80  * dsa_lag_id/dsa_lag_by_id can then be used to translate between the
81  * two spaces. The size of the mapping space is determined by the
82  * driver by setting ds->num_lag_ids. It is perfectly legal to leave
83  * it unset if it is not needed, in which case these functions become
84  * no-ops.
85  */
86 void dsa_lag_map(struct dsa_switch_tree *dst, struct dsa_lag *lag)
87 {
88         unsigned int id;
89
90         for (id = 1; id <= dst->lags_len; id++) {
91                 if (!dsa_lag_by_id(dst, id)) {
92                         dst->lags[id - 1] = lag;
93                         lag->id = id;
94                         return;
95                 }
96         }
97
98         /* No IDs left, which is OK. Some drivers do not need it. The
99          * ones that do, e.g. mv88e6xxx, will discover that dsa_lag_id
100          * returns an error for this device when joining the LAG. The
101          * driver can then return -EOPNOTSUPP back to DSA, which will
102          * fall back to a software LAG.
103          */
104 }
105
106 /**
107  * dsa_lag_unmap() - Remove a LAG ID mapping
108  * @dst: Tree in which the mapping is recorded.
109  * @lag: LAG structure that was mapped.
110  *
111  * As there may be multiple users of the mapping, it is only removed
112  * if there are no other references to it.
113  */
114 void dsa_lag_unmap(struct dsa_switch_tree *dst, struct dsa_lag *lag)
115 {
116         unsigned int id;
117
118         dsa_lags_foreach_id(id, dst) {
119                 if (dsa_lag_by_id(dst, id) == lag) {
120                         dst->lags[id - 1] = NULL;
121                         lag->id = 0;
122                         break;
123                 }
124         }
125 }
126
127 struct dsa_lag *dsa_tree_lag_find(struct dsa_switch_tree *dst,
128                                   const struct net_device *lag_dev)
129 {
130         struct dsa_port *dp;
131
132         list_for_each_entry(dp, &dst->ports, list)
133                 if (dsa_port_lag_dev_get(dp) == lag_dev)
134                         return dp->lag;
135
136         return NULL;
137 }
138
139 struct dsa_bridge *dsa_tree_bridge_find(struct dsa_switch_tree *dst,
140                                         const struct net_device *br)
141 {
142         struct dsa_port *dp;
143
144         list_for_each_entry(dp, &dst->ports, list)
145                 if (dsa_port_bridge_dev_get(dp) == br)
146                         return dp->bridge;
147
148         return NULL;
149 }
150
151 static int dsa_bridge_num_find(const struct net_device *bridge_dev)
152 {
153         struct dsa_switch_tree *dst;
154
155         list_for_each_entry(dst, &dsa_tree_list, list) {
156                 struct dsa_bridge *bridge;
157
158                 bridge = dsa_tree_bridge_find(dst, bridge_dev);
159                 if (bridge)
160                         return bridge->num;
161         }
162
163         return 0;
164 }
165
/* Return the forwarding-offload bridge number for @bridge_dev, allocating a
 * new one from the global dsa_fwd_offloading_bridges bitmap if this is the
 * first request. Returns 0 when @max is 0 or when no free number below
 * @max is available.
 */
unsigned int dsa_bridge_num_get(const struct net_device *bridge_dev, int max)
{
	unsigned int bridge_num = dsa_bridge_num_find(bridge_dev);

	/* Switches without FDB isolation support don't get unique
	 * bridge numbering
	 */
	if (!max)
		return 0;

	if (!bridge_num) {
		/* First port that requests FDB isolation or TX forwarding
		 * offload for this bridge
		 */
		bridge_num = find_next_zero_bit(&dsa_fwd_offloading_bridges,
						DSA_MAX_NUM_OFFLOADING_BRIDGES,
						1);
		/* Bit 0 is skipped above, so 0 can act as "no number" */
		if (bridge_num >= max)
			return 0;

		set_bit(bridge_num, &dsa_fwd_offloading_bridges);
	}

	return bridge_num;
}
191
/* Release @bridge_num back into the global forwarding-offload bitmap.
 * @bridge_dev is currently unused; kept for symmetry with
 * dsa_bridge_num_get().
 */
void dsa_bridge_num_put(const struct net_device *bridge_dev,
			unsigned int bridge_num)
{
	/* Since we refcount bridges, we know that when we call this function
	 * it is no longer in use, so we can just go ahead and remove it from
	 * the bit mask.
	 */
	clear_bit(bridge_num, &dsa_fwd_offloading_bridges);
}
201
202 struct dsa_switch *dsa_switch_find(int tree_index, int sw_index)
203 {
204         struct dsa_switch_tree *dst;
205         struct dsa_port *dp;
206
207         list_for_each_entry(dst, &dsa_tree_list, list) {
208                 if (dst->index != tree_index)
209                         continue;
210
211                 list_for_each_entry(dp, &dst->ports, list) {
212                         if (dp->ds->index != sw_index)
213                                 continue;
214
215                         return dp->ds;
216                 }
217         }
218
219         return NULL;
220 }
221 EXPORT_SYMBOL_GPL(dsa_switch_find);
222
223 static struct dsa_switch_tree *dsa_tree_find(int index)
224 {
225         struct dsa_switch_tree *dst;
226
227         list_for_each_entry(dst, &dsa_tree_list, list)
228                 if (dst->index == index)
229                         return dst;
230
231         return NULL;
232 }
233
234 static struct dsa_switch_tree *dsa_tree_alloc(int index)
235 {
236         struct dsa_switch_tree *dst;
237
238         dst = kzalloc(sizeof(*dst), GFP_KERNEL);
239         if (!dst)
240                 return NULL;
241
242         dst->index = index;
243
244         INIT_LIST_HEAD(&dst->rtable);
245
246         INIT_LIST_HEAD(&dst->ports);
247
248         INIT_LIST_HEAD(&dst->list);
249         list_add_tail(&dst->list, &dsa_tree_list);
250
251         kref_init(&dst->refcount);
252
253         return dst;
254 }
255
/* Final destructor for a tree: drop the tagger reference (if one was ever
 * assigned), unlink it from dsa_tree_list and free the memory.
 */
static void dsa_tree_free(struct dsa_switch_tree *dst)
{
	if (dst->tag_ops)
		dsa_tag_driver_put(dst->tag_ops);
	list_del(&dst->list);
	kfree(dst);
}
263
264 static struct dsa_switch_tree *dsa_tree_get(struct dsa_switch_tree *dst)
265 {
266         if (dst)
267                 kref_get(&dst->refcount);
268
269         return dst;
270 }
271
/* Get a reference on the tree with index @index, creating it on first use */
static struct dsa_switch_tree *dsa_tree_touch(int index)
{
	struct dsa_switch_tree *dst = dsa_tree_find(index);

	return dst ? dsa_tree_get(dst) : dsa_tree_alloc(index);
}
282
283 static void dsa_tree_release(struct kref *ref)
284 {
285         struct dsa_switch_tree *dst;
286
287         dst = container_of(ref, struct dsa_switch_tree, refcount);
288
289         dsa_tree_free(dst);
290 }
291
292 static void dsa_tree_put(struct dsa_switch_tree *dst)
293 {
294         if (dst)
295                 kref_put(&dst->refcount, dsa_tree_release);
296 }
297
298 static struct dsa_port *dsa_tree_find_port_by_node(struct dsa_switch_tree *dst,
299                                                    struct device_node *dn)
300 {
301         struct dsa_port *dp;
302
303         list_for_each_entry(dp, &dst->ports, list)
304                 if (dp->dn == dn)
305                         return dp;
306
307         return NULL;
308 }
309
310 static struct dsa_link *dsa_link_touch(struct dsa_port *dp,
311                                        struct dsa_port *link_dp)
312 {
313         struct dsa_switch *ds = dp->ds;
314         struct dsa_switch_tree *dst;
315         struct dsa_link *dl;
316
317         dst = ds->dst;
318
319         list_for_each_entry(dl, &dst->rtable, list)
320                 if (dl->dp == dp && dl->link_dp == link_dp)
321                         return dl;
322
323         dl = kzalloc(sizeof(*dl), GFP_KERNEL);
324         if (!dl)
325                 return NULL;
326
327         dl->dp = dp;
328         dl->link_dp = link_dp;
329
330         INIT_LIST_HEAD(&dl->list);
331         list_add_tail(&dl->list, &dst->rtable);
332
333         return dl;
334 }
335
336 static bool dsa_port_setup_routing_table(struct dsa_port *dp)
337 {
338         struct dsa_switch *ds = dp->ds;
339         struct dsa_switch_tree *dst = ds->dst;
340         struct device_node *dn = dp->dn;
341         struct of_phandle_iterator it;
342         struct dsa_port *link_dp;
343         struct dsa_link *dl;
344         int err;
345
346         of_for_each_phandle(&it, err, dn, "link", NULL, 0) {
347                 link_dp = dsa_tree_find_port_by_node(dst, it.node);
348                 if (!link_dp) {
349                         of_node_put(it.node);
350                         return false;
351                 }
352
353                 dl = dsa_link_touch(dp, link_dp);
354                 if (!dl) {
355                         of_node_put(it.node);
356                         return false;
357                 }
358         }
359
360         return true;
361 }
362
363 static bool dsa_tree_setup_routing_table(struct dsa_switch_tree *dst)
364 {
365         bool complete = true;
366         struct dsa_port *dp;
367
368         list_for_each_entry(dp, &dst->ports, list) {
369                 if (dsa_port_is_dsa(dp)) {
370                         complete = dsa_port_setup_routing_table(dp);
371                         if (!complete)
372                                 break;
373                 }
374         }
375
376         return complete;
377 }
378
379 static struct dsa_port *dsa_tree_find_first_cpu(struct dsa_switch_tree *dst)
380 {
381         struct dsa_port *dp;
382
383         list_for_each_entry(dp, &dst->ports, list)
384                 if (dsa_port_is_cpu(dp))
385                         return dp;
386
387         return NULL;
388 }
389
390 struct net_device *dsa_tree_find_first_master(struct dsa_switch_tree *dst)
391 {
392         struct device_node *ethernet;
393         struct net_device *master;
394         struct dsa_port *cpu_dp;
395
396         cpu_dp = dsa_tree_find_first_cpu(dst);
397         ethernet = of_parse_phandle(cpu_dp->dn, "ethernet", 0);
398         master = of_find_net_device_by_node(ethernet);
399         of_node_put(ethernet);
400
401         return master;
402 }
403
404 /* Assign the default CPU port (the first one in the tree) to all ports of the
405  * fabric which don't already have one as part of their own switch.
406  */
407 static int dsa_tree_setup_default_cpu(struct dsa_switch_tree *dst)
408 {
409         struct dsa_port *cpu_dp, *dp;
410
411         cpu_dp = dsa_tree_find_first_cpu(dst);
412         if (!cpu_dp) {
413                 pr_err("DSA: tree %d has no CPU port\n", dst->index);
414                 return -EINVAL;
415         }
416
417         list_for_each_entry(dp, &dst->ports, list) {
418                 if (dp->cpu_dp)
419                         continue;
420
421                 if (dsa_port_is_user(dp) || dsa_port_is_dsa(dp))
422                         dp->cpu_dp = cpu_dp;
423         }
424
425         return 0;
426 }
427
428 /* Perform initial assignment of CPU ports to user ports and DSA links in the
429  * fabric, giving preference to CPU ports local to each switch. Default to
430  * using the first CPU port in the switch tree if the port does not have a CPU
431  * port local to this switch.
432  */
433 static int dsa_tree_setup_cpu_ports(struct dsa_switch_tree *dst)
434 {
435         struct dsa_port *cpu_dp, *dp;
436
437         list_for_each_entry(cpu_dp, &dst->ports, list) {
438                 if (!dsa_port_is_cpu(cpu_dp))
439                         continue;
440
441                 /* Prefer a local CPU port */
442                 dsa_switch_for_each_port(dp, cpu_dp->ds) {
443                         /* Prefer the first local CPU port found */
444                         if (dp->cpu_dp)
445                                 continue;
446
447                         if (dsa_port_is_user(dp) || dsa_port_is_dsa(dp))
448                                 dp->cpu_dp = cpu_dp;
449                 }
450         }
451
452         return dsa_tree_setup_default_cpu(dst);
453 }
454
455 static void dsa_tree_teardown_cpu_ports(struct dsa_switch_tree *dst)
456 {
457         struct dsa_port *dp;
458
459         list_for_each_entry(dp, &dst->ports, list)
460                 if (dsa_port_is_user(dp) || dsa_port_is_dsa(dp))
461                         dp->cpu_dp = NULL;
462 }
463
/* Run the driver's optional per-port setup and register @dp with devlink,
 * advertising a flavour matching the port type. On devlink registration
 * failure the driver-level setup is undone, leaving the driver balanced.
 * Returns 0 or a negative errno.
 */
static int dsa_port_devlink_setup(struct dsa_port *dp)
{
	struct devlink_port *dlp = &dp->devlink_port;
	struct dsa_switch_tree *dst = dp->ds->dst;
	struct devlink_port_attrs attrs = {};
	struct devlink *dl = dp->ds->devlink;
	struct dsa_switch *ds = dp->ds;
	const unsigned char *id;
	unsigned char len;
	int err;

	memset(dlp, 0, sizeof(*dlp));
	devlink_port_init(dl, dlp);

	/* Let the driver prepare the port before it becomes visible */
	if (ds->ops->port_setup) {
		err = ds->ops->port_setup(ds, dp->index);
		if (err)
			return err;
	}

	/* The tree index serves as the fabric-wide switch id */
	id = (const unsigned char *)&dst->index;
	len = sizeof(dst->index);

	attrs.phys.port_number = dp->index;
	memcpy(attrs.switch_id.id, id, len);
	attrs.switch_id.id_len = len;

	switch (dp->type) {
	case DSA_PORT_TYPE_UNUSED:
		attrs.flavour = DEVLINK_PORT_FLAVOUR_UNUSED;
		break;
	case DSA_PORT_TYPE_CPU:
		attrs.flavour = DEVLINK_PORT_FLAVOUR_CPU;
		break;
	case DSA_PORT_TYPE_DSA:
		attrs.flavour = DEVLINK_PORT_FLAVOUR_DSA;
		break;
	case DSA_PORT_TYPE_USER:
		attrs.flavour = DEVLINK_PORT_FLAVOUR_PHYSICAL;
		break;
	}

	devlink_port_attrs_set(dlp, &attrs);
	err = devlink_port_register(dl, dlp, dp->index);
	if (err) {
		/* Unwind the driver-level port setup done above */
		if (ds->ops->port_teardown)
			ds->ops->port_teardown(ds, dp->index);
		return err;
	}

	return 0;
}
516
/* Reverse of dsa_port_devlink_setup(): unregister from devlink first, then
 * let the driver undo its per-port setup, then finalize the devlink_port.
 */
static void dsa_port_devlink_teardown(struct dsa_port *dp)
{
	struct devlink_port *dlp = &dp->devlink_port;
	struct dsa_switch *ds = dp->ds;

	devlink_port_unregister(dlp);

	if (ds->ops->port_teardown)
		ds->ops->port_teardown(ds, dp->index);

	devlink_port_fini(dlp);
}
529
/* Bring up one port according to its type (unused/CPU/DSA/user).
 * Idempotent: returns 0 immediately when the port is already set up.
 * The two bool flags record which steps succeeded so the common error
 * path at the bottom unwinds exactly what was done, in reverse order.
 * Returns 0 or a negative errno.
 */
static int dsa_port_setup(struct dsa_port *dp)
{
	struct devlink_port *dlp = &dp->devlink_port;
	bool dsa_port_link_registered = false;
	struct dsa_switch *ds = dp->ds;
	bool dsa_port_enabled = false;
	int err = 0;

	if (dp->setup)
		return 0;

	err = dsa_port_devlink_setup(dp);
	if (err)
		return err;

	switch (dp->type) {
	case DSA_PORT_TYPE_UNUSED:
		dsa_port_disable(dp);
		break;
	case DSA_PORT_TYPE_CPU:
		/* Link (PHY/fixed-link) registration needs an OF node */
		if (dp->dn) {
			err = dsa_shared_port_link_register_of(dp);
			if (err)
				break;
			dsa_port_link_registered = true;
		} else {
			dev_warn(ds->dev,
				 "skipping link registration for CPU port %d\n",
				 dp->index);
		}

		err = dsa_port_enable(dp, NULL);
		if (err)
			break;
		dsa_port_enabled = true;

		break;
	case DSA_PORT_TYPE_DSA:
		/* Same sequence as CPU ports: register link, then enable */
		if (dp->dn) {
			err = dsa_shared_port_link_register_of(dp);
			if (err)
				break;
			dsa_port_link_registered = true;
		} else {
			dev_warn(ds->dev,
				 "skipping link registration for DSA port %d\n",
				 dp->index);
		}

		err = dsa_port_enable(dp, NULL);
		if (err)
			break;
		dsa_port_enabled = true;

		break;
	case DSA_PORT_TYPE_USER:
		of_get_mac_address(dp->dn, dp->mac);
		err = dsa_slave_create(dp);
		if (err)
			break;

		devlink_port_type_eth_set(dlp, dp->slave);
		break;
	}

	/* Common unwind: undo only the steps the flags say succeeded */
	if (err && dsa_port_enabled)
		dsa_port_disable(dp);
	if (err && dsa_port_link_registered)
		dsa_shared_port_link_unregister_of(dp);
	if (err) {
		dsa_port_devlink_teardown(dp);
		return err;
	}

	dp->setup = true;

	return 0;
}
608
609 static void dsa_port_teardown(struct dsa_port *dp)
610 {
611         struct devlink_port *dlp = &dp->devlink_port;
612
613         if (!dp->setup)
614                 return;
615
616         devlink_port_type_clear(dlp);
617
618         switch (dp->type) {
619         case DSA_PORT_TYPE_UNUSED:
620                 break;
621         case DSA_PORT_TYPE_CPU:
622                 dsa_port_disable(dp);
623                 if (dp->dn)
624                         dsa_shared_port_link_unregister_of(dp);
625                 break;
626         case DSA_PORT_TYPE_DSA:
627                 dsa_port_disable(dp);
628                 if (dp->dn)
629                         dsa_shared_port_link_unregister_of(dp);
630                 break;
631         case DSA_PORT_TYPE_USER:
632                 if (dp->slave) {
633                         dsa_slave_destroy(dp->slave);
634                         dp->slave = NULL;
635                 }
636                 break;
637         }
638
639         dsa_port_devlink_teardown(dp);
640
641         dp->setup = false;
642 }
643
/* Demote @dp to the UNUSED type and run the normal setup path for it */
static int dsa_port_setup_as_unused(struct dsa_port *dp)
{
	dp->type = DSA_PORT_TYPE_UNUSED;
	return dsa_port_setup(dp);
}
649
650 static int dsa_devlink_info_get(struct devlink *dl,
651                                 struct devlink_info_req *req,
652                                 struct netlink_ext_ack *extack)
653 {
654         struct dsa_switch *ds = dsa_devlink_to_ds(dl);
655
656         if (ds->ops->devlink_info_get)
657                 return ds->ops->devlink_info_get(ds, req, extack);
658
659         return -EOPNOTSUPP;
660 }
661
662 static int dsa_devlink_sb_pool_get(struct devlink *dl,
663                                    unsigned int sb_index, u16 pool_index,
664                                    struct devlink_sb_pool_info *pool_info)
665 {
666         struct dsa_switch *ds = dsa_devlink_to_ds(dl);
667
668         if (!ds->ops->devlink_sb_pool_get)
669                 return -EOPNOTSUPP;
670
671         return ds->ops->devlink_sb_pool_get(ds, sb_index, pool_index,
672                                             pool_info);
673 }
674
675 static int dsa_devlink_sb_pool_set(struct devlink *dl, unsigned int sb_index,
676                                    u16 pool_index, u32 size,
677                                    enum devlink_sb_threshold_type threshold_type,
678                                    struct netlink_ext_ack *extack)
679 {
680         struct dsa_switch *ds = dsa_devlink_to_ds(dl);
681
682         if (!ds->ops->devlink_sb_pool_set)
683                 return -EOPNOTSUPP;
684
685         return ds->ops->devlink_sb_pool_set(ds, sb_index, pool_index, size,
686                                             threshold_type, extack);
687 }
688
689 static int dsa_devlink_sb_port_pool_get(struct devlink_port *dlp,
690                                         unsigned int sb_index, u16 pool_index,
691                                         u32 *p_threshold)
692 {
693         struct dsa_switch *ds = dsa_devlink_port_to_ds(dlp);
694         int port = dsa_devlink_port_to_port(dlp);
695
696         if (!ds->ops->devlink_sb_port_pool_get)
697                 return -EOPNOTSUPP;
698
699         return ds->ops->devlink_sb_port_pool_get(ds, port, sb_index,
700                                                  pool_index, p_threshold);
701 }
702
703 static int dsa_devlink_sb_port_pool_set(struct devlink_port *dlp,
704                                         unsigned int sb_index, u16 pool_index,
705                                         u32 threshold,
706                                         struct netlink_ext_ack *extack)
707 {
708         struct dsa_switch *ds = dsa_devlink_port_to_ds(dlp);
709         int port = dsa_devlink_port_to_port(dlp);
710
711         if (!ds->ops->devlink_sb_port_pool_set)
712                 return -EOPNOTSUPP;
713
714         return ds->ops->devlink_sb_port_pool_set(ds, port, sb_index,
715                                                  pool_index, threshold, extack);
716 }
717
718 static int
719 dsa_devlink_sb_tc_pool_bind_get(struct devlink_port *dlp,
720                                 unsigned int sb_index, u16 tc_index,
721                                 enum devlink_sb_pool_type pool_type,
722                                 u16 *p_pool_index, u32 *p_threshold)
723 {
724         struct dsa_switch *ds = dsa_devlink_port_to_ds(dlp);
725         int port = dsa_devlink_port_to_port(dlp);
726
727         if (!ds->ops->devlink_sb_tc_pool_bind_get)
728                 return -EOPNOTSUPP;
729
730         return ds->ops->devlink_sb_tc_pool_bind_get(ds, port, sb_index,
731                                                     tc_index, pool_type,
732                                                     p_pool_index, p_threshold);
733 }
734
735 static int
736 dsa_devlink_sb_tc_pool_bind_set(struct devlink_port *dlp,
737                                 unsigned int sb_index, u16 tc_index,
738                                 enum devlink_sb_pool_type pool_type,
739                                 u16 pool_index, u32 threshold,
740                                 struct netlink_ext_ack *extack)
741 {
742         struct dsa_switch *ds = dsa_devlink_port_to_ds(dlp);
743         int port = dsa_devlink_port_to_port(dlp);
744
745         if (!ds->ops->devlink_sb_tc_pool_bind_set)
746                 return -EOPNOTSUPP;
747
748         return ds->ops->devlink_sb_tc_pool_bind_set(ds, port, sb_index,
749                                                     tc_index, pool_type,
750                                                     pool_index, threshold,
751                                                     extack);
752 }
753
754 static int dsa_devlink_sb_occ_snapshot(struct devlink *dl,
755                                        unsigned int sb_index)
756 {
757         struct dsa_switch *ds = dsa_devlink_to_ds(dl);
758
759         if (!ds->ops->devlink_sb_occ_snapshot)
760                 return -EOPNOTSUPP;
761
762         return ds->ops->devlink_sb_occ_snapshot(ds, sb_index);
763 }
764
765 static int dsa_devlink_sb_occ_max_clear(struct devlink *dl,
766                                         unsigned int sb_index)
767 {
768         struct dsa_switch *ds = dsa_devlink_to_ds(dl);
769
770         if (!ds->ops->devlink_sb_occ_max_clear)
771                 return -EOPNOTSUPP;
772
773         return ds->ops->devlink_sb_occ_max_clear(ds, sb_index);
774 }
775
776 static int dsa_devlink_sb_occ_port_pool_get(struct devlink_port *dlp,
777                                             unsigned int sb_index,
778                                             u16 pool_index, u32 *p_cur,
779                                             u32 *p_max)
780 {
781         struct dsa_switch *ds = dsa_devlink_port_to_ds(dlp);
782         int port = dsa_devlink_port_to_port(dlp);
783
784         if (!ds->ops->devlink_sb_occ_port_pool_get)
785                 return -EOPNOTSUPP;
786
787         return ds->ops->devlink_sb_occ_port_pool_get(ds, port, sb_index,
788                                                      pool_index, p_cur, p_max);
789 }
790
791 static int
792 dsa_devlink_sb_occ_tc_port_bind_get(struct devlink_port *dlp,
793                                     unsigned int sb_index, u16 tc_index,
794                                     enum devlink_sb_pool_type pool_type,
795                                     u32 *p_cur, u32 *p_max)
796 {
797         struct dsa_switch *ds = dsa_devlink_port_to_ds(dlp);
798         int port = dsa_devlink_port_to_port(dlp);
799
800         if (!ds->ops->devlink_sb_occ_tc_port_bind_get)
801                 return -EOPNOTSUPP;
802
803         return ds->ops->devlink_sb_occ_tc_port_bind_get(ds, port,
804                                                         sb_index, tc_index,
805                                                         pool_type, p_cur,
806                                                         p_max);
807 }
808
/* devlink ops shared by all DSA switches; each callback above proxies into
 * the driver's optional dsa_switch_ops hook, returning -EOPNOTSUPP when the
 * driver does not implement it.
 */
static const struct devlink_ops dsa_devlink_ops = {
	.info_get			= dsa_devlink_info_get,
	.sb_pool_get			= dsa_devlink_sb_pool_get,
	.sb_pool_set			= dsa_devlink_sb_pool_set,
	.sb_port_pool_get		= dsa_devlink_sb_port_pool_get,
	.sb_port_pool_set		= dsa_devlink_sb_port_pool_set,
	.sb_tc_pool_bind_get		= dsa_devlink_sb_tc_pool_bind_get,
	.sb_tc_pool_bind_set		= dsa_devlink_sb_tc_pool_bind_set,
	.sb_occ_snapshot		= dsa_devlink_sb_occ_snapshot,
	.sb_occ_max_clear		= dsa_devlink_sb_occ_max_clear,
	.sb_occ_port_pool_get		= dsa_devlink_sb_occ_port_pool_get,
	.sb_occ_tc_port_bind_get	= dsa_devlink_sb_occ_tc_port_bind_get,
};
822
/* Make @ds use the tree's tagging protocol. If it differs from the tree's
 * default, the driver is asked to switch protocols under rtnl_lock. Then
 * the optional tagger-side and driver-side "connect" hooks run; if the
 * driver-side connect fails, the tagger is disconnected again so no state
 * is leaked. Returns 0 or a negative errno.
 */
static int dsa_switch_setup_tag_protocol(struct dsa_switch *ds)
{
	const struct dsa_device_ops *tag_ops = ds->dst->tag_ops;
	struct dsa_switch_tree *dst = ds->dst;
	int err;

	/* Already at the default protocol: skip the driver call */
	if (tag_ops->proto == dst->default_proto)
		goto connect;

	rtnl_lock();
	err = ds->ops->change_tag_protocol(ds, tag_ops->proto);
	rtnl_unlock();
	if (err) {
		dev_err(ds->dev, "Unable to use tag protocol \"%s\": %pe\n",
			tag_ops->name, ERR_PTR(err));
		return err;
	}

connect:
	if (tag_ops->connect) {
		err = tag_ops->connect(ds);
		if (err)
			return err;
	}

	if (ds->ops->connect_tag_protocol) {
		err = ds->ops->connect_tag_protocol(ds, tag_ops->proto);
		if (err) {
			dev_err(ds->dev,
				"Unable to connect to tag protocol \"%s\": %pe\n",
				tag_ops->name, ERR_PTR(err));
			goto disconnect;
		}
	}

	return 0;

disconnect:
	/* Undo the tagger-side connect performed above */
	if (tag_ops->disconnect)
		tag_ops->disconnect(ds);

	return err;
}
866
/* One-time setup of a switch device: devlink allocation, DSA notifier
 * registration, driver ->setup(), tag protocol selection and an optional
 * slave MDIO bus. Idempotent via ds->setup. On failure every completed
 * step is unwound in reverse order through the goto labels below.
 * Returns 0 or a negative errno.
 */
static int dsa_switch_setup(struct dsa_switch *ds)
{
	struct dsa_devlink_priv *dl_priv;
	struct device_node *dn;
	int err;

	if (ds->setup)
		return 0;

	/* Initialize ds->phys_mii_mask before registering the slave MDIO bus
	 * driver and before ops->setup() has run, since the switch drivers and
	 * the slave MDIO bus driver rely on these values for probing PHY
	 * devices or not
	 */
	ds->phys_mii_mask |= dsa_user_ports(ds);

	/* Add the switch to devlink before calling setup, so that setup can
	 * add dpipe tables
	 */
	ds->devlink =
		devlink_alloc(&dsa_devlink_ops, sizeof(*dl_priv), ds->dev);
	if (!ds->devlink)
		return -ENOMEM;
	dl_priv = devlink_priv(ds->devlink);
	dl_priv->ds = ds;

	err = dsa_switch_register_notifier(ds);
	if (err)
		goto devlink_free;

	ds->configure_vlan_while_not_filtering = true;

	err = ds->ops->setup(ds);
	if (err < 0)
		goto unregister_notifier;

	err = dsa_switch_setup_tag_protocol(ds);
	if (err)
		goto teardown;

	/* Create an MDIO bus here only when the driver reads PHYs itself
	 * (has ->phy_read) and did not already provide one
	 */
	if (!ds->slave_mii_bus && ds->ops->phy_read) {
		ds->slave_mii_bus = mdiobus_alloc();
		if (!ds->slave_mii_bus) {
			err = -ENOMEM;
			goto teardown;
		}

		dsa_slave_mii_bus_init(ds);

		dn = of_get_child_by_name(ds->dev->of_node, "mdio");

		err = of_mdiobus_register(ds->slave_mii_bus, dn);
		of_node_put(dn);
		if (err < 0)
			goto free_slave_mii_bus;
	}

	ds->setup = true;
	/* Publish to devlink userspace only once fully set up */
	devlink_register(ds->devlink);
	return 0;

free_slave_mii_bus:
	if (ds->slave_mii_bus && ds->ops->phy_read)
		mdiobus_free(ds->slave_mii_bus);
teardown:
	if (ds->ops->teardown)
		ds->ops->teardown(ds);
unregister_notifier:
	dsa_switch_unregister_notifier(ds);
devlink_free:
	devlink_free(ds->devlink);
	ds->devlink = NULL;
	return err;
}
941
/* Undo dsa_switch_setup() in reverse order. Idempotent via ds->setup. */
static void dsa_switch_teardown(struct dsa_switch *ds)
{
	if (!ds->setup)
		return;

	/* Unregister from devlink first, while the switch is still usable */
	if (ds->devlink)
		devlink_unregister(ds->devlink);

	/* Only free the slave MDIO bus if DSA allocated it (see
	 * dsa_switch_setup()), which is the case when ops->phy_read exists.
	 */
	if (ds->slave_mii_bus && ds->ops->phy_read) {
		mdiobus_unregister(ds->slave_mii_bus);
		mdiobus_free(ds->slave_mii_bus);
		ds->slave_mii_bus = NULL;
	}

	if (ds->ops->teardown)
		ds->ops->teardown(ds);

	dsa_switch_unregister_notifier(ds);

	if (ds->devlink) {
		devlink_free(ds->devlink);
		ds->devlink = NULL;
	}

	ds->setup = false;
}
968
969 /* First tear down the non-shared, then the shared ports. This ensures that
970  * all work items scheduled by our switchdev handlers for user ports have
971  * completed before we destroy the refcounting kept on the shared ports.
972  */
973 static void dsa_tree_teardown_ports(struct dsa_switch_tree *dst)
974 {
975         struct dsa_port *dp;
976
977         list_for_each_entry(dp, &dst->ports, list)
978                 if (dsa_port_is_user(dp) || dsa_port_is_unused(dp))
979                         dsa_port_teardown(dp);
980
981         dsa_flush_workqueue();
982
983         list_for_each_entry(dp, &dst->ports, list)
984                 if (dsa_port_is_dsa(dp) || dsa_port_is_cpu(dp))
985                         dsa_port_teardown(dp);
986 }
987
/* Tear down every switch of the tree. The ports list visits each switch
 * multiple times (once per port), but dsa_switch_teardown() is idempotent
 * through its ds->setup guard, so repeat visits are harmless.
 */
static void dsa_tree_teardown_switches(struct dsa_switch_tree *dst)
{
	struct dsa_port *dp;

	list_for_each_entry(dp, &dst->ports, list)
		dsa_switch_teardown(dp->ds);
}
995
996 /* Bring shared ports up first, then non-shared ports */
997 static int dsa_tree_setup_ports(struct dsa_switch_tree *dst)
998 {
999         struct dsa_port *dp;
1000         int err = 0;
1001
1002         list_for_each_entry(dp, &dst->ports, list) {
1003                 if (dsa_port_is_dsa(dp) || dsa_port_is_cpu(dp)) {
1004                         err = dsa_port_setup(dp);
1005                         if (err)
1006                                 goto teardown;
1007                 }
1008         }
1009
1010         list_for_each_entry(dp, &dst->ports, list) {
1011                 if (dsa_port_is_user(dp) || dsa_port_is_unused(dp)) {
1012                         err = dsa_port_setup(dp);
1013                         if (err) {
1014                                 err = dsa_port_setup_as_unused(dp);
1015                                 if (err)
1016                                         goto teardown;
1017                         }
1018                 }
1019         }
1020
1021         return 0;
1022
1023 teardown:
1024         dsa_tree_teardown_ports(dst);
1025
1026         return err;
1027 }
1028
1029 static int dsa_tree_setup_switches(struct dsa_switch_tree *dst)
1030 {
1031         struct dsa_port *dp;
1032         int err = 0;
1033
1034         list_for_each_entry(dp, &dst->ports, list) {
1035                 err = dsa_switch_setup(dp->ds);
1036                 if (err) {
1037                         dsa_tree_teardown_switches(dst);
1038                         break;
1039                 }
1040         }
1041
1042         return err;
1043 }
1044
/* Attach every CPU port of the tree to its DSA master net_device and replay
 * the master's current administrative and operational state to the switches.
 */
static int dsa_tree_setup_master(struct dsa_switch_tree *dst)
{
	struct dsa_port *cpu_dp;
	int err = 0;

	rtnl_lock();

	dsa_tree_for_each_cpu_port(cpu_dp, dst) {
		struct net_device *master = cpu_dp->master;
		/* Sample the admin state before dsa_master_setup() so the
		 * replay below reflects the master's pre-existing state.
		 */
		bool admin_up = (master->flags & IFF_UP) &&
				!qdisc_tx_is_noop(master);

		err = dsa_master_setup(master, cpu_dp);
		if (err)
			break;

		/* Replay master state event */
		dsa_tree_master_admin_state_change(dst, master, admin_up);
		dsa_tree_master_oper_state_change(dst, master,
						  netif_oper_up(master));
	}

	rtnl_unlock();

	return err;
}
1071
1072 static void dsa_tree_teardown_master(struct dsa_switch_tree *dst)
1073 {
1074         struct dsa_port *cpu_dp;
1075
1076         rtnl_lock();
1077
1078         dsa_tree_for_each_cpu_port(cpu_dp, dst) {
1079                 struct net_device *master = cpu_dp->master;
1080
1081                 /* Synthesizing an "admin down" state is sufficient for
1082                  * the switches to get a notification if the master is
1083                  * currently up and running.
1084                  */
1085                 dsa_tree_master_admin_state_change(dst, master, false);
1086
1087                 dsa_master_teardown(master);
1088         }
1089
1090         rtnl_unlock();
1091 }
1092
1093 static int dsa_tree_setup_lags(struct dsa_switch_tree *dst)
1094 {
1095         unsigned int len = 0;
1096         struct dsa_port *dp;
1097
1098         list_for_each_entry(dp, &dst->ports, list) {
1099                 if (dp->ds->num_lag_ids > len)
1100                         len = dp->ds->num_lag_ids;
1101         }
1102
1103         if (!len)
1104                 return 0;
1105
1106         dst->lags = kcalloc(len, sizeof(*dst->lags), GFP_KERNEL);
1107         if (!dst->lags)
1108                 return -ENOMEM;
1109
1110         dst->lags_len = len;
1111         return 0;
1112 }
1113
/* Release the tree-level LAG array; kfree(NULL) is a no-op, so this is safe
 * even when dsa_tree_setup_lags() never allocated it.
 */
static void dsa_tree_teardown_lags(struct dsa_switch_tree *dst)
{
	kfree(dst->lags);
}
1118
/* Bring up an entire switch tree: CPU ports, member switches, all ports,
 * DSA masters and the LAG array, unwinding in reverse order on failure.
 */
static int dsa_tree_setup(struct dsa_switch_tree *dst)
{
	bool complete;
	int err;

	if (dst->setup) {
		pr_err("DSA: tree %d already setup! Disjoint trees?\n",
		       dst->index);
		return -EEXIST;
	}

	/* Wait until every member switch of the tree has probed; with an
	 * incomplete routing table, returning 0 defers setup to the probe
	 * of the last member.
	 */
	complete = dsa_tree_setup_routing_table(dst);
	if (!complete)
		return 0;

	err = dsa_tree_setup_cpu_ports(dst);
	if (err)
		return err;

	err = dsa_tree_setup_switches(dst);
	if (err)
		goto teardown_cpu_ports;

	err = dsa_tree_setup_ports(dst);
	if (err)
		goto teardown_switches;

	err = dsa_tree_setup_master(dst);
	if (err)
		goto teardown_ports;

	err = dsa_tree_setup_lags(dst);
	if (err)
		goto teardown_master;

	dst->setup = true;

	pr_info("DSA: tree %d setup\n", dst->index);

	return 0;

teardown_master:
	dsa_tree_teardown_master(dst);
teardown_ports:
	dsa_tree_teardown_ports(dst);
teardown_switches:
	dsa_tree_teardown_switches(dst);
teardown_cpu_ports:
	dsa_tree_teardown_cpu_ports(dst);

	return err;
}
1171
/* Undo dsa_tree_setup() in reverse order, then free the routing table.
 * Idempotent via dst->setup.
 */
static void dsa_tree_teardown(struct dsa_switch_tree *dst)
{
	struct dsa_link *dl, *next;

	if (!dst->setup)
		return;

	dsa_tree_teardown_lags(dst);

	dsa_tree_teardown_master(dst);

	dsa_tree_teardown_ports(dst);

	dsa_tree_teardown_switches(dst);

	dsa_tree_teardown_cpu_ports(dst);

	/* Free the cross-chip routing table built during parsing */
	list_for_each_entry_safe(dl, next, &dst->rtable, list) {
		list_del(&dl->list);
		kfree(dl);
	}

	pr_info("DSA: tree %d torn down\n", dst->index);

	dst->setup = false;
}
1198
/* Switch the tree over to @tag_ops: connect the switches to the new tagger
 * and disconnect them from the old one, restoring the old tagger on failure.
 * -EOPNOTSUPP from the connect notifier is tolerated, since implementing a
 * tagger connection is optional for drivers.
 */
static int dsa_tree_bind_tag_proto(struct dsa_switch_tree *dst,
				   const struct dsa_device_ops *tag_ops)
{
	const struct dsa_device_ops *old_tag_ops = dst->tag_ops;
	struct dsa_notifier_tag_proto_info info;
	int err;

	dst->tag_ops = tag_ops;

	/* Notify the switches from this tree about the connection
	 * to the new tagger
	 */
	info.tag_ops = tag_ops;
	err = dsa_tree_notify(dst, DSA_NOTIFIER_TAG_PROTO_CONNECT, &info);
	if (err && err != -EOPNOTSUPP)
		goto out_disconnect;

	/* Notify the old tagger about the disconnection from this tree */
	info.tag_ops = old_tag_ops;
	dsa_tree_notify(dst, DSA_NOTIFIER_TAG_PROTO_DISCONNECT, &info);

	return 0;

out_disconnect:
	/* Undo the partial connection to the new tagger */
	info.tag_ops = tag_ops;
	dsa_tree_notify(dst, DSA_NOTIFIER_TAG_PROTO_DISCONNECT, &info);
	dst->tag_ops = old_tag_ops;

	return err;
}
1229
1230 /* Since the dsa/tagging sysfs device attribute is per master, the assumption
1231  * is that all DSA switches within a tree share the same tagger, otherwise
1232  * they would have formed disjoint trees (different "dsa,member" values).
1233  */
int dsa_tree_change_tag_proto(struct dsa_switch_tree *dst,
			      const struct dsa_device_ops *tag_ops,
			      const struct dsa_device_ops *old_tag_ops)
{
	struct dsa_notifier_tag_proto_info info;
	struct dsa_port *dp;
	int err = -EBUSY;

	/* Can be called from a sysfs write handler; avoid an AB/BA deadlock
	 * with rtnl by bailing out and letting the syscall be restarted.
	 */
	if (!rtnl_trylock())
		return restart_syscall();

	/* At the moment we don't allow changing the tag protocol under
	 * traffic. The rtnl_mutex also happens to serialize concurrent
	 * attempts to change the tagging protocol. If we ever lift the IFF_UP
	 * restriction, there needs to be another mutex which serializes this.
	 */
	dsa_tree_for_each_user_port(dp, dst) {
		if (dsa_port_to_master(dp)->flags & IFF_UP)
			goto out_unlock;

		if (dp->slave->flags & IFF_UP)
			goto out_unlock;
	}

	/* Notify the tag protocol change */
	info.tag_ops = tag_ops;
	err = dsa_tree_notify(dst, DSA_NOTIFIER_TAG_PROTO, &info);
	if (err)
		goto out_unwind_tagger;

	err = dsa_tree_bind_tag_proto(dst, tag_ops);
	if (err)
		goto out_unwind_tagger;

	rtnl_unlock();

	return 0;

out_unwind_tagger:
	/* Tell the switches to revert to the old protocol */
	info.tag_ops = old_tag_ops;
	dsa_tree_notify(dst, DSA_NOTIFIER_TAG_PROTO, &info);
out_unlock:
	rtnl_unlock();
	return err;
}
1279
1280 static void dsa_tree_master_state_change(struct dsa_switch_tree *dst,
1281                                          struct net_device *master)
1282 {
1283         struct dsa_notifier_master_state_info info;
1284         struct dsa_port *cpu_dp = master->dsa_ptr;
1285
1286         info.master = master;
1287         info.operational = dsa_port_master_is_operational(cpu_dp);
1288
1289         dsa_tree_notify(dst, DSA_NOTIFIER_MASTER_STATE_CHANGE, &info);
1290 }
1291
1292 void dsa_tree_master_admin_state_change(struct dsa_switch_tree *dst,
1293                                         struct net_device *master,
1294                                         bool up)
1295 {
1296         struct dsa_port *cpu_dp = master->dsa_ptr;
1297         bool notify = false;
1298
1299         /* Don't keep track of admin state on LAG DSA masters,
1300          * but rather just of physical DSA masters
1301          */
1302         if (netif_is_lag_master(master))
1303                 return;
1304
1305         if ((dsa_port_master_is_operational(cpu_dp)) !=
1306             (up && cpu_dp->master_oper_up))
1307                 notify = true;
1308
1309         cpu_dp->master_admin_up = up;
1310
1311         if (notify)
1312                 dsa_tree_master_state_change(dst, master);
1313 }
1314
1315 void dsa_tree_master_oper_state_change(struct dsa_switch_tree *dst,
1316                                        struct net_device *master,
1317                                        bool up)
1318 {
1319         struct dsa_port *cpu_dp = master->dsa_ptr;
1320         bool notify = false;
1321
1322         /* Don't keep track of oper state on LAG DSA masters,
1323          * but rather just of physical DSA masters
1324          */
1325         if (netif_is_lag_master(master))
1326                 return;
1327
1328         if ((dsa_port_master_is_operational(cpu_dp)) !=
1329             (cpu_dp->master_admin_up && up))
1330                 notify = true;
1331
1332         cpu_dp->master_oper_up = up;
1333
1334         if (notify)
1335                 dsa_tree_master_state_change(dst, master);
1336 }
1337
1338 static struct dsa_port *dsa_port_touch(struct dsa_switch *ds, int index)
1339 {
1340         struct dsa_switch_tree *dst = ds->dst;
1341         struct dsa_port *dp;
1342
1343         dsa_switch_for_each_port(dp, ds)
1344                 if (dp->index == index)
1345                         return dp;
1346
1347         dp = kzalloc(sizeof(*dp), GFP_KERNEL);
1348         if (!dp)
1349                 return NULL;
1350
1351         dp->ds = ds;
1352         dp->index = index;
1353
1354         mutex_init(&dp->addr_lists_lock);
1355         mutex_init(&dp->vlans_lock);
1356         INIT_LIST_HEAD(&dp->fdbs);
1357         INIT_LIST_HEAD(&dp->mdbs);
1358         INIT_LIST_HEAD(&dp->vlans);
1359         INIT_LIST_HEAD(&dp->list);
1360         list_add_tail(&dp->list, &dst->ports);
1361
1362         return dp;
1363 }
1364
1365 static int dsa_port_parse_user(struct dsa_port *dp, const char *name)
1366 {
1367         if (!name)
1368                 name = "eth%d";
1369
1370         dp->type = DSA_PORT_TYPE_USER;
1371         dp->name = name;
1372
1373         return 0;
1374 }
1375
/* Mark @dp as a DSA link port (wired to another switch in the tree) */
static int dsa_port_parse_dsa(struct dsa_port *dp)
{
	dp->type = DSA_PORT_TYPE_DSA;

	return 0;
}
1382
/* Ask the switch driver which tagging protocol it prefers for the CPU port
 * @dp attached to @master, taking stacked-DSA setups into account.
 */
static enum dsa_tag_protocol dsa_get_tag_protocol(struct dsa_port *dp,
						  struct net_device *master)
{
	enum dsa_tag_protocol tag_protocol = DSA_TAG_PROTO_NONE;
	struct dsa_switch *mds, *ds = dp->ds;
	unsigned int mdp_upstream;
	struct dsa_port *mdp;

	/* It is possible to stack DSA switches onto one another when that
	 * happens the switch driver may want to know if its tagging protocol
	 * is going to work in such a configuration.
	 */
	if (dsa_slave_dev_check(master)) {
		mdp = dsa_slave_to_port(master);
		mds = mdp->ds;
		mdp_upstream = dsa_upstream_port(mds, mdp->index);
		/* Use the upstream switch's choice as a hint to our driver */
		tag_protocol = mds->ops->get_tag_protocol(mds, mdp_upstream,
							  DSA_TAG_PROTO_NONE);
	}

	/* If the master device is not itself a DSA slave in a disjoint DSA
	 * tree, then return immediately.
	 */
	return ds->ops->get_tag_protocol(ds, dp->index, tag_protocol);
}
1408
/* Set up @dp as a CPU port attached to @master, resolving the tagging
 * protocol for the tree: the driver's preference, optionally overridden by
 * @user_protocol from the device tree. Enforces a single protocol per tree.
 */
static int dsa_port_parse_cpu(struct dsa_port *dp, struct net_device *master,
			      const char *user_protocol)
{
	struct dsa_switch *ds = dp->ds;
	struct dsa_switch_tree *dst = ds->dst;
	const struct dsa_device_ops *tag_ops;
	enum dsa_tag_protocol default_proto;

	/* Find out which protocol the switch would prefer. */
	default_proto = dsa_get_tag_protocol(dp, master);
	if (dst->default_proto) {
		if (dst->default_proto != default_proto) {
			dev_err(ds->dev,
				"A DSA switch tree can have only one tagging protocol\n");
			return -EINVAL;
		}
	} else {
		dst->default_proto = default_proto;
	}

	/* See if the user wants to override that preference. */
	if (user_protocol) {
		if (!ds->ops->change_tag_protocol) {
			dev_err(ds->dev, "Tag protocol cannot be modified\n");
			return -EINVAL;
		}

		tag_ops = dsa_find_tagger_by_name(user_protocol);
	} else {
		tag_ops = dsa_tag_driver_get(default_proto);
	}

	if (IS_ERR(tag_ops)) {
		/* The tagger module may simply not be loaded yet */
		if (PTR_ERR(tag_ops) == -ENOPROTOOPT)
			return -EPROBE_DEFER;

		dev_warn(ds->dev, "No tagger for this switch\n");
		return PTR_ERR(tag_ops);
	}

	if (dst->tag_ops) {
		if (dst->tag_ops != tag_ops) {
			dev_err(ds->dev,
				"A DSA switch tree can have only one tagging protocol\n");

			dsa_tag_driver_put(tag_ops);
			return -EINVAL;
		}

		/* In the case of multiple CPU ports per switch, the tagging
		 * protocol is still reference-counted only per switch tree.
		 */
		dsa_tag_driver_put(tag_ops);
	} else {
		dst->tag_ops = tag_ops;
	}

	dp->master = master;
	dp->type = DSA_PORT_TYPE_CPU;
	dsa_port_set_tag_protocol(dp, dst->tag_ops);
	dp->dst = dst;

	/* At this point, the tree may be configured to use a different
	 * tagger than the one chosen by the switch driver during
	 * .setup, in the case when a user selects a custom protocol
	 * through the DT.
	 *
	 * This is resolved by syncing the driver with the tree in
	 * dsa_switch_setup_tag_protocol once .setup has run and the
	 * driver is ready to accept calls to .change_tag_protocol. If
	 * the driver does not support the custom protocol at that
	 * point, the tree is wholly rejected, thereby ensuring that the
	 * tree and driver are always in agreement on the protocol to
	 * use.
	 */
	return 0;
}
1486
1487 static int dsa_port_parse_of(struct dsa_port *dp, struct device_node *dn)
1488 {
1489         struct device_node *ethernet = of_parse_phandle(dn, "ethernet", 0);
1490         const char *name = of_get_property(dn, "label", NULL);
1491         bool link = of_property_read_bool(dn, "link");
1492
1493         dp->dn = dn;
1494
1495         if (ethernet) {
1496                 struct net_device *master;
1497                 const char *user_protocol;
1498
1499                 master = of_find_net_device_by_node(ethernet);
1500                 of_node_put(ethernet);
1501                 if (!master)
1502                         return -EPROBE_DEFER;
1503
1504                 user_protocol = of_get_property(dn, "dsa-tag-protocol", NULL);
1505                 return dsa_port_parse_cpu(dp, master, user_protocol);
1506         }
1507
1508         if (link)
1509                 return dsa_port_parse_dsa(dp);
1510
1511         return dsa_port_parse_user(dp, name);
1512 }
1513
1514 static int dsa_switch_parse_ports_of(struct dsa_switch *ds,
1515                                      struct device_node *dn)
1516 {
1517         struct device_node *ports, *port;
1518         struct dsa_port *dp;
1519         int err = 0;
1520         u32 reg;
1521
1522         ports = of_get_child_by_name(dn, "ports");
1523         if (!ports) {
1524                 /* The second possibility is "ethernet-ports" */
1525                 ports = of_get_child_by_name(dn, "ethernet-ports");
1526                 if (!ports) {
1527                         dev_err(ds->dev, "no ports child node found\n");
1528                         return -EINVAL;
1529                 }
1530         }
1531
1532         for_each_available_child_of_node(ports, port) {
1533                 err = of_property_read_u32(port, "reg", &reg);
1534                 if (err) {
1535                         of_node_put(port);
1536                         goto out_put_node;
1537                 }
1538
1539                 if (reg >= ds->num_ports) {
1540                         dev_err(ds->dev, "port %pOF index %u exceeds num_ports (%u)\n",
1541                                 port, reg, ds->num_ports);
1542                         of_node_put(port);
1543                         err = -EINVAL;
1544                         goto out_put_node;
1545                 }
1546
1547                 dp = dsa_to_port(ds, reg);
1548
1549                 err = dsa_port_parse_of(dp, port);
1550                 if (err) {
1551                         of_node_put(port);
1552                         goto out_put_node;
1553                 }
1554         }
1555
1556 out_put_node:
1557         of_node_put(ports);
1558         return err;
1559 }
1560
1561 static int dsa_switch_parse_member_of(struct dsa_switch *ds,
1562                                       struct device_node *dn)
1563 {
1564         u32 m[2] = { 0, 0 };
1565         int sz;
1566
1567         /* Don't error out if this optional property isn't found */
1568         sz = of_property_read_variable_u32_array(dn, "dsa,member", m, 2, 2);
1569         if (sz < 0 && sz != -EINVAL)
1570                 return sz;
1571
1572         ds->index = m[1];
1573
1574         ds->dst = dsa_tree_touch(m[0]);
1575         if (!ds->dst)
1576                 return -ENOMEM;
1577
1578         if (dsa_switch_find(ds->dst->index, ds->index)) {
1579                 dev_err(ds->dev,
1580                         "A DSA switch with index %d already exists in tree %d\n",
1581                         ds->index, ds->dst->index);
1582                 return -EEXIST;
1583         }
1584
1585         if (ds->dst->last_switch < ds->index)
1586                 ds->dst->last_switch = ds->index;
1587
1588         return 0;
1589 }
1590
1591 static int dsa_switch_touch_ports(struct dsa_switch *ds)
1592 {
1593         struct dsa_port *dp;
1594         int port;
1595
1596         for (port = 0; port < ds->num_ports; port++) {
1597                 dp = dsa_port_touch(ds, port);
1598                 if (!dp)
1599                         return -ENOMEM;
1600         }
1601
1602         return 0;
1603 }
1604
/* Device tree probing path: tree membership, then port allocation, then
 * per-port parsing.
 */
static int dsa_switch_parse_of(struct dsa_switch *ds, struct device_node *dn)
{
	int err;

	err = dsa_switch_parse_member_of(ds, dn);
	if (!err)
		err = dsa_switch_touch_ports(ds);
	if (!err)
		err = dsa_switch_parse_ports_of(ds, dn);

	return err;
}
1619
/* Parse one platform-data port entry: "cpu" and "dsa" are reserved names
 * for shared ports, anything else is the label of a user port.
 */
static int dsa_port_parse(struct dsa_port *dp, const char *name,
			  struct device *dev)
{
	if (!strcmp(name, "cpu")) {
		struct net_device *master;

		master = dsa_dev_to_net_device(dev);
		if (!master)
			return -EPROBE_DEFER;

		/* NOTE(review): the reference taken by
		 * dsa_dev_to_net_device() is dropped here while @master keeps
		 * being used below - presumably fine because the master
		 * outlives probing, but worth confirming.
		 */
		dev_put(master);

		return dsa_port_parse_cpu(dp, master, NULL);
	}

	if (!strcmp(name, "dsa"))
		return dsa_port_parse_dsa(dp);

	return dsa_port_parse_user(dp, name);
}
1640
1641 static int dsa_switch_parse_ports(struct dsa_switch *ds,
1642                                   struct dsa_chip_data *cd)
1643 {
1644         bool valid_name_found = false;
1645         struct dsa_port *dp;
1646         struct device *dev;
1647         const char *name;
1648         unsigned int i;
1649         int err;
1650
1651         for (i = 0; i < DSA_MAX_PORTS; i++) {
1652                 name = cd->port_names[i];
1653                 dev = cd->netdev[i];
1654                 dp = dsa_to_port(ds, i);
1655
1656                 if (!name)
1657                         continue;
1658
1659                 err = dsa_port_parse(dp, name, dev);
1660                 if (err)
1661                         return err;
1662
1663                 valid_name_found = true;
1664         }
1665
1666         if (!valid_name_found && i == DSA_MAX_PORTS)
1667                 return -EINVAL;
1668
1669         return 0;
1670 }
1671
1672 static int dsa_switch_parse(struct dsa_switch *ds, struct dsa_chip_data *cd)
1673 {
1674         int err;
1675
1676         ds->cd = cd;
1677
1678         /* We don't support interconnected switches nor multiple trees via
1679          * platform data, so this is the unique switch of the tree.
1680          */
1681         ds->index = 0;
1682         ds->dst = dsa_tree_touch(0);
1683         if (!ds->dst)
1684                 return -ENOMEM;
1685
1686         err = dsa_switch_touch_ports(ds);
1687         if (err)
1688                 return err;
1689
1690         return dsa_switch_parse_ports(ds, cd);
1691 }
1692
/* Free every dsa_port allocated by dsa_port_touch() for this switch. By now
 * all address and VLAN lists should have been emptied by the teardown path;
 * warn if that is not the case.
 */
static void dsa_switch_release_ports(struct dsa_switch *ds)
{
	struct dsa_port *dp, *next;

	dsa_switch_for_each_port_safe(dp, next, ds) {
		WARN_ON(!list_empty(&dp->fdbs));
		WARN_ON(!list_empty(&dp->mdbs));
		WARN_ON(!list_empty(&dp->vlans));
		list_del(&dp->list);
		kfree(dp);
	}
}
1705
/* Parse the switch description (device tree or platform data) and attempt to
 * set up its tree. The tree only comes up once all of its members have
 * probed (see dsa_tree_setup_routing_table()).
 */
static int dsa_switch_probe(struct dsa_switch *ds)
{
	struct dsa_switch_tree *dst;
	struct dsa_chip_data *pdata;
	struct device_node *np;
	int err;

	if (!ds->dev)
		return -ENODEV;

	pdata = ds->dev->platform_data;
	np = ds->dev->of_node;

	if (!ds->num_ports)
		return -EINVAL;

	/* Device tree takes precedence over platform data */
	if (np) {
		err = dsa_switch_parse_of(ds, np);
		if (err)
			dsa_switch_release_ports(ds);
	} else if (pdata) {
		err = dsa_switch_parse(ds, pdata);
		if (err)
			dsa_switch_release_ports(ds);
	} else {
		err = -ENODEV;
	}

	if (err)
		return err;

	dst = ds->dst;
	/* Hold a tree reference for this switch; dropped on setup failure */
	dsa_tree_get(dst);
	err = dsa_tree_setup(dst);
	if (err) {
		dsa_switch_release_ports(ds);
		dsa_tree_put(dst);
	}

	return err;
}
1747
/* Driver entry point: probe @ds under the global DSA mutex. The tree
 * reference taken by dsa_tree_touch() during parsing is dropped here; the
 * reference held across the switch's lifetime is the one taken in
 * dsa_switch_probe().
 */
int dsa_register_switch(struct dsa_switch *ds)
{
	int err;

	mutex_lock(&dsa2_mutex);
	err = dsa_switch_probe(ds);
	dsa_tree_put(ds->dst);
	mutex_unlock(&dsa2_mutex);

	return err;
}
EXPORT_SYMBOL_GPL(dsa_register_switch);
1760
/* Tear down the whole tree (all members), release this switch's ports and
 * drop its tree reference. Called with dsa2_mutex held.
 */
static void dsa_switch_remove(struct dsa_switch *ds)
{
	struct dsa_switch_tree *dst = ds->dst;

	dsa_tree_teardown(dst);
	dsa_switch_release_ports(ds);
	dsa_tree_put(dst);
}
1769
/* Driver exit point, serialized against other registrations/removals */
void dsa_unregister_switch(struct dsa_switch *ds)
{
	mutex_lock(&dsa2_mutex);
	dsa_switch_remove(ds);
	mutex_unlock(&dsa2_mutex);
}
EXPORT_SYMBOL_GPL(dsa_unregister_switch);
1777
/* If the DSA master chooses to unregister its net_device on .shutdown, DSA is
 * blocking that operation from completion, due to the dev_hold taken inside
 * netdev_upper_dev_link. Unlink the DSA slave interfaces from being uppers of
 * the DSA master, so that the system can reboot successfully.
 */
void dsa_switch_shutdown(struct dsa_switch *ds)
{
	struct net_device *master, *slave_dev;
	struct dsa_port *dp;

	mutex_lock(&dsa2_mutex);

	/* Nothing to unlink if the switch never came up */
	if (!ds->setup)
		goto out;

	rtnl_lock();

	dsa_switch_for_each_user_port(dp, ds) {
		master = dsa_port_to_master(dp);
		slave_dev = dp->slave;

		netdev_upper_dev_unlink(master, slave_dev);
	}

	/* Disconnect from further netdevice notifiers on the master,
	 * since netdev_uses_dsa() will now return false.
	 */
	dsa_switch_for_each_cpu_port(dp, ds)
		dp->master->dsa_ptr = NULL;

	rtnl_unlock();
out:
	mutex_unlock(&dsa2_mutex);
}
EXPORT_SYMBOL_GPL(dsa_switch_shutdown);
This page took 0.191263 seconds and 4 git commands to generate.