// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * net/switchdev/switchdev.c - Switch device API
 * Copyright (c) 2014-2015 Jiri Pirko <[email protected]>
 * Copyright (c) 2014-2015 Scott Feldman <[email protected]>
 */

#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/init.h>
#include <linux/mutex.h>
#include <linux/notifier.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/if_bridge.h>
#include <linux/list.h>
#include <linux/workqueue.h>
#include <linux/if_vlan.h>
#include <linux/rtnetlink.h>
#include <net/switchdev.h>

static LIST_HEAD(deferred);
static DEFINE_SPINLOCK(deferred_lock);

typedef void switchdev_deferred_func_t(struct net_device *dev,
				       const void *data);

struct switchdev_deferred_item {
	struct list_head list;
	struct net_device *dev;
	netdevice_tracker dev_tracker;
	switchdev_deferred_func_t *func;
	unsigned long data[];
};

static struct switchdev_deferred_item *switchdev_deferred_dequeue(void)
{
	struct switchdev_deferred_item *dfitem;

	spin_lock_bh(&deferred_lock);
	if (list_empty(&deferred)) {
		dfitem = NULL;
		goto unlock;
	}
	dfitem = list_first_entry(&deferred,
				  struct switchdev_deferred_item, list);
	list_del(&dfitem->list);
unlock:
	spin_unlock_bh(&deferred_lock);
	return dfitem;
}

/**
 *	switchdev_deferred_process - Process ops in deferred queue
 *
 *	Called to flush the ops currently queued in deferred ops queue.
 *	rtnl_lock must be held.
 */
void switchdev_deferred_process(void)
{
	struct switchdev_deferred_item *dfitem;

	ASSERT_RTNL();

	while ((dfitem = switchdev_deferred_dequeue())) {
		dfitem->func(dfitem->dev, dfitem->data);
		dev_put_track(dfitem->dev, &dfitem->dev_tracker);
		kfree(dfitem);
	}
}
EXPORT_SYMBOL_GPL(switchdev_deferred_process);

static void switchdev_deferred_process_work(struct work_struct *work)
{
	rtnl_lock();
	switchdev_deferred_process();
	rtnl_unlock();
}

static DECLARE_WORK(deferred_process_work, switchdev_deferred_process_work);

static int switchdev_deferred_enqueue(struct net_device *dev,
				      const void *data, size_t data_len,
				      switchdev_deferred_func_t *func)
{
	struct switchdev_deferred_item *dfitem;

	dfitem = kmalloc(struct_size(dfitem, data, data_len), GFP_ATOMIC);
	if (!dfitem)
		return -ENOMEM;
	dfitem->dev = dev;
	dfitem->func = func;
	memcpy(dfitem->data, data, data_len);
	dev_hold_track(dev, &dfitem->dev_tracker, GFP_ATOMIC);
	spin_lock_bh(&deferred_lock);
	list_add_tail(&dfitem->list, &deferred);
	spin_unlock_bh(&deferred_lock);
	schedule_work(&deferred_process_work);
	return 0;
}

static int switchdev_port_attr_notify(enum switchdev_notifier_type nt,
				      struct net_device *dev,
				      const struct switchdev_attr *attr,
				      struct netlink_ext_ack *extack)
{
	int err;
	int rc;

	struct switchdev_notifier_port_attr_info attr_info = {
		.attr = attr,
		.handled = false,
	};

	rc = call_switchdev_blocking_notifiers(nt, dev,
					       &attr_info.info, extack);
	err = notifier_to_errno(rc);
	if (err) {
		WARN_ON(!attr_info.handled);
		return err;
	}

	if (!attr_info.handled)
		return -EOPNOTSUPP;

	return 0;
}

static int switchdev_port_attr_set_now(struct net_device *dev,
				       const struct switchdev_attr *attr,
				       struct netlink_ext_ack *extack)
{
	return switchdev_port_attr_notify(SWITCHDEV_PORT_ATTR_SET, dev, attr,
					  extack);
}

static void switchdev_port_attr_set_deferred(struct net_device *dev,
					     const void *data)
{
	const struct switchdev_attr *attr = data;
	int err;

	err = switchdev_port_attr_set_now(dev, attr, NULL);
	if (err && err != -EOPNOTSUPP)
		netdev_err(dev, "failed (err=%d) to set attribute (id=%d)\n",
			   err, attr->id);
	if (attr->complete)
		attr->complete(dev, err, attr->complete_priv);
}

static int switchdev_port_attr_set_defer(struct net_device *dev,
					 const struct switchdev_attr *attr)
{
	return switchdev_deferred_enqueue(dev, attr, sizeof(*attr),
					  switchdev_port_attr_set_deferred);
}

/**
 *	switchdev_port_attr_set - Set port attribute
 *
 *	@dev: port device
 *	@attr: attribute to set
 *	@extack: netlink extended ack, for error message propagation
 *
 *	If SWITCHDEV_F_DEFER is not set in @attr->flags, rtnl_lock must be
 *	held and the caller must not be in an atomic section.
 */
int switchdev_port_attr_set(struct net_device *dev,
			    const struct switchdev_attr *attr,
			    struct netlink_ext_ack *extack)
{
	if (attr->flags & SWITCHDEV_F_DEFER)
		return switchdev_port_attr_set_defer(dev, attr);
	ASSERT_RTNL();
	return switchdev_port_attr_set_now(dev, attr, extack);
}
EXPORT_SYMBOL_GPL(switchdev_port_attr_set);
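
/* Illustrative sketch (not part of this file): a caller in atomic context
 * can defer an STP state change like so. The brport_dev variable is
 * hypothetical; the attribute layout comes from <net/switchdev.h>.
 *
 *	struct switchdev_attr attr = {
 *		.orig_dev = brport_dev,
 *		.id = SWITCHDEV_ATTR_ID_PORT_STP_STATE,
 *		.flags = SWITCHDEV_F_DEFER,
 *		.u.stp_state = BR_STATE_FORWARDING,
 *	};
 *
 *	err = switchdev_port_attr_set(brport_dev, &attr, NULL);
 *
 * With SWITCHDEV_F_DEFER set, the attribute is copied and queued via
 * switchdev_deferred_enqueue(), then applied later under rtnl_lock by
 * deferred_process_work.
 */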

static size_t switchdev_obj_size(const struct switchdev_obj *obj)
{
	switch (obj->id) {
	case SWITCHDEV_OBJ_ID_PORT_VLAN:
		return sizeof(struct switchdev_obj_port_vlan);
	case SWITCHDEV_OBJ_ID_PORT_MDB:
		return sizeof(struct switchdev_obj_port_mdb);
	case SWITCHDEV_OBJ_ID_HOST_MDB:
		return sizeof(struct switchdev_obj_port_mdb);
	default:
		BUG();
	}
	return 0;
}

static int switchdev_port_obj_notify(enum switchdev_notifier_type nt,
				     struct net_device *dev,
				     const struct switchdev_obj *obj,
				     struct netlink_ext_ack *extack)
{
	int rc;
	int err;

	struct switchdev_notifier_port_obj_info obj_info = {
		.obj = obj,
		.handled = false,
	};

	rc = call_switchdev_blocking_notifiers(nt, dev, &obj_info.info, extack);
	err = notifier_to_errno(rc);
	if (err) {
		WARN_ON(!obj_info.handled);
		return err;
	}
	if (!obj_info.handled)
		return -EOPNOTSUPP;
	return 0;
}

static void switchdev_port_obj_add_deferred(struct net_device *dev,
					    const void *data)
{
	const struct switchdev_obj *obj = data;
	int err;

	ASSERT_RTNL();
	err = switchdev_port_obj_notify(SWITCHDEV_PORT_OBJ_ADD,
					dev, obj, NULL);
	if (err && err != -EOPNOTSUPP)
		netdev_err(dev, "failed (err=%d) to add object (id=%d)\n",
			   err, obj->id);
	if (obj->complete)
		obj->complete(dev, err, obj->complete_priv);
}

static int switchdev_port_obj_add_defer(struct net_device *dev,
					const struct switchdev_obj *obj)
{
	return switchdev_deferred_enqueue(dev, obj, switchdev_obj_size(obj),
					  switchdev_port_obj_add_deferred);
}

/**
 *	switchdev_port_obj_add - Add port object
 *
 *	@dev: port device
 *	@obj: object to add
 *	@extack: netlink extended ack
 *
 *	If SWITCHDEV_F_DEFER is not set in @obj->flags, rtnl_lock must be
 *	held and the caller must not be in an atomic section.
 */
int switchdev_port_obj_add(struct net_device *dev,
			   const struct switchdev_obj *obj,
			   struct netlink_ext_ack *extack)
{
	if (obj->flags & SWITCHDEV_F_DEFER)
		return switchdev_port_obj_add_defer(dev, obj);
	ASSERT_RTNL();
	return switchdev_port_obj_notify(SWITCHDEV_PORT_OBJ_ADD,
					 dev, obj, extack);
}
EXPORT_SYMBOL_GPL(switchdev_port_obj_add);
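
/* Illustrative sketch (not part of this file): installing VLAN 10 as
 * PVID/untagged on a switchdev port. The brport_dev variable is
 * hypothetical; the object layout comes from <net/switchdev.h>.
 *
 *	struct switchdev_obj_port_vlan vlan = {
 *		.obj.orig_dev = brport_dev,
 *		.obj.id = SWITCHDEV_OBJ_ID_PORT_VLAN,
 *		.flags = BRIDGE_VLAN_INFO_PVID | BRIDGE_VLAN_INFO_UNTAGGED,
 *		.vid = 10,
 *	};
 *
 *	err = switchdev_port_obj_add(brport_dev, &vlan.obj, extack);
 *
 * Drivers receive this as SWITCHDEV_PORT_OBJ_ADD on the blocking chain and
 * recover the VLAN with the SWITCHDEV_OBJ_PORT_VLAN() container_of helper.
 */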

static int switchdev_port_obj_del_now(struct net_device *dev,
				      const struct switchdev_obj *obj)
{
	return switchdev_port_obj_notify(SWITCHDEV_PORT_OBJ_DEL,
					 dev, obj, NULL);
}

static void switchdev_port_obj_del_deferred(struct net_device *dev,
					    const void *data)
{
	const struct switchdev_obj *obj = data;
	int err;

	err = switchdev_port_obj_del_now(dev, obj);
	if (err && err != -EOPNOTSUPP)
		netdev_err(dev, "failed (err=%d) to del object (id=%d)\n",
			   err, obj->id);
	if (obj->complete)
		obj->complete(dev, err, obj->complete_priv);
}

static int switchdev_port_obj_del_defer(struct net_device *dev,
					const struct switchdev_obj *obj)
{
	return switchdev_deferred_enqueue(dev, obj, switchdev_obj_size(obj),
					  switchdev_port_obj_del_deferred);
}

/**
 *	switchdev_port_obj_del - Delete port object
 *
 *	@dev: port device
 *	@obj: object to delete
 *
 *	If SWITCHDEV_F_DEFER is not set in @obj->flags, rtnl_lock must be
 *	held and the caller must not be in an atomic section.
 */
int switchdev_port_obj_del(struct net_device *dev,
			   const struct switchdev_obj *obj)
{
	if (obj->flags & SWITCHDEV_F_DEFER)
		return switchdev_port_obj_del_defer(dev, obj);
	ASSERT_RTNL();
	return switchdev_port_obj_del_now(dev, obj);
}
EXPORT_SYMBOL_GPL(switchdev_port_obj_del);

static ATOMIC_NOTIFIER_HEAD(switchdev_notif_chain);
static BLOCKING_NOTIFIER_HEAD(switchdev_blocking_notif_chain);

/**
 *	register_switchdev_notifier - Register notifier
 *	@nb: notifier_block
 *
 *	Register switch device notifier.
 */
int register_switchdev_notifier(struct notifier_block *nb)
{
	return atomic_notifier_chain_register(&switchdev_notif_chain, nb);
}
EXPORT_SYMBOL_GPL(register_switchdev_notifier);

/**
 *	unregister_switchdev_notifier - Unregister notifier
 *	@nb: notifier_block
 *
 *	Unregister switch device notifier.
 */
int unregister_switchdev_notifier(struct notifier_block *nb)
{
	return atomic_notifier_chain_unregister(&switchdev_notif_chain, nb);
}
EXPORT_SYMBOL_GPL(unregister_switchdev_notifier);

/**
 *	call_switchdev_notifiers - Call notifiers
 *	@val: value passed unmodified to notifier function
 *	@dev: port device
 *	@info: notifier information data
 *	@extack: netlink extended ack
 *
 *	Call all switchdev notifier blocks on the atomic chain.
 */
int call_switchdev_notifiers(unsigned long val, struct net_device *dev,
			     struct switchdev_notifier_info *info,
			     struct netlink_ext_ack *extack)
{
	info->dev = dev;
	info->extack = extack;
	return atomic_notifier_call_chain(&switchdev_notif_chain, val, info);
}
EXPORT_SYMBOL_GPL(call_switchdev_notifiers);
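
/* Illustrative sketch (not part of this file): a driver consuming events
 * from the atomic chain. All foo_* names are hypothetical; the
 * switchdev_notifier_info_to_dev() helper comes from <net/switchdev.h>.
 *
 *	static int foo_switchdev_event(struct notifier_block *nb,
 *				       unsigned long event, void *ptr)
 *	{
 *		struct net_device *dev = switchdev_notifier_info_to_dev(ptr);
 *
 *		switch (event) {
 *		case SWITCHDEV_FDB_ADD_TO_DEVICE:
 *		case SWITCHDEV_FDB_DEL_TO_DEVICE:
 *			// atomic context: schedule work, do not sleep here
 *			return NOTIFY_DONE;
 *		}
 *		return NOTIFY_DONE;
 *	}
 *
 *	static struct notifier_block foo_switchdev_nb = {
 *		.notifier_call = foo_switchdev_event,
 *	};
 *
 *	err = register_switchdev_notifier(&foo_switchdev_nb);
 */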

int register_switchdev_blocking_notifier(struct notifier_block *nb)
{
	struct blocking_notifier_head *chain = &switchdev_blocking_notif_chain;

	return blocking_notifier_chain_register(chain, nb);
}
EXPORT_SYMBOL_GPL(register_switchdev_blocking_notifier);

int unregister_switchdev_blocking_notifier(struct notifier_block *nb)
{
	struct blocking_notifier_head *chain = &switchdev_blocking_notif_chain;

	return blocking_notifier_chain_unregister(chain, nb);
}
EXPORT_SYMBOL_GPL(unregister_switchdev_blocking_notifier);

int call_switchdev_blocking_notifiers(unsigned long val, struct net_device *dev,
				      struct switchdev_notifier_info *info,
				      struct netlink_ext_ack *extack)
{
	info->dev = dev;
	info->extack = extack;
	return blocking_notifier_call_chain(&switchdev_blocking_notif_chain,
					    val, info);
}
EXPORT_SYMBOL_GPL(call_switchdev_blocking_notifiers);

struct switchdev_nested_priv {
	bool (*check_cb)(const struct net_device *dev);
	bool (*foreign_dev_check_cb)(const struct net_device *dev,
				     const struct net_device *foreign_dev);
	const struct net_device *dev;
	struct net_device *lower_dev;
};

static int switchdev_lower_dev_walk(struct net_device *lower_dev,
				    struct netdev_nested_priv *priv)
{
	struct switchdev_nested_priv *switchdev_priv = priv->data;
	bool (*foreign_dev_check_cb)(const struct net_device *dev,
				     const struct net_device *foreign_dev);
	bool (*check_cb)(const struct net_device *dev);
	const struct net_device *dev;

	check_cb = switchdev_priv->check_cb;
	foreign_dev_check_cb = switchdev_priv->foreign_dev_check_cb;
	dev = switchdev_priv->dev;

	if (check_cb(lower_dev) && !foreign_dev_check_cb(lower_dev, dev)) {
		switchdev_priv->lower_dev = lower_dev;
		return 1;
	}

	return 0;
}

static struct net_device *
switchdev_lower_dev_find_rcu(struct net_device *dev,
			     bool (*check_cb)(const struct net_device *dev),
			     bool (*foreign_dev_check_cb)(const struct net_device *dev,
							  const struct net_device *foreign_dev))
{
	struct switchdev_nested_priv switchdev_priv = {
		.check_cb = check_cb,
		.foreign_dev_check_cb = foreign_dev_check_cb,
		.dev = dev,
		.lower_dev = NULL,
	};
	struct netdev_nested_priv priv = {
		.data = &switchdev_priv,
	};

	netdev_walk_all_lower_dev_rcu(dev, switchdev_lower_dev_walk, &priv);

	return switchdev_priv.lower_dev;
}

static struct net_device *
switchdev_lower_dev_find(struct net_device *dev,
			 bool (*check_cb)(const struct net_device *dev),
			 bool (*foreign_dev_check_cb)(const struct net_device *dev,
						      const struct net_device *foreign_dev))
{
	struct switchdev_nested_priv switchdev_priv = {
		.check_cb = check_cb,
		.foreign_dev_check_cb = foreign_dev_check_cb,
		.dev = dev,
		.lower_dev = NULL,
	};
	struct netdev_nested_priv priv = {
		.data = &switchdev_priv,
	};

	netdev_walk_all_lower_dev(dev, switchdev_lower_dev_walk, &priv);

	return switchdev_priv.lower_dev;
}

static int __switchdev_handle_fdb_event_to_device(struct net_device *dev,
		struct net_device *orig_dev, unsigned long event,
		const struct switchdev_notifier_fdb_info *fdb_info,
		bool (*check_cb)(const struct net_device *dev),
		bool (*foreign_dev_check_cb)(const struct net_device *dev,
					     const struct net_device *foreign_dev),
		int (*mod_cb)(struct net_device *dev, struct net_device *orig_dev,
			      unsigned long event, const void *ctx,
			      const struct switchdev_notifier_fdb_info *fdb_info),
		int (*lag_mod_cb)(struct net_device *dev, struct net_device *orig_dev,
				  unsigned long event, const void *ctx,
				  const struct switchdev_notifier_fdb_info *fdb_info))
{
	const struct switchdev_notifier_info *info = &fdb_info->info;
	struct net_device *br, *lower_dev;
	struct list_head *iter;
	int err = -EOPNOTSUPP;

	if (check_cb(dev))
		return mod_cb(dev, orig_dev, event, info->ctx, fdb_info);

	if (netif_is_lag_master(dev)) {
		if (!switchdev_lower_dev_find_rcu(dev, check_cb, foreign_dev_check_cb))
			goto maybe_bridged_with_us;

		/* This is a LAG interface that we offload */
		if (!lag_mod_cb)
			return -EOPNOTSUPP;

		return lag_mod_cb(dev, orig_dev, event, info->ctx, fdb_info);
	}

	/* Recurse through lower interfaces in case the FDB entry is pointing
	 * towards a bridge device.
	 */
	if (netif_is_bridge_master(dev)) {
		if (!switchdev_lower_dev_find_rcu(dev, check_cb, foreign_dev_check_cb))
			return 0;

		/* This is a bridge interface that we offload */
		netdev_for_each_lower_dev(dev, lower_dev, iter) {
			/* Do not propagate FDB entries across bridges */
			if (netif_is_bridge_master(lower_dev))
				continue;

			/* Bridge ports might be either us, or LAG interfaces
			 * that we offload.
			 */
			if (!check_cb(lower_dev) &&
			    !switchdev_lower_dev_find_rcu(lower_dev, check_cb,
							  foreign_dev_check_cb))
				continue;

			err = __switchdev_handle_fdb_event_to_device(lower_dev, orig_dev,
								     event, fdb_info, check_cb,
								     foreign_dev_check_cb,
								     mod_cb, lag_mod_cb);
			if (err && err != -EOPNOTSUPP)
				return err;
		}

		return 0;
	}

maybe_bridged_with_us:
	/* Event is neither on a bridge nor a LAG. Check whether it is on an
	 * interface that is in a bridge with us.
	 */
	br = netdev_master_upper_dev_get_rcu(dev);
	if (!br || !netif_is_bridge_master(br))
		return 0;

	if (!switchdev_lower_dev_find_rcu(br, check_cb, foreign_dev_check_cb))
		return 0;

	return __switchdev_handle_fdb_event_to_device(br, orig_dev, event, fdb_info,
						      check_cb, foreign_dev_check_cb,
						      mod_cb, lag_mod_cb);
}

int switchdev_handle_fdb_event_to_device(struct net_device *dev, unsigned long event,
		const struct switchdev_notifier_fdb_info *fdb_info,
		bool (*check_cb)(const struct net_device *dev),
		bool (*foreign_dev_check_cb)(const struct net_device *dev,
					     const struct net_device *foreign_dev),
		int (*mod_cb)(struct net_device *dev, struct net_device *orig_dev,
			      unsigned long event, const void *ctx,
			      const struct switchdev_notifier_fdb_info *fdb_info),
		int (*lag_mod_cb)(struct net_device *dev, struct net_device *orig_dev,
				  unsigned long event, const void *ctx,
				  const struct switchdev_notifier_fdb_info *fdb_info))
{
	int err;

	err = __switchdev_handle_fdb_event_to_device(dev, dev, event, fdb_info,
						     check_cb, foreign_dev_check_cb,
						     mod_cb, lag_mod_cb);
	if (err == -EOPNOTSUPP)
		err = 0;

	return err;
}
EXPORT_SYMBOL_GPL(switchdev_handle_fdb_event_to_device);
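
/* Illustrative sketch (not part of this file): how a driver's atomic
 * notifier might dispatch FDB events through the helper above. All foo_*
 * names are hypothetical: foo_dev_check returns true for the driver's own
 * ports, foo_foreign_dev_check tells apart other switchdev drivers' ports.
 *
 *	static int foo_fdb_event(struct notifier_block *nb,
 *				 unsigned long event, void *ptr)
 *	{
 *		struct net_device *dev = switchdev_notifier_info_to_dev(ptr);
 *
 *		switch (event) {
 *		case SWITCHDEV_FDB_ADD_TO_DEVICE:
 *		case SWITCHDEV_FDB_DEL_TO_DEVICE:
 *			return notifier_from_errno(
 *				switchdev_handle_fdb_event_to_device(dev, event, ptr,
 *								     foo_dev_check,
 *								     foo_foreign_dev_check,
 *								     foo_fdb_mod_cb, NULL));
 *		}
 *		return NOTIFY_DONE;
 *	}
 *
 * Passing NULL for lag_mod_cb makes the helper return -EOPNOTSUPP (mapped
 * to 0 by the wrapper) for FDB entries on offloaded LAG interfaces.
 */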

static int __switchdev_handle_port_obj_add(struct net_device *dev,
			struct switchdev_notifier_port_obj_info *port_obj_info,
			bool (*check_cb)(const struct net_device *dev),
			bool (*foreign_dev_check_cb)(const struct net_device *dev,
						     const struct net_device *foreign_dev),
			int (*add_cb)(struct net_device *dev, const void *ctx,
				      const struct switchdev_obj *obj,
				      struct netlink_ext_ack *extack))
{
	struct switchdev_notifier_info *info = &port_obj_info->info;
	struct net_device *br, *lower_dev;
	struct netlink_ext_ack *extack;
	struct list_head *iter;
	int err = -EOPNOTSUPP;

	extack = switchdev_notifier_info_to_extack(info);

	if (check_cb(dev)) {
		err = add_cb(dev, info->ctx, port_obj_info->obj, extack);
		if (err != -EOPNOTSUPP)
			port_obj_info->handled = true;
		return err;
	}

	/* Switch ports might be stacked under e.g. a LAG. Ignore the
	 * unsupported devices, another driver might be able to handle them. But
	 * propagate to the callers any hard errors.
	 *
	 * If the driver does its own bookkeeping of stacked ports, it's not
	 * necessary to go through this helper.
	 */
	netdev_for_each_lower_dev(dev, lower_dev, iter) {
		if (netif_is_bridge_master(lower_dev))
			continue;

		/* When searching for switchdev interfaces that are neighbors
		 * of foreign ones, and @dev is a bridge, do not recurse on the
		 * foreign interface again, it was already visited.
		 */
		if (foreign_dev_check_cb && !check_cb(lower_dev) &&
		    !switchdev_lower_dev_find(lower_dev, check_cb, foreign_dev_check_cb))
			continue;

		err = __switchdev_handle_port_obj_add(lower_dev, port_obj_info,
						      check_cb, foreign_dev_check_cb,
						      add_cb);
		if (err && err != -EOPNOTSUPP)
			return err;
	}

	/* Event is neither on a bridge nor a LAG. Check whether it is on an
	 * interface that is in a bridge with us.
	 */
	if (!foreign_dev_check_cb)
		return err;

	br = netdev_master_upper_dev_get(dev);
	if (!br || !netif_is_bridge_master(br))
		return err;

	if (!switchdev_lower_dev_find(br, check_cb, foreign_dev_check_cb))
		return err;

	return __switchdev_handle_port_obj_add(br, port_obj_info, check_cb,
					       foreign_dev_check_cb, add_cb);
}

/* Pass through a port object addition, if @dev passes @check_cb, or replicate
 * it towards all lower interfaces of @dev that pass @check_cb, if @dev is a
 * bridge or a LAG.
 */
int switchdev_handle_port_obj_add(struct net_device *dev,
			struct switchdev_notifier_port_obj_info *port_obj_info,
			bool (*check_cb)(const struct net_device *dev),
			int (*add_cb)(struct net_device *dev, const void *ctx,
				      const struct switchdev_obj *obj,
				      struct netlink_ext_ack *extack))
{
	int err;

	err = __switchdev_handle_port_obj_add(dev, port_obj_info, check_cb,
					      NULL, add_cb);
	if (err == -EOPNOTSUPP)
		err = 0;
	return err;
}
EXPORT_SYMBOL_GPL(switchdev_handle_port_obj_add);
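
/* Illustrative sketch (not part of this file): a driver's blocking notifier
 * typically feeds SWITCHDEV_PORT_OBJ_ADD/DEL into these helpers. The foo_*
 * callbacks are hypothetical.
 *
 *	static int foo_switchdev_blocking_event(struct notifier_block *nb,
 *						unsigned long event, void *ptr)
 *	{
 *		struct net_device *dev = switchdev_notifier_info_to_dev(ptr);
 *		int err;
 *
 *		switch (event) {
 *		case SWITCHDEV_PORT_OBJ_ADD:
 *			err = switchdev_handle_port_obj_add(dev, ptr,
 *							    foo_dev_check,
 *							    foo_port_obj_add);
 *			return notifier_from_errno(err);
 *		case SWITCHDEV_PORT_OBJ_DEL:
 *			err = switchdev_handle_port_obj_del(dev, ptr,
 *							    foo_dev_check,
 *							    foo_port_obj_del);
 *			return notifier_from_errno(err);
 *		}
 *		return NOTIFY_DONE;
 *	}
 */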

/* Same as switchdev_handle_port_obj_add(), except if object is notified on a
 * @dev that passes @foreign_dev_check_cb, it is replicated towards all devices
 * that pass @check_cb and are in the same bridge as @dev.
 */
int switchdev_handle_port_obj_add_foreign(struct net_device *dev,
			struct switchdev_notifier_port_obj_info *port_obj_info,
			bool (*check_cb)(const struct net_device *dev),
			bool (*foreign_dev_check_cb)(const struct net_device *dev,
						     const struct net_device *foreign_dev),
			int (*add_cb)(struct net_device *dev, const void *ctx,
				      const struct switchdev_obj *obj,
				      struct netlink_ext_ack *extack))
{
	int err;

	err = __switchdev_handle_port_obj_add(dev, port_obj_info, check_cb,
					      foreign_dev_check_cb, add_cb);
	if (err == -EOPNOTSUPP)
		err = 0;
	return err;
}
EXPORT_SYMBOL_GPL(switchdev_handle_port_obj_add_foreign);

static int __switchdev_handle_port_obj_del(struct net_device *dev,
			struct switchdev_notifier_port_obj_info *port_obj_info,
			bool (*check_cb)(const struct net_device *dev),
			bool (*foreign_dev_check_cb)(const struct net_device *dev,
						     const struct net_device *foreign_dev),
			int (*del_cb)(struct net_device *dev, const void *ctx,
				      const struct switchdev_obj *obj))
{
	struct switchdev_notifier_info *info = &port_obj_info->info;
	struct net_device *br, *lower_dev;
	struct list_head *iter;
	int err = -EOPNOTSUPP;

	if (check_cb(dev)) {
		err = del_cb(dev, info->ctx, port_obj_info->obj);
		if (err != -EOPNOTSUPP)
			port_obj_info->handled = true;
		return err;
	}

	/* Switch ports might be stacked under e.g. a LAG. Ignore the
	 * unsupported devices, another driver might be able to handle them. But
	 * propagate to the callers any hard errors.
	 *
	 * If the driver does its own bookkeeping of stacked ports, it's not
	 * necessary to go through this helper.
	 */
	netdev_for_each_lower_dev(dev, lower_dev, iter) {
		if (netif_is_bridge_master(lower_dev))
			continue;

		/* When searching for switchdev interfaces that are neighbors
		 * of foreign ones, and @dev is a bridge, do not recurse on the
		 * foreign interface again, it was already visited.
		 */
		if (foreign_dev_check_cb && !check_cb(lower_dev) &&
		    !switchdev_lower_dev_find(lower_dev, check_cb, foreign_dev_check_cb))
			continue;

		err = __switchdev_handle_port_obj_del(lower_dev, port_obj_info,
						      check_cb, foreign_dev_check_cb,
						      del_cb);
		if (err && err != -EOPNOTSUPP)
			return err;
	}

	/* Event is neither on a bridge nor a LAG. Check whether it is on an
	 * interface that is in a bridge with us.
	 */
	if (!foreign_dev_check_cb)
		return err;

	br = netdev_master_upper_dev_get(dev);
	if (!br || !netif_is_bridge_master(br))
		return err;

	if (!switchdev_lower_dev_find(br, check_cb, foreign_dev_check_cb))
		return err;

	return __switchdev_handle_port_obj_del(br, port_obj_info, check_cb,
					       foreign_dev_check_cb, del_cb);
}

/* Pass through a port object deletion, if @dev passes @check_cb, or replicate
 * it towards all lower interfaces of @dev that pass @check_cb, if @dev is a
 * bridge or a LAG.
 */
int switchdev_handle_port_obj_del(struct net_device *dev,
			struct switchdev_notifier_port_obj_info *port_obj_info,
			bool (*check_cb)(const struct net_device *dev),
			int (*del_cb)(struct net_device *dev, const void *ctx,
				      const struct switchdev_obj *obj))
{
	int err;

	err = __switchdev_handle_port_obj_del(dev, port_obj_info, check_cb,
					      NULL, del_cb);
	if (err == -EOPNOTSUPP)
		err = 0;
	return err;
}
EXPORT_SYMBOL_GPL(switchdev_handle_port_obj_del);

/* Same as switchdev_handle_port_obj_del(), except if object is notified on a
 * @dev that passes @foreign_dev_check_cb, it is replicated towards all devices
 * that pass @check_cb and are in the same bridge as @dev.
 */
int switchdev_handle_port_obj_del_foreign(struct net_device *dev,
			struct switchdev_notifier_port_obj_info *port_obj_info,
			bool (*check_cb)(const struct net_device *dev),
			bool (*foreign_dev_check_cb)(const struct net_device *dev,
						     const struct net_device *foreign_dev),
			int (*del_cb)(struct net_device *dev, const void *ctx,
				      const struct switchdev_obj *obj))
{
	int err;

	err = __switchdev_handle_port_obj_del(dev, port_obj_info, check_cb,
					      foreign_dev_check_cb, del_cb);
	if (err == -EOPNOTSUPP)
		err = 0;
	return err;
}
EXPORT_SYMBOL_GPL(switchdev_handle_port_obj_del_foreign);

static int __switchdev_handle_port_attr_set(struct net_device *dev,
			struct switchdev_notifier_port_attr_info *port_attr_info,
			bool (*check_cb)(const struct net_device *dev),
			int (*set_cb)(struct net_device *dev, const void *ctx,
				      const struct switchdev_attr *attr,
				      struct netlink_ext_ack *extack))
{
	struct switchdev_notifier_info *info = &port_attr_info->info;
	struct netlink_ext_ack *extack;
	struct net_device *lower_dev;
	struct list_head *iter;
	int err = -EOPNOTSUPP;

	extack = switchdev_notifier_info_to_extack(info);

	if (check_cb(dev)) {
		err = set_cb(dev, info->ctx, port_attr_info->attr, extack);
		if (err != -EOPNOTSUPP)
			port_attr_info->handled = true;
		return err;
	}

	/* Switch ports might be stacked under e.g. a LAG. Ignore the
	 * unsupported devices, another driver might be able to handle them. But
	 * propagate to the callers any hard errors.
	 *
	 * If the driver does its own bookkeeping of stacked ports, it's not
	 * necessary to go through this helper.
	 */
	netdev_for_each_lower_dev(dev, lower_dev, iter) {
		if (netif_is_bridge_master(lower_dev))
			continue;

		err = __switchdev_handle_port_attr_set(lower_dev, port_attr_info,
						       check_cb, set_cb);
		if (err && err != -EOPNOTSUPP)
			return err;
	}

	return err;
}

int switchdev_handle_port_attr_set(struct net_device *dev,
			struct switchdev_notifier_port_attr_info *port_attr_info,
			bool (*check_cb)(const struct net_device *dev),
			int (*set_cb)(struct net_device *dev, const void *ctx,
				      const struct switchdev_attr *attr,
				      struct netlink_ext_ack *extack))
{
	int err;

	err = __switchdev_handle_port_attr_set(dev, port_attr_info, check_cb,
					       set_cb);
	if (err == -EOPNOTSUPP)
		err = 0;
	return err;
}
EXPORT_SYMBOL_GPL(switchdev_handle_port_attr_set);
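
/* Illustrative sketch (not part of this file): the SWITCHDEV_PORT_ATTR_SET
 * case of the same hypothetical blocking notifier, using the helper above.
 *
 *	case SWITCHDEV_PORT_ATTR_SET:
 *		err = switchdev_handle_port_attr_set(dev, ptr,
 *						     foo_dev_check,
 *						     foo_port_attr_set);
 *		return notifier_from_errno(err);
 */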

int switchdev_bridge_port_offload(struct net_device *brport_dev,
				  struct net_device *dev, const void *ctx,
				  struct notifier_block *atomic_nb,
				  struct notifier_block *blocking_nb,
				  bool tx_fwd_offload,
				  struct netlink_ext_ack *extack)
{
	struct switchdev_notifier_brport_info brport_info = {
		.brport = {
			.dev = dev,
			.ctx = ctx,
			.atomic_nb = atomic_nb,
			.blocking_nb = blocking_nb,
			.tx_fwd_offload = tx_fwd_offload,
		},
	};
	int err;

	ASSERT_RTNL();

	err = call_switchdev_blocking_notifiers(SWITCHDEV_BRPORT_OFFLOADED,
						brport_dev, &brport_info.info,
						extack);
	return notifier_to_errno(err);
}
EXPORT_SYMBOL_GPL(switchdev_bridge_port_offload);
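
/* Illustrative sketch (not part of this file): a driver calls this when one
 * of its ports becomes a bridge port, so the bridge layer can replay its
 * existing state to the port's notifier blocks. The names are hypothetical;
 * tx_fwd_offload requests TX forwarding offload if the hardware supports it.
 *
 *	err = switchdev_bridge_port_offload(brport_dev, port_dev, port,
 *					    &foo_switchdev_nb,
 *					    &foo_switchdev_blocking_nb,
 *					    false, extack);
 *	if (err)
 *		goto out_rollback;
 *
 * The matching switchdev_bridge_port_unoffload() call below belongs in the
 * port's bridge-leave path.
 */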

void switchdev_bridge_port_unoffload(struct net_device *brport_dev,
				     const void *ctx,
				     struct notifier_block *atomic_nb,
				     struct notifier_block *blocking_nb)
{
	struct switchdev_notifier_brport_info brport_info = {
		.brport = {
			.ctx = ctx,
			.atomic_nb = atomic_nb,
			.blocking_nb = blocking_nb,
		},
	};

	ASSERT_RTNL();

	call_switchdev_blocking_notifiers(SWITCHDEV_BRPORT_UNOFFLOADED,
					  brport_dev, &brport_info.info,
					  NULL);
}
EXPORT_SYMBOL_GPL(switchdev_bridge_port_unoffload);