// SPDX-License-Identifier: GPL-2.0-or-later

#include <linux/bits.h>
#include <linux/bitfield.h>
#include <linux/idr.h>
#include <linux/kernel.h>
#include <linux/netdevice.h>
#include <linux/netlink.h>
#include <linux/skbuff.h>
#include <linux/xarray.h>
#include <net/devlink.h>
#include <net/net_shaper.h>

#include "shaper_nl_gen.h"

#include "../core/dev.h"

#define NET_SHAPER_SCOPE_SHIFT	26
#define NET_SHAPER_ID_MASK	GENMASK(NET_SHAPER_SCOPE_SHIFT - 1, 0)
#define NET_SHAPER_SCOPE_MASK	GENMASK(31, NET_SHAPER_SCOPE_SHIFT)

#define NET_SHAPER_ID_UNSPEC NET_SHAPER_ID_MASK
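/* Worked example of the layout above (illustrative, not from the original
 * source): a handle packs into a 32-bit xarray index as (scope << 26) | id.
 * With NET_SHAPER_SCOPE_QUEUE == 2, the queue 5 shaper maps to index
 * (2 << 26) | 5 == 0x08000005; ids span 0 .. NET_SHAPER_ID_MASK - 1, with
 * the all-ones value reserved as NET_SHAPER_ID_UNSPEC.
 */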
struct net_shaper_hierarchy {
	struct xarray shapers;
};

struct net_shaper_nl_ctx {
	struct net_shaper_binding binding;
	netdevice_tracker dev_tracker;
	unsigned long start_index;
};
static struct net_shaper_binding *net_shaper_binding_from_ctx(void *ctx)
{
	return &((struct net_shaper_nl_ctx *)ctx)->binding;
}
static void net_shaper_lock(struct net_shaper_binding *binding)
{
	switch (binding->type) {
	case NET_SHAPER_BINDING_TYPE_NETDEV:
		netdev_lock(binding->netdev);
		break;
	}
}

static void net_shaper_unlock(struct net_shaper_binding *binding)
{
	switch (binding->type) {
	case NET_SHAPER_BINDING_TYPE_NETDEV:
		netdev_unlock(binding->netdev);
		break;
	}
}
static struct net_shaper_hierarchy *
net_shaper_hierarchy(struct net_shaper_binding *binding)
{
	/* Pairs with WRITE_ONCE() in net_shaper_hierarchy_setup. */
	if (binding->type == NET_SHAPER_BINDING_TYPE_NETDEV)
		return READ_ONCE(binding->netdev->net_shaper_hierarchy);

	/* No other type supported yet. */
	return NULL;
}

static const struct net_shaper_ops *
net_shaper_ops(struct net_shaper_binding *binding)
{
	if (binding->type == NET_SHAPER_BINDING_TYPE_NETDEV)
		return binding->netdev->netdev_ops->net_shaper_ops;

	/* No other type supported yet. */
	return NULL;
}
/* Count the number of [multi] attributes of the given type. */
static int net_shaper_list_len(struct genl_info *info, int type)
{
	struct nlattr *attr;
	int rem, cnt = 0;

	nla_for_each_attr_type(attr, type, genlmsg_data(info->genlhdr),
			       genlmsg_len(info->genlhdr), rem)
		cnt++;
	return cnt;
}
static int net_shaper_handle_size(void)
{
	return nla_total_size(nla_total_size(sizeof(u32)) +
			      nla_total_size(sizeof(u32)));
}
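/* Informal size check (not from the original source): each inner
 * nla_total_size(sizeof(u32)) is 8 bytes (4-byte header + 4-byte payload),
 * so the outer nest totals nla_total_size(16) == 20 bytes, enough for a
 * handle nest holding the SCOPE and ID attributes.
 */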
static int net_shaper_fill_binding(struct sk_buff *msg,
				   const struct net_shaper_binding *binding,
				   u32 type)
{
	/* Should never happen, as currently only NETDEV is supported. */
	if (WARN_ON_ONCE(binding->type != NET_SHAPER_BINDING_TYPE_NETDEV))
		return -EINVAL;

	if (nla_put_u32(msg, type, binding->netdev->ifindex))
		return -EMSGSIZE;

	return 0;
}
static int net_shaper_fill_handle(struct sk_buff *msg,
				  const struct net_shaper_handle *handle,
				  u32 type)
{
	struct nlattr *handle_attr;

	if (handle->scope == NET_SHAPER_SCOPE_UNSPEC)
		return 0;

	handle_attr = nla_nest_start(msg, type);
	if (!handle_attr)
		return -EMSGSIZE;

	if (nla_put_u32(msg, NET_SHAPER_A_HANDLE_SCOPE, handle->scope) ||
	    (handle->scope >= NET_SHAPER_SCOPE_QUEUE &&
	     nla_put_u32(msg, NET_SHAPER_A_HANDLE_ID, handle->id)))
		goto handle_nest_cancel;

	nla_nest_end(msg, handle_attr);
	return 0;

handle_nest_cancel:
	nla_nest_cancel(msg, handle_attr);
	return -EMSGSIZE;
}
static int
net_shaper_fill_one(struct sk_buff *msg,
		    const struct net_shaper_binding *binding,
		    const struct net_shaper *shaper,
		    const struct genl_info *info)
{
	void *hdr;

	hdr = genlmsg_iput(msg, info);
	if (!hdr)
		return -EMSGSIZE;

	if (net_shaper_fill_binding(msg, binding, NET_SHAPER_A_IFINDEX) ||
	    net_shaper_fill_handle(msg, &shaper->parent,
				   NET_SHAPER_A_PARENT) ||
	    net_shaper_fill_handle(msg, &shaper->handle,
				   NET_SHAPER_A_HANDLE) ||
	    ((shaper->bw_min || shaper->bw_max || shaper->burst) &&
	     nla_put_u32(msg, NET_SHAPER_A_METRIC, shaper->metric)) ||
	    (shaper->bw_min &&
	     nla_put_uint(msg, NET_SHAPER_A_BW_MIN, shaper->bw_min)) ||
	    (shaper->bw_max &&
	     nla_put_uint(msg, NET_SHAPER_A_BW_MAX, shaper->bw_max)) ||
	    (shaper->burst &&
	     nla_put_uint(msg, NET_SHAPER_A_BURST, shaper->burst)) ||
	    (shaper->priority &&
	     nla_put_u32(msg, NET_SHAPER_A_PRIORITY, shaper->priority)) ||
	    (shaper->weight &&
	     nla_put_u32(msg, NET_SHAPER_A_WEIGHT, shaper->weight)))
		goto nla_put_failure;

	genlmsg_end(msg, hdr);
	return 0;

nla_put_failure:
	genlmsg_cancel(msg, hdr);
	return -EMSGSIZE;
}
/* Initialize the context, fetching the relevant device and
 * acquiring a reference to it.
 */
static int net_shaper_ctx_setup(const struct genl_info *info, int type,
				struct net_shaper_nl_ctx *ctx)
{
	struct net *ns = genl_info_net(info);
	struct net_device *dev;
	int ifindex;

	if (GENL_REQ_ATTR_CHECK(info, type))
		return -EINVAL;

	ifindex = nla_get_u32(info->attrs[type]);
	dev = netdev_get_by_index(ns, ifindex, &ctx->dev_tracker, GFP_KERNEL);
	if (!dev) {
		NL_SET_BAD_ATTR(info->extack, info->attrs[type]);
		return -ENOENT;
	}

	if (!dev->netdev_ops->net_shaper_ops) {
		NL_SET_BAD_ATTR(info->extack, info->attrs[type]);
		netdev_put(dev, &ctx->dev_tracker);
		return -EOPNOTSUPP;
	}

	ctx->binding.type = NET_SHAPER_BINDING_TYPE_NETDEV;
	ctx->binding.netdev = dev;
	return 0;
}
static void net_shaper_ctx_cleanup(struct net_shaper_nl_ctx *ctx)
{
	if (ctx->binding.type == NET_SHAPER_BINDING_TYPE_NETDEV)
		netdev_put(ctx->binding.netdev, &ctx->dev_tracker);
}
static u32 net_shaper_handle_to_index(const struct net_shaper_handle *handle)
{
	return FIELD_PREP(NET_SHAPER_SCOPE_MASK, handle->scope) |
		FIELD_PREP(NET_SHAPER_ID_MASK, handle->id);
}

static void net_shaper_index_to_handle(u32 index,
				       struct net_shaper_handle *handle)
{
	handle->scope = FIELD_GET(NET_SHAPER_SCOPE_MASK, index);
	handle->id = FIELD_GET(NET_SHAPER_ID_MASK, index);
}
static void net_shaper_default_parent(const struct net_shaper_handle *handle,
				      struct net_shaper_handle *parent)
{
	switch (handle->scope) {
	case NET_SHAPER_SCOPE_UNSPEC:
	case NET_SHAPER_SCOPE_NETDEV:
	case __NET_SHAPER_SCOPE_MAX:
		parent->scope = NET_SHAPER_SCOPE_UNSPEC;
		break;

	case NET_SHAPER_SCOPE_QUEUE:
	case NET_SHAPER_SCOPE_NODE:
		parent->scope = NET_SHAPER_SCOPE_NETDEV;
		break;
	}
	parent->id = 0;
}
/* MARK_0 is already in use due to XA_FLAGS_ALLOC; that flag can't be reused
 * as it's cleared by xa_store().
 */
#define NET_SHAPER_NOT_VALID XA_MARK_1
static struct net_shaper *
net_shaper_lookup(struct net_shaper_binding *binding,
		  const struct net_shaper_handle *handle)
{
	struct net_shaper_hierarchy *hierarchy = net_shaper_hierarchy(binding);
	u32 index = net_shaper_handle_to_index(handle);

	if (!hierarchy || xa_get_mark(&hierarchy->shapers, index,
				      NET_SHAPER_NOT_VALID))
		return NULL;

	return xa_load(&hierarchy->shapers, index);
}
/* Allocate on demand the per device shaper's hierarchy container.
 * Called under the net shaper lock.
 */
static struct net_shaper_hierarchy *
net_shaper_hierarchy_setup(struct net_shaper_binding *binding)
{
	struct net_shaper_hierarchy *hierarchy = net_shaper_hierarchy(binding);

	if (hierarchy)
		return hierarchy;

	hierarchy = kmalloc(sizeof(*hierarchy), GFP_KERNEL);
	if (!hierarchy)
		return NULL;

	/* The flag is required for ID allocation. */
	xa_init_flags(&hierarchy->shapers, XA_FLAGS_ALLOC);

	switch (binding->type) {
	case NET_SHAPER_BINDING_TYPE_NETDEV:
		/* Pairs with READ_ONCE() in net_shaper_hierarchy. */
		WRITE_ONCE(binding->netdev->net_shaper_hierarchy, hierarchy);
		break;
	}
	return hierarchy;
}
/* Prepare the hierarchy container to actually insert the given shaper, doing
 * in advance the needed allocations.
 */
static int net_shaper_pre_insert(struct net_shaper_binding *binding,
				 struct net_shaper_handle *handle,
				 struct netlink_ext_ack *extack)
{
	struct net_shaper_hierarchy *hierarchy = net_shaper_hierarchy(binding);
	struct net_shaper *prev, *cur;
	bool id_allocated = false;
	u32 index;
	int ret;

	if (!hierarchy)
		return -ENOMEM;

	index = net_shaper_handle_to_index(handle);
	cur = xa_load(&hierarchy->shapers, index);
	if (cur)
		return 0;

	/* Allocate a new id, if needed. */
	if (handle->scope == NET_SHAPER_SCOPE_NODE &&
	    handle->id == NET_SHAPER_ID_UNSPEC) {
		u32 min, max;

		handle->id = NET_SHAPER_ID_MASK - 1;
		max = net_shaper_handle_to_index(handle);
		handle->id = 0;
		min = net_shaper_handle_to_index(handle);

		ret = xa_alloc(&hierarchy->shapers, &index, NULL,
			       XA_LIMIT(min, max), GFP_KERNEL);
		if (ret < 0) {
			NL_SET_ERR_MSG(extack, "Can't allocate new id for NODE shaper");
			return ret;
		}

		net_shaper_index_to_handle(index, handle);
		id_allocated = true;
	}

	cur = kzalloc(sizeof(*cur), GFP_KERNEL);
	if (!cur) {
		ret = -ENOMEM;
		goto free_id;
	}

	/* Mark the 'tentative' shaper inside the hierarchy container.
	 * xa_set_mark is a no-op if the previous store fails.
	 */
	xa_lock(&hierarchy->shapers);
	prev = __xa_store(&hierarchy->shapers, index, cur, GFP_KERNEL);
	__xa_set_mark(&hierarchy->shapers, index, NET_SHAPER_NOT_VALID);
	xa_unlock(&hierarchy->shapers);
	if (xa_err(prev)) {
		NL_SET_ERR_MSG(extack, "Can't insert shaper into device store");
		ret = xa_err(prev);
		kfree(cur);
		goto free_id;
	}
	return 0;

free_id:
	if (id_allocated)
		xa_erase(&hierarchy->shapers, index);
	return ret;
}
/* Commit the tentative insert with the actual values.
 * Must be called only after a successful net_shaper_pre_insert().
 */
static void net_shaper_commit(struct net_shaper_binding *binding,
			      int nr_shapers, const struct net_shaper *shapers)
{
	struct net_shaper_hierarchy *hierarchy = net_shaper_hierarchy(binding);
	struct net_shaper *cur;
	int index;
	int i;

	xa_lock(&hierarchy->shapers);
	for (i = 0; i < nr_shapers; ++i) {
		index = net_shaper_handle_to_index(&shapers[i].handle);

		cur = xa_load(&hierarchy->shapers, index);
		if (WARN_ON_ONCE(!cur))
			continue;

		/* Successful update: drop the tentative mark
		 * and update the hierarchy container.
		 */
		__xa_clear_mark(&hierarchy->shapers, index,
				NET_SHAPER_NOT_VALID);
		*cur = shapers[i];
	}
	xa_unlock(&hierarchy->shapers);
}
/* Rollback all the tentative inserts from the hierarchy. */
static void net_shaper_rollback(struct net_shaper_binding *binding)
{
	struct net_shaper_hierarchy *hierarchy = net_shaper_hierarchy(binding);
	struct net_shaper *cur;
	unsigned long index;

	if (!hierarchy)
		return;

	xa_lock(&hierarchy->shapers);
	xa_for_each_marked(&hierarchy->shapers, index, cur,
			   NET_SHAPER_NOT_VALID) {
		__xa_erase(&hierarchy->shapers, index);
		kfree(cur);
	}
	xa_unlock(&hierarchy->shapers);
}
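/* Illustrative summary of the insert protocol above (not additional API):
 *
 *	ret = net_shaper_pre_insert(binding, &handle, extack);	// tentative
 *	if (!ret && ops->set(binding, &shaper, extack))
 *		net_shaper_rollback(binding);	// erase tentative entries
 *	else if (!ret)
 *		net_shaper_commit(binding, 1, &shaper);	// clear NOT_VALID
 *
 * Between pre_insert() and commit() the entry carries NET_SHAPER_NOT_VALID
 * and is invisible to net_shaper_lookup().
 */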
static int net_shaper_parse_handle(const struct nlattr *attr,
				   const struct genl_info *info,
				   struct net_shaper_handle *handle)
{
	struct nlattr *tb[NET_SHAPER_A_HANDLE_MAX + 1];
	struct nlattr *id_attr;
	u32 id = 0;
	int ret;

	ret = nla_parse_nested(tb, NET_SHAPER_A_HANDLE_MAX, attr,
			       net_shaper_handle_nl_policy, info->extack);
	if (ret < 0)
		return ret;

	if (NL_REQ_ATTR_CHECK(info->extack, attr, tb,
			      NET_SHAPER_A_HANDLE_SCOPE))
		return -EINVAL;

	handle->scope = nla_get_u32(tb[NET_SHAPER_A_HANDLE_SCOPE]);

	/* The default id for NODE scope shapers is an invalid one
	 * to help the 'group' operation discriminate between new
	 * NODE shaper creation (ID_UNSPEC) and reuse of an existing
	 * shaper (any other value).
	 */
	id_attr = tb[NET_SHAPER_A_HANDLE_ID];
	if (id_attr)
		id = nla_get_u32(id_attr);
	else if (handle->scope == NET_SHAPER_SCOPE_NODE)
		id = NET_SHAPER_ID_UNSPEC;

	handle->id = id;
	return 0;
}
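/* Example (illustrative): a HANDLE nest carrying only { SCOPE = NODE }
 * yields handle->id == NET_SHAPER_ID_UNSPEC, which 'group' reads as
 * "create a new NODE shaper"; { SCOPE = NODE, ID = 4 } refers to the
 * existing node 4 instead.
 */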
static int net_shaper_validate_caps(struct net_shaper_binding *binding,
				    struct nlattr **tb,
				    const struct genl_info *info,
				    struct net_shaper *shaper)
{
	const struct net_shaper_ops *ops = net_shaper_ops(binding);
	struct nlattr *bad = NULL;
	unsigned long caps = 0;

	ops->capabilities(binding, shaper->handle.scope, &caps);

	if (tb[NET_SHAPER_A_PRIORITY] &&
	    !(caps & BIT(NET_SHAPER_A_CAPS_SUPPORT_PRIORITY)))
		bad = tb[NET_SHAPER_A_PRIORITY];
	if (tb[NET_SHAPER_A_WEIGHT] &&
	    !(caps & BIT(NET_SHAPER_A_CAPS_SUPPORT_WEIGHT)))
		bad = tb[NET_SHAPER_A_WEIGHT];
	if (tb[NET_SHAPER_A_BW_MIN] &&
	    !(caps & BIT(NET_SHAPER_A_CAPS_SUPPORT_BW_MIN)))
		bad = tb[NET_SHAPER_A_BW_MIN];
	if (tb[NET_SHAPER_A_BW_MAX] &&
	    !(caps & BIT(NET_SHAPER_A_CAPS_SUPPORT_BW_MAX)))
		bad = tb[NET_SHAPER_A_BW_MAX];
	if (tb[NET_SHAPER_A_BURST] &&
	    !(caps & BIT(NET_SHAPER_A_CAPS_SUPPORT_BURST)))
		bad = tb[NET_SHAPER_A_BURST];

	if (!caps)
		bad = tb[NET_SHAPER_A_HANDLE];

	if (bad) {
		NL_SET_BAD_ATTR(info->extack, bad);
		return -EOPNOTSUPP;
	}

	if (shaper->handle.scope == NET_SHAPER_SCOPE_QUEUE &&
	    binding->type == NET_SHAPER_BINDING_TYPE_NETDEV &&
	    shaper->handle.id >= binding->netdev->real_num_tx_queues) {
		NL_SET_ERR_MSG_FMT(info->extack,
				   "Not existing queue id %d max %d",
				   shaper->handle.id,
				   binding->netdev->real_num_tx_queues);
		return -ENOENT;
	}

	/* The metric is really used only if there is *any* rate-related
	 * setting, either in the current attribute set or in pre-existing
	 * values.
	 */
	if (shaper->burst || shaper->bw_min || shaper->bw_max) {
		u32 metric_cap = NET_SHAPER_A_CAPS_SUPPORT_METRIC_BPS +
				 shaper->metric;

		/* The metric test can fail even when the user did not
		 * specify the METRIC attribute. Pointing to a rate-related
		 * attribute would be confusing, as the attribute itself
		 * could indeed be supported, with a different metric.
		 * Be more specific.
		 */
		if (!(caps & BIT(metric_cap))) {
			NL_SET_ERR_MSG_FMT(info->extack, "Bad metric %d",
					   shaper->metric);
			return -EOPNOTSUPP;
		}
	}
	return 0;
}
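/* Driver-side sketch (hypothetical driver, not part of this file): a device
 * supporting only a max rate in bps on queue shapers could advertise:
 *
 *	static void foo_shaper_cap(struct net_shaper_binding *binding,
 *				   enum net_shaper_scope scope,
 *				   unsigned long *flags)
 *	{
 *		if (scope == NET_SHAPER_SCOPE_QUEUE)
 *			*flags = BIT(NET_SHAPER_A_CAPS_SUPPORT_METRIC_BPS) |
 *				 BIT(NET_SHAPER_A_CAPS_SUPPORT_BW_MAX);
 *	}
 *
 * With such caps, net_shaper_validate_caps() rejects e.g. BW_MIN or a
 * pps metric with -EOPNOTSUPP.
 */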
static int net_shaper_parse_info(struct net_shaper_binding *binding,
				 struct nlattr **tb,
				 const struct genl_info *info,
				 struct net_shaper *shaper,
				 bool *exists)
{
	struct net_shaper *old;
	int ret;

	/* The shaper handle is the only mandatory attribute. */
	if (NL_REQ_ATTR_CHECK(info->extack, NULL, tb, NET_SHAPER_A_HANDLE))
		return -EINVAL;

	ret = net_shaper_parse_handle(tb[NET_SHAPER_A_HANDLE], info,
				      &shaper->handle);
	if (ret)
		return ret;

	if (shaper->handle.scope == NET_SHAPER_SCOPE_UNSPEC) {
		NL_SET_BAD_ATTR(info->extack, tb[NET_SHAPER_A_HANDLE]);
		return -EINVAL;
	}

	/* Fetch the existing shaper, if any, so that user-provided info
	 * incrementally updates the existing shaper configuration.
	 */
	old = net_shaper_lookup(binding, &shaper->handle);
	if (old)
		*shaper = *old;
	*exists = !!old;

	if (tb[NET_SHAPER_A_METRIC])
		shaper->metric = nla_get_u32(tb[NET_SHAPER_A_METRIC]);

	if (tb[NET_SHAPER_A_BW_MIN])
		shaper->bw_min = nla_get_uint(tb[NET_SHAPER_A_BW_MIN]);

	if (tb[NET_SHAPER_A_BW_MAX])
		shaper->bw_max = nla_get_uint(tb[NET_SHAPER_A_BW_MAX]);

	if (tb[NET_SHAPER_A_BURST])
		shaper->burst = nla_get_uint(tb[NET_SHAPER_A_BURST]);

	if (tb[NET_SHAPER_A_PRIORITY])
		shaper->priority = nla_get_u32(tb[NET_SHAPER_A_PRIORITY]);

	if (tb[NET_SHAPER_A_WEIGHT])
		shaper->weight = nla_get_u32(tb[NET_SHAPER_A_WEIGHT]);

	ret = net_shaper_validate_caps(binding, tb, info, shaper);
	if (ret < 0)
		return ret;

	return 0;
}
static int net_shaper_validate_nesting(struct net_shaper_binding *binding,
				       const struct net_shaper *shaper,
				       struct netlink_ext_ack *extack)
{
	const struct net_shaper_ops *ops = net_shaper_ops(binding);
	unsigned long caps = 0;

	ops->capabilities(binding, shaper->handle.scope, &caps);
	if (!(caps & BIT(NET_SHAPER_A_CAPS_SUPPORT_NESTING))) {
		NL_SET_ERR_MSG_FMT(extack,
				   "Nesting not supported for scope %d",
				   shaper->handle.scope);
		return -EOPNOTSUPP;
	}
	return 0;
}
/* Fetch the existing leaf and update it with the user-provided
 * attributes.
 */
static int net_shaper_parse_leaf(struct net_shaper_binding *binding,
				 const struct nlattr *attr,
				 const struct genl_info *info,
				 const struct net_shaper *node,
				 struct net_shaper *shaper)
{
	struct nlattr *tb[NET_SHAPER_A_WEIGHT + 1];
	bool exists;
	int ret;

	ret = nla_parse_nested(tb, NET_SHAPER_A_WEIGHT, attr,
			       net_shaper_leaf_info_nl_policy, info->extack);
	if (ret < 0)
		return ret;

	ret = net_shaper_parse_info(binding, tb, info, shaper, &exists);
	if (ret < 0)
		return ret;

	if (shaper->handle.scope != NET_SHAPER_SCOPE_QUEUE) {
		NL_SET_BAD_ATTR(info->extack, tb[NET_SHAPER_A_HANDLE]);
		return -EINVAL;
	}

	if (node->handle.scope == NET_SHAPER_SCOPE_NODE) {
		ret = net_shaper_validate_nesting(binding, shaper,
						  info->extack);
		if (ret < 0)
			return ret;
	}

	if (!exists)
		net_shaper_default_parent(&shaper->handle, &shaper->parent);
	return 0;
}
/* Like net_shaper_parse_info(), but additionally allows the user to specify
 * the shaper's parent handle.
 */
static int net_shaper_parse_node(struct net_shaper_binding *binding,
				 struct nlattr **tb,
				 const struct genl_info *info,
				 struct net_shaper *shaper)
{
	bool exists;
	int ret;

	ret = net_shaper_parse_info(binding, tb, info, shaper, &exists);
	if (ret)
		return ret;

	if (shaper->handle.scope != NET_SHAPER_SCOPE_NODE &&
	    shaper->handle.scope != NET_SHAPER_SCOPE_NETDEV) {
		NL_SET_BAD_ATTR(info->extack, tb[NET_SHAPER_A_HANDLE]);
		return -EINVAL;
	}

	if (tb[NET_SHAPER_A_PARENT]) {
		ret = net_shaper_parse_handle(tb[NET_SHAPER_A_PARENT], info,
					      &shaper->parent);
		if (ret)
			return ret;

		if (shaper->parent.scope != NET_SHAPER_SCOPE_NODE &&
		    shaper->parent.scope != NET_SHAPER_SCOPE_NETDEV) {
			NL_SET_BAD_ATTR(info->extack, tb[NET_SHAPER_A_PARENT]);
			return -EINVAL;
		}
	}
	return 0;
}
static int net_shaper_generic_pre(struct genl_info *info, int type)
{
	struct net_shaper_nl_ctx *ctx = (struct net_shaper_nl_ctx *)info->ctx;

	BUILD_BUG_ON(sizeof(*ctx) > sizeof(info->ctx));

	return net_shaper_ctx_setup(info, type, ctx);
}
int net_shaper_nl_pre_doit(const struct genl_split_ops *ops,
			   struct sk_buff *skb, struct genl_info *info)
{
	return net_shaper_generic_pre(info, NET_SHAPER_A_IFINDEX);
}

static void net_shaper_generic_post(struct genl_info *info)
{
	net_shaper_ctx_cleanup((struct net_shaper_nl_ctx *)info->ctx);
}

void net_shaper_nl_post_doit(const struct genl_split_ops *ops,
			     struct sk_buff *skb, struct genl_info *info)
{
	net_shaper_generic_post(info);
}

int net_shaper_nl_pre_dumpit(struct netlink_callback *cb)
{
	struct net_shaper_nl_ctx *ctx = (struct net_shaper_nl_ctx *)cb->ctx;
	const struct genl_info *info = genl_info_dump(cb);

	return net_shaper_ctx_setup(info, NET_SHAPER_A_IFINDEX, ctx);
}

int net_shaper_nl_post_dumpit(struct netlink_callback *cb)
{
	net_shaper_ctx_cleanup((struct net_shaper_nl_ctx *)cb->ctx);
	return 0;
}

int net_shaper_nl_cap_pre_doit(const struct genl_split_ops *ops,
			       struct sk_buff *skb, struct genl_info *info)
{
	return net_shaper_generic_pre(info, NET_SHAPER_A_CAPS_IFINDEX);
}

void net_shaper_nl_cap_post_doit(const struct genl_split_ops *ops,
				 struct sk_buff *skb, struct genl_info *info)
{
	net_shaper_generic_post(info);
}

int net_shaper_nl_cap_pre_dumpit(struct netlink_callback *cb)
{
	struct net_shaper_nl_ctx *ctx = (struct net_shaper_nl_ctx *)cb->ctx;

	return net_shaper_ctx_setup(genl_info_dump(cb),
				    NET_SHAPER_A_CAPS_IFINDEX, ctx);
}

int net_shaper_nl_cap_post_dumpit(struct netlink_callback *cb)
{
	struct net_shaper_nl_ctx *ctx = (struct net_shaper_nl_ctx *)cb->ctx;

	net_shaper_ctx_cleanup(ctx);
	return 0;
}
int net_shaper_nl_get_doit(struct sk_buff *skb, struct genl_info *info)
{
	struct net_shaper_binding *binding;
	struct net_shaper_handle handle;
	struct net_shaper *shaper;
	struct sk_buff *msg;
	int ret;

	if (GENL_REQ_ATTR_CHECK(info, NET_SHAPER_A_HANDLE))
		return -EINVAL;

	binding = net_shaper_binding_from_ctx(info->ctx);
	ret = net_shaper_parse_handle(info->attrs[NET_SHAPER_A_HANDLE], info,
				      &handle);
	if (ret < 0)
		return ret;

	msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
	if (!msg)
		return -ENOMEM;

	rcu_read_lock();
	shaper = net_shaper_lookup(binding, &handle);
	if (!shaper) {
		NL_SET_BAD_ATTR(info->extack,
				info->attrs[NET_SHAPER_A_HANDLE]);
		rcu_read_unlock();
		ret = -ENOENT;
		goto free_msg;
	}

	ret = net_shaper_fill_one(msg, binding, shaper, info);
	rcu_read_unlock();
	if (ret)
		goto free_msg;

	ret = genlmsg_reply(msg, info);

	return ret;

free_msg:
	nlmsg_free(msg);
	return ret;
}
int net_shaper_nl_get_dumpit(struct sk_buff *skb,
			     struct netlink_callback *cb)
{
	struct net_shaper_nl_ctx *ctx = (struct net_shaper_nl_ctx *)cb->ctx;
	const struct genl_info *info = genl_info_dump(cb);
	struct net_shaper_hierarchy *hierarchy;
	struct net_shaper_binding *binding;
	struct net_shaper *shaper;
	int ret = 0;

	/* Don't error out dumps performed before any set operation. */
	binding = net_shaper_binding_from_ctx(ctx);
	hierarchy = net_shaper_hierarchy(binding);
	if (!hierarchy)
		return 0;

	rcu_read_lock();
	for (; (shaper = xa_find(&hierarchy->shapers, &ctx->start_index,
				 U32_MAX, XA_PRESENT)); ctx->start_index++) {
		ret = net_shaper_fill_one(skb, binding, shaper, info);
		if (ret)
			break;
	}
	rcu_read_unlock();

	return ret;
}
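/* Illustrative user-space invocation for the 'set' op below, via the ynl
 * CLI shipped in the kernel tree (values are examples only):
 *
 *   ./tools/net/ynl/cli.py --spec Documentation/netlink/specs/net_shaper.yaml \
 *	--do set --json '{"ifindex": 2,
 *			  "handle": {"scope": "queue", "id": 1},
 *			  "bw-max": 20000000}'
 *
 * This caps queue 1 of device 2 at 20 Mbps; the metric defaults to bps.
 */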
int net_shaper_nl_set_doit(struct sk_buff *skb, struct genl_info *info)
{
	struct net_shaper_hierarchy *hierarchy;
	struct net_shaper_binding *binding;
	const struct net_shaper_ops *ops;
	struct net_shaper_handle handle;
	struct net_shaper shaper = {};
	bool exists;
	int ret;

	binding = net_shaper_binding_from_ctx(info->ctx);

	net_shaper_lock(binding);
	ret = net_shaper_parse_info(binding, info->attrs, info, &shaper,
				    &exists);
	if (ret)
		goto unlock;

	if (!exists)
		net_shaper_default_parent(&shaper.handle, &shaper.parent);

	hierarchy = net_shaper_hierarchy_setup(binding);
	if (!hierarchy) {
		ret = -ENOMEM;
		goto unlock;
	}

	/* The 'set' operation can't create node-scope shapers. */
	handle = shaper.handle;
	if (handle.scope == NET_SHAPER_SCOPE_NODE &&
	    !net_shaper_lookup(binding, &handle)) {
		ret = -ENOENT;
		goto unlock;
	}

	ret = net_shaper_pre_insert(binding, &handle, info->extack);
	if (ret)
		goto unlock;

	ops = net_shaper_ops(binding);
	ret = ops->set(binding, &shaper, info->extack);
	if (ret) {
		net_shaper_rollback(binding);
		goto unlock;
	}

	net_shaper_commit(binding, 1, &shaper);

unlock:
	net_shaper_unlock(binding);
	return ret;
}
static int __net_shaper_delete(struct net_shaper_binding *binding,
			       struct net_shaper *shaper,
			       struct netlink_ext_ack *extack)
{
	struct net_shaper_hierarchy *hierarchy = net_shaper_hierarchy(binding);
	struct net_shaper_handle parent_handle, handle = shaper->handle;
	const struct net_shaper_ops *ops = net_shaper_ops(binding);
	int ret;

again:
	parent_handle = shaper->parent;

	ret = ops->delete(binding, &handle, extack);
	if (ret < 0)
		return ret;

	xa_erase(&hierarchy->shapers, net_shaper_handle_to_index(&handle));
	kfree_rcu(shaper, rcu);

	/* Eventually delete the parent, if it is left over with no leaves. */
	if (parent_handle.scope == NET_SHAPER_SCOPE_NODE) {
		shaper = net_shaper_lookup(binding, &parent_handle);
		if (shaper && !--shaper->leaves) {
			handle = parent_handle;
			goto again;
		}
	}
	return 0;
}
static int net_shaper_handle_cmp(const struct net_shaper_handle *a,
				 const struct net_shaper_handle *b)
{
	/* Must avoid holes in struct net_shaper_handle. */
	BUILD_BUG_ON(sizeof(*a) != 8);

	return memcmp(a, b, sizeof(*a));
}
static int net_shaper_parent_from_leaves(int leaves_count,
					 const struct net_shaper *leaves,
					 struct net_shaper *node,
					 struct netlink_ext_ack *extack)
{
	struct net_shaper_handle parent = leaves[0].parent;
	int i;

	for (i = 1; i < leaves_count; ++i) {
		if (net_shaper_handle_cmp(&leaves[i].parent, &parent)) {
			NL_SET_ERR_MSG_FMT(extack, "All the leaves shapers must have the same old parent");
			return -EINVAL;
		}
	}

	node->parent = parent;
	return 0;
}
static int __net_shaper_group(struct net_shaper_binding *binding,
			      bool update_node, int leaves_count,
			      struct net_shaper *leaves,
			      struct net_shaper *node,
			      struct netlink_ext_ack *extack)
{
	const struct net_shaper_ops *ops = net_shaper_ops(binding);
	struct net_shaper_handle leaf_handle;
	struct net_shaper *parent = NULL;
	bool new_node = false;
	int i, ret;

	if (node->handle.scope == NET_SHAPER_SCOPE_NODE) {
		new_node = node->handle.id == NET_SHAPER_ID_UNSPEC;

		if (!new_node && !net_shaper_lookup(binding, &node->handle)) {
			/* The related attribute is not available when
			 * reaching here from the delete() op.
			 */
			NL_SET_ERR_MSG_FMT(extack, "Node shaper %d:%d does not exist",
					   node->handle.scope,
					   node->handle.id);
			return -ENOENT;
		}

		/* When unspecified, the node parent scope is inherited from
		 * the leaves.
		 */
		if (node->parent.scope == NET_SHAPER_SCOPE_UNSPEC) {
			ret = net_shaper_parent_from_leaves(leaves_count,
							    leaves, node,
							    extack);
			if (ret)
				return ret;
		}
	} else {
		net_shaper_default_parent(&node->handle, &node->parent);
	}

	if (node->parent.scope == NET_SHAPER_SCOPE_NODE) {
		parent = net_shaper_lookup(binding, &node->parent);
		if (!parent) {
			NL_SET_ERR_MSG_FMT(extack, "Node parent shaper %d:%d does not exist",
					   node->parent.scope,
					   node->parent.id);
			return -ENOENT;
		}

		ret = net_shaper_validate_nesting(binding, node, extack);
		if (ret < 0)
			return ret;
	}

	if (update_node) {
		/* For a newly created node scope shaper, the following will
		 * update the handle, due to id allocation.
		 */
		ret = net_shaper_pre_insert(binding, &node->handle, extack);
		if (ret)
			return ret;
	}

	for (i = 0; i < leaves_count; ++i) {
		leaf_handle = leaves[i].handle;

		ret = net_shaper_pre_insert(binding, &leaf_handle, extack);
		if (ret)
			goto rollback;

		if (!net_shaper_handle_cmp(&leaves[i].parent, &node->handle))
			continue;

		/* The leaves shapers will be nested to the node, update the
		 * linking accordingly.
		 */
		leaves[i].parent = node->handle;
		node->leaves++;
	}

	ret = ops->group(binding, leaves_count, leaves, node, extack);
	if (ret < 0)
		goto rollback;

	/* The node's parent gains a new leaf only when the node itself
	 * is created by this group operation.
	 */
	if (new_node && parent)
		parent->leaves++;

	net_shaper_commit(binding, 1, node);
	net_shaper_commit(binding, leaves_count, leaves);
	return 0;

rollback:
	net_shaper_rollback(binding);
	return ret;
}
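/* Note on the ordering above: every affected entry is made tentative via
 * net_shaper_pre_insert() before the single ops->group() call, so a driver
 * failure unwinds through net_shaper_rollback() without any partial
 * hierarchy update ever becoming visible to lookups.
 */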
static int net_shaper_pre_del_node(struct net_shaper_binding *binding,
				   const struct net_shaper *shaper,
				   struct netlink_ext_ack *extack)
{
	struct net_shaper_hierarchy *hierarchy = net_shaper_hierarchy(binding);
	struct net_shaper *cur, *leaves, node = {};
	int ret, leaves_count = 0;
	unsigned long index;
	bool update_node;

	if (!shaper->leaves)
		return 0;

	/* Fetch the new node information. */
	node.handle = shaper->parent;
	cur = net_shaper_lookup(binding, &node.handle);
	if (cur) {
		node = *cur;
	} else {
		/* A scope NODE shaper can be nested only to the NETDEV scope
		 * shaper without creating the latter, so this check may fail
		 * only if the data is in an inconsistent state.
		 */
		if (WARN_ON_ONCE(node.handle.scope != NET_SHAPER_SCOPE_NETDEV))
			return -EINVAL;
	}

	leaves = kcalloc(shaper->leaves, sizeof(struct net_shaper),
			 GFP_KERNEL);
	if (!leaves)
		return -ENOMEM;

	/* Build the leaves array. */
	xa_for_each(&hierarchy->shapers, index, cur) {
		if (net_shaper_handle_cmp(&cur->parent, &shaper->handle))
			continue;

		if (WARN_ON_ONCE(leaves_count == shaper->leaves)) {
			ret = -EINVAL;
			goto free;
		}

		leaves[leaves_count++] = *cur;
	}

	/* When re-linking to the netdev shaper, avoid the eventual, implicit,
	 * creation of the new node, which would be surprising since the user
	 * is doing a delete operation.
	 */
	update_node = node.handle.scope != NET_SHAPER_SCOPE_NETDEV;
	ret = __net_shaper_group(binding, update_node, leaves_count,
				 leaves, &node, extack);

free:
	kfree(leaves);
	return ret;
}
int net_shaper_nl_delete_doit(struct sk_buff *skb, struct genl_info *info)
{
	struct net_shaper_hierarchy *hierarchy;
	struct net_shaper_binding *binding;
	struct net_shaper_handle handle;
	struct net_shaper *shaper;
	int ret;

	if (GENL_REQ_ATTR_CHECK(info, NET_SHAPER_A_HANDLE))
		return -EINVAL;

	binding = net_shaper_binding_from_ctx(info->ctx);

	net_shaper_lock(binding);
	ret = net_shaper_parse_handle(info->attrs[NET_SHAPER_A_HANDLE], info,
				      &handle);
	if (ret)
		goto unlock;

	hierarchy = net_shaper_hierarchy(binding);
	if (!hierarchy) {
		ret = -ENOENT;
		goto unlock;
	}

	shaper = net_shaper_lookup(binding, &handle);
	if (!shaper) {
		ret = -ENOENT;
		goto unlock;
	}

	if (handle.scope == NET_SHAPER_SCOPE_NODE) {
		ret = net_shaper_pre_del_node(binding, shaper, info->extack);
		if (ret)
			goto unlock;
	}

	ret = __net_shaper_delete(binding, shaper, info->extack);

unlock:
	net_shaper_unlock(binding);
	return ret;
}
static int net_shaper_group_send_reply(struct net_shaper_binding *binding,
				       const struct net_shaper_handle *handle,
				       struct genl_info *info,
				       struct sk_buff *msg)
{
	void *hdr;

	hdr = genlmsg_iput(msg, info);
	if (!hdr)
		goto free_msg;

	if (net_shaper_fill_binding(msg, binding, NET_SHAPER_A_IFINDEX) ||
	    net_shaper_fill_handle(msg, handle, NET_SHAPER_A_HANDLE))
		goto free_msg;

	genlmsg_end(msg, hdr);

	return genlmsg_reply(msg, info);

free_msg:
	/* Should never happen as msg is pre-allocated with enough space. */
	WARN_ONCE(true, "calculated message payload length (%d)",
		  net_shaper_handle_size());
	nlmsg_free(msg);
	return -EMSGSIZE;
}
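/* The 'group' op below nests QUEUE leaves under a NODE shaper, creating the
 * node on demand. Illustrative request shape (hypothetical values): leaves
 * [{queue 0, weight 1}, {queue 1, weight 2}] plus a node handle with scope
 * NODE and no id asks the kernel to allocate a fresh node id; the reply
 * carries the resulting handle.
 */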
int net_shaper_nl_group_doit(struct sk_buff *skb, struct genl_info *info)
{
	struct net_shaper **old_nodes, *leaves, node = {};
	struct net_shaper_hierarchy *hierarchy;
	struct net_shaper_binding *binding;
	int i, ret, rem, leaves_count;
	int old_nodes_count = 0;
	struct sk_buff *msg;
	struct nlattr *attr;

	if (GENL_REQ_ATTR_CHECK(info, NET_SHAPER_A_LEAVES))
		return -EINVAL;

	binding = net_shaper_binding_from_ctx(info->ctx);

	/* The group operation is optional. */
	if (!net_shaper_ops(binding)->group)
		return -EOPNOTSUPP;

	net_shaper_lock(binding);
	leaves_count = net_shaper_list_len(info, NET_SHAPER_A_LEAVES);
	if (!leaves_count) {
		NL_SET_BAD_ATTR(info->extack,
				info->attrs[NET_SHAPER_A_LEAVES]);
		ret = -EINVAL;
		goto unlock;
	}

	leaves = kcalloc(leaves_count, sizeof(struct net_shaper) +
			 sizeof(struct net_shaper *), GFP_KERNEL);
	if (!leaves) {
		ret = -ENOMEM;
		goto unlock;
	}

	old_nodes = (void *)&leaves[leaves_count];

	ret = net_shaper_parse_node(binding, info->attrs, info, &node);
	if (ret)
		goto free_leaves;

	i = 0;
	nla_for_each_attr_type(attr, NET_SHAPER_A_LEAVES,
			       genlmsg_data(info->genlhdr),
			       genlmsg_len(info->genlhdr), rem) {
		if (WARN_ON_ONCE(i >= leaves_count)) {
			ret = -EINVAL;
			goto free_leaves;
		}

		ret = net_shaper_parse_leaf(binding, attr, info,
					    &node, &leaves[i]);
		if (ret)
			goto free_leaves;
		i++;
	}

	/* Prepare the msg reply in advance, to avoid device operation
	 * rollback on allocation failure.
	 */
	msg = genlmsg_new(net_shaper_handle_size(), GFP_KERNEL);
	if (!msg) {
		ret = -ENOMEM;
		goto free_leaves;
	}

	hierarchy = net_shaper_hierarchy_setup(binding);
	if (!hierarchy) {
		ret = -ENOMEM;
		goto free_msg;
	}

	/* Record the node shapers that this group() operation can make
	 * childless for later cleanup.
	 */
	for (i = 0; i < leaves_count; i++) {
		if (leaves[i].parent.scope == NET_SHAPER_SCOPE_NODE &&
		    net_shaper_handle_cmp(&leaves[i].parent, &node.handle)) {
			struct net_shaper *tmp;

			tmp = net_shaper_lookup(binding, &leaves[i].parent);
			if (!tmp)
				continue;

			old_nodes[old_nodes_count++] = tmp;
		}
	}

	ret = __net_shaper_group(binding, true, leaves_count, leaves, &node,
				 info->extack);
	if (ret)
		goto free_msg;

	/* Check if we need to delete any node left alone by the new leaves
	 * linkage.
	 */
	for (i = 0; i < old_nodes_count; ++i) {
		struct net_shaper *tmp = old_nodes[i];

		if (--tmp->leaves > 0)
			continue;

		/* Errors here are not fatal: the grouping operation is
		 * completed, and user-space can still explicitly clean up
		 * left-over nodes.
		 */
		__net_shaper_delete(binding, tmp, info->extack);
	}

	ret = net_shaper_group_send_reply(binding, &node.handle, info, msg);
	if (ret)
		GENL_SET_ERR_MSG_FMT(info, "Can't send reply");

free_leaves:
	kfree(leaves);

unlock:
	net_shaper_unlock(binding);
	return ret;

free_msg:
	kfree_skb(msg);
	goto free_leaves;
}
static int
net_shaper_cap_fill_one(struct sk_buff *msg,
			struct net_shaper_binding *binding,
			enum net_shaper_scope scope, unsigned long flags,
			const struct genl_info *info)
{
	unsigned long cur;
	void *hdr;

	hdr = genlmsg_iput(msg, info);
	if (!hdr)
		return -EMSGSIZE;

	if (net_shaper_fill_binding(msg, binding, NET_SHAPER_A_CAPS_IFINDEX) ||
	    nla_put_u32(msg, NET_SHAPER_A_CAPS_SCOPE, scope))
		goto nla_put_failure;

	for (cur = NET_SHAPER_A_CAPS_SUPPORT_METRIC_BPS;
	     cur <= NET_SHAPER_A_CAPS_MAX; ++cur) {
		if (flags & BIT(cur) && nla_put_flag(msg, cur))
			goto nla_put_failure;
	}

	genlmsg_end(msg, hdr);
	return 0;

nla_put_failure:
	genlmsg_cancel(msg, hdr);
	return -EMSGSIZE;
}
int net_shaper_nl_cap_get_doit(struct sk_buff *skb, struct genl_info *info)
{
	struct net_shaper_binding *binding;
	const struct net_shaper_ops *ops;
	enum net_shaper_scope scope;
	unsigned long flags = 0;
	struct sk_buff *msg;
	int ret;

	if (GENL_REQ_ATTR_CHECK(info, NET_SHAPER_A_CAPS_SCOPE))
		return -EINVAL;

	binding = net_shaper_binding_from_ctx(info->ctx);
	scope = nla_get_u32(info->attrs[NET_SHAPER_A_CAPS_SCOPE]);
	ops = net_shaper_ops(binding);
	ops->capabilities(binding, scope, &flags);
	if (!flags)
		return -EOPNOTSUPP;

	msg = genlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
	if (!msg)
		return -ENOMEM;

	ret = net_shaper_cap_fill_one(msg, binding, scope, flags, info);
	if (ret)
		goto free_msg;

	ret = genlmsg_reply(msg, info);

	return ret;

free_msg:
	nlmsg_free(msg);
	return ret;
}
int net_shaper_nl_cap_get_dumpit(struct sk_buff *skb,
				 struct netlink_callback *cb)
{
	const struct genl_info *info = genl_info_dump(cb);
	struct net_shaper_binding *binding;
	const struct net_shaper_ops *ops;
	enum net_shaper_scope scope;
	int ret;

	binding = net_shaper_binding_from_ctx(cb->ctx);
	ops = net_shaper_ops(binding);
	for (scope = 0; scope <= NET_SHAPER_SCOPE_MAX; ++scope) {
		unsigned long flags = 0;

		ops->capabilities(binding, scope, &flags);
		if (!flags)
			continue;

		ret = net_shaper_cap_fill_one(skb, binding, scope, flags,
					      info);
		if (ret)
			return ret;
	}

	return 0;
}
static void net_shaper_flush(struct net_shaper_binding *binding)
{
	struct net_shaper_hierarchy *hierarchy = net_shaper_hierarchy(binding);
	struct net_shaper *cur;
	unsigned long index;

	if (!hierarchy)
		return;

	net_shaper_lock(binding);
	xa_lock(&hierarchy->shapers);
	xa_for_each(&hierarchy->shapers, index, cur) {
		__xa_erase(&hierarchy->shapers, index);
		kfree(cur);
	}
	xa_unlock(&hierarchy->shapers);
	net_shaper_unlock(binding);

	kfree(hierarchy);
}
void net_shaper_flush_netdev(struct net_device *dev)
{
	struct net_shaper_binding binding = {
		.type = NET_SHAPER_BINDING_TYPE_NETDEV,
		.netdev = dev,
	};

	net_shaper_flush(&binding);
}
void net_shaper_set_real_num_tx_queues(struct net_device *dev,
				       unsigned int txq)
{
	struct net_shaper_hierarchy *hierarchy;
	struct net_shaper_binding binding;
	int i;

	binding.type = NET_SHAPER_BINDING_TYPE_NETDEV;
	binding.netdev = dev;
	hierarchy = net_shaper_hierarchy(&binding);
	if (!hierarchy)
		return;

	/* Only drivers implementing shaper support ensure
	 * the lock is acquired in advance.
	 */
	netdev_assert_locked(dev);

	/* Take action only when decreasing the tx queue number. */
	for (i = txq; i < dev->real_num_tx_queues; ++i) {
		struct net_shaper_handle handle, parent_handle;
		struct net_shaper *shaper;
		u32 index;

		handle.scope = NET_SHAPER_SCOPE_QUEUE;
		handle.id = i;
		shaper = net_shaper_lookup(&binding, &handle);
		if (!shaper)
			continue;

		/* Don't touch the H/W for the queue shaper; the driver
		 * already deleted the queue and related resources.
		 */
		parent_handle = shaper->parent;
		index = net_shaper_handle_to_index(&handle);
		xa_erase(&hierarchy->shapers, index);
		kfree_rcu(shaper, rcu);

		/* The recursion on the parent does the full job. */
		if (parent_handle.scope != NET_SHAPER_SCOPE_NODE)
			continue;

		shaper = net_shaper_lookup(&binding, &parent_handle);
		if (shaper && !--shaper->leaves)
			__net_shaper_delete(&binding, shaper, NULL);
	}
}
static int __init shaper_init(void)
{
	return genl_register_family(&net_shaper_nl_family);
}

subsys_initcall(shaper_init);