1 // SPDX-License-Identifier: GPL-2.0-or-later
3 * net/sched/sch_api.c Packet scheduler API.
14 #include <linux/module.h>
15 #include <linux/types.h>
16 #include <linux/kernel.h>
17 #include <linux/string.h>
18 #include <linux/errno.h>
19 #include <linux/skbuff.h>
20 #include <linux/init.h>
21 #include <linux/proc_fs.h>
22 #include <linux/seq_file.h>
23 #include <linux/kmod.h>
24 #include <linux/list.h>
25 #include <linux/hrtimer.h>
26 #include <linux/slab.h>
27 #include <linux/hashtable.h>
29 #include <net/net_namespace.h>
31 #include <net/netlink.h>
32 #include <net/pkt_sched.h>
33 #include <net/pkt_cls.h>
34 #include <net/tc_wrapper.h>
36 #include <trace/events/qdisc.h>
43 This file consists of two interrelated parts:
45 1. queueing disciplines manager frontend.
46 2. traffic classes manager frontend.
Generally, a queueing discipline ("qdisc") is a black box that can
enqueue packets and dequeue them (when the device is ready to send
something) in an order and at times determined by the algorithm
hidden inside it.
qdiscs are divided into two categories:
- "queues", which have no internal structure visible from the outside.
- "schedulers", which split all packets into "traffic classes",
  using "packet classifiers" (see cls_api.c).

In turn, classes may have child qdiscs (as a rule, queues)
attached to them, and so on.
The goal of the routines in this file is to translate the
information supplied by the user in the form of handles into a form
more intelligible to the kernel, to perform sanity checks and the
parts of the work that are common to all qdiscs, and to provide
rtnetlink notifications.
67 All real intelligent work is done inside qdisc modules.
Every discipline has two major routines: enqueue and dequeue.

---dequeue

dequeue usually returns an skb to send. It is allowed to return NULL,
but that does not mean the queue is empty; it just means that the
discipline does not want to send anything right now.
The queue is really empty only if q->q.qlen == 0.
For complicated disciplines with multiple queues, q->q is not the
real packet queue, but q->q.qlen must nevertheless be valid.
---enqueue

enqueue returns 0 if the packet was enqueued successfully.
If a packet (this one or another one) was dropped, it returns a
non-zero error code:
NET_XMIT_DROP - this packet was dropped.
  Expected action: do not back off, but wait until the queue clears.
NET_XMIT_CN   - probably this packet was enqueued, but another one was dropped.
  Expected action: back off or ignore.
--- Auxiliary routines:

---peek    : like dequeue, but without removing a packet from the queue.
---reset   : returns the qdisc to its initial state: purge all buffers,
             clear all timers, counters (except statistics) etc.
---init    : initializes a newly created qdisc.
---destroy : destroys resources allocated by init and during the
             lifetime of the qdisc.
---change  : changes qdisc parameters.
 */
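/* Illustrative sketch (not part of this file's API): a minimal
 * work-conserving FIFO that follows the enqueue/dequeue contract
 * described above.  The helpers qdisc_enqueue_tail(), qdisc_dequeue_head(),
 * qdisc_peek_head() and qdisc_drop() come from <net/sch_generic.h>;
 * the "example_fifo" name is hypothetical, and sch->limit is assumed
 * to have been set by an ->init() routine that is not shown.
 */
static int example_fifo_enqueue(struct sk_buff *skb, struct Qdisc *sch,
				struct sk_buff **to_free)
{
	if (likely(sch->q.qlen < READ_ONCE(sch->limit)))
		return qdisc_enqueue_tail(skb, sch);	/* NET_XMIT_SUCCESS (0) */

	return qdisc_drop(skb, sch, to_free);		/* NET_XMIT_DROP */
}

static struct sk_buff *example_fifo_dequeue(struct Qdisc *sch)
{
	/* A shaping qdisc may return NULL here even though q.qlen != 0;
	 * for this simple FIFO, NULL really does mean the queue is empty.
	 */
	return qdisc_dequeue_head(sch);
}

static struct Qdisc_ops example_fifo_qdisc_ops __read_mostly = {
	.id		= "example_fifo",
	.priv_size	= 0,
	.enqueue	= example_fifo_enqueue,
	.dequeue	= example_fifo_dequeue,
	.peek		= qdisc_peek_head,
	.owner		= THIS_MODULE,
};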
/* Protects the list of registered TC modules. It is a pure SMP lock. */
117 static DEFINE_RWLOCK(qdisc_mod_lock);
120 /************************************************
121 * Queueing disciplines manipulation. *
122 ************************************************/
125 /* The list of all installed queueing disciplines. */
127 static struct Qdisc_ops *qdisc_base;
129 /* Register/unregister queueing discipline */
131 int register_qdisc(struct Qdisc_ops *qops)
133 struct Qdisc_ops *q, **qp;
136 write_lock(&qdisc_mod_lock);
137 for (qp = &qdisc_base; (q = *qp) != NULL; qp = &q->next)
138 if (!strcmp(qops->id, q->id))
141 if (qops->enqueue == NULL)
142 qops->enqueue = noop_qdisc_ops.enqueue;
143 if (qops->peek == NULL) {
144 if (qops->dequeue == NULL)
145 qops->peek = noop_qdisc_ops.peek;
149 if (qops->dequeue == NULL)
150 qops->dequeue = noop_qdisc_ops.dequeue;
153 const struct Qdisc_class_ops *cops = qops->cl_ops;
155 if (!(cops->find && cops->walk && cops->leaf))
158 if (cops->tcf_block && !(cops->bind_tcf && cops->unbind_tcf))
166 write_unlock(&qdisc_mod_lock);
173 EXPORT_SYMBOL(register_qdisc);
175 void unregister_qdisc(struct Qdisc_ops *qops)
177 struct Qdisc_ops *q, **qp;
180 write_lock(&qdisc_mod_lock);
181 for (qp = &qdisc_base; (q = *qp) != NULL; qp = &q->next)
189 write_unlock(&qdisc_mod_lock);
191 WARN(err, "unregister qdisc(%s) failed\n", qops->id);
193 EXPORT_SYMBOL(unregister_qdisc);
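/* Continuing the illustrative sketch from the top of the file: a qdisc
 * module would typically register its ops in module_init() and
 * unregister them in module_exit().  register_qdisc() returns a
 * negative errno (for example -EEXIST if the "id" is already taken),
 * so its result must be propagated.  Names are hypothetical.
 */
static int __init example_fifo_module_init(void)
{
	return register_qdisc(&example_fifo_qdisc_ops);
}
module_init(example_fifo_module_init);

static void __exit example_fifo_module_exit(void)
{
	unregister_qdisc(&example_fifo_qdisc_ops);
}
module_exit(example_fifo_module_exit);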
195 /* Get default qdisc if not otherwise specified */
196 void qdisc_get_default(char *name, size_t len)
198 read_lock(&qdisc_mod_lock);
199 strscpy(name, default_qdisc_ops->id, len);
200 read_unlock(&qdisc_mod_lock);
203 static struct Qdisc_ops *qdisc_lookup_default(const char *name)
205 struct Qdisc_ops *q = NULL;
207 for (q = qdisc_base; q; q = q->next) {
208 if (!strcmp(name, q->id)) {
209 if (!try_module_get(q->owner))
218 /* Set new default qdisc to use */
219 int qdisc_set_default(const char *name)
221 const struct Qdisc_ops *ops;
223 if (!capable(CAP_NET_ADMIN))
226 write_lock(&qdisc_mod_lock);
227 ops = qdisc_lookup_default(name);
229 /* Not found, drop lock and try to load module */
230 write_unlock(&qdisc_mod_lock);
231 request_module(NET_SCH_ALIAS_PREFIX "%s", name);
232 write_lock(&qdisc_mod_lock);
234 ops = qdisc_lookup_default(name);
238 /* Set new default */
239 module_put(default_qdisc_ops->owner);
240 default_qdisc_ops = ops;
242 write_unlock(&qdisc_mod_lock);
244 return ops ? 0 : -ENOENT;
247 #ifdef CONFIG_NET_SCH_DEFAULT
248 /* Set default value from kernel config */
249 static int __init sch_default_qdisc(void)
251 return qdisc_set_default(CONFIG_DEFAULT_NET_SCH);
253 late_initcall(sch_default_qdisc);
/* We know the handle. Find the qdisc among all qdiscs attached to the
 * device (the root qdisc, all its children, children of children, etc.)
 * Note: the caller either holds the RTNL lock or rcu_read_lock()
 */
261 static struct Qdisc *qdisc_match_from_root(struct Qdisc *root, u32 handle)
265 if (!qdisc_dev(root))
266 return (root->handle == handle ? root : NULL);
268 if (!(root->flags & TCQ_F_BUILTIN) &&
269 root->handle == handle)
272 hash_for_each_possible_rcu(qdisc_dev(root)->qdisc_hash, q, hash, handle,
273 lockdep_rtnl_is_held()) {
274 if (q->handle == handle)
280 void qdisc_hash_add(struct Qdisc *q, bool invisible)
282 if ((q->parent != TC_H_ROOT) && !(q->flags & TCQ_F_INGRESS)) {
284 hash_add_rcu(qdisc_dev(q)->qdisc_hash, &q->hash, q->handle);
286 q->flags |= TCQ_F_INVISIBLE;
289 EXPORT_SYMBOL(qdisc_hash_add);
291 void qdisc_hash_del(struct Qdisc *q)
293 if ((q->parent != TC_H_ROOT) && !(q->flags & TCQ_F_INGRESS)) {
295 hash_del_rcu(&q->hash);
298 EXPORT_SYMBOL(qdisc_hash_del);
300 struct Qdisc *qdisc_lookup(struct net_device *dev, u32 handle)
306 q = qdisc_match_from_root(rtnl_dereference(dev->qdisc), handle);
310 if (dev_ingress_queue(dev))
311 q = qdisc_match_from_root(
312 rtnl_dereference(dev_ingress_queue(dev)->qdisc_sleeping),
318 struct Qdisc *qdisc_lookup_rcu(struct net_device *dev, u32 handle)
320 struct netdev_queue *nq;
325 q = qdisc_match_from_root(rcu_dereference(dev->qdisc), handle);
329 nq = dev_ingress_queue_rcu(dev);
331 q = qdisc_match_from_root(rcu_dereference(nq->qdisc_sleeping),
337 static struct Qdisc *qdisc_leaf(struct Qdisc *p, u32 classid)
340 const struct Qdisc_class_ops *cops = p->ops->cl_ops;
344 cl = cops->find(p, classid);
348 return cops->leaf(p, cl);
351 /* Find queueing discipline by name */
353 static struct Qdisc_ops *qdisc_lookup_ops(struct nlattr *kind)
355 struct Qdisc_ops *q = NULL;
358 read_lock(&qdisc_mod_lock);
359 for (q = qdisc_base; q; q = q->next) {
360 if (nla_strcmp(kind, q->id) == 0) {
361 if (!try_module_get(q->owner))
366 read_unlock(&qdisc_mod_lock);
/* The linklayer setting was not transferred from iproute2, in older
 * versions, and the rate table lookup system has been dropped from
 * the kernel. To stay backward compatible with older iproute2 tc
 * utils, we detect the linklayer setting by checking whether the rate
 * table was modified.
 *
 * For linklayer ATM table entries, the rate table will be aligned to
 * 48 bytes, so some table entries will contain the same value. The
 * mpu (min packet unit) is also encoded into the old rate table, so
 * starting from the mpu we find the low and high table entries
 * mapping this cell. If these entries contain the same value, then
 * the rate table has been modified for linklayer ATM.
 *
 * This is done by rounding the mpu up to the nearest 48-byte
 * cell/entry, then rounding up to the next cell, computing the table
 * entry one below, and comparing the two.
 */
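/* Worked example for the detection above (values chosen purely for
 * illustration): with mpu = 96 and cell_log = 3, low = roundup(96, 48) = 96
 * and high = roundup(97, 48) = 144, so cell_low = 96 >> 3 = 12 and
 * cell_high = (144 >> 3) - 1 = 17.  If rtab[12] == rtab[17], the table was
 * built with 48-byte (ATM cell payload) alignment and TC_LINKLAYER_ATM is
 * reported; otherwise the linklayer is assumed to be Ethernet.
 */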
388 static __u8 __detect_linklayer(struct tc_ratespec *r, __u32 *rtab)
390 int low = roundup(r->mpu, 48);
391 int high = roundup(low+1, 48);
392 int cell_low = low >> r->cell_log;
393 int cell_high = (high >> r->cell_log) - 1;
395 /* rtab is too inaccurate at rates > 100Mbit/s */
396 if ((r->rate > (100000000/8)) || (rtab[0] == 0)) {
397 pr_debug("TC linklayer: Giving up ATM detection\n");
398 return TC_LINKLAYER_ETHERNET;
401 if ((cell_high > cell_low) && (cell_high < 256)
402 && (rtab[cell_low] == rtab[cell_high])) {
403 pr_debug("TC linklayer: Detected ATM, low(%d)=high(%d)=%u\n",
404 cell_low, cell_high, rtab[cell_high]);
405 return TC_LINKLAYER_ATM;
407 return TC_LINKLAYER_ETHERNET;
410 static struct qdisc_rate_table *qdisc_rtab_list;
412 struct qdisc_rate_table *qdisc_get_rtab(struct tc_ratespec *r,
414 struct netlink_ext_ack *extack)
416 struct qdisc_rate_table *rtab;
418 if (tab == NULL || r->rate == 0 ||
419 r->cell_log == 0 || r->cell_log >= 32 ||
420 nla_len(tab) != TC_RTAB_SIZE) {
421 NL_SET_ERR_MSG(extack, "Invalid rate table parameters for searching");
425 for (rtab = qdisc_rtab_list; rtab; rtab = rtab->next) {
426 if (!memcmp(&rtab->rate, r, sizeof(struct tc_ratespec)) &&
427 !memcmp(&rtab->data, nla_data(tab), 1024)) {
433 rtab = kmalloc(sizeof(*rtab), GFP_KERNEL);
437 memcpy(rtab->data, nla_data(tab), 1024);
438 if (r->linklayer == TC_LINKLAYER_UNAWARE)
439 r->linklayer = __detect_linklayer(r, rtab->data);
440 rtab->next = qdisc_rtab_list;
441 qdisc_rtab_list = rtab;
443 NL_SET_ERR_MSG(extack, "Failed to allocate new qdisc rate table");
447 EXPORT_SYMBOL(qdisc_get_rtab);
449 void qdisc_put_rtab(struct qdisc_rate_table *tab)
451 struct qdisc_rate_table *rtab, **rtabp;
453 if (!tab || --tab->refcnt)
456 for (rtabp = &qdisc_rtab_list;
457 (rtab = *rtabp) != NULL;
458 rtabp = &rtab->next) {
466 EXPORT_SYMBOL(qdisc_put_rtab);
468 static LIST_HEAD(qdisc_stab_list);
470 static const struct nla_policy stab_policy[TCA_STAB_MAX + 1] = {
471 [TCA_STAB_BASE] = { .len = sizeof(struct tc_sizespec) },
472 [TCA_STAB_DATA] = { .type = NLA_BINARY },
475 static struct qdisc_size_table *qdisc_get_stab(struct nlattr *opt,
476 struct netlink_ext_ack *extack)
478 struct nlattr *tb[TCA_STAB_MAX + 1];
479 struct qdisc_size_table *stab;
480 struct tc_sizespec *s;
481 unsigned int tsize = 0;
485 err = nla_parse_nested_deprecated(tb, TCA_STAB_MAX, opt, stab_policy,
489 if (!tb[TCA_STAB_BASE]) {
490 NL_SET_ERR_MSG(extack, "Size table base attribute is missing");
491 return ERR_PTR(-EINVAL);
494 s = nla_data(tb[TCA_STAB_BASE]);
497 if (!tb[TCA_STAB_DATA]) {
498 NL_SET_ERR_MSG(extack, "Size table data attribute is missing");
499 return ERR_PTR(-EINVAL);
501 tab = nla_data(tb[TCA_STAB_DATA]);
502 tsize = nla_len(tb[TCA_STAB_DATA]) / sizeof(u16);
505 if (tsize != s->tsize || (!tab && tsize > 0)) {
506 NL_SET_ERR_MSG(extack, "Invalid size of size table");
507 return ERR_PTR(-EINVAL);
510 list_for_each_entry(stab, &qdisc_stab_list, list) {
511 if (memcmp(&stab->szopts, s, sizeof(*s)))
514 memcmp(stab->data, tab, flex_array_size(stab, data, tsize)))
520 if (s->size_log > STAB_SIZE_LOG_MAX ||
521 s->cell_log > STAB_SIZE_LOG_MAX) {
522 NL_SET_ERR_MSG(extack, "Invalid logarithmic size of size table");
523 return ERR_PTR(-EINVAL);
526 stab = kmalloc(struct_size(stab, data, tsize), GFP_KERNEL);
528 return ERR_PTR(-ENOMEM);
533 memcpy(stab->data, tab, flex_array_size(stab, data, tsize));
535 list_add_tail(&stab->list, &qdisc_stab_list);
540 void qdisc_put_stab(struct qdisc_size_table *tab)
545 if (--tab->refcnt == 0) {
546 list_del(&tab->list);
550 EXPORT_SYMBOL(qdisc_put_stab);
552 static int qdisc_dump_stab(struct sk_buff *skb, struct qdisc_size_table *stab)
556 nest = nla_nest_start_noflag(skb, TCA_STAB);
558 goto nla_put_failure;
559 if (nla_put(skb, TCA_STAB_BASE, sizeof(stab->szopts), &stab->szopts))
560 goto nla_put_failure;
561 nla_nest_end(skb, nest);
569 void __qdisc_calculate_pkt_len(struct sk_buff *skb,
570 const struct qdisc_size_table *stab)
574 pkt_len = skb->len + stab->szopts.overhead;
575 if (unlikely(!stab->szopts.tsize))
578 slot = pkt_len + stab->szopts.cell_align;
579 if (unlikely(slot < 0))
582 slot >>= stab->szopts.cell_log;
583 if (likely(slot < stab->szopts.tsize))
584 pkt_len = stab->data[slot];
586 pkt_len = stab->data[stab->szopts.tsize - 1] *
587 (slot / stab->szopts.tsize) +
588 stab->data[slot % stab->szopts.tsize];
590 pkt_len <<= stab->szopts.size_log;
592 if (unlikely(pkt_len < 1))
594 qdisc_skb_cb(skb)->pkt_len = pkt_len;
596 EXPORT_SYMBOL(__qdisc_calculate_pkt_len);
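/* Worked example for __qdisc_calculate_pkt_len() above (values chosen
 * purely for illustration): with overhead = 24, cell_align = 0,
 * cell_log = 6, size_log = 0 and tsize = 512, a 100-byte skb gives
 * pkt_len = 124 and slot = 124 >> 6 = 1, so the packet is accounted
 * with stab->data[1] bytes - the size-table bucket covering adjusted
 * lengths 64..127.
 */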
598 void qdisc_warn_nonwc(const char *txt, struct Qdisc *qdisc)
600 if (!(qdisc->flags & TCQ_F_WARN_NONWC)) {
601 pr_warn("%s: %s qdisc %X: is non-work-conserving?\n",
602 txt, qdisc->ops->id, qdisc->handle >> 16);
603 qdisc->flags |= TCQ_F_WARN_NONWC;
606 EXPORT_SYMBOL(qdisc_warn_nonwc);
608 static enum hrtimer_restart qdisc_watchdog(struct hrtimer *timer)
610 struct qdisc_watchdog *wd = container_of(timer, struct qdisc_watchdog,
614 __netif_schedule(qdisc_root(wd->qdisc));
617 return HRTIMER_NORESTART;
620 void qdisc_watchdog_init_clockid(struct qdisc_watchdog *wd, struct Qdisc *qdisc,
623 hrtimer_init(&wd->timer, clockid, HRTIMER_MODE_ABS_PINNED);
624 wd->timer.function = qdisc_watchdog;
627 EXPORT_SYMBOL(qdisc_watchdog_init_clockid);
629 void qdisc_watchdog_init(struct qdisc_watchdog *wd, struct Qdisc *qdisc)
631 qdisc_watchdog_init_clockid(wd, qdisc, CLOCK_MONOTONIC);
633 EXPORT_SYMBOL(qdisc_watchdog_init);
635 void qdisc_watchdog_schedule_range_ns(struct qdisc_watchdog *wd, u64 expires,
641 deactivated = test_bit(__QDISC_STATE_DEACTIVATED,
642 &qdisc_root_sleeping(wd->qdisc)->state);
647 if (hrtimer_is_queued(&wd->timer)) {
650 softexpires = ktime_to_ns(hrtimer_get_softexpires(&wd->timer));
651 /* If timer is already set in [expires, expires + delta_ns],
652 * do not reprogram it.
654 if (softexpires - expires <= delta_ns)
658 hrtimer_start_range_ns(&wd->timer,
659 ns_to_ktime(expires),
661 HRTIMER_MODE_ABS_PINNED);
663 EXPORT_SYMBOL(qdisc_watchdog_schedule_range_ns);
665 void qdisc_watchdog_cancel(struct qdisc_watchdog *wd)
667 hrtimer_cancel(&wd->timer);
669 EXPORT_SYMBOL(qdisc_watchdog_cancel);
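/* Illustrative sketch of how a shaping qdisc typically uses the watchdog
 * helpers above: arm the watchdog from ->dequeue() when the head packet is
 * not yet allowed to leave, so that the device is rescheduled once the
 * packet becomes eligible.  The "example_shaper" names and its private
 * struct are invented for this example; qdisc_watchdog_schedule_ns() is
 * the plain wrapper around qdisc_watchdog_schedule_range_ns().
 */
struct example_shaper_sched_data {
	struct qdisc_watchdog	watchdog;
	u64			next_tx_time;	/* ns; when the head packet may go out */
};

static int example_shaper_init(struct Qdisc *sch, struct nlattr *opt,
			       struct netlink_ext_ack *extack)
{
	struct example_shaper_sched_data *q = qdisc_priv(sch);

	qdisc_watchdog_init(&q->watchdog, sch);
	return 0;
}

static struct sk_buff *example_shaper_dequeue(struct Qdisc *sch)
{
	struct example_shaper_sched_data *q = qdisc_priv(sch);

	if (!sch->q.qlen)
		return NULL;

	if (ktime_get_ns() < q->next_tx_time) {
		/* Not yet: arm the watchdog and return NULL without
		 * pretending the queue is empty (q.qlen stays non-zero).
		 */
		qdisc_watchdog_schedule_ns(&q->watchdog, q->next_tx_time);
		return NULL;
	}

	return qdisc_dequeue_head(sch);
}

static void example_shaper_destroy(struct Qdisc *sch)
{
	struct example_shaper_sched_data *q = qdisc_priv(sch);

	qdisc_watchdog_cancel(&q->watchdog);
}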
671 static struct hlist_head *qdisc_class_hash_alloc(unsigned int n)
673 struct hlist_head *h;
676 h = kvmalloc_array(n, sizeof(struct hlist_head), GFP_KERNEL);
679 for (i = 0; i < n; i++)
680 INIT_HLIST_HEAD(&h[i]);
685 void qdisc_class_hash_grow(struct Qdisc *sch, struct Qdisc_class_hash *clhash)
687 struct Qdisc_class_common *cl;
688 struct hlist_node *next;
689 struct hlist_head *nhash, *ohash;
690 unsigned int nsize, nmask, osize;
693 /* Rehash when load factor exceeds 0.75 */
694 if (clhash->hashelems * 4 <= clhash->hashsize * 3)
696 nsize = clhash->hashsize * 2;
698 nhash = qdisc_class_hash_alloc(nsize);
702 ohash = clhash->hash;
703 osize = clhash->hashsize;
706 for (i = 0; i < osize; i++) {
707 hlist_for_each_entry_safe(cl, next, &ohash[i], hnode) {
708 h = qdisc_class_hash(cl->classid, nmask);
709 hlist_add_head(&cl->hnode, &nhash[h]);
712 clhash->hash = nhash;
713 clhash->hashsize = nsize;
714 clhash->hashmask = nmask;
715 sch_tree_unlock(sch);
719 EXPORT_SYMBOL(qdisc_class_hash_grow);
721 int qdisc_class_hash_init(struct Qdisc_class_hash *clhash)
723 unsigned int size = 4;
725 clhash->hash = qdisc_class_hash_alloc(size);
728 clhash->hashsize = size;
729 clhash->hashmask = size - 1;
730 clhash->hashelems = 0;
733 EXPORT_SYMBOL(qdisc_class_hash_init);
735 void qdisc_class_hash_destroy(struct Qdisc_class_hash *clhash)
737 kvfree(clhash->hash);
739 EXPORT_SYMBOL(qdisc_class_hash_destroy);
741 void qdisc_class_hash_insert(struct Qdisc_class_hash *clhash,
742 struct Qdisc_class_common *cl)
746 INIT_HLIST_NODE(&cl->hnode);
747 h = qdisc_class_hash(cl->classid, clhash->hashmask);
748 hlist_add_head(&cl->hnode, &clhash->hash[h]);
751 EXPORT_SYMBOL(qdisc_class_hash_insert);
753 void qdisc_class_hash_remove(struct Qdisc_class_hash *clhash,
754 struct Qdisc_class_common *cl)
756 hlist_del(&cl->hnode);
759 EXPORT_SYMBOL(qdisc_class_hash_remove);
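/* Illustrative sketch of how a classful qdisc typically uses the class
 * hash helpers above.  The embedded struct Qdisc_class_common carries the
 * classid and the hash linkage, and qdisc_class_find() from
 * <net/sch_generic.h> performs the lookup.  The "example_cls" names are
 * invented for this example.
 */
struct example_cls {
	struct Qdisc_class_common common;	/* classid + hash node */
	/* per-class scheduling state would follow here */
};

static struct example_cls *example_cls_find(struct Qdisc_class_hash *clhash,
					    u32 classid)
{
	struct Qdisc_class_common *c = qdisc_class_find(clhash, classid);

	return c ? container_of(c, struct example_cls, common) : NULL;
}

static int example_cls_add(struct Qdisc *sch, struct Qdisc_class_hash *clhash,
			   u32 classid)
{
	struct example_cls *cl = kzalloc(sizeof(*cl), GFP_KERNEL);

	if (!cl)
		return -ENOMEM;

	cl->common.classid = classid;
	sch_tree_lock(sch);
	qdisc_class_hash_insert(clhash, &cl->common);
	sch_tree_unlock(sch);

	/* Grow (and rehash) once the load factor gets high; this helper
	 * takes the tree lock itself, so it is called outside of it.
	 */
	qdisc_class_hash_grow(sch, clhash);
	return 0;
}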
/* Allocate a unique handle from the space managed by the kernel.
 * Possible range is [8000-FFFF]:0000 (0x8000 values)
 */
764 static u32 qdisc_alloc_handle(struct net_device *dev)
767 static u32 autohandle = TC_H_MAKE(0x80000000U, 0);
770 autohandle += TC_H_MAKE(0x10000U, 0);
771 if (autohandle == TC_H_MAKE(TC_H_ROOT, 0))
772 autohandle = TC_H_MAKE(0x80000000U, 0);
773 if (!qdisc_lookup(dev, autohandle))
781 void qdisc_tree_reduce_backlog(struct Qdisc *sch, int n, int len)
783 bool qdisc_is_offloaded = sch->flags & TCQ_F_OFFLOADED;
784 const struct Qdisc_class_ops *cops;
790 if (n == 0 && len == 0)
792 drops = max_t(int, n, 0);
794 while ((parentid = sch->parent)) {
795 if (TC_H_MAJ(parentid) == TC_H_MAJ(TC_H_INGRESS))
798 if (sch->flags & TCQ_F_NOPARENT)
/* Notify the parent qdisc only if the child qdisc becomes empty.
 *
 * If the child was empty even before the update, then the backlog
 * counter is screwed and we skip the notification because the
 * parent class is already passive.
 *
 * If the original child was offloaded, then it is allowed to be
 * seen as empty, so the parent is notified anyway.
 */
809 notify = !sch->q.qlen && !WARN_ON_ONCE(!n &&
810 !qdisc_is_offloaded);
811 /* TODO: perform the search on a per txq basis */
812 sch = qdisc_lookup_rcu(qdisc_dev(sch), TC_H_MAJ(parentid));
814 WARN_ON_ONCE(parentid != TC_H_ROOT);
817 cops = sch->ops->cl_ops;
818 if (notify && cops->qlen_notify) {
819 cl = cops->find(sch, parentid);
820 cops->qlen_notify(sch, cl);
823 sch->qstats.backlog -= len;
824 __qdisc_qstats_drop(sch, drops);
828 EXPORT_SYMBOL(qdisc_tree_reduce_backlog);
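/* Illustrative sketch of the typical caller pattern for the helper above:
 * when a (grand)child qdisc is purged behind its ancestors' backs, the
 * qlen/backlog that disappeared must be propagated up the tree so that
 * ancestor counters stay consistent and now-empty parent classes can be
 * deactivated.  Helpers such as qdisc_purge_queue() wrap this pattern;
 * the function name below is hypothetical.
 */
static void example_purge_child(struct Qdisc *child)
{
	__u32 qlen, backlog;

	qdisc_qstats_qlen_backlog(child, &qlen, &backlog);
	qdisc_reset(child);
	qdisc_tree_reduce_backlog(child, qlen, backlog);
}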
830 int qdisc_offload_dump_helper(struct Qdisc *sch, enum tc_setup_type type,
833 struct net_device *dev = qdisc_dev(sch);
836 sch->flags &= ~TCQ_F_OFFLOADED;
837 if (!tc_can_offload(dev) || !dev->netdev_ops->ndo_setup_tc)
840 err = dev->netdev_ops->ndo_setup_tc(dev, type, type_data);
841 if (err == -EOPNOTSUPP)
845 sch->flags |= TCQ_F_OFFLOADED;
849 EXPORT_SYMBOL(qdisc_offload_dump_helper);
851 void qdisc_offload_graft_helper(struct net_device *dev, struct Qdisc *sch,
852 struct Qdisc *new, struct Qdisc *old,
853 enum tc_setup_type type, void *type_data,
854 struct netlink_ext_ack *extack)
856 bool any_qdisc_is_offloaded;
859 if (!tc_can_offload(dev) || !dev->netdev_ops->ndo_setup_tc)
862 err = dev->netdev_ops->ndo_setup_tc(dev, type, type_data);
864 /* Don't report error if the graft is part of destroy operation. */
865 if (!err || !new || new == &noop_qdisc)
868 /* Don't report error if the parent, the old child and the new
869 * one are not offloaded.
871 any_qdisc_is_offloaded = new->flags & TCQ_F_OFFLOADED;
872 any_qdisc_is_offloaded |= sch && sch->flags & TCQ_F_OFFLOADED;
873 any_qdisc_is_offloaded |= old && old->flags & TCQ_F_OFFLOADED;
875 if (any_qdisc_is_offloaded)
876 NL_SET_ERR_MSG(extack, "Offloading graft operation failed.");
878 EXPORT_SYMBOL(qdisc_offload_graft_helper);
880 void qdisc_offload_query_caps(struct net_device *dev,
881 enum tc_setup_type type,
882 void *caps, size_t caps_len)
884 const struct net_device_ops *ops = dev->netdev_ops;
885 struct tc_query_caps_base base = {
890 memset(caps, 0, caps_len);
892 if (ops->ndo_setup_tc)
893 ops->ndo_setup_tc(dev, TC_QUERY_CAPS, &base);
895 EXPORT_SYMBOL(qdisc_offload_query_caps);
897 static void qdisc_offload_graft_root(struct net_device *dev,
898 struct Qdisc *new, struct Qdisc *old,
899 struct netlink_ext_ack *extack)
901 struct tc_root_qopt_offload graft_offload = {
902 .command = TC_ROOT_GRAFT,
903 .handle = new ? new->handle : 0,
904 .ingress = (new && new->flags & TCQ_F_INGRESS) ||
905 (old && old->flags & TCQ_F_INGRESS),
908 qdisc_offload_graft_helper(dev, NULL, new, old,
909 TC_SETUP_ROOT_QDISC, &graft_offload, extack);
912 static int tc_fill_qdisc(struct sk_buff *skb, struct Qdisc *q, u32 clid,
913 u32 portid, u32 seq, u16 flags, int event,
914 struct netlink_ext_ack *extack)
916 struct gnet_stats_basic_sync __percpu *cpu_bstats = NULL;
917 struct gnet_stats_queue __percpu *cpu_qstats = NULL;
919 struct nlmsghdr *nlh;
920 unsigned char *b = skb_tail_pointer(skb);
922 struct qdisc_size_table *stab;
927 nlh = nlmsg_put(skb, portid, seq, event, sizeof(*tcm), flags);
930 tcm = nlmsg_data(nlh);
931 tcm->tcm_family = AF_UNSPEC;
934 tcm->tcm_ifindex = qdisc_dev(q)->ifindex;
935 tcm->tcm_parent = clid;
936 tcm->tcm_handle = q->handle;
937 tcm->tcm_info = refcount_read(&q->refcnt);
938 if (nla_put_string(skb, TCA_KIND, q->ops->id))
939 goto nla_put_failure;
940 if (q->ops->ingress_block_get) {
941 block_index = q->ops->ingress_block_get(q);
943 nla_put_u32(skb, TCA_INGRESS_BLOCK, block_index))
944 goto nla_put_failure;
946 if (q->ops->egress_block_get) {
947 block_index = q->ops->egress_block_get(q);
949 nla_put_u32(skb, TCA_EGRESS_BLOCK, block_index))
950 goto nla_put_failure;
952 if (q->ops->dump && q->ops->dump(q, skb) < 0)
953 goto nla_put_failure;
954 if (nla_put_u8(skb, TCA_HW_OFFLOAD, !!(q->flags & TCQ_F_OFFLOADED)))
955 goto nla_put_failure;
956 qlen = qdisc_qlen_sum(q);
958 stab = rtnl_dereference(q->stab);
959 if (stab && qdisc_dump_stab(skb, stab) < 0)
960 goto nla_put_failure;
962 if (gnet_stats_start_copy_compat(skb, TCA_STATS2, TCA_STATS, TCA_XSTATS,
963 NULL, &d, TCA_PAD) < 0)
964 goto nla_put_failure;
966 if (q->ops->dump_stats && q->ops->dump_stats(q, &d) < 0)
967 goto nla_put_failure;
969 if (qdisc_is_percpu_stats(q)) {
970 cpu_bstats = q->cpu_bstats;
971 cpu_qstats = q->cpu_qstats;
974 if (gnet_stats_copy_basic(&d, cpu_bstats, &q->bstats, true) < 0 ||
975 gnet_stats_copy_rate_est(&d, &q->rate_est) < 0 ||
976 gnet_stats_copy_queue(&d, cpu_qstats, &q->qstats, qlen) < 0)
977 goto nla_put_failure;
979 if (gnet_stats_finish_copy(&d) < 0)
980 goto nla_put_failure;
982 if (extack && extack->_msg &&
983 nla_put_string(skb, TCA_EXT_WARN_MSG, extack->_msg))
986 nlh->nlmsg_len = skb_tail_pointer(skb) - b;
996 static bool tc_qdisc_dump_ignore(struct Qdisc *q, bool dump_invisible)
998 if (q->flags & TCQ_F_BUILTIN)
1000 if ((q->flags & TCQ_F_INVISIBLE) && !dump_invisible)
1006 static int qdisc_get_notify(struct net *net, struct sk_buff *oskb,
1007 struct nlmsghdr *n, u32 clid, struct Qdisc *q,
1008 struct netlink_ext_ack *extack)
1010 struct sk_buff *skb;
1011 u32 portid = oskb ? NETLINK_CB(oskb).portid : 0;
1013 skb = alloc_skb(NLMSG_GOODSIZE, GFP_KERNEL);
1017 if (!tc_qdisc_dump_ignore(q, false)) {
1018 if (tc_fill_qdisc(skb, q, clid, portid, n->nlmsg_seq, 0,
1019 RTM_NEWQDISC, extack) < 0)
1024 return rtnetlink_send(skb, net, portid, RTNLGRP_TC,
1025 n->nlmsg_flags & NLM_F_ECHO);
1032 static int qdisc_notify(struct net *net, struct sk_buff *oskb,
1033 struct nlmsghdr *n, u32 clid,
1034 struct Qdisc *old, struct Qdisc *new,
1035 struct netlink_ext_ack *extack)
1037 struct sk_buff *skb;
1038 u32 portid = oskb ? NETLINK_CB(oskb).portid : 0;
1040 if (!rtnl_notify_needed(net, n->nlmsg_flags, RTNLGRP_TC))
1043 skb = alloc_skb(NLMSG_GOODSIZE, GFP_KERNEL);
1047 if (old && !tc_qdisc_dump_ignore(old, false)) {
1048 if (tc_fill_qdisc(skb, old, clid, portid, n->nlmsg_seq,
1049 0, RTM_DELQDISC, extack) < 0)
1052 if (new && !tc_qdisc_dump_ignore(new, false)) {
1053 if (tc_fill_qdisc(skb, new, clid, portid, n->nlmsg_seq,
1054 old ? NLM_F_REPLACE : 0, RTM_NEWQDISC, extack) < 0)
1059 return rtnetlink_send(skb, net, portid, RTNLGRP_TC,
1060 n->nlmsg_flags & NLM_F_ECHO);
1067 static void notify_and_destroy(struct net *net, struct sk_buff *skb,
1068 struct nlmsghdr *n, u32 clid,
1069 struct Qdisc *old, struct Qdisc *new,
1070 struct netlink_ext_ack *extack)
1073 qdisc_notify(net, skb, n, clid, old, new, extack);
1079 static void qdisc_clear_nolock(struct Qdisc *sch)
1081 sch->flags &= ~TCQ_F_NOLOCK;
1082 if (!(sch->flags & TCQ_F_CPUSTATS))
1085 free_percpu(sch->cpu_bstats);
1086 free_percpu(sch->cpu_qstats);
1087 sch->cpu_bstats = NULL;
1088 sch->cpu_qstats = NULL;
1089 sch->flags &= ~TCQ_F_CPUSTATS;
/* Graft qdisc "new" to class "classid" of qdisc "parent" or
 * to device "dev".
 *
 * When appropriate, send a netlink notification using 'skb'
 * and "n".
 *
 * On success, destroy the old qdisc.
 */
1101 static int qdisc_graft(struct net_device *dev, struct Qdisc *parent,
1102 struct sk_buff *skb, struct nlmsghdr *n, u32 classid,
1103 struct Qdisc *new, struct Qdisc *old,
1104 struct netlink_ext_ack *extack)
1106 struct Qdisc *q = old;
1107 struct net *net = dev_net(dev);
1109 if (parent == NULL) {
1110 unsigned int i, num_q, ingress;
1111 struct netdev_queue *dev_queue;
1114 num_q = dev->num_tx_queues;
1115 if ((q && q->flags & TCQ_F_INGRESS) ||
1116 (new && new->flags & TCQ_F_INGRESS)) {
1118 dev_queue = dev_ingress_queue(dev);
1120 NL_SET_ERR_MSG(extack, "Device does not have an ingress queue");
1124 q = rtnl_dereference(dev_queue->qdisc_sleeping);
1126 /* This is the counterpart of that qdisc_refcount_inc_nz() call in
1127 * __tcf_qdisc_find() for filter requests.
1129 if (!qdisc_refcount_dec_if_one(q)) {
1130 NL_SET_ERR_MSG(extack,
1131 "Current ingress or clsact Qdisc has ongoing filter requests");
1136 if (dev->flags & IFF_UP)
1137 dev_deactivate(dev);
1139 qdisc_offload_graft_root(dev, new, old, extack);
1141 if (new && new->ops->attach && !ingress)
1145 for (i = 0; i < num_q; i++) {
1146 dev_queue = netdev_get_tx_queue(dev, i);
1147 old = dev_graft_qdisc(dev_queue, new);
1150 qdisc_refcount_inc(new);
1154 old = dev_graft_qdisc(dev_queue, NULL);
1156 /* {ingress,clsact}_destroy() @old before grafting @new to avoid
1157 * unprotected concurrent accesses to net_device::miniq_{in,e}gress
1158 * pointer(s) in mini_qdisc_pair_swap().
1160 qdisc_notify(net, skb, n, classid, old, new, extack);
1163 dev_graft_qdisc(dev_queue, new);
1168 old = rtnl_dereference(dev->qdisc);
1169 if (new && !new->ops->attach)
1170 qdisc_refcount_inc(new);
1171 rcu_assign_pointer(dev->qdisc, new ? : &noop_qdisc);
1173 notify_and_destroy(net, skb, n, classid, old, new, extack);
1175 if (new && new->ops->attach)
1176 new->ops->attach(new);
1179 if (dev->flags & IFF_UP)
1182 const struct Qdisc_class_ops *cops = parent->ops->cl_ops;
1186 /* Only support running class lockless if parent is lockless */
1187 if (new && (new->flags & TCQ_F_NOLOCK) && !(parent->flags & TCQ_F_NOLOCK))
1188 qdisc_clear_nolock(new);
1190 if (!cops || !cops->graft)
1193 cl = cops->find(parent, classid);
1195 NL_SET_ERR_MSG(extack, "Specified class not found");
1199 if (new && new->ops == &noqueue_qdisc_ops) {
1200 NL_SET_ERR_MSG(extack, "Cannot assign noqueue to a class");
1204 err = cops->graft(parent, cl, new, &old, extack);
1207 notify_and_destroy(net, skb, n, classid, old, new, extack);
1212 static int qdisc_block_indexes_set(struct Qdisc *sch, struct nlattr **tca,
1213 struct netlink_ext_ack *extack)
1217 if (tca[TCA_INGRESS_BLOCK]) {
1218 block_index = nla_get_u32(tca[TCA_INGRESS_BLOCK]);
1221 NL_SET_ERR_MSG(extack, "Ingress block index cannot be 0");
1224 if (!sch->ops->ingress_block_set) {
1225 NL_SET_ERR_MSG(extack, "Ingress block sharing is not supported");
1228 sch->ops->ingress_block_set(sch, block_index);
1230 if (tca[TCA_EGRESS_BLOCK]) {
1231 block_index = nla_get_u32(tca[TCA_EGRESS_BLOCK]);
1234 NL_SET_ERR_MSG(extack, "Egress block index cannot be 0");
1237 if (!sch->ops->egress_block_set) {
1238 NL_SET_ERR_MSG(extack, "Egress block sharing is not supported");
1241 sch->ops->egress_block_set(sch, block_index);
   Allocate and initialize a new qdisc.

   Parameters are passed via opt.
1252 static struct Qdisc *qdisc_create(struct net_device *dev,
1253 struct netdev_queue *dev_queue,
1254 u32 parent, u32 handle,
1255 struct nlattr **tca, int *errp,
1256 struct netlink_ext_ack *extack)
1259 struct nlattr *kind = tca[TCA_KIND];
1261 struct Qdisc_ops *ops;
1262 struct qdisc_size_table *stab;
1264 ops = qdisc_lookup_ops(kind);
1265 #ifdef CONFIG_MODULES
1266 if (ops == NULL && kind != NULL) {
1267 char name[IFNAMSIZ];
1268 if (nla_strscpy(name, kind, IFNAMSIZ) >= 0) {
1269 /* We dropped the RTNL semaphore in order to
1270 * perform the module load. So, even if we
1271 * succeeded in loading the module we have to
1272 * tell the caller to replay the request. We
1273 * indicate this using -EAGAIN.
1274 * We replay the request because the device may
* go away in the meantime.
1278 request_module(NET_SCH_ALIAS_PREFIX "%s", name);
1280 ops = qdisc_lookup_ops(kind);
1282 /* We will try again qdisc_lookup_ops,
1283 * so don't keep a reference.
1285 module_put(ops->owner);
1295 NL_SET_ERR_MSG(extack, "Specified qdisc kind is unknown");
1299 sch = qdisc_alloc(dev_queue, ops, extack);
1305 sch->parent = parent;
1307 if (handle == TC_H_INGRESS) {
1308 if (!(sch->flags & TCQ_F_INGRESS)) {
1309 NL_SET_ERR_MSG(extack,
1310 "Specified parent ID is reserved for ingress and clsact Qdiscs");
1314 handle = TC_H_MAKE(TC_H_INGRESS, 0);
1317 handle = qdisc_alloc_handle(dev);
1319 NL_SET_ERR_MSG(extack, "Maximum number of qdisc handles was exceeded");
1324 if (!netif_is_multiqueue(dev))
1325 sch->flags |= TCQ_F_ONETXQUEUE;
1328 sch->handle = handle;
/* This exists to keep backward compatibility with a userspace
 * loophole that allowed userspace to get the IFF_NO_QUEUE facility
 * on older kernels by setting tx_queue_len=0 (prior to qdisc init)
 * and then forgetting to reinit tx_queue_len before attaching a
 * qdisc again.
 */
1336 if ((dev->priv_flags & IFF_NO_QUEUE) && (dev->tx_queue_len == 0)) {
1337 WRITE_ONCE(dev->tx_queue_len, DEFAULT_TX_QUEUE_LEN);
1338 netdev_info(dev, "Caught tx_queue_len zero misconfig\n");
1341 err = qdisc_block_indexes_set(sch, tca, extack);
1345 if (tca[TCA_STAB]) {
1346 stab = qdisc_get_stab(tca[TCA_STAB], extack);
1348 err = PTR_ERR(stab);
1351 rcu_assign_pointer(sch->stab, stab);
1355 err = ops->init(sch, tca[TCA_OPTIONS], extack);
1360 if (tca[TCA_RATE]) {
1362 if (sch->flags & TCQ_F_MQROOT) {
1363 NL_SET_ERR_MSG(extack, "Cannot attach rate estimator to a multi-queue root qdisc");
1367 err = gen_new_estimator(&sch->bstats,
1374 NL_SET_ERR_MSG(extack, "Failed to generate new estimator");
1379 qdisc_hash_add(sch, false);
1380 trace_qdisc_create(ops, dev, parent);
1385 /* Even if ops->init() failed, we call ops->destroy()
1386 * like qdisc_create_dflt().
1390 qdisc_put_stab(rtnl_dereference(sch->stab));
1392 lockdep_unregister_key(&sch->root_lock_key);
1393 netdev_put(dev, &sch->dev_tracker);
1396 module_put(ops->owner);
1402 static int qdisc_change(struct Qdisc *sch, struct nlattr **tca,
1403 struct netlink_ext_ack *extack)
1405 struct qdisc_size_table *ostab, *stab = NULL;
1408 if (tca[TCA_OPTIONS]) {
1409 if (!sch->ops->change) {
1410 NL_SET_ERR_MSG(extack, "Change operation not supported by specified qdisc");
1413 if (tca[TCA_INGRESS_BLOCK] || tca[TCA_EGRESS_BLOCK]) {
1414 NL_SET_ERR_MSG(extack, "Change of blocks is not supported");
1417 err = sch->ops->change(sch, tca[TCA_OPTIONS], extack);
1422 if (tca[TCA_STAB]) {
1423 stab = qdisc_get_stab(tca[TCA_STAB], extack);
1425 return PTR_ERR(stab);
1428 ostab = rtnl_dereference(sch->stab);
1429 rcu_assign_pointer(sch->stab, stab);
1430 qdisc_put_stab(ostab);
1432 if (tca[TCA_RATE]) {
1433 /* NB: ignores errors from replace_estimator
1434 because change can't be undone. */
1435 if (sch->flags & TCQ_F_MQROOT)
1437 gen_replace_estimator(&sch->bstats,
1448 struct check_loop_arg {
1449 struct qdisc_walker w;
1454 static int check_loop_fn(struct Qdisc *q, unsigned long cl,
1455 struct qdisc_walker *w);
1457 static int check_loop(struct Qdisc *q, struct Qdisc *p, int depth)
1459 struct check_loop_arg arg;
1461 if (q->ops->cl_ops == NULL)
1464 arg.w.stop = arg.w.skip = arg.w.count = 0;
1465 arg.w.fn = check_loop_fn;
1468 q->ops->cl_ops->walk(q, &arg.w);
1469 return arg.w.stop ? -ELOOP : 0;
1473 check_loop_fn(struct Qdisc *q, unsigned long cl, struct qdisc_walker *w)
1476 const struct Qdisc_class_ops *cops = q->ops->cl_ops;
1477 struct check_loop_arg *arg = (struct check_loop_arg *)w;
1479 leaf = cops->leaf(q, cl);
1481 if (leaf == arg->p || arg->depth > 7)
1483 return check_loop(leaf, arg->p, arg->depth + 1);
1488 const struct nla_policy rtm_tca_policy[TCA_MAX + 1] = {
1489 [TCA_KIND] = { .type = NLA_STRING },
1490 [TCA_RATE] = { .type = NLA_BINARY,
1491 .len = sizeof(struct tc_estimator) },
1492 [TCA_STAB] = { .type = NLA_NESTED },
1493 [TCA_DUMP_INVISIBLE] = { .type = NLA_FLAG },
1494 [TCA_CHAIN] = { .type = NLA_U32 },
1495 [TCA_INGRESS_BLOCK] = { .type = NLA_U32 },
1496 [TCA_EGRESS_BLOCK] = { .type = NLA_U32 },
1503 static int tc_get_qdisc(struct sk_buff *skb, struct nlmsghdr *n,
1504 struct netlink_ext_ack *extack)
1506 struct net *net = sock_net(skb->sk);
1507 struct tcmsg *tcm = nlmsg_data(n);
1508 struct nlattr *tca[TCA_MAX + 1];
1509 struct net_device *dev;
1511 struct Qdisc *q = NULL;
1512 struct Qdisc *p = NULL;
1515 err = nlmsg_parse_deprecated(n, sizeof(*tcm), tca, TCA_MAX,
1516 rtm_tca_policy, extack);
1520 dev = __dev_get_by_index(net, tcm->tcm_ifindex);
1524 clid = tcm->tcm_parent;
1526 if (clid != TC_H_ROOT) {
1527 if (TC_H_MAJ(clid) != TC_H_MAJ(TC_H_INGRESS)) {
1528 p = qdisc_lookup(dev, TC_H_MAJ(clid));
1530 NL_SET_ERR_MSG(extack, "Failed to find qdisc with specified classid");
1533 q = qdisc_leaf(p, clid);
1534 } else if (dev_ingress_queue(dev)) {
1535 q = rtnl_dereference(dev_ingress_queue(dev)->qdisc_sleeping);
1538 q = rtnl_dereference(dev->qdisc);
1541 NL_SET_ERR_MSG(extack, "Cannot find specified qdisc on specified device");
1545 if (tcm->tcm_handle && q->handle != tcm->tcm_handle) {
1546 NL_SET_ERR_MSG(extack, "Invalid handle");
1550 q = qdisc_lookup(dev, tcm->tcm_handle);
1552 NL_SET_ERR_MSG(extack, "Failed to find qdisc with specified handle");
1557 if (tca[TCA_KIND] && nla_strcmp(tca[TCA_KIND], q->ops->id)) {
1558 NL_SET_ERR_MSG(extack, "Invalid qdisc name");
1562 if (n->nlmsg_type == RTM_DELQDISC) {
1564 NL_SET_ERR_MSG(extack, "Classid cannot be zero");
1567 if (q->handle == 0) {
1568 NL_SET_ERR_MSG(extack, "Cannot delete qdisc with handle of zero");
1571 err = qdisc_graft(dev, p, skb, n, clid, NULL, q, extack);
1575 qdisc_get_notify(net, skb, n, clid, q, NULL);
1580 static bool req_create_or_replace(struct nlmsghdr *n)
1582 return (n->nlmsg_flags & NLM_F_CREATE &&
1583 n->nlmsg_flags & NLM_F_REPLACE);
1586 static bool req_create_exclusive(struct nlmsghdr *n)
1588 return (n->nlmsg_flags & NLM_F_CREATE &&
1589 n->nlmsg_flags & NLM_F_EXCL);
1592 static bool req_change(struct nlmsghdr *n)
1594 return (!(n->nlmsg_flags & NLM_F_CREATE) &&
1595 !(n->nlmsg_flags & NLM_F_REPLACE) &&
1596 !(n->nlmsg_flags & NLM_F_EXCL));
1600 * Create/change qdisc.
1602 static int tc_modify_qdisc(struct sk_buff *skb, struct nlmsghdr *n,
1603 struct netlink_ext_ack *extack)
1605 struct net *net = sock_net(skb->sk);
1607 struct nlattr *tca[TCA_MAX + 1];
1608 struct net_device *dev;
1610 struct Qdisc *q, *p;
1614 /* Reinit, just in case something touches this. */
1615 err = nlmsg_parse_deprecated(n, sizeof(*tcm), tca, TCA_MAX,
1616 rtm_tca_policy, extack);
1620 tcm = nlmsg_data(n);
1621 clid = tcm->tcm_parent;
1624 dev = __dev_get_by_index(net, tcm->tcm_ifindex);
1630 if (clid != TC_H_ROOT) {
1631 if (clid != TC_H_INGRESS) {
1632 p = qdisc_lookup(dev, TC_H_MAJ(clid));
1634 NL_SET_ERR_MSG(extack, "Failed to find specified qdisc");
1637 q = qdisc_leaf(p, clid);
1638 } else if (dev_ingress_queue_create(dev)) {
1639 q = rtnl_dereference(dev_ingress_queue(dev)->qdisc_sleeping);
1642 q = rtnl_dereference(dev->qdisc);
1645 /* It may be default qdisc, ignore it */
1646 if (q && q->handle == 0)
1649 if (!q || !tcm->tcm_handle || q->handle != tcm->tcm_handle) {
1650 if (tcm->tcm_handle) {
1651 if (q && !(n->nlmsg_flags & NLM_F_REPLACE)) {
1652 NL_SET_ERR_MSG(extack, "NLM_F_REPLACE needed to override");
1655 if (TC_H_MIN(tcm->tcm_handle)) {
1656 NL_SET_ERR_MSG(extack, "Invalid minor handle");
1659 q = qdisc_lookup(dev, tcm->tcm_handle);
1661 goto create_n_graft;
1662 if (n->nlmsg_flags & NLM_F_EXCL) {
1663 NL_SET_ERR_MSG(extack, "Exclusivity flag on, cannot override");
1666 if (tca[TCA_KIND] &&
1667 nla_strcmp(tca[TCA_KIND], q->ops->id)) {
1668 NL_SET_ERR_MSG(extack, "Invalid qdisc name");
1671 if (q->flags & TCQ_F_INGRESS) {
1672 NL_SET_ERR_MSG(extack,
1673 "Cannot regraft ingress or clsact Qdiscs");
1677 (p && check_loop(q, p, 0))) {
1678 NL_SET_ERR_MSG(extack, "Qdisc parent/child loop detected");
1681 if (clid == TC_H_INGRESS) {
1682 NL_SET_ERR_MSG(extack, "Ingress cannot graft directly");
1685 qdisc_refcount_inc(q);
1689 goto create_n_graft;
/* This magic test requires explanation.
 *
 *   We know that some child q is already
 *   attached to this parent and we have a choice:
 *   1) change it or 2) create/graft a new one.
 *   If the requested qdisc kind is different
 *   from the existing one, then we choose graft.
 *   If they are the same, then this is a "change"
 *   operation - just let it fall through.
 *
 *   1. We are allowed to create/graft only
 *   if the request explicitly states
 *   "please create if it doesn't exist".
 *
 *   2. If the request is an exclusive create,
 *   then the qdisc tcm_handle is not expected
 *   to exist, so we choose create/graft too.
 *
 *   3. The last case is when no flags are set.
 *      This happens when, for example, the tc
 *      utility issues a "change" command.
 *      Alas, it is sort of a hole in the API; we
 *      cannot decide what to do unambiguously.
 *      For now we select create/graft.
 */
1716 if (tca[TCA_KIND] &&
1717 nla_strcmp(tca[TCA_KIND], q->ops->id)) {
1718 if (req_create_or_replace(n) ||
1719 req_create_exclusive(n))
1720 goto create_n_graft;
1721 else if (req_change(n))
1722 goto create_n_graft2;
1727 if (!tcm->tcm_handle) {
1728 NL_SET_ERR_MSG(extack, "Handle cannot be zero");
1731 q = qdisc_lookup(dev, tcm->tcm_handle);
1734 /* Change qdisc parameters */
1736 NL_SET_ERR_MSG(extack, "Specified qdisc not found");
1739 if (n->nlmsg_flags & NLM_F_EXCL) {
1740 NL_SET_ERR_MSG(extack, "Exclusivity flag on, cannot modify");
1743 if (tca[TCA_KIND] && nla_strcmp(tca[TCA_KIND], q->ops->id)) {
1744 NL_SET_ERR_MSG(extack, "Invalid qdisc name");
1747 err = qdisc_change(q, tca, extack);
1749 qdisc_notify(net, skb, n, clid, NULL, q, extack);
1753 if (!(n->nlmsg_flags & NLM_F_CREATE)) {
1754 NL_SET_ERR_MSG(extack, "Qdisc not found. To create specify NLM_F_CREATE flag");
1758 if (clid == TC_H_INGRESS) {
1759 if (dev_ingress_queue(dev)) {
1760 q = qdisc_create(dev, dev_ingress_queue(dev),
1761 tcm->tcm_parent, tcm->tcm_parent,
1764 NL_SET_ERR_MSG(extack, "Cannot find ingress queue for specified device");
1768 struct netdev_queue *dev_queue;
1770 if (p && p->ops->cl_ops && p->ops->cl_ops->select_queue)
1771 dev_queue = p->ops->cl_ops->select_queue(p, tcm);
1773 dev_queue = p->dev_queue;
1775 dev_queue = netdev_get_tx_queue(dev, 0);
1777 q = qdisc_create(dev, dev_queue,
1778 tcm->tcm_parent, tcm->tcm_handle,
1788 err = qdisc_graft(dev, p, skb, n, clid, q, NULL, extack);
1798 static int tc_dump_qdisc_root(struct Qdisc *root, struct sk_buff *skb,
1799 struct netlink_callback *cb,
1800 int *q_idx_p, int s_q_idx, bool recur,
1801 bool dump_invisible)
1803 int ret = 0, q_idx = *q_idx_p;
1811 if (q_idx < s_q_idx) {
1814 if (!tc_qdisc_dump_ignore(q, dump_invisible) &&
1815 tc_fill_qdisc(skb, q, q->parent, NETLINK_CB(cb->skb).portid,
1816 cb->nlh->nlmsg_seq, NLM_F_MULTI,
1817 RTM_NEWQDISC, NULL) <= 0)
1822 /* If dumping singletons, there is no qdisc_dev(root) and the singleton
1823 * itself has already been dumped.
1825 * If we've already dumped the top-level (ingress) qdisc above and the global
1826 * qdisc hashtable, we don't want to hit it again
1828 if (!qdisc_dev(root) || !recur)
1831 hash_for_each(qdisc_dev(root)->qdisc_hash, b, q, hash) {
1832 if (q_idx < s_q_idx) {
1836 if (!tc_qdisc_dump_ignore(q, dump_invisible) &&
1837 tc_fill_qdisc(skb, q, q->parent, NETLINK_CB(cb->skb).portid,
1838 cb->nlh->nlmsg_seq, NLM_F_MULTI,
1839 RTM_NEWQDISC, NULL) <= 0)
1852 static int tc_dump_qdisc(struct sk_buff *skb, struct netlink_callback *cb)
1854 struct net *net = sock_net(skb->sk);
1857 struct net_device *dev;
1858 const struct nlmsghdr *nlh = cb->nlh;
1859 struct nlattr *tca[TCA_MAX + 1];
1862 s_idx = cb->args[0];
1863 s_q_idx = q_idx = cb->args[1];
1868 err = nlmsg_parse_deprecated(nlh, sizeof(struct tcmsg), tca, TCA_MAX,
1869 rtm_tca_policy, cb->extack);
1873 for_each_netdev(net, dev) {
1874 struct netdev_queue *dev_queue;
1882 if (tc_dump_qdisc_root(rtnl_dereference(dev->qdisc),
1883 skb, cb, &q_idx, s_q_idx,
1884 true, tca[TCA_DUMP_INVISIBLE]) < 0)
1887 dev_queue = dev_ingress_queue(dev);
1889 tc_dump_qdisc_root(rtnl_dereference(dev_queue->qdisc_sleeping),
1890 skb, cb, &q_idx, s_q_idx, false,
1891 tca[TCA_DUMP_INVISIBLE]) < 0)
1900 cb->args[1] = q_idx;
1907 /************************************************
1908 * Traffic classes manipulation. *
1909 ************************************************/
1911 static int tc_fill_tclass(struct sk_buff *skb, struct Qdisc *q,
1912 unsigned long cl, u32 portid, u32 seq, u16 flags,
1913 int event, struct netlink_ext_ack *extack)
1916 struct nlmsghdr *nlh;
1917 unsigned char *b = skb_tail_pointer(skb);
1919 const struct Qdisc_class_ops *cl_ops = q->ops->cl_ops;
1922 nlh = nlmsg_put(skb, portid, seq, event, sizeof(*tcm), flags);
1924 goto out_nlmsg_trim;
1925 tcm = nlmsg_data(nlh);
1926 tcm->tcm_family = AF_UNSPEC;
1929 tcm->tcm_ifindex = qdisc_dev(q)->ifindex;
1930 tcm->tcm_parent = q->handle;
1931 tcm->tcm_handle = q->handle;
1933 if (nla_put_string(skb, TCA_KIND, q->ops->id))
1934 goto nla_put_failure;
1935 if (cl_ops->dump && cl_ops->dump(q, cl, skb, tcm) < 0)
1936 goto nla_put_failure;
1938 if (gnet_stats_start_copy_compat(skb, TCA_STATS2, TCA_STATS, TCA_XSTATS,
1939 NULL, &d, TCA_PAD) < 0)
1940 goto nla_put_failure;
1942 if (cl_ops->dump_stats && cl_ops->dump_stats(q, cl, &d) < 0)
1943 goto nla_put_failure;
1945 if (gnet_stats_finish_copy(&d) < 0)
1946 goto nla_put_failure;
1948 if (extack && extack->_msg &&
1949 nla_put_string(skb, TCA_EXT_WARN_MSG, extack->_msg))
1950 goto out_nlmsg_trim;
1952 nlh->nlmsg_len = skb_tail_pointer(skb) - b;
1962 static int tclass_notify(struct net *net, struct sk_buff *oskb,
1963 struct nlmsghdr *n, struct Qdisc *q,
1964 unsigned long cl, int event, struct netlink_ext_ack *extack)
1966 struct sk_buff *skb;
1967 u32 portid = oskb ? NETLINK_CB(oskb).portid : 0;
1969 if (!rtnl_notify_needed(net, n->nlmsg_flags, RTNLGRP_TC))
1972 skb = alloc_skb(NLMSG_GOODSIZE, GFP_KERNEL);
1976 if (tc_fill_tclass(skb, q, cl, portid, n->nlmsg_seq, 0, event, extack) < 0) {
1981 return rtnetlink_send(skb, net, portid, RTNLGRP_TC,
1982 n->nlmsg_flags & NLM_F_ECHO);
1985 static int tclass_get_notify(struct net *net, struct sk_buff *oskb,
1986 struct nlmsghdr *n, struct Qdisc *q,
1987 unsigned long cl, struct netlink_ext_ack *extack)
1989 struct sk_buff *skb;
1990 u32 portid = oskb ? NETLINK_CB(oskb).portid : 0;
1992 skb = alloc_skb(NLMSG_GOODSIZE, GFP_KERNEL);
1996 if (tc_fill_tclass(skb, q, cl, portid, n->nlmsg_seq, 0, RTM_NEWTCLASS,
2002 return rtnetlink_send(skb, net, portid, RTNLGRP_TC,
2003 n->nlmsg_flags & NLM_F_ECHO);
2006 static int tclass_del_notify(struct net *net,
2007 const struct Qdisc_class_ops *cops,
2008 struct sk_buff *oskb, struct nlmsghdr *n,
2009 struct Qdisc *q, unsigned long cl,
2010 struct netlink_ext_ack *extack)
2012 u32 portid = oskb ? NETLINK_CB(oskb).portid : 0;
2013 struct sk_buff *skb;
2019 if (rtnl_notify_needed(net, n->nlmsg_flags, RTNLGRP_TC)) {
2020 skb = alloc_skb(NLMSG_GOODSIZE, GFP_KERNEL);
2024 if (tc_fill_tclass(skb, q, cl, portid, n->nlmsg_seq, 0,
2025 RTM_DELTCLASS, extack) < 0) {
2033 err = cops->delete(q, cl, extack);
2039 err = rtnetlink_maybe_send(skb, net, portid, RTNLGRP_TC,
2040 n->nlmsg_flags & NLM_F_ECHO);
2044 #ifdef CONFIG_NET_CLS
2046 struct tcf_bind_args {
2047 struct tcf_walker w;
2053 static int tcf_node_bind(struct tcf_proto *tp, void *n, struct tcf_walker *arg)
2055 struct tcf_bind_args *a = (void *)arg;
2057 if (n && tp->ops->bind_class) {
2058 struct Qdisc *q = tcf_block_q(tp->chain->block);
2061 tp->ops->bind_class(n, a->classid, a->cl, q, a->base);
2067 struct tc_bind_class_args {
2068 struct qdisc_walker w;
2069 unsigned long new_cl;
2074 static int tc_bind_class_walker(struct Qdisc *q, unsigned long cl,
2075 struct qdisc_walker *w)
2077 struct tc_bind_class_args *a = (struct tc_bind_class_args *)w;
2078 const struct Qdisc_class_ops *cops = q->ops->cl_ops;
2079 struct tcf_block *block;
2080 struct tcf_chain *chain;
2082 block = cops->tcf_block(q, cl, NULL);
2085 for (chain = tcf_get_next_chain(block, NULL);
2087 chain = tcf_get_next_chain(block, chain)) {
2088 struct tcf_proto *tp;
2090 for (tp = tcf_get_next_proto(chain, NULL);
2091 tp; tp = tcf_get_next_proto(chain, tp)) {
2092 struct tcf_bind_args arg = {};
2094 arg.w.fn = tcf_node_bind;
2095 arg.classid = a->clid;
2098 tp->ops->walk(tp, &arg.w, true);
2105 static void tc_bind_tclass(struct Qdisc *q, u32 portid, u32 clid,
2106 unsigned long new_cl)
2108 const struct Qdisc_class_ops *cops = q->ops->cl_ops;
2109 struct tc_bind_class_args args = {};
2111 if (!cops->tcf_block)
2113 args.portid = portid;
2115 args.new_cl = new_cl;
2116 args.w.fn = tc_bind_class_walker;
2117 q->ops->cl_ops->walk(q, &args.w);
2122 static void tc_bind_tclass(struct Qdisc *q, u32 portid, u32 clid,
2123 unsigned long new_cl)
2129 static int tc_ctl_tclass(struct sk_buff *skb, struct nlmsghdr *n,
2130 struct netlink_ext_ack *extack)
2132 struct net *net = sock_net(skb->sk);
2133 struct tcmsg *tcm = nlmsg_data(n);
2134 struct nlattr *tca[TCA_MAX + 1];
2135 struct net_device *dev;
2136 struct Qdisc *q = NULL;
2137 const struct Qdisc_class_ops *cops;
2138 unsigned long cl = 0;
2139 unsigned long new_cl;
2145 err = nlmsg_parse_deprecated(n, sizeof(*tcm), tca, TCA_MAX,
2146 rtm_tca_policy, extack);
2150 dev = __dev_get_by_index(net, tcm->tcm_ifindex);
2155 parent == TC_H_UNSPEC - unspecified parent.
2156 parent == TC_H_ROOT - class is root, which has no parent.
2157 parent == X:0 - parent is root class.
2158 parent == X:Y - parent is a node in hierarchy.
2159 parent == 0:Y - parent is X:Y, where X:0 is qdisc.
2161 handle == 0:0 - generate handle from kernel pool.
2162 handle == 0:Y - class is X:Y, where X:0 is qdisc.
2163 handle == X:Y - clear.
2164 handle == X:0 - root class.
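   For example, a request like "tc class add dev eth0 parent 1:1
   classid 1:10 ..." arrives with tcm_parent == 1:1 and
   tcm_handle == 1:10, so the qdisc handle X:0 resolved below is 1:0
   (illustration only, not tied to any particular qdisc).
 */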
2167 /* Step 1. Determine qdisc handle X:0 */
2169 portid = tcm->tcm_parent;
2170 clid = tcm->tcm_handle;
2171 qid = TC_H_MAJ(clid);
2173 if (portid != TC_H_ROOT) {
2174 u32 qid1 = TC_H_MAJ(portid);
2177 /* If both majors are known, they must be identical. */
2182 } else if (qid == 0)
2183 qid = rtnl_dereference(dev->qdisc)->handle;
/* Now qid is a genuine qdisc handle consistent with both
 * parent and child.
 *
 * TC_H_MAJ(portid) may still be unspecified; complete it now.
 */
2191 portid = TC_H_MAKE(qid, portid);
2194 qid = rtnl_dereference(dev->qdisc)->handle;
2197 /* OK. Locate qdisc */
2198 q = qdisc_lookup(dev, qid);
/* And check that it supports classes */
2203 cops = q->ops->cl_ops;
2207 /* Now try to get class */
2209 if (portid == TC_H_ROOT)
2212 clid = TC_H_MAKE(qid, clid);
2215 cl = cops->find(q, clid);
2219 if (n->nlmsg_type != RTM_NEWTCLASS ||
2220 !(n->nlmsg_flags & NLM_F_CREATE))
2223 switch (n->nlmsg_type) {
2226 if (n->nlmsg_flags & NLM_F_EXCL)
2230 err = tclass_del_notify(net, cops, skb, n, q, cl, extack);
/* Unbind filters from the deleted class by rebinding them to class 0 */
2232 tc_bind_tclass(q, portid, clid, 0);
2235 err = tclass_get_notify(net, skb, n, q, cl, extack);
2243 if (tca[TCA_INGRESS_BLOCK] || tca[TCA_EGRESS_BLOCK]) {
2244 NL_SET_ERR_MSG(extack, "Shared blocks are not supported for classes");
2251 err = cops->change(q, clid, portid, tca, &new_cl, extack);
2253 tclass_notify(net, skb, n, q, new_cl, RTM_NEWTCLASS, extack);
/* We just created a new class; we need to do the reverse binding. */
2256 tc_bind_tclass(q, portid, clid, new_cl);
2262 struct qdisc_dump_args {
2263 struct qdisc_walker w;
2264 struct sk_buff *skb;
2265 struct netlink_callback *cb;
2268 static int qdisc_class_dump(struct Qdisc *q, unsigned long cl,
2269 struct qdisc_walker *arg)
2271 struct qdisc_dump_args *a = (struct qdisc_dump_args *)arg;
2273 return tc_fill_tclass(a->skb, q, cl, NETLINK_CB(a->cb->skb).portid,
2274 a->cb->nlh->nlmsg_seq, NLM_F_MULTI,
2275 RTM_NEWTCLASS, NULL);
2278 static int tc_dump_tclass_qdisc(struct Qdisc *q, struct sk_buff *skb,
2279 struct tcmsg *tcm, struct netlink_callback *cb,
2282 struct qdisc_dump_args arg;
2284 if (tc_qdisc_dump_ignore(q, false) ||
2285 *t_p < s_t || !q->ops->cl_ops ||
2287 TC_H_MAJ(tcm->tcm_parent) != q->handle)) {
2292 memset(&cb->args[1], 0, sizeof(cb->args)-sizeof(cb->args[0]));
2293 arg.w.fn = qdisc_class_dump;
2297 arg.w.skip = cb->args[1];
2299 q->ops->cl_ops->walk(q, &arg.w);
2300 cb->args[1] = arg.w.count;
2307 static int tc_dump_tclass_root(struct Qdisc *root, struct sk_buff *skb,
2308 struct tcmsg *tcm, struct netlink_callback *cb,
2309 int *t_p, int s_t, bool recur)
2317 if (tc_dump_tclass_qdisc(root, skb, tcm, cb, t_p, s_t) < 0)
2320 if (!qdisc_dev(root) || !recur)
2323 if (tcm->tcm_parent) {
2324 q = qdisc_match_from_root(root, TC_H_MAJ(tcm->tcm_parent));
2325 if (q && q != root &&
2326 tc_dump_tclass_qdisc(q, skb, tcm, cb, t_p, s_t) < 0)
2330 hash_for_each(qdisc_dev(root)->qdisc_hash, b, q, hash) {
2331 if (tc_dump_tclass_qdisc(q, skb, tcm, cb, t_p, s_t) < 0)
2338 static int tc_dump_tclass(struct sk_buff *skb, struct netlink_callback *cb)
2340 struct tcmsg *tcm = nlmsg_data(cb->nlh);
2341 struct net *net = sock_net(skb->sk);
2342 struct netdev_queue *dev_queue;
2343 struct net_device *dev;
2346 if (nlmsg_len(cb->nlh) < sizeof(*tcm))
2348 dev = dev_get_by_index(net, tcm->tcm_ifindex);
2355 if (tc_dump_tclass_root(rtnl_dereference(dev->qdisc),
2356 skb, tcm, cb, &t, s_t, true) < 0)
2359 dev_queue = dev_ingress_queue(dev);
2361 tc_dump_tclass_root(rtnl_dereference(dev_queue->qdisc_sleeping),
2362 skb, tcm, cb, &t, s_t, false) < 0)
2372 #ifdef CONFIG_PROC_FS
2373 static int psched_show(struct seq_file *seq, void *v)
2375 seq_printf(seq, "%08x %08x %08x %08x\n",
2376 (u32)NSEC_PER_USEC, (u32)PSCHED_TICKS2NS(1),
2378 (u32)NSEC_PER_SEC / hrtimer_resolution);
2383 static int __net_init psched_net_init(struct net *net)
2385 struct proc_dir_entry *e;
2387 e = proc_create_single("psched", 0, net->proc_net, psched_show);
2394 static void __net_exit psched_net_exit(struct net *net)
2396 remove_proc_entry("psched", net->proc_net);
2399 static int __net_init psched_net_init(struct net *net)
2404 static void __net_exit psched_net_exit(struct net *net)
2409 static struct pernet_operations psched_net_ops = {
2410 .init = psched_net_init,
2411 .exit = psched_net_exit,
2414 #if IS_ENABLED(CONFIG_MITIGATION_RETPOLINE)
2415 DEFINE_STATIC_KEY_FALSE(tc_skip_wrapper);
2418 static int __init pktsched_init(void)
2422 err = register_pernet_subsys(&psched_net_ops);
2424 pr_err("pktsched_init: "
2425 "cannot initialize per netns operations\n");
2429 register_qdisc(&pfifo_fast_ops);
2430 register_qdisc(&pfifo_qdisc_ops);
2431 register_qdisc(&bfifo_qdisc_ops);
2432 register_qdisc(&pfifo_head_drop_qdisc_ops);
2433 register_qdisc(&mq_qdisc_ops);
2434 register_qdisc(&noqueue_qdisc_ops);
2436 rtnl_register(PF_UNSPEC, RTM_NEWQDISC, tc_modify_qdisc, NULL, 0);
2437 rtnl_register(PF_UNSPEC, RTM_DELQDISC, tc_get_qdisc, NULL, 0);
2438 rtnl_register(PF_UNSPEC, RTM_GETQDISC, tc_get_qdisc, tc_dump_qdisc,
2440 rtnl_register(PF_UNSPEC, RTM_NEWTCLASS, tc_ctl_tclass, NULL, 0);
2441 rtnl_register(PF_UNSPEC, RTM_DELTCLASS, tc_ctl_tclass, NULL, 0);
2442 rtnl_register(PF_UNSPEC, RTM_GETTCLASS, tc_ctl_tclass, tc_dump_tclass,
2450 subsys_initcall(pktsched_init);