1 // SPDX-License-Identifier: GPL-2.0-or-later
3 * net/sched/sch_api.c Packet scheduler API.
14 #include <linux/module.h>
15 #include <linux/types.h>
16 #include <linux/kernel.h>
17 #include <linux/string.h>
18 #include <linux/errno.h>
19 #include <linux/skbuff.h>
20 #include <linux/init.h>
21 #include <linux/proc_fs.h>
22 #include <linux/seq_file.h>
23 #include <linux/kmod.h>
24 #include <linux/list.h>
25 #include <linux/hrtimer.h>
26 #include <linux/slab.h>
27 #include <linux/hashtable.h>
29 #include <net/net_namespace.h>
31 #include <net/netlink.h>
32 #include <net/pkt_sched.h>
33 #include <net/pkt_cls.h>
34 #include <net/tc_wrapper.h>
36 #include <trace/events/qdisc.h>
43 This file consists of two interrelated parts:
45 1. queueing disciplines manager frontend.
46 2. traffic classes manager frontend.
48 Generally, a queueing discipline ("qdisc") is a black box
49 that can enqueue packets and dequeue them (when the
50 device is ready to send something) in an order and at times
51 determined by the algorithm hidden inside it.
53 qdiscs are divided into two categories:
54 - "queues", which have no internal structure visible from the outside.
55 - "schedulers", which split all packets into "traffic classes",
56 using "packet classifiers" (see cls_api.c).
58 In turn, classes may have child qdiscs (as a rule, queues)
59 attached to them, and so on.
61 The goal of the routines in this file is to translate
62 the information supplied by the user in the form of handles
63 into a form more intelligible to the kernel, to perform sanity
64 checks and the parts of the work that are common to all qdiscs,
65 and to provide rtnetlink notifications.
67 All real intelligent work is done inside qdisc modules.
71 Every discipline has two major routines: enqueue and dequeue.
75 dequeue usually returns an skb to send. It is allowed to return NULL,
76 but that does not mean the queue is empty; it just means that the
77 discipline does not want to send anything at this time.
78 The queue is really empty only if q->q.qlen == 0.
79 For complicated disciplines with multiple queues, q->q is not the
80 real packet queue, but q->q.qlen must still be valid.
84 enqueue returns 0 if the packet was enqueued successfully.
85 If a packet (this one or another one) was dropped, it returns
86 a non-zero error code:
87 NET_XMIT_DROP - this packet was dropped.
88 Expected action: do not back off, but wait until the queue clears.
89 NET_XMIT_CN - this packet was probably enqueued, but another one was dropped.
90 Expected action: back off or ignore.
96 ---peek : like dequeue, but without removing a packet from the queue.
100 ---reset : returns the qdisc to its initial state: purge all buffers, clear all
101 timers and counters (except for statistics), etc.
105 ---init : initializes a newly created qdisc.
109 ---destroy : destroys resources allocated by init and during the lifetime of the qdisc.
113 ---change : changes qdisc parameters.
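/* As a concrete (purely illustrative) picture of the hooks described above,
 * the sketch below shows how a minimal FIFO-style discipline could fill in
 * its Qdisc_ops table; the "example_fifo" names are hypothetical and the
 * block is not compiled.
 */
#if 0
static int example_fifo_enqueue(struct sk_buff *skb, struct Qdisc *sch,
				struct sk_buff **to_free)
{
	if (likely(sch->q.qlen < READ_ONCE(sch->limit)))
		return qdisc_enqueue_tail(skb, sch);	/* NET_XMIT_SUCCESS */

	return qdisc_drop(skb, sch, to_free);		/* NET_XMIT_DROP */
}

static struct Qdisc_ops example_fifo_qdisc_ops __read_mostly = {
	.id		= "example_fifo",
	.priv_size	= 0,
	.enqueue	= example_fifo_enqueue,
	.dequeue	= qdisc_dequeue_head,	/* may return NULL; queue empty only if q->q.qlen == 0 */
	.peek		= qdisc_peek_head,	/* like dequeue, but leaves the skb queued */
	.reset		= qdisc_reset_queue,	/* back to the initial state */
	.owner		= THIS_MODULE,
};
#endif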
116 /* Protects the list of registered TC modules. It is a pure SMP lock. */
117 static DEFINE_RWLOCK(qdisc_mod_lock);
120 /************************************************
121 * Queueing disciplines manipulation. *
122 ************************************************/
125 /* The list of all installed queueing disciplines. */
127 static struct Qdisc_ops *qdisc_base;
129 /* Register/unregister queueing discipline */
131 int register_qdisc(struct Qdisc_ops *qops)
133 struct Qdisc_ops *q, **qp;
136 write_lock(&qdisc_mod_lock);
137 for (qp = &qdisc_base; (q = *qp) != NULL; qp = &q->next)
138 if (!strcmp(qops->id, q->id))
141 if (qops->enqueue == NULL)
142 qops->enqueue = noop_qdisc_ops.enqueue;
143 if (qops->peek == NULL) {
144 if (qops->dequeue == NULL)
145 qops->peek = noop_qdisc_ops.peek;
149 if (qops->dequeue == NULL)
150 qops->dequeue = noop_qdisc_ops.dequeue;
153 const struct Qdisc_class_ops *cops = qops->cl_ops;
155 if (!(cops->find && cops->walk && cops->leaf))
158 if (cops->tcf_block && !(cops->bind_tcf && cops->unbind_tcf))
166 write_unlock(&qdisc_mod_lock);
173 EXPORT_SYMBOL(register_qdisc);
175 void unregister_qdisc(struct Qdisc_ops *qops)
177 struct Qdisc_ops *q, **qp;
180 write_lock(&qdisc_mod_lock);
181 for (qp = &qdisc_base; (q = *qp) != NULL; qp = &q->next)
189 write_unlock(&qdisc_mod_lock);
191 WARN(err, "unregister qdisc(%s) failed\n", qops->id);
193 EXPORT_SYMBOL(unregister_qdisc);
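/* Illustrative sketch (not compiled): a scheduler module typically pairs
 * register_qdisc()/unregister_qdisc() in its init/exit hooks; the
 * "example_qdisc_ops" table is a hypothetical stand-in.
 */
#if 0
static int __init example_module_init(void)
{
	return register_qdisc(&example_qdisc_ops);
}

static void __exit example_module_exit(void)
{
	unregister_qdisc(&example_qdisc_ops);
}
module_init(example_module_init);
module_exit(example_module_exit);
MODULE_LICENSE("GPL");
#endif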
195 /* Get default qdisc if not otherwise specified */
196 void qdisc_get_default(char *name, size_t len)
198 read_lock(&qdisc_mod_lock);
199 strscpy(name, default_qdisc_ops->id, len);
200 read_unlock(&qdisc_mod_lock);
203 static struct Qdisc_ops *qdisc_lookup_default(const char *name)
205 struct Qdisc_ops *q = NULL;
207 for (q = qdisc_base; q; q = q->next) {
208 if (!strcmp(name, q->id)) {
209 if (!try_module_get(q->owner))
218 /* Set new default qdisc to use */
219 int qdisc_set_default(const char *name)
221 const struct Qdisc_ops *ops;
223 if (!capable(CAP_NET_ADMIN))
226 write_lock(&qdisc_mod_lock);
227 ops = qdisc_lookup_default(name);
229 /* Not found, drop lock and try to load module */
230 write_unlock(&qdisc_mod_lock);
231 request_module("sch_%s", name);
232 write_lock(&qdisc_mod_lock);
234 ops = qdisc_lookup_default(name);
238 /* Set new default */
239 module_put(default_qdisc_ops->owner);
240 default_qdisc_ops = ops;
242 write_unlock(&qdisc_mod_lock);
244 return ops ? 0 : -ENOENT;
247 #ifdef CONFIG_NET_SCH_DEFAULT
248 /* Set default value from kernel config */
249 static int __init sch_default_qdisc(void)
251 return qdisc_set_default(CONFIG_DEFAULT_NET_SCH);
253 late_initcall(sch_default_qdisc);
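/* Besides the CONFIG_DEFAULT_NET_SCH initcall above, the default can be
 * changed at run time; for example (illustrative),
 *	sysctl -w net.core.default_qdisc=fq_codel
 * ends up calling qdisc_set_default("fq_codel"), and devices that attach
 * their qdisc afterwards pick it up via default_qdisc_ops.
 */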
256 /* We know the handle. Find the qdisc among all qdiscs attached to the device
257 * (the root qdisc, all its children, children of children, etc.)
258 * Note: the caller holds either the RTNL lock or rcu_read_lock().
261 static struct Qdisc *qdisc_match_from_root(struct Qdisc *root, u32 handle)
265 if (!qdisc_dev(root))
266 return (root->handle == handle ? root : NULL);
268 if (!(root->flags & TCQ_F_BUILTIN) &&
269 root->handle == handle)
272 hash_for_each_possible_rcu(qdisc_dev(root)->qdisc_hash, q, hash, handle,
273 lockdep_rtnl_is_held()) {
274 if (q->handle == handle)
280 void qdisc_hash_add(struct Qdisc *q, bool invisible)
282 if ((q->parent != TC_H_ROOT) && !(q->flags & TCQ_F_INGRESS)) {
284 hash_add_rcu(qdisc_dev(q)->qdisc_hash, &q->hash, q->handle);
286 q->flags |= TCQ_F_INVISIBLE;
289 EXPORT_SYMBOL(qdisc_hash_add);
291 void qdisc_hash_del(struct Qdisc *q)
293 if ((q->parent != TC_H_ROOT) && !(q->flags & TCQ_F_INGRESS)) {
295 hash_del_rcu(&q->hash);
298 EXPORT_SYMBOL(qdisc_hash_del);
300 struct Qdisc *qdisc_lookup(struct net_device *dev, u32 handle)
306 q = qdisc_match_from_root(rtnl_dereference(dev->qdisc), handle);
310 if (dev_ingress_queue(dev))
311 q = qdisc_match_from_root(
312 dev_ingress_queue(dev)->qdisc_sleeping,
318 struct Qdisc *qdisc_lookup_rcu(struct net_device *dev, u32 handle)
320 struct netdev_queue *nq;
325 q = qdisc_match_from_root(rcu_dereference(dev->qdisc), handle);
329 nq = dev_ingress_queue_rcu(dev);
331 q = qdisc_match_from_root(nq->qdisc_sleeping, handle);
336 static struct Qdisc *qdisc_leaf(struct Qdisc *p, u32 classid)
339 const struct Qdisc_class_ops *cops = p->ops->cl_ops;
343 cl = cops->find(p, classid);
347 return cops->leaf(p, cl);
350 /* Find queueing discipline by name */
352 static struct Qdisc_ops *qdisc_lookup_ops(struct nlattr *kind)
354 struct Qdisc_ops *q = NULL;
357 read_lock(&qdisc_mod_lock);
358 for (q = qdisc_base; q; q = q->next) {
359 if (nla_strcmp(kind, q->id) == 0) {
360 if (!try_module_get(q->owner))
365 read_unlock(&qdisc_mod_lock);
370 /* The linklayer setting was not transferred from iproute2 in older
371 * versions, and the rate table lookup system has been dropped from
372 * the kernel. To stay backward compatible with older iproute2 tc
373 * utils, we detect the linklayer setting by detecting whether the rate
374 * table was modified.
376 * For linklayer ATM table entries, the rate table will be aligned to
377 * 48 bytes, thus some table entries will contain the same value. The
378 * mpu (min packet unit) is also encoded into the old rate table, thus
379 * starting from the mpu, we find the low and high table entries for
380 * mapping this cell. If these entries contain the same value, then
381 * the rate table has been modified for linklayer ATM.
383 * This is done by rounding the mpu up to the nearest 48-byte cell/entry,
384 * then rounding up to the next cell, calculating the table entry one below,
385 * and comparing the two.
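/* Worked example (illustrative numbers): with r->mpu = 0 and r->cell_log = 3
 * (8-byte cells), low = roundup(0, 48) = 0 and high = roundup(1, 48) = 48,
 * so cell_low = 0 and cell_high = (48 >> 3) - 1 = 5. A table built for ATM
 * pads every size up to a multiple of 48 bytes, so entries 0..5 all describe
 * the same cell and rtab[0] == rtab[5], giving TC_LINKLAYER_ATM; an Ethernet
 * table has distinct values there and falls through to TC_LINKLAYER_ETHERNET.
 */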
387 static __u8 __detect_linklayer(struct tc_ratespec *r, __u32 *rtab)
389 int low = roundup(r->mpu, 48);
390 int high = roundup(low+1, 48);
391 int cell_low = low >> r->cell_log;
392 int cell_high = (high >> r->cell_log) - 1;
394 /* rtab is too inaccurate at rates > 100Mbit/s */
395 if ((r->rate > (100000000/8)) || (rtab[0] == 0)) {
396 pr_debug("TC linklayer: Giving up ATM detection\n");
397 return TC_LINKLAYER_ETHERNET;
400 if ((cell_high > cell_low) && (cell_high < 256)
401 && (rtab[cell_low] == rtab[cell_high])) {
402 pr_debug("TC linklayer: Detected ATM, low(%d)=high(%d)=%u\n",
403 cell_low, cell_high, rtab[cell_high]);
404 return TC_LINKLAYER_ATM;
406 return TC_LINKLAYER_ETHERNET;
409 static struct qdisc_rate_table *qdisc_rtab_list;
411 struct qdisc_rate_table *qdisc_get_rtab(struct tc_ratespec *r,
413 struct netlink_ext_ack *extack)
415 struct qdisc_rate_table *rtab;
417 if (tab == NULL || r->rate == 0 ||
418 r->cell_log == 0 || r->cell_log >= 32 ||
419 nla_len(tab) != TC_RTAB_SIZE) {
420 NL_SET_ERR_MSG(extack, "Invalid rate table parameters for searching");
424 for (rtab = qdisc_rtab_list; rtab; rtab = rtab->next) {
425 if (!memcmp(&rtab->rate, r, sizeof(struct tc_ratespec)) &&
426 !memcmp(&rtab->data, nla_data(tab), 1024)) {
432 rtab = kmalloc(sizeof(*rtab), GFP_KERNEL);
436 memcpy(rtab->data, nla_data(tab), 1024);
437 if (r->linklayer == TC_LINKLAYER_UNAWARE)
438 r->linklayer = __detect_linklayer(r, rtab->data);
439 rtab->next = qdisc_rtab_list;
440 qdisc_rtab_list = rtab;
442 NL_SET_ERR_MSG(extack, "Failed to allocate new qdisc rate table");
446 EXPORT_SYMBOL(qdisc_get_rtab);
448 void qdisc_put_rtab(struct qdisc_rate_table *tab)
450 struct qdisc_rate_table *rtab, **rtabp;
452 if (!tab || --tab->refcnt)
455 for (rtabp = &qdisc_rtab_list;
456 (rtab = *rtabp) != NULL;
457 rtabp = &rtab->next) {
465 EXPORT_SYMBOL(qdisc_put_rtab);
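/* Illustrative sketch (not compiled): a shaping qdisc would typically pin a
 * shared rate table from its own netlink attributes during ->init()/->change()
 * and release it again on teardown; the function and parameter names here are
 * hypothetical.
 */
#if 0
static struct qdisc_rate_table *example_get_rate(struct tc_ratespec *rate,
						 struct nlattr *rtab_attr,
						 struct netlink_ext_ack *extack)
{
	struct qdisc_rate_table *rtab;

	rtab = qdisc_get_rtab(rate, rtab_attr, extack);
	if (!rtab)
		return NULL;

	/* ... use rtab->data[] for packet-length -> transmit-time lookups,
	 * then drop the reference with qdisc_put_rtab(rtab) on destroy.
	 */
	return rtab;
}
#endif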
467 static LIST_HEAD(qdisc_stab_list);
469 static const struct nla_policy stab_policy[TCA_STAB_MAX + 1] = {
470 [TCA_STAB_BASE] = { .len = sizeof(struct tc_sizespec) },
471 [TCA_STAB_DATA] = { .type = NLA_BINARY },
474 static struct qdisc_size_table *qdisc_get_stab(struct nlattr *opt,
475 struct netlink_ext_ack *extack)
477 struct nlattr *tb[TCA_STAB_MAX + 1];
478 struct qdisc_size_table *stab;
479 struct tc_sizespec *s;
480 unsigned int tsize = 0;
484 err = nla_parse_nested_deprecated(tb, TCA_STAB_MAX, opt, stab_policy,
488 if (!tb[TCA_STAB_BASE]) {
489 NL_SET_ERR_MSG(extack, "Size table base attribute is missing");
490 return ERR_PTR(-EINVAL);
493 s = nla_data(tb[TCA_STAB_BASE]);
496 if (!tb[TCA_STAB_DATA]) {
497 NL_SET_ERR_MSG(extack, "Size table data attribute is missing");
498 return ERR_PTR(-EINVAL);
500 tab = nla_data(tb[TCA_STAB_DATA]);
501 tsize = nla_len(tb[TCA_STAB_DATA]) / sizeof(u16);
504 if (tsize != s->tsize || (!tab && tsize > 0)) {
505 NL_SET_ERR_MSG(extack, "Invalid size of size table");
506 return ERR_PTR(-EINVAL);
509 list_for_each_entry(stab, &qdisc_stab_list, list) {
510 if (memcmp(&stab->szopts, s, sizeof(*s)))
513 memcmp(stab->data, tab, flex_array_size(stab, data, tsize)))
519 if (s->size_log > STAB_SIZE_LOG_MAX ||
520 s->cell_log > STAB_SIZE_LOG_MAX) {
521 NL_SET_ERR_MSG(extack, "Invalid logarithmic size of size table");
522 return ERR_PTR(-EINVAL);
525 stab = kmalloc(struct_size(stab, data, tsize), GFP_KERNEL);
527 return ERR_PTR(-ENOMEM);
532 memcpy(stab->data, tab, flex_array_size(stab, data, tsize));
534 list_add_tail(&stab->list, &qdisc_stab_list);
539 void qdisc_put_stab(struct qdisc_size_table *tab)
544 if (--tab->refcnt == 0) {
545 list_del(&tab->list);
549 EXPORT_SYMBOL(qdisc_put_stab);
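/* From userspace, a size table normally reaches qdisc_get_stab() through the
 * generic "stab" option of tc, e.g. (illustrative):
 *	tc qdisc add dev eth0 root stab linklayer atm overhead 24 htb
 * iproute2 turns those parameters into the TCA_STAB_BASE/TCA_STAB_DATA
 * attributes parsed above.
 */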
551 static int qdisc_dump_stab(struct sk_buff *skb, struct qdisc_size_table *stab)
555 nest = nla_nest_start_noflag(skb, TCA_STAB);
557 goto nla_put_failure;
558 if (nla_put(skb, TCA_STAB_BASE, sizeof(stab->szopts), &stab->szopts))
559 goto nla_put_failure;
560 nla_nest_end(skb, nest);
568 void __qdisc_calculate_pkt_len(struct sk_buff *skb,
569 const struct qdisc_size_table *stab)
573 pkt_len = skb->len + stab->szopts.overhead;
574 if (unlikely(!stab->szopts.tsize))
577 slot = pkt_len + stab->szopts.cell_align;
578 if (unlikely(slot < 0))
581 slot >>= stab->szopts.cell_log;
582 if (likely(slot < stab->szopts.tsize))
583 pkt_len = stab->data[slot];
585 pkt_len = stab->data[stab->szopts.tsize - 1] *
586 (slot / stab->szopts.tsize) +
587 stab->data[slot % stab->szopts.tsize];
589 pkt_len <<= stab->szopts.size_log;
591 if (unlikely(pkt_len < 1))
593 qdisc_skb_cb(skb)->pkt_len = pkt_len;
595 EXPORT_SYMBOL(__qdisc_calculate_pkt_len);
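/* Worked example (illustrative numbers): with szopts = { .overhead = 14,
 * .cell_align = -1, .cell_log = 6, .size_log = 0, .tsize = 512 }, an skb of
 * 1000 bytes gives pkt_len = 1014, slot = (1014 - 1) >> 6 = 15, so the
 * reported length becomes stab->data[15]. Slots beyond tsize are handled by
 * the overflow branch, which scales the last table entry.
 */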
597 void qdisc_warn_nonwc(const char *txt, struct Qdisc *qdisc)
599 if (!(qdisc->flags & TCQ_F_WARN_NONWC)) {
600 pr_warn("%s: %s qdisc %X: is non-work-conserving?\n",
601 txt, qdisc->ops->id, qdisc->handle >> 16);
602 qdisc->flags |= TCQ_F_WARN_NONWC;
605 EXPORT_SYMBOL(qdisc_warn_nonwc);
607 static enum hrtimer_restart qdisc_watchdog(struct hrtimer *timer)
609 struct qdisc_watchdog *wd = container_of(timer, struct qdisc_watchdog,
613 __netif_schedule(qdisc_root(wd->qdisc));
616 return HRTIMER_NORESTART;
619 void qdisc_watchdog_init_clockid(struct qdisc_watchdog *wd, struct Qdisc *qdisc,
622 hrtimer_init(&wd->timer, clockid, HRTIMER_MODE_ABS_PINNED);
623 wd->timer.function = qdisc_watchdog;
626 EXPORT_SYMBOL(qdisc_watchdog_init_clockid);
628 void qdisc_watchdog_init(struct qdisc_watchdog *wd, struct Qdisc *qdisc)
630 qdisc_watchdog_init_clockid(wd, qdisc, CLOCK_MONOTONIC);
632 EXPORT_SYMBOL(qdisc_watchdog_init);
634 void qdisc_watchdog_schedule_range_ns(struct qdisc_watchdog *wd, u64 expires,
637 if (test_bit(__QDISC_STATE_DEACTIVATED,
638 &qdisc_root_sleeping(wd->qdisc)->state))
641 if (hrtimer_is_queued(&wd->timer)) {
642 /* If timer is already set in [expires, expires + delta_ns],
643 * do not reprogram it.
645 if (wd->last_expires - expires <= delta_ns)
649 wd->last_expires = expires;
650 hrtimer_start_range_ns(&wd->timer,
651 ns_to_ktime(expires),
653 HRTIMER_MODE_ABS_PINNED);
655 EXPORT_SYMBOL(qdisc_watchdog_schedule_range_ns);
657 void qdisc_watchdog_cancel(struct qdisc_watchdog *wd)
659 hrtimer_cancel(&wd->timer);
661 EXPORT_SYMBOL(qdisc_watchdog_cancel);
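/* Illustrative sketch (not compiled): a rate-limiting qdisc keeps a
 * qdisc_watchdog in its private data (initialised with qdisc_watchdog_init()
 * from ->init() and stopped with qdisc_watchdog_cancel() from ->reset() or
 * ->destroy()), and arms it from dequeue() when the head packet may not be
 * sent yet; "struct example_sched" is hypothetical.
 */
#if 0
struct example_sched {
	struct qdisc_watchdog	watchdog;
	u64			next_allowed_ns;
};

static struct sk_buff *example_dequeue(struct Qdisc *sch)
{
	struct example_sched *q = qdisc_priv(sch);

	if (ktime_get_ns() < q->next_allowed_ns) {
		/* Nothing may leave yet: fire the watchdog when it may. */
		qdisc_watchdog_schedule_ns(&q->watchdog, q->next_allowed_ns);
		return NULL;
	}
	return qdisc_dequeue_head(sch);
}
#endif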
663 static struct hlist_head *qdisc_class_hash_alloc(unsigned int n)
665 struct hlist_head *h;
668 h = kvmalloc_array(n, sizeof(struct hlist_head), GFP_KERNEL);
671 for (i = 0; i < n; i++)
672 INIT_HLIST_HEAD(&h[i]);
677 void qdisc_class_hash_grow(struct Qdisc *sch, struct Qdisc_class_hash *clhash)
679 struct Qdisc_class_common *cl;
680 struct hlist_node *next;
681 struct hlist_head *nhash, *ohash;
682 unsigned int nsize, nmask, osize;
685 /* Rehash when load factor exceeds 0.75 */
686 if (clhash->hashelems * 4 <= clhash->hashsize * 3)
688 nsize = clhash->hashsize * 2;
690 nhash = qdisc_class_hash_alloc(nsize);
694 ohash = clhash->hash;
695 osize = clhash->hashsize;
698 for (i = 0; i < osize; i++) {
699 hlist_for_each_entry_safe(cl, next, &ohash[i], hnode) {
700 h = qdisc_class_hash(cl->classid, nmask);
701 hlist_add_head(&cl->hnode, &nhash[h]);
704 clhash->hash = nhash;
705 clhash->hashsize = nsize;
706 clhash->hashmask = nmask;
707 sch_tree_unlock(sch);
711 EXPORT_SYMBOL(qdisc_class_hash_grow);
713 int qdisc_class_hash_init(struct Qdisc_class_hash *clhash)
715 unsigned int size = 4;
717 clhash->hash = qdisc_class_hash_alloc(size);
720 clhash->hashsize = size;
721 clhash->hashmask = size - 1;
722 clhash->hashelems = 0;
725 EXPORT_SYMBOL(qdisc_class_hash_init);
727 void qdisc_class_hash_destroy(struct Qdisc_class_hash *clhash)
729 kvfree(clhash->hash);
731 EXPORT_SYMBOL(qdisc_class_hash_destroy);
733 void qdisc_class_hash_insert(struct Qdisc_class_hash *clhash,
734 struct Qdisc_class_common *cl)
738 INIT_HLIST_NODE(&cl->hnode);
739 h = qdisc_class_hash(cl->classid, clhash->hashmask);
740 hlist_add_head(&cl->hnode, &clhash->hash[h]);
743 EXPORT_SYMBOL(qdisc_class_hash_insert);
745 void qdisc_class_hash_remove(struct Qdisc_class_hash *clhash,
746 struct Qdisc_class_common *cl)
748 hlist_del(&cl->hnode);
751 EXPORT_SYMBOL(qdisc_class_hash_remove);
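/* Illustrative sketch (not compiled): a classful qdisc embeds
 * struct Qdisc_class_common in each class, publishes new classes with
 * qdisc_class_hash_insert() and resizes with qdisc_class_hash_grow();
 * lookup then goes through qdisc_class_find().  "struct example_class"
 * is hypothetical.
 */
#if 0
struct example_class {
	struct Qdisc_class_common common;	/* ->classid plus hash linkage */
	/* per-class scheduling state ... */
};

static struct example_class *example_class_lookup(struct Qdisc_class_hash *clhash,
						  u32 classid)
{
	struct Qdisc_class_common *clc;

	clc = qdisc_class_find(clhash, classid);
	return clc ? container_of(clc, struct example_class, common) : NULL;
}
#endif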
753 /* Allocate a unique handle from the space managed by the kernel.
754 * Possible range is [8000-FFFF]:0000 (0x8000 values)
756 static u32 qdisc_alloc_handle(struct net_device *dev)
759 static u32 autohandle = TC_H_MAKE(0x80000000U, 0);
762 autohandle += TC_H_MAKE(0x10000U, 0);
763 if (autohandle == TC_H_MAKE(TC_H_ROOT, 0))
764 autohandle = TC_H_MAKE(0x80000000U, 0);
765 if (!qdisc_lookup(dev, autohandle))
773 void qdisc_tree_reduce_backlog(struct Qdisc *sch, int n, int len)
775 bool qdisc_is_offloaded = sch->flags & TCQ_F_OFFLOADED;
776 const struct Qdisc_class_ops *cops;
782 if (n == 0 && len == 0)
784 drops = max_t(int, n, 0);
786 while ((parentid = sch->parent)) {
787 if (TC_H_MAJ(parentid) == TC_H_MAJ(TC_H_INGRESS))
790 if (sch->flags & TCQ_F_NOPARENT)
792 /* Notify the parent qdisc only if the child qdisc becomes empty.
794 * If the child was empty even before the update, then the backlog
795 * counter is inconsistent and we skip the notification because
796 * the parent class is already passive.
798 * If the original child was offloaded, then it is allowed
799 * to be seen as empty, so the parent is notified anyway.
801 notify = !sch->q.qlen && !WARN_ON_ONCE(!n &&
802 !qdisc_is_offloaded);
803 /* TODO: perform the search on a per txq basis */
804 sch = qdisc_lookup(qdisc_dev(sch), TC_H_MAJ(parentid));
806 WARN_ON_ONCE(parentid != TC_H_ROOT);
809 cops = sch->ops->cl_ops;
810 if (notify && cops->qlen_notify) {
811 cl = cops->find(sch, parentid);
812 cops->qlen_notify(sch, cl);
815 sch->qstats.backlog -= len;
816 __qdisc_qstats_drop(sch, drops);
820 EXPORT_SYMBOL(qdisc_tree_reduce_backlog);
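/* Illustrative sketch (not compiled): when a qdisc drops packets outside of
 * the enqueue path (for instance while shrinking its limit in ->change()),
 * it must report the removed packets/bytes so that the qlen and backlog
 * counters of its ancestors stay consistent; the function name is
 * hypothetical.
 */
#if 0
static void example_shrink_queue(struct Qdisc *sch, u32 new_limit)
{
	unsigned int dropped_pkts = 0, dropped_bytes = 0;

	while (sch->q.qlen > new_limit) {
		struct sk_buff *skb = qdisc_dequeue_head(sch);

		if (!skb)
			break;
		dropped_bytes += qdisc_pkt_len(skb);
		dropped_pkts++;
		rtnl_kfree_skbs(skb, skb);
	}
	qdisc_tree_reduce_backlog(sch, dropped_pkts, dropped_bytes);
}
#endif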
822 int qdisc_offload_dump_helper(struct Qdisc *sch, enum tc_setup_type type,
825 struct net_device *dev = qdisc_dev(sch);
828 sch->flags &= ~TCQ_F_OFFLOADED;
829 if (!tc_can_offload(dev) || !dev->netdev_ops->ndo_setup_tc)
832 err = dev->netdev_ops->ndo_setup_tc(dev, type, type_data);
833 if (err == -EOPNOTSUPP)
837 sch->flags |= TCQ_F_OFFLOADED;
841 EXPORT_SYMBOL(qdisc_offload_dump_helper);
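/* Illustrative sketch (not compiled): an offload-capable qdisc calls the dump
 * helper from its ->dump_stats() with a driver-facing command structure, in
 * the same spirit as RED or TBF do with their tc_*_qopt_offload types.  The
 * "tc_example_qopt_offload" struct, TC_EXAMPLE_STATS and
 * TC_SETUP_QDISC_EXAMPLE below are hypothetical stand-ins.
 */
#if 0
static int example_dump_stats(struct Qdisc *sch, struct gnet_dump *d)
{
	struct tc_example_qopt_offload hw = {
		.command	= TC_EXAMPLE_STATS,
		.handle		= sch->handle,
		.parent		= sch->parent,
	};

	/* Refreshes TCQ_F_OFFLOADED and lets the driver fill in hw; a real
	 * implementation would then copy the gathered stats into 'd'.
	 */
	return qdisc_offload_dump_helper(sch, TC_SETUP_QDISC_EXAMPLE, &hw);
}
#endif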
843 void qdisc_offload_graft_helper(struct net_device *dev, struct Qdisc *sch,
844 struct Qdisc *new, struct Qdisc *old,
845 enum tc_setup_type type, void *type_data,
846 struct netlink_ext_ack *extack)
848 bool any_qdisc_is_offloaded;
851 if (!tc_can_offload(dev) || !dev->netdev_ops->ndo_setup_tc)
854 err = dev->netdev_ops->ndo_setup_tc(dev, type, type_data);
856 /* Don't report an error if the graft is part of a destroy operation. */
857 if (!err || !new || new == &noop_qdisc)
860 /* Don't report an error if neither the parent, the old child nor the new
861 * one is offloaded.
863 any_qdisc_is_offloaded = new->flags & TCQ_F_OFFLOADED;
864 any_qdisc_is_offloaded |= sch && sch->flags & TCQ_F_OFFLOADED;
865 any_qdisc_is_offloaded |= old && old->flags & TCQ_F_OFFLOADED;
867 if (any_qdisc_is_offloaded)
868 NL_SET_ERR_MSG(extack, "Offloading graft operation failed.");
870 EXPORT_SYMBOL(qdisc_offload_graft_helper);
872 void qdisc_offload_query_caps(struct net_device *dev,
873 enum tc_setup_type type,
874 void *caps, size_t caps_len)
876 const struct net_device_ops *ops = dev->netdev_ops;
877 struct tc_query_caps_base base = {
882 memset(caps, 0, caps_len);
884 if (ops->ndo_setup_tc)
885 ops->ndo_setup_tc(dev, TC_QUERY_CAPS, &base);
887 EXPORT_SYMBOL(qdisc_offload_query_caps);
889 static void qdisc_offload_graft_root(struct net_device *dev,
890 struct Qdisc *new, struct Qdisc *old,
891 struct netlink_ext_ack *extack)
893 struct tc_root_qopt_offload graft_offload = {
894 .command = TC_ROOT_GRAFT,
895 .handle = new ? new->handle : 0,
896 .ingress = (new && new->flags & TCQ_F_INGRESS) ||
897 (old && old->flags & TCQ_F_INGRESS),
900 qdisc_offload_graft_helper(dev, NULL, new, old,
901 TC_SETUP_ROOT_QDISC, &graft_offload, extack);
904 static int tc_fill_qdisc(struct sk_buff *skb, struct Qdisc *q, u32 clid,
905 u32 portid, u32 seq, u16 flags, int event)
907 struct gnet_stats_basic_sync __percpu *cpu_bstats = NULL;
908 struct gnet_stats_queue __percpu *cpu_qstats = NULL;
910 struct nlmsghdr *nlh;
911 unsigned char *b = skb_tail_pointer(skb);
913 struct qdisc_size_table *stab;
918 nlh = nlmsg_put(skb, portid, seq, event, sizeof(*tcm), flags);
921 tcm = nlmsg_data(nlh);
922 tcm->tcm_family = AF_UNSPEC;
925 tcm->tcm_ifindex = qdisc_dev(q)->ifindex;
926 tcm->tcm_parent = clid;
927 tcm->tcm_handle = q->handle;
928 tcm->tcm_info = refcount_read(&q->refcnt);
929 if (nla_put_string(skb, TCA_KIND, q->ops->id))
930 goto nla_put_failure;
931 if (q->ops->ingress_block_get) {
932 block_index = q->ops->ingress_block_get(q);
934 nla_put_u32(skb, TCA_INGRESS_BLOCK, block_index))
935 goto nla_put_failure;
937 if (q->ops->egress_block_get) {
938 block_index = q->ops->egress_block_get(q);
940 nla_put_u32(skb, TCA_EGRESS_BLOCK, block_index))
941 goto nla_put_failure;
943 if (q->ops->dump && q->ops->dump(q, skb) < 0)
944 goto nla_put_failure;
945 if (nla_put_u8(skb, TCA_HW_OFFLOAD, !!(q->flags & TCQ_F_OFFLOADED)))
946 goto nla_put_failure;
947 qlen = qdisc_qlen_sum(q);
949 stab = rtnl_dereference(q->stab);
950 if (stab && qdisc_dump_stab(skb, stab) < 0)
951 goto nla_put_failure;
953 if (gnet_stats_start_copy_compat(skb, TCA_STATS2, TCA_STATS, TCA_XSTATS,
954 NULL, &d, TCA_PAD) < 0)
955 goto nla_put_failure;
957 if (q->ops->dump_stats && q->ops->dump_stats(q, &d) < 0)
958 goto nla_put_failure;
960 if (qdisc_is_percpu_stats(q)) {
961 cpu_bstats = q->cpu_bstats;
962 cpu_qstats = q->cpu_qstats;
965 if (gnet_stats_copy_basic(&d, cpu_bstats, &q->bstats, true) < 0 ||
966 gnet_stats_copy_rate_est(&d, &q->rate_est) < 0 ||
967 gnet_stats_copy_queue(&d, cpu_qstats, &q->qstats, qlen) < 0)
968 goto nla_put_failure;
970 if (gnet_stats_finish_copy(&d) < 0)
971 goto nla_put_failure;
973 nlh->nlmsg_len = skb_tail_pointer(skb) - b;
982 static bool tc_qdisc_dump_ignore(struct Qdisc *q, bool dump_invisible)
984 if (q->flags & TCQ_F_BUILTIN)
986 if ((q->flags & TCQ_F_INVISIBLE) && !dump_invisible)
992 static int qdisc_notify(struct net *net, struct sk_buff *oskb,
993 struct nlmsghdr *n, u32 clid,
994 struct Qdisc *old, struct Qdisc *new)
997 u32 portid = oskb ? NETLINK_CB(oskb).portid : 0;
999 skb = alloc_skb(NLMSG_GOODSIZE, GFP_KERNEL);
1003 if (old && !tc_qdisc_dump_ignore(old, false)) {
1004 if (tc_fill_qdisc(skb, old, clid, portid, n->nlmsg_seq,
1005 0, RTM_DELQDISC) < 0)
1008 if (new && !tc_qdisc_dump_ignore(new, false)) {
1009 if (tc_fill_qdisc(skb, new, clid, portid, n->nlmsg_seq,
1010 old ? NLM_F_REPLACE : 0, RTM_NEWQDISC) < 0)
1015 return rtnetlink_send(skb, net, portid, RTNLGRP_TC,
1016 n->nlmsg_flags & NLM_F_ECHO);
1023 static void notify_and_destroy(struct net *net, struct sk_buff *skb,
1024 struct nlmsghdr *n, u32 clid,
1025 struct Qdisc *old, struct Qdisc *new)
1028 qdisc_notify(net, skb, n, clid, old, new);
1034 static void qdisc_clear_nolock(struct Qdisc *sch)
1036 sch->flags &= ~TCQ_F_NOLOCK;
1037 if (!(sch->flags & TCQ_F_CPUSTATS))
1040 free_percpu(sch->cpu_bstats);
1041 free_percpu(sch->cpu_qstats);
1042 sch->cpu_bstats = NULL;
1043 sch->cpu_qstats = NULL;
1044 sch->flags &= ~TCQ_F_CPUSTATS;
1047 /* Graft qdisc "new" to class "classid" of qdisc "parent" or
1048 * to device "dev".
1050 * When appropriate, send a netlink notification using 'skb'
1051 * and "n".
1053 * On success, destroy the old qdisc.
1056 static int qdisc_graft(struct net_device *dev, struct Qdisc *parent,
1057 struct sk_buff *skb, struct nlmsghdr *n, u32 classid,
1058 struct Qdisc *new, struct Qdisc *old,
1059 struct netlink_ext_ack *extack)
1061 struct Qdisc *q = old;
1062 struct net *net = dev_net(dev);
1064 if (parent == NULL) {
1065 unsigned int i, num_q, ingress;
1068 num_q = dev->num_tx_queues;
1069 if ((q && q->flags & TCQ_F_INGRESS) ||
1070 (new && new->flags & TCQ_F_INGRESS)) {
1073 if (!dev_ingress_queue(dev)) {
1074 NL_SET_ERR_MSG(extack, "Device does not have an ingress queue");
1079 if (dev->flags & IFF_UP)
1080 dev_deactivate(dev);
1082 qdisc_offload_graft_root(dev, new, old, extack);
1084 if (new && new->ops->attach && !ingress)
1087 for (i = 0; i < num_q; i++) {
1088 struct netdev_queue *dev_queue = dev_ingress_queue(dev);
1091 dev_queue = netdev_get_tx_queue(dev, i);
1093 old = dev_graft_qdisc(dev_queue, new);
1095 qdisc_refcount_inc(new);
1103 old = rtnl_dereference(dev->qdisc);
1104 if (new && !new->ops->attach)
1105 qdisc_refcount_inc(new);
1106 rcu_assign_pointer(dev->qdisc, new ? : &noop_qdisc);
1108 notify_and_destroy(net, skb, n, classid, old, new);
1110 if (new && new->ops->attach)
1111 new->ops->attach(new);
1113 notify_and_destroy(net, skb, n, classid, old, new);
1116 if (dev->flags & IFF_UP)
1119 const struct Qdisc_class_ops *cops = parent->ops->cl_ops;
1123 /* Only support running class lockless if parent is lockless */
1124 if (new && (new->flags & TCQ_F_NOLOCK) && !(parent->flags & TCQ_F_NOLOCK))
1125 qdisc_clear_nolock(new);
1127 if (!cops || !cops->graft)
1130 cl = cops->find(parent, classid);
1132 NL_SET_ERR_MSG(extack, "Specified class not found");
1136 err = cops->graft(parent, cl, new, &old, extack);
1139 notify_and_destroy(net, skb, n, classid, old, new);
1144 static int qdisc_block_indexes_set(struct Qdisc *sch, struct nlattr **tca,
1145 struct netlink_ext_ack *extack)
1149 if (tca[TCA_INGRESS_BLOCK]) {
1150 block_index = nla_get_u32(tca[TCA_INGRESS_BLOCK]);
1153 NL_SET_ERR_MSG(extack, "Ingress block index cannot be 0");
1156 if (!sch->ops->ingress_block_set) {
1157 NL_SET_ERR_MSG(extack, "Ingress block sharing is not supported");
1160 sch->ops->ingress_block_set(sch, block_index);
1162 if (tca[TCA_EGRESS_BLOCK]) {
1163 block_index = nla_get_u32(tca[TCA_EGRESS_BLOCK]);
1166 NL_SET_ERR_MSG(extack, "Egress block index cannot be 0");
1169 if (!sch->ops->egress_block_set) {
1170 NL_SET_ERR_MSG(extack, "Egress block sharing is not supported");
1173 sch->ops->egress_block_set(sch, block_index);
1179 Allocate and initialize a new qdisc.
1181 Parameters are passed via opt.
1184 static struct Qdisc *qdisc_create(struct net_device *dev,
1185 struct netdev_queue *dev_queue,
1186 u32 parent, u32 handle,
1187 struct nlattr **tca, int *errp,
1188 struct netlink_ext_ack *extack)
1191 struct nlattr *kind = tca[TCA_KIND];
1193 struct Qdisc_ops *ops;
1194 struct qdisc_size_table *stab;
1196 ops = qdisc_lookup_ops(kind);
1197 #ifdef CONFIG_MODULES
1198 if (ops == NULL && kind != NULL) {
1199 char name[IFNAMSIZ];
1200 if (nla_strscpy(name, kind, IFNAMSIZ) >= 0) {
1201 /* We dropped the RTNL semaphore in order to
1202 * perform the module load. So, even if we
1203 * succeeded in loading the module, we have to
1204 * tell the caller to replay the request. We
1205 * indicate this using -EAGAIN.
1206 * We replay the request because the device may
1207 * go away in the meantime.
1210 request_module("sch_%s", name);
1212 ops = qdisc_lookup_ops(kind);
1214 /* We will try again qdisc_lookup_ops,
1215 * so don't keep a reference.
1217 module_put(ops->owner);
1227 NL_SET_ERR_MSG(extack, "Specified qdisc kind is unknown");
1231 sch = qdisc_alloc(dev_queue, ops, extack);
1237 sch->parent = parent;
1239 if (handle == TC_H_INGRESS) {
1240 sch->flags |= TCQ_F_INGRESS;
1241 handle = TC_H_MAKE(TC_H_INGRESS, 0);
1244 handle = qdisc_alloc_handle(dev);
1246 NL_SET_ERR_MSG(extack, "Maximum number of qdisc handles was exceeded");
1251 if (!netif_is_multiqueue(dev))
1252 sch->flags |= TCQ_F_ONETXQUEUE;
1255 sch->handle = handle;
1257 /* This exists to stay backward compatible with a userspace
1258 * loophole that allowed userspace to get the IFF_NO_QUEUE
1259 * facility on older kernels by setting tx_queue_len=0 (prior
1260 * to qdisc init) and then forgetting to reinit tx_queue_len
1261 * before attaching a qdisc again.
1263 if ((dev->priv_flags & IFF_NO_QUEUE) && (dev->tx_queue_len == 0)) {
1264 dev->tx_queue_len = DEFAULT_TX_QUEUE_LEN;
1265 netdev_info(dev, "Caught tx_queue_len zero misconfig\n");
1268 err = qdisc_block_indexes_set(sch, tca, extack);
1273 err = ops->init(sch, tca[TCA_OPTIONS], extack);
1278 if (tca[TCA_STAB]) {
1279 stab = qdisc_get_stab(tca[TCA_STAB], extack);
1281 err = PTR_ERR(stab);
1284 rcu_assign_pointer(sch->stab, stab);
1286 if (tca[TCA_RATE]) {
1288 if (sch->flags & TCQ_F_MQROOT) {
1289 NL_SET_ERR_MSG(extack, "Cannot attach rate estimator to a multi-queue root qdisc");
1293 err = gen_new_estimator(&sch->bstats,
1300 NL_SET_ERR_MSG(extack, "Failed to generate new estimator");
1305 qdisc_hash_add(sch, false);
1306 trace_qdisc_create(ops, dev, parent);
1311 /* ops->init() failed, we call ->destroy() like qdisc_create_dflt() */
1315 netdev_put(dev, &sch->dev_tracker);
1318 module_put(ops->owner);
1325 * Any broken qdiscs that would require an ops->reset() here?
1326 * The qdisc was never in action so it shouldn't be necessary.
1328 qdisc_put_stab(rtnl_dereference(sch->stab));
1334 static int qdisc_change(struct Qdisc *sch, struct nlattr **tca,
1335 struct netlink_ext_ack *extack)
1337 struct qdisc_size_table *ostab, *stab = NULL;
1340 if (tca[TCA_OPTIONS]) {
1341 if (!sch->ops->change) {
1342 NL_SET_ERR_MSG(extack, "Change operation not supported by specified qdisc");
1345 if (tca[TCA_INGRESS_BLOCK] || tca[TCA_EGRESS_BLOCK]) {
1346 NL_SET_ERR_MSG(extack, "Change of blocks is not supported");
1349 err = sch->ops->change(sch, tca[TCA_OPTIONS], extack);
1354 if (tca[TCA_STAB]) {
1355 stab = qdisc_get_stab(tca[TCA_STAB], extack);
1357 return PTR_ERR(stab);
1360 ostab = rtnl_dereference(sch->stab);
1361 rcu_assign_pointer(sch->stab, stab);
1362 qdisc_put_stab(ostab);
1364 if (tca[TCA_RATE]) {
1365 /* NB: ignores errors from replace_estimator
1366 because change can't be undone. */
1367 if (sch->flags & TCQ_F_MQROOT)
1369 gen_replace_estimator(&sch->bstats,
1380 struct check_loop_arg {
1381 struct qdisc_walker w;
1386 static int check_loop_fn(struct Qdisc *q, unsigned long cl,
1387 struct qdisc_walker *w);
1389 static int check_loop(struct Qdisc *q, struct Qdisc *p, int depth)
1391 struct check_loop_arg arg;
1393 if (q->ops->cl_ops == NULL)
1396 arg.w.stop = arg.w.skip = arg.w.count = 0;
1397 arg.w.fn = check_loop_fn;
1400 q->ops->cl_ops->walk(q, &arg.w);
1401 return arg.w.stop ? -ELOOP : 0;
1405 check_loop_fn(struct Qdisc *q, unsigned long cl, struct qdisc_walker *w)
1408 const struct Qdisc_class_ops *cops = q->ops->cl_ops;
1409 struct check_loop_arg *arg = (struct check_loop_arg *)w;
1411 leaf = cops->leaf(q, cl);
1413 if (leaf == arg->p || arg->depth > 7)
1415 return check_loop(leaf, arg->p, arg->depth + 1);
1420 const struct nla_policy rtm_tca_policy[TCA_MAX + 1] = {
1421 [TCA_KIND] = { .type = NLA_STRING },
1422 [TCA_RATE] = { .type = NLA_BINARY,
1423 .len = sizeof(struct tc_estimator) },
1424 [TCA_STAB] = { .type = NLA_NESTED },
1425 [TCA_DUMP_INVISIBLE] = { .type = NLA_FLAG },
1426 [TCA_CHAIN] = { .type = NLA_U32 },
1427 [TCA_INGRESS_BLOCK] = { .type = NLA_U32 },
1428 [TCA_EGRESS_BLOCK] = { .type = NLA_U32 },
1435 static int tc_get_qdisc(struct sk_buff *skb, struct nlmsghdr *n,
1436 struct netlink_ext_ack *extack)
1438 struct net *net = sock_net(skb->sk);
1439 struct tcmsg *tcm = nlmsg_data(n);
1440 struct nlattr *tca[TCA_MAX + 1];
1441 struct net_device *dev;
1443 struct Qdisc *q = NULL;
1444 struct Qdisc *p = NULL;
1447 err = nlmsg_parse_deprecated(n, sizeof(*tcm), tca, TCA_MAX,
1448 rtm_tca_policy, extack);
1452 dev = __dev_get_by_index(net, tcm->tcm_ifindex);
1456 clid = tcm->tcm_parent;
1458 if (clid != TC_H_ROOT) {
1459 if (TC_H_MAJ(clid) != TC_H_MAJ(TC_H_INGRESS)) {
1460 p = qdisc_lookup(dev, TC_H_MAJ(clid));
1462 NL_SET_ERR_MSG(extack, "Failed to find qdisc with specified classid");
1465 q = qdisc_leaf(p, clid);
1466 } else if (dev_ingress_queue(dev)) {
1467 q = dev_ingress_queue(dev)->qdisc_sleeping;
1470 q = rtnl_dereference(dev->qdisc);
1473 NL_SET_ERR_MSG(extack, "Cannot find specified qdisc on specified device");
1477 if (tcm->tcm_handle && q->handle != tcm->tcm_handle) {
1478 NL_SET_ERR_MSG(extack, "Invalid handle");
1482 q = qdisc_lookup(dev, tcm->tcm_handle);
1484 NL_SET_ERR_MSG(extack, "Failed to find qdisc with specified handle");
1489 if (tca[TCA_KIND] && nla_strcmp(tca[TCA_KIND], q->ops->id)) {
1490 NL_SET_ERR_MSG(extack, "Invalid qdisc name");
1494 if (n->nlmsg_type == RTM_DELQDISC) {
1496 NL_SET_ERR_MSG(extack, "Classid cannot be zero");
1499 if (q->handle == 0) {
1500 NL_SET_ERR_MSG(extack, "Cannot delete qdisc with handle of zero");
1503 err = qdisc_graft(dev, p, skb, n, clid, NULL, q, extack);
1507 qdisc_notify(net, skb, n, clid, NULL, q);
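/* For reference, these doit handlers map onto the usual tc invocations,
 * e.g. (illustrative):
 *	tc qdisc del dev eth0 root		-> RTM_DELQDISC -> tc_get_qdisc()
 *	tc qdisc show dev eth0			-> RTM_GETQDISC -> tc_get_qdisc()/tc_dump_qdisc()
 *	tc qdisc replace dev eth0 root fq_codel	-> RTM_NEWQDISC -> tc_modify_qdisc()
 * (see the rtnl_register() calls in pktsched_init() below).
 */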
1513 * Create/change qdisc.
1516 static int tc_modify_qdisc(struct sk_buff *skb, struct nlmsghdr *n,
1517 struct netlink_ext_ack *extack)
1519 struct net *net = sock_net(skb->sk);
1521 struct nlattr *tca[TCA_MAX + 1];
1522 struct net_device *dev;
1524 struct Qdisc *q, *p;
1528 /* Reinit, just in case something touches this. */
1529 err = nlmsg_parse_deprecated(n, sizeof(*tcm), tca, TCA_MAX,
1530 rtm_tca_policy, extack);
1534 tcm = nlmsg_data(n);
1535 clid = tcm->tcm_parent;
1538 dev = __dev_get_by_index(net, tcm->tcm_ifindex);
1544 if (clid != TC_H_ROOT) {
1545 if (clid != TC_H_INGRESS) {
1546 p = qdisc_lookup(dev, TC_H_MAJ(clid));
1548 NL_SET_ERR_MSG(extack, "Failed to find specified qdisc");
1551 q = qdisc_leaf(p, clid);
1552 } else if (dev_ingress_queue_create(dev)) {
1553 q = dev_ingress_queue(dev)->qdisc_sleeping;
1556 q = rtnl_dereference(dev->qdisc);
1559 /* It may be the default qdisc; ignore it. */
1560 if (q && q->handle == 0)
1563 if (!q || !tcm->tcm_handle || q->handle != tcm->tcm_handle) {
1564 if (tcm->tcm_handle) {
1565 if (q && !(n->nlmsg_flags & NLM_F_REPLACE)) {
1566 NL_SET_ERR_MSG(extack, "NLM_F_REPLACE needed to override");
1569 if (TC_H_MIN(tcm->tcm_handle)) {
1570 NL_SET_ERR_MSG(extack, "Invalid minor handle");
1573 q = qdisc_lookup(dev, tcm->tcm_handle);
1575 goto create_n_graft;
1576 if (n->nlmsg_flags & NLM_F_EXCL) {
1577 NL_SET_ERR_MSG(extack, "Exclusivity flag on, cannot override");
1580 if (tca[TCA_KIND] &&
1581 nla_strcmp(tca[TCA_KIND], q->ops->id)) {
1582 NL_SET_ERR_MSG(extack, "Invalid qdisc name");
1586 (p && check_loop(q, p, 0))) {
1587 NL_SET_ERR_MSG(extack, "Qdisc parent/child loop detected");
1590 qdisc_refcount_inc(q);
1594 goto create_n_graft;
1596 /* This magic test requires explanation.
1598 * We know that some child q is already
1599 * attached to this parent and we have a choice:
1600 * either change it or create/graft a new one.
1602 * 1. We are allowed to create/graft only
1603 * if both the CREATE and REPLACE flags are set.
1605 * 2. If EXCL is set, the requestor wanted to say
1606 * that the qdisc tcm_handle is not expected
1607 * to exist, so we choose create/graft too.
1609 * 3. The last case is when no flags are set.
1610 * Alas, it is sort of a hole in the API; we
1611 * cannot decide what to do unambiguously.
1612 * For now we select create/graft if the
1613 * user gave a KIND that does not match the existing one.
1615 if ((n->nlmsg_flags & NLM_F_CREATE) &&
1616 (n->nlmsg_flags & NLM_F_REPLACE) &&
1617 ((n->nlmsg_flags & NLM_F_EXCL) ||
1619 nla_strcmp(tca[TCA_KIND], q->ops->id))))
1620 goto create_n_graft;
1624 if (!tcm->tcm_handle) {
1625 NL_SET_ERR_MSG(extack, "Handle cannot be zero");
1628 q = qdisc_lookup(dev, tcm->tcm_handle);
1631 /* Change qdisc parameters */
1633 NL_SET_ERR_MSG(extack, "Specified qdisc not found");
1636 if (n->nlmsg_flags & NLM_F_EXCL) {
1637 NL_SET_ERR_MSG(extack, "Exclusivity flag on, cannot modify");
1640 if (tca[TCA_KIND] && nla_strcmp(tca[TCA_KIND], q->ops->id)) {
1641 NL_SET_ERR_MSG(extack, "Invalid qdisc name");
1644 err = qdisc_change(q, tca, extack);
1646 qdisc_notify(net, skb, n, clid, NULL, q);
1650 if (!(n->nlmsg_flags & NLM_F_CREATE)) {
1651 NL_SET_ERR_MSG(extack, "Qdisc not found. To create specify NLM_F_CREATE flag");
1654 if (clid == TC_H_INGRESS) {
1655 if (dev_ingress_queue(dev)) {
1656 q = qdisc_create(dev, dev_ingress_queue(dev),
1657 tcm->tcm_parent, tcm->tcm_parent,
1660 NL_SET_ERR_MSG(extack, "Cannot find ingress queue for specified device");
1664 struct netdev_queue *dev_queue;
1666 if (p && p->ops->cl_ops && p->ops->cl_ops->select_queue)
1667 dev_queue = p->ops->cl_ops->select_queue(p, tcm);
1669 dev_queue = p->dev_queue;
1671 dev_queue = netdev_get_tx_queue(dev, 0);
1673 q = qdisc_create(dev, dev_queue,
1674 tcm->tcm_parent, tcm->tcm_handle,
1684 err = qdisc_graft(dev, p, skb, n, clid, q, NULL, extack);
1694 static int tc_dump_qdisc_root(struct Qdisc *root, struct sk_buff *skb,
1695 struct netlink_callback *cb,
1696 int *q_idx_p, int s_q_idx, bool recur,
1697 bool dump_invisible)
1699 int ret = 0, q_idx = *q_idx_p;
1707 if (q_idx < s_q_idx) {
1710 if (!tc_qdisc_dump_ignore(q, dump_invisible) &&
1711 tc_fill_qdisc(skb, q, q->parent, NETLINK_CB(cb->skb).portid,
1712 cb->nlh->nlmsg_seq, NLM_F_MULTI,
1718 /* If dumping singletons, there is no qdisc_dev(root) and the singleton
1719 * itself has already been dumped.
1721 * If we've already dumped the top-level (ingress) qdisc above and the global
1722 * qdisc hashtable, we don't want to hit it again
1724 if (!qdisc_dev(root) || !recur)
1727 hash_for_each(qdisc_dev(root)->qdisc_hash, b, q, hash) {
1728 if (q_idx < s_q_idx) {
1732 if (!tc_qdisc_dump_ignore(q, dump_invisible) &&
1733 tc_fill_qdisc(skb, q, q->parent, NETLINK_CB(cb->skb).portid,
1734 cb->nlh->nlmsg_seq, NLM_F_MULTI,
1748 static int tc_dump_qdisc(struct sk_buff *skb, struct netlink_callback *cb)
1750 struct net *net = sock_net(skb->sk);
1753 struct net_device *dev;
1754 const struct nlmsghdr *nlh = cb->nlh;
1755 struct nlattr *tca[TCA_MAX + 1];
1758 s_idx = cb->args[0];
1759 s_q_idx = q_idx = cb->args[1];
1764 err = nlmsg_parse_deprecated(nlh, sizeof(struct tcmsg), tca, TCA_MAX,
1765 rtm_tca_policy, cb->extack);
1769 for_each_netdev(net, dev) {
1770 struct netdev_queue *dev_queue;
1778 if (tc_dump_qdisc_root(rtnl_dereference(dev->qdisc),
1779 skb, cb, &q_idx, s_q_idx,
1780 true, tca[TCA_DUMP_INVISIBLE]) < 0)
1783 dev_queue = dev_ingress_queue(dev);
1785 tc_dump_qdisc_root(dev_queue->qdisc_sleeping, skb, cb,
1786 &q_idx, s_q_idx, false,
1787 tca[TCA_DUMP_INVISIBLE]) < 0)
1796 cb->args[1] = q_idx;
1803 /************************************************
1804 * Traffic classes manipulation. *
1805 ************************************************/
1807 static int tc_fill_tclass(struct sk_buff *skb, struct Qdisc *q,
1809 u32 portid, u32 seq, u16 flags, int event)
1812 struct nlmsghdr *nlh;
1813 unsigned char *b = skb_tail_pointer(skb);
1815 const struct Qdisc_class_ops *cl_ops = q->ops->cl_ops;
1818 nlh = nlmsg_put(skb, portid, seq, event, sizeof(*tcm), flags);
1820 goto out_nlmsg_trim;
1821 tcm = nlmsg_data(nlh);
1822 tcm->tcm_family = AF_UNSPEC;
1825 tcm->tcm_ifindex = qdisc_dev(q)->ifindex;
1826 tcm->tcm_parent = q->handle;
1827 tcm->tcm_handle = q->handle;
1829 if (nla_put_string(skb, TCA_KIND, q->ops->id))
1830 goto nla_put_failure;
1831 if (cl_ops->dump && cl_ops->dump(q, cl, skb, tcm) < 0)
1832 goto nla_put_failure;
1834 if (gnet_stats_start_copy_compat(skb, TCA_STATS2, TCA_STATS, TCA_XSTATS,
1835 NULL, &d, TCA_PAD) < 0)
1836 goto nla_put_failure;
1838 if (cl_ops->dump_stats && cl_ops->dump_stats(q, cl, &d) < 0)
1839 goto nla_put_failure;
1841 if (gnet_stats_finish_copy(&d) < 0)
1842 goto nla_put_failure;
1844 nlh->nlmsg_len = skb_tail_pointer(skb) - b;
1853 static int tclass_notify(struct net *net, struct sk_buff *oskb,
1854 struct nlmsghdr *n, struct Qdisc *q,
1855 unsigned long cl, int event)
1857 struct sk_buff *skb;
1858 u32 portid = oskb ? NETLINK_CB(oskb).portid : 0;
1860 skb = alloc_skb(NLMSG_GOODSIZE, GFP_KERNEL);
1864 if (tc_fill_tclass(skb, q, cl, portid, n->nlmsg_seq, 0, event) < 0) {
1869 return rtnetlink_send(skb, net, portid, RTNLGRP_TC,
1870 n->nlmsg_flags & NLM_F_ECHO);
1873 static int tclass_del_notify(struct net *net,
1874 const struct Qdisc_class_ops *cops,
1875 struct sk_buff *oskb, struct nlmsghdr *n,
1876 struct Qdisc *q, unsigned long cl,
1877 struct netlink_ext_ack *extack)
1879 u32 portid = oskb ? NETLINK_CB(oskb).portid : 0;
1880 struct sk_buff *skb;
1886 skb = alloc_skb(NLMSG_GOODSIZE, GFP_KERNEL);
1890 if (tc_fill_tclass(skb, q, cl, portid, n->nlmsg_seq, 0,
1891 RTM_DELTCLASS) < 0) {
1896 err = cops->delete(q, cl, extack);
1902 err = rtnetlink_send(skb, net, portid, RTNLGRP_TC,
1903 n->nlmsg_flags & NLM_F_ECHO);
1907 #ifdef CONFIG_NET_CLS
1909 struct tcf_bind_args {
1910 struct tcf_walker w;
1916 static int tcf_node_bind(struct tcf_proto *tp, void *n, struct tcf_walker *arg)
1918 struct tcf_bind_args *a = (void *)arg;
1920 if (n && tp->ops->bind_class) {
1921 struct Qdisc *q = tcf_block_q(tp->chain->block);
1924 tp->ops->bind_class(n, a->classid, a->cl, q, a->base);
1930 struct tc_bind_class_args {
1931 struct qdisc_walker w;
1932 unsigned long new_cl;
1937 static int tc_bind_class_walker(struct Qdisc *q, unsigned long cl,
1938 struct qdisc_walker *w)
1940 struct tc_bind_class_args *a = (struct tc_bind_class_args *)w;
1941 const struct Qdisc_class_ops *cops = q->ops->cl_ops;
1942 struct tcf_block *block;
1943 struct tcf_chain *chain;
1945 block = cops->tcf_block(q, cl, NULL);
1948 for (chain = tcf_get_next_chain(block, NULL);
1950 chain = tcf_get_next_chain(block, chain)) {
1951 struct tcf_proto *tp;
1953 for (tp = tcf_get_next_proto(chain, NULL);
1954 tp; tp = tcf_get_next_proto(chain, tp)) {
1955 struct tcf_bind_args arg = {};
1957 arg.w.fn = tcf_node_bind;
1958 arg.classid = a->clid;
1961 tp->ops->walk(tp, &arg.w, true);
1968 static void tc_bind_tclass(struct Qdisc *q, u32 portid, u32 clid,
1969 unsigned long new_cl)
1971 const struct Qdisc_class_ops *cops = q->ops->cl_ops;
1972 struct tc_bind_class_args args = {};
1974 if (!cops->tcf_block)
1976 args.portid = portid;
1978 args.new_cl = new_cl;
1979 args.w.fn = tc_bind_class_walker;
1980 q->ops->cl_ops->walk(q, &args.w);
1985 static void tc_bind_tclass(struct Qdisc *q, u32 portid, u32 clid,
1986 unsigned long new_cl)
1992 static int tc_ctl_tclass(struct sk_buff *skb, struct nlmsghdr *n,
1993 struct netlink_ext_ack *extack)
1995 struct net *net = sock_net(skb->sk);
1996 struct tcmsg *tcm = nlmsg_data(n);
1997 struct nlattr *tca[TCA_MAX + 1];
1998 struct net_device *dev;
1999 struct Qdisc *q = NULL;
2000 const struct Qdisc_class_ops *cops;
2001 unsigned long cl = 0;
2002 unsigned long new_cl;
2008 err = nlmsg_parse_deprecated(n, sizeof(*tcm), tca, TCA_MAX,
2009 rtm_tca_policy, extack);
2013 dev = __dev_get_by_index(net, tcm->tcm_ifindex);
2018 parent == TC_H_UNSPEC - unspecified parent.
2019 parent == TC_H_ROOT - class is root, which has no parent.
2020 parent == X:0 - parent is root class.
2021 parent == X:Y - parent is a node in hierarchy.
2022 parent == 0:Y - parent is X:Y, where X:0 is qdisc.
2024 handle == 0:0 - generate handle from kernel pool.
2025 handle == 0:Y - class is X:Y, where X:0 is qdisc.
2026 handle == X:Y - clear.
2027 handle == X:0 - root class.
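/* Worked example (illustrative): "tc class add dev eth0 parent 1:1 classid 1:10 ..."
 * arrives with tcm_parent = 0x00010001 and tcm_handle = 0x00010010 (tc major
 * and minor numbers are hexadecimal), so qid = TC_H_MAJ(tcm_handle) =
 * 0x00010000 and both parent and handle resolve under qdisc 1:0.
 */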
2030 /* Step 1. Determine qdisc handle X:0 */
2032 portid = tcm->tcm_parent;
2033 clid = tcm->tcm_handle;
2034 qid = TC_H_MAJ(clid);
2036 if (portid != TC_H_ROOT) {
2037 u32 qid1 = TC_H_MAJ(portid);
2040 /* If both majors are known, they must be identical. */
2045 } else if (qid == 0)
2046 qid = rtnl_dereference(dev->qdisc)->handle;
2048 /* Now qid is a genuine qdisc handle consistent
2049 * with both parent and child.
2051 * TC_H_MAJ(portid) still may be unspecified, complete it now.
2054 portid = TC_H_MAKE(qid, portid);
2057 qid = rtnl_dereference(dev->qdisc)->handle;
2060 /* OK. Locate qdisc */
2061 q = qdisc_lookup(dev, qid);
2065 /* And check that it supports classes */
2066 cops = q->ops->cl_ops;
2070 /* Now try to get class */
2072 if (portid == TC_H_ROOT)
2075 clid = TC_H_MAKE(qid, clid);
2078 cl = cops->find(q, clid);
2082 if (n->nlmsg_type != RTM_NEWTCLASS ||
2083 !(n->nlmsg_flags & NLM_F_CREATE))
2086 switch (n->nlmsg_type) {
2089 if (n->nlmsg_flags & NLM_F_EXCL)
2093 err = tclass_del_notify(net, cops, skb, n, q, cl, extack);
2094 /* Unbind the class from its filters by rebinding them to class 0 */
2095 tc_bind_tclass(q, portid, clid, 0);
2098 err = tclass_notify(net, skb, n, q, cl, RTM_NEWTCLASS);
2106 if (tca[TCA_INGRESS_BLOCK] || tca[TCA_EGRESS_BLOCK]) {
2107 NL_SET_ERR_MSG(extack, "Shared blocks are not supported for classes");
2114 err = cops->change(q, clid, portid, tca, &new_cl, extack);
2116 tclass_notify(net, skb, n, q, new_cl, RTM_NEWTCLASS);
2117 /* We just created a new class; we need to do the reverse binding. */
2119 tc_bind_tclass(q, portid, clid, new_cl);
2125 struct qdisc_dump_args {
2126 struct qdisc_walker w;
2127 struct sk_buff *skb;
2128 struct netlink_callback *cb;
2131 static int qdisc_class_dump(struct Qdisc *q, unsigned long cl,
2132 struct qdisc_walker *arg)
2134 struct qdisc_dump_args *a = (struct qdisc_dump_args *)arg;
2136 return tc_fill_tclass(a->skb, q, cl, NETLINK_CB(a->cb->skb).portid,
2137 a->cb->nlh->nlmsg_seq, NLM_F_MULTI,
2141 static int tc_dump_tclass_qdisc(struct Qdisc *q, struct sk_buff *skb,
2142 struct tcmsg *tcm, struct netlink_callback *cb,
2145 struct qdisc_dump_args arg;
2147 if (tc_qdisc_dump_ignore(q, false) ||
2148 *t_p < s_t || !q->ops->cl_ops ||
2150 TC_H_MAJ(tcm->tcm_parent) != q->handle)) {
2155 memset(&cb->args[1], 0, sizeof(cb->args)-sizeof(cb->args[0]));
2156 arg.w.fn = qdisc_class_dump;
2160 arg.w.skip = cb->args[1];
2162 q->ops->cl_ops->walk(q, &arg.w);
2163 cb->args[1] = arg.w.count;
2170 static int tc_dump_tclass_root(struct Qdisc *root, struct sk_buff *skb,
2171 struct tcmsg *tcm, struct netlink_callback *cb,
2172 int *t_p, int s_t, bool recur)
2180 if (tc_dump_tclass_qdisc(root, skb, tcm, cb, t_p, s_t) < 0)
2183 if (!qdisc_dev(root) || !recur)
2186 if (tcm->tcm_parent) {
2187 q = qdisc_match_from_root(root, TC_H_MAJ(tcm->tcm_parent));
2188 if (q && q != root &&
2189 tc_dump_tclass_qdisc(q, skb, tcm, cb, t_p, s_t) < 0)
2193 hash_for_each(qdisc_dev(root)->qdisc_hash, b, q, hash) {
2194 if (tc_dump_tclass_qdisc(q, skb, tcm, cb, t_p, s_t) < 0)
2201 static int tc_dump_tclass(struct sk_buff *skb, struct netlink_callback *cb)
2203 struct tcmsg *tcm = nlmsg_data(cb->nlh);
2204 struct net *net = sock_net(skb->sk);
2205 struct netdev_queue *dev_queue;
2206 struct net_device *dev;
2209 if (nlmsg_len(cb->nlh) < sizeof(*tcm))
2211 dev = dev_get_by_index(net, tcm->tcm_ifindex);
2218 if (tc_dump_tclass_root(rtnl_dereference(dev->qdisc),
2219 skb, tcm, cb, &t, s_t, true) < 0)
2222 dev_queue = dev_ingress_queue(dev);
2224 tc_dump_tclass_root(dev_queue->qdisc_sleeping, skb, tcm, cb,
2225 &t, s_t, false) < 0)
2235 #ifdef CONFIG_PROC_FS
2236 static int psched_show(struct seq_file *seq, void *v)
2238 seq_printf(seq, "%08x %08x %08x %08x\n",
2239 (u32)NSEC_PER_USEC, (u32)PSCHED_TICKS2NS(1),
2240 1000000,
2241 (u32)NSEC_PER_SEC / hrtimer_resolution);
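/* Typical output (illustrative, on a system with 1 ns hrtimer resolution):
 *	# cat /proc/net/psched
 *	000003e8 00000040 000f4240 3b9aca00
 * where the first field is NSEC_PER_USEC (0x3e8), the second PSCHED_TICKS2NS(1)
 * (64 ns per tick), and the last NSEC_PER_SEC / hrtimer_resolution.
 */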
2246 static int __net_init psched_net_init(struct net *net)
2248 struct proc_dir_entry *e;
2250 e = proc_create_single("psched", 0, net->proc_net, psched_show);
2257 static void __net_exit psched_net_exit(struct net *net)
2259 remove_proc_entry("psched", net->proc_net);
2262 static int __net_init psched_net_init(struct net *net)
2267 static void __net_exit psched_net_exit(struct net *net)
2272 static struct pernet_operations psched_net_ops = {
2273 .init = psched_net_init,
2274 .exit = psched_net_exit,
2277 DEFINE_STATIC_KEY_FALSE(tc_skip_wrapper);
2279 static int __init pktsched_init(void)
2283 err = register_pernet_subsys(&psched_net_ops);
2285 pr_err("pktsched_init: "
2286 "cannot initialize per netns operations\n");
2290 register_qdisc(&pfifo_fast_ops);
2291 register_qdisc(&pfifo_qdisc_ops);
2292 register_qdisc(&bfifo_qdisc_ops);
2293 register_qdisc(&pfifo_head_drop_qdisc_ops);
2294 register_qdisc(&mq_qdisc_ops);
2295 register_qdisc(&noqueue_qdisc_ops);
2297 rtnl_register(PF_UNSPEC, RTM_NEWQDISC, tc_modify_qdisc, NULL, 0);
2298 rtnl_register(PF_UNSPEC, RTM_DELQDISC, tc_get_qdisc, NULL, 0);
2299 rtnl_register(PF_UNSPEC, RTM_GETQDISC, tc_get_qdisc, tc_dump_qdisc,
2301 rtnl_register(PF_UNSPEC, RTM_NEWTCLASS, tc_ctl_tclass, NULL, 0);
2302 rtnl_register(PF_UNSPEC, RTM_DELTCLASS, tc_ctl_tclass, NULL, 0);
2303 rtnl_register(PF_UNSPEC, RTM_GETTCLASS, tc_ctl_tclass, tc_dump_tclass,
2311 subsys_initcall(pktsched_init);