1 // SPDX-License-Identifier: GPL-2.0-or-later
3 * net/sched/sch_api.c Packet scheduler API.
14 #include <linux/module.h>
15 #include <linux/types.h>
16 #include <linux/kernel.h>
17 #include <linux/string.h>
18 #include <linux/errno.h>
19 #include <linux/skbuff.h>
20 #include <linux/init.h>
21 #include <linux/proc_fs.h>
22 #include <linux/seq_file.h>
23 #include <linux/kmod.h>
24 #include <linux/list.h>
25 #include <linux/hrtimer.h>
26 #include <linux/slab.h>
27 #include <linux/hashtable.h>
29 #include <net/net_namespace.h>
31 #include <net/netlink.h>
32 #include <net/pkt_sched.h>
33 #include <net/pkt_cls.h>
34 #include <net/tc_wrapper.h>
36 #include <trace/events/qdisc.h>
43 This file consists of two interrelated parts:
45 1. queueing disciplines manager frontend.
46 2. traffic classes manager frontend.
48 Generally, a queueing discipline ("qdisc") is a black box
49 that can enqueue packets and dequeue them (when the
50 device is ready to send something) in an order and at times
51 determined by the algorithm hidden inside it.
53 qdiscs are divided into two categories:
54 - "queues", which have no internal structure visible from outside.
55 - "schedulers", which split all the packets to "traffic classes",
56 using "packet classifiers" (look at cls_api.c)
58 In turn, classes may have child qdiscs (as a rule, queues)
59 attached to them, and so on recursively.
61 The goal of the routines in this file is to translate
62 the information supplied by the user in the form of handles
63 into a form more intelligible to the kernel, to perform some sanity
64 checks and the part of the work that is common to all qdiscs,
65 and to provide rtnetlink notifications.
67 All real intelligent work is done inside qdisc modules.
71 Every discipline has two major routines: enqueue and dequeue.
75 dequeue usually returns an skb to send. It is allowed to return NULL,
76 but that does not mean the queue is empty; it just means that the
77 discipline does not want to send anything at this time.
78 The queue is really empty only if q->q.qlen == 0.
79 For complicated disciplines with multiple queues, q->q is not the
80 real packet queue, but q->q.qlen must nevertheless be valid.
84 enqueue returns 0 if the packet was enqueued successfully.
85 If a packet (this one or another one) was dropped, it returns a nonzero code:
87 NET_XMIT_DROP - this packet was dropped.
88 Expected action: do not back off, but wait until the queue clears.
89 NET_XMIT_CN - this packet was probably enqueued, but another one was dropped.
90 Expected action: back off or ignore.
96 like dequeue, but without removing a packet from the queue
100 returns the qdisc to its initial state: purge all buffers, clear all
101 timers, counters (except statistics), etc.
105 initializes a newly created qdisc.
109 destroys resources allocated by init and during the lifetime of the qdisc.
113 changes qdisc parameters.
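/*
 * Illustrative sketch (not part of the original file): a minimal qdisc
 * implementing the routines described above.  "example_fifo" and the
 * example_* helpers are hypothetical names; real queues live in
 * net/sched/sch_*.c.
 */
#if 0
static int example_enqueue(struct sk_buff *skb, struct Qdisc *sch,
			   struct sk_buff **to_free)
{
	if (likely(sch->q.qlen < READ_ONCE(sch->limit)))
		return qdisc_enqueue_tail(skb, sch);	/* NET_XMIT_SUCCESS */

	/* Tail drop: account the drop and report NET_XMIT_DROP. */
	return qdisc_drop(skb, sch, to_free);
}

static struct sk_buff *example_dequeue(struct Qdisc *sch)
{
	/* Returning NULL only means "nothing to send right now";
	 * the queue is empty only when sch->q.qlen == 0.
	 */
	return qdisc_dequeue_head(sch);
}

static struct Qdisc_ops example_qdisc_ops __read_mostly = {
	.id		= "example_fifo",
	.priv_size	= 0,
	.enqueue	= example_enqueue,
	.dequeue	= example_dequeue,
	.peek		= qdisc_peek_head,	/* like dequeue, but non-destructive */
	.reset		= qdisc_reset_queue,	/* purge buffers, keep statistics */
	.owner		= THIS_MODULE,
};
/* A real module would call register_qdisc(&example_qdisc_ops) from its init. */
#endif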
116 /* Protects list of registered TC modules. It is pure SMP lock. */
117 static DEFINE_RWLOCK(qdisc_mod_lock);
120 /************************************************
121 * Queueing disciplines manipulation. *
122 ************************************************/
125 /* The list of all installed queueing disciplines. */
127 static struct Qdisc_ops *qdisc_base;
129 /* Register/unregister queueing discipline */
131 int register_qdisc(struct Qdisc_ops *qops)
133 struct Qdisc_ops *q, **qp;
136 write_lock(&qdisc_mod_lock);
137 for (qp = &qdisc_base; (q = *qp) != NULL; qp = &q->next)
138 if (!strcmp(qops->id, q->id))
141 if (qops->enqueue == NULL)
142 qops->enqueue = noop_qdisc_ops.enqueue;
143 if (qops->peek == NULL) {
144 if (qops->dequeue == NULL)
145 qops->peek = noop_qdisc_ops.peek;
149 if (qops->dequeue == NULL)
150 qops->dequeue = noop_qdisc_ops.dequeue;
153 const struct Qdisc_class_ops *cops = qops->cl_ops;
155 if (!(cops->find && cops->walk && cops->leaf))
158 if (cops->tcf_block && !(cops->bind_tcf && cops->unbind_tcf))
166 write_unlock(&qdisc_mod_lock);
173 EXPORT_SYMBOL(register_qdisc);
175 void unregister_qdisc(struct Qdisc_ops *qops)
177 struct Qdisc_ops *q, **qp;
180 write_lock(&qdisc_mod_lock);
181 for (qp = &qdisc_base; (q = *qp) != NULL; qp = &q->next)
189 write_unlock(&qdisc_mod_lock);
191 WARN(err, "unregister qdisc(%s) failed\n", qops->id);
193 EXPORT_SYMBOL(unregister_qdisc);
195 /* Get default qdisc if not otherwise specified */
196 void qdisc_get_default(char *name, size_t len)
198 read_lock(&qdisc_mod_lock);
199 strscpy(name, default_qdisc_ops->id, len);
200 read_unlock(&qdisc_mod_lock);
203 static struct Qdisc_ops *qdisc_lookup_default(const char *name)
205 struct Qdisc_ops *q = NULL;
207 for (q = qdisc_base; q; q = q->next) {
208 if (!strcmp(name, q->id)) {
209 if (!try_module_get(q->owner))
218 /* Set new default qdisc to use */
219 int qdisc_set_default(const char *name)
221 const struct Qdisc_ops *ops;
223 if (!capable(CAP_NET_ADMIN))
226 write_lock(&qdisc_mod_lock);
227 ops = qdisc_lookup_default(name);
229 /* Not found, drop lock and try to load module */
230 write_unlock(&qdisc_mod_lock);
231 request_module("sch_%s", name);
232 write_lock(&qdisc_mod_lock);
234 ops = qdisc_lookup_default(name);
238 /* Set new default */
239 module_put(default_qdisc_ops->owner);
240 default_qdisc_ops = ops;
242 write_unlock(&qdisc_mod_lock);
244 return ops ? 0 : -ENOENT;
247 #ifdef CONFIG_NET_SCH_DEFAULT
248 /* Set default value from kernel config */
249 static int __init sch_default_qdisc(void)
251 return qdisc_set_default(CONFIG_DEFAULT_NET_SCH);
253 late_initcall(sch_default_qdisc);
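/*
 * Illustrative sketch (not part of the original file): how the default-qdisc
 * selection above is typically exercised.  qdisc_set_default() is what the
 * "net.core.default_qdisc" sysctl handler ends up invoking; the function
 * below is hypothetical.
 */
#if 0
static void example_default_qdisc(void)
{
	char name[IFNAMSIZ];

	/* Switch the default; loads sch_fq_codel.ko if it is not built in. */
	if (qdisc_set_default("fq_codel"))
		pr_warn("fq_codel is not available\n");

	/* Read back the current default ("pfifo_fast" at boot unless
	 * CONFIG_DEFAULT_NET_SCH says otherwise).
	 */
	qdisc_get_default(name, sizeof(name));
	pr_info("default qdisc is %s\n", name);
}
#endif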
256 /* We know the handle. Find the qdisc among all qdiscs attached to the device
257 * (the root qdisc, all its children, children of children, etc.)
258 * Note: the caller either holds rtnl or rcu_read_lock()
261 static struct Qdisc *qdisc_match_from_root(struct Qdisc *root, u32 handle)
265 if (!qdisc_dev(root))
266 return (root->handle == handle ? root : NULL);
268 if (!(root->flags & TCQ_F_BUILTIN) &&
269 root->handle == handle)
272 hash_for_each_possible_rcu(qdisc_dev(root)->qdisc_hash, q, hash, handle,
273 lockdep_rtnl_is_held()) {
274 if (q->handle == handle)
280 void qdisc_hash_add(struct Qdisc *q, bool invisible)
282 if ((q->parent != TC_H_ROOT) && !(q->flags & TCQ_F_INGRESS)) {
284 hash_add_rcu(qdisc_dev(q)->qdisc_hash, &q->hash, q->handle);
286 q->flags |= TCQ_F_INVISIBLE;
289 EXPORT_SYMBOL(qdisc_hash_add);
291 void qdisc_hash_del(struct Qdisc *q)
293 if ((q->parent != TC_H_ROOT) && !(q->flags & TCQ_F_INGRESS)) {
295 hash_del_rcu(&q->hash);
298 EXPORT_SYMBOL(qdisc_hash_del);
300 struct Qdisc *qdisc_lookup(struct net_device *dev, u32 handle)
306 q = qdisc_match_from_root(rtnl_dereference(dev->qdisc), handle);
310 if (dev_ingress_queue(dev))
311 q = qdisc_match_from_root(
312 dev_ingress_queue(dev)->qdisc_sleeping,
318 struct Qdisc *qdisc_lookup_rcu(struct net_device *dev, u32 handle)
320 struct netdev_queue *nq;
325 q = qdisc_match_from_root(rcu_dereference(dev->qdisc), handle);
329 nq = dev_ingress_queue_rcu(dev);
331 q = qdisc_match_from_root(nq->qdisc_sleeping, handle);
336 static struct Qdisc *qdisc_leaf(struct Qdisc *p, u32 classid)
339 const struct Qdisc_class_ops *cops = p->ops->cl_ops;
343 cl = cops->find(p, classid);
347 return cops->leaf(p, cl);
350 /* Find queueing discipline by name */
352 static struct Qdisc_ops *qdisc_lookup_ops(struct nlattr *kind)
354 struct Qdisc_ops *q = NULL;
357 read_lock(&qdisc_mod_lock);
358 for (q = qdisc_base; q; q = q->next) {
359 if (nla_strcmp(kind, q->id) == 0) {
360 if (!try_module_get(q->owner))
365 read_unlock(&qdisc_mod_lock);
370 /* The linklayer setting was not transferred from iproute2, in older
371 * versions, and the rate table lookup system has been dropped from
372 * the kernel. To stay backward compatible with older iproute2 tc
373 * utils, we detect the linklayer setting by checking whether the rate
374 * table was modified.
376 * For linklayer ATM, the rate table will be aligned to
377 * 48 bytes, thus some table entries will contain the same value. The
378 * mpu (min packet unit) is also encoded into the old rate table, thus
379 * starting from the mpu, we find the low and high table entries for
380 * mapping this cell. If these entries contain the same value, then
381 * the rate table has been modified for linklayer ATM.
383 * This is done by rounding the mpu up to the nearest 48-byte cell/entry,
384 * then rounding up to the next cell, calculating the entry one below, and comparing the two.
387 static __u8 __detect_linklayer(struct tc_ratespec *r, __u32 *rtab)
389 int low = roundup(r->mpu, 48);
390 int high = roundup(low+1, 48);
391 int cell_low = low >> r->cell_log;
392 int cell_high = (high >> r->cell_log) - 1;
394 /* rtab is too inaccurate at rates > 100Mbit/s */
395 if ((r->rate > (100000000/8)) || (rtab[0] == 0)) {
396 pr_debug("TC linklayer: Giving up ATM detection\n");
397 return TC_LINKLAYER_ETHERNET;
400 if ((cell_high > cell_low) && (cell_high < 256)
401 && (rtab[cell_low] == rtab[cell_high])) {
402 pr_debug("TC linklayer: Detected ATM, low(%d)=high(%d)=%u\n",
403 cell_low, cell_high, rtab[cell_high]);
404 return TC_LINKLAYER_ATM;
406 return TC_LINKLAYER_ETHERNET;
409 static struct qdisc_rate_table *qdisc_rtab_list;
411 struct qdisc_rate_table *qdisc_get_rtab(struct tc_ratespec *r,
413 struct netlink_ext_ack *extack)
415 struct qdisc_rate_table *rtab;
417 if (tab == NULL || r->rate == 0 ||
418 r->cell_log == 0 || r->cell_log >= 32 ||
419 nla_len(tab) != TC_RTAB_SIZE) {
420 NL_SET_ERR_MSG(extack, "Invalid rate table parameters for searching");
424 for (rtab = qdisc_rtab_list; rtab; rtab = rtab->next) {
425 if (!memcmp(&rtab->rate, r, sizeof(struct tc_ratespec)) &&
426 !memcmp(&rtab->data, nla_data(tab), 1024)) {
432 rtab = kmalloc(sizeof(*rtab), GFP_KERNEL);
436 memcpy(rtab->data, nla_data(tab), 1024);
437 if (r->linklayer == TC_LINKLAYER_UNAWARE)
438 r->linklayer = __detect_linklayer(r, rtab->data);
439 rtab->next = qdisc_rtab_list;
440 qdisc_rtab_list = rtab;
442 NL_SET_ERR_MSG(extack, "Failed to allocate new qdisc rate table");
446 EXPORT_SYMBOL(qdisc_get_rtab);
448 void qdisc_put_rtab(struct qdisc_rate_table *tab)
450 struct qdisc_rate_table *rtab, **rtabp;
452 if (!tab || --tab->refcnt)
455 for (rtabp = &qdisc_rtab_list;
456 (rtab = *rtabp) != NULL;
457 rtabp = &rtab->next) {
465 EXPORT_SYMBOL(qdisc_put_rtab);
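/*
 * Illustrative sketch (not part of the original file): the usual get/put
 * pairing for the legacy rate table above, as a shaping qdisc would do it
 * in ->init()/->destroy().  struct example_sched_data and the
 * TCA_EXAMPLE_* attributes are hypothetical.
 */
#if 0
static int example_init(struct Qdisc *sch, struct nlattr **tb,
			struct netlink_ext_ack *extack)
{
	struct example_sched_data *q = qdisc_priv(sch);
	struct tc_ratespec *r = nla_data(tb[TCA_EXAMPLE_RATESPEC]);

	/* Looks up (or allocates and caches) the 256-entry transmission-time
	 * table; also fixes up r->linklayer for old iproute2 binaries.
	 */
	q->rtab = qdisc_get_rtab(r, tb[TCA_EXAMPLE_RTAB], extack);
	if (!q->rtab)
		return -EINVAL;
	return 0;
}

static void example_destroy(struct Qdisc *sch)
{
	struct example_sched_data *q = qdisc_priv(sch);

	/* Drops the reference; the table is freed once the last user is gone. */
	qdisc_put_rtab(q->rtab);
}
#endif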
467 static LIST_HEAD(qdisc_stab_list);
469 static const struct nla_policy stab_policy[TCA_STAB_MAX + 1] = {
470 [TCA_STAB_BASE] = { .len = sizeof(struct tc_sizespec) },
471 [TCA_STAB_DATA] = { .type = NLA_BINARY },
474 static struct qdisc_size_table *qdisc_get_stab(struct nlattr *opt,
475 struct netlink_ext_ack *extack)
477 struct nlattr *tb[TCA_STAB_MAX + 1];
478 struct qdisc_size_table *stab;
479 struct tc_sizespec *s;
480 unsigned int tsize = 0;
484 err = nla_parse_nested_deprecated(tb, TCA_STAB_MAX, opt, stab_policy,
488 if (!tb[TCA_STAB_BASE]) {
489 NL_SET_ERR_MSG(extack, "Size table base attribute is missing");
490 return ERR_PTR(-EINVAL);
493 s = nla_data(tb[TCA_STAB_BASE]);
496 if (!tb[TCA_STAB_DATA]) {
497 NL_SET_ERR_MSG(extack, "Size table data attribute is missing");
498 return ERR_PTR(-EINVAL);
500 tab = nla_data(tb[TCA_STAB_DATA]);
501 tsize = nla_len(tb[TCA_STAB_DATA]) / sizeof(u16);
504 if (tsize != s->tsize || (!tab && tsize > 0)) {
505 NL_SET_ERR_MSG(extack, "Invalid size of size table");
506 return ERR_PTR(-EINVAL);
509 list_for_each_entry(stab, &qdisc_stab_list, list) {
510 if (memcmp(&stab->szopts, s, sizeof(*s)))
513 memcmp(stab->data, tab, flex_array_size(stab, data, tsize)))
519 if (s->size_log > STAB_SIZE_LOG_MAX ||
520 s->cell_log > STAB_SIZE_LOG_MAX) {
521 NL_SET_ERR_MSG(extack, "Invalid logarithmic size of size table");
522 return ERR_PTR(-EINVAL);
525 stab = kmalloc(struct_size(stab, data, tsize), GFP_KERNEL);
527 return ERR_PTR(-ENOMEM);
532 memcpy(stab->data, tab, flex_array_size(stab, data, tsize));
534 list_add_tail(&stab->list, &qdisc_stab_list);
539 void qdisc_put_stab(struct qdisc_size_table *tab)
544 if (--tab->refcnt == 0) {
545 list_del(&tab->list);
549 EXPORT_SYMBOL(qdisc_put_stab);
551 static int qdisc_dump_stab(struct sk_buff *skb, struct qdisc_size_table *stab)
555 nest = nla_nest_start_noflag(skb, TCA_STAB);
557 goto nla_put_failure;
558 if (nla_put(skb, TCA_STAB_BASE, sizeof(stab->szopts), &stab->szopts))
559 goto nla_put_failure;
560 nla_nest_end(skb, nest);
568 void __qdisc_calculate_pkt_len(struct sk_buff *skb,
569 const struct qdisc_size_table *stab)
573 pkt_len = skb->len + stab->szopts.overhead;
574 if (unlikely(!stab->szopts.tsize))
577 slot = pkt_len + stab->szopts.cell_align;
578 if (unlikely(slot < 0))
581 slot >>= stab->szopts.cell_log;
582 if (likely(slot < stab->szopts.tsize))
583 pkt_len = stab->data[slot];
585 pkt_len = stab->data[stab->szopts.tsize - 1] *
586 (slot / stab->szopts.tsize) +
587 stab->data[slot % stab->szopts.tsize];
589 pkt_len <<= stab->szopts.size_log;
591 if (unlikely(pkt_len < 1))
593 qdisc_skb_cb(skb)->pkt_len = pkt_len;
595 EXPORT_SYMBOL(__qdisc_calculate_pkt_len);
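/*
 * Worked example for the computation above (all values hypothetical):
 * with overhead = 24, cell_align = 0, cell_log = 6, size_log = 0 and a
 * size table whose slot i holds (i + 1) * 64, a 100-byte skb gives
 * pkt_len = 100 + 24 = 124 and slot = 124 >> 6 = 1, so the accounted
 * pkt_len becomes data[1] = 128, which is then used for queue accounting
 * and scheduling instead of the wire length.
 */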
597 void qdisc_warn_nonwc(const char *txt, struct Qdisc *qdisc)
599 if (!(qdisc->flags & TCQ_F_WARN_NONWC)) {
600 pr_warn("%s: %s qdisc %X: is non-work-conserving?\n",
601 txt, qdisc->ops->id, qdisc->handle >> 16);
602 qdisc->flags |= TCQ_F_WARN_NONWC;
605 EXPORT_SYMBOL(qdisc_warn_nonwc);
607 static enum hrtimer_restart qdisc_watchdog(struct hrtimer *timer)
609 struct qdisc_watchdog *wd = container_of(timer, struct qdisc_watchdog,
613 __netif_schedule(qdisc_root(wd->qdisc));
616 return HRTIMER_NORESTART;
619 void qdisc_watchdog_init_clockid(struct qdisc_watchdog *wd, struct Qdisc *qdisc,
622 hrtimer_init(&wd->timer, clockid, HRTIMER_MODE_ABS_PINNED);
623 wd->timer.function = qdisc_watchdog;
626 EXPORT_SYMBOL(qdisc_watchdog_init_clockid);
628 void qdisc_watchdog_init(struct qdisc_watchdog *wd, struct Qdisc *qdisc)
630 qdisc_watchdog_init_clockid(wd, qdisc, CLOCK_MONOTONIC);
632 EXPORT_SYMBOL(qdisc_watchdog_init);
634 void qdisc_watchdog_schedule_range_ns(struct qdisc_watchdog *wd, u64 expires,
637 if (test_bit(__QDISC_STATE_DEACTIVATED,
638 &qdisc_root_sleeping(wd->qdisc)->state))
641 if (hrtimer_is_queued(&wd->timer)) {
644 softexpires = ktime_to_ns(hrtimer_get_softexpires(&wd->timer));
645 /* If timer is already set in [expires, expires + delta_ns],
646 * do not reprogram it.
648 if (softexpires - expires <= delta_ns)
652 hrtimer_start_range_ns(&wd->timer,
653 ns_to_ktime(expires),
655 HRTIMER_MODE_ABS_PINNED);
657 EXPORT_SYMBOL(qdisc_watchdog_schedule_range_ns);
659 void qdisc_watchdog_cancel(struct qdisc_watchdog *wd)
661 hrtimer_cancel(&wd->timer);
663 EXPORT_SYMBOL(qdisc_watchdog_cancel);
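/*
 * Illustrative sketch (not part of the original file): how a shaping qdisc
 * typically drives the watchdog above from its ->dequeue().  struct
 * example_sched_data and its next_tx_time/watchdog fields are hypothetical.
 */
#if 0
static struct sk_buff *example_shaper_dequeue(struct Qdisc *sch)
{
	struct example_sched_data *q = qdisc_priv(sch);
	u64 now = ktime_get_ns();

	if (q->next_tx_time > now) {
		/* Too early to send: arm the hrtimer so that the root qdisc
		 * is rescheduled (via __netif_schedule) when the gate opens.
		 */
		qdisc_watchdog_schedule_ns(&q->watchdog, q->next_tx_time);
		return NULL;	/* not empty, just not ready */
	}
	return qdisc_dequeue_head(sch);
}
#endif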
665 static struct hlist_head *qdisc_class_hash_alloc(unsigned int n)
667 struct hlist_head *h;
670 h = kvmalloc_array(n, sizeof(struct hlist_head), GFP_KERNEL);
673 for (i = 0; i < n; i++)
674 INIT_HLIST_HEAD(&h[i]);
679 void qdisc_class_hash_grow(struct Qdisc *sch, struct Qdisc_class_hash *clhash)
681 struct Qdisc_class_common *cl;
682 struct hlist_node *next;
683 struct hlist_head *nhash, *ohash;
684 unsigned int nsize, nmask, osize;
687 /* Rehash when load factor exceeds 0.75 */
688 if (clhash->hashelems * 4 <= clhash->hashsize * 3)
690 nsize = clhash->hashsize * 2;
692 nhash = qdisc_class_hash_alloc(nsize);
696 ohash = clhash->hash;
697 osize = clhash->hashsize;
700 for (i = 0; i < osize; i++) {
701 hlist_for_each_entry_safe(cl, next, &ohash[i], hnode) {
702 h = qdisc_class_hash(cl->classid, nmask);
703 hlist_add_head(&cl->hnode, &nhash[h]);
706 clhash->hash = nhash;
707 clhash->hashsize = nsize;
708 clhash->hashmask = nmask;
709 sch_tree_unlock(sch);
713 EXPORT_SYMBOL(qdisc_class_hash_grow);
715 int qdisc_class_hash_init(struct Qdisc_class_hash *clhash)
717 unsigned int size = 4;
719 clhash->hash = qdisc_class_hash_alloc(size);
722 clhash->hashsize = size;
723 clhash->hashmask = size - 1;
724 clhash->hashelems = 0;
727 EXPORT_SYMBOL(qdisc_class_hash_init);
729 void qdisc_class_hash_destroy(struct Qdisc_class_hash *clhash)
731 kvfree(clhash->hash);
733 EXPORT_SYMBOL(qdisc_class_hash_destroy);
735 void qdisc_class_hash_insert(struct Qdisc_class_hash *clhash,
736 struct Qdisc_class_common *cl)
740 INIT_HLIST_NODE(&cl->hnode);
741 h = qdisc_class_hash(cl->classid, clhash->hashmask);
742 hlist_add_head(&cl->hnode, &clhash->hash[h]);
745 EXPORT_SYMBOL(qdisc_class_hash_insert);
747 void qdisc_class_hash_remove(struct Qdisc_class_hash *clhash,
748 struct Qdisc_class_common *cl)
750 hlist_del(&cl->hnode);
753 EXPORT_SYMBOL(qdisc_class_hash_remove);
755 /* Allocate a unique handle from the space managed by the kernel.
756 * Possible range is [8000-FFFF]:0000 (0x8000 values)
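 * For example, the first handle handed out below is normally
 * TC_H_MAKE(0x80010000U, 0) == 0x80010000, which tc prints as "8001:";
 * later allocations keep bumping the major number until an unused
 * handle is found or the 0x8000-entry space is exhausted.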
758 static u32 qdisc_alloc_handle(struct net_device *dev)
761 static u32 autohandle = TC_H_MAKE(0x80000000U, 0);
764 autohandle += TC_H_MAKE(0x10000U, 0);
765 if (autohandle == TC_H_MAKE(TC_H_ROOT, 0))
766 autohandle = TC_H_MAKE(0x80000000U, 0);
767 if (!qdisc_lookup(dev, autohandle))
775 void qdisc_tree_reduce_backlog(struct Qdisc *sch, int n, int len)
777 bool qdisc_is_offloaded = sch->flags & TCQ_F_OFFLOADED;
778 const struct Qdisc_class_ops *cops;
784 if (n == 0 && len == 0)
786 drops = max_t(int, n, 0);
788 while ((parentid = sch->parent)) {
789 if (TC_H_MAJ(parentid) == TC_H_MAJ(TC_H_INGRESS))
792 if (sch->flags & TCQ_F_NOPARENT)
794 /* Notify parent qdisc only if child qdisc becomes empty.
796 * If child was empty even before update then backlog
797 * counter is screwed and we skip notification because
798 * parent class is already passive.
800 * If the original child was offloaded then it is allowed
801 * to be seen as empty, so the parent is notified anyway.
803 notify = !sch->q.qlen && !WARN_ON_ONCE(!n &&
804 !qdisc_is_offloaded);
805 /* TODO: perform the search on a per txq basis */
806 sch = qdisc_lookup(qdisc_dev(sch), TC_H_MAJ(parentid));
808 WARN_ON_ONCE(parentid != TC_H_ROOT);
811 cops = sch->ops->cl_ops;
812 if (notify && cops->qlen_notify) {
813 cl = cops->find(sch, parentid);
814 cops->qlen_notify(sch, cl);
817 sch->qstats.backlog -= len;
818 __qdisc_qstats_drop(sch, drops);
822 EXPORT_SYMBOL(qdisc_tree_reduce_backlog);
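/*
 * Illustrative sketch (not part of the original file): the usual caller
 * pattern for qdisc_tree_reduce_backlog().  A qdisc that drops packets
 * (here while shrinking a hypothetical limit in ->change()) must tell its
 * ancestors how many packets and bytes disappeared so their qlen/backlog
 * counters stay consistent.
 */
#if 0
static void example_shrink_queue(struct Qdisc *sch, u32 new_limit)
{
	unsigned int dropped_pkts = 0, dropped_bytes = 0;
	struct sk_buff *skb;

	while (sch->q.qlen > new_limit) {
		skb = qdisc_dequeue_head(sch);
		if (!skb)
			break;
		dropped_pkts++;
		dropped_bytes += qdisc_pkt_len(skb);
		kfree_skb(skb);
	}
	/* Propagate the removal up the tree (and notify now-empty parents). */
	qdisc_tree_reduce_backlog(sch, dropped_pkts, dropped_bytes);
}
#endif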
824 int qdisc_offload_dump_helper(struct Qdisc *sch, enum tc_setup_type type,
827 struct net_device *dev = qdisc_dev(sch);
830 sch->flags &= ~TCQ_F_OFFLOADED;
831 if (!tc_can_offload(dev) || !dev->netdev_ops->ndo_setup_tc)
834 err = dev->netdev_ops->ndo_setup_tc(dev, type, type_data);
835 if (err == -EOPNOTSUPP)
839 sch->flags |= TCQ_F_OFFLOADED;
843 EXPORT_SYMBOL(qdisc_offload_dump_helper);
845 void qdisc_offload_graft_helper(struct net_device *dev, struct Qdisc *sch,
846 struct Qdisc *new, struct Qdisc *old,
847 enum tc_setup_type type, void *type_data,
848 struct netlink_ext_ack *extack)
850 bool any_qdisc_is_offloaded;
853 if (!tc_can_offload(dev) || !dev->netdev_ops->ndo_setup_tc)
856 err = dev->netdev_ops->ndo_setup_tc(dev, type, type_data);
858 /* Don't report error if the graft is part of destroy operation. */
859 if (!err || !new || new == &noop_qdisc)
862 /* Don't report error if the parent, the old child and the new
863 * one are not offloaded.
865 any_qdisc_is_offloaded = new->flags & TCQ_F_OFFLOADED;
866 any_qdisc_is_offloaded |= sch && sch->flags & TCQ_F_OFFLOADED;
867 any_qdisc_is_offloaded |= old && old->flags & TCQ_F_OFFLOADED;
869 if (any_qdisc_is_offloaded)
870 NL_SET_ERR_MSG(extack, "Offloading graft operation failed.");
872 EXPORT_SYMBOL(qdisc_offload_graft_helper);
874 void qdisc_offload_query_caps(struct net_device *dev,
875 enum tc_setup_type type,
876 void *caps, size_t caps_len)
878 const struct net_device_ops *ops = dev->netdev_ops;
879 struct tc_query_caps_base base = {
884 memset(caps, 0, caps_len);
886 if (ops->ndo_setup_tc)
887 ops->ndo_setup_tc(dev, TC_QUERY_CAPS, &base);
889 EXPORT_SYMBOL(qdisc_offload_query_caps);
891 static void qdisc_offload_graft_root(struct net_device *dev,
892 struct Qdisc *new, struct Qdisc *old,
893 struct netlink_ext_ack *extack)
895 struct tc_root_qopt_offload graft_offload = {
896 .command = TC_ROOT_GRAFT,
897 .handle = new ? new->handle : 0,
898 .ingress = (new && new->flags & TCQ_F_INGRESS) ||
899 (old && old->flags & TCQ_F_INGRESS),
902 qdisc_offload_graft_helper(dev, NULL, new, old,
903 TC_SETUP_ROOT_QDISC, &graft_offload, extack);
906 static int tc_fill_qdisc(struct sk_buff *skb, struct Qdisc *q, u32 clid,
907 u32 portid, u32 seq, u16 flags, int event,
908 struct netlink_ext_ack *extack)
910 struct gnet_stats_basic_sync __percpu *cpu_bstats = NULL;
911 struct gnet_stats_queue __percpu *cpu_qstats = NULL;
913 struct nlmsghdr *nlh;
914 unsigned char *b = skb_tail_pointer(skb);
916 struct qdisc_size_table *stab;
921 nlh = nlmsg_put(skb, portid, seq, event, sizeof(*tcm), flags);
924 tcm = nlmsg_data(nlh);
925 tcm->tcm_family = AF_UNSPEC;
928 tcm->tcm_ifindex = qdisc_dev(q)->ifindex;
929 tcm->tcm_parent = clid;
930 tcm->tcm_handle = q->handle;
931 tcm->tcm_info = refcount_read(&q->refcnt);
932 if (nla_put_string(skb, TCA_KIND, q->ops->id))
933 goto nla_put_failure;
934 if (q->ops->ingress_block_get) {
935 block_index = q->ops->ingress_block_get(q);
937 nla_put_u32(skb, TCA_INGRESS_BLOCK, block_index))
938 goto nla_put_failure;
940 if (q->ops->egress_block_get) {
941 block_index = q->ops->egress_block_get(q);
943 nla_put_u32(skb, TCA_EGRESS_BLOCK, block_index))
944 goto nla_put_failure;
946 if (q->ops->dump && q->ops->dump(q, skb) < 0)
947 goto nla_put_failure;
948 if (nla_put_u8(skb, TCA_HW_OFFLOAD, !!(q->flags & TCQ_F_OFFLOADED)))
949 goto nla_put_failure;
950 qlen = qdisc_qlen_sum(q);
952 stab = rtnl_dereference(q->stab);
953 if (stab && qdisc_dump_stab(skb, stab) < 0)
954 goto nla_put_failure;
956 if (gnet_stats_start_copy_compat(skb, TCA_STATS2, TCA_STATS, TCA_XSTATS,
957 NULL, &d, TCA_PAD) < 0)
958 goto nla_put_failure;
960 if (q->ops->dump_stats && q->ops->dump_stats(q, &d) < 0)
961 goto nla_put_failure;
963 if (qdisc_is_percpu_stats(q)) {
964 cpu_bstats = q->cpu_bstats;
965 cpu_qstats = q->cpu_qstats;
968 if (gnet_stats_copy_basic(&d, cpu_bstats, &q->bstats, true) < 0 ||
969 gnet_stats_copy_rate_est(&d, &q->rate_est) < 0 ||
970 gnet_stats_copy_queue(&d, cpu_qstats, &q->qstats, qlen) < 0)
971 goto nla_put_failure;
973 if (gnet_stats_finish_copy(&d) < 0)
974 goto nla_put_failure;
976 if (extack && extack->_msg &&
977 nla_put_string(skb, TCA_EXT_WARN_MSG, extack->_msg))
980 nlh->nlmsg_len = skb_tail_pointer(skb) - b;
990 static bool tc_qdisc_dump_ignore(struct Qdisc *q, bool dump_invisible)
992 if (q->flags & TCQ_F_BUILTIN)
994 if ((q->flags & TCQ_F_INVISIBLE) && !dump_invisible)
1000 static int qdisc_notify(struct net *net, struct sk_buff *oskb,
1001 struct nlmsghdr *n, u32 clid,
1002 struct Qdisc *old, struct Qdisc *new,
1003 struct netlink_ext_ack *extack)
1005 struct sk_buff *skb;
1006 u32 portid = oskb ? NETLINK_CB(oskb).portid : 0;
1008 skb = alloc_skb(NLMSG_GOODSIZE, GFP_KERNEL);
1012 if (old && !tc_qdisc_dump_ignore(old, false)) {
1013 if (tc_fill_qdisc(skb, old, clid, portid, n->nlmsg_seq,
1014 0, RTM_DELQDISC, extack) < 0)
1017 if (new && !tc_qdisc_dump_ignore(new, false)) {
1018 if (tc_fill_qdisc(skb, new, clid, portid, n->nlmsg_seq,
1019 old ? NLM_F_REPLACE : 0, RTM_NEWQDISC, extack) < 0)
1024 return rtnetlink_send(skb, net, portid, RTNLGRP_TC,
1025 n->nlmsg_flags & NLM_F_ECHO);
1032 static void notify_and_destroy(struct net *net, struct sk_buff *skb,
1033 struct nlmsghdr *n, u32 clid,
1034 struct Qdisc *old, struct Qdisc *new,
1035 struct netlink_ext_ack *extack)
1038 qdisc_notify(net, skb, n, clid, old, new, extack);
1044 static void qdisc_clear_nolock(struct Qdisc *sch)
1046 sch->flags &= ~TCQ_F_NOLOCK;
1047 if (!(sch->flags & TCQ_F_CPUSTATS))
1050 free_percpu(sch->cpu_bstats);
1051 free_percpu(sch->cpu_qstats);
1052 sch->cpu_bstats = NULL;
1053 sch->cpu_qstats = NULL;
1054 sch->flags &= ~TCQ_F_CPUSTATS;
1057 /* Graft qdisc "new" to class "classid" of qdisc "parent" or
1060 * When appropriate send a netlink notification using 'skb'
1063 * On success, destroy old qdisc.
1066 static int qdisc_graft(struct net_device *dev, struct Qdisc *parent,
1067 struct sk_buff *skb, struct nlmsghdr *n, u32 classid,
1068 struct Qdisc *new, struct Qdisc *old,
1069 struct netlink_ext_ack *extack)
1071 struct Qdisc *q = old;
1072 struct net *net = dev_net(dev);
1074 if (parent == NULL) {
1075 unsigned int i, num_q, ingress;
1078 num_q = dev->num_tx_queues;
1079 if ((q && q->flags & TCQ_F_INGRESS) ||
1080 (new && new->flags & TCQ_F_INGRESS)) {
1083 if (!dev_ingress_queue(dev)) {
1084 NL_SET_ERR_MSG(extack, "Device does not have an ingress queue");
1089 if (dev->flags & IFF_UP)
1090 dev_deactivate(dev);
1092 qdisc_offload_graft_root(dev, new, old, extack);
1094 if (new && new->ops->attach && !ingress)
1097 for (i = 0; i < num_q; i++) {
1098 struct netdev_queue *dev_queue = dev_ingress_queue(dev);
1101 dev_queue = netdev_get_tx_queue(dev, i);
1103 old = dev_graft_qdisc(dev_queue, new);
1105 qdisc_refcount_inc(new);
1113 old = rtnl_dereference(dev->qdisc);
1114 if (new && !new->ops->attach)
1115 qdisc_refcount_inc(new);
1116 rcu_assign_pointer(dev->qdisc, new ? : &noop_qdisc);
1118 notify_and_destroy(net, skb, n, classid, old, new, extack);
1120 if (new && new->ops->attach)
1121 new->ops->attach(new);
1123 notify_and_destroy(net, skb, n, classid, old, new, extack);
1126 if (dev->flags & IFF_UP)
1129 const struct Qdisc_class_ops *cops = parent->ops->cl_ops;
1133 /* Only support running class lockless if parent is lockless */
1134 if (new && (new->flags & TCQ_F_NOLOCK) && !(parent->flags & TCQ_F_NOLOCK))
1135 qdisc_clear_nolock(new);
1137 if (!cops || !cops->graft)
1140 cl = cops->find(parent, classid);
1142 NL_SET_ERR_MSG(extack, "Specified class not found");
1146 if (new && new->ops == &noqueue_qdisc_ops) {
1147 NL_SET_ERR_MSG(extack, "Cannot assign noqueue to a class");
1151 err = cops->graft(parent, cl, new, &old, extack);
1154 notify_and_destroy(net, skb, n, classid, old, new, extack);
1159 static int qdisc_block_indexes_set(struct Qdisc *sch, struct nlattr **tca,
1160 struct netlink_ext_ack *extack)
1164 if (tca[TCA_INGRESS_BLOCK]) {
1165 block_index = nla_get_u32(tca[TCA_INGRESS_BLOCK]);
1168 NL_SET_ERR_MSG(extack, "Ingress block index cannot be 0");
1171 if (!sch->ops->ingress_block_set) {
1172 NL_SET_ERR_MSG(extack, "Ingress block sharing is not supported");
1175 sch->ops->ingress_block_set(sch, block_index);
1177 if (tca[TCA_EGRESS_BLOCK]) {
1178 block_index = nla_get_u32(tca[TCA_EGRESS_BLOCK]);
1181 NL_SET_ERR_MSG(extack, "Egress block index cannot be 0");
1184 if (!sch->ops->egress_block_set) {
1185 NL_SET_ERR_MSG(extack, "Egress block sharing is not supported");
1188 sch->ops->egress_block_set(sch, block_index);
1194 Allocate and initialize new qdisc.
1196 Parameters are passed via opt.
1199 static struct Qdisc *qdisc_create(struct net_device *dev,
1200 struct netdev_queue *dev_queue,
1201 u32 parent, u32 handle,
1202 struct nlattr **tca, int *errp,
1203 struct netlink_ext_ack *extack)
1206 struct nlattr *kind = tca[TCA_KIND];
1208 struct Qdisc_ops *ops;
1209 struct qdisc_size_table *stab;
1211 ops = qdisc_lookup_ops(kind);
1212 #ifdef CONFIG_MODULES
1213 if (ops == NULL && kind != NULL) {
1214 char name[IFNAMSIZ];
1215 if (nla_strscpy(name, kind, IFNAMSIZ) >= 0) {
1216 /* We dropped the RTNL semaphore in order to
1217 * perform the module load. So, even if we
1218 * succeeded in loading the module we have to
1219 * tell the caller to replay the request. We
1220 * indicate this using -EAGAIN.
1221 * We replay the request because the device may
1222 * go away in the meantime.
1225 request_module("sch_%s", name);
1227 ops = qdisc_lookup_ops(kind);
1229 /* We will try qdisc_lookup_ops again,
1230 * so don't keep a reference.
1232 module_put(ops->owner);
1242 NL_SET_ERR_MSG(extack, "Specified qdisc kind is unknown");
1246 sch = qdisc_alloc(dev_queue, ops, extack);
1252 sch->parent = parent;
1254 if (handle == TC_H_INGRESS) {
1255 sch->flags |= TCQ_F_INGRESS;
1256 handle = TC_H_MAKE(TC_H_INGRESS, 0);
1259 handle = qdisc_alloc_handle(dev);
1261 NL_SET_ERR_MSG(extack, "Maximum number of qdisc handles was exceeded");
1266 if (!netif_is_multiqueue(dev))
1267 sch->flags |= TCQ_F_ONETXQUEUE;
1270 sch->handle = handle;
1272 /* This exists to stay backward compatible with a userspace
1273 * loophole that allowed userspace to get the IFF_NO_QUEUE
1274 * facility on older kernels by setting tx_queue_len=0 (prior
1275 * to qdisc init), and then forgetting to reinit tx_queue_len
1276 * before attaching a qdisc again.
1278 if ((dev->priv_flags & IFF_NO_QUEUE) && (dev->tx_queue_len == 0)) {
1279 dev->tx_queue_len = DEFAULT_TX_QUEUE_LEN;
1280 netdev_info(dev, "Caught tx_queue_len zero misconfig\n");
1283 err = qdisc_block_indexes_set(sch, tca, extack);
1287 if (tca[TCA_STAB]) {
1288 stab = qdisc_get_stab(tca[TCA_STAB], extack);
1290 err = PTR_ERR(stab);
1293 rcu_assign_pointer(sch->stab, stab);
1297 err = ops->init(sch, tca[TCA_OPTIONS], extack);
1302 if (tca[TCA_RATE]) {
1304 if (sch->flags & TCQ_F_MQROOT) {
1305 NL_SET_ERR_MSG(extack, "Cannot attach rate estimator to a multi-queue root qdisc");
1309 err = gen_new_estimator(&sch->bstats,
1316 NL_SET_ERR_MSG(extack, "Failed to generate new estimator");
1321 qdisc_hash_add(sch, false);
1322 trace_qdisc_create(ops, dev, parent);
1327 /* Even if ops->init() failed, we call ops->destroy()
1328 * like qdisc_create_dflt().
1332 qdisc_put_stab(rtnl_dereference(sch->stab));
1334 netdev_put(dev, &sch->dev_tracker);
1337 module_put(ops->owner);
1343 static int qdisc_change(struct Qdisc *sch, struct nlattr **tca,
1344 struct netlink_ext_ack *extack)
1346 struct qdisc_size_table *ostab, *stab = NULL;
1349 if (tca[TCA_OPTIONS]) {
1350 if (!sch->ops->change) {
1351 NL_SET_ERR_MSG(extack, "Change operation not supported by specified qdisc");
1354 if (tca[TCA_INGRESS_BLOCK] || tca[TCA_EGRESS_BLOCK]) {
1355 NL_SET_ERR_MSG(extack, "Change of blocks is not supported");
1358 err = sch->ops->change(sch, tca[TCA_OPTIONS], extack);
1363 if (tca[TCA_STAB]) {
1364 stab = qdisc_get_stab(tca[TCA_STAB], extack);
1366 return PTR_ERR(stab);
1369 ostab = rtnl_dereference(sch->stab);
1370 rcu_assign_pointer(sch->stab, stab);
1371 qdisc_put_stab(ostab);
1373 if (tca[TCA_RATE]) {
1374 /* NB: ignores errors from replace_estimator
1375 because change can't be undone. */
1376 if (sch->flags & TCQ_F_MQROOT)
1378 gen_replace_estimator(&sch->bstats,
1389 struct check_loop_arg {
1390 struct qdisc_walker w;
1395 static int check_loop_fn(struct Qdisc *q, unsigned long cl,
1396 struct qdisc_walker *w);
1398 static int check_loop(struct Qdisc *q, struct Qdisc *p, int depth)
1400 struct check_loop_arg arg;
1402 if (q->ops->cl_ops == NULL)
1405 arg.w.stop = arg.w.skip = arg.w.count = 0;
1406 arg.w.fn = check_loop_fn;
1409 q->ops->cl_ops->walk(q, &arg.w);
1410 return arg.w.stop ? -ELOOP : 0;
1414 check_loop_fn(struct Qdisc *q, unsigned long cl, struct qdisc_walker *w)
1417 const struct Qdisc_class_ops *cops = q->ops->cl_ops;
1418 struct check_loop_arg *arg = (struct check_loop_arg *)w;
1420 leaf = cops->leaf(q, cl);
1422 if (leaf == arg->p || arg->depth > 7)
1424 return check_loop(leaf, arg->p, arg->depth + 1);
1429 const struct nla_policy rtm_tca_policy[TCA_MAX + 1] = {
1430 [TCA_KIND] = { .type = NLA_STRING },
1431 [TCA_RATE] = { .type = NLA_BINARY,
1432 .len = sizeof(struct tc_estimator) },
1433 [TCA_STAB] = { .type = NLA_NESTED },
1434 [TCA_DUMP_INVISIBLE] = { .type = NLA_FLAG },
1435 [TCA_CHAIN] = { .type = NLA_U32 },
1436 [TCA_INGRESS_BLOCK] = { .type = NLA_U32 },
1437 [TCA_EGRESS_BLOCK] = { .type = NLA_U32 },
1444 static int tc_get_qdisc(struct sk_buff *skb, struct nlmsghdr *n,
1445 struct netlink_ext_ack *extack)
1447 struct net *net = sock_net(skb->sk);
1448 struct tcmsg *tcm = nlmsg_data(n);
1449 struct nlattr *tca[TCA_MAX + 1];
1450 struct net_device *dev;
1452 struct Qdisc *q = NULL;
1453 struct Qdisc *p = NULL;
1456 err = nlmsg_parse_deprecated(n, sizeof(*tcm), tca, TCA_MAX,
1457 rtm_tca_policy, extack);
1461 dev = __dev_get_by_index(net, tcm->tcm_ifindex);
1465 clid = tcm->tcm_parent;
1467 if (clid != TC_H_ROOT) {
1468 if (TC_H_MAJ(clid) != TC_H_MAJ(TC_H_INGRESS)) {
1469 p = qdisc_lookup(dev, TC_H_MAJ(clid));
1471 NL_SET_ERR_MSG(extack, "Failed to find qdisc with specified classid");
1474 q = qdisc_leaf(p, clid);
1475 } else if (dev_ingress_queue(dev)) {
1476 q = dev_ingress_queue(dev)->qdisc_sleeping;
1479 q = rtnl_dereference(dev->qdisc);
1482 NL_SET_ERR_MSG(extack, "Cannot find specified qdisc on specified device");
1486 if (tcm->tcm_handle && q->handle != tcm->tcm_handle) {
1487 NL_SET_ERR_MSG(extack, "Invalid handle");
1491 q = qdisc_lookup(dev, tcm->tcm_handle);
1493 NL_SET_ERR_MSG(extack, "Failed to find qdisc with specified handle");
1498 if (tca[TCA_KIND] && nla_strcmp(tca[TCA_KIND], q->ops->id)) {
1499 NL_SET_ERR_MSG(extack, "Invalid qdisc name");
1503 if (n->nlmsg_type == RTM_DELQDISC) {
1505 NL_SET_ERR_MSG(extack, "Classid cannot be zero");
1508 if (q->handle == 0) {
1509 NL_SET_ERR_MSG(extack, "Cannot delete qdisc with handle of zero");
1512 err = qdisc_graft(dev, p, skb, n, clid, NULL, q, extack);
1516 qdisc_notify(net, skb, n, clid, NULL, q, NULL);
1522 * Create/change qdisc.
1525 static int tc_modify_qdisc(struct sk_buff *skb, struct nlmsghdr *n,
1526 struct netlink_ext_ack *extack)
1528 struct net *net = sock_net(skb->sk);
1530 struct nlattr *tca[TCA_MAX + 1];
1531 struct net_device *dev;
1533 struct Qdisc *q, *p;
1537 /* Reinit, just in case something touches this. */
1538 err = nlmsg_parse_deprecated(n, sizeof(*tcm), tca, TCA_MAX,
1539 rtm_tca_policy, extack);
1543 tcm = nlmsg_data(n);
1544 clid = tcm->tcm_parent;
1547 dev = __dev_get_by_index(net, tcm->tcm_ifindex);
1553 if (clid != TC_H_ROOT) {
1554 if (clid != TC_H_INGRESS) {
1555 p = qdisc_lookup(dev, TC_H_MAJ(clid));
1557 NL_SET_ERR_MSG(extack, "Failed to find specified qdisc");
1560 q = qdisc_leaf(p, clid);
1561 } else if (dev_ingress_queue_create(dev)) {
1562 q = dev_ingress_queue(dev)->qdisc_sleeping;
1565 q = rtnl_dereference(dev->qdisc);
1568 /* It may be default qdisc, ignore it */
1569 if (q && q->handle == 0)
1572 if (!q || !tcm->tcm_handle || q->handle != tcm->tcm_handle) {
1573 if (tcm->tcm_handle) {
1574 if (q && !(n->nlmsg_flags & NLM_F_REPLACE)) {
1575 NL_SET_ERR_MSG(extack, "NLM_F_REPLACE needed to override");
1578 if (TC_H_MIN(tcm->tcm_handle)) {
1579 NL_SET_ERR_MSG(extack, "Invalid minor handle");
1582 q = qdisc_lookup(dev, tcm->tcm_handle);
1584 goto create_n_graft;
1585 if (n->nlmsg_flags & NLM_F_EXCL) {
1586 NL_SET_ERR_MSG(extack, "Exclusivity flag on, cannot override");
1589 if (tca[TCA_KIND] &&
1590 nla_strcmp(tca[TCA_KIND], q->ops->id)) {
1591 NL_SET_ERR_MSG(extack, "Invalid qdisc name");
1595 (p && check_loop(q, p, 0))) {
1596 NL_SET_ERR_MSG(extack, "Qdisc parent/child loop detected");
1599 qdisc_refcount_inc(q);
1603 goto create_n_graft;
1605 /* This magic test requires explanation.
1607 * We know that some child q is already
1608 * attached to this parent and we have a choice:
1609 * either to change it or to create/graft a new one.
1611 * 1. We are allowed to create/graft only
1612 * if CREATE and REPLACE flags are set.
1614 * 2. If EXCL is set, the requester wanted to say
1615 * that the qdisc tcm_handle is not expected
1616 * to exist, so we choose create/graft too.
1618 * 3. The last case is when no flags are set.
1619 * Alas, it is sort of a hole in the API; we
1620 * cannot decide what to do unambiguously.
1621 * For now we select create/graft if the
1622 * user gave a KIND which does not match the existing one.
1624 if ((n->nlmsg_flags & NLM_F_CREATE) &&
1625 (n->nlmsg_flags & NLM_F_REPLACE) &&
1626 ((n->nlmsg_flags & NLM_F_EXCL) ||
1628 nla_strcmp(tca[TCA_KIND], q->ops->id))))
1629 goto create_n_graft;
1633 if (!tcm->tcm_handle) {
1634 NL_SET_ERR_MSG(extack, "Handle cannot be zero");
1637 q = qdisc_lookup(dev, tcm->tcm_handle);
1640 /* Change qdisc parameters */
1642 NL_SET_ERR_MSG(extack, "Specified qdisc not found");
1645 if (n->nlmsg_flags & NLM_F_EXCL) {
1646 NL_SET_ERR_MSG(extack, "Exclusivity flag on, cannot modify");
1649 if (tca[TCA_KIND] && nla_strcmp(tca[TCA_KIND], q->ops->id)) {
1650 NL_SET_ERR_MSG(extack, "Invalid qdisc name");
1653 err = qdisc_change(q, tca, extack);
1655 qdisc_notify(net, skb, n, clid, NULL, q, extack);
1659 if (!(n->nlmsg_flags & NLM_F_CREATE)) {
1660 NL_SET_ERR_MSG(extack, "Qdisc not found. To create specify NLM_F_CREATE flag");
1663 if (clid == TC_H_INGRESS) {
1664 if (dev_ingress_queue(dev)) {
1665 q = qdisc_create(dev, dev_ingress_queue(dev),
1666 tcm->tcm_parent, tcm->tcm_parent,
1669 NL_SET_ERR_MSG(extack, "Cannot find ingress queue for specified device");
1673 struct netdev_queue *dev_queue;
1675 if (p && p->ops->cl_ops && p->ops->cl_ops->select_queue)
1676 dev_queue = p->ops->cl_ops->select_queue(p, tcm);
1678 dev_queue = p->dev_queue;
1680 dev_queue = netdev_get_tx_queue(dev, 0);
1682 q = qdisc_create(dev, dev_queue,
1683 tcm->tcm_parent, tcm->tcm_handle,
1693 err = qdisc_graft(dev, p, skb, n, clid, q, NULL, extack);
1703 static int tc_dump_qdisc_root(struct Qdisc *root, struct sk_buff *skb,
1704 struct netlink_callback *cb,
1705 int *q_idx_p, int s_q_idx, bool recur,
1706 bool dump_invisible)
1708 int ret = 0, q_idx = *q_idx_p;
1716 if (q_idx < s_q_idx) {
1719 if (!tc_qdisc_dump_ignore(q, dump_invisible) &&
1720 tc_fill_qdisc(skb, q, q->parent, NETLINK_CB(cb->skb).portid,
1721 cb->nlh->nlmsg_seq, NLM_F_MULTI,
1722 RTM_NEWQDISC, NULL) <= 0)
1727 /* If dumping singletons, there is no qdisc_dev(root) and the singleton
1728 * itself has already been dumped.
1730 * If we've already dumped the top-level (ingress) qdisc above and the global
1731 * qdisc hashtable, we don't want to hit it again
1733 if (!qdisc_dev(root) || !recur)
1736 hash_for_each(qdisc_dev(root)->qdisc_hash, b, q, hash) {
1737 if (q_idx < s_q_idx) {
1741 if (!tc_qdisc_dump_ignore(q, dump_invisible) &&
1742 tc_fill_qdisc(skb, q, q->parent, NETLINK_CB(cb->skb).portid,
1743 cb->nlh->nlmsg_seq, NLM_F_MULTI,
1744 RTM_NEWQDISC, NULL) <= 0)
1757 static int tc_dump_qdisc(struct sk_buff *skb, struct netlink_callback *cb)
1759 struct net *net = sock_net(skb->sk);
1762 struct net_device *dev;
1763 const struct nlmsghdr *nlh = cb->nlh;
1764 struct nlattr *tca[TCA_MAX + 1];
1767 s_idx = cb->args[0];
1768 s_q_idx = q_idx = cb->args[1];
1773 err = nlmsg_parse_deprecated(nlh, sizeof(struct tcmsg), tca, TCA_MAX,
1774 rtm_tca_policy, cb->extack);
1778 for_each_netdev(net, dev) {
1779 struct netdev_queue *dev_queue;
1787 if (tc_dump_qdisc_root(rtnl_dereference(dev->qdisc),
1788 skb, cb, &q_idx, s_q_idx,
1789 true, tca[TCA_DUMP_INVISIBLE]) < 0)
1792 dev_queue = dev_ingress_queue(dev);
1794 tc_dump_qdisc_root(dev_queue->qdisc_sleeping, skb, cb,
1795 &q_idx, s_q_idx, false,
1796 tca[TCA_DUMP_INVISIBLE]) < 0)
1805 cb->args[1] = q_idx;
1812 /************************************************
1813 * Traffic classes manipulation. *
1814 ************************************************/
1816 static int tc_fill_tclass(struct sk_buff *skb, struct Qdisc *q,
1817 unsigned long cl, u32 portid, u32 seq, u16 flags,
1818 int event, struct netlink_ext_ack *extack)
1821 struct nlmsghdr *nlh;
1822 unsigned char *b = skb_tail_pointer(skb);
1824 const struct Qdisc_class_ops *cl_ops = q->ops->cl_ops;
1827 nlh = nlmsg_put(skb, portid, seq, event, sizeof(*tcm), flags);
1829 goto out_nlmsg_trim;
1830 tcm = nlmsg_data(nlh);
1831 tcm->tcm_family = AF_UNSPEC;
1834 tcm->tcm_ifindex = qdisc_dev(q)->ifindex;
1835 tcm->tcm_parent = q->handle;
1836 tcm->tcm_handle = q->handle;
1838 if (nla_put_string(skb, TCA_KIND, q->ops->id))
1839 goto nla_put_failure;
1840 if (cl_ops->dump && cl_ops->dump(q, cl, skb, tcm) < 0)
1841 goto nla_put_failure;
1843 if (gnet_stats_start_copy_compat(skb, TCA_STATS2, TCA_STATS, TCA_XSTATS,
1844 NULL, &d, TCA_PAD) < 0)
1845 goto nla_put_failure;
1847 if (cl_ops->dump_stats && cl_ops->dump_stats(q, cl, &d) < 0)
1848 goto nla_put_failure;
1850 if (gnet_stats_finish_copy(&d) < 0)
1851 goto nla_put_failure;
1853 if (extack && extack->_msg &&
1854 nla_put_string(skb, TCA_EXT_WARN_MSG, extack->_msg))
1855 goto out_nlmsg_trim;
1857 nlh->nlmsg_len = skb_tail_pointer(skb) - b;
1867 static int tclass_notify(struct net *net, struct sk_buff *oskb,
1868 struct nlmsghdr *n, struct Qdisc *q,
1869 unsigned long cl, int event, struct netlink_ext_ack *extack)
1871 struct sk_buff *skb;
1872 u32 portid = oskb ? NETLINK_CB(oskb).portid : 0;
1874 skb = alloc_skb(NLMSG_GOODSIZE, GFP_KERNEL);
1878 if (tc_fill_tclass(skb, q, cl, portid, n->nlmsg_seq, 0, event, extack) < 0) {
1883 return rtnetlink_send(skb, net, portid, RTNLGRP_TC,
1884 n->nlmsg_flags & NLM_F_ECHO);
1887 static int tclass_del_notify(struct net *net,
1888 const struct Qdisc_class_ops *cops,
1889 struct sk_buff *oskb, struct nlmsghdr *n,
1890 struct Qdisc *q, unsigned long cl,
1891 struct netlink_ext_ack *extack)
1893 u32 portid = oskb ? NETLINK_CB(oskb).portid : 0;
1894 struct sk_buff *skb;
1900 skb = alloc_skb(NLMSG_GOODSIZE, GFP_KERNEL);
1904 if (tc_fill_tclass(skb, q, cl, portid, n->nlmsg_seq, 0,
1905 RTM_DELTCLASS, extack) < 0) {
1910 err = cops->delete(q, cl, extack);
1916 err = rtnetlink_send(skb, net, portid, RTNLGRP_TC,
1917 n->nlmsg_flags & NLM_F_ECHO);
1921 #ifdef CONFIG_NET_CLS
1923 struct tcf_bind_args {
1924 struct tcf_walker w;
1930 static int tcf_node_bind(struct tcf_proto *tp, void *n, struct tcf_walker *arg)
1932 struct tcf_bind_args *a = (void *)arg;
1934 if (n && tp->ops->bind_class) {
1935 struct Qdisc *q = tcf_block_q(tp->chain->block);
1938 tp->ops->bind_class(n, a->classid, a->cl, q, a->base);
1944 struct tc_bind_class_args {
1945 struct qdisc_walker w;
1946 unsigned long new_cl;
1951 static int tc_bind_class_walker(struct Qdisc *q, unsigned long cl,
1952 struct qdisc_walker *w)
1954 struct tc_bind_class_args *a = (struct tc_bind_class_args *)w;
1955 const struct Qdisc_class_ops *cops = q->ops->cl_ops;
1956 struct tcf_block *block;
1957 struct tcf_chain *chain;
1959 block = cops->tcf_block(q, cl, NULL);
1962 for (chain = tcf_get_next_chain(block, NULL);
1964 chain = tcf_get_next_chain(block, chain)) {
1965 struct tcf_proto *tp;
1967 for (tp = tcf_get_next_proto(chain, NULL);
1968 tp; tp = tcf_get_next_proto(chain, tp)) {
1969 struct tcf_bind_args arg = {};
1971 arg.w.fn = tcf_node_bind;
1972 arg.classid = a->clid;
1975 tp->ops->walk(tp, &arg.w, true);
1982 static void tc_bind_tclass(struct Qdisc *q, u32 portid, u32 clid,
1983 unsigned long new_cl)
1985 const struct Qdisc_class_ops *cops = q->ops->cl_ops;
1986 struct tc_bind_class_args args = {};
1988 if (!cops->tcf_block)
1990 args.portid = portid;
1992 args.new_cl = new_cl;
1993 args.w.fn = tc_bind_class_walker;
1994 q->ops->cl_ops->walk(q, &args.w);
1999 static void tc_bind_tclass(struct Qdisc *q, u32 portid, u32 clid,
2000 unsigned long new_cl)
2006 static int tc_ctl_tclass(struct sk_buff *skb, struct nlmsghdr *n,
2007 struct netlink_ext_ack *extack)
2009 struct net *net = sock_net(skb->sk);
2010 struct tcmsg *tcm = nlmsg_data(n);
2011 struct nlattr *tca[TCA_MAX + 1];
2012 struct net_device *dev;
2013 struct Qdisc *q = NULL;
2014 const struct Qdisc_class_ops *cops;
2015 unsigned long cl = 0;
2016 unsigned long new_cl;
2022 err = nlmsg_parse_deprecated(n, sizeof(*tcm), tca, TCA_MAX,
2023 rtm_tca_policy, extack);
2027 dev = __dev_get_by_index(net, tcm->tcm_ifindex);
2032 parent == TC_H_UNSPEC - unspecified parent.
2033 parent == TC_H_ROOT - class is root, which has no parent.
2034 parent == X:0 - parent is root class.
2035 parent == X:Y - parent is a node in hierarchy.
2036 parent == 0:Y - parent is X:Y, where X:0 is qdisc.
2038 handle == 0:0 - generate handle from kernel pool.
2039 handle == 0:Y - class is X:Y, where X:0 is qdisc.
2040 handle == X:Y - clear.
2041 handle == X:0 - root class.
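For example (tc parses handles as hexadecimal), the request
"tc class add dev eth0 parent 1: classid 1:10 ..." reaches this
function with tcm_parent == 0x00010000 (major 1, minor 0) and
tcm_handle == 0x00010010 (class 1:10).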
2044 /* Step 1. Determine qdisc handle X:0 */
2046 portid = tcm->tcm_parent;
2047 clid = tcm->tcm_handle;
2048 qid = TC_H_MAJ(clid);
2050 if (portid != TC_H_ROOT) {
2051 u32 qid1 = TC_H_MAJ(portid);
2054 /* If both majors are known, they must be identical. */
2059 } else if (qid == 0)
2060 qid = rtnl_dereference(dev->qdisc)->handle;
2062 /* Now qid is a genuine qdisc handle consistent
2063 * with both parent and child.
2065 * TC_H_MAJ(portid) may still be unspecified; complete it now.
2068 portid = TC_H_MAKE(qid, portid);
2071 qid = rtnl_dereference(dev->qdisc)->handle;
2074 /* OK. Locate qdisc */
2075 q = qdisc_lookup(dev, qid);
2079 /* And check that it supports classes */
2080 cops = q->ops->cl_ops;
2084 /* Now try to get class */
2086 if (portid == TC_H_ROOT)
2089 clid = TC_H_MAKE(qid, clid);
2092 cl = cops->find(q, clid);
2096 if (n->nlmsg_type != RTM_NEWTCLASS ||
2097 !(n->nlmsg_flags & NLM_F_CREATE))
2100 switch (n->nlmsg_type) {
2103 if (n->nlmsg_flags & NLM_F_EXCL)
2107 err = tclass_del_notify(net, cops, skb, n, q, cl, extack);
2108 /* Unbind the deleted class's filters by rebinding them to classid 0 */
2109 tc_bind_tclass(q, portid, clid, 0);
2112 err = tclass_notify(net, skb, n, q, cl, RTM_NEWTCLASS, extack);
2120 if (tca[TCA_INGRESS_BLOCK] || tca[TCA_EGRESS_BLOCK]) {
2121 NL_SET_ERR_MSG(extack, "Shared blocks are not supported for classes");
2128 err = cops->change(q, clid, portid, tca, &new_cl, extack);
2130 tclass_notify(net, skb, n, q, new_cl, RTM_NEWTCLASS, extack);
2131 /* We just created a new class; need to do the reverse binding. */
2133 tc_bind_tclass(q, portid, clid, new_cl);
2139 struct qdisc_dump_args {
2140 struct qdisc_walker w;
2141 struct sk_buff *skb;
2142 struct netlink_callback *cb;
2145 static int qdisc_class_dump(struct Qdisc *q, unsigned long cl,
2146 struct qdisc_walker *arg)
2148 struct qdisc_dump_args *a = (struct qdisc_dump_args *)arg;
2150 return tc_fill_tclass(a->skb, q, cl, NETLINK_CB(a->cb->skb).portid,
2151 a->cb->nlh->nlmsg_seq, NLM_F_MULTI,
2152 RTM_NEWTCLASS, NULL);
2155 static int tc_dump_tclass_qdisc(struct Qdisc *q, struct sk_buff *skb,
2156 struct tcmsg *tcm, struct netlink_callback *cb,
2159 struct qdisc_dump_args arg;
2161 if (tc_qdisc_dump_ignore(q, false) ||
2162 *t_p < s_t || !q->ops->cl_ops ||
2164 TC_H_MAJ(tcm->tcm_parent) != q->handle)) {
2169 memset(&cb->args[1], 0, sizeof(cb->args)-sizeof(cb->args[0]));
2170 arg.w.fn = qdisc_class_dump;
2174 arg.w.skip = cb->args[1];
2176 q->ops->cl_ops->walk(q, &arg.w);
2177 cb->args[1] = arg.w.count;
2184 static int tc_dump_tclass_root(struct Qdisc *root, struct sk_buff *skb,
2185 struct tcmsg *tcm, struct netlink_callback *cb,
2186 int *t_p, int s_t, bool recur)
2194 if (tc_dump_tclass_qdisc(root, skb, tcm, cb, t_p, s_t) < 0)
2197 if (!qdisc_dev(root) || !recur)
2200 if (tcm->tcm_parent) {
2201 q = qdisc_match_from_root(root, TC_H_MAJ(tcm->tcm_parent));
2202 if (q && q != root &&
2203 tc_dump_tclass_qdisc(q, skb, tcm, cb, t_p, s_t) < 0)
2207 hash_for_each(qdisc_dev(root)->qdisc_hash, b, q, hash) {
2208 if (tc_dump_tclass_qdisc(q, skb, tcm, cb, t_p, s_t) < 0)
2215 static int tc_dump_tclass(struct sk_buff *skb, struct netlink_callback *cb)
2217 struct tcmsg *tcm = nlmsg_data(cb->nlh);
2218 struct net *net = sock_net(skb->sk);
2219 struct netdev_queue *dev_queue;
2220 struct net_device *dev;
2223 if (nlmsg_len(cb->nlh) < sizeof(*tcm))
2225 dev = dev_get_by_index(net, tcm->tcm_ifindex);
2232 if (tc_dump_tclass_root(rtnl_dereference(dev->qdisc),
2233 skb, tcm, cb, &t, s_t, true) < 0)
2236 dev_queue = dev_ingress_queue(dev);
2238 tc_dump_tclass_root(dev_queue->qdisc_sleeping, skb, tcm, cb,
2239 &t, s_t, false) < 0)
2249 #ifdef CONFIG_PROC_FS
2250 static int psched_show(struct seq_file *seq, void *v)
2252 seq_printf(seq, "%08x %08x %08x %08x\n",
2253 (u32)NSEC_PER_USEC, (u32)PSCHED_TICKS2NS(1),
2255 (u32)NSEC_PER_SEC / hrtimer_resolution);
2260 static int __net_init psched_net_init(struct net *net)
2262 struct proc_dir_entry *e;
2264 e = proc_create_single("psched", 0, net->proc_net, psched_show);
2271 static void __net_exit psched_net_exit(struct net *net)
2273 remove_proc_entry("psched", net->proc_net);
2276 static int __net_init psched_net_init(struct net *net)
2281 static void __net_exit psched_net_exit(struct net *net)
2286 static struct pernet_operations psched_net_ops = {
2287 .init = psched_net_init,
2288 .exit = psched_net_exit,
2291 DEFINE_STATIC_KEY_FALSE(tc_skip_wrapper);
2293 static int __init pktsched_init(void)
2297 err = register_pernet_subsys(&psched_net_ops);
2299 pr_err("pktsched_init: "
2300 "cannot initialize per netns operations\n");
2304 register_qdisc(&pfifo_fast_ops);
2305 register_qdisc(&pfifo_qdisc_ops);
2306 register_qdisc(&bfifo_qdisc_ops);
2307 register_qdisc(&pfifo_head_drop_qdisc_ops);
2308 register_qdisc(&mq_qdisc_ops);
2309 register_qdisc(&noqueue_qdisc_ops);
2311 rtnl_register(PF_UNSPEC, RTM_NEWQDISC, tc_modify_qdisc, NULL, 0);
2312 rtnl_register(PF_UNSPEC, RTM_DELQDISC, tc_get_qdisc, NULL, 0);
2313 rtnl_register(PF_UNSPEC, RTM_GETQDISC, tc_get_qdisc, tc_dump_qdisc,
2315 rtnl_register(PF_UNSPEC, RTM_NEWTCLASS, tc_ctl_tclass, NULL, 0);
2316 rtnl_register(PF_UNSPEC, RTM_DELTCLASS, tc_ctl_tclass, NULL, 0);
2317 rtnl_register(PF_UNSPEC, RTM_GETTCLASS, tc_ctl_tclass, tc_dump_tclass,
2325 subsys_initcall(pktsched_init);