/*
 * net/sched/sch_mq.c		Classful multiqueue dummy scheduler
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * version 2 as published by the Free Software Foundation.
 */

#include <linux/types.h>
#include <linux/slab.h>
#include <linux/kernel.h>
#include <linux/export.h>
#include <linux/string.h>
#include <linux/errno.h>
#include <linux/skbuff.h>
#include <net/netlink.h>
#include <net/pkt_sched.h>
#include <net/sch_generic.h>

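/* Private data: one pointer per TX queue, holding the child qdiscs
 * between mq_init() and mq_attach().
 */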
struct mq_sched {
	struct Qdisc		**qdiscs;
};

static void mq_destroy(struct Qdisc *sch)
{
	struct net_device *dev = qdisc_dev(sch);
	struct mq_sched *priv = qdisc_priv(sch);
	unsigned int ntx;

	if (!priv->qdiscs)
		return;
	for (ntx = 0; ntx < dev->num_tx_queues && priv->qdiscs[ntx]; ntx++)
		qdisc_destroy(priv->qdiscs[ntx]);
	kfree(priv->qdiscs);
}

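/* Create one default child qdisc per hardware TX queue.  The children are
 * only stored in priv->qdiscs here; they are grafted onto the device
 * queues later, in mq_attach().
 */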
static int mq_init(struct Qdisc *sch, struct nlattr *opt,
		   struct netlink_ext_ack *extack)
{
	struct net_device *dev = qdisc_dev(sch);
	struct mq_sched *priv = qdisc_priv(sch);
	struct netdev_queue *dev_queue;
	struct Qdisc *qdisc;
	unsigned int ntx;

	if (sch->parent != TC_H_ROOT)
		return -EOPNOTSUPP;

	if (!netif_is_multiqueue(dev))
		return -EOPNOTSUPP;

	/* pre-allocate qdiscs, attachment can't fail */
	priv->qdiscs = kcalloc(dev->num_tx_queues, sizeof(priv->qdiscs[0]),
			       GFP_KERNEL);
	if (!priv->qdiscs)
		return -ENOMEM;

	for (ntx = 0; ntx < dev->num_tx_queues; ntx++) {
		dev_queue = netdev_get_tx_queue(dev, ntx);
		qdisc = qdisc_create_dflt(dev_queue, get_default_qdisc_ops(dev, ntx),
					  TC_H_MAKE(TC_H_MAJ(sch->handle),
						    TC_H_MIN(ntx + 1)),
					  extack);
		if (!qdisc)
			return -ENOMEM;
		priv->qdiscs[ntx] = qdisc;
		qdisc->flags |= TCQ_F_ONETXQUEUE | TCQ_F_NOPARENT;
	}

	sch->flags |= TCQ_F_MQROOT;
	return 0;
}

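/* Graft the qdiscs allocated in mq_init() onto their TX queues and drop
 * the temporary priv->qdiscs array.
 */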
static void mq_attach(struct Qdisc *sch)
{
	struct net_device *dev = qdisc_dev(sch);
	struct mq_sched *priv = qdisc_priv(sch);
	struct Qdisc *qdisc, *old;
	unsigned int ntx;

	for (ntx = 0; ntx < dev->num_tx_queues; ntx++) {
		qdisc = priv->qdiscs[ntx];
		old = dev_graft_qdisc(qdisc->dev_queue, qdisc);
		if (old)
			qdisc_destroy(old);
#ifdef CONFIG_NET_SCHED
		if (ntx < dev->real_num_tx_queues)
			qdisc_hash_add(qdisc, false);
#endif
	}
	kfree(priv->qdiscs);
	priv->qdiscs = NULL;
}

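/* Fill the root qdisc's counters by summing the stats of all children. */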
static int mq_dump(struct Qdisc *sch, struct sk_buff *skb)
{
	struct net_device *dev = qdisc_dev(sch);
	struct Qdisc *qdisc;
	unsigned int ntx;
	__u32 qlen = 0;

	sch->q.qlen = 0;
	memset(&sch->bstats, 0, sizeof(sch->bstats));
	memset(&sch->qstats, 0, sizeof(sch->qstats));

	/* MQ supports lockless qdiscs. However, statistics accounting needs
	 * to account for all, none, or a mix of locked and unlocked child
	 * qdiscs. Percpu stats are added to counters in-band and locking
	 * qdisc totals are added at end.
	 */
	for (ntx = 0; ntx < dev->num_tx_queues; ntx++) {
		qdisc = netdev_get_tx_queue(dev, ntx)->qdisc_sleeping;
		spin_lock_bh(qdisc_lock(qdisc));

		if (qdisc_is_percpu_stats(qdisc)) {
			qlen = qdisc_qlen_sum(qdisc);
			__gnet_stats_copy_basic(NULL, &sch->bstats,
						qdisc->cpu_bstats,
						&qdisc->bstats);
			__gnet_stats_copy_queue(&sch->qstats,
						qdisc->cpu_qstats,
						&qdisc->qstats, qlen);
		} else {
			sch->q.qlen		+= qdisc->q.qlen;
			sch->bstats.bytes	+= qdisc->bstats.bytes;
			sch->bstats.packets	+= qdisc->bstats.packets;
			sch->qstats.backlog	+= qdisc->qstats.backlog;
			sch->qstats.drops	+= qdisc->qstats.drops;
			sch->qstats.requeues	+= qdisc->qstats.requeues;
			sch->qstats.overlimits	+= qdisc->qstats.overlimits;
		}

		spin_unlock_bh(qdisc_lock(qdisc));
	}
	return 0;
}

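/* Map a class ID to its TX queue: class minor numbers are 1-based queue
 * indices, i.e. class :1 is TX queue 0.
 */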
static struct netdev_queue *mq_queue_get(struct Qdisc *sch, unsigned long cl)
{
	struct net_device *dev = qdisc_dev(sch);
	unsigned long ntx = cl - 1;

	if (ntx >= dev->num_tx_queues)
		return NULL;
	return netdev_get_tx_queue(dev, ntx);
}

static struct netdev_queue *mq_select_queue(struct Qdisc *sch,
					    struct tcmsg *tcm)
{
	return mq_queue_get(sch, TC_H_MIN(tcm->tcm_parent));
}

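/* Replace the child qdisc of one TX queue.  The device is deactivated
 * around the graft so the queue is not being run while it is swapped.
 */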
static int mq_graft(struct Qdisc *sch, unsigned long cl, struct Qdisc *new,
		    struct Qdisc **old, struct netlink_ext_ack *extack)
{
	struct netdev_queue *dev_queue = mq_queue_get(sch, cl);
	struct net_device *dev = qdisc_dev(sch);

	if (dev->flags & IFF_UP)
		dev_deactivate(dev);

	*old = dev_graft_qdisc(dev_queue, new);
	if (new)
		new->flags |= TCQ_F_ONETXQUEUE | TCQ_F_NOPARENT;
	if (dev->flags & IFF_UP)
		dev_activate(dev);
	return 0;
}

static struct Qdisc *mq_leaf(struct Qdisc *sch, unsigned long cl)
{
	struct netdev_queue *dev_queue = mq_queue_get(sch, cl);

	return dev_queue->qdisc_sleeping;
}

static unsigned long mq_find(struct Qdisc *sch, u32 classid)
{
	unsigned int ntx = TC_H_MIN(classid);

	if (!mq_queue_get(sch, ntx))
		return 0;
	return ntx;
}

static int mq_dump_class(struct Qdisc *sch, unsigned long cl,
			 struct sk_buff *skb, struct tcmsg *tcm)
{
	struct netdev_queue *dev_queue = mq_queue_get(sch, cl);

	tcm->tcm_parent = TC_H_ROOT;
	tcm->tcm_handle |= TC_H_MIN(cl);
	tcm->tcm_info = dev_queue->qdisc_sleeping->handle;
	return 0;
}

static int mq_dump_class_stats(struct Qdisc *sch, unsigned long cl,
			       struct gnet_dump *d)
{
	struct netdev_queue *dev_queue = mq_queue_get(sch, cl);

	sch = dev_queue->qdisc_sleeping;
	if (gnet_stats_copy_basic(&sch->running, d, NULL, &sch->bstats) < 0 ||
	    gnet_stats_copy_queue(d, NULL, &sch->qstats, sch->q.qlen) < 0)
		return -1;
	return 0;
}

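/* Walk all classes (one per TX queue), honouring the skip/stop/count
 * bookkeeping of struct qdisc_walker.
 */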
static void mq_walk(struct Qdisc *sch, struct qdisc_walker *arg)
{
	struct net_device *dev = qdisc_dev(sch);
	unsigned int ntx;

	if (arg->stop)
		return;

	arg->count = arg->skip;
	for (ntx = arg->skip; ntx < dev->num_tx_queues; ntx++) {
		if (arg->fn(sch, ntx + 1, arg) < 0) {
			arg->stop = 1;
			break;
		}
		arg->count++;
	}
}

static const struct Qdisc_class_ops mq_class_ops = {
	.select_queue	= mq_select_queue,
	.graft		= mq_graft,
	.leaf		= mq_leaf,
	.find		= mq_find,
	.walk		= mq_walk,
	.dump		= mq_dump_class,
	.dump_stats	= mq_dump_class_stats,
};

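/* mq is a dummy root: it defines no enqueue/dequeue of its own, packets
 * are queued directly by the per-queue child qdiscs.
 */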
struct Qdisc_ops mq_qdisc_ops __read_mostly = {
	.cl_ops		= &mq_class_ops,
	.id		= "mq",
	.priv_size	= sizeof(struct mq_sched),
	.init		= mq_init,
	.destroy	= mq_destroy,
	.attach		= mq_attach,
	.dump		= mq_dump,
	.owner		= THIS_MODULE,
};