// SPDX-License-Identifier: GPL-2.0-only
/*
 * net/sched/sch_mqprio.c
 *
 * Copyright (c) 2010 John Fastabend <[email protected]>
 */
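/* mqprio is a root-only, classful qdisc that maps packet priorities to
 * hardware traffic classes and attaches one child qdisc per TX queue.
 *
 * For reference, a software-only setup from iproute2 might look like the
 * following (illustrative only; the interface name and queue layout are
 * assumptions, not part of this file):
 *
 *   tc qdisc add dev eth0 root handle 1: mqprio num_tc 3 \
 *      map 0 0 0 0 1 1 1 1 2 2 2 2 2 2 2 2 queues 4@0 4@4 8@8 hw 0
 */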

#include <linux/types.h>
#include <linux/slab.h>
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/errno.h>
#include <linux/skbuff.h>
#include <linux/module.h>
#include <net/netlink.h>
#include <net/pkt_sched.h>
#include <net/sch_generic.h>
#include <net/pkt_cls.h>

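/* Per-qdisc private state: the shadow array of per-queue child qdiscs
 * plus the offload parameters (mode, shaper, per-tc rates) that are
 * handed to the driver via ndo_setup_tc.
 */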
struct mqprio_sched {
	struct Qdisc		**qdiscs;
	u16 mode;
	u16 shaper;
	int hw_offload;
	u32 flags;
	u64 min_rate[TC_QOPT_MAX_QUEUE];
	u64 max_rate[TC_QOPT_MAX_QUEUE];
};

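/* Tear down the qdisc: release any remaining per-queue child qdiscs,
 * then either tell the driver to drop its offloaded configuration or
 * clear the software traffic-class state on the device.
 */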
static void mqprio_destroy(struct Qdisc *sch)
{
	struct net_device *dev = qdisc_dev(sch);
	struct mqprio_sched *priv = qdisc_priv(sch);
	unsigned int ntx;

	if (priv->qdiscs) {
		for (ntx = 0;
		     ntx < dev->num_tx_queues && priv->qdiscs[ntx];
		     ntx++)
			qdisc_put(priv->qdiscs[ntx]);
		kfree(priv->qdiscs);
	}

	if (priv->hw_offload && dev->netdev_ops->ndo_setup_tc) {
		struct tc_mqprio_qopt_offload mqprio = { { 0 } };

		switch (priv->mode) {
		case TC_MQPRIO_MODE_DCB:
		case TC_MQPRIO_MODE_CHANNEL:
			dev->netdev_ops->ndo_setup_tc(dev,
						      TC_SETUP_QDISC_MQPRIO,
						      &mqprio);
			break;
		default:
			return;
		}
	} else {
		netdev_set_num_tc(dev, 0);
	}
}

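/* Validate the user-supplied tc_mqprio_qopt: the traffic-class count,
 * the priority-to-tc map, and (for the software path) that the queue
 * offset/count ranges are in bounds and do not overlap.
 */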
static int mqprio_parse_opt(struct net_device *dev, struct tc_mqprio_qopt *qopt)
{
	int i, j;

	/* Verify num_tc is not out of max range */
	if (qopt->num_tc > TC_MAX_QUEUE)
		return -EINVAL;

	/* Verify priority mapping uses valid tcs */
	for (i = 0; i < TC_BITMASK + 1; i++) {
		if (qopt->prio_tc_map[i] >= qopt->num_tc)
			return -EINVAL;
	}

	/* Limit qopt->hw to the maximum supported offload value. Drivers
	 * have the option of overriding this later if they don't support
	 * a given offload type.
	 */
	if (qopt->hw > TC_MQPRIO_HW_OFFLOAD_MAX)
		qopt->hw = TC_MQPRIO_HW_OFFLOAD_MAX;

	/* If hardware offload is requested we will leave it to the device
	 * to either populate the queue counts itself or to validate the
	 * provided queue counts. If ndo_setup_tc is not present then
	 * hardware doesn't support offload and we should return an error.
	 */
	if (qopt->hw)
		return dev->netdev_ops->ndo_setup_tc ? 0 : -EINVAL;

	for (i = 0; i < qopt->num_tc; i++) {
		unsigned int last = qopt->offset[i] + qopt->count[i];

		/* Verify the queue count is in tx range; a count equal to
		 * real_num_tx_queues indicates the last queue is in use.
		 */
		if (qopt->offset[i] >= dev->real_num_tx_queues ||
		    !qopt->count[i] ||
		    last > dev->real_num_tx_queues)
			return -EINVAL;

		/* Verify that the offset and counts do not overlap */
		for (j = i + 1; j < qopt->num_tc; j++) {
			if (last > qopt->offset[j])
				return -EINVAL;
		}
	}

	return 0;
}

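/* Netlink policy for the attributes that may follow the fixed-size
 * tc_mqprio_qopt payload inside TCA_OPTIONS.
 */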
static const struct nla_policy mqprio_policy[TCA_MQPRIO_MAX + 1] = {
	[TCA_MQPRIO_MODE]	= { .len = sizeof(u16) },
	[TCA_MQPRIO_SHAPER]	= { .len = sizeof(u16) },
	[TCA_MQPRIO_MIN_RATE64]	= { .type = NLA_NESTED },
	[TCA_MQPRIO_MAX_RATE64]	= { .type = NLA_NESTED },
};

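/* Parse the attributes trailing the legacy struct in TCA_OPTIONS. If
 * nothing follows the aligned tc_mqprio_qopt, clear tb[] and succeed.
 */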
static int parse_attr(struct nlattr *tb[], int maxtype, struct nlattr *nla,
		      const struct nla_policy *policy, int len)
{
	int nested_len = nla_len(nla) - NLA_ALIGN(len);

	if (nested_len >= nla_attr_size(0))
		return nla_parse_deprecated(tb, maxtype,
					    nla_data(nla) + NLA_ALIGN(len),
					    nested_len, policy, NULL);

	memset(tb, 0, sizeof(struct nlattr *) * (maxtype + 1));
	return 0;
}

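/* Set up the qdisc: validate the options, parse any extended offload
 * attributes, pre-allocate one child qdisc per TX queue, and either
 * program the hardware via ndo_setup_tc or apply the verified mapping
 * in software.
 */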
static int mqprio_init(struct Qdisc *sch, struct nlattr *opt,
		       struct netlink_ext_ack *extack)
{
	struct net_device *dev = qdisc_dev(sch);
	struct mqprio_sched *priv = qdisc_priv(sch);
	struct netdev_queue *dev_queue;
	struct Qdisc *qdisc;
	int i, err = -EOPNOTSUPP;
	struct tc_mqprio_qopt *qopt = NULL;
	struct nlattr *tb[TCA_MQPRIO_MAX + 1];
	struct nlattr *attr;
	int rem;
	int len;

	BUILD_BUG_ON(TC_MAX_QUEUE != TC_QOPT_MAX_QUEUE);
	BUILD_BUG_ON(TC_BITMASK != TC_QOPT_BITMASK);

	if (sch->parent != TC_H_ROOT)
		return -EOPNOTSUPP;

	if (!netif_is_multiqueue(dev))
		return -EOPNOTSUPP;

	/* make certain we can allocate enough classids to handle queues */
	if (dev->num_tx_queues >= TC_H_MIN_PRIORITY)
		return -ENOMEM;

	if (!opt || nla_len(opt) < sizeof(*qopt))
		return -EINVAL;

	qopt = nla_data(opt);
	if (mqprio_parse_opt(dev, qopt))
		return -EINVAL;

	len = nla_len(opt) - NLA_ALIGN(sizeof(*qopt));
	if (len > 0) {
		err = parse_attr(tb, TCA_MQPRIO_MAX, opt, mqprio_policy,
				 sizeof(*qopt));
		if (err < 0)
			return err;

		if (!qopt->hw)
			return -EINVAL;

		if (tb[TCA_MQPRIO_MODE]) {
			priv->flags |= TC_MQPRIO_F_MODE;
			priv->mode = *(u16 *)nla_data(tb[TCA_MQPRIO_MODE]);
		}

		if (tb[TCA_MQPRIO_SHAPER]) {
			priv->flags |= TC_MQPRIO_F_SHAPER;
			priv->shaper = *(u16 *)nla_data(tb[TCA_MQPRIO_SHAPER]);
		}

		if (tb[TCA_MQPRIO_MIN_RATE64]) {
			if (priv->shaper != TC_MQPRIO_SHAPER_BW_RATE)
				return -EINVAL;
			i = 0;
			nla_for_each_nested(attr, tb[TCA_MQPRIO_MIN_RATE64],
					    rem) {
				if (nla_type(attr) != TCA_MQPRIO_MIN_RATE64)
					return -EINVAL;
				if (i >= qopt->num_tc)
					break;
				priv->min_rate[i] = *(u64 *)nla_data(attr);
				i++;
			}
			priv->flags |= TC_MQPRIO_F_MIN_RATE;
		}

		if (tb[TCA_MQPRIO_MAX_RATE64]) {
			if (priv->shaper != TC_MQPRIO_SHAPER_BW_RATE)
				return -EINVAL;
			i = 0;
			nla_for_each_nested(attr, tb[TCA_MQPRIO_MAX_RATE64],
					    rem) {
				if (nla_type(attr) != TCA_MQPRIO_MAX_RATE64)
					return -EINVAL;
				if (i >= qopt->num_tc)
					break;
				priv->max_rate[i] = *(u64 *)nla_data(attr);
				i++;
			}
			priv->flags |= TC_MQPRIO_F_MAX_RATE;
		}
	}

	/* pre-allocate qdisc, attachment can't fail */
	priv->qdiscs = kcalloc(dev->num_tx_queues, sizeof(priv->qdiscs[0]),
			       GFP_KERNEL);
	if (!priv->qdiscs)
		return -ENOMEM;

	for (i = 0; i < dev->num_tx_queues; i++) {
		dev_queue = netdev_get_tx_queue(dev, i);
		qdisc = qdisc_create_dflt(dev_queue,
					  get_default_qdisc_ops(dev, i),
					  TC_H_MAKE(TC_H_MAJ(sch->handle),
						    TC_H_MIN(i + 1)), extack);
		if (!qdisc)
			return -ENOMEM;

		priv->qdiscs[i] = qdisc;
		qdisc->flags |= TCQ_F_ONETXQUEUE | TCQ_F_NOPARENT;
	}

	/* If the mqprio options indicate that hardware should own the queue
	 * mapping then run ndo_setup_tc, otherwise use the supplied and
	 * verified mapping.
	 */
	if (qopt->hw) {
		struct tc_mqprio_qopt_offload mqprio = {.qopt = *qopt};

		switch (priv->mode) {
		case TC_MQPRIO_MODE_DCB:
			if (priv->shaper != TC_MQPRIO_SHAPER_DCB)
				return -EINVAL;
			break;
		case TC_MQPRIO_MODE_CHANNEL:
			mqprio.flags = priv->flags;
			if (priv->flags & TC_MQPRIO_F_MODE)
				mqprio.mode = priv->mode;
			if (priv->flags & TC_MQPRIO_F_SHAPER)
				mqprio.shaper = priv->shaper;
			if (priv->flags & TC_MQPRIO_F_MIN_RATE)
				for (i = 0; i < mqprio.qopt.num_tc; i++)
					mqprio.min_rate[i] = priv->min_rate[i];
			if (priv->flags & TC_MQPRIO_F_MAX_RATE)
				for (i = 0; i < mqprio.qopt.num_tc; i++)
					mqprio.max_rate[i] = priv->max_rate[i];
			break;
		default:
			return -EINVAL;
		}
		err = dev->netdev_ops->ndo_setup_tc(dev,
						    TC_SETUP_QDISC_MQPRIO,
						    &mqprio);
		if (err)
			return err;

		priv->hw_offload = mqprio.qopt.hw;
	} else {
		netdev_set_num_tc(dev, qopt->num_tc);
		for (i = 0; i < qopt->num_tc; i++)
			netdev_set_tc_queue(dev, i,
					    qopt->count[i], qopt->offset[i]);
	}

	/* Always use supplied priority mappings */
	for (i = 0; i < TC_BITMASK + 1; i++)
		netdev_set_prio_tc_map(dev, i, qopt->prio_tc_map[i]);

	sch->flags |= TCQ_F_MQROOT;
	return 0;
}

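/* Graft the pre-allocated child qdiscs onto their TX queues, releasing
 * any qdiscs already attached there. The shadow array is then freed as
 * the children are now owned by the queues.
 */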
static void mqprio_attach(struct Qdisc *sch)
{
	struct net_device *dev = qdisc_dev(sch);
	struct mqprio_sched *priv = qdisc_priv(sch);
	struct Qdisc *qdisc, *old;
	unsigned int ntx;

	/* Attach underlying qdisc */
	for (ntx = 0; ntx < dev->num_tx_queues; ntx++) {
		qdisc = priv->qdiscs[ntx];
		old = dev_graft_qdisc(qdisc->dev_queue, qdisc);
		if (old)
			qdisc_put(old);
		if (ntx < dev->real_num_tx_queues)
			qdisc_hash_add(qdisc, false);
	}
	kfree(priv->qdiscs);
	priv->qdiscs = NULL;
}

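/* Map a queue classid (1 .. num_tx_queues) to its netdev TX queue. */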
static struct netdev_queue *mqprio_queue_get(struct Qdisc *sch,
					     unsigned long cl)
{
	struct net_device *dev = qdisc_dev(sch);
	unsigned long ntx = cl - 1;

	if (ntx >= dev->num_tx_queues)
		return NULL;
	return netdev_get_tx_queue(dev, ntx);
}

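/* Replace the child qdisc of one TX queue, quiescing the device around
 * the graft if it is up.
 */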
static int mqprio_graft(struct Qdisc *sch, unsigned long cl, struct Qdisc *new,
			struct Qdisc **old, struct netlink_ext_ack *extack)
{
	struct net_device *dev = qdisc_dev(sch);
	struct netdev_queue *dev_queue = mqprio_queue_get(sch, cl);

	if (!dev_queue)
		return -EINVAL;

	if (dev->flags & IFF_UP)
		dev_deactivate(dev);

	*old = dev_graft_qdisc(dev_queue, new);

	if (new)
		new->flags |= TCQ_F_ONETXQUEUE | TCQ_F_NOPARENT;

	if (dev->flags & IFF_UP)
		dev_activate(dev);

	return 0;
}

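/* Emit the per-tc min/max rate tables as nested netlink attributes. */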
static int dump_rates(struct mqprio_sched *priv,
		      struct tc_mqprio_qopt *opt, struct sk_buff *skb)
{
	struct nlattr *nest;
	int i;

	if (priv->flags & TC_MQPRIO_F_MIN_RATE) {
		nest = nla_nest_start_noflag(skb, TCA_MQPRIO_MIN_RATE64);
		if (!nest)
			goto nla_put_failure;

		for (i = 0; i < opt->num_tc; i++) {
			if (nla_put(skb, TCA_MQPRIO_MIN_RATE64,
				    sizeof(priv->min_rate[i]),
				    &priv->min_rate[i]))
				goto nla_put_failure;
		}
		nla_nest_end(skb, nest);
	}

	if (priv->flags & TC_MQPRIO_F_MAX_RATE) {
		nest = nla_nest_start_noflag(skb, TCA_MQPRIO_MAX_RATE64);
		if (!nest)
			goto nla_put_failure;

		for (i = 0; i < opt->num_tc; i++) {
			if (nla_put(skb, TCA_MQPRIO_MAX_RATE64,
				    sizeof(priv->max_rate[i]),
				    &priv->max_rate[i]))
				goto nla_put_failure;
		}
		nla_nest_end(skb, nest);
	}
	return 0;

nla_put_failure:
	nla_nest_cancel(skb, nest);
	return -1;
}

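/* Dump the qdisc configuration, aggregating statistics across all child
 * qdiscs and handling both per-cpu (lockless) and locked children.
 */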
static int mqprio_dump(struct Qdisc *sch, struct sk_buff *skb)
{
	struct net_device *dev = qdisc_dev(sch);
	struct mqprio_sched *priv = qdisc_priv(sch);
	struct nlattr *nla = (struct nlattr *)skb_tail_pointer(skb);
	struct tc_mqprio_qopt opt = { 0 };
	struct Qdisc *qdisc;
	unsigned int ntx, tc;

	sch->q.qlen = 0;
	memset(&sch->bstats, 0, sizeof(sch->bstats));
	memset(&sch->qstats, 0, sizeof(sch->qstats));

	/* MQ supports lockless qdiscs. However, statistics accounting needs
	 * to account for all, none, or a mix of locked and unlocked child
	 * qdiscs. Percpu stats are added to counters in-band and locking
	 * qdisc totals are added at end.
	 */
	for (ntx = 0; ntx < dev->num_tx_queues; ntx++) {
		qdisc = netdev_get_tx_queue(dev, ntx)->qdisc_sleeping;
		spin_lock_bh(qdisc_lock(qdisc));

		if (qdisc_is_percpu_stats(qdisc)) {
			__u32 qlen = qdisc_qlen_sum(qdisc);

			__gnet_stats_copy_basic(NULL, &sch->bstats,
						qdisc->cpu_bstats,
						&qdisc->bstats);
			__gnet_stats_copy_queue(&sch->qstats,
						qdisc->cpu_qstats,
						&qdisc->qstats, qlen);
			sch->q.qlen += qlen;
		} else {
			sch->q.qlen += qdisc->q.qlen;
			sch->bstats.bytes += qdisc->bstats.bytes;
			sch->bstats.packets += qdisc->bstats.packets;
			sch->qstats.backlog += qdisc->qstats.backlog;
			sch->qstats.drops += qdisc->qstats.drops;
			sch->qstats.requeues += qdisc->qstats.requeues;
			sch->qstats.overlimits += qdisc->qstats.overlimits;
		}

		spin_unlock_bh(qdisc_lock(qdisc));
	}

	opt.num_tc = netdev_get_num_tc(dev);
	memcpy(opt.prio_tc_map, dev->prio_tc_map, sizeof(opt.prio_tc_map));
	opt.hw = priv->hw_offload;

	for (tc = 0; tc < netdev_get_num_tc(dev); tc++) {
		opt.count[tc] = dev->tc_to_txq[tc].count;
		opt.offset[tc] = dev->tc_to_txq[tc].offset;
	}

	if (nla_put(skb, TCA_OPTIONS, sizeof(opt), &opt))
		goto nla_put_failure;

	if ((priv->flags & TC_MQPRIO_F_MODE) &&
	    nla_put_u16(skb, TCA_MQPRIO_MODE, priv->mode))
		goto nla_put_failure;

	if ((priv->flags & TC_MQPRIO_F_SHAPER) &&
	    nla_put_u16(skb, TCA_MQPRIO_SHAPER, priv->shaper))
		goto nla_put_failure;

	if ((priv->flags & TC_MQPRIO_F_MIN_RATE ||
	     priv->flags & TC_MQPRIO_F_MAX_RATE) &&
	    (dump_rates(priv, &opt, skb) != 0))
		goto nla_put_failure;

	return nla_nest_end(skb, nla);
nla_put_failure:
	nlmsg_trim(skb, nla);
	return -1;
}

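/* Return the leaf (child) qdisc for a queue class, if the class maps to
 * a valid TX queue.
 */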
static struct Qdisc *mqprio_leaf(struct Qdisc *sch, unsigned long cl)
{
	struct netdev_queue *dev_queue = mqprio_queue_get(sch, cl);

	if (!dev_queue)
		return NULL;

	return dev_queue->qdisc_sleeping;
}

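/* Look up a class by classid; returns 0 if the class does not exist. */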
static unsigned long mqprio_find(struct Qdisc *sch, u32 classid)
{
	struct net_device *dev = qdisc_dev(sch);
	unsigned int ntx = TC_H_MIN(classid);

	/* There are essentially two regions here that have valid classid
	 * values. The first region will have a classid value of 1 through
	 * num_tx_queues. All of these are backed by actual Qdiscs.
	 */
	if (ntx < TC_H_MIN_PRIORITY)
		return (ntx <= dev->num_tx_queues) ? ntx : 0;

	/* The second region represents the hardware traffic classes. These
	 * are represented by classid values of TC_H_MIN_PRIORITY through
	 * TC_H_MIN_PRIORITY + netdev_get_num_tc - 1.
	 */
	return ((ntx - TC_H_MIN_PRIORITY) < netdev_get_num_tc(dev)) ? ntx : 0;
}

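/* Fill in parent/info for a class dump: queue classes report their
 * traffic class as parent, while traffic-class classes hang off the
 * root.
 */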
static int mqprio_dump_class(struct Qdisc *sch, unsigned long cl,
			     struct sk_buff *skb, struct tcmsg *tcm)
{
	if (cl < TC_H_MIN_PRIORITY) {
		struct netdev_queue *dev_queue = mqprio_queue_get(sch, cl);
		struct net_device *dev = qdisc_dev(sch);
		int tc = netdev_txq_to_tc(dev, cl - 1);

		tcm->tcm_parent = (tc < 0) ? 0 :
			TC_H_MAKE(TC_H_MAJ(sch->handle),
				  TC_H_MIN(tc + TC_H_MIN_PRIORITY));
		tcm->tcm_info = dev_queue->qdisc_sleeping->handle;
	} else {
		tcm->tcm_parent = TC_H_ROOT;
		tcm->tcm_info = 0;
	}
	tcm->tcm_handle |= TC_H_MIN(cl);
	return 0;
}

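/* Dump statistics for one class. Traffic-class classes aggregate over
 * their queue range; queue classes report their child qdisc's stats.
 */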
static int mqprio_dump_class_stats(struct Qdisc *sch, unsigned long cl,
				   struct gnet_dump *d)
	__releases(d->lock)
	__acquires(d->lock)
{
	if (cl >= TC_H_MIN_PRIORITY) {
		int i;
		__u32 qlen = 0;
		struct gnet_stats_queue qstats = {0};
		struct gnet_stats_basic_packed bstats = {0};
		struct net_device *dev = qdisc_dev(sch);
		struct netdev_tc_txq tc = dev->tc_to_txq[cl & TC_BITMASK];

		/* Drop the lock here; it will be reclaimed before touching
		 * statistics. This is required because the d->lock we hold
		 * here is the lock on dev_queue->qdisc_sleeping, which is
		 * also acquired below.
		 */
		if (d->lock)
			spin_unlock_bh(d->lock);

		for (i = tc.offset; i < tc.offset + tc.count; i++) {
			struct netdev_queue *q = netdev_get_tx_queue(dev, i);
			struct Qdisc *qdisc = rtnl_dereference(q->qdisc);
			struct gnet_stats_basic_cpu __percpu *cpu_bstats = NULL;
			struct gnet_stats_queue __percpu *cpu_qstats = NULL;

			spin_lock_bh(qdisc_lock(qdisc));
			if (qdisc_is_percpu_stats(qdisc)) {
				cpu_bstats = qdisc->cpu_bstats;
				cpu_qstats = qdisc->cpu_qstats;
			}

			qlen = qdisc_qlen_sum(qdisc);
			__gnet_stats_copy_basic(NULL, &sch->bstats,
						cpu_bstats, &qdisc->bstats);
			__gnet_stats_copy_queue(&sch->qstats,
						cpu_qstats,
						&qdisc->qstats,
						qlen);
			spin_unlock_bh(qdisc_lock(qdisc));
		}

		/* Reclaim root sleeping lock before completing stats */
		if (d->lock)
			spin_lock_bh(d->lock);
		if (gnet_stats_copy_basic(NULL, d, NULL, &bstats) < 0 ||
		    gnet_stats_copy_queue(d, NULL, &qstats, qlen) < 0)
			return -1;
	} else {
		struct netdev_queue *dev_queue = mqprio_queue_get(sch, cl);

		sch = dev_queue->qdisc_sleeping;
		if (gnet_stats_copy_basic(qdisc_root_sleeping_running(sch), d,
					  sch->cpu_bstats, &sch->bstats) < 0 ||
		    qdisc_qstats_copy(d, sch) < 0)
			return -1;
	}
	return 0;
}

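/* Walk all classes: first the traffic-class classes, then (after
 * padding the walker count past the unused traffic classes, which keeps
 * their classids stable) the per-queue classes.
 */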
static void mqprio_walk(struct Qdisc *sch, struct qdisc_walker *arg)
{
	struct net_device *dev = qdisc_dev(sch);
	unsigned long ntx;

	if (arg->stop)
		return;

	/* Walk hierarchy with a virtual class per tc */
	arg->count = arg->skip;
	for (ntx = arg->skip; ntx < netdev_get_num_tc(dev); ntx++) {
		if (arg->fn(sch, ntx + TC_H_MIN_PRIORITY, arg) < 0) {
			arg->stop = 1;
			return;
		}
		arg->count++;
	}

	/* Pad the values and skip over unused traffic classes */
	if (ntx < TC_MAX_QUEUE) {
		arg->count = TC_MAX_QUEUE;
		ntx = TC_MAX_QUEUE;
	}

	/* Reset offset, sort out remaining per-queue qdiscs */
	for (ntx -= TC_MAX_QUEUE; ntx < dev->num_tx_queues; ntx++) {
		if (arg->fn(sch, ntx + 1, arg) < 0) {
			arg->stop = 1;
			return;
		}
		arg->count++;
	}
}

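/* Select the TX queue for filter/class operations addressed at a
 * queue-level parent.
 */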
static struct netdev_queue *mqprio_select_queue(struct Qdisc *sch,
						struct tcmsg *tcm)
{
	return mqprio_queue_get(sch, TC_H_MIN(tcm->tcm_parent));
}

static const struct Qdisc_class_ops mqprio_class_ops = {
	.graft		= mqprio_graft,
	.leaf		= mqprio_leaf,
	.find		= mqprio_find,
	.walk		= mqprio_walk,
	.dump		= mqprio_dump_class,
	.dump_stats	= mqprio_dump_class_stats,
	.select_queue	= mqprio_select_queue,
};

static struct Qdisc_ops mqprio_qdisc_ops __read_mostly = {
	.cl_ops		= &mqprio_class_ops,
	.id		= "mqprio",
	.priv_size	= sizeof(struct mqprio_sched),
	.init		= mqprio_init,
	.destroy	= mqprio_destroy,
	.attach		= mqprio_attach,
	.dump		= mqprio_dump,
	.owner		= THIS_MODULE,
};

static int __init mqprio_module_init(void)
{
	return register_qdisc(&mqprio_qdisc_ops);
}

static void __exit mqprio_module_exit(void)
{
	unregister_qdisc(&mqprio_qdisc_ops);
}

module_init(mqprio_module_init);
module_exit(mqprio_module_exit);

MODULE_LICENSE("GPL");