// SPDX-License-Identifier: GPL-2.0-only
/*
 * net/sched/sch_ets.c		Enhanced Transmission Selection scheduler
 *
 * Description
 * -----------
 *
 * The Enhanced Transmission Selection scheduler is a classful queuing
 * discipline that merges functionality of PRIO and DRR qdiscs in one scheduler.
 * ETS makes it easy to configure a set of strict and bandwidth-sharing bands to
 * implement the transmission selection described in 802.1Qaz.
 *
 * Although ETS is technically classful, it's not possible to add and remove
 * classes at will. Instead one specifies number of classes, how many are
 * PRIO-like and how many DRR-like, and quanta for the latter.
 *
 * Algorithm
 * ---------
 *
 * The strict classes, if any, are tried for traffic first: first band 0, if it
 * has no traffic then band 1, etc.
 *
 * When there is no traffic in any of the strict queues, the bandwidth-sharing
 * ones are tried next. Each band is assigned a deficit counter, initialized to
 * "quantum" of that band. ETS maintains a list of active bandwidth-sharing
 * bands whose qdiscs are non-empty. A packet is dequeued from the band at the
 * head of the list if the packet size is smaller or equal to the deficit
 * counter. If the counter is too small, it is increased by "quantum" and the
 * scheduler moves on to the next band in the active list.
 */
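
/*
 * Example
 * -------
 *
 * An illustrative configuration, assuming the iproute2 "tc-ets" front end
 * (device name and numbers are hypothetical): three bands, of which the
 * first is strict and the remaining two share bandwidth 2:1, with the
 * priomap steering priorities 0..2 to bands 0..2 and everything else to
 * the last band:
 *
 *	tc qdisc add dev eth0 root handle 1: ets bands 3 strict 1 \
 *		quanta 3000 1500 priomap 0 1 2 2 2 2 2 2 2 2 2 2 2 2 2 2
 */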

#include <linux/module.h>
#include <net/gen_stats.h>
#include <net/netlink.h>
#include <net/pkt_cls.h>
#include <net/pkt_sched.h>
#include <net/sch_generic.h>

struct ets_class {
	struct list_head alist; /* In struct ets_sched.active. */
	struct Qdisc *qdisc;
	u32 quantum;
	u32 deficit;
	struct gnet_stats_basic_packed bstats;
	struct gnet_stats_queue qstats;
};

struct ets_sched {
	struct list_head active;
	struct tcf_proto __rcu *filter_list;
	struct tcf_block *block;
	unsigned int nbands;
	unsigned int nstrict;
	u8 prio2band[TC_PRIO_MAX + 1];
	struct ets_class classes[TCQ_ETS_MAX_BANDS];
};

static const struct nla_policy ets_policy[TCA_ETS_MAX + 1] = {
	[TCA_ETS_NBANDS] = { .type = NLA_U8 },
	[TCA_ETS_NSTRICT] = { .type = NLA_U8 },
	[TCA_ETS_QUANTA] = { .type = NLA_NESTED },
	[TCA_ETS_PRIOMAP] = { .type = NLA_NESTED },
};

static const struct nla_policy ets_priomap_policy[TCA_ETS_MAX + 1] = {
	[TCA_ETS_PRIOMAP_BAND] = { .type = NLA_U8 },
};

static const struct nla_policy ets_quanta_policy[TCA_ETS_MAX + 1] = {
	[TCA_ETS_QUANTA_BAND] = { .type = NLA_U32 },
};

static const struct nla_policy ets_class_policy[TCA_ETS_MAX + 1] = {
	[TCA_ETS_QUANTA_BAND] = { .type = NLA_U32 },
};

static int ets_quantum_parse(struct Qdisc *sch, const struct nlattr *attr,
			     unsigned int *quantum,
			     struct netlink_ext_ack *extack)
{
	*quantum = nla_get_u32(attr);
	if (!*quantum) {
		NL_SET_ERR_MSG(extack, "ETS quantum cannot be zero");
		return -EINVAL;
	}
	return 0;
}

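/* Per-class handles, as produced by ets_class_find() below, are 1-based band
 * numbers, hence the -1 here.
 */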
static struct ets_class *
ets_class_from_arg(struct Qdisc *sch, unsigned long arg)
{
	struct ets_sched *q = qdisc_priv(sch);

	return &q->classes[arg - 1];
}

static u32 ets_class_id(struct Qdisc *sch, const struct ets_class *cl)
{
	struct ets_sched *q = qdisc_priv(sch);
	int band = cl - q->classes;

	return TC_H_MAKE(sch->handle, band + 1);
}

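/* In addition to the raw quanta, pass percentage weights to the offload.
 * Computing each weight as the difference of neighboring running prefix sums
 * of percentages guarantees the weights total 100 despite integer rounding:
 * e.g. quanta of 1000/1000/1000 become weights of 33/33/34, not 33/33/33.
 */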
static void ets_offload_change(struct Qdisc *sch)
{
	struct net_device *dev = qdisc_dev(sch);
	struct ets_sched *q = qdisc_priv(sch);
	struct tc_ets_qopt_offload qopt;
	unsigned int w_psum_prev = 0;
	unsigned int q_psum = 0;
	unsigned int q_sum = 0;
	unsigned int quantum;
	unsigned int w_psum;
	unsigned int weight;
	unsigned int i;

	if (!tc_can_offload(dev) || !dev->netdev_ops->ndo_setup_tc)
		return;

	qopt.command = TC_ETS_REPLACE;
	qopt.handle = sch->handle;
	qopt.parent = sch->parent;
	qopt.replace_params.bands = q->nbands;
	qopt.replace_params.qstats = &sch->qstats;
	memcpy(&qopt.replace_params.priomap,
	       q->prio2band, sizeof(q->prio2band));

	for (i = 0; i < q->nbands; i++)
		q_sum += q->classes[i].quantum;

	for (i = 0; i < q->nbands; i++) {
		quantum = q->classes[i].quantum;
		q_psum += quantum;
		w_psum = quantum ? q_psum * 100 / q_sum : 0;
		weight = w_psum - w_psum_prev;
		w_psum_prev = w_psum;

		qopt.replace_params.quanta[i] = quantum;
		qopt.replace_params.weights[i] = weight;
	}

	dev->netdev_ops->ndo_setup_tc(dev, TC_SETUP_QDISC_ETS, &qopt);
}

static void ets_offload_destroy(struct Qdisc *sch)
{
	struct net_device *dev = qdisc_dev(sch);
	struct tc_ets_qopt_offload qopt;

	if (!tc_can_offload(dev) || !dev->netdev_ops->ndo_setup_tc)
		return;

	qopt.command = TC_ETS_DESTROY;
	qopt.handle = sch->handle;
	qopt.parent = sch->parent;
	dev->netdev_ops->ndo_setup_tc(dev, TC_SETUP_QDISC_ETS, &qopt);
}

static void ets_offload_graft(struct Qdisc *sch, struct Qdisc *new,
			      struct Qdisc *old, unsigned long arg,
			      struct netlink_ext_ack *extack)
{
	struct net_device *dev = qdisc_dev(sch);
	struct tc_ets_qopt_offload qopt;

	qopt.command = TC_ETS_GRAFT;
	qopt.handle = sch->handle;
	qopt.parent = sch->parent;
	qopt.graft_params.band = arg - 1;
	qopt.graft_params.child_handle = new->handle;

	qdisc_offload_graft_helper(dev, sch, new, old, TC_SETUP_QDISC_ETS,
				   &qopt, extack);
}

static int ets_offload_dump(struct Qdisc *sch)
{
	struct tc_ets_qopt_offload qopt;

	qopt.command = TC_ETS_STATS;
	qopt.handle = sch->handle;
	qopt.parent = sch->parent;
	qopt.stats.bstats = &sch->bstats;
	qopt.stats.qstats = &sch->qstats;

	return qdisc_offload_dump_helper(sch, TC_SETUP_QDISC_ETS, &qopt);
}

static bool ets_class_is_strict(struct ets_sched *q, const struct ets_class *cl)
{
	unsigned int band = cl - q->classes;

	return band < q->nstrict;
}

static int ets_class_change(struct Qdisc *sch, u32 classid, u32 parentid,
			    struct nlattr **tca, unsigned long *arg,
			    struct netlink_ext_ack *extack)
{
	struct ets_class *cl = ets_class_from_arg(sch, *arg);
	struct ets_sched *q = qdisc_priv(sch);
	struct nlattr *opt = tca[TCA_OPTIONS];
	struct nlattr *tb[TCA_ETS_MAX + 1];
	unsigned int quantum;
	int err;

	/* Classes can be added and removed only through Qdisc_ops.change
	 * interface.
	 */
	if (!cl) {
		NL_SET_ERR_MSG(extack, "Fine-grained class addition and removal is not supported");
		return -EOPNOTSUPP;
	}

	if (!opt) {
		NL_SET_ERR_MSG(extack, "ETS options are required for this operation");
		return -EINVAL;
	}

	err = nla_parse_nested(tb, TCA_ETS_MAX, opt, ets_class_policy, extack);
	if (err < 0)
		return err;

	if (!tb[TCA_ETS_QUANTA_BAND])
		/* Nothing to configure. */
		return 0;

	if (ets_class_is_strict(q, cl)) {
		NL_SET_ERR_MSG(extack, "Strict bands do not have a configurable quantum");
		return -EINVAL;
	}

	err = ets_quantum_parse(sch, tb[TCA_ETS_QUANTA_BAND], &quantum,
				extack);
	if (err)
		return err;

	sch_tree_lock(sch);
	cl->quantum = quantum;
	sch_tree_unlock(sch);

	ets_offload_change(sch);
	return 0;
}

static int ets_class_graft(struct Qdisc *sch, unsigned long arg,
			   struct Qdisc *new, struct Qdisc **old,
			   struct netlink_ext_ack *extack)
{
	struct ets_class *cl = ets_class_from_arg(sch, arg);

	if (!new) {
		new = qdisc_create_dflt(sch->dev_queue, &pfifo_qdisc_ops,
					ets_class_id(sch, cl), NULL);
		if (!new)
			new = &noop_qdisc;
		else
			qdisc_hash_add(new, true);
	}

	*old = qdisc_replace(sch, new, &cl->qdisc);
	ets_offload_graft(sch, new, *old, arg, extack);
	return 0;
}

static struct Qdisc *ets_class_leaf(struct Qdisc *sch, unsigned long arg)
{
	struct ets_class *cl = ets_class_from_arg(sch, arg);

	return cl->qdisc;
}

static unsigned long ets_class_find(struct Qdisc *sch, u32 classid)
{
	unsigned long band = TC_H_MIN(classid);
	struct ets_sched *q = qdisc_priv(sch);

	if (band - 1 >= q->nbands)
		return 0;
	return band;
}

static void ets_class_qlen_notify(struct Qdisc *sch, unsigned long arg)
{
	struct ets_class *cl = ets_class_from_arg(sch, arg);
	struct ets_sched *q = qdisc_priv(sch);

	/* We get notified about zero-length child Qdiscs as well if they are
	 * offloaded. Those aren't on the active list though, so don't attempt
	 * to remove them.
	 */
	if (!ets_class_is_strict(q, cl) && sch->q.qlen)
		list_del(&cl->alist);
}

static int ets_class_dump(struct Qdisc *sch, unsigned long arg,
			  struct sk_buff *skb, struct tcmsg *tcm)
{
	struct ets_class *cl = ets_class_from_arg(sch, arg);
	struct ets_sched *q = qdisc_priv(sch);
	struct nlattr *nest;

	tcm->tcm_parent = TC_H_ROOT;
	tcm->tcm_handle = ets_class_id(sch, cl);
	tcm->tcm_info = cl->qdisc->handle;

	nest = nla_nest_start_noflag(skb, TCA_OPTIONS);
	if (!nest)
		goto nla_put_failure;
	if (!ets_class_is_strict(q, cl)) {
		if (nla_put_u32(skb, TCA_ETS_QUANTA_BAND, cl->quantum))
			goto nla_put_failure;
	}
	return nla_nest_end(skb, nest);

nla_put_failure:
	nla_nest_cancel(skb, nest);
	return -EMSGSIZE;
}

static int ets_class_dump_stats(struct Qdisc *sch, unsigned long arg,
				struct gnet_dump *d)
{
	struct ets_class *cl = ets_class_from_arg(sch, arg);
	struct Qdisc *cl_q = cl->qdisc;

	if (gnet_stats_copy_basic(qdisc_root_sleeping_running(sch),
				  d, NULL, &cl_q->bstats) < 0 ||
	    qdisc_qstats_copy(d, cl_q) < 0)
		return -1;

	return 0;
}

static void ets_qdisc_walk(struct Qdisc *sch, struct qdisc_walker *arg)
{
	struct ets_sched *q = qdisc_priv(sch);
	int i;

	if (arg->stop)
		return;

	for (i = 0; i < q->nbands; i++) {
		if (arg->count < arg->skip) {
			arg->count++;
			continue;
		}
		if (arg->fn(sch, i + 1, arg) < 0) {
			arg->stop = 1;
			break;
		}
		arg->count++;
	}
}

static struct tcf_block *
ets_qdisc_tcf_block(struct Qdisc *sch, unsigned long cl,
		    struct netlink_ext_ack *extack)
{
	struct ets_sched *q = qdisc_priv(sch);

	if (cl) {
		NL_SET_ERR_MSG(extack, "ETS classid must be zero");
		return NULL;
	}

	return q->block;
}

static unsigned long ets_qdisc_bind_tcf(struct Qdisc *sch, unsigned long parent,
					u32 classid)
{
	return ets_class_find(sch, classid);
}

static void ets_qdisc_unbind_tcf(struct Qdisc *sch, unsigned long arg)
{
}

static struct ets_class *ets_classify(struct sk_buff *skb, struct Qdisc *sch,
				      int *qerr)
{
	struct ets_sched *q = qdisc_priv(sch);
	u32 band = skb->priority;
	struct tcf_result res;
	struct tcf_proto *fl;
	int err;

	*qerr = NET_XMIT_SUCCESS | __NET_XMIT_BYPASS;
	if (TC_H_MAJ(skb->priority) != sch->handle) {
		fl = rcu_dereference_bh(q->filter_list);
		err = tcf_classify(skb, fl, &res, false);
#ifdef CONFIG_NET_CLS_ACT
		switch (err) {
		case TC_ACT_STOLEN:
		case TC_ACT_QUEUED:
		case TC_ACT_TRAP:
			*qerr = NET_XMIT_SUCCESS | __NET_XMIT_STOLEN;
			fallthrough;
		case TC_ACT_SHOT:
			return NULL;
		}
#endif
		if (!fl || err < 0) {
			if (TC_H_MAJ(band))
				band = 0;
			return &q->classes[q->prio2band[band & TC_PRIO_MAX]];
		}
		band = res.classid;
	}
	band = TC_H_MIN(band) - 1;
	if (band >= q->nbands)
		return &q->classes[q->prio2band[0]];
	return &q->classes[band];
}

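/* Enqueue to the selected band's child qdisc. A bandwidth-sharing band that
 * just became backlogged is appended to the active list with a full quantum
 * of deficit, per the DRR algorithm described at the top of this file.
 */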
static int ets_qdisc_enqueue(struct sk_buff *skb, struct Qdisc *sch,
			     struct sk_buff **to_free)
{
	unsigned int len = qdisc_pkt_len(skb);
	struct ets_sched *q = qdisc_priv(sch);
	struct ets_class *cl;
	int err = 0;
	bool first;

	cl = ets_classify(skb, sch, &err);
	if (!cl) {
		if (err & __NET_XMIT_BYPASS)
			qdisc_qstats_drop(sch);
		__qdisc_drop(skb, to_free);
		return err;
	}

	first = !cl->qdisc->q.qlen;
	err = qdisc_enqueue(skb, cl->qdisc, to_free);
	if (unlikely(err != NET_XMIT_SUCCESS)) {
		if (net_xmit_drop_count(err)) {
			cl->qstats.drops++;
			qdisc_qstats_drop(sch);
		}
		return err;
	}

	if (first && !ets_class_is_strict(q, cl)) {
		list_add_tail(&cl->alist, &q->active);
		cl->deficit = cl->quantum;
	}

	sch->qstats.backlog += len;
	sch->q.qlen++;
	return err;
}

static struct sk_buff *
ets_qdisc_dequeue_skb(struct Qdisc *sch, struct sk_buff *skb)
{
	qdisc_bstats_update(sch, skb);
	qdisc_qstats_backlog_dec(sch, skb);
	sch->q.qlen--;
	return skb;
}

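/* Each pass first polls the strict bands in priority order, so strict traffic
 * always preempts the bandwidth-sharing bands. Only when all strict bands are
 * empty is the head of the active list served, DRR-style: a packet is
 * dequeued if it fits the band's deficit, otherwise the band is topped up by
 * its quantum and rotated to the tail of the list.
 */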
static struct sk_buff *ets_qdisc_dequeue(struct Qdisc *sch)
{
	struct ets_sched *q = qdisc_priv(sch);
	struct ets_class *cl;
	struct sk_buff *skb;
	unsigned int band;
	unsigned int len;

	while (1) {
		for (band = 0; band < q->nstrict; band++) {
			cl = &q->classes[band];
			skb = qdisc_dequeue_peeked(cl->qdisc);
			if (skb)
				return ets_qdisc_dequeue_skb(sch, skb);
		}

		if (list_empty(&q->active))
			goto out;

		cl = list_first_entry(&q->active, struct ets_class, alist);
		skb = cl->qdisc->ops->peek(cl->qdisc);
		if (!skb) {
			qdisc_warn_nonwc(__func__, cl->qdisc);
			goto out;
		}

		len = qdisc_pkt_len(skb);
		if (len <= cl->deficit) {
			cl->deficit -= len;
			skb = qdisc_dequeue_peeked(cl->qdisc);
			if (unlikely(!skb))
				goto out;
			if (cl->qdisc->q.qlen == 0)
				list_del(&cl->alist);
			return ets_qdisc_dequeue_skb(sch, skb);
		}

		cl->deficit += cl->quantum;
		list_move_tail(&cl->alist, &q->active);
	}
out:
	return NULL;
}

static int ets_qdisc_priomap_parse(struct nlattr *priomap_attr,
				   unsigned int nbands, u8 *priomap,
				   struct netlink_ext_ack *extack)
{
	const struct nlattr *attr;
	int prio = 0;
	u8 band;
	int rem;
	int err;

	err = __nla_validate_nested(priomap_attr, TCA_ETS_MAX,
				    ets_priomap_policy, NL_VALIDATE_STRICT,
				    extack);
	if (err)
		return err;

	nla_for_each_nested(attr, priomap_attr, rem) {
		switch (nla_type(attr)) {
		case TCA_ETS_PRIOMAP_BAND:
			if (prio > TC_PRIO_MAX) {
				NL_SET_ERR_MSG_MOD(extack, "Too many priorities in ETS priomap");
				return -EINVAL;
			}
			band = nla_get_u8(attr);
			if (band >= nbands) {
				NL_SET_ERR_MSG_MOD(extack, "Invalid band number in ETS priomap");
				return -EINVAL;
			}
			priomap[prio++] = band;
			break;
		default:
			WARN_ON_ONCE(1); /* Validate should have caught this. */
			return -EINVAL;
		}
	}

	return 0;
}

static int ets_qdisc_quanta_parse(struct Qdisc *sch, struct nlattr *quanta_attr,
				  unsigned int nbands, unsigned int nstrict,
				  unsigned int *quanta,
				  struct netlink_ext_ack *extack)
{
	const struct nlattr *attr;
	int band = nstrict;
	int rem;
	int err;

	err = __nla_validate_nested(quanta_attr, TCA_ETS_MAX,
				    ets_quanta_policy, NL_VALIDATE_STRICT,
				    extack);
	if (err < 0)
		return err;

	nla_for_each_nested(attr, quanta_attr, rem) {
		switch (nla_type(attr)) {
		case TCA_ETS_QUANTA_BAND:
			if (band >= nbands) {
				NL_SET_ERR_MSG_MOD(extack, "ETS quanta has more values than bands");
				return -EINVAL;
			}
			err = ets_quantum_parse(sch, attr, &quanta[band++],
						extack);
			if (err)
				return err;
			break;
		default:
			WARN_ON_ONCE(1); /* Validate should have caught this. */
			return -EINVAL;
		}
	}

	return 0;
}

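/* Parse and validate all attributes, and allocate any new child qdiscs, up
 * front; only then commit the new configuration under the qdisc tree lock,
 * so that a failed change leaves the qdisc in its previous state.
 */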
static int ets_qdisc_change(struct Qdisc *sch, struct nlattr *opt,
			    struct netlink_ext_ack *extack)
{
	unsigned int quanta[TCQ_ETS_MAX_BANDS] = {0};
	struct Qdisc *queues[TCQ_ETS_MAX_BANDS];
	struct ets_sched *q = qdisc_priv(sch);
	struct nlattr *tb[TCA_ETS_MAX + 1];
	unsigned int oldbands = q->nbands;
	u8 priomap[TC_PRIO_MAX + 1];
	unsigned int nstrict = 0;
	unsigned int nbands;
	unsigned int i;
	int err;

	if (!opt) {
		NL_SET_ERR_MSG(extack, "ETS options are required for this operation");
		return -EINVAL;
	}

	err = nla_parse_nested(tb, TCA_ETS_MAX, opt, ets_policy, extack);
	if (err < 0)
		return err;

	if (!tb[TCA_ETS_NBANDS]) {
		NL_SET_ERR_MSG_MOD(extack, "Number of bands is a required argument");
		return -EINVAL;
	}
	nbands = nla_get_u8(tb[TCA_ETS_NBANDS]);
	if (nbands < 1 || nbands > TCQ_ETS_MAX_BANDS) {
		NL_SET_ERR_MSG_MOD(extack, "Invalid number of bands");
		return -EINVAL;
	}
	/* Unless overridden, traffic goes to the last band. */
	memset(priomap, nbands - 1, sizeof(priomap));

	if (tb[TCA_ETS_NSTRICT]) {
		nstrict = nla_get_u8(tb[TCA_ETS_NSTRICT]);
		if (nstrict > nbands) {
			NL_SET_ERR_MSG_MOD(extack, "Invalid number of strict bands");
			return -EINVAL;
		}
	}

	if (tb[TCA_ETS_PRIOMAP]) {
		err = ets_qdisc_priomap_parse(tb[TCA_ETS_PRIOMAP],
					      nbands, priomap, extack);
		if (err)
			return err;
	}

	if (tb[TCA_ETS_QUANTA]) {
		err = ets_qdisc_quanta_parse(sch, tb[TCA_ETS_QUANTA],
					     nbands, nstrict, quanta, extack);
		if (err)
			return err;
	}
	/* If there are more bands than strict + quanta provided, the remaining
	 * ones are ETS with quantum of MTU. Initialize the missing values here.
	 */
	for (i = nstrict; i < nbands; i++) {
		if (!quanta[i])
			quanta[i] = psched_mtu(qdisc_dev(sch));
	}

	/* Before commit, make sure we can allocate all new qdiscs */
	for (i = oldbands; i < nbands; i++) {
		queues[i] = qdisc_create_dflt(sch->dev_queue, &pfifo_qdisc_ops,
					      ets_class_id(sch, &q->classes[i]),
					      extack);
		if (!queues[i]) {
			while (i > oldbands)
				qdisc_put(queues[--i]);
			return -ENOMEM;
		}
	}

	sch_tree_lock(sch);

	q->nbands = nbands;
	q->nstrict = nstrict;
	memcpy(q->prio2band, priomap, sizeof(priomap));

	for (i = q->nbands; i < oldbands; i++)
		qdisc_tree_flush_backlog(q->classes[i].qdisc);

	for (i = 0; i < q->nbands; i++)
		q->classes[i].quantum = quanta[i];

	for (i = oldbands; i < q->nbands; i++) {
		q->classes[i].qdisc = queues[i];
		if (q->classes[i].qdisc != &noop_qdisc)
			qdisc_hash_add(q->classes[i].qdisc, true);
	}

	sch_tree_unlock(sch);

	ets_offload_change(sch);
	for (i = q->nbands; i < oldbands; i++) {
		qdisc_put(q->classes[i].qdisc);
		memset(&q->classes[i], 0, sizeof(q->classes[i]));
	}
	return 0;
}

static int ets_qdisc_init(struct Qdisc *sch, struct nlattr *opt,
			  struct netlink_ext_ack *extack)
{
	struct ets_sched *q = qdisc_priv(sch);
	int err;

	if (!opt)
		return -EINVAL;

	err = tcf_block_get(&q->block, &q->filter_list, sch, extack);
	if (err)
		return err;

	INIT_LIST_HEAD(&q->active);
	return ets_qdisc_change(sch, opt, extack);
}

static void ets_qdisc_reset(struct Qdisc *sch)
{
	struct ets_sched *q = qdisc_priv(sch);
	int band;

	for (band = q->nstrict; band < q->nbands; band++) {
		if (q->classes[band].qdisc->q.qlen)
			list_del(&q->classes[band].alist);
	}
	for (band = 0; band < q->nbands; band++)
		qdisc_reset(q->classes[band].qdisc);
	sch->qstats.backlog = 0;
	sch->q.qlen = 0;
}

static void ets_qdisc_destroy(struct Qdisc *sch)
{
	struct ets_sched *q = qdisc_priv(sch);
	int band;

	ets_offload_destroy(sch);
	tcf_block_put(q->block);
	for (band = 0; band < q->nbands; band++)
		qdisc_put(q->classes[band].qdisc);
}

static int ets_qdisc_dump(struct Qdisc *sch, struct sk_buff *skb)
{
	struct ets_sched *q = qdisc_priv(sch);
	struct nlattr *opts;
	struct nlattr *nest;
	int band;
	int prio;
	int err;

	err = ets_offload_dump(sch);
	if (err)
		return err;

	opts = nla_nest_start_noflag(skb, TCA_OPTIONS);
	if (!opts)
		goto nla_err;

	if (nla_put_u8(skb, TCA_ETS_NBANDS, q->nbands))
		goto nla_err;

	if (q->nstrict &&
	    nla_put_u8(skb, TCA_ETS_NSTRICT, q->nstrict))
		goto nla_err;

	if (q->nbands > q->nstrict) {
		nest = nla_nest_start(skb, TCA_ETS_QUANTA);
		if (!nest)
			goto nla_err;

		for (band = q->nstrict; band < q->nbands; band++) {
			if (nla_put_u32(skb, TCA_ETS_QUANTA_BAND,
					q->classes[band].quantum))
				goto nla_err;
		}

		nla_nest_end(skb, nest);
	}

	nest = nla_nest_start(skb, TCA_ETS_PRIOMAP);
	if (!nest)
		goto nla_err;

	for (prio = 0; prio <= TC_PRIO_MAX; prio++) {
		if (nla_put_u8(skb, TCA_ETS_PRIOMAP_BAND, q->prio2band[prio]))
			goto nla_err;
	}

	nla_nest_end(skb, nest);

	return nla_nest_end(skb, opts);

nla_err:
	nla_nest_cancel(skb, opts);
	return -EMSGSIZE;
}

static const struct Qdisc_class_ops ets_class_ops = {
	.change		= ets_class_change,
	.graft		= ets_class_graft,
	.leaf		= ets_class_leaf,
	.find		= ets_class_find,
	.qlen_notify	= ets_class_qlen_notify,
	.dump		= ets_class_dump,
	.dump_stats	= ets_class_dump_stats,
	.walk		= ets_qdisc_walk,
	.tcf_block	= ets_qdisc_tcf_block,
	.bind_tcf	= ets_qdisc_bind_tcf,
	.unbind_tcf	= ets_qdisc_unbind_tcf,
};

static struct Qdisc_ops ets_qdisc_ops __read_mostly = {
	.cl_ops		= &ets_class_ops,
	.id		= "ets",
	.priv_size	= sizeof(struct ets_sched),
	.enqueue	= ets_qdisc_enqueue,
	.dequeue	= ets_qdisc_dequeue,
	.peek		= qdisc_peek_dequeued,
	.change		= ets_qdisc_change,
	.init		= ets_qdisc_init,
	.reset		= ets_qdisc_reset,
	.destroy	= ets_qdisc_destroy,
	.dump		= ets_qdisc_dump,
	.owner		= THIS_MODULE,
};

static int __init ets_init(void)
{
	return register_qdisc(&ets_qdisc_ops);
}

static void __exit ets_exit(void)
{
	unregister_qdisc(&ets_qdisc_ops);
}

module_init(ets_init);
module_exit(ets_exit);
MODULE_LICENSE("GPL");