// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * net/sched/sch_red.c	Random Early Detection queue.
 *
 * Authors:	Alexey Kuznetsov, <[email protected]>
 *
 * Changes:
 * J Hadi Salim 980914:	computation fixes
 * Alexey Makarenko <[email protected]> 990814: qave on idle link was calculated incorrectly.
 * J Hadi Salim 980816: ECN support
 */

#include <linux/module.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/skbuff.h>
#include <net/pkt_sched.h>
#include <net/pkt_cls.h>
#include <net/inet_ecn.h>
#include <net/red.h>


/*	Parameters, settable by user:
	-----------------------------

	limit		- bytes (must be > qth_max + burst)

	Hard limit on queue length, should be chosen >qth_max
	to allow packet bursts. This parameter does not
	affect the algorithm's behaviour and can be chosen
	arbitrarily high (well, less than ram size).
	Really, this limit will never be reached
	if RED works correctly.
 */
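
/* Illustrative example (not part of the original file): with iproute2, a RED
 * qdisc using these parameters might be configured as
 *
 *	tc qdisc add dev eth0 root red limit 400000 min 30000 max 90000 \
 *		avpkt 1000 burst 55 bandwidth 10Mbit probability 0.02 ecn adaptive
 *
 * tc derives Wlog/Plog/Scell_log and the STAB lookup table from these values
 * and passes them down in TCA_RED_PARMS/TCA_RED_STAB; the device name and the
 * numbers above are placeholders only.
 */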

struct red_sched_data {
	u32			limit;		/* HARD maximal queue length */

	unsigned char		flags;
	/* Non-flags in tc_red_qopt.flags. */
	unsigned char		userbits;

	struct timer_list	adapt_timer;
	struct Qdisc		*sch;
	struct red_parms	parms;
	struct red_vars		vars;
	struct red_stats	stats;
	struct Qdisc		*qdisc;
	struct tcf_qevent	qe_early_drop;
	struct tcf_qevent	qe_mark;
};

#define TC_RED_SUPPORTED_FLAGS (TC_RED_HISTORIC_FLAGS | TC_RED_NODROP)

static inline int red_use_ecn(struct red_sched_data *q)
{
	return q->flags & TC_RED_ECN;
}

static inline int red_use_harddrop(struct red_sched_data *q)
{
	return q->flags & TC_RED_HARDDROP;
}

static int red_use_nodrop(struct red_sched_data *q)
{
	return q->flags & TC_RED_NODROP;
}

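/* Enqueue outline (editorial summary of the code below): the average queue
 * size is recomputed from the child qdisc's backlog and red_action() selects
 * one of three outcomes.  RED_DONT_MARK queues the packet unchanged;
 * RED_PROB_MARK and RED_HARD_MARK either ECN-mark the packet (ECN flag set
 * and packet is ECT) or drop it early.  The harddrop flag forces a drop at
 * the hard threshold even for ECN-capable traffic, while the nodrop flag
 * queues non-ECT packets instead of dropping them.  Marks and early drops
 * are also handed to the qe_mark/qe_early_drop qevent blocks.
 */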
static int red_enqueue(struct sk_buff *skb, struct Qdisc *sch,
		       struct sk_buff **to_free)
{
	struct red_sched_data *q = qdisc_priv(sch);
	struct Qdisc *child = q->qdisc;
	int ret;

	q->vars.qavg = red_calc_qavg(&q->parms,
				     &q->vars,
				     child->qstats.backlog);

	if (red_is_idling(&q->vars))
		red_end_of_idle_period(&q->vars);

	switch (red_action(&q->parms, &q->vars, q->vars.qavg)) {
	case RED_DONT_MARK:
		break;

	case RED_PROB_MARK:
		qdisc_qstats_overlimit(sch);
		if (!red_use_ecn(q)) {
			q->stats.prob_drop++;
			goto congestion_drop;
		}

		if (INET_ECN_set_ce(skb)) {
			q->stats.prob_mark++;
			skb = tcf_qevent_handle(&q->qe_mark, sch, skb, to_free, &ret);
			if (!skb)
				return NET_XMIT_CN | ret;
		} else if (!red_use_nodrop(q)) {
			q->stats.prob_drop++;
			goto congestion_drop;
		}

		/* Non-ECT packet in ECN nodrop mode: queue it. */
		break;

	case RED_HARD_MARK:
		qdisc_qstats_overlimit(sch);
		if (red_use_harddrop(q) || !red_use_ecn(q)) {
			q->stats.forced_drop++;
			goto congestion_drop;
		}

		if (INET_ECN_set_ce(skb)) {
			q->stats.forced_mark++;
			skb = tcf_qevent_handle(&q->qe_mark, sch, skb, to_free, &ret);
			if (!skb)
				return NET_XMIT_CN | ret;
		} else if (!red_use_nodrop(q)) {
			q->stats.forced_drop++;
			goto congestion_drop;
		}

		/* Non-ECT packet in ECN nodrop mode: queue it. */
		break;
	}

	ret = qdisc_enqueue(skb, child, to_free);
	if (likely(ret == NET_XMIT_SUCCESS)) {
		qdisc_qstats_backlog_inc(sch, skb);
		sch->q.qlen++;
	} else if (net_xmit_drop_count(ret)) {
		q->stats.pdrop++;
		qdisc_qstats_drop(sch);
	}
	return ret;

congestion_drop:
	skb = tcf_qevent_handle(&q->qe_early_drop, sch, skb, to_free, &ret);
	if (!skb)
		return NET_XMIT_CN | ret;

	qdisc_drop(skb, sch, to_free);
	return NET_XMIT_CN;
}

static struct sk_buff *red_dequeue(struct Qdisc *sch)
{
	struct sk_buff *skb;
	struct red_sched_data *q = qdisc_priv(sch);
	struct Qdisc *child = q->qdisc;

	skb = child->dequeue(child);
	if (skb) {
		qdisc_bstats_update(sch, skb);
		qdisc_qstats_backlog_dec(sch, skb);
		sch->q.qlen--;
	} else {
		if (!red_is_idling(&q->vars))
			red_start_of_idle_period(&q->vars);
	}
	return skb;
}

static struct sk_buff *red_peek(struct Qdisc *sch)
{
	struct red_sched_data *q = qdisc_priv(sch);
	struct Qdisc *child = q->qdisc;

	return child->ops->peek(child);
}

static void red_reset(struct Qdisc *sch)
{
	struct red_sched_data *q = qdisc_priv(sch);

	qdisc_reset(q->qdisc);
	sch->qstats.backlog = 0;
	sch->q.qlen = 0;
	red_restart(&q->vars);
}

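/* Editorial note: red_offload() asks the driver to install or remove a
 * hardware RED instance through ndo_setup_tc(TC_SETUP_QDISC_RED).  For
 * TC_RED_REPLACE the thresholds are shifted back by Wlog, undoing the
 * fixed-point scaling applied by red_set_parms(), so the driver sees the
 * values as originally configured.
 */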
static int red_offload(struct Qdisc *sch, bool enable)
{
	struct red_sched_data *q = qdisc_priv(sch);
	struct net_device *dev = qdisc_dev(sch);
	struct tc_red_qopt_offload opt = {
		.handle = sch->handle,
		.parent = sch->parent,
	};

	if (!tc_can_offload(dev) || !dev->netdev_ops->ndo_setup_tc)
		return -EOPNOTSUPP;

	if (enable) {
		opt.command = TC_RED_REPLACE;
		opt.set.min = q->parms.qth_min >> q->parms.Wlog;
		opt.set.max = q->parms.qth_max >> q->parms.Wlog;
		opt.set.probability = q->parms.max_P;
		opt.set.limit = q->limit;
		opt.set.is_ecn = red_use_ecn(q);
		opt.set.is_harddrop = red_use_harddrop(q);
		opt.set.is_nodrop = red_use_nodrop(q);
		opt.set.qstats = &sch->qstats;
	} else {
		opt.command = TC_RED_DESTROY;
	}

	return dev->netdev_ops->ndo_setup_tc(dev, TC_SETUP_QDISC_RED, &opt);
}

static void red_destroy(struct Qdisc *sch)
{
	struct red_sched_data *q = qdisc_priv(sch);

	tcf_qevent_destroy(&q->qe_mark, sch);
	tcf_qevent_destroy(&q->qe_early_drop, sch);
	del_timer_sync(&q->adapt_timer);
	red_offload(sch, false);
	qdisc_put(q->qdisc);
}

static const struct nla_policy red_policy[TCA_RED_MAX + 1] = {
	[TCA_RED_UNSPEC] = { .strict_start_type = TCA_RED_FLAGS },
	[TCA_RED_PARMS] = { .len = sizeof(struct tc_red_qopt) },
	[TCA_RED_STAB] = { .len = RED_STAB_SIZE },
	[TCA_RED_MAX_P] = { .type = NLA_U32 },
	[TCA_RED_FLAGS] = NLA_POLICY_BITFIELD32(TC_RED_SUPPORTED_FLAGS),
	[TCA_RED_EARLY_DROP_BLOCK] = { .type = NLA_U32 },
	[TCA_RED_MARK_BLOCK] = { .type = NLA_U32 },
};

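/* Editorial note: __red_change() is the common worker for red_init() and
 * red_change().  It validates the netlink attributes, optionally creates a
 * bfifo child qdisc sized to the configured limit, and applies the new
 * parameters under sch_tree_lock(); a replaced child, if any, is released
 * only after the lock has been dropped.
 */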
static int __red_change(struct Qdisc *sch, struct nlattr **tb,
			struct netlink_ext_ack *extack)
{
	struct Qdisc *old_child = NULL, *child = NULL;
	struct red_sched_data *q = qdisc_priv(sch);
	struct nla_bitfield32 flags_bf;
	struct tc_red_qopt *ctl;
	unsigned char userbits;
	unsigned char flags;
	int err;
	u32 max_P;

	if (tb[TCA_RED_PARMS] == NULL ||
	    tb[TCA_RED_STAB] == NULL)
		return -EINVAL;

	max_P = tb[TCA_RED_MAX_P] ? nla_get_u32(tb[TCA_RED_MAX_P]) : 0;

	ctl = nla_data(tb[TCA_RED_PARMS]);
	if (!red_check_params(ctl->qth_min, ctl->qth_max, ctl->Wlog))
		return -EINVAL;

	err = red_get_flags(ctl->flags, TC_RED_HISTORIC_FLAGS,
			    tb[TCA_RED_FLAGS], TC_RED_SUPPORTED_FLAGS,
			    &flags_bf, &userbits, extack);
	if (err)
		return err;

	if (ctl->limit > 0) {
		child = fifo_create_dflt(sch, &bfifo_qdisc_ops, ctl->limit,
					 extack);
		if (IS_ERR(child))
			return PTR_ERR(child);

		/* child is fifo, no need to check for noop_qdisc */
		qdisc_hash_add(child, true);
	}

	sch_tree_lock(sch);

	flags = (q->flags & ~flags_bf.selector) | flags_bf.value;
	err = red_validate_flags(flags, extack);
	if (err)
		goto unlock_out;

	q->flags = flags;
	q->userbits = userbits;
	q->limit = ctl->limit;
	if (child) {
		qdisc_tree_flush_backlog(q->qdisc);
		old_child = q->qdisc;
		q->qdisc = child;
	}

	red_set_parms(&q->parms,
		      ctl->qth_min, ctl->qth_max, ctl->Wlog,
		      ctl->Plog, ctl->Scell_log,
		      nla_data(tb[TCA_RED_STAB]),
		      max_P);
	red_set_vars(&q->vars);

	del_timer(&q->adapt_timer);
	if (ctl->flags & TC_RED_ADAPTATIVE)
		mod_timer(&q->adapt_timer, jiffies + HZ/2);

	if (!q->qdisc->q.qlen)
		red_start_of_idle_period(&q->vars);

	sch_tree_unlock(sch);

	red_offload(sch, true);

	if (old_child)
		qdisc_put(old_child);
	return 0;

unlock_out:
	sch_tree_unlock(sch);
	if (child)
		qdisc_put(child);
	return err;
}

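/* Editorial note: with the TC_RED_ADAPTATIVE flag set, this timer fires about
 * every 500ms (HZ/2) and lets red_adaptative_algo() adjust max_P so that the
 * average queue size stays between the configured thresholds, along the lines
 * of the Adaptive RED scheme.  The adjustment runs under the root qdisc lock.
 */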
static inline void red_adaptative_timer(struct timer_list *t)
{
	struct red_sched_data *q = from_timer(q, t, adapt_timer);
	struct Qdisc *sch = q->sch;
	spinlock_t *root_lock = qdisc_lock(qdisc_root_sleeping(sch));

	spin_lock(root_lock);
	red_adaptative_algo(&q->parms, &q->vars);
	mod_timer(&q->adapt_timer, jiffies + HZ/2);
	spin_unlock(root_lock);
}

static int red_init(struct Qdisc *sch, struct nlattr *opt,
		    struct netlink_ext_ack *extack)
{
	struct red_sched_data *q = qdisc_priv(sch);
	struct nlattr *tb[TCA_RED_MAX + 1];
	int err;

	if (!opt)
		return -EINVAL;

	err = nla_parse_nested_deprecated(tb, TCA_RED_MAX, opt, red_policy,
					  extack);
	if (err < 0)
		return err;

	q->qdisc = &noop_qdisc;
	q->sch = sch;
	timer_setup(&q->adapt_timer, red_adaptative_timer, 0);

	err = __red_change(sch, tb, extack);
	if (err)
		return err;

	err = tcf_qevent_init(&q->qe_early_drop, sch,
			      FLOW_BLOCK_BINDER_TYPE_RED_EARLY_DROP,
			      tb[TCA_RED_EARLY_DROP_BLOCK], extack);
	if (err)
		goto err_early_drop_init;

	err = tcf_qevent_init(&q->qe_mark, sch,
			      FLOW_BLOCK_BINDER_TYPE_RED_MARK,
			      tb[TCA_RED_MARK_BLOCK], extack);
	if (err)
		goto err_mark_init;

	return 0;

err_mark_init:
	tcf_qevent_destroy(&q->qe_early_drop, sch);
err_early_drop_init:
	del_timer_sync(&q->adapt_timer);
	red_offload(sch, false);
	qdisc_put(q->qdisc);
	return err;
}

static int red_change(struct Qdisc *sch, struct nlattr *opt,
		      struct netlink_ext_ack *extack)
{
	struct red_sched_data *q = qdisc_priv(sch);
	struct nlattr *tb[TCA_RED_MAX + 1];
	int err;

	if (!opt)
		return -EINVAL;

	err = nla_parse_nested_deprecated(tb, TCA_RED_MAX, opt, red_policy,
					  extack);
	if (err < 0)
		return err;

	err = tcf_qevent_validate_change(&q->qe_early_drop,
					 tb[TCA_RED_EARLY_DROP_BLOCK], extack);
	if (err)
		return err;

	err = tcf_qevent_validate_change(&q->qe_mark,
					 tb[TCA_RED_MARK_BLOCK], extack);
	if (err)
		return err;

	return __red_change(sch, tb, extack);
}

static int red_dump_offload_stats(struct Qdisc *sch)
{
	struct tc_red_qopt_offload hw_stats = {
		.command = TC_RED_STATS,
		.handle = sch->handle,
		.parent = sch->parent,
		{
			.stats.bstats = &sch->bstats,
			.stats.qstats = &sch->qstats,
		},
	};

	return qdisc_offload_dump_helper(sch, TC_SETUP_QDISC_RED, &hw_stats);
}

static int red_dump(struct Qdisc *sch, struct sk_buff *skb)
{
	struct red_sched_data *q = qdisc_priv(sch);
	struct nlattr *opts = NULL;
	struct tc_red_qopt opt = {
		.limit		= q->limit,
		.flags		= (q->flags & TC_RED_HISTORIC_FLAGS) |
				  q->userbits,
		.qth_min	= q->parms.qth_min >> q->parms.Wlog,
		.qth_max	= q->parms.qth_max >> q->parms.Wlog,
		.Wlog		= q->parms.Wlog,
		.Plog		= q->parms.Plog,
		.Scell_log	= q->parms.Scell_log,
	};
	int err;

	err = red_dump_offload_stats(sch);
	if (err)
		goto nla_put_failure;

	opts = nla_nest_start_noflag(skb, TCA_OPTIONS);
	if (opts == NULL)
		goto nla_put_failure;
	if (nla_put(skb, TCA_RED_PARMS, sizeof(opt), &opt) ||
	    nla_put_u32(skb, TCA_RED_MAX_P, q->parms.max_P) ||
	    nla_put_bitfield32(skb, TCA_RED_FLAGS,
			       q->flags, TC_RED_SUPPORTED_FLAGS) ||
	    tcf_qevent_dump(skb, TCA_RED_MARK_BLOCK, &q->qe_mark) ||
	    tcf_qevent_dump(skb, TCA_RED_EARLY_DROP_BLOCK, &q->qe_early_drop))
		goto nla_put_failure;
	return nla_nest_end(skb, opts);

nla_put_failure:
	nla_nest_cancel(skb, opts);
	return -EMSGSIZE;
}

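/* Editorial note: if the qdisc is offloaded (TCQ_F_OFFLOADED), the driver is
 * first asked to refresh q->stats via TC_RED_XSTATS, so the counters folded
 * into struct tc_red_xstats below also reflect hardware activity.
 */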
static int red_dump_stats(struct Qdisc *sch, struct gnet_dump *d)
{
	struct red_sched_data *q = qdisc_priv(sch);
	struct net_device *dev = qdisc_dev(sch);
	struct tc_red_xstats st = {0};

	if (sch->flags & TCQ_F_OFFLOADED) {
		struct tc_red_qopt_offload hw_stats_request = {
			.command = TC_RED_XSTATS,
			.handle = sch->handle,
			.parent = sch->parent,
			{
				.xstats = &q->stats,
			},
		};
		dev->netdev_ops->ndo_setup_tc(dev, TC_SETUP_QDISC_RED,
					      &hw_stats_request);
	}
	st.early = q->stats.prob_drop + q->stats.forced_drop;
	st.pdrop = q->stats.pdrop;
	st.other = q->stats.other;
	st.marked = q->stats.prob_mark + q->stats.forced_mark;

	return gnet_stats_copy_app(d, &st, sizeof(st));
}

static int red_dump_class(struct Qdisc *sch, unsigned long cl,
			  struct sk_buff *skb, struct tcmsg *tcm)
{
	struct red_sched_data *q = qdisc_priv(sch);

	tcm->tcm_handle |= TC_H_MIN(1);
	tcm->tcm_info = q->qdisc->handle;
	return 0;
}

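/* Editorial note: RED acts as a classful qdisc with exactly one child.  The
 * class operations below expose that child for grafting, walking and dumping;
 * red_graft_offload() additionally tells an offloading driver which child
 * qdisc is now attached below the hardware RED instance.
 */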
static void red_graft_offload(struct Qdisc *sch,
			      struct Qdisc *new, struct Qdisc *old,
			      struct netlink_ext_ack *extack)
{
	struct tc_red_qopt_offload graft_offload = {
		.handle		= sch->handle,
		.parent		= sch->parent,
		.child_handle	= new->handle,
		.command	= TC_RED_GRAFT,
	};

	qdisc_offload_graft_helper(qdisc_dev(sch), sch, new, old,
				   TC_SETUP_QDISC_RED, &graft_offload, extack);
}

static int red_graft(struct Qdisc *sch, unsigned long arg, struct Qdisc *new,
		     struct Qdisc **old, struct netlink_ext_ack *extack)
{
	struct red_sched_data *q = qdisc_priv(sch);

	if (new == NULL)
		new = &noop_qdisc;

	*old = qdisc_replace(sch, new, &q->qdisc);

	red_graft_offload(sch, new, *old, extack);
	return 0;
}

static struct Qdisc *red_leaf(struct Qdisc *sch, unsigned long arg)
{
	struct red_sched_data *q = qdisc_priv(sch);
	return q->qdisc;
}

static unsigned long red_find(struct Qdisc *sch, u32 classid)
{
	return 1;
}

static void red_walk(struct Qdisc *sch, struct qdisc_walker *walker)
{
	if (!walker->stop) {
		if (walker->count >= walker->skip)
			if (walker->fn(sch, 1, walker) < 0) {
				walker->stop = 1;
				return;
			}
		walker->count++;
	}
}

static const struct Qdisc_class_ops red_class_ops = {
	.graft		=	red_graft,
	.leaf		=	red_leaf,
	.find		=	red_find,
	.walk		=	red_walk,
	.dump		=	red_dump_class,
};

static struct Qdisc_ops red_qdisc_ops __read_mostly = {
	.id		=	"red",
	.priv_size	=	sizeof(struct red_sched_data),
	.cl_ops		=	&red_class_ops,
	.enqueue	=	red_enqueue,
	.dequeue	=	red_dequeue,
	.peek		=	red_peek,
	.init		=	red_init,
	.reset		=	red_reset,
	.destroy	=	red_destroy,
	.change		=	red_change,
	.dump		=	red_dump,
	.dump_stats	=	red_dump_stats,
	.owner		=	THIS_MODULE,
};

static int __init red_module_init(void)
{
	return register_qdisc(&red_qdisc_ops);
}

static void __exit red_module_exit(void)
{
	unregister_qdisc(&red_qdisc_ops);
}

module_init(red_module_init)
module_exit(red_module_exit)

MODULE_LICENSE("GPL");