// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * net/sched/sch_red.c	Random Early Detection queue.
 *
 * Authors:	Alexey Kuznetsov, <kuznet@ms2.inr.ac.ru>
 *
 * Changes:
 * J Hadi Salim 980914:	computation fixes
 * Alexey Makarenko <makar@phoenix.kharkov.ua> 990814: qave on idle link was calculated incorrectly.
 * J Hadi Salim 980816:  ECN support
 */

#include <linux/module.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/skbuff.h>
#include <net/pkt_sched.h>
#include <net/pkt_cls.h>
#include <net/inet_ecn.h>
#include <net/red.h>


/*	Parameters, settable by user:
	-----------------------------

	limit		- bytes (must be > qth_max + burst)

	Hard limit on queue length, should be chosen >qth_max
	to allow packet bursts. This parameter does not
	affect the algorithm's behaviour and can be chosen
	arbitrarily high (well, less than ram size).
	Really, this limit will never be reached
	if RED works correctly.
 */

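/* For illustration only (not part of the original sources): a typical
 * iproute2 setup in the spirit of tc-red(8), with limit comfortably
 * above qth_max plus burst; the device name and numbers are examples,
 * not recommendations:
 *
 *	tc qdisc add dev eth0 root red limit 400000 min 30000 \
 *		max 90000 avpkt 1000 burst 55 ecn adaptative \
 *		bandwidth 10Mbit
 */
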
struct red_sched_data {
	u32			limit;		/* HARD maximal queue length */

	unsigned char		flags;
	/* Non-flags in tc_red_qopt.flags. */
	unsigned char		userbits;

	struct timer_list	adapt_timer;
	struct Qdisc		*sch;
	struct red_parms	parms;
	struct red_vars		vars;
	struct red_stats	stats;
	struct Qdisc		*qdisc;
	struct tcf_qevent	qe_early_drop;
	struct tcf_qevent	qe_mark;
};

#define TC_RED_SUPPORTED_FLAGS (TC_RED_HISTORIC_FLAGS | TC_RED_NODROP)

static inline int red_use_ecn(struct red_sched_data *q)
{
	return q->flags & TC_RED_ECN;
}

static inline int red_use_harddrop(struct red_sched_data *q)
{
	return q->flags & TC_RED_HARDDROP;
}

static int red_use_nodrop(struct red_sched_data *q)
{
	return q->flags & TC_RED_NODROP;
}

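/* red_calc_qavg() (include/net/red.h) maintains the classic RED moving
 * average of the child backlog, roughly
 *
 *	qavg = (1 - W) * qavg + W * backlog,	W = 2^(-Wlog),
 *
 * and red_action() compares it against qth_min/qth_max to choose among
 * the RED_DONT_MARK / RED_PROB_MARK / RED_HARD_MARK cases below.
 */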
static int red_enqueue(struct sk_buff *skb, struct Qdisc *sch,
		       struct sk_buff **to_free)
{
	struct red_sched_data *q = qdisc_priv(sch);
	struct Qdisc *child = q->qdisc;
	int ret;

	q->vars.qavg = red_calc_qavg(&q->parms,
				     &q->vars,
				     child->qstats.backlog);

	if (red_is_idling(&q->vars))
		red_end_of_idle_period(&q->vars);

	switch (red_action(&q->parms, &q->vars, q->vars.qavg)) {
	case RED_DONT_MARK:
		break;

	case RED_PROB_MARK:
		qdisc_qstats_overlimit(sch);
		if (!red_use_ecn(q)) {
			q->stats.prob_drop++;
			goto congestion_drop;
		}

		if (INET_ECN_set_ce(skb)) {
			q->stats.prob_mark++;
			skb = tcf_qevent_handle(&q->qe_mark, sch, skb, to_free, &ret);
			if (!skb)
				return NET_XMIT_CN | ret;
		} else if (!red_use_nodrop(q)) {
			q->stats.prob_drop++;
			goto congestion_drop;
		}

		/* Non-ECT packet in ECN nodrop mode: queue it. */
		break;

	case RED_HARD_MARK:
		qdisc_qstats_overlimit(sch);
		if (red_use_harddrop(q) || !red_use_ecn(q)) {
			q->stats.forced_drop++;
			goto congestion_drop;
		}

		if (INET_ECN_set_ce(skb)) {
			q->stats.forced_mark++;
			skb = tcf_qevent_handle(&q->qe_mark, sch, skb, to_free, &ret);
			if (!skb)
				return NET_XMIT_CN | ret;
		} else if (!red_use_nodrop(q)) {
			q->stats.forced_drop++;
			goto congestion_drop;
		}

		/* Non-ECT packet in ECN nodrop mode: queue it. */
		break;
	}

	ret = qdisc_enqueue(skb, child, to_free);
	if (likely(ret == NET_XMIT_SUCCESS)) {
		qdisc_qstats_backlog_inc(sch, skb);
		sch->q.qlen++;
	} else if (net_xmit_drop_count(ret)) {
		q->stats.pdrop++;
		qdisc_qstats_drop(sch);
	}
	return ret;

congestion_drop:
	skb = tcf_qevent_handle(&q->qe_early_drop, sch, skb, to_free, &ret);
	if (!skb)
		return NET_XMIT_CN | ret;

	qdisc_drop(skb, sch, to_free);
	return NET_XMIT_CN;
}

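/* When the child runs empty, the start of the idle period is recorded
 * so that red_calc_qavg() can decay qavg for the time the link sat
 * idle (cf. the 990814 note in the header above).
 */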
static struct sk_buff *red_dequeue(struct Qdisc *sch)
{
	struct sk_buff *skb;
	struct red_sched_data *q = qdisc_priv(sch);
	struct Qdisc *child = q->qdisc;

	skb = child->dequeue(child);
	if (skb) {
		qdisc_bstats_update(sch, skb);
		qdisc_qstats_backlog_dec(sch, skb);
		sch->q.qlen--;
	} else {
		if (!red_is_idling(&q->vars))
			red_start_of_idle_period(&q->vars);
	}
	return skb;
}

static struct sk_buff *red_peek(struct Qdisc *sch)
{
	struct red_sched_data *q = qdisc_priv(sch);
	struct Qdisc *child = q->qdisc;

	return child->ops->peek(child);
}

static void red_reset(struct Qdisc *sch)
{
	struct red_sched_data *q = qdisc_priv(sch);

	qdisc_reset(q->qdisc);
	sch->qstats.backlog = 0;
	sch->q.qlen = 0;
	red_restart(&q->vars);
}

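/* Note: red_set_parms() stores qth_min/qth_max left-shifted by Wlog
 * (the same fixed-point scale as qavg), so the offload request below
 * shifts them back down to plain byte thresholds for the driver.
 */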
static int red_offload(struct Qdisc *sch, bool enable)
{
	struct red_sched_data *q = qdisc_priv(sch);
	struct net_device *dev = qdisc_dev(sch);
	struct tc_red_qopt_offload opt = {
		.handle = sch->handle,
		.parent = sch->parent,
	};

	if (!tc_can_offload(dev) || !dev->netdev_ops->ndo_setup_tc)
		return -EOPNOTSUPP;

	if (enable) {
		opt.command = TC_RED_REPLACE;
		opt.set.min = q->parms.qth_min >> q->parms.Wlog;
		opt.set.max = q->parms.qth_max >> q->parms.Wlog;
		opt.set.probability = q->parms.max_P;
		opt.set.limit = q->limit;
		opt.set.is_ecn = red_use_ecn(q);
		opt.set.is_harddrop = red_use_harddrop(q);
		opt.set.is_nodrop = red_use_nodrop(q);
		opt.set.qstats = &sch->qstats;
	} else {
		opt.command = TC_RED_DESTROY;
	}

	return dev->netdev_ops->ndo_setup_tc(dev, TC_SETUP_QDISC_RED, &opt);
}

static void red_destroy(struct Qdisc *sch)
{
	struct red_sched_data *q = qdisc_priv(sch);

	tcf_qevent_destroy(&q->qe_mark, sch);
	tcf_qevent_destroy(&q->qe_early_drop, sch);
	del_timer_sync(&q->adapt_timer);
	red_offload(sch, false);
	qdisc_put(q->qdisc);
}

static const struct nla_policy red_policy[TCA_RED_MAX + 1] = {
	[TCA_RED_UNSPEC] = { .strict_start_type = TCA_RED_FLAGS },
	[TCA_RED_PARMS] = { .len = sizeof(struct tc_red_qopt) },
	[TCA_RED_STAB] = { .len = RED_STAB_SIZE },
	[TCA_RED_MAX_P] = { .type = NLA_U32 },
	[TCA_RED_FLAGS] = NLA_POLICY_BITFIELD32(TC_RED_SUPPORTED_FLAGS),
	[TCA_RED_EARLY_DROP_BLOCK] = { .type = NLA_U32 },
	[TCA_RED_MARK_BLOCK] = { .type = NLA_U32 },
};

static int __red_change(struct Qdisc *sch, struct nlattr **tb,
			struct netlink_ext_ack *extack)
{
	struct Qdisc *old_child = NULL, *child = NULL;
	struct red_sched_data *q = qdisc_priv(sch);
	struct nla_bitfield32 flags_bf;
	struct tc_red_qopt *ctl;
	unsigned char userbits;
	unsigned char flags;
	int err;
	u32 max_P;

	if (tb[TCA_RED_PARMS] == NULL ||
	    tb[TCA_RED_STAB] == NULL)
		return -EINVAL;

	max_P = tb[TCA_RED_MAX_P] ? nla_get_u32(tb[TCA_RED_MAX_P]) : 0;

	ctl = nla_data(tb[TCA_RED_PARMS]);
	if (!red_check_params(ctl->qth_min, ctl->qth_max, ctl->Wlog))
		return -EINVAL;

	err = red_get_flags(ctl->flags, TC_RED_HISTORIC_FLAGS,
			    tb[TCA_RED_FLAGS], TC_RED_SUPPORTED_FLAGS,
			    &flags_bf, &userbits, extack);
	if (err)
		return err;

	if (ctl->limit > 0) {
		child = fifo_create_dflt(sch, &bfifo_qdisc_ops, ctl->limit,
					 extack);
		if (IS_ERR(child))
			return PTR_ERR(child);

		/* child is fifo, no need to check for noop_qdisc */
		qdisc_hash_add(child, true);
	}

	sch_tree_lock(sch);

	flags = (q->flags & ~flags_bf.selector) | flags_bf.value;
	err = red_validate_flags(flags, extack);
	if (err)
		goto unlock_out;

	q->flags = flags;
	q->userbits = userbits;
	q->limit = ctl->limit;
	if (child) {
		qdisc_tree_flush_backlog(q->qdisc);
		old_child = q->qdisc;
		q->qdisc = child;
	}

	red_set_parms(&q->parms,
		      ctl->qth_min, ctl->qth_max, ctl->Wlog,
		      ctl->Plog, ctl->Scell_log,
		      nla_data(tb[TCA_RED_STAB]),
		      max_P);
	red_set_vars(&q->vars);

	del_timer(&q->adapt_timer);
	if (ctl->flags & TC_RED_ADAPTATIVE)
		mod_timer(&q->adapt_timer, jiffies + HZ/2);

	if (!q->qdisc->q.qlen)
		red_start_of_idle_period(&q->vars);

	sch_tree_unlock(sch);

	red_offload(sch, true);

	if (old_child)
		qdisc_put(old_child);
	return 0;

unlock_out:
	sch_tree_unlock(sch);
	if (child)
		qdisc_put(child);
	return err;
}

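/* Adaptive RED: with TC_RED_ADAPTATIVE set, this timer fires every
 * 500ms and lets red_adaptative_algo() adjust max_P so that qavg
 * settles between the two thresholds (cf. Floyd's "Adaptive RED").
 */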
static inline void red_adaptative_timer(struct timer_list *t)
{
	struct red_sched_data *q = from_timer(q, t, adapt_timer);
	struct Qdisc *sch = q->sch;
	spinlock_t *root_lock = qdisc_lock(qdisc_root_sleeping(sch));

	spin_lock(root_lock);
	red_adaptative_algo(&q->parms, &q->vars);
	mod_timer(&q->adapt_timer, jiffies + HZ/2);
	spin_unlock(root_lock);
}

static int red_init(struct Qdisc *sch, struct nlattr *opt,
		    struct netlink_ext_ack *extack)
{
	struct red_sched_data *q = qdisc_priv(sch);
	struct nlattr *tb[TCA_RED_MAX + 1];
	int err;

	q->qdisc = &noop_qdisc;
	q->sch = sch;
	timer_setup(&q->adapt_timer, red_adaptative_timer, 0);

	if (!opt)
		return -EINVAL;

	err = nla_parse_nested_deprecated(tb, TCA_RED_MAX, opt, red_policy,
					  extack);
	if (err < 0)
		return err;

	err = __red_change(sch, tb, extack);
	if (err)
		return err;

	err = tcf_qevent_init(&q->qe_early_drop, sch,
			      FLOW_BLOCK_BINDER_TYPE_RED_EARLY_DROP,
			      tb[TCA_RED_EARLY_DROP_BLOCK], extack);
	if (err)
		return err;

	return tcf_qevent_init(&q->qe_mark, sch,
			       FLOW_BLOCK_BINDER_TYPE_RED_MARK,
			       tb[TCA_RED_MARK_BLOCK], extack);
}

static int red_change(struct Qdisc *sch, struct nlattr *opt,
		      struct netlink_ext_ack *extack)
{
	struct red_sched_data *q = qdisc_priv(sch);
	struct nlattr *tb[TCA_RED_MAX + 1];
	int err;

	if (!opt)
		return -EINVAL;

	err = nla_parse_nested_deprecated(tb, TCA_RED_MAX, opt, red_policy,
					  extack);
	if (err < 0)
		return err;

	err = tcf_qevent_validate_change(&q->qe_early_drop,
					 tb[TCA_RED_EARLY_DROP_BLOCK], extack);
	if (err)
		return err;

	err = tcf_qevent_validate_change(&q->qe_mark,
					 tb[TCA_RED_MARK_BLOCK], extack);
	if (err)
		return err;

	return __red_change(sch, tb, extack);
}

static int red_dump_offload_stats(struct Qdisc *sch)
{
	struct tc_red_qopt_offload hw_stats = {
		.command = TC_RED_STATS,
		.handle = sch->handle,
		.parent = sch->parent,
		{
			.stats.bstats = &sch->bstats,
			.stats.qstats = &sch->qstats,
		},
	};

	return qdisc_offload_dump_helper(sch, TC_SETUP_QDISC_RED, &hw_stats);
}

static int red_dump(struct Qdisc *sch, struct sk_buff *skb)
{
	struct red_sched_data *q = qdisc_priv(sch);
	struct nlattr *opts = NULL;
	struct tc_red_qopt opt = {
		.limit		= q->limit,
		.flags		= (q->flags & TC_RED_HISTORIC_FLAGS) |
				  q->userbits,
		.qth_min	= q->parms.qth_min >> q->parms.Wlog,
		.qth_max	= q->parms.qth_max >> q->parms.Wlog,
		.Wlog		= q->parms.Wlog,
		.Plog		= q->parms.Plog,
		.Scell_log	= q->parms.Scell_log,
	};
	int err;

	err = red_dump_offload_stats(sch);
	if (err)
		goto nla_put_failure;

	opts = nla_nest_start_noflag(skb, TCA_OPTIONS);
	if (opts == NULL)
		goto nla_put_failure;
	if (nla_put(skb, TCA_RED_PARMS, sizeof(opt), &opt) ||
	    nla_put_u32(skb, TCA_RED_MAX_P, q->parms.max_P) ||
	    nla_put_bitfield32(skb, TCA_RED_FLAGS,
			       q->flags, TC_RED_SUPPORTED_FLAGS) ||
	    tcf_qevent_dump(skb, TCA_RED_MARK_BLOCK, &q->qe_mark) ||
	    tcf_qevent_dump(skb, TCA_RED_EARLY_DROP_BLOCK, &q->qe_early_drop))
		goto nla_put_failure;
	return nla_nest_end(skb, opts);

nla_put_failure:
	nla_nest_cancel(skb, opts);
	return -EMSGSIZE;
}

static int red_dump_stats(struct Qdisc *sch, struct gnet_dump *d)
{
	struct red_sched_data *q = qdisc_priv(sch);
	struct net_device *dev = qdisc_dev(sch);
	struct tc_red_xstats st = {0};

	if (sch->flags & TCQ_F_OFFLOADED) {
		struct tc_red_qopt_offload hw_stats_request = {
			.command = TC_RED_XSTATS,
			.handle = sch->handle,
			.parent = sch->parent,
			{
				.xstats = &q->stats,
			},
		};
		dev->netdev_ops->ndo_setup_tc(dev, TC_SETUP_QDISC_RED,
					      &hw_stats_request);
	}
	st.early = q->stats.prob_drop + q->stats.forced_drop;
	st.pdrop = q->stats.pdrop;
	st.other = q->stats.other;
	st.marked = q->stats.prob_mark + q->stats.forced_mark;

	return gnet_stats_copy_app(d, &st, sizeof(st));
}

static int red_dump_class(struct Qdisc *sch, unsigned long cl,
			  struct sk_buff *skb, struct tcmsg *tcm)
{
	struct red_sched_data *q = qdisc_priv(sch);

	tcm->tcm_handle |= TC_H_MIN(1);
	tcm->tcm_info = q->qdisc->handle;
	return 0;
}

static void red_graft_offload(struct Qdisc *sch,
			      struct Qdisc *new, struct Qdisc *old,
			      struct netlink_ext_ack *extack)
{
	struct tc_red_qopt_offload graft_offload = {
		.handle		= sch->handle,
		.parent		= sch->parent,
		.child_handle	= new->handle,
		.command	= TC_RED_GRAFT,
	};

	qdisc_offload_graft_helper(qdisc_dev(sch), sch, new, old,
				   TC_SETUP_QDISC_RED, &graft_offload, extack);
}

static int red_graft(struct Qdisc *sch, unsigned long arg, struct Qdisc *new,
		     struct Qdisc **old, struct netlink_ext_ack *extack)
{
	struct red_sched_data *q = qdisc_priv(sch);

	if (new == NULL)
		new = &noop_qdisc;

	*old = qdisc_replace(sch, new, &q->qdisc);

	red_graft_offload(sch, new, *old, extack);
	return 0;
}

static struct Qdisc *red_leaf(struct Qdisc *sch, unsigned long arg)
{
	struct red_sched_data *q = qdisc_priv(sch);
	return q->qdisc;
}

static unsigned long red_find(struct Qdisc *sch, u32 classid)
{
	return 1;
}

static void red_walk(struct Qdisc *sch, struct qdisc_walker *walker)
{
	if (!walker->stop) {
		if (walker->count >= walker->skip)
			if (walker->fn(sch, 1, walker) < 0) {
				walker->stop = 1;
				return;
			}
		walker->count++;
	}
}

static const struct Qdisc_class_ops red_class_ops = {
	.graft		=	red_graft,
	.leaf		=	red_leaf,
	.find		=	red_find,
	.walk		=	red_walk,
	.dump		=	red_dump_class,
};

static struct Qdisc_ops red_qdisc_ops __read_mostly = {
	.id		=	"red",
	.priv_size	=	sizeof(struct red_sched_data),
	.cl_ops		=	&red_class_ops,
	.enqueue	=	red_enqueue,
	.dequeue	=	red_dequeue,
	.peek		=	red_peek,
	.init		=	red_init,
	.reset		=	red_reset,
	.destroy	=	red_destroy,
	.change		=	red_change,
	.dump		=	red_dump,
	.dump_stats	=	red_dump_stats,
	.owner		=	THIS_MODULE,
};

static int __init red_module_init(void)
{
	return register_qdisc(&red_qdisc_ops);
}

static void __exit red_module_exit(void)
{
	unregister_qdisc(&red_qdisc_ops);
}

module_init(red_module_init)
module_exit(red_module_exit)

MODULE_LICENSE("GPL");