/*
 * net/sched/sch_sfb.c	  Stochastic Fair Blue
 *
 * Copyright (c) 2008-2011 Juliusz Chroboczek <[email protected]>
 * Copyright (c) 2011 Eric Dumazet <[email protected]>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * version 2 as published by the Free Software Foundation.
 *
 * W. Feng, D. Kandlur, D. Saha, K. Shin. Blue:
 * A New Class of Active Queue Management Algorithms.
 * U. Michigan CSE-TR-387-99, April 1999.
 *
 * http://www.thefengs.com/wuchang/blue/CSE-TR-387-99.pdf
 */

#include <linux/module.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/skbuff.h>
#include <linux/random.h>
#include <linux/jhash.h>
#include <net/ip.h>
#include <net/pkt_sched.h>
#include <net/inet_ecn.h>

/*
 * SFB uses two B[l][n] : L x N arrays of bins (L levels, N bins per level)
 * This implementation uses L = 8 and N = 16
 * This permits us to split one 32bit hash (provided per packet by rxhash or
 * external classifier) into 8 subhashes of 4 bits.
 */
#define SFB_BUCKET_SHIFT 4
#define SFB_NUMBUCKETS	(1 << SFB_BUCKET_SHIFT) /* N bins per Level */
#define SFB_BUCKET_MASK	(SFB_NUMBUCKETS - 1)
#define SFB_LEVELS	(32 / SFB_BUCKET_SHIFT) /* L */
#define SFB_MAX_PROB	0xFFFF
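
/* Illustrative example: with SFB_BUCKET_SHIFT = 4, a 32-bit hash such as
 * 0x2A6F13C8 indexes bucket 0x8 at level 0 (lowest nibble), then 0xC,
 * 0x3, 0x1, 0xF, 0x6, 0xA, and finally 0x2 at level 7 (highest nibble).
 */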

/* SFB algo uses a virtual queue, named "bin" */
struct sfb_bucket {
	u16		qlen; /* length of virtual queue */
	u16		p_mark; /* marking probability */
};

/* We use a double buffering right before hash change
 * (Section 4.4 of SFB reference : moving hash functions)
 */
struct sfb_bins {
	u32		  perturbation; /* jhash perturbation */
	struct sfb_bucket bins[SFB_LEVELS][SFB_NUMBUCKETS];
};

struct sfb_sched_data {
	struct Qdisc	*qdisc;
	struct tcf_proto __rcu *filter_list;
	unsigned long	rehash_interval;
	unsigned long	warmup_time;	/* double buffering warmup time in jiffies */
	u32		max;
	u32		bin_size;	/* maximum queue length per bin */
	u32		increment;	/* d1 */
	u32		decrement;	/* d2 */
	u32		limit;		/* HARD maximal queue length */
	u32		penalty_rate;
	u32		penalty_burst;
	u32		tokens_avail;
	unsigned long	rehash_time;
	unsigned long	token_time;

	u8		slot;		/* current active bins (0 or 1) */
	bool		double_buffering;
	struct sfb_bins bins[2];

	struct {
		u32	earlydrop;
		u32	penaltydrop;
		u32	bucketdrop;
		u32	queuedrop;
		u32	childdrop;	/* drops in child qdisc */
		u32	marked;		/* ECN mark */
	} stats;
};

/*
 * Each queued skb might be hashed on one or two bins
 * We store in skb_cb the two hash values.
 * (A zero value means double buffering was not used)
 */
struct sfb_skb_cb {
	u32 hashes[2];
};

static inline struct sfb_skb_cb *sfb_skb_cb(const struct sk_buff *skb)
{
	qdisc_cb_private_validate(skb, sizeof(struct sfb_skb_cb));
	return (struct sfb_skb_cb *)qdisc_skb_cb(skb)->data;
}

/*
 * If using 'internal' SFB flow classifier, hash comes from skb rxhash
 * If using external classifier, hash comes from the classid.
 */
static u32 sfb_hash(const struct sk_buff *skb, u32 slot)
{
	return sfb_skb_cb(skb)->hashes[slot];
}

/* Probabilities are coded as Q0.16 fixed-point values,
 * with 0xFFFF representing 65535/65536 (almost 1.0)
 * Addition and subtraction are saturating in [0, 65535]
 */
static u32 prob_plus(u32 p1, u32 p2)
{
	u32 res = p1 + p2;

	return min_t(u32, res, SFB_MAX_PROB);
}

static u32 prob_minus(u32 p1, u32 p2)
{
	return p1 > p2 ? p1 - p2 : 0;
}
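
/* Worked example (illustrative): 0x8000 encodes 0.5 in Q0.16.
 * prob_plus(0xC000, 0x8000) would be 0x14000 and saturates to
 * SFB_MAX_PROB (0xFFFF); prob_minus(0x2000, 0x3000) saturates to 0.
 */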

static void increment_one_qlen(u32 sfbhash, u32 slot, struct sfb_sched_data *q)
{
	int i;
	struct sfb_bucket *b = &q->bins[slot].bins[0][0];

	for (i = 0; i < SFB_LEVELS; i++) {
		u32 hash = sfbhash & SFB_BUCKET_MASK;

		sfbhash >>= SFB_BUCKET_SHIFT;
		if (b[hash].qlen < 0xFFFF)
			b[hash].qlen++;
		b += SFB_NUMBUCKETS; /* next level */
	}
}

static void increment_qlen(const struct sk_buff *skb, struct sfb_sched_data *q)
{
	u32 sfbhash;

	sfbhash = sfb_hash(skb, 0);
	if (sfbhash)
		increment_one_qlen(sfbhash, 0, q);

	sfbhash = sfb_hash(skb, 1);
	if (sfbhash)
		increment_one_qlen(sfbhash, 1, q);
}

static void decrement_one_qlen(u32 sfbhash, u32 slot,
			       struct sfb_sched_data *q)
{
	int i;
	struct sfb_bucket *b = &q->bins[slot].bins[0][0];

	for (i = 0; i < SFB_LEVELS; i++) {
		u32 hash = sfbhash & SFB_BUCKET_MASK;

		sfbhash >>= SFB_BUCKET_SHIFT;
		if (b[hash].qlen > 0)
			b[hash].qlen--;
		b += SFB_NUMBUCKETS; /* next level */
	}
}

static void decrement_qlen(const struct sk_buff *skb, struct sfb_sched_data *q)
{
	u32 sfbhash;

	sfbhash = sfb_hash(skb, 0);
	if (sfbhash)
		decrement_one_qlen(sfbhash, 0, q);

	sfbhash = sfb_hash(skb, 1);
	if (sfbhash)
		decrement_one_qlen(sfbhash, 1, q);
}

static void decrement_prob(struct sfb_bucket *b, struct sfb_sched_data *q)
{
	b->p_mark = prob_minus(b->p_mark, q->decrement);
}

static void increment_prob(struct sfb_bucket *b, struct sfb_sched_data *q)
{
	b->p_mark = prob_plus(b->p_mark, q->increment);
}

static void sfb_zero_all_buckets(struct sfb_sched_data *q)
{
	memset(&q->bins, 0, sizeof(q->bins));
}

/*
 * compute max qlen, max p_mark, and avg p_mark
 */
static u32 sfb_compute_qlen(u32 *prob_r, u32 *avgpm_r, const struct sfb_sched_data *q)
{
	int i;
	u32 qlen = 0, prob = 0, totalpm = 0;
	const struct sfb_bucket *b = &q->bins[q->slot].bins[0][0];

	for (i = 0; i < SFB_LEVELS * SFB_NUMBUCKETS; i++) {
		if (qlen < b->qlen)
			qlen = b->qlen;
		totalpm += b->p_mark;
		if (prob < b->p_mark)
			prob = b->p_mark;
		b++;
	}
	*prob_r = prob;
	*avgpm_r = totalpm / (SFB_LEVELS * SFB_NUMBUCKETS);
	return qlen;
}

static void sfb_init_perturbation(u32 slot, struct sfb_sched_data *q)
{
	q->bins[slot].perturbation = prandom_u32();
}

static void sfb_swap_slot(struct sfb_sched_data *q)
{
	sfb_init_perturbation(q->slot, q);
	q->slot ^= 1;
	q->double_buffering = false;
}
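
/* The swap above re-seeds the perturbation of the outgoing slot, so the
 * next double-buffering period warms it up under a hash function that
 * flows have not seen before ("moving hash functions", Section 4.4 of
 * the SFB reference).
 */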

/* Non elastic flows are allowed to use part of the bandwidth, expressed
 * in "penalty_rate" packets per second, with "penalty_burst" burst
 */
static bool sfb_rate_limit(struct sk_buff *skb, struct sfb_sched_data *q)
{
	if (q->penalty_rate == 0 || q->penalty_burst == 0)
		return true;

	if (q->tokens_avail < 1) {
		unsigned long age = min(10UL * HZ, jiffies - q->token_time);

		q->tokens_avail = (age * q->penalty_rate) / HZ;
		if (q->tokens_avail > q->penalty_burst)
			q->tokens_avail = q->penalty_burst;
		q->token_time = jiffies;
		if (q->tokens_avail < 1)
			return true;
	}

	q->tokens_avail--;
	return false;
}
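
/* Illustrative example: with the defaults penalty_rate = 10 and
 * penalty_burst = 20, an inelastic flow is granted roughly 10 packets
 * per second, and at most 20 packets in a burst after an idle spell
 * (the token refill age is clamped to 10 seconds).
 */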

static bool sfb_classify(struct sk_buff *skb, struct tcf_proto *fl,
			 int *qerr, u32 *salt)
{
	struct tcf_result res;
	int result;

	result = tc_classify(skb, fl, &res);
	if (result >= 0) {
#ifdef CONFIG_NET_CLS_ACT
		switch (result) {
		case TC_ACT_STOLEN:
		case TC_ACT_QUEUED:
			*qerr = NET_XMIT_SUCCESS | __NET_XMIT_STOLEN;
			/* fall through */
		case TC_ACT_SHOT:
			return false;
		}
#endif
		*salt = TC_H_MIN(res.classid);
		return true;
	}
	return false;
}

static int sfb_enqueue(struct sk_buff *skb, struct Qdisc *sch)
{
	struct sfb_sched_data *q = qdisc_priv(sch);
	struct Qdisc *child = q->qdisc;
	struct tcf_proto *fl;
	int i;
	u32 p_min = ~0;
	u32 minqlen = ~0;
	u32 r, sfbhash;
	u32 slot = q->slot;
	int ret = NET_XMIT_SUCCESS | __NET_XMIT_BYPASS;

	if (unlikely(sch->q.qlen >= q->limit)) {
		qdisc_qstats_overlimit(sch);
		q->stats.queuedrop++;
		goto drop;
	}

	if (q->rehash_interval > 0) {
		unsigned long limit = q->rehash_time + q->rehash_interval;

		if (unlikely(time_after(jiffies, limit))) {
			sfb_swap_slot(q);
			q->rehash_time = jiffies;
		} else if (unlikely(!q->double_buffering && q->warmup_time > 0 &&
				    time_after(jiffies, limit - q->warmup_time))) {
			q->double_buffering = true;
		}
	}
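
	/* Timeline example (illustrative, using the default parameters):
	 * with rehash_interval = 600 s and warmup_time = 60 s, double
	 * buffering turns on 540 s after the last rehash, and the slots
	 * are swapped at 600 s.
	 */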

	fl = rcu_dereference_bh(q->filter_list);
	if (fl) {
		u32 salt;

		/* If using external classifiers, get result and record it. */
		if (!sfb_classify(skb, fl, &ret, &salt))
			goto other_drop;
		sfbhash = jhash_1word(salt, q->bins[slot].perturbation);
	} else {
		sfbhash = skb_get_hash_perturb(skb, q->bins[slot].perturbation);
	}

	if (!sfbhash)
		sfbhash = 1;
	sfb_skb_cb(skb)->hashes[slot] = sfbhash;

	for (i = 0; i < SFB_LEVELS; i++) {
		u32 hash = sfbhash & SFB_BUCKET_MASK;
		struct sfb_bucket *b = &q->bins[slot].bins[i][hash];

		sfbhash >>= SFB_BUCKET_SHIFT;
		if (b->qlen == 0)
			decrement_prob(b, q);
		else if (b->qlen >= q->bin_size)
			increment_prob(b, q);
		if (minqlen > b->qlen)
			minqlen = b->qlen;
		if (p_min > b->p_mark)
			p_min = b->p_mark;
	}

	slot ^= 1;
	sfb_skb_cb(skb)->hashes[slot] = 0;

	if (unlikely(minqlen >= q->max)) {
		qdisc_qstats_overlimit(sch);
		q->stats.bucketdrop++;
		goto drop;
	}

	if (unlikely(p_min >= SFB_MAX_PROB)) {
		/* Inelastic flow */
		if (q->double_buffering) {
			sfbhash = skb_get_hash_perturb(skb,
						       q->bins[slot].perturbation);
			if (!sfbhash)
				sfbhash = 1;
			sfb_skb_cb(skb)->hashes[slot] = sfbhash;

			for (i = 0; i < SFB_LEVELS; i++) {
				u32 hash = sfbhash & SFB_BUCKET_MASK;
				struct sfb_bucket *b = &q->bins[slot].bins[i][hash];

				sfbhash >>= SFB_BUCKET_SHIFT;
				if (b->qlen == 0)
					decrement_prob(b, q);
				else if (b->qlen >= q->bin_size)
					increment_prob(b, q);
			}
		}
		if (sfb_rate_limit(skb, q)) {
			qdisc_qstats_overlimit(sch);
			q->stats.penaltydrop++;
			goto drop;
		}
		goto enqueue;
	}

	r = prandom_u32() & SFB_MAX_PROB;

	if (unlikely(r < p_min)) {
		if (unlikely(p_min > SFB_MAX_PROB / 2)) {
			/* If we're marking that many packets, then either
			 * this flow is unresponsive, or we're badly congested.
			 * In either case, we want to start dropping packets.
			 */
			if (r < (p_min - SFB_MAX_PROB / 2) * 2) {
				q->stats.earlydrop++;
				goto drop;
			}
		}
		if (INET_ECN_set_ce(skb)) {
			q->stats.marked++;
		} else {
			q->stats.earlydrop++;
			goto drop;
		}
	}
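
	/* Worked example (illustrative): if p_min = 0xC000 (0.75), a packet
	 * is acted on with probability ~0.75; since p_min > 1/2, it is
	 * dropped with probability ~2 * (0.75 - 0.5) = 0.5 and ECN-marked
	 * with the remaining ~0.25.
	 */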

enqueue:
	ret = qdisc_enqueue(skb, child);
	if (likely(ret == NET_XMIT_SUCCESS)) {
		sch->q.qlen++;
		increment_qlen(skb, q);
	} else if (net_xmit_drop_count(ret)) {
		q->stats.childdrop++;
		qdisc_qstats_drop(sch);
	}
	return ret;

drop:
	qdisc_drop(skb, sch);
	return NET_XMIT_CN;
other_drop:
	if (ret & __NET_XMIT_BYPASS)
		qdisc_qstats_drop(sch);
	kfree_skb(skb);
	return ret;
}

static struct sk_buff *sfb_dequeue(struct Qdisc *sch)
{
	struct sfb_sched_data *q = qdisc_priv(sch);
	struct Qdisc *child = q->qdisc;
	struct sk_buff *skb;

	skb = child->dequeue(q->qdisc);

	if (skb) {
		qdisc_bstats_update(sch, skb);
		sch->q.qlen--;
		decrement_qlen(skb, q);
	}

	return skb;
}

static struct sk_buff *sfb_peek(struct Qdisc *sch)
{
	struct sfb_sched_data *q = qdisc_priv(sch);
	struct Qdisc *child = q->qdisc;

	return child->ops->peek(child);
}

/* No sfb_drop -- impossible since the child doesn't return the dropped skb. */

static void sfb_reset(struct Qdisc *sch)
{
	struct sfb_sched_data *q = qdisc_priv(sch);

	qdisc_reset(q->qdisc);
	sch->q.qlen = 0;
	q->slot = 0;
	q->double_buffering = false;
	sfb_zero_all_buckets(q);
	sfb_init_perturbation(0, q);
}

static void sfb_destroy(struct Qdisc *sch)
{
	struct sfb_sched_data *q = qdisc_priv(sch);

	tcf_destroy_chain(&q->filter_list);
	qdisc_destroy(q->qdisc);
}

static const struct nla_policy sfb_policy[TCA_SFB_MAX + 1] = {
	[TCA_SFB_PARMS]	= { .len = sizeof(struct tc_sfb_qopt) },
};

static const struct tc_sfb_qopt sfb_default_ops = {
	.rehash_interval = 600 * MSEC_PER_SEC,
	.warmup_time = 60 * MSEC_PER_SEC,
	.limit = 0,
	.max = 25,
	.bin_size = 20,
	.increment = (SFB_MAX_PROB + 500) / 1000, /* 0.1 % */
	.decrement = (SFB_MAX_PROB + 3000) / 6000, /* ~0.017 % */
	.penalty_rate = 10,
	.penalty_burst = 20,
};
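
/* Reading the defaults above in human units (illustrative): rehash every
 * 600 seconds with a 60 second double-buffering warmup; a bin counts as
 * full above bin_size = 20 packets; a packet whose every bin already
 * holds max = 25 or more packets is dropped (bucketdrop); inelastic
 * flows trickle at 10 packets/sec with bursts of 20.  A limit of 0 is
 * replaced by the device tx_queue_len in sfb_change() below.
 */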

static int sfb_change(struct Qdisc *sch, struct nlattr *opt)
{
	struct sfb_sched_data *q = qdisc_priv(sch);
	struct Qdisc *child;
	struct nlattr *tb[TCA_SFB_MAX + 1];
	const struct tc_sfb_qopt *ctl = &sfb_default_ops;
	u32 limit;
	int err;

	if (opt) {
		err = nla_parse_nested(tb, TCA_SFB_MAX, opt, sfb_policy);
		if (err < 0)
			return -EINVAL;

		if (tb[TCA_SFB_PARMS] == NULL)
			return -EINVAL;

		ctl = nla_data(tb[TCA_SFB_PARMS]);
	}

	limit = ctl->limit;
	if (limit == 0)
		limit = max_t(u32, qdisc_dev(sch)->tx_queue_len, 1);

	child = fifo_create_dflt(sch, &pfifo_qdisc_ops, limit);
	if (IS_ERR(child))
		return PTR_ERR(child);

	sch_tree_lock(sch);

	qdisc_tree_decrease_qlen(q->qdisc, q->qdisc->q.qlen);
	qdisc_destroy(q->qdisc);
	q->qdisc = child;

	q->rehash_interval = msecs_to_jiffies(ctl->rehash_interval);
	q->warmup_time = msecs_to_jiffies(ctl->warmup_time);
	q->rehash_time = jiffies;
	q->limit = limit;
	q->increment = ctl->increment;
	q->decrement = ctl->decrement;
	q->max = ctl->max;
	q->bin_size = ctl->bin_size;
	q->penalty_rate = ctl->penalty_rate;
	q->penalty_burst = ctl->penalty_burst;
	q->tokens_avail = ctl->penalty_burst;
	q->token_time = jiffies;

	q->slot = 0;
	q->double_buffering = false;
	sfb_zero_all_buckets(q);
	sfb_init_perturbation(0, q);
	sfb_init_perturbation(1, q);

	sch_tree_unlock(sch);

	return 0;
}

static int sfb_init(struct Qdisc *sch, struct nlattr *opt)
{
	struct sfb_sched_data *q = qdisc_priv(sch);

	q->qdisc = &noop_qdisc;
	return sfb_change(sch, opt);
}

static int sfb_dump(struct Qdisc *sch, struct sk_buff *skb)
{
	struct sfb_sched_data *q = qdisc_priv(sch);
	struct nlattr *opts;
	struct tc_sfb_qopt opt = {
		.rehash_interval = jiffies_to_msecs(q->rehash_interval),
		.warmup_time = jiffies_to_msecs(q->warmup_time),
		.limit = q->limit,
		.max = q->max,
		.bin_size = q->bin_size,
		.increment = q->increment,
		.decrement = q->decrement,
		.penalty_rate = q->penalty_rate,
		.penalty_burst = q->penalty_burst,
	};

	sch->qstats.backlog = q->qdisc->qstats.backlog;
	opts = nla_nest_start(skb, TCA_OPTIONS);
	if (opts == NULL)
		goto nla_put_failure;
	if (nla_put(skb, TCA_SFB_PARMS, sizeof(opt), &opt))
		goto nla_put_failure;
	return nla_nest_end(skb, opts);

nla_put_failure:
	nla_nest_cancel(skb, opts);
	return -EMSGSIZE;
}

static int sfb_dump_stats(struct Qdisc *sch, struct gnet_dump *d)
{
	struct sfb_sched_data *q = qdisc_priv(sch);
	struct tc_sfb_xstats st = {
		.earlydrop = q->stats.earlydrop,
		.penaltydrop = q->stats.penaltydrop,
		.bucketdrop = q->stats.bucketdrop,
		.queuedrop = q->stats.queuedrop,
		.childdrop = q->stats.childdrop,
		.marked = q->stats.marked,
	};

	st.maxqlen = sfb_compute_qlen(&st.maxprob, &st.avgprob, q);

	return gnet_stats_copy_app(d, &st, sizeof(st));
}

static int sfb_dump_class(struct Qdisc *sch, unsigned long cl,
			  struct sk_buff *skb, struct tcmsg *tcm)
{
	return -ENOSYS;
}

static int sfb_graft(struct Qdisc *sch, unsigned long arg, struct Qdisc *new,
		     struct Qdisc **old)
{
	struct sfb_sched_data *q = qdisc_priv(sch);

	if (new == NULL)
		new = &noop_qdisc;

	sch_tree_lock(sch);
	*old = q->qdisc;
	q->qdisc = new;
	qdisc_tree_decrease_qlen(*old, (*old)->q.qlen);
	qdisc_reset(*old);
	sch_tree_unlock(sch);
	return 0;
}

static struct Qdisc *sfb_leaf(struct Qdisc *sch, unsigned long arg)
{
	struct sfb_sched_data *q = qdisc_priv(sch);

	return q->qdisc;
}

static unsigned long sfb_get(struct Qdisc *sch, u32 classid)
{
	return 1;
}

static void sfb_put(struct Qdisc *sch, unsigned long arg)
{
}

static int sfb_change_class(struct Qdisc *sch, u32 classid, u32 parentid,
			    struct nlattr **tca, unsigned long *arg)
{
	return -ENOSYS;
}

static int sfb_delete(struct Qdisc *sch, unsigned long cl)
{
	return -ENOSYS;
}

static void sfb_walk(struct Qdisc *sch, struct qdisc_walker *walker)
{
	if (!walker->stop) {
		if (walker->count >= walker->skip)
			if (walker->fn(sch, 1, walker) < 0) {
				walker->stop = 1;
				return;
			}
		walker->count++;
	}
}

static struct tcf_proto __rcu **sfb_find_tcf(struct Qdisc *sch,
					     unsigned long cl)
{
	struct sfb_sched_data *q = qdisc_priv(sch);

	if (cl)
		return NULL;
	return &q->filter_list;
}

static unsigned long sfb_bind(struct Qdisc *sch, unsigned long parent,
			      u32 classid)
{
	return 0;
}

static const struct Qdisc_class_ops sfb_class_ops = {
	.graft		= sfb_graft,
	.leaf		= sfb_leaf,
	.get		= sfb_get,
	.put		= sfb_put,
	.change		= sfb_change_class,
	.delete		= sfb_delete,
	.walk		= sfb_walk,
	.tcf_chain	= sfb_find_tcf,
	.bind_tcf	= sfb_bind,
	.unbind_tcf	= sfb_put,
	.dump		= sfb_dump_class,
};

static struct Qdisc_ops sfb_qdisc_ops __read_mostly = {
	.id		= "sfb",
	.priv_size	= sizeof(struct sfb_sched_data),
	.cl_ops		= &sfb_class_ops,
	.enqueue	= sfb_enqueue,
	.dequeue	= sfb_dequeue,
	.peek		= sfb_peek,
	.init		= sfb_init,
	.reset		= sfb_reset,
	.destroy	= sfb_destroy,
	.change		= sfb_change,
	.dump		= sfb_dump,
	.dump_stats	= sfb_dump_stats,
	.owner		= THIS_MODULE,
};

static int __init sfb_module_init(void)
{
	return register_qdisc(&sfb_qdisc_ops);
}

static void __exit sfb_module_exit(void)
{
	unregister_qdisc(&sfb_qdisc_ops);
}

module_init(sfb_module_init)
module_exit(sfb_module_exit)

MODULE_DESCRIPTION("Stochastic Fair Blue queue discipline");
MODULE_AUTHOR("Juliusz Chroboczek");
MODULE_AUTHOR("Eric Dumazet");
MODULE_LICENSE("GPL");