/*
 * net/sched/sch_sfq.c	Stochastic Fairness Queueing discipline.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 *
 * Authors:	Alexey Kuznetsov, <[email protected]>
 */

#include <linux/module.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/jiffies.h>
#include <linux/string.h>
#include <linux/in.h>
#include <linux/errno.h>
#include <linux/init.h>
#include <linux/ipv6.h>
#include <linux/skbuff.h>
#include <linux/jhash.h>
#include <linux/slab.h>
#include <net/ip.h>
#include <net/netlink.h>
#include <net/pkt_sched.h>

/*	Stochastic Fairness Queuing algorithm.
	=======================================

	Source:
	Paul E. McKenney "Stochastic Fairness Queuing",
	IEEE INFOCOMM'90 Proceedings, San Francisco, 1990.

	Paul E. McKenney "Stochastic Fairness Queuing",
	"Interworking: Research and Experience", v.2, 1991, p.113-131.


	See also:
	M. Shreedhar and George Varghese "Efficient Fair
	Queuing using Deficit Round Robin", Proc. SIGCOMM 95.


	This is not the thing that is usually called (W)FQ nowadays.
	It does not use any timestamp mechanism, but instead
	processes queues in round-robin order.

	ADVANTAGE:

	- It is very cheap. Both CPU and memory requirements are minimal.

	DRAWBACKS:

	- "Stochastic" -> It is not 100% fair.
	When hash collisions occur, several flows are considered as one.

	- "Round-robin" -> It introduces larger delays than virtual clock
	based schemes, and should not be used for isolating interactive
	traffic from non-interactive. This means that this scheduler
	should be used as a leaf of CBQ or P3, which puts interactive
	traffic into a higher priority band.

	We still need true WFQ for top level CSZ, but using WFQ
	for the best effort traffic is absolutely pointless:
	SFQ is superior for this purpose.

	IMPLEMENTATION:
	This implementation limits maximal queue length to 128;
	max mtu to 2^18-1; max 128 flows, number of hash buckets to 1024.
	The only goal of these restrictions was that all data
	fit into one 4K page on 32-bit arches.

	It is easy to increase these values, but not in flight.  */

#define SFQ_DEPTH		128 /* max number of packets per flow */
#define SFQ_SLOTS		128 /* max number of flows */
#define SFQ_EMPTY_SLOT		255
#define SFQ_HASH_DIVISOR	1024
/* We use 16 bits to store allot, and want to handle packets up to 64K
 * Scale allot by 8 (1<<3) so that no overflow occurs.
 */
#define SFQ_ALLOT_SHIFT		3
#define SFQ_ALLOT_SIZE(X)	DIV_ROUND_UP(X, 1 << SFQ_ALLOT_SHIFT)
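
/*
 * Worked example of the scaling above: SFQ_ALLOT_SIZE(65535) =
 * DIV_ROUND_UP(65535, 8) = 8192, so even a maximal 64K packet, plus a few
 * accumulated quanta, stays well within the signed 16-bit 'allot' field
 * of struct sfq_slot below.
 */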

/* This type should contain at least SFQ_DEPTH + SFQ_SLOTS values */
typedef unsigned char sfq_index;

/*
 * To save space, we don't use pointers.
 * Small indexes [0 ... SFQ_SLOTS - 1] are 'pointers' to slots[] array
 * while following values [SFQ_SLOTS ... SFQ_SLOTS + SFQ_DEPTH - 1]
 * are 'pointers' to dep[] array
 */
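/*
 * For example, index 5 'points' to slots[5], while index SFQ_SLOTS + 3
 * 'points' to dep[3], the head of the chain of slots currently holding
 * exactly 3 packets (see sfq_dep_head() below).
 */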
struct sfq_head
{
	sfq_index	next;
	sfq_index	prev;
};

struct sfq_slot {
	struct sk_buff	*skblist_next;
	struct sk_buff	*skblist_prev;
	sfq_index	qlen;		/* number of skbs in skblist */
	sfq_index	next;		/* next slot in sfq chain */
	struct sfq_head	dep;		/* anchor in dep[] chains */
	unsigned short	hash;		/* hash value (index in ht[]) */
	short		allot;		/* credit for this slot */
};

struct sfq_sched_data
{
/* Parameters */
	int		perturb_period;
	unsigned	quantum;	/* Allotment per round: MUST BE >= MTU */
	int		limit;

/* Variables */
	struct tcf_proto *filter_list;
	struct timer_list perturb_timer;
	u32		perturbation;
	sfq_index	cur_depth;	/* depth of longest slot */
	unsigned short	scaled_quantum;	/* SFQ_ALLOT_SIZE(quantum) */
	struct sfq_slot	*tail;		/* current slot in round */
	sfq_index	ht[SFQ_HASH_DIVISOR];	/* Hash table */
	struct sfq_slot	slots[SFQ_SLOTS];
	struct sfq_head	dep[SFQ_DEPTH];	/* Linked list of slots, indexed by depth */
};
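/*
 * Rough size check (assuming a typical 32-bit layout with no unusual
 * padding): ht[] is 1024 bytes, slots[] is 128 * 16 = 2048 bytes and
 * dep[] is 128 * 2 = 256 bytes, so together with the scalar fields the
 * whole private area stays within the single 4K page mentioned above.
 */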

/*
 * A sfq_head lives either in a sfq_slot or in the dep[] array
 */
static inline struct sfq_head *sfq_dep_head(struct sfq_sched_data *q, sfq_index val)
{
	if (val < SFQ_SLOTS)
		return &q->slots[val].dep;
	return &q->dep[val - SFQ_SLOTS];
}

static __inline__ unsigned sfq_fold_hash(struct sfq_sched_data *q, u32 h, u32 h1)
{
	return jhash_2words(h, h1, q->perturbation) & (SFQ_HASH_DIVISOR - 1);
}

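/*
 * Flow hash: fold addresses, the L4 protocol and, when the port words can
 * be pulled (and an IPv4 packet is not fragmented), 32 bits of the
 * transport header into one of SFQ_HASH_DIVISOR buckets, salted with
 * q->perturbation so that colliding flows change buckets over time.
 */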
static unsigned sfq_hash(struct sfq_sched_data *q, struct sk_buff *skb)
{
	u32 h, h2;

	switch (skb->protocol) {
	case htons(ETH_P_IP):
	{
		const struct iphdr *iph;
		int poff;

		if (!pskb_network_may_pull(skb, sizeof(*iph)))
			goto err;
		iph = ip_hdr(skb);
		h = (__force u32)iph->daddr;
		h2 = (__force u32)iph->saddr ^ iph->protocol;
		if (iph->frag_off & htons(IP_MF|IP_OFFSET))
			break;
		poff = proto_ports_offset(iph->protocol);
		if (poff >= 0 &&
		    pskb_network_may_pull(skb, iph->ihl * 4 + 4 + poff)) {
			iph = ip_hdr(skb);
			h2 ^= *(u32 *)((void *)iph + iph->ihl * 4 + poff);
		}
		break;
	}
	case htons(ETH_P_IPV6):
	{
		struct ipv6hdr *iph;
		int poff;

		if (!pskb_network_may_pull(skb, sizeof(*iph)))
			goto err;
		iph = ipv6_hdr(skb);
		h = (__force u32)iph->daddr.s6_addr32[3];
		h2 = (__force u32)iph->saddr.s6_addr32[3] ^ iph->nexthdr;
		poff = proto_ports_offset(iph->nexthdr);
		if (poff >= 0 &&
		    pskb_network_may_pull(skb, sizeof(*iph) + 4 + poff)) {
			iph = ipv6_hdr(skb);
			h2 ^= *(u32 *)((void *)iph + sizeof(*iph) + poff);
		}
		break;
	}
	default:
err:
		h = (unsigned long)skb_dst(skb) ^ (__force u32)skb->protocol;
		h2 = (unsigned long)skb->sk;
	}

	return sfq_fold_hash(q, h, h2);
}

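/*
 * Returns a 1-based bucket number in [1 .. SFQ_HASH_DIVISOR], or 0 when
 * the packet should not be enqueued (filter drop/steal); sfq_enqueue()
 * subtracts one before indexing ht[].
 */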
static unsigned int sfq_classify(struct sk_buff *skb, struct Qdisc *sch,
				 int *qerr)
{
	struct sfq_sched_data *q = qdisc_priv(sch);
	struct tcf_result res;
	int result;

	if (TC_H_MAJ(skb->priority) == sch->handle &&
	    TC_H_MIN(skb->priority) > 0 &&
	    TC_H_MIN(skb->priority) <= SFQ_HASH_DIVISOR)
		return TC_H_MIN(skb->priority);

	if (!q->filter_list)
		return sfq_hash(q, skb) + 1;

	*qerr = NET_XMIT_SUCCESS | __NET_XMIT_BYPASS;
	result = tc_classify(skb, q->filter_list, &res);
	if (result >= 0) {
#ifdef CONFIG_NET_CLS_ACT
		switch (result) {
		case TC_ACT_STOLEN:
		case TC_ACT_QUEUED:
			*qerr = NET_XMIT_SUCCESS | __NET_XMIT_STOLEN;
		case TC_ACT_SHOT:
			return 0;
		}
#endif
		if (TC_H_MIN(res.classid) <= SFQ_HASH_DIVISOR)
			return TC_H_MIN(res.classid);
	}
	return 0;
}

/*
 * x : slot number [0 .. SFQ_SLOTS - 1]
 */
static inline void sfq_link(struct sfq_sched_data *q, sfq_index x)
{
	sfq_index p, n;
	int qlen = q->slots[x].qlen;

	p = qlen + SFQ_SLOTS;
	n = q->dep[qlen].next;

	q->slots[x].dep.next = n;
	q->slots[x].dep.prev = p;

	q->dep[qlen].next = x; /* sfq_dep_head(q, p)->next = x */
	sfq_dep_head(q, n)->prev = x;
}

#define sfq_unlink(q, x, n, p)			\
	n = q->slots[x].dep.next;		\
	p = q->slots[x].dep.prev;		\
	sfq_dep_head(q, p)->next = n;		\
	sfq_dep_head(q, n)->prev = p

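/*
 * sfq_dec()/sfq_inc() move slot x to the dep[] chain matching its new
 * queue length and keep cur_depth equal to the largest per-slot qlen,
 * so sfq_drop() can locate a longest slot in O(1).
 */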
static inline void sfq_dec(struct sfq_sched_data *q, sfq_index x)
{
	sfq_index p, n;
	int d;

	sfq_unlink(q, x, n, p);

	d = q->slots[x].qlen--;
	if (n == p && q->cur_depth == d)
		q->cur_depth--;
	sfq_link(q, x);
}

static inline void sfq_inc(struct sfq_sched_data *q, sfq_index x)
{
	sfq_index p, n;
	int d;

	sfq_unlink(q, x, n, p);

	d = ++q->slots[x].qlen;
	if (q->cur_depth < d)
		q->cur_depth = d;
	sfq_link(q, x);
}

/* helper functions: might be changed when/if skb uses a standard list_head */

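/*
 * The per-slot skb list is circular: skblist_next/skblist_prev are the
 * first two fields of struct sfq_slot, mirroring skb->next/skb->prev, so
 * the slot itself (cast to struct sk_buff *) serves as the list head and
 * an empty list simply points back to the slot.
 */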
/* remove one skb from tail of slot queue */
static inline struct sk_buff *slot_dequeue_tail(struct sfq_slot *slot)
{
	struct sk_buff *skb = slot->skblist_prev;

	slot->skblist_prev = skb->prev;
	skb->prev->next = (struct sk_buff *)slot;
	skb->next = skb->prev = NULL;
	return skb;
}

/* remove one skb from head of slot queue */
static inline struct sk_buff *slot_dequeue_head(struct sfq_slot *slot)
{
	struct sk_buff *skb = slot->skblist_next;

	slot->skblist_next = skb->next;
	skb->next->prev = (struct sk_buff *)slot;
	skb->next = skb->prev = NULL;
	return skb;
}

static inline void slot_queue_init(struct sfq_slot *slot)
{
	slot->skblist_prev = slot->skblist_next = (struct sk_buff *)slot;
}

/* add skb to slot queue (tail add) */
static inline void slot_queue_add(struct sfq_slot *slot, struct sk_buff *skb)
{
	skb->prev = slot->skblist_prev;
	skb->next = (struct sk_buff *)slot;
	slot->skblist_prev->next = skb;
	slot->skblist_prev = skb;
}

#define slot_queue_walk(slot, skb)		\
	for (skb = slot->skblist_next;		\
	     skb != (struct sk_buff *)slot;	\
	     skb = skb->next)

static unsigned int sfq_drop(struct Qdisc *sch)
{
	struct sfq_sched_data *q = qdisc_priv(sch);
	sfq_index x, d = q->cur_depth;
	struct sk_buff *skb;
	unsigned int len;
	struct sfq_slot *slot;

	/* Queue is full! Find the longest slot and drop tail packet from it */
	if (d > 1) {
		x = q->dep[d].next;
		slot = &q->slots[x];
drop:
		skb = slot_dequeue_tail(slot);
		len = qdisc_pkt_len(skb);
		sfq_dec(q, x);
		kfree_skb(skb);
		sch->q.qlen--;
		sch->qstats.drops++;
		sch->qstats.backlog -= len;
		return len;
	}

	if (d == 1) {
		/* It is difficult to believe, but ALL THE SLOTS HAVE LENGTH 1. */
		x = q->tail->next;
		slot = &q->slots[x];
		q->tail->next = slot->next;
		q->ht[slot->hash] = SFQ_EMPTY_SLOT;
		goto drop;
	}

	return 0;
}

static int
sfq_enqueue(struct sk_buff *skb, struct Qdisc *sch)
{
	struct sfq_sched_data *q = qdisc_priv(sch);
	unsigned int hash;
	sfq_index x;
	struct sfq_slot *slot;
	int uninitialized_var(ret);

	hash = sfq_classify(skb, sch, &ret);
	if (hash == 0) {
		if (ret & __NET_XMIT_BYPASS)
			sch->qstats.drops++;
		kfree_skb(skb);
		return ret;
	}
	hash--;

	x = q->ht[hash];
	slot = &q->slots[x];
	if (x == SFQ_EMPTY_SLOT) {
		x = q->dep[0].next; /* get a free slot */
		q->ht[hash] = x;
		slot = &q->slots[x];
		slot->hash = hash;
	}

	/* If selected queue has length q->limit, do simple tail drop,
	 * i.e. drop _this_ packet.
	 */
	if (slot->qlen >= q->limit)
		return qdisc_drop(skb, sch);

	sch->qstats.backlog += qdisc_pkt_len(skb);
	slot_queue_add(slot, skb);
	sfq_inc(q, x);
	if (slot->qlen == 1) {		/* The flow is new */
		if (q->tail == NULL) {	/* It is the first flow */
			slot->next = x;
		} else {
			slot->next = q->tail->next;
			q->tail->next = x;
		}
		q->tail = slot;
		slot->allot = q->scaled_quantum;
	}
	if (++sch->q.qlen <= q->limit) {
		sch->bstats.bytes += qdisc_pkt_len(skb);
		sch->bstats.packets++;
		return NET_XMIT_SUCCESS;
	}

	sfq_drop(sch);
	return NET_XMIT_CN;
}

static struct sk_buff *
sfq_peek(struct Qdisc *sch)
{
	struct sfq_sched_data *q = qdisc_priv(sch);

	/* No active slots */
	if (q->tail == NULL)
		return NULL;

	return q->slots[q->tail->next].skblist_next;
}

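/*
 * Dequeue is a scaled deficit round robin over the circular list of
 * active slots anchored at q->tail: a slot whose allot is exhausted is
 * recredited with scaled_quantum and skipped until the next round, and
 * each dequeued packet costs SFQ_ALLOT_SIZE(len) credits.
 */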
static struct sk_buff *
sfq_dequeue(struct Qdisc *sch)
{
	struct sfq_sched_data *q = qdisc_priv(sch);
	struct sk_buff *skb;
	sfq_index a, next_a;
	struct sfq_slot *slot;

	/* No active slots */
	if (q->tail == NULL)
		return NULL;

next_slot:
	a = q->tail->next;
	slot = &q->slots[a];
	if (slot->allot <= 0) {
		q->tail = slot;
		slot->allot += q->scaled_quantum;
		goto next_slot;
	}
	skb = slot_dequeue_head(slot);
	sfq_dec(q, a);
	sch->q.qlen--;
	sch->qstats.backlog -= qdisc_pkt_len(skb);

	/* Is the slot empty? */
	if (slot->qlen == 0) {
		q->ht[slot->hash] = SFQ_EMPTY_SLOT;
		next_a = slot->next;
		if (a == next_a) {
			q->tail = NULL; /* no more active slots */
			return skb;
		}
		q->tail->next = next_a;
	} else {
		slot->allot -= SFQ_ALLOT_SIZE(qdisc_pkt_len(skb));
	}
	return skb;
}

static void
sfq_reset(struct Qdisc *sch)
{
	struct sk_buff *skb;

	while ((skb = sfq_dequeue(sch)) != NULL)
		kfree_skb(skb);
}

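/*
 * Periodically pick a new hash salt so that flows which happen to collide
 * in ht[] do not stay collided forever. Packets already queued keep their
 * current slot; only the bucket chosen for future packets changes.
 */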
static void sfq_perturbation(unsigned long arg)
{
	struct Qdisc *sch = (struct Qdisc *)arg;
	struct sfq_sched_data *q = qdisc_priv(sch);

	q->perturbation = net_random();

	if (q->perturb_period)
		mod_timer(&q->perturb_timer, jiffies + q->perturb_period);
}

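/*
 * Parse the TCA_OPTIONS attribute (struct tc_sfq_qopt) supplied by user
 * space, e.g. with iproute2:
 *
 *	tc qdisc add dev eth0 root sfq perturb 10 quantum 1514
 *
 * Only reached from sfq_init(); .change is left NULL in sfq_qdisc_ops.
 */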
static int sfq_change(struct Qdisc *sch, struct nlattr *opt)
{
	struct sfq_sched_data *q = qdisc_priv(sch);
	struct tc_sfq_qopt *ctl = nla_data(opt);
	unsigned int qlen;

	if (opt->nla_len < nla_attr_size(sizeof(*ctl)))
		return -EINVAL;

	sch_tree_lock(sch);
	q->quantum = ctl->quantum ? : psched_mtu(qdisc_dev(sch));
	q->scaled_quantum = SFQ_ALLOT_SIZE(q->quantum);
	q->perturb_period = ctl->perturb_period * HZ;
	if (ctl->limit)
		q->limit = min_t(u32, ctl->limit, SFQ_DEPTH - 1);

	qlen = sch->q.qlen;
	while (sch->q.qlen > q->limit)
		sfq_drop(sch);
	qdisc_tree_decrease_qlen(sch, qlen - sch->q.qlen);

	del_timer(&q->perturb_timer);
	if (q->perturb_period) {
		mod_timer(&q->perturb_timer, jiffies + q->perturb_period);
		q->perturbation = net_random();
	}
	sch_tree_unlock(sch);
	return 0;
}

static int sfq_init(struct Qdisc *sch, struct nlattr *opt)
{
	struct sfq_sched_data *q = qdisc_priv(sch);
	int i;

	q->perturb_timer.function = sfq_perturbation;
	q->perturb_timer.data = (unsigned long)sch;
	init_timer_deferrable(&q->perturb_timer);

	for (i = 0; i < SFQ_HASH_DIVISOR; i++)
		q->ht[i] = SFQ_EMPTY_SLOT;

	for (i = 0; i < SFQ_DEPTH; i++) {
		q->dep[i].next = i + SFQ_SLOTS;
		q->dep[i].prev = i + SFQ_SLOTS;
	}

	q->limit = SFQ_DEPTH - 1;
	q->cur_depth = 0;
	q->tail = NULL;
	if (opt == NULL) {
		q->quantum = psched_mtu(qdisc_dev(sch));
		q->scaled_quantum = SFQ_ALLOT_SIZE(q->quantum);
		q->perturb_period = 0;
		q->perturbation = net_random();
	} else {
		int err = sfq_change(sch, opt);
		if (err)
			return err;
	}

	for (i = 0; i < SFQ_SLOTS; i++) {
		slot_queue_init(&q->slots[i]);
		sfq_link(q, i);
	}
	return 0;
}

static void sfq_destroy(struct Qdisc *sch)
{
	struct sfq_sched_data *q = qdisc_priv(sch);

	tcf_destroy_chain(&q->filter_list);
	q->perturb_period = 0;
	del_timer_sync(&q->perturb_timer);
}

static int sfq_dump(struct Qdisc *sch, struct sk_buff *skb)
{
	struct sfq_sched_data *q = qdisc_priv(sch);
	unsigned char *b = skb_tail_pointer(skb);
	struct tc_sfq_qopt opt;

	opt.quantum = q->quantum;
	opt.perturb_period = q->perturb_period / HZ;

	opt.limit = q->limit;
	opt.divisor = SFQ_HASH_DIVISOR;
	opt.flows = q->limit;

	NLA_PUT(skb, TCA_OPTIONS, sizeof(opt), &opt);

	return skb->len;

nla_put_failure:
	nlmsg_trim(skb, b);
	return -1;
}

static struct Qdisc *sfq_leaf(struct Qdisc *sch, unsigned long arg)
{
	return NULL;
}

static unsigned long sfq_get(struct Qdisc *sch, u32 classid)
{
	return 0;
}

static unsigned long sfq_bind(struct Qdisc *sch, unsigned long parent,
			      u32 classid)
{
	return 0;
}

static void sfq_put(struct Qdisc *q, unsigned long cl)
{
}

static struct tcf_proto **sfq_find_tcf(struct Qdisc *sch, unsigned long cl)
{
	struct sfq_sched_data *q = qdisc_priv(sch);

	if (cl)
		return NULL;
	return &q->filter_list;
}

static int sfq_dump_class(struct Qdisc *sch, unsigned long cl,
			  struct sk_buff *skb, struct tcmsg *tcm)
{
	tcm->tcm_handle |= TC_H_MIN(cl);
	return 0;
}

static int sfq_dump_class_stats(struct Qdisc *sch, unsigned long cl,
				struct gnet_dump *d)
{
	struct sfq_sched_data *q = qdisc_priv(sch);
	sfq_index idx = q->ht[cl - 1];
	struct gnet_stats_queue qs = { 0 };
	struct tc_sfq_xstats xstats = { 0 };
	struct sk_buff *skb;

	if (idx != SFQ_EMPTY_SLOT) {
		const struct sfq_slot *slot = &q->slots[idx];

		xstats.allot = slot->allot << SFQ_ALLOT_SHIFT;
		qs.qlen = slot->qlen;
		slot_queue_walk(slot, skb)
			qs.backlog += qdisc_pkt_len(skb);
	}
	if (gnet_stats_copy_queue(d, &qs) < 0)
		return -1;
	return gnet_stats_copy_app(d, &xstats, sizeof(xstats));
}

static void sfq_walk(struct Qdisc *sch, struct qdisc_walker *arg)
{
	struct sfq_sched_data *q = qdisc_priv(sch);
	unsigned int i;

	if (arg->stop)
		return;

	for (i = 0; i < SFQ_HASH_DIVISOR; i++) {
		if (q->ht[i] == SFQ_EMPTY_SLOT ||
		    arg->count < arg->skip) {
			arg->count++;
			continue;
		}
		if (arg->fn(sch, i + 1, arg) < 0) {
			arg->stop = 1;
			break;
		}
		arg->count++;
	}
}

static const struct Qdisc_class_ops sfq_class_ops = {
	.leaf		=	sfq_leaf,
	.get		=	sfq_get,
	.put		=	sfq_put,
	.tcf_chain	=	sfq_find_tcf,
	.bind_tcf	=	sfq_bind,
	.unbind_tcf	=	sfq_put,
	.dump		=	sfq_dump_class,
	.dump_stats	=	sfq_dump_class_stats,
	.walk		=	sfq_walk,
};

static struct Qdisc_ops sfq_qdisc_ops __read_mostly = {
	.cl_ops		=	&sfq_class_ops,
	.id		=	"sfq",
	.priv_size	=	sizeof(struct sfq_sched_data),
	.enqueue	=	sfq_enqueue,
	.dequeue	=	sfq_dequeue,
	.peek		=	sfq_peek,
	.drop		=	sfq_drop,
	.init		=	sfq_init,
	.reset		=	sfq_reset,
	.destroy	=	sfq_destroy,
	.change		=	NULL,
	.dump		=	sfq_dump,
	.owner		=	THIS_MODULE,
};

static int __init sfq_module_init(void)
{
	return register_qdisc(&sfq_qdisc_ops);
}
static void __exit sfq_module_exit(void)
{
	unregister_qdisc(&sfq_qdisc_ops);
}
module_init(sfq_module_init)
module_exit(sfq_module_exit)
MODULE_LICENSE("GPL");