/*
 * net/sched/sch_tbf.c	Token Bucket Filter queue.
 *
 *		This program is free software; you can redistribute it and/or
 *		modify it under the terms of the GNU General Public License
 *		as published by the Free Software Foundation; either version
 *		2 of the License, or (at your option) any later version.
 *
 * Authors:	Alexey Kuznetsov, <kuznet@ms2.inr.ac.ru>
 *		Dmitry Torokhov <dtor@mail.ru> - allow attaching inner qdiscs -
 *						 original idea by Martin Devera
 *
 */

#include <linux/module.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/errno.h>
#include <linux/skbuff.h>
#include <net/netlink.h>
#include <net/sch_generic.h>
#include <net/pkt_sched.h>


/*	Simple Token Bucket Filter.
	=======================================

	SOURCE.
	-------

	None.

	Description.
	------------

	A data flow obeys TBF with rate R and depth B, if for any
	time interval t_i...t_f the number of transmitted bits
	does not exceed B + R*(t_f-t_i).

	Packetized version of this definition:
	The sequence of packets of sizes s_i served at moments t_i
	obeys TBF, if for any i<=k:

	s_i+....+s_k <= B + R*(t_k - t_i)

	Algorithm.
	----------

	Let N(t_i) be B/R initially and N(t) grow continuously with time as:

	N(t+delta) = min{B/R, N(t) + delta}

	If the first packet in queue has length S, it may be
	transmitted only at the time t_* when S/R <= N(t_*),
	and in this case N(t) jumps:

	N(t_* + 0) = N(t_* - 0) - S/R.
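
	A small worked example (figures invented purely for illustration):
	with R = 125000 byte/sec and B = 12500 byte, N(t) saturates at
	B/R = 0.1 sec; a 1250 byte packet consumes S/R = 0.01 sec of
	credit, so a full bucket admits a back-to-back burst of ten such
	packets, after which the flow is paced to one packet per 0.01 sec,
	i.e. exactly R.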

	Actually, QoS requires two TBF to be applied to a data stream.
	One of them controls steady state burst size, another
	one with rate P (peak rate) and depth M (equal to link MTU)
	limits bursts at a smaller time scale.

	It is easy to see that P>R, and B>M. If P is infinity, this double
	TBF is equivalent to a single one.

	When TBF works in reshaping mode, latency is estimated as:

	lat = max ((L-B)/R, (L-M)/P)
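
	For instance (figures invented purely for illustration): with limit
	L = 100000 byte, B = 10000 byte, R = 1000000 byte/sec, M = 1500 byte
	and P = 10000000 byte/sec,
	lat = max(90000/1000000, 98500/10000000) = 90 msec,
	so the steady rate term dominates once L is well above B.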

	NOTES.
	------

	If TBF throttles, it starts a watchdog timer, which will wake it up
	when it is ready to transmit.
	Note that the minimal timer resolution is 1/HZ.
	If no new packets arrive during this period,
	or if the device is not awakened by EOI for some previous packet,
	TBF can stop its activity for 1/HZ.

	This means that, with depth B, the maximal rate is

	R_crit = B*HZ

	E.g. for 10Mbit ethernet and HZ=100 the minimal allowed B is ~10Kbytes.

	Note that the peak rate TBF is much tougher: with MTU 1500
	P_crit = 150Kbytes/sec. So, if you need greater peak
	rates, use Alpha with HZ=1000 :-)

	With classful TBF, limit is just kept for backwards compatibility.
	It is passed to the default bfifo qdisc - if the inner qdisc is
	changed the limit is not effective anymore.
*/
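
/* Illustrative user space configuration (example command only; syntax
 * per the tc(8) tbf frontend):
 *
 *	tc qdisc add dev eth0 root tbf rate 1mbit burst 10kb latency 70ms
 *
 * where rate maps to R, burst to B, and the backlog limit is derived
 * from the requested latency.
 */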

struct tbf_sched_data {
/* Parameters */
	u32		limit;		/* Maximal length of backlog: bytes */
	u32		max_size;
	s64		buffer;		/* Token bucket depth/rate: MUST BE >= MTU/B */
	s64		mtu;
	struct psched_ratecfg rate;
	struct psched_ratecfg peak;

/* Variables */
	s64	tokens;			/* Current number of B tokens */
	s64	ptokens;		/* Current number of P tokens */
	s64	t_c;			/* Time check-point */
	struct Qdisc	*qdisc;		/* Inner qdisc, default - bfifo queue */
	struct qdisc_watchdog watchdog;	/* Watchdog timer */
};

/* Time to Length, convert time in ns to length in bytes
 * to determine how many bytes can be sent in given time.
 */
static u64 psched_ns_t2l(const struct psched_ratecfg *r,
			 u64 time_in_ns)
{
	/* The formula is:
	 * len = (time_in_ns * r->rate_bytes_ps) / NSEC_PER_SEC
	 */
	u64 len = time_in_ns * r->rate_bytes_ps;

	do_div(len, NSEC_PER_SEC);
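
	/* Each 53 byte ATM cell carries only 48 bytes of payload, so for
	 * ATM link layers only 48/53 of the byte budget computed above is
	 * usable data.
	 */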
	if (unlikely(r->linklayer == TC_LINKLAYER_ATM)) {
		do_div(len, 53);
		len = len * 48;
	}

	if (len > r->overhead)
		len -= r->overhead;
	else
		len = 0;

	return len;
}

/*
 * Return length of individual segments of a gso packet,
 * including all headers (MAC, IP, TCP/UDP)
 */
static unsigned int skb_gso_mac_seglen(const struct sk_buff *skb)
{
	unsigned int hdr_len = skb_transport_header(skb) - skb_mac_header(skb);
	return hdr_len + skb_gso_transport_seglen(skb);
}

/* GSO packet is too big, segment it so that tbf can transmit
 * each segment in time
 */
static int tbf_segment(struct sk_buff *skb, struct Qdisc *sch,
		       struct sk_buff **to_free)
{
	struct tbf_sched_data *q = qdisc_priv(sch);
	struct sk_buff *segs, *nskb;
	netdev_features_t features = netif_skb_features(skb);
	unsigned int len = 0, prev_len = qdisc_pkt_len(skb);
	int ret, nb;

	segs = skb_gso_segment(skb, features & ~NETIF_F_GSO_MASK);

	if (IS_ERR_OR_NULL(segs))
		return qdisc_drop(skb, sch, to_free);

	nb = 0;
	while (segs) {
		nskb = segs->next;
		segs->next = NULL;
		qdisc_skb_cb(segs)->pkt_len = segs->len;
		len += segs->len;
		ret = qdisc_enqueue(segs, q->qdisc, to_free);
		if (ret != NET_XMIT_SUCCESS) {
			if (net_xmit_drop_count(ret))
				qdisc_qstats_drop(sch);
		} else {
			nb++;
		}
		segs = nskb;
	}
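	/* The hierarchy was charged for one skb of prev_len bytes on
	 * enqueue, but nb segments totalling len bytes actually went in;
	 * fix up qlen and backlog accounting in the ancestor qdiscs.
	 */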
	sch->q.qlen += nb;
	if (nb > 1)
		qdisc_tree_reduce_backlog(sch, 1 - nb, prev_len - len);
	consume_skb(skb);
	return nb > 0 ? NET_XMIT_SUCCESS : NET_XMIT_DROP;
}

static int tbf_enqueue(struct sk_buff *skb, struct Qdisc *sch,
		       struct sk_buff **to_free)
{
	struct tbf_sched_data *q = qdisc_priv(sch);
	int ret;

	if (qdisc_pkt_len(skb) > q->max_size) {
		if (skb_is_gso(skb) && skb_gso_mac_seglen(skb) <= q->max_size)
			return tbf_segment(skb, sch, to_free);
		return qdisc_drop(skb, sch, to_free);
	}
	ret = qdisc_enqueue(skb, q->qdisc, to_free);
	if (ret != NET_XMIT_SUCCESS) {
		if (net_xmit_drop_count(ret))
			qdisc_qstats_drop(sch);
		return ret;
	}

	qdisc_qstats_backlog_inc(sch, skb);
	sch->q.qlen++;
	return NET_XMIT_SUCCESS;
}

static bool tbf_peak_present(const struct tbf_sched_data *q)
{
	return q->peak.rate_bytes_ps;
}

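/* Dequeue bookkeeping: tokens, ptokens, buffer and mtu are all signed
 * transmission credit in nanoseconds. Time elapsed since t_c earns
 * credit, capped at buffer (mtu for the peak bucket); a packet costs its
 * serialization time at the configured rate. The head packet is released
 * only while both buckets stay non-negative; otherwise the watchdog is
 * armed for the moment the larger deficit is repaid.
 */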
static struct sk_buff *tbf_dequeue(struct Qdisc *sch)
{
	struct tbf_sched_data *q = qdisc_priv(sch);
	struct sk_buff *skb;

	skb = q->qdisc->ops->peek(q->qdisc);

	if (skb) {
		s64 now;
		s64 toks;
		s64 ptoks = 0;
		unsigned int len = qdisc_pkt_len(skb);

		now = ktime_get_ns();
		toks = min_t(s64, now - q->t_c, q->buffer);

		if (tbf_peak_present(q)) {
			ptoks = toks + q->ptokens;
			if (ptoks > q->mtu)
				ptoks = q->mtu;
			ptoks -= (s64) psched_l2t_ns(&q->peak, len);
		}
		toks += q->tokens;
		if (toks > q->buffer)
			toks = q->buffer;
		toks -= (s64) psched_l2t_ns(&q->rate, len);

		if ((toks|ptoks) >= 0) {
			skb = qdisc_dequeue_peeked(q->qdisc);
			if (unlikely(!skb))
				return NULL;

			q->t_c = now;
			q->tokens = toks;
			q->ptokens = ptoks;
			qdisc_qstats_backlog_dec(sch, skb);
			sch->q.qlen--;
			qdisc_bstats_update(sch, skb);
			return skb;
		}

		qdisc_watchdog_schedule_ns(&q->watchdog,
					   now + max_t(long, -toks, -ptoks));

		/* Maybe we have a shorter packet in the queue,
		   which can be sent now. It sounds cool,
		   but this is wrong in principle.
		   We MUST NOT reorder packets under these circumstances.

		   Really, if we split the flow into independent
		   subflows, it would be a very good solution.
		   This is the main idea of all FQ algorithms
		   (cf. CSZ, HPFQ, HFSC)
		 */

		qdisc_qstats_overlimit(sch);
	}
	return NULL;
}
281 | ||
cc7ec456 | 282 | static void tbf_reset(struct Qdisc *sch) |
1da177e4 LT |
283 | { |
284 | struct tbf_sched_data *q = qdisc_priv(sch); | |
285 | ||
286 | qdisc_reset(q->qdisc); | |
8d5958f4 | 287 | sch->qstats.backlog = 0; |
1da177e4 | 288 | sch->q.qlen = 0; |
d2de875c | 289 | q->t_c = ktime_get_ns(); |
1da177e4 LT |
290 | q->tokens = q->buffer; |
291 | q->ptokens = q->mtu; | |
f7f593e3 | 292 | qdisc_watchdog_cancel(&q->watchdog); |
1da177e4 LT |
293 | } |
294 | ||

static const struct nla_policy tbf_policy[TCA_TBF_MAX + 1] = {
	[TCA_TBF_PARMS]	= { .len = sizeof(struct tc_tbf_qopt) },
	[TCA_TBF_RTAB]	= { .type = NLA_BINARY, .len = TC_RTAB_SIZE },
	[TCA_TBF_PTAB]	= { .type = NLA_BINARY, .len = TC_RTAB_SIZE },
	[TCA_TBF_RATE64]	= { .type = NLA_U64 },
	[TCA_TBF_PRATE64]	= { .type = NLA_U64 },
	[TCA_TBF_BURST] = { .type = NLA_U32 },
	[TCA_TBF_PBURST] = { .type = NLA_U32 },
};

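/* Configuration note: the legacy tc_tbf_qopt in TCA_TBF_PARMS carries
 * buffer and mtu as psched ticks, while TCA_TBF_BURST/TCA_TBF_PBURST
 * supply the burst sizes directly in bytes and TCA_TBF_RATE64/
 * TCA_TBF_PRATE64 carry rates that do not fit in 32 bits.
 */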
static int tbf_change(struct Qdisc *sch, struct nlattr *opt)
{
	int err;
	struct tbf_sched_data *q = qdisc_priv(sch);
	struct nlattr *tb[TCA_TBF_MAX + 1];
	struct tc_tbf_qopt *qopt;
	struct Qdisc *child = NULL;
	struct psched_ratecfg rate;
	struct psched_ratecfg peak;
	u64 max_size;
	s64 buffer, mtu;
	u64 rate64 = 0, prate64 = 0;

	err = nla_parse_nested(tb, TCA_TBF_MAX, opt, tbf_policy, NULL);
	if (err < 0)
		return err;

	err = -EINVAL;
	if (tb[TCA_TBF_PARMS] == NULL)
		goto done;

	qopt = nla_data(tb[TCA_TBF_PARMS]);
	if (qopt->rate.linklayer == TC_LINKLAYER_UNAWARE)
		qdisc_put_rtab(qdisc_get_rtab(&qopt->rate,
					      tb[TCA_TBF_RTAB]));

	if (qopt->peakrate.linklayer == TC_LINKLAYER_UNAWARE)
		qdisc_put_rtab(qdisc_get_rtab(&qopt->peakrate,
					      tb[TCA_TBF_PTAB]));

	buffer = min_t(u64, PSCHED_TICKS2NS(qopt->buffer), ~0U);
	mtu = min_t(u64, PSCHED_TICKS2NS(qopt->mtu), ~0U);

	if (tb[TCA_TBF_RATE64])
		rate64 = nla_get_u64(tb[TCA_TBF_RATE64]);
	psched_ratecfg_precompute(&rate, &qopt->rate, rate64);

	if (tb[TCA_TBF_BURST]) {
		max_size = nla_get_u32(tb[TCA_TBF_BURST]);
		buffer = psched_l2t_ns(&rate, max_size);
	} else {
		max_size = min_t(u64, psched_ns_t2l(&rate, buffer), ~0U);
	}

	if (qopt->peakrate.rate) {
		if (tb[TCA_TBF_PRATE64])
			prate64 = nla_get_u64(tb[TCA_TBF_PRATE64]);
		psched_ratecfg_precompute(&peak, &qopt->peakrate, prate64);
		if (peak.rate_bytes_ps <= rate.rate_bytes_ps) {
			pr_warn_ratelimited("sch_tbf: peakrate %llu is lower than or equal to rate %llu!\n",
					    peak.rate_bytes_ps, rate.rate_bytes_ps);
			err = -EINVAL;
			goto done;
		}

		if (tb[TCA_TBF_PBURST]) {
			u32 pburst = nla_get_u32(tb[TCA_TBF_PBURST]);
			max_size = min_t(u32, max_size, pburst);
			mtu = psched_l2t_ns(&peak, pburst);
		} else {
			max_size = min_t(u64, max_size, psched_ns_t2l(&peak, mtu));
		}
	} else {
		memset(&peak, 0, sizeof(peak));
	}

	if (max_size < psched_mtu(qdisc_dev(sch)))
		pr_warn_ratelimited("sch_tbf: burst %llu is lower than device %s mtu (%u)!\n",
				    max_size, qdisc_dev(sch)->name,
				    psched_mtu(qdisc_dev(sch)));

	if (!max_size) {
		err = -EINVAL;
		goto done;
	}

	if (q->qdisc != &noop_qdisc) {
		err = fifo_set_limit(q->qdisc, qopt->limit);
		if (err)
			goto done;
	} else if (qopt->limit > 0) {
		child = fifo_create_dflt(sch, &bfifo_qdisc_ops, qopt->limit);
		if (IS_ERR(child)) {
			err = PTR_ERR(child);
			goto done;
		}
	}

	sch_tree_lock(sch);
	if (child) {
		qdisc_tree_reduce_backlog(q->qdisc, q->qdisc->q.qlen,
					  q->qdisc->qstats.backlog);
		qdisc_destroy(q->qdisc);
		q->qdisc = child;
		if (child != &noop_qdisc)
			qdisc_hash_add(child, true);
	}
	q->limit = qopt->limit;
	if (tb[TCA_TBF_PBURST])
		q->mtu = mtu;
	else
		q->mtu = PSCHED_TICKS2NS(qopt->mtu);
	q->max_size = max_size;
	if (tb[TCA_TBF_BURST])
		q->buffer = buffer;
	else
		q->buffer = PSCHED_TICKS2NS(qopt->buffer);
	q->tokens = q->buffer;
	q->ptokens = q->mtu;

	memcpy(&q->rate, &rate, sizeof(struct psched_ratecfg));
	memcpy(&q->peak, &peak, sizeof(struct psched_ratecfg));

	sch_tree_unlock(sch);
	err = 0;
done:
	return err;
}

static int tbf_init(struct Qdisc *sch, struct nlattr *opt)
{
	struct tbf_sched_data *q = qdisc_priv(sch);

	if (opt == NULL)
		return -EINVAL;

	q->t_c = ktime_get_ns();
	qdisc_watchdog_init(&q->watchdog, sch);
	q->qdisc = &noop_qdisc;

	return tbf_change(sch, opt);
}

static void tbf_destroy(struct Qdisc *sch)
{
	struct tbf_sched_data *q = qdisc_priv(sch);

	qdisc_watchdog_cancel(&q->watchdog);
	qdisc_destroy(q->qdisc);
}

static int tbf_dump(struct Qdisc *sch, struct sk_buff *skb)
{
	struct tbf_sched_data *q = qdisc_priv(sch);
	struct nlattr *nest;
	struct tc_tbf_qopt opt;

	sch->qstats.backlog = q->qdisc->qstats.backlog;
	nest = nla_nest_start(skb, TCA_OPTIONS);
	if (nest == NULL)
		goto nla_put_failure;

	opt.limit = q->limit;
	psched_ratecfg_getrate(&opt.rate, &q->rate);
	if (tbf_peak_present(q))
		psched_ratecfg_getrate(&opt.peakrate, &q->peak);
	else
		memset(&opt.peakrate, 0, sizeof(opt.peakrate));
	opt.mtu = PSCHED_NS2TICKS(q->mtu);
	opt.buffer = PSCHED_NS2TICKS(q->buffer);
	if (nla_put(skb, TCA_TBF_PARMS, sizeof(opt), &opt))
		goto nla_put_failure;
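	/* tc_ratespec.rate is only 32 bits wide; rates of 2^32 bytes/sec
	 * or more are additionally exported via the 64-bit attributes.
	 */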
	if (q->rate.rate_bytes_ps >= (1ULL << 32) &&
	    nla_put_u64_64bit(skb, TCA_TBF_RATE64, q->rate.rate_bytes_ps,
			      TCA_TBF_PAD))
		goto nla_put_failure;
	if (tbf_peak_present(q) &&
	    q->peak.rate_bytes_ps >= (1ULL << 32) &&
	    nla_put_u64_64bit(skb, TCA_TBF_PRATE64, q->peak.rate_bytes_ps,
			      TCA_TBF_PAD))
		goto nla_put_failure;

	return nla_nest_end(skb, nest);

nla_put_failure:
	nla_nest_cancel(skb, nest);
	return -1;
}

static int tbf_dump_class(struct Qdisc *sch, unsigned long cl,
			  struct sk_buff *skb, struct tcmsg *tcm)
{
	struct tbf_sched_data *q = qdisc_priv(sch);

	tcm->tcm_handle |= TC_H_MIN(1);
	tcm->tcm_info = q->qdisc->handle;

	return 0;
}

static int tbf_graft(struct Qdisc *sch, unsigned long arg, struct Qdisc *new,
		     struct Qdisc **old)
{
	struct tbf_sched_data *q = qdisc_priv(sch);

	if (new == NULL)
		new = &noop_qdisc;

	*old = qdisc_replace(sch, new, &q->qdisc);
	return 0;
}

static struct Qdisc *tbf_leaf(struct Qdisc *sch, unsigned long arg)
{
	struct tbf_sched_data *q = qdisc_priv(sch);
	return q->qdisc;
}

static unsigned long tbf_get(struct Qdisc *sch, u32 classid)
{
	return 1;
}

static void tbf_put(struct Qdisc *sch, unsigned long arg)
{
}

static void tbf_walk(struct Qdisc *sch, struct qdisc_walker *walker)
{
	if (!walker->stop) {
		if (walker->count >= walker->skip)
			if (walker->fn(sch, 1, walker) < 0) {
				walker->stop = 1;
				return;
			}
		walker->count++;
	}
}

static const struct Qdisc_class_ops tbf_class_ops = {
	.graft		=	tbf_graft,
	.leaf		=	tbf_leaf,
	.get		=	tbf_get,
	.put		=	tbf_put,
	.walk		=	tbf_walk,
	.dump		=	tbf_dump_class,
};

static struct Qdisc_ops tbf_qdisc_ops __read_mostly = {
	.next		=	NULL,
	.cl_ops		=	&tbf_class_ops,
	.id		=	"tbf",
	.priv_size	=	sizeof(struct tbf_sched_data),
	.enqueue	=	tbf_enqueue,
	.dequeue	=	tbf_dequeue,
	.peek		=	qdisc_peek_dequeued,
	.init		=	tbf_init,
	.reset		=	tbf_reset,
	.destroy	=	tbf_destroy,
	.change		=	tbf_change,
	.dump		=	tbf_dump,
	.owner		=	THIS_MODULE,
};

static int __init tbf_module_init(void)
{
	return register_qdisc(&tbf_qdisc_ops);
}

static void __exit tbf_module_exit(void)
{
	unregister_qdisc(&tbf_qdisc_ops);
}

module_init(tbf_module_init)
module_exit(tbf_module_exit)
MODULE_LICENSE("GPL");