// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Fair Queue CoDel discipline
 *
 * Copyright (C) 2012,2015 Eric Dumazet <edumazet@google.com>
 */

#include <linux/module.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/jiffies.h>
#include <linux/string.h>
#include <linux/in.h>
#include <linux/errno.h>
#include <linux/init.h>
#include <linux/skbuff.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <net/netlink.h>
#include <net/pkt_sched.h>
#include <net/pkt_cls.h>
#include <net/codel.h>
#include <net/codel_impl.h>
#include <net/codel_qdisc.h>
/* Fair Queue CoDel.
 *
 * Principles:
 * Packets are classified on flows (by the internal classifier, or an
 * external one). This is a stochastic model: as we use a hash, several
 * flows might be hashed to the same slot.
 * Each flow has a CoDel managed queue.
 * Flows are linked onto two (Round Robin) lists,
 * so that new flows have priority over old ones.
 *
 * For a given flow, packets are not reordered (CoDel uses a FIFO);
 * only head drops are done.
 * ECN capability is on by default.
 * Low memory footprint (64 bytes per flow).
 */
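
/*
 * Scheduling sketch: a backlogged flow sits on exactly one of the two
 * lists. A flow that was idle when a packet arrived joins new_flows
 * with a full quantum of deficit; dequeue always serves new_flows
 * first, demoting a flow to the tail of old_flows once its deficit is
 * exhausted, or once its queue empties while old flows still wait.
 * The net effect is roughly Deficit Round Robin with a one-round
 * priority boost for freshly active (typically sparse) flows.
 */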

struct fq_codel_flow {
	struct sk_buff	  *head;
	struct sk_buff	  *tail;
	struct list_head  flowchain;
	int		  deficit;
	struct codel_vars cvars;
}; /* please try to keep this structure <= 64 bytes */
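
/* Rough 64-bit arithmetic behind the 64 byte goal (assuming the usual
 * layout): two pointers (16 bytes) + list_head (16) + int (4) +
 * struct codel_vars (~24) is about 60 bytes before padding, i.e. one
 * cache line per flow.
 */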

struct fq_codel_sched_data {
	struct tcf_proto __rcu *filter_list; /* optional external classifier */
	struct tcf_block *block;
	struct fq_codel_flow *flows;	/* Flows table [flows_cnt] */
	u32		*backlogs;	/* backlog table [flows_cnt] */
	u32		flows_cnt;	/* number of flows */
	u32		quantum;	/* psched_mtu(qdisc_dev(sch)); */
	u32		drop_batch_size;
	u32		memory_limit;
	struct codel_params cparams;
	struct codel_stats cstats;
	u32		memory_usage;
	u32		drop_overmemory;
	u32		drop_overlimit;
	u32		new_flow_count;

	struct list_head new_flows;	/* list of new flows */
	struct list_head old_flows;	/* list of old flows */
};

static unsigned int fq_codel_hash(const struct fq_codel_sched_data *q,
				  struct sk_buff *skb)
{
	return reciprocal_scale(skb_get_hash(skb), q->flows_cnt);
}
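
/* reciprocal_scale() maps the 32-bit skb hash onto [0, flows_cnt) with
 * a multiply and a shift, (u32)(((u64)hash * flows_cnt) >> 32), instead
 * of a modulus. E.g. with flows_cnt == 1024, hash 0x80000000 lands in
 * slot (0x80000000ULL * 1024) >> 32 == 512.
 */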

static unsigned int fq_codel_classify(struct sk_buff *skb, struct Qdisc *sch,
				      int *qerr)
{
	struct fq_codel_sched_data *q = qdisc_priv(sch);
	struct tcf_proto *filter;
	struct tcf_result res;
	int result;

	if (TC_H_MAJ(skb->priority) == sch->handle &&
	    TC_H_MIN(skb->priority) > 0 &&
	    TC_H_MIN(skb->priority) <= q->flows_cnt)
		return TC_H_MIN(skb->priority);

	filter = rcu_dereference_bh(q->filter_list);
	if (!filter)
		return fq_codel_hash(q, skb) + 1;

	*qerr = NET_XMIT_SUCCESS | __NET_XMIT_BYPASS;
	result = tcf_classify(skb, NULL, filter, &res, false);
	if (result >= 0) {
#ifdef CONFIG_NET_CLS_ACT
		switch (result) {
		case TC_ACT_STOLEN:
		case TC_ACT_QUEUED:
		case TC_ACT_TRAP:
			*qerr = NET_XMIT_SUCCESS | __NET_XMIT_STOLEN;
			fallthrough;
		case TC_ACT_SHOT:
			return 0;
		}
#endif
		if (TC_H_MIN(res.classid) <= q->flows_cnt)
			return TC_H_MIN(res.classid);
	}
	return 0;
}
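
/* Return convention: 0 means "drop this packet", otherwise the value is
 * 1 + flow index (the caller does idx--). Local senders can thus pin a
 * packet to a flow without any filter by setting skb->priority, e.g.
 * skb->priority = TC_H_MAKE(sch->handle, 5) selects flow 4.
 */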

/* helper functions: might be changed when/if skbs use a standard list_head */

/* remove one skb from head of slot queue */
static inline struct sk_buff *dequeue_head(struct fq_codel_flow *flow)
{
	struct sk_buff *skb = flow->head;

	flow->head = skb->next;
	skb_mark_not_on_list(skb);
	return skb;
}

/* add skb to flow queue (tail add) */
static inline void flow_queue_add(struct fq_codel_flow *flow,
				  struct sk_buff *skb)
{
	if (flow->head == NULL)
		flow->head = skb;
	else
		flow->tail->next = skb;
	flow->tail = skb;
	skb->next = NULL;
}

static unsigned int fq_codel_drop(struct Qdisc *sch, unsigned int max_packets,
				  struct sk_buff **to_free)
{
	struct fq_codel_sched_data *q = qdisc_priv(sch);
	struct sk_buff *skb;
	unsigned int maxbacklog = 0, idx = 0, i, len;
	struct fq_codel_flow *flow;
	unsigned int threshold;
	unsigned int mem = 0;

	/* Queue is full! Find the fat flow and drop packet(s) from it.
	 * This might sound expensive, but with 1024 flows, we scan
	 * 4KB of memory, and we don't need to handle a complex tree
	 * in fast path (packet queue/enqueue) with many cache misses.
	 * In stress mode, we'll try to drop 64 packets from the flow,
	 * amortizing this linear lookup to one cache line per drop.
	 */
	for (i = 0; i < q->flows_cnt; i++) {
		if (q->backlogs[i] > maxbacklog) {
			maxbacklog = q->backlogs[i];
			idx = i;
		}
	}

	/* Our goal is to drop half of this fat flow backlog */
	threshold = maxbacklog >> 1;

	flow = &q->flows[idx];
	len = 0;
	i = 0;
	do {
		skb = dequeue_head(flow);
		len += qdisc_pkt_len(skb);
		mem += get_codel_cb(skb)->mem_usage;
		__qdisc_drop(skb, to_free);
	} while (++i < max_packets && len < threshold);

	/* Tell codel to increase its signal strength also */
	flow->cvars.count += i;
	q->backlogs[idx] -= len;
	q->memory_usage -= mem;
	sch->qstats.drops += i;
	sch->qstats.backlog -= len;
	sch->q.qlen -= i;
	return idx;
}
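
/* Back-of-envelope for the comment above: scanning 1024 u32 backlogs
 * touches 4KB, i.e. 64 cache lines of 64 bytes; dropping up to 64
 * packets per scan amortizes that to about one cache line per drop.
 */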

static int fq_codel_enqueue(struct sk_buff *skb, struct Qdisc *sch,
			    struct sk_buff **to_free)
{
	struct fq_codel_sched_data *q = qdisc_priv(sch);
	unsigned int idx, prev_backlog, prev_qlen;
	struct fq_codel_flow *flow;
	int ret;
	unsigned int pkt_len;
	bool memory_limited;

	idx = fq_codel_classify(skb, sch, &ret);
	if (idx == 0) {
		if (ret & __NET_XMIT_BYPASS)
			qdisc_qstats_drop(sch);
		__qdisc_drop(skb, to_free);
		return ret;
	}
	idx--;

	codel_set_enqueue_time(skb);
	flow = &q->flows[idx];
	flow_queue_add(flow, skb);
	q->backlogs[idx] += qdisc_pkt_len(skb);
	qdisc_qstats_backlog_inc(sch, skb);

	if (list_empty(&flow->flowchain)) {
		list_add_tail(&flow->flowchain, &q->new_flows);
		q->new_flow_count++;
		flow->deficit = q->quantum;
	}
	get_codel_cb(skb)->mem_usage = skb->truesize;
	q->memory_usage += get_codel_cb(skb)->mem_usage;
	memory_limited = q->memory_usage > q->memory_limit;
	if (++sch->q.qlen <= sch->limit && !memory_limited)
		return NET_XMIT_SUCCESS;

	prev_backlog = sch->qstats.backlog;
	prev_qlen = sch->q.qlen;

	/* save this packet length as it might be dropped by fq_codel_drop() */
	pkt_len = qdisc_pkt_len(skb);
	/* fq_codel_drop() is quite expensive, as it performs a linear search
	 * in q->backlogs[] to find a fat flow.
	 * So instead of dropping a single packet, drop half of its backlog,
	 * capped at drop_batch_size packets (64 by default), to avoid too
	 * big a cpu spike here.
	 */
	ret = fq_codel_drop(sch, q->drop_batch_size, to_free);

	prev_qlen -= sch->q.qlen;
	prev_backlog -= sch->qstats.backlog;
	q->drop_overlimit += prev_qlen;
	if (memory_limited)
		q->drop_overmemory += prev_qlen;

	/* As we dropped packet(s), better let upper stack know this.
	 * If we dropped a packet for this flow, return NET_XMIT_CN,
	 * but in this case, our parents won't increase their backlogs.
	 */
	if (ret == idx) {
		qdisc_tree_reduce_backlog(sch, prev_qlen - 1,
					  prev_backlog - pkt_len);
		return NET_XMIT_CN;
	}
	qdisc_tree_reduce_backlog(sch, prev_qlen, prev_backlog);
	return NET_XMIT_SUCCESS;
}
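
/* Note: after fq_codel_drop(), prev_qlen and prev_backlog are reused to
 * hold the deltas (packets and bytes actually dropped). NET_XMIT_CN is
 * returned when the victim flow is the one we just enqueued to: parents
 * then skip accounting for this skb, which is why the "- 1" and
 * "- pkt_len" terms exclude it from the backlog reduction.
 */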

/* This is the specific function called from codel_dequeue()
 * to dequeue a packet from the queue. Note: backlog is handled in
 * codel, we don't need to reduce it here.
 */
static struct sk_buff *dequeue_func(struct codel_vars *vars, void *ctx)
{
	struct Qdisc *sch = ctx;
	struct fq_codel_sched_data *q = qdisc_priv(sch);
	struct fq_codel_flow *flow;
	struct sk_buff *skb = NULL;

	flow = container_of(vars, struct fq_codel_flow, cvars);
	if (flow->head) {
		skb = dequeue_head(flow);
		q->backlogs[flow - q->flows] -= qdisc_pkt_len(skb);
		q->memory_usage -= get_codel_cb(skb)->mem_usage;
		sch->q.qlen--;
		sch->qstats.backlog -= qdisc_pkt_len(skb);
	}
	return skb;
}
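
/* codel_dequeue() only knows about struct codel_vars, so container_of()
 * recovers the owning flow above; likewise (flow - q->flows) recovers
 * the flow index for the backlogs[] table.
 */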

static void drop_func(struct sk_buff *skb, void *ctx)
{
	struct Qdisc *sch = ctx;

	kfree_skb(skb);
	qdisc_qstats_drop(sch);
}

static struct sk_buff *fq_codel_dequeue(struct Qdisc *sch)
{
	struct fq_codel_sched_data *q = qdisc_priv(sch);
	struct sk_buff *skb;
	struct fq_codel_flow *flow;
	struct list_head *head;

begin:
	head = &q->new_flows;
	if (list_empty(head)) {
		head = &q->old_flows;
		if (list_empty(head))
			return NULL;
	}
	flow = list_first_entry(head, struct fq_codel_flow, flowchain);

	if (flow->deficit <= 0) {
		flow->deficit += q->quantum;
		list_move_tail(&flow->flowchain, &q->old_flows);
		goto begin;
	}

	skb = codel_dequeue(sch, &sch->qstats.backlog, &q->cparams,
			    &flow->cvars, &q->cstats, qdisc_pkt_len,
			    codel_get_enqueue_time, drop_func, dequeue_func);

	if (!skb) {
		/* force a pass through old_flows to prevent starvation */
		if ((head == &q->new_flows) && !list_empty(&q->old_flows))
			list_move_tail(&flow->flowchain, &q->old_flows);
		else
			list_del_init(&flow->flowchain);
		goto begin;
	}
	qdisc_bstats_update(sch, skb);
	flow->deficit -= qdisc_pkt_len(skb);
	/* We can't call qdisc_tree_reduce_backlog() if our qlen is 0,
	 * or HTB crashes. Defer it for next round.
	 */
	if (q->cstats.drop_count && sch->q.qlen) {
		qdisc_tree_reduce_backlog(sch, q->cstats.drop_count,
					  q->cstats.drop_len);
		q->cstats.drop_count = 0;
		q->cstats.drop_len = 0;
	}
	return skb;
}
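
/* Worked example of the deficit logic, assuming quantum == 1514: a flow
 * sending 1000-byte packets starts at deficit 1514, drops to 514 after
 * one dequeue, then to -486 after a second; next time it reaches the
 * head of its list it gets +1514 (deficit 1028) and moves to the
 * old_flows tail, so each flow averages one quantum of bytes per round.
 */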

static void fq_codel_flow_purge(struct fq_codel_flow *flow)
{
	rtnl_kfree_skbs(flow->head, flow->tail);
	flow->head = NULL;
}

static void fq_codel_reset(struct Qdisc *sch)
{
	struct fq_codel_sched_data *q = qdisc_priv(sch);
	int i;

	INIT_LIST_HEAD(&q->new_flows);
	INIT_LIST_HEAD(&q->old_flows);
	for (i = 0; i < q->flows_cnt; i++) {
		struct fq_codel_flow *flow = q->flows + i;

		fq_codel_flow_purge(flow);
		INIT_LIST_HEAD(&flow->flowchain);
		codel_vars_init(&flow->cvars);
	}
	memset(q->backlogs, 0, q->flows_cnt * sizeof(u32));
	q->memory_usage = 0;
}

static const struct nla_policy fq_codel_policy[TCA_FQ_CODEL_MAX + 1] = {
	[TCA_FQ_CODEL_TARGET]	= { .type = NLA_U32 },
	[TCA_FQ_CODEL_LIMIT]	= { .type = NLA_U32 },
	[TCA_FQ_CODEL_INTERVAL]	= { .type = NLA_U32 },
	[TCA_FQ_CODEL_ECN]	= { .type = NLA_U32 },
	[TCA_FQ_CODEL_FLOWS]	= { .type = NLA_U32 },
	[TCA_FQ_CODEL_QUANTUM]	= { .type = NLA_U32 },
	[TCA_FQ_CODEL_CE_THRESHOLD] = { .type = NLA_U32 },
	[TCA_FQ_CODEL_DROP_BATCH_SIZE] = { .type = NLA_U32 },
	[TCA_FQ_CODEL_MEMORY_LIMIT] = { .type = NLA_U32 },
	[TCA_FQ_CODEL_CE_THRESHOLD_SELECTOR] = { .type = NLA_U8 },
	[TCA_FQ_CODEL_CE_THRESHOLD_MASK] = { .type = NLA_U8 },
};
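
/* These attributes map onto the iproute2 knobs, e.g. (assuming a
 * reasonably recent tc):
 *
 *	tc qdisc add dev eth0 root fq_codel limit 10240 flows 1024 \
 *		quantum 1514 target 5ms interval 100ms ecn
 */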

static int fq_codel_change(struct Qdisc *sch, struct nlattr *opt,
			   struct netlink_ext_ack *extack)
{
	struct fq_codel_sched_data *q = qdisc_priv(sch);
	struct nlattr *tb[TCA_FQ_CODEL_MAX + 1];
	u32 quantum = 0;
	int err;

	err = nla_parse_nested_deprecated(tb, TCA_FQ_CODEL_MAX, opt,
					  fq_codel_policy, NULL);
	if (err < 0)
		return err;
	if (tb[TCA_FQ_CODEL_FLOWS]) {
		if (q->flows)
			return -EINVAL;
		q->flows_cnt = nla_get_u32(tb[TCA_FQ_CODEL_FLOWS]);
		if (!q->flows_cnt ||
		    q->flows_cnt > 65536)
			return -EINVAL;
	}
	if (tb[TCA_FQ_CODEL_QUANTUM]) {
		quantum = max(256U, nla_get_u32(tb[TCA_FQ_CODEL_QUANTUM]));
		if (quantum > FQ_CODEL_QUANTUM_MAX) {
			NL_SET_ERR_MSG(extack, "Invalid quantum");
			return -EINVAL;
		}
	}
	sch_tree_lock(sch);

	if (tb[TCA_FQ_CODEL_TARGET]) {
		u64 target = nla_get_u32(tb[TCA_FQ_CODEL_TARGET]);

		q->cparams.target = (target * NSEC_PER_USEC) >> CODEL_SHIFT;
	}

	if (tb[TCA_FQ_CODEL_CE_THRESHOLD]) {
		u64 val = nla_get_u32(tb[TCA_FQ_CODEL_CE_THRESHOLD]);

		q->cparams.ce_threshold = (val * NSEC_PER_USEC) >> CODEL_SHIFT;
	}

	if (tb[TCA_FQ_CODEL_CE_THRESHOLD_SELECTOR])
		q->cparams.ce_threshold_selector = nla_get_u8(tb[TCA_FQ_CODEL_CE_THRESHOLD_SELECTOR]);
	if (tb[TCA_FQ_CODEL_CE_THRESHOLD_MASK])
		q->cparams.ce_threshold_mask = nla_get_u8(tb[TCA_FQ_CODEL_CE_THRESHOLD_MASK]);

	if (tb[TCA_FQ_CODEL_INTERVAL]) {
		u64 interval = nla_get_u32(tb[TCA_FQ_CODEL_INTERVAL]);

		q->cparams.interval = (interval * NSEC_PER_USEC) >> CODEL_SHIFT;
	}

	if (tb[TCA_FQ_CODEL_LIMIT])
		sch->limit = nla_get_u32(tb[TCA_FQ_CODEL_LIMIT]);

	if (tb[TCA_FQ_CODEL_ECN])
		q->cparams.ecn = !!nla_get_u32(tb[TCA_FQ_CODEL_ECN]);

	if (quantum)
		q->quantum = quantum;

	if (tb[TCA_FQ_CODEL_DROP_BATCH_SIZE])
		q->drop_batch_size = max(1U, nla_get_u32(tb[TCA_FQ_CODEL_DROP_BATCH_SIZE]));

	if (tb[TCA_FQ_CODEL_MEMORY_LIMIT])
		q->memory_limit = min(1U << 31, nla_get_u32(tb[TCA_FQ_CODEL_MEMORY_LIMIT]));

	while (sch->q.qlen > sch->limit ||
	       q->memory_usage > q->memory_limit) {
		struct sk_buff *skb = fq_codel_dequeue(sch);

		q->cstats.drop_len += qdisc_pkt_len(skb);
		rtnl_kfree_skbs(skb, skb);
		q->cstats.drop_count++;
	}
	qdisc_tree_reduce_backlog(sch, q->cstats.drop_count, q->cstats.drop_len);
	q->cstats.drop_count = 0;
	q->cstats.drop_len = 0;

	sch_tree_unlock(sch);
	return 0;
}

static void fq_codel_destroy(struct Qdisc *sch)
{
	struct fq_codel_sched_data *q = qdisc_priv(sch);

	tcf_block_put(q->block);
	kvfree(q->backlogs);
	kvfree(q->flows);
}

static int fq_codel_init(struct Qdisc *sch, struct nlattr *opt,
			 struct netlink_ext_ack *extack)
{
	struct fq_codel_sched_data *q = qdisc_priv(sch);
	int i;
	int err;

	sch->limit = 10*1024;
	q->flows_cnt = 1024;
	q->memory_limit = 32 << 20; /* 32 MBytes */
	q->drop_batch_size = 64;
	q->quantum = psched_mtu(qdisc_dev(sch));
	INIT_LIST_HEAD(&q->new_flows);
	INIT_LIST_HEAD(&q->old_flows);
	codel_params_init(&q->cparams);
	codel_stats_init(&q->cstats);
	q->cparams.ecn = true;
	q->cparams.mtu = psched_mtu(qdisc_dev(sch));

	if (opt) {
		err = fq_codel_change(sch, opt, extack);
		if (err)
			goto init_failure;
	}

	err = tcf_block_get(&q->block, &q->filter_list, sch, extack);
	if (err)
		goto init_failure;

	if (!q->flows) {
		q->flows = kvcalloc(q->flows_cnt,
				    sizeof(struct fq_codel_flow),
				    GFP_KERNEL);
		if (!q->flows) {
			err = -ENOMEM;
			goto init_failure;
		}
		q->backlogs = kvcalloc(q->flows_cnt, sizeof(u32), GFP_KERNEL);
		if (!q->backlogs) {
			err = -ENOMEM;
			goto alloc_failure;
		}
		for (i = 0; i < q->flows_cnt; i++) {
			struct fq_codel_flow *flow = q->flows + i;

			INIT_LIST_HEAD(&flow->flowchain);
			codel_vars_init(&flow->cvars);
		}
	}
	if (sch->limit >= 1)
		sch->flags |= TCQ_F_CAN_BYPASS;
	else
		sch->flags &= ~TCQ_F_CAN_BYPASS;
	return 0;

alloc_failure:
	kvfree(q->flows);
	q->flows = NULL;
init_failure:
	q->flows_cnt = 0;
	return err;
}
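
/* Defaults above: 10240 packet limit, 1024 flows, 32 MB memory limit,
 * drop batches of 64, quantum of one MTU-sized packet; target and
 * interval come from codel_params_init() (5 ms and 100 ms upstream).
 */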

static int fq_codel_dump(struct Qdisc *sch, struct sk_buff *skb)
{
	struct fq_codel_sched_data *q = qdisc_priv(sch);
	struct nlattr *opts;

	opts = nla_nest_start_noflag(skb, TCA_OPTIONS);
	if (opts == NULL)
		goto nla_put_failure;

	if (nla_put_u32(skb, TCA_FQ_CODEL_TARGET,
			codel_time_to_us(q->cparams.target)) ||
	    nla_put_u32(skb, TCA_FQ_CODEL_LIMIT,
			sch->limit) ||
	    nla_put_u32(skb, TCA_FQ_CODEL_INTERVAL,
			codel_time_to_us(q->cparams.interval)) ||
	    nla_put_u32(skb, TCA_FQ_CODEL_ECN,
			q->cparams.ecn) ||
	    nla_put_u32(skb, TCA_FQ_CODEL_QUANTUM,
			q->quantum) ||
	    nla_put_u32(skb, TCA_FQ_CODEL_DROP_BATCH_SIZE,
			q->drop_batch_size) ||
	    nla_put_u32(skb, TCA_FQ_CODEL_MEMORY_LIMIT,
			q->memory_limit) ||
	    nla_put_u32(skb, TCA_FQ_CODEL_FLOWS,
			q->flows_cnt))
		goto nla_put_failure;

	if (q->cparams.ce_threshold != CODEL_DISABLED_THRESHOLD) {
		if (nla_put_u32(skb, TCA_FQ_CODEL_CE_THRESHOLD,
				codel_time_to_us(q->cparams.ce_threshold)))
			goto nla_put_failure;
		if (nla_put_u8(skb, TCA_FQ_CODEL_CE_THRESHOLD_SELECTOR, q->cparams.ce_threshold_selector))
			goto nla_put_failure;
		if (nla_put_u8(skb, TCA_FQ_CODEL_CE_THRESHOLD_MASK, q->cparams.ce_threshold_mask))
			goto nla_put_failure;
	}

	return nla_nest_end(skb, opts);

nla_put_failure:
	return -1;
}

static int fq_codel_dump_stats(struct Qdisc *sch, struct gnet_dump *d)
{
	struct fq_codel_sched_data *q = qdisc_priv(sch);
	struct tc_fq_codel_xstats st = {
		.type	= TCA_FQ_CODEL_XSTATS_QDISC,
	};
	struct list_head *pos;

	st.qdisc_stats.maxpacket = q->cstats.maxpacket;
	st.qdisc_stats.drop_overlimit = q->drop_overlimit;
	st.qdisc_stats.ecn_mark = q->cstats.ecn_mark;
	st.qdisc_stats.new_flow_count = q->new_flow_count;
	st.qdisc_stats.ce_mark = q->cstats.ce_mark;
	st.qdisc_stats.memory_usage = q->memory_usage;
	st.qdisc_stats.drop_overmemory = q->drop_overmemory;

	sch_tree_lock(sch);
	list_for_each(pos, &q->new_flows)
		st.qdisc_stats.new_flows_len++;

	list_for_each(pos, &q->old_flows)
		st.qdisc_stats.old_flows_len++;
	sch_tree_unlock(sch);

	return gnet_stats_copy_app(d, &st, sizeof(st));
}

static struct Qdisc *fq_codel_leaf(struct Qdisc *sch, unsigned long arg)
{
	return NULL;
}

static unsigned long fq_codel_find(struct Qdisc *sch, u32 classid)
{
	return 0;
}

static unsigned long fq_codel_bind(struct Qdisc *sch, unsigned long parent,
				   u32 classid)
{
	return 0;
}

static void fq_codel_unbind(struct Qdisc *q, unsigned long cl)
{
}

static struct tcf_block *fq_codel_tcf_block(struct Qdisc *sch, unsigned long cl,
					    struct netlink_ext_ack *extack)
{
	struct fq_codel_sched_data *q = qdisc_priv(sch);

	if (cl)
		return NULL;
	return q->block;
}

static int fq_codel_dump_class(struct Qdisc *sch, unsigned long cl,
			       struct sk_buff *skb, struct tcmsg *tcm)
{
	tcm->tcm_handle |= TC_H_MIN(cl);
	return 0;
}

static int fq_codel_dump_class_stats(struct Qdisc *sch, unsigned long cl,
				     struct gnet_dump *d)
{
	struct fq_codel_sched_data *q = qdisc_priv(sch);
	u32 idx = cl - 1;
	struct gnet_stats_queue qs = { 0 };
	struct tc_fq_codel_xstats xstats;

	if (idx < q->flows_cnt) {
		const struct fq_codel_flow *flow = &q->flows[idx];
		const struct sk_buff *skb;

		memset(&xstats, 0, sizeof(xstats));
		xstats.type = TCA_FQ_CODEL_XSTATS_CLASS;
		xstats.class_stats.deficit = flow->deficit;
		xstats.class_stats.ldelay =
			codel_time_to_us(flow->cvars.ldelay);
		xstats.class_stats.count = flow->cvars.count;
		xstats.class_stats.lastcount = flow->cvars.lastcount;
		xstats.class_stats.dropping = flow->cvars.dropping;
		if (flow->cvars.dropping) {
			codel_tdiff_t delta = flow->cvars.drop_next -
					      codel_get_time();

			xstats.class_stats.drop_next = (delta >= 0) ?
				codel_time_to_us(delta) :
				-codel_time_to_us(-delta);
		}
		if (flow->head) {
			sch_tree_lock(sch);
			skb = flow->head;
			while (skb) {
				qs.qlen++;
				skb = skb->next;
			}
			sch_tree_unlock(sch);
		}
		qs.backlog = q->backlogs[idx];
		qs.drops = 0;
	}
	if (gnet_stats_copy_queue(d, NULL, &qs, qs.qlen) < 0)
		return -1;
	if (idx < q->flows_cnt)
		return gnet_stats_copy_app(d, &xstats, sizeof(xstats));
	return 0;
}
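
/* Each active flow is exposed as a pseudo class, so per-flow deficit,
 * delay and backlog can be inspected with something like:
 *
 *	tc -s class show dev eth0
 */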

static void fq_codel_walk(struct Qdisc *sch, struct qdisc_walker *arg)
{
	struct fq_codel_sched_data *q = qdisc_priv(sch);
	unsigned int i;

	if (arg->stop)
		return;

	for (i = 0; i < q->flows_cnt; i++) {
		if (list_empty(&q->flows[i].flowchain)) {
			arg->count++;
			continue;
		}
		if (!tc_qdisc_stats_dump(sch, i + 1, arg))
			break;
	}
}

static const struct Qdisc_class_ops fq_codel_class_ops = {
	.leaf		=	fq_codel_leaf,
	.find		=	fq_codel_find,
	.tcf_block	=	fq_codel_tcf_block,
	.bind_tcf	=	fq_codel_bind,
	.unbind_tcf	=	fq_codel_unbind,
	.dump		=	fq_codel_dump_class,
	.dump_stats	=	fq_codel_dump_class_stats,
	.walk		=	fq_codel_walk,
};

static struct Qdisc_ops fq_codel_qdisc_ops __read_mostly = {
	.cl_ops		=	&fq_codel_class_ops,
	.id		=	"fq_codel",
	.priv_size	=	sizeof(struct fq_codel_sched_data),
	.enqueue	=	fq_codel_enqueue,
	.dequeue	=	fq_codel_dequeue,
	.peek		=	qdisc_peek_dequeued,
	.init		=	fq_codel_init,
	.reset		=	fq_codel_reset,
	.destroy	=	fq_codel_destroy,
	.change		=	fq_codel_change,
	.dump		=	fq_codel_dump,
	.dump_stats	=	fq_codel_dump_stats,
	.owner		=	THIS_MODULE,
};

static int __init fq_codel_module_init(void)
{
	return register_qdisc(&fq_codel_qdisc_ops);
}

static void __exit fq_codel_module_exit(void)
{
	unregister_qdisc(&fq_codel_qdisc_ops);
}

module_init(fq_codel_module_init)
module_exit(fq_codel_module_exit)
MODULE_AUTHOR("Eric Dumazet");
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Fair Queue CoDel discipline");