/*
 * Copyright (c) 2016 Qualcomm Atheros, Inc
 *
 * GPL v2
 *
 * Based on net/sched/sch_fq_codel.c
 */
#ifndef __NET_SCHED_FQ_IMPL_H
#define __NET_SCHED_FQ_IMPL_H

#include <net/fq.h>

/* functions that are embedded into includer */

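/* Update tin/flow/fq accounting after @skb has been removed from @flow. */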
static void fq_adjust_removal(struct fq *fq,
			      struct fq_flow *flow,
			      struct sk_buff *skb)
{
	struct fq_tin *tin = flow->tin;

	tin->backlog_bytes -= skb->len;
	tin->backlog_packets--;
	flow->backlog -= skb->len;
	fq->backlog--;
	fq->memory_usage -= skb->truesize;
}

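/*
 * Re-sort @flow within fq->backlogs, which is kept in descending order
 * of per-flow backlog, or unlink it once its backlog reaches zero.
 */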
static void fq_rejigger_backlog(struct fq *fq, struct fq_flow *flow)
{
	struct fq_flow *i;

	if (flow->backlog == 0) {
		list_del_init(&flow->backlogchain);
	} else {
		i = flow;

		list_for_each_entry_continue(i, &fq->backlogs, backlogchain)
			if (i->backlog < flow->backlog)
				break;

		list_move_tail(&flow->backlogchain,
			       &i->backlogchain);
	}
}

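/* Remove and return the head skb of @flow, or NULL if the flow is empty. */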
static struct sk_buff *fq_flow_dequeue(struct fq *fq,
				       struct fq_flow *flow)
{
	struct sk_buff *skb;

	lockdep_assert_held(&fq->lock);

	skb = __skb_dequeue(&flow->queue);
	if (!skb)
		return NULL;

	fq_adjust_removal(fq, flow, skb);
	fq_rejigger_backlog(fq, flow);

	return skb;
}

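/*
 * Deficit round-robin dequeue across the tin's new/old flow lists, as in
 * fq_codel: a flow with an exhausted deficit is given one more quantum and
 * rotated to old_flows; an empty new flow gets one pass through old_flows
 * before being unlinked, to prevent starvation of old flows.
 */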
static struct sk_buff *fq_tin_dequeue(struct fq *fq,
				      struct fq_tin *tin,
				      fq_tin_dequeue_t dequeue_func)
{
	struct fq_flow *flow;
	struct list_head *head;
	struct sk_buff *skb;

	lockdep_assert_held(&fq->lock);

begin:
	head = &tin->new_flows;
	if (list_empty(head)) {
		head = &tin->old_flows;
		if (list_empty(head))
			return NULL;
	}

	flow = list_first_entry(head, struct fq_flow, flowchain);

	if (flow->deficit <= 0) {
		flow->deficit += fq->quantum;
		list_move_tail(&flow->flowchain,
			       &tin->old_flows);
		goto begin;
	}

	skb = dequeue_func(fq, tin, flow);
	if (!skb) {
		/* force a pass through old_flows to prevent starvation */
		if ((head == &tin->new_flows) &&
		    !list_empty(&tin->old_flows)) {
			list_move_tail(&flow->flowchain, &tin->old_flows);
		} else {
			list_del_init(&flow->flowchain);
			flow->tin = NULL;
		}
		goto begin;
	}

	flow->deficit -= skb->len;
	tin->tx_bytes += skb->len;
	tin->tx_packets++;

	return skb;
}

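/*
 * Hash @skb into one of fq->flows. On a hash collision with a flow that
 * already belongs to another tin, fall back to the includer-supplied
 * default flow and count the collision.
 */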
static struct fq_flow *fq_flow_classify(struct fq *fq,
					struct fq_tin *tin,
					struct sk_buff *skb,
					fq_flow_get_default_t get_default_func)
{
	struct fq_flow *flow;
	u32 hash;
	u32 idx;

	lockdep_assert_held(&fq->lock);

	hash = skb_get_hash_perturb(skb, fq->perturbation);
	idx = reciprocal_scale(hash, fq->flows_cnt);
	flow = &fq->flows[idx];

	if (flow->tin && flow->tin != tin) {
		flow = get_default_func(fq, tin, idx, skb);
		tin->collisions++;
		fq->collisions++;
	}

	if (!flow->tin)
		tin->flows++;

	return flow;
}

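/*
 * Link @flow into fq->backlogs if needed and move it towards the head,
 * keeping the list sorted in descending order of per-flow backlog.
 */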
static void fq_recalc_backlog(struct fq *fq,
			      struct fq_tin *tin,
			      struct fq_flow *flow)
{
	struct fq_flow *i;

	if (list_empty(&flow->backlogchain))
		list_add_tail(&flow->backlogchain, &fq->backlogs);

	i = flow;
	list_for_each_entry_continue_reverse(i, &fq->backlogs,
					     backlogchain)
		if (i->backlog > flow->backlog)
			break;

	list_move(&flow->backlogchain, &i->backlogchain);
}

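/*
 * Classify and enqueue @skb. While the packet limit or the memory limit
 * is exceeded, drop packets from the flow with the largest backlog, i.e.
 * the head of fq->backlogs.
 */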
static void fq_tin_enqueue(struct fq *fq,
			   struct fq_tin *tin,
			   struct sk_buff *skb,
			   fq_skb_free_t free_func,
			   fq_flow_get_default_t get_default_func)
{
	struct fq_flow *flow;
	bool oom;

	lockdep_assert_held(&fq->lock);

	flow = fq_flow_classify(fq, tin, skb, get_default_func);

	flow->tin = tin;
	flow->backlog += skb->len;
	tin->backlog_bytes += skb->len;
	tin->backlog_packets++;
	fq->memory_usage += skb->truesize;
	fq->backlog++;

	fq_recalc_backlog(fq, tin, flow);

	if (list_empty(&flow->flowchain)) {
		flow->deficit = fq->quantum;
		list_add_tail(&flow->flowchain,
			      &tin->new_flows);
	}

	__skb_queue_tail(&flow->queue, skb);
	oom = (fq->memory_usage > fq->memory_limit);
	while (fq->backlog > fq->limit || oom) {
		flow = list_first_entry_or_null(&fq->backlogs,
						struct fq_flow,
						backlogchain);
		if (!flow)
			return;

		skb = fq_flow_dequeue(fq, flow);
		if (!skb)
			return;

		free_func(fq, flow->tin, flow, skb);

		flow->tin->overlimit++;
		fq->overlimit++;
		if (oom) {
			fq->overmemory++;
			oom = (fq->memory_usage > fq->memory_limit);
		}
	}
}

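/* Free every skb in @flow for which @filter_func returns true. */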
static void fq_flow_filter(struct fq *fq,
			   struct fq_flow *flow,
			   fq_skb_filter_t filter_func,
			   void *filter_data,
			   fq_skb_free_t free_func)
{
	struct fq_tin *tin = flow->tin;
	struct sk_buff *skb, *tmp;

	lockdep_assert_held(&fq->lock);

	skb_queue_walk_safe(&flow->queue, skb, tmp) {
		if (!filter_func(fq, tin, flow, skb, filter_data))
			continue;

		__skb_unlink(skb, &flow->queue);
		fq_adjust_removal(fq, flow, skb);
		free_func(fq, tin, flow, skb);
	}

	fq_rejigger_backlog(fq, flow);
}

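/* Apply fq_flow_filter() to every flow on the tin's new and old lists. */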
static void fq_tin_filter(struct fq *fq,
			  struct fq_tin *tin,
			  fq_skb_filter_t filter_func,
			  void *filter_data,
			  fq_skb_free_t free_func)
{
	struct fq_flow *flow;

	lockdep_assert_held(&fq->lock);

	list_for_each_entry(flow, &tin->new_flows, flowchain)
		fq_flow_filter(fq, flow, filter_func, filter_data, free_func);
	list_for_each_entry(flow, &tin->old_flows, flowchain)
		fq_flow_filter(fq, flow, filter_func, filter_data, free_func);
}

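/* Free all queued skbs and unlink @flow from every list it is on. */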
static void fq_flow_reset(struct fq *fq,
			  struct fq_flow *flow,
			  fq_skb_free_t free_func)
{
	struct sk_buff *skb;

	while ((skb = fq_flow_dequeue(fq, flow)))
		free_func(fq, flow->tin, flow, skb);

	if (!list_empty(&flow->flowchain))
		list_del_init(&flow->flowchain);

	if (!list_empty(&flow->backlogchain))
		list_del_init(&flow->backlogchain);

	flow->tin = NULL;

	WARN_ON_ONCE(flow->backlog);
}

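/* Reset every flow still linked on the tin's new/old lists. */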
static void fq_tin_reset(struct fq *fq,
			 struct fq_tin *tin,
			 fq_skb_free_t free_func)
{
	struct list_head *head;
	struct fq_flow *flow;

	for (;;) {
		head = &tin->new_flows;
		if (list_empty(head)) {
			head = &tin->old_flows;
			if (list_empty(head))
				break;
		}

		flow = list_first_entry(head, struct fq_flow, flowchain);
		fq_flow_reset(fq, flow, free_func);
	}

	WARN_ON_ONCE(tin->backlog_bytes);
	WARN_ON_ONCE(tin->backlog_packets);
}

static void fq_flow_init(struct fq_flow *flow)
{
	INIT_LIST_HEAD(&flow->flowchain);
	INIT_LIST_HEAD(&flow->backlogchain);
	__skb_queue_head_init(&flow->queue);
}

static void fq_tin_init(struct fq_tin *tin)
{
	INIT_LIST_HEAD(&tin->new_flows);
	INIT_LIST_HEAD(&tin->old_flows);
}

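/*
 * Allocate the flow table and set the defaults: a 300-byte quantum, an
 * 8192-packet limit and a 16 MByte memory limit.
 */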
static int fq_init(struct fq *fq, int flows_cnt)
{
	int i;

	memset(fq, 0, sizeof(fq[0]));
	INIT_LIST_HEAD(&fq->backlogs);
	spin_lock_init(&fq->lock);
	fq->flows_cnt = max_t(u32, flows_cnt, 1);
	fq->perturbation = prandom_u32();
	fq->quantum = 300;
	fq->limit = 8192;
	fq->memory_limit = 16 << 20; /* 16 MBytes */

	fq->flows = kcalloc(fq->flows_cnt, sizeof(fq->flows[0]), GFP_KERNEL);
	if (!fq->flows)
		return -ENOMEM;

	for (i = 0; i < fq->flows_cnt; i++)
		fq_flow_init(&fq->flows[i]);

	return 0;
}

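/* Flush all flows and free the flow table. */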
static void fq_reset(struct fq *fq,
		     fq_skb_free_t free_func)
{
	int i;

	for (i = 0; i < fq->flows_cnt; i++)
		fq_flow_reset(fq, &fq->flows[i], free_func);

	kfree(fq->flows);
	fq->flows = NULL;
}

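/*
 * A minimal sketch of how an includer might drive this API while holding
 * fq->lock; the callback names (my_free, my_dequeue, my_get_default) are
 * hypothetical:
 *
 *	struct fq fq;
 *	struct fq_tin tin;
 *
 *	fq_init(&fq, 1024);
 *	fq_tin_init(&tin);
 *
 *	spin_lock_bh(&fq.lock);
 *	fq_tin_enqueue(&fq, &tin, skb, my_free, my_get_default);
 *	skb = fq_tin_dequeue(&fq, &tin, my_dequeue);
 *	spin_unlock_bh(&fq.lock);
 *
 *	fq_tin_reset(&fq, &tin, my_free);
 *	fq_reset(&fq, my_free);
 */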
#endif