/*
 * net/sched/cls_flow.c		Generic flow classifier
 *
 * Copyright (c) 2007, 2008 Patrick McHardy <[email protected]>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version 2
 * of the License, or (at your option) any later version.
 */

#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/list.h>
#include <linux/jhash.h>
#include <linux/random.h>
#include <linux/pkt_cls.h>
#include <linux/skbuff.h>
#include <linux/in.h>
#include <linux/ip.h>
#include <linux/ipv6.h>
#include <linux/if_vlan.h>

#include <net/pkt_cls.h>
#include <net/ip.h>
#include <net/route.h>
#if defined(CONFIG_NF_CONNTRACK) || defined(CONFIG_NF_CONNTRACK_MODULE)
#include <net/netfilter/nf_conntrack.h>
#endif

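/*
 * Overview (annotation): the flow classifier maps packets to classes either
 * by hashing a set of flow keys (FLOW_MODE_HASH) or by transforming a single
 * key with mask/xor/rshift/addend (FLOW_MODE_MAP), typically to spread flows
 * across the classes of a classful qdisc.
 *
 * Illustrative configuration only (iproute2 syntax, exact option spelling may
 * vary by version):
 *
 *	tc filter add dev eth0 parent 1: protocol ip prio 1 handle 1 flow \
 *		hash keys src,dst,proto,proto-src,proto-dst \
 *		divisor 1024 perturb 15 baseclass 1:1
 */
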
struct flow_head {
	struct list_head list;
};

struct flow_filter {
	struct list_head list;
	struct tcf_exts exts;
	struct tcf_ematch_tree ematches;
	struct timer_list perturb_timer;
	u32 perturb_period;
	u32 handle;

	u32 nkeys;
	u32 keymask;
	u32 mode;
	u32 mask;
	u32 xor;
	u32 rshift;
	u32 addend;
	u32 divisor;
	u32 baseclass;
	u32 hashrnd;
};

static const struct tcf_ext_map flow_ext_map = {
	.action = TCA_FLOW_ACT,
	.police = TCA_FLOW_POLICE,
};

static inline u32 addr_fold(void *addr)
{
	unsigned long a = (unsigned long)addr;

	return (a & 0xFFFFFFFF) ^ (BITS_PER_LONG > 32 ? a >> 32 : 0);
}

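/*
 * Key extraction helpers: each flow_get_*() returns one 32-bit key value for
 * the current packet.  Non-IP packets fall back to folding kernel pointers
 * (socket, dst entry) so that they still produce a stable key.
 */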
static u32 flow_get_src(const struct sk_buff *skb)
{
	switch (skb->protocol) {
	case htons(ETH_P_IP):
		return ntohl(ip_hdr(skb)->saddr);
	case htons(ETH_P_IPV6):
		return ntohl(ipv6_hdr(skb)->saddr.s6_addr32[3]);
	default:
		return addr_fold(skb->sk);
	}
}

static u32 flow_get_dst(const struct sk_buff *skb)
{
	switch (skb->protocol) {
	case htons(ETH_P_IP):
		return ntohl(ip_hdr(skb)->daddr);
	case htons(ETH_P_IPV6):
		return ntohl(ipv6_hdr(skb)->daddr.s6_addr32[3]);
	default:
		return addr_fold(skb->dst) ^ (__force u16)skb->protocol;
	}
}

static u32 flow_get_proto(const struct sk_buff *skb)
{
	switch (skb->protocol) {
	case htons(ETH_P_IP):
		return ip_hdr(skb)->protocol;
	case htons(ETH_P_IPV6):
		return ipv6_hdr(skb)->nexthdr;
	default:
		return 0;
	}
}

static int has_ports(u8 protocol)
{
	switch (protocol) {
	case IPPROTO_TCP:
	case IPPROTO_UDP:
	case IPPROTO_UDPLITE:
	case IPPROTO_SCTP:
	case IPPROTO_DCCP:
	case IPPROTO_ESP:
		return 1;
	default:
		return 0;
	}
}

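/*
 * Port extraction: for unfragmented IPv4 packets the source/destination port
 * is read from the first 16-bit words following the IP header; for IPv6 the
 * transport header is assumed to follow the fixed 40-byte IPv6 header
 * (extension headers are not walked here).
 */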
static u32 flow_get_proto_src(const struct sk_buff *skb)
{
	u32 res = 0;

	switch (skb->protocol) {
	case htons(ETH_P_IP): {
		struct iphdr *iph = ip_hdr(skb);

		if (!(iph->frag_off & htons(IP_MF | IP_OFFSET)) &&
		    has_ports(iph->protocol))
			res = ntohs(*(__be16 *)((void *)iph + iph->ihl * 4));
		break;
	}
	case htons(ETH_P_IPV6): {
		struct ipv6hdr *iph = ipv6_hdr(skb);

		if (has_ports(iph->nexthdr))
			res = ntohs(*(__be16 *)&iph[1]);
		break;
	}
	default:
		res = addr_fold(skb->sk);
	}

	return res;
}

static u32 flow_get_proto_dst(const struct sk_buff *skb)
{
	u32 res = 0;

	switch (skb->protocol) {
	case htons(ETH_P_IP): {
		struct iphdr *iph = ip_hdr(skb);

		if (!(iph->frag_off & htons(IP_MF | IP_OFFSET)) &&
		    has_ports(iph->protocol))
			res = ntohs(*(__be16 *)((void *)iph + iph->ihl * 4 + 2));
		break;
	}
	case htons(ETH_P_IPV6): {
		struct ipv6hdr *iph = ipv6_hdr(skb);

		if (has_ports(iph->nexthdr))
			res = ntohs(*(__be16 *)((void *)&iph[1] + 2));
		break;
	}
	default:
		res = addr_fold(skb->dst) ^ (__force u16)skb->protocol;
	}

	return res;
}

static u32 flow_get_iif(const struct sk_buff *skb)
{
	return skb->iif;
}

static u32 flow_get_priority(const struct sk_buff *skb)
{
	return skb->priority;
}

static u32 flow_get_mark(const struct sk_buff *skb)
{
	return skb->mark;
}

static u32 flow_get_nfct(const struct sk_buff *skb)
{
#if defined(CONFIG_NF_CONNTRACK) || defined(CONFIG_NF_CONNTRACK_MODULE)
	return addr_fold(skb->nfct);
#else
	return 0;
#endif
}

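/*
 * CTTUPLE() evaluates to the requested member of the conntrack tuple for
 * this packet.  It is a statement expression that jumps to the caller's
 * local "fallback" label when no conntrack entry is attached (or when
 * conntrack support is compiled out), so every user below provides a
 * fallback path based on the packet headers alone.
 */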
#if defined(CONFIG_NF_CONNTRACK) || defined(CONFIG_NF_CONNTRACK_MODULE)
#define CTTUPLE(skb, member)						\
({									\
	enum ip_conntrack_info ctinfo;					\
	struct nf_conn *ct = nf_ct_get(skb, &ctinfo);			\
	if (ct == NULL)							\
		goto fallback;						\
	ct->tuplehash[CTINFO2DIR(ctinfo)].tuple.member;			\
})
#else
#define CTTUPLE(skb, member)						\
({									\
	goto fallback;							\
	0;								\
})
#endif

static u32 flow_get_nfct_src(const struct sk_buff *skb)
{
	switch (skb->protocol) {
	case htons(ETH_P_IP):
		return ntohl(CTTUPLE(skb, src.u3.ip));
	case htons(ETH_P_IPV6):
		return ntohl(CTTUPLE(skb, src.u3.ip6[3]));
	}
fallback:
	return flow_get_src(skb);
}

static u32 flow_get_nfct_dst(const struct sk_buff *skb)
{
	switch (skb->protocol) {
	case htons(ETH_P_IP):
		return ntohl(CTTUPLE(skb, dst.u3.ip));
	case htons(ETH_P_IPV6):
		return ntohl(CTTUPLE(skb, dst.u3.ip6[3]));
	}
fallback:
	return flow_get_dst(skb);
}

static u32 flow_get_nfct_proto_src(const struct sk_buff *skb)
{
	return ntohs(CTTUPLE(skb, src.u.all));
fallback:
	return flow_get_proto_src(skb);
}

static u32 flow_get_nfct_proto_dst(const struct sk_buff *skb)
{
	return ntohs(CTTUPLE(skb, dst.u.all));
fallback:
	return flow_get_proto_dst(skb);
}

static u32 flow_get_rtclassid(const struct sk_buff *skb)
{
#ifdef CONFIG_NET_CLS_ROUTE
	if (skb->dst)
		return skb->dst->tclassid;
#endif
	return 0;
}

static u32 flow_get_skuid(const struct sk_buff *skb)
{
	if (skb->sk && skb->sk->sk_socket && skb->sk->sk_socket->file)
		return skb->sk->sk_socket->file->f_cred->fsuid;
	return 0;
}

static u32 flow_get_skgid(const struct sk_buff *skb)
{
	if (skb->sk && skb->sk->sk_socket && skb->sk->sk_socket->file)
		return skb->sk->sk_socket->file->f_cred->fsgid;
	return 0;
}

static u32 flow_get_vlan_tag(const struct sk_buff *skb)
{
	u16 uninitialized_var(tag);

	if (vlan_get_tag(skb, &tag) < 0)
		return 0;
	return tag & VLAN_VID_MASK;
}

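/*
 * flow_key_get() dispatches a FLOW_KEY_* identifier (as configured via the
 * TCA_FLOW_KEYS bitmask) to the matching extraction helper above.
 */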
static u32 flow_key_get(const struct sk_buff *skb, int key)
{
	switch (key) {
	case FLOW_KEY_SRC:
		return flow_get_src(skb);
	case FLOW_KEY_DST:
		return flow_get_dst(skb);
	case FLOW_KEY_PROTO:
		return flow_get_proto(skb);
	case FLOW_KEY_PROTO_SRC:
		return flow_get_proto_src(skb);
	case FLOW_KEY_PROTO_DST:
		return flow_get_proto_dst(skb);
	case FLOW_KEY_IIF:
		return flow_get_iif(skb);
	case FLOW_KEY_PRIORITY:
		return flow_get_priority(skb);
	case FLOW_KEY_MARK:
		return flow_get_mark(skb);
	case FLOW_KEY_NFCT:
		return flow_get_nfct(skb);
	case FLOW_KEY_NFCT_SRC:
		return flow_get_nfct_src(skb);
	case FLOW_KEY_NFCT_DST:
		return flow_get_nfct_dst(skb);
	case FLOW_KEY_NFCT_PROTO_SRC:
		return flow_get_nfct_proto_src(skb);
	case FLOW_KEY_NFCT_PROTO_DST:
		return flow_get_nfct_proto_dst(skb);
	case FLOW_KEY_RTCLASSID:
		return flow_get_rtclassid(skb);
	case FLOW_KEY_SKUID:
		return flow_get_skuid(skb);
	case FLOW_KEY_SKGID:
		return flow_get_skgid(skb);
	case FLOW_KEY_VLAN_TAG:
		return flow_get_vlan_tag(skb);
	default:
		WARN_ON(1);
		return 0;
	}
}

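/*
 * flow_classify() walks the configured filters.  For each filter whose
 * ematch tree matches, the selected keys are collected and reduced to a
 * class id: in FLOW_MODE_HASH mode by jhash2() over all keys seeded with the
 * (optionally perturbed) hashrnd, in FLOW_MODE_MAP mode by applying mask,
 * xor, rshift and addend to the single key.  The result is taken modulo the
 * divisor (if set) and offset by baseclass.
 */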
static int flow_classify(struct sk_buff *skb, struct tcf_proto *tp,
			 struct tcf_result *res)
{
	struct flow_head *head = tp->root;
	struct flow_filter *f;
	u32 keymask;
	u32 classid;
	unsigned int n, key;
	int r;

	list_for_each_entry(f, &head->filters, list) {
		u32 keys[f->nkeys];

		if (!tcf_em_tree_match(skb, &f->ematches, NULL))
			continue;

		keymask = f->keymask;

		for (n = 0; n < f->nkeys; n++) {
			key = ffs(keymask) - 1;
			keymask &= ~(1 << key);
			keys[n] = flow_key_get(skb, key);
		}

		if (f->mode == FLOW_MODE_HASH)
			classid = jhash2(keys, f->nkeys, f->hashrnd);
		else {
			classid = keys[0];
			classid = (classid & f->mask) ^ f->xor;
			classid = (classid >> f->rshift) + f->addend;
		}

		if (f->divisor)
			classid %= f->divisor;

		res->class = 0;
		res->classid = TC_H_MAKE(f->baseclass, f->baseclass + classid);

		r = tcf_exts_exec(skb, &f->exts, res);
		if (r < 0)
			continue;
		return r;
	}
	return -1;
}

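/*
 * Timer callback: periodically re-randomise the jhash seed so that a given
 * traffic pattern does not stay collapsed into the same buckets forever.
 * Re-arms itself as long as a perturbation period is configured.
 */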
static void flow_perturbation(unsigned long arg)
{
	struct flow_filter *f = (struct flow_filter *)arg;

	get_random_bytes(&f->hashrnd, 4);
	if (f->perturb_period)
		mod_timer(&f->perturb_timer, jiffies + f->perturb_period);
}

static const struct nla_policy flow_policy[TCA_FLOW_MAX + 1] = {
	[TCA_FLOW_KEYS]      = { .type = NLA_U32 },
	[TCA_FLOW_MODE]      = { .type = NLA_U32 },
	[TCA_FLOW_BASECLASS] = { .type = NLA_U32 },
	[TCA_FLOW_RSHIFT]    = { .type = NLA_U32 },
	[TCA_FLOW_ADDEND]    = { .type = NLA_U32 },
	[TCA_FLOW_MASK]      = { .type = NLA_U32 },
	[TCA_FLOW_XOR]       = { .type = NLA_U32 },
	[TCA_FLOW_DIVISOR]   = { .type = NLA_U32 },
	[TCA_FLOW_ACT]       = { .type = NLA_NESTED },
	[TCA_FLOW_POLICE]    = { .type = NLA_NESTED },
	[TCA_FLOW_EMATCHES]  = { .type = NLA_NESTED },
	[TCA_FLOW_PERTURB]   = { .type = NLA_U32 },
};

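/*
 * flow_change() creates or updates a filter from netlink attributes.
 * Constraints enforced below: map mode accepts only a single key, a
 * perturbation period is only valid in hash mode, new filters require a
 * non-zero handle and an explicit key set, and the baseclass defaults to
 * minor 1 under the owning qdisc when left unspecified.
 */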
static int flow_change(struct tcf_proto *tp, unsigned long base,
		       u32 handle, struct nlattr **tca,
		       unsigned long *arg)
{
	struct flow_head *head = tp->root;
	struct flow_filter *f;
	struct nlattr *opt = tca[TCA_OPTIONS];
	struct nlattr *tb[TCA_FLOW_MAX + 1];
	struct tcf_exts e;
	struct tcf_ematch_tree t;
	unsigned int nkeys = 0;
	unsigned int perturb_period = 0;
	u32 baseclass = 0;
	u32 keymask = 0;
	u32 mode;
	int err;

	if (opt == NULL)
		return -EINVAL;

	err = nla_parse_nested(tb, TCA_FLOW_MAX, opt, flow_policy);
	if (err < 0)
		return err;

	if (tb[TCA_FLOW_BASECLASS]) {
		baseclass = nla_get_u32(tb[TCA_FLOW_BASECLASS]);
		if (TC_H_MIN(baseclass) == 0)
			return -EINVAL;
	}

	if (tb[TCA_FLOW_KEYS]) {
		keymask = nla_get_u32(tb[TCA_FLOW_KEYS]);

		nkeys = hweight32(keymask);
		if (nkeys == 0)
			return -EINVAL;

		if (fls(keymask) - 1 > FLOW_KEY_MAX)
			return -EOPNOTSUPP;
	}

	err = tcf_exts_validate(tp, tb, tca[TCA_RATE], &e, &flow_ext_map);
	if (err < 0)
		return err;

	err = tcf_em_tree_validate(tp, tb[TCA_FLOW_EMATCHES], &t);
	if (err < 0)
		goto err1;

	f = (struct flow_filter *)*arg;
	if (f != NULL) {
		err = -EINVAL;
		if (f->handle != handle && handle)
			goto err2;

		mode = f->mode;
		if (tb[TCA_FLOW_MODE])
			mode = nla_get_u32(tb[TCA_FLOW_MODE]);
		if (mode != FLOW_MODE_HASH && nkeys > 1)
			goto err2;

		if (mode == FLOW_MODE_HASH)
			perturb_period = f->perturb_period;
		if (tb[TCA_FLOW_PERTURB]) {
			if (mode != FLOW_MODE_HASH)
				goto err2;
			perturb_period = nla_get_u32(tb[TCA_FLOW_PERTURB]) * HZ;
		}
	} else {
		err = -EINVAL;
		if (!handle)
			goto err2;
		if (!tb[TCA_FLOW_KEYS])
			goto err2;

		mode = FLOW_MODE_MAP;
		if (tb[TCA_FLOW_MODE])
			mode = nla_get_u32(tb[TCA_FLOW_MODE]);
		if (mode != FLOW_MODE_HASH && nkeys > 1)
			goto err2;

		if (tb[TCA_FLOW_PERTURB]) {
			if (mode != FLOW_MODE_HASH)
				goto err2;
			perturb_period = nla_get_u32(tb[TCA_FLOW_PERTURB]) * HZ;
		}

		if (TC_H_MAJ(baseclass) == 0)
			baseclass = TC_H_MAKE(tp->q->handle, baseclass);
		if (TC_H_MIN(baseclass) == 0)
			baseclass = TC_H_MAKE(baseclass, 1);

		err = -ENOBUFS;
		f = kzalloc(sizeof(*f), GFP_KERNEL);
		if (f == NULL)
			goto err2;

		f->handle = handle;
		f->mask = ~0U;

		get_random_bytes(&f->hashrnd, 4);
		f->perturb_timer.function = flow_perturbation;
		f->perturb_timer.data = (unsigned long)f;
		init_timer_deferrable(&f->perturb_timer);
	}

	tcf_exts_change(tp, &f->exts, &e);
	tcf_em_tree_change(tp, &f->ematches, &t);

	tcf_tree_lock(tp);

	if (tb[TCA_FLOW_KEYS]) {
		f->keymask = keymask;
		f->nkeys = nkeys;
	}

	f->mode = mode;

	if (tb[TCA_FLOW_MASK])
		f->mask = nla_get_u32(tb[TCA_FLOW_MASK]);
	if (tb[TCA_FLOW_XOR])
		f->xor = nla_get_u32(tb[TCA_FLOW_XOR]);
	if (tb[TCA_FLOW_RSHIFT])
		f->rshift = nla_get_u32(tb[TCA_FLOW_RSHIFT]);
	if (tb[TCA_FLOW_ADDEND])
		f->addend = nla_get_u32(tb[TCA_FLOW_ADDEND]);

	if (tb[TCA_FLOW_DIVISOR])
		f->divisor = nla_get_u32(tb[TCA_FLOW_DIVISOR]);
	if (baseclass)
		f->baseclass = baseclass;

	f->perturb_period = perturb_period;
	del_timer(&f->perturb_timer);
	if (perturb_period)
		mod_timer(&f->perturb_timer, jiffies + perturb_period);

	if (*arg == 0)
		list_add_tail(&f->list, &head->filters);

	tcf_tree_unlock(tp);

	*arg = (unsigned long)f;
	return 0;

err2:
	tcf_em_tree_destroy(tp, &t);
err1:
	tcf_exts_destroy(tp, &e);
	return err;
}

static void flow_destroy_filter(struct tcf_proto *tp, struct flow_filter *f)
{
	del_timer_sync(&f->perturb_timer);
	tcf_exts_destroy(tp, &f->exts);
	tcf_em_tree_destroy(tp, &f->ematches);
	kfree(f);
}

static int flow_delete(struct tcf_proto *tp, unsigned long arg)
{
	struct flow_filter *f = (struct flow_filter *)arg;

	tcf_tree_lock(tp);
	list_del(&f->list);
	tcf_tree_unlock(tp);
	flow_destroy_filter(tp, f);
	return 0;
}

static int flow_init(struct tcf_proto *tp)
{
	struct flow_head *head;

	head = kzalloc(sizeof(*head), GFP_KERNEL);
	if (head == NULL)
		return -ENOBUFS;
	INIT_LIST_HEAD(&head->filters);
	tp->root = head;
	return 0;
}

static void flow_destroy(struct tcf_proto *tp)
{
	struct flow_head *head = tp->root;
	struct flow_filter *f, *next;

	list_for_each_entry_safe(f, next, &head->filters, list) {
		list_del(&f->list);
		flow_destroy_filter(tp, f);
	}
	kfree(head);
}

static unsigned long flow_get(struct tcf_proto *tp, u32 handle)
{
	struct flow_head *head = tp->root;
	struct flow_filter *f;

	list_for_each_entry(f, &head->filters, list)
		if (f->handle == handle)
			return (unsigned long)f;
	return 0;
}

static void flow_put(struct tcf_proto *tp, unsigned long f)
{
	return;
}

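/*
 * flow_dump() mirrors flow_change(): it emits the filter's configuration back
 * to userspace as nested TCA_FLOW_* attributes, skipping attributes that
 * still hold their default values.
 */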
static int flow_dump(struct tcf_proto *tp, unsigned long fh,
		     struct sk_buff *skb, struct tcmsg *t)
{
	struct flow_filter *f = (struct flow_filter *)fh;
	struct nlattr *nest;

	if (f == NULL)
		return skb->len;

	t->tcm_handle = f->handle;

	nest = nla_nest_start(skb, TCA_OPTIONS);
	if (nest == NULL)
		goto nla_put_failure;

	NLA_PUT_U32(skb, TCA_FLOW_KEYS, f->keymask);
	NLA_PUT_U32(skb, TCA_FLOW_MODE, f->mode);

	if (f->mask != ~0 || f->xor != 0) {
		NLA_PUT_U32(skb, TCA_FLOW_MASK, f->mask);
		NLA_PUT_U32(skb, TCA_FLOW_XOR, f->xor);
	}
	if (f->rshift)
		NLA_PUT_U32(skb, TCA_FLOW_RSHIFT, f->rshift);
	if (f->addend)
		NLA_PUT_U32(skb, TCA_FLOW_ADDEND, f->addend);

	if (f->divisor)
		NLA_PUT_U32(skb, TCA_FLOW_DIVISOR, f->divisor);
	if (f->baseclass)
		NLA_PUT_U32(skb, TCA_FLOW_BASECLASS, f->baseclass);

	if (f->perturb_period)
		NLA_PUT_U32(skb, TCA_FLOW_PERTURB, f->perturb_period / HZ);

	if (tcf_exts_dump(skb, &f->exts, &flow_ext_map) < 0)
		goto nla_put_failure;
#ifdef CONFIG_NET_EMATCH
	if (f->ematches.hdr.nmatches &&
	    tcf_em_tree_dump(skb, &f->ematches, TCA_FLOW_EMATCHES) < 0)
		goto nla_put_failure;
#endif
	nla_nest_end(skb, nest);

	if (tcf_exts_dump_stats(skb, &f->exts, &flow_ext_map) < 0)
		goto nla_put_failure;

	return skb->len;

nla_put_failure:
	nlmsg_trim(skb, nest);
	return -1;
}

static void flow_walk(struct tcf_proto *tp, struct tcf_walker *arg)
{
	struct flow_head *head = tp->root;
	struct flow_filter *f;

	list_for_each_entry(f, &head->filters, list) {
		if (arg->count < arg->skip)
			goto skip;
		if (arg->fn(tp, (unsigned long)f, arg) < 0) {
			arg->stop = 1;
			break;
		}
skip:
		arg->count++;
	}
}

static struct tcf_proto_ops cls_flow_ops __read_mostly = {
	.kind     = "flow",
	.classify = flow_classify,
	.init     = flow_init,
	.destroy  = flow_destroy,
	.change   = flow_change,
	.delete   = flow_delete,
	.get      = flow_get,
	.put      = flow_put,
	.dump     = flow_dump,
	.walk     = flow_walk,
	.owner    = THIS_MODULE,
};

static int __init cls_flow_init(void)
{
	return register_tcf_proto_ops(&cls_flow_ops);
}

static void __exit cls_flow_exit(void)
{
	unregister_tcf_proto_ops(&cls_flow_ops);
}

module_init(cls_flow_init);
module_exit(cls_flow_exit);

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Patrick McHardy <[email protected]>");
MODULE_DESCRIPTION("TC flow classifier");