2 * Packet matching code.
4 * Copyright (C) 1999 Paul `Rusty' Russell & Michael J. Neuling
8 * This program is free software; you can redistribute it and/or modify
9 * it under the terms of the GNU General Public License version 2 as
10 * published by the Free Software Foundation.
12 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
13 #include <linux/capability.h>
15 #include <linux/skbuff.h>
16 #include <linux/kmod.h>
17 #include <linux/vmalloc.h>
18 #include <linux/netdevice.h>
19 #include <linux/module.h>
20 #include <linux/poison.h>
21 #include <linux/icmpv6.h>
23 #include <net/compat.h>
24 #include <asm/uaccess.h>
25 #include <linux/mutex.h>
26 #include <linux/proc_fs.h>
27 #include <linux/err.h>
28 #include <linux/cpumask.h>
30 #include <linux/netfilter_ipv6/ip6_tables.h>
31 #include <linux/netfilter/x_tables.h>
32 #include <net/netfilter/nf_log.h>
33 #include "../../netfilter/xt_repldata.h"
35 MODULE_LICENSE("GPL");
37 MODULE_DESCRIPTION("IPv6 packet filter");
/* Compile-time debug knobs: dprintf/duprintf print via pr_info() only when
 * the corresponding DEBUG_* macro is enabled; otherwise they compile away.
 * IP_NF_ASSERT becomes WARN_ON(!(x)) under CONFIG_NETFILTER_DEBUG.
 * NOTE(review): the #else/#endif lines of these conditionals are missing
 * from this extract — confirm against the full file.
 */
39 /*#define DEBUG_IP_FIREWALL*/
40 /*#define DEBUG_ALLOW_ALL*/ /* Useful for remote debugging */
41 /*#define DEBUG_IP_FIREWALL_USER*/
43 #ifdef DEBUG_IP_FIREWALL
44 #define dprintf(format, args...) pr_info(format , ## args)
/* empty stub when DEBUG_IP_FIREWALL is off */
46 #define dprintf(format, args...)
49 #ifdef DEBUG_IP_FIREWALL_USER
50 #define duprintf(format, args...) pr_info(format , ## args)
/* empty stub when DEBUG_IP_FIREWALL_USER is off */
52 #define duprintf(format, args...)
55 #ifdef CONFIG_NETFILTER_DEBUG
56 #define IP_NF_ASSERT(x) WARN_ON(!(x))
/* empty stub when CONFIG_NETFILTER_DEBUG is off */
58 #define IP_NF_ASSERT(x)
62 /* All the better to debug you with... */
67 void *ip6t_alloc_initial_table(const struct xt_table *info)
69 return xt_alloc_initial_table(ip6t, IP6T);
71 EXPORT_SYMBOL_GPL(ip6t_alloc_initial_table);
74 We keep a set of rules for each CPU, so we can avoid write-locking
75 them in the softirq when updating the counters and therefore
76 only need to read-lock in the softirq; doing a write_lock_bh() in user
77 context stops packets coming through and allows user context to read
78 the counters or update the rules.
80 Hence the start of any table is given by get_table() below. */
82 /* Returns whether matches rule or not. */
83 /* Performance critical - called for every packet */
/* Match one packet against the ip6t_ip6 part of a rule: source/destination
 * address (masked), in/out interface names, and the transport protocol
 * header.  Each check can be inverted via ip6info->invflags (FWINV below).
 * On a protocol match, *protoff and *fragoff are updated from
 * ipv6_find_hdr().  NOTE(review): several lines of this function are
 * missing from this extract (return statements, closing braces, and the
 * opening of the commented-out debug block ending at "*" + "/").
 */
85 ip6_packet_match(const struct sk_buff *skb,
88 const struct ip6t_ip6 *ip6info,
89 unsigned int *protoff,
90 int *fragoff, bool *hotdrop)
93 const struct ipv6hdr *ipv6 = ipv6_hdr(skb);
/* FWINV: XOR the raw comparison with the rule's inversion flag. */
95 #define FWINV(bool, invflg) ((bool) ^ !!(ip6info->invflags & (invflg)))
97 if (FWINV(ipv6_masked_addr_cmp(&ipv6->saddr, &ip6info->smsk,
98 &ip6info->src), IP6T_INV_SRCIP) ||
99 FWINV(ipv6_masked_addr_cmp(&ipv6->daddr, &ip6info->dmsk,
100 &ip6info->dst), IP6T_INV_DSTIP)) {
101 dprintf("Source or dest mismatch.\n");
103 dprintf("SRC: %u. Mask: %u. Target: %u.%s\n", ip->saddr,
104 ipinfo->smsk.s_addr, ipinfo->src.s_addr,
105 ipinfo->invflags & IP6T_INV_SRCIP ? " (INV)" : "");
106 dprintf("DST: %u. Mask: %u. Target: %u.%s\n", ip->daddr,
107 ipinfo->dmsk.s_addr, ipinfo->dst.s_addr,
108 ipinfo->invflags & IP6T_INV_DSTIP ? " (INV)" : "");*/
/* Compare input interface name against the rule (mask-aware). */
112 ret = ifname_compare_aligned(indev, ip6info->iniface, ip6info->iniface_mask);
114 if (FWINV(ret != 0, IP6T_INV_VIA_IN)) {
115 dprintf("VIA in mismatch (%s vs %s).%s\n",
116 indev, ip6info->iniface,
117 ip6info->invflags&IP6T_INV_VIA_IN ?" (INV)":"");
/* Same check for the output interface. */
121 ret = ifname_compare_aligned(outdev, ip6info->outiface, ip6info->outiface_mask);
123 if (FWINV(ret != 0, IP6T_INV_VIA_OUT)) {
124 dprintf("VIA out mismatch (%s vs %s).%s\n",
125 outdev, ip6info->outiface,
126 ip6info->invflags&IP6T_INV_VIA_OUT ?" (INV)":"");
130 /* ... might want to do something with class and flowlabel here ... */
132 /* look for the desired protocol header */
133 if((ip6info->flags & IP6T_F_PROTO)) {
135 unsigned short _frag_off;
/* Walk the extension-header chain for the upper-layer protocol. */
137 protohdr = ipv6_find_hdr(skb, protoff, -1, &_frag_off, NULL);
143 *fragoff = _frag_off;
145 dprintf("Packet protocol %hi ?= %s%hi.\n",
147 ip6info->invflags & IP6T_INV_PROTO ? "!":"",
150 if (ip6info->proto == protohdr) {
151 if(ip6info->invflags & IP6T_INV_PROTO) {
157 /* We need match for the '-p all', too! */
158 if ((ip6info->proto != 0) &&
159 !(ip6info->invflags & IP6T_INV_PROTO))
91 /* should be ip6 safe */
/* Validate the ip6t_ip6 portion of a user-supplied rule: reject any flag
 * or inversion-flag bits outside the supported masks.  NOTE(review):
 * return statements and braces are missing from this extract.
 */
167 ip6_checkentry(const struct ip6t_ip6 *ipv6)
169 if (ipv6->flags & ~IP6T_F_MASK) {
170 duprintf("Unknown flag bits set: %08X\n",
171 ipv6->flags & ~IP6T_F_MASK);
174 if (ipv6->invflags & ~IP6T_INV_MASK) {
175 duprintf("Unknown invflag bits set: %08X\n",
176 ipv6->invflags & ~IP6T_INV_MASK);
/* Target handler for the built-in ERROR target: rate-limited log of the
 * chain name stored in targinfo.  NOTE(review): the return-type line and
 * the "return NF_DROP;" tail are missing from this extract.
 */
183 ip6t_error(struct sk_buff *skb, const struct xt_action_param *par)
185 net_info_ratelimited("error: `%s'\n", (const char *)par->targinfo);
/* Translate a byte offset inside the rule blob into an entry pointer. */
static inline struct ip6t_entry *
get_entry(const void *base, unsigned int offset)
{
	const char *blob = base;

	return (struct ip6t_entry *)(blob + offset);
}
196 /* All zeroes == unconditional rule. */
197 /* Mildly perf critical (only if packet tracing is on) */
198 static inline bool unconditional(const struct ip6t_ip6 *ipv6)
200 static const struct ip6t_ip6 uncond;
202 return memcmp(ipv6, &uncond, sizeof(uncond)) == 0;
/* const-correct wrapper: fetch the target record of a read-only entry by
 * casting away const for the non-const ip6t_get_target() helper.
 */
static inline const struct xt_entry_target *
ip6t_get_target_c(const struct ip6t_entry *e)
{
	struct ip6t_entry *we = (struct ip6t_entry *)e;

	return ip6t_get_target(we);
}
112 #if IS_ENABLED(CONFIG_NETFILTER_XT_TARGET_TRACE)
113 /* This cries for unification! */
/* Human-readable hook names used in TRACE log lines. */
214 static const char *const hooknames[] = {
215 [NF_INET_PRE_ROUTING] = "PREROUTING",
216 [NF_INET_LOCAL_IN] = "INPUT",
217 [NF_INET_FORWARD] = "FORWARD",
218 [NF_INET_LOCAL_OUT] = "OUTPUT",
219 [NF_INET_POST_ROUTING] = "POSTROUTING",
/* Kind of rule hit, reported in the trace comment field. */
221 enum nf_ip_trace_comments {
222 NF_IP6_TRACE_COMMENT_RULE,
223 NF_IP6_TRACE_COMMENT_RETURN,
224 NF_IP6_TRACE_COMMENT_POLICY,
227 static const char *const comments[] = {
228 [NF_IP6_TRACE_COMMENT_RULE] = "rule",
229 [NF_IP6_TRACE_COMMENT_RETURN] = "return",
230 [NF_IP6_TRACE_COMMENT_POLICY] = "policy",
/* Log parameters used by trace_packet() below. */
233 static struct nf_loginfo trace_loginfo = {
234 .type = NF_LOG_TYPE_LOG,
238 .logflags = NF_LOG_MASK,
131 /* Mildly perf critical (only if packet tracing is on) */
/* Walk helper for trace_packet(): track chain name and rule number while
 * iterating entries; recognizes ERROR targets as chain heads and
 * unconditional STANDARD targets at the chain tail as policy/return.
 * NOTE(review): several lines (return statements, braces) are missing
 * from this extract.
 */
245 get_chainname_rulenum(const struct ip6t_entry *s, const struct ip6t_entry *e,
246 const char *hookname, const char **chainname,
247 const char **comment, unsigned int *rulenum)
249 const struct xt_standard_target *t = (void *)ip6t_get_target_c(s);
251 if (strcmp(t->target.u.kernel.target->name, XT_ERROR_TARGET) == 0) {
252 /* Head of user chain: ERROR target with chainname */
253 *chainname = t->target.data;
258 if (s->target_offset == sizeof(struct ip6t_entry) &&
259 strcmp(t->target.u.kernel.target->name,
260 XT_STANDARD_TARGET) == 0 &&
262 unconditional(&s->ipv6)) {
263 /* Tail of chains: STANDARD target (return/policy) */
264 *comment = *chainname == hookname
265 ? comments[NF_IP6_TRACE_COMMENT_POLICY]
266 : comments[NF_IP6_TRACE_COMMENT_RETURN];
/* Emit a "TRACE: table:chain:comment:rulenum" log line for a traced packet
 * by scanning the current CPU's copy of the table up to the matched entry.
 */
275 static void trace_packet(const struct sk_buff *skb,
277 const struct net_device *in,
278 const struct net_device *out,
279 const char *tablename,
280 const struct xt_table_info *private,
281 const struct ip6t_entry *e)
283 const void *table_base;
284 const struct ip6t_entry *root;
285 const char *hookname, *chainname, *comment;
286 const struct ip6t_entry *iter;
287 unsigned int rulenum = 0;
288 struct net *net = dev_net(in ? in : out);
/* Per-CPU copy of the rules; start at this hook's entry point. */
290 table_base = private->entries[smp_processor_id()];
291 root = get_entry(table_base, private->hook_entry[hook]);
293 hookname = chainname = hooknames[hook];
294 comment = comments[NF_IP6_TRACE_COMMENT_RULE];
/* Walk entries until get_chainname_rulenum() reports the matched rule. */
296 xt_entry_foreach(iter, root, private->size - private->hook_entry[hook])
297 if (get_chainname_rulenum(iter, e, hookname,
298 &chainname, &comment, &rulenum) != 0)
301 nf_log_packet(net, AF_INET6, hook, skb, in, out, &trace_loginfo,
302 "TRACE: %s:%s:%s:%u ",
303 tablename, chainname, comment, rulenum);
307 static inline __pure struct ip6t_entry *
308 ip6t_next_entry(const struct ip6t_entry *entry)
310 return (void *)entry + entry->next_offset;
172 /* Returns one of the generic firewall policies, like NF_ACCEPT. */
/* Core rule-traversal engine: match each entry, follow jumps/returns via
 * the per-CPU jumpstack, bump byte/packet counters under the recseq
 * write-side, and invoke the matched target.  NOTE(review): many lines
 * (braces, the do-loop opener, origptr setup, returns) are missing from
 * this extract — do not treat it as compilable.
 */
315 ip6t_do_table(struct sk_buff *skb,
317 const struct net_device *in,
318 const struct net_device *out,
319 struct xt_table *table)
321 static const char nulldevname[IFNAMSIZ] __attribute__((aligned(sizeof(long))));
322 /* Initializing verdict to NF_DROP keeps gcc happy. */
323 unsigned int verdict = NF_DROP;
324 const char *indev, *outdev;
325 const void *table_base;
326 struct ip6t_entry *e, **jumpstack;
327 unsigned int *stackptr, origptr, cpu;
328 const struct xt_table_info *private;
329 struct xt_action_param acpar;
333 indev = in ? in->name : nulldevname;
334 outdev = out ? out->name : nulldevname;
335 /* We handle fragments by dealing with the first fragment as
336 * if it was a normal packet. All other fragments are treated
337 * normally, except that they will NEVER match rules that ask
338 * things we don't know, ie. tcp syn flag or ports). If the
339 * rule is also a fragment-specific rule, non-fragments won't
341 acpar.hotdrop = false;
344 acpar.family = NFPROTO_IPV6;
345 acpar.hooknum = hook;
347 IP_NF_ASSERT(table->valid_hooks & (1 << hook));
/* Enter the per-CPU counter write sequence before touching counters. */
350 addend = xt_write_recseq_begin();
351 private = table->private;
353 * Ensure we load private-> members after we've fetched the base
356 smp_read_barrier_depends();
357 cpu = smp_processor_id();
358 table_base = private->entries[cpu];
359 jumpstack = (struct ip6t_entry **)private->jumpstack[cpu];
360 stackptr = per_cpu_ptr(private->stackptr, cpu);
363 e = get_entry(table_base, private->hook_entry[hook]);
366 const struct xt_entry_target *t;
367 const struct xt_entry_match *ematch;
/* IP header mismatch: skip to the next rule. */
371 if (!ip6_packet_match(skb, indev, outdev, &e->ipv6,
372 &acpar.thoff, &acpar.fragoff, &acpar.hotdrop)) {
374 e = ip6t_next_entry(e);
/* Run every extension match attached to this rule. */
378 xt_ematch_foreach(ematch, e) {
379 acpar.match = ematch->u.kernel.match;
380 acpar.matchinfo = ematch->data;
381 if (!acpar.match->match(skb, &acpar))
385 ADD_COUNTER(e->counters, skb->len, 1);
387 t = ip6t_get_target_c(e);
388 IP_NF_ASSERT(t->u.kernel.target);
390 #if IS_ENABLED(CONFIG_NETFILTER_XT_TARGET_TRACE)
391 /* The packet is traced: log it */
392 if (unlikely(skb->nf_trace))
393 trace_packet(skb, hook, in, out,
394 table->name, private, e);
396 /* Standard target? */
397 if (!t->u.kernel.target->target) {
400 v = ((struct xt_standard_target *)t)->verdict;
402 /* Pop from stack? */
403 if (v != XT_RETURN) {
/* Negative verdict encodes NF_ACCEPT/NF_DROP/... */
404 verdict = (unsigned int)(-v) - 1;
407 if (*stackptr <= origptr)
408 e = get_entry(table_base,
409 private->underflow[hook]);
411 e = ip6t_next_entry(jumpstack[--*stackptr]);
/* A real jump (not fallthrough, not GOTO): push return address. */
414 if (table_base + v != ip6t_next_entry(e) &&
415 !(e->ipv6.flags & IP6T_F_GOTO)) {
416 if (*stackptr >= private->stacksize) {
420 jumpstack[(*stackptr)++] = e;
423 e = get_entry(table_base, v);
/* Non-standard target: call its handler. */
427 acpar.target = t->u.kernel.target;
428 acpar.targinfo = t->data;
430 verdict = t->u.kernel.target->target(skb, &acpar);
431 if (verdict == XT_CONTINUE)
432 e = ip6t_next_entry(e);
436 } while (!acpar.hotdrop);
440 xt_write_recseq_end(addend);
443 #ifdef DEBUG_ALLOW_ALL
246 /* Figures out from what hook each rule can be called: returns 0 if
247 there are loops. Puts hook bitmask in comefrom. */
/* Depth-first walk of every chain reachable from each valid hook.
 * Uses counters.pcnt as a temporary back-pointer (restored to 0 on the
 * way out) and comefrom bit NF_INET_NUMHOOKS as the "currently on the
 * DFS path" marker for loop detection.  NOTE(review): many lines are
 * missing from this extract (returns, braces, inner loop bodies).
 */
455 mark_source_chains(const struct xt_table_info *newinfo,
456 unsigned int valid_hooks, void *entry0)
460 /* No recursion; use packet counter to save back ptrs (reset
461 to 0 as we leave), and comefrom to save source hook bitmask */
462 for (hook = 0; hook < NF_INET_NUMHOOKS; hook++) {
463 unsigned int pos = newinfo->hook_entry[hook];
464 struct ip6t_entry *e = (struct ip6t_entry *)(entry0 + pos);
466 if (!(valid_hooks & (1 << hook)))
469 /* Set initial back pointer. */
470 e->counters.pcnt = pos;
473 const struct xt_standard_target *t
474 = (void *)ip6t_get_target_c(e);
475 int visited = e->comefrom & (1 << hook);
/* Entry already on the current DFS path => loop. */
477 if (e->comefrom & (1 << NF_INET_NUMHOOKS)) {
478 pr_err("iptables: loop hook %u pos %u %08X.\n",
479 hook, pos, e->comefrom);
482 e->comefrom |= ((1 << hook) | (1 << NF_INET_NUMHOOKS));
484 /* Unconditional return/END. */
485 if ((e->target_offset == sizeof(struct ip6t_entry) &&
486 (strcmp(t->target.u.user.name,
487 XT_STANDARD_TARGET) == 0) &&
489 unconditional(&e->ipv6)) || visited) {
490 unsigned int oldpos, size;
/* Reject verdicts outside the legal negative range. */
492 if ((strcmp(t->target.u.user.name,
493 XT_STANDARD_TARGET) == 0) &&
494 t->verdict < -NF_MAX_VERDICT - 1) {
495 duprintf("mark_source_chains: bad "
496 "negative verdict (%i)\n",
501 /* Return: backtrack through the last
504 e->comefrom ^= (1<<NF_INET_NUMHOOKS);
505 #ifdef DEBUG_IP_FIREWALL_USER
507 & (1 << NF_INET_NUMHOOKS)) {
508 duprintf("Back unset "
/* Pop the saved back-pointer and clear the temporary. */
515 pos = e->counters.pcnt;
516 e->counters.pcnt = 0;
518 /* We're at the start. */
522 e = (struct ip6t_entry *)
524 } while (oldpos == pos + e->next_offset);
/* Move on to the next rule after the popped one. */
527 size = e->next_offset;
528 e = (struct ip6t_entry *)
529 (entry0 + pos + size);
530 e->counters.pcnt = pos;
533 int newpos = t->verdict;
535 if (strcmp(t->target.u.user.name,
536 XT_STANDARD_TARGET) == 0 &&
/* Jump target must lie inside the blob. */
538 if (newpos > newinfo->size -
539 sizeof(struct ip6t_entry)) {
540 duprintf("mark_source_chains: "
541 "bad verdict (%i)\n",
545 /* This a jump; chase it. */
546 duprintf("Jump rule %u -> %u\n",
549 /* ... this is a fallthru */
550 newpos = pos + e->next_offset;
552 e = (struct ip6t_entry *)
554 e->counters.pcnt = pos;
559 duprintf("Finished chain %u\n", hook);
/* Tear down one match: call its destructor (if any) and drop the module
 * reference.  NOTE(review): the opening brace and the "par.net = net;"
 * assignment appear to be missing from this extract — confirm against
 * the full file.
 */
304 static void cleanup_match(struct xt_entry_match *m, struct net *net)
566 struct xt_mtdtor_param par;
569 par.match = m->u.kernel.match;
570 par.matchinfo = m->data;
571 par.family = NFPROTO_IPV6;
572 if (par.match->destroy != NULL)
573 par.match->destroy(&par);
574 module_put(par.match->me);
/* Structural sanity check of one entry: valid ip6t_ip6 part, target fits
 * between target_offset and next_offset.  NOTE(review): return statements
 * and braces are missing from this extract.
 */
578 check_entry(const struct ip6t_entry *e, const char *name)
580 const struct xt_entry_target *t;
582 if (!ip6_checkentry(&e->ipv6)) {
583 duprintf("ip_tables: ip check failed %p %s.\n", e, name);
/* Target record must start inside the entry... */
587 if (e->target_offset + sizeof(struct xt_entry_target) >
591 t = ip6t_get_target_c(e);
/* ...and its declared size must not spill past next_offset. */
592 if (e->target_offset + t->u.target_size > e->next_offset)
/* Run xt_check_match() for one already-looked-up match, passing the rule's
 * protocol and inversion context from par->entryinfo.
 */
319 static int check_match(struct xt_entry_match *m, struct xt_mtchk_param *par)
600 const struct ip6t_ip6 *ipv6 = par->entryinfo;
603 par->match = m->u.kernel.match;
604 par->matchinfo = m->data;
606 ret = xt_check_match(par, m->u.match_size - sizeof(*m),
607 ipv6->proto, ipv6->invflags & IP6T_INV_PROTO);
609 duprintf("ip_tables: check failed for `%s'.\n",
/* Look up the match extension by name/revision (may load a module), then
 * validate it via check_match(); on validation failure the module
 * reference is dropped.  NOTE(review): braces and returns are missing
 * from this extract.
 */
617 find_check_match(struct xt_entry_match *m, struct xt_mtchk_param *par)
619 struct xt_match *match;
622 match = xt_request_find_match(NFPROTO_IPV6, m->u.user.name,
625 duprintf("find_check_match: `%s' not found\n", m->u.user.name);
626 return PTR_ERR(match);
628 m->u.kernel.match = match;
630 ret = check_match(m, par);
/* error path: release the reference taken by the lookup */
636 module_put(m->u.kernel.match->me);
/* Validate the entry's target via xt_check_target() with the rule's
 * protocol/inversion context and the hook mask computed in comefrom.
 */
334 static int check_target(struct ip6t_entry *e, struct net *net, const char *name)
642 struct xt_entry_target *t = ip6t_get_target(e);
643 struct xt_tgchk_param par = {
647 .target = t->u.kernel.target,
649 .hook_mask = e->comefrom,
650 .family = NFPROTO_IPV6,
654 t = ip6t_get_target(e);
655 ret = xt_check_target(&par, t->u.target_size - sizeof(*t),
656 e->ipv6.proto, e->ipv6.invflags & IP6T_INV_PROTO);
658 duprintf("ip_tables: check failed for `%s'.\n",
659 t->u.kernel.target->name);
/* Full per-entry validation: structural check, look up + check every match,
 * look up + check the target.  On failure, unwinds matches found so far
 * (cleanup_matches label) and the target module reference.
 * NOTE(review): several lines (braces, mtpar.net assignment, returns) are
 * missing from this extract.
 */
666 find_check_entry(struct ip6t_entry *e, struct net *net, const char *name,
669 struct xt_entry_target *t;
670 struct xt_target *target;
673 struct xt_mtchk_param mtpar;
674 struct xt_entry_match *ematch;
676 ret = check_entry(e, name);
683 mtpar.entryinfo = &e->ipv6;
684 mtpar.hook_mask = e->comefrom;
685 mtpar.family = NFPROTO_IPV6;
/* Validate every match; partial failure unwinds what was checked. */
686 xt_ematch_foreach(ematch, e) {
687 ret = find_check_match(ematch, &mtpar);
689 goto cleanup_matches;
693 t = ip6t_get_target(e);
694 target = xt_request_find_target(NFPROTO_IPV6, t->u.user.name,
696 if (IS_ERR(target)) {
697 duprintf("find_check_entry: `%s' not found\n", t->u.user.name);
698 ret = PTR_ERR(target);
699 goto cleanup_matches;
701 t->u.kernel.target = target;
703 ret = check_target(e, net, name);
/* error unwinding: drop target ref, then clean validated matches */
708 module_put(t->u.kernel.target->me);
710 xt_ematch_foreach(ematch, e) {
713 cleanup_match(ematch, net);
/* An underflow (base-chain policy) entry must be an unconditional STANDARD
 * target whose decoded verdict is NF_DROP or NF_ACCEPT.  NOTE(review):
 * the intermediate "return false;" lines are missing from this extract.
 */
368 static bool check_underflow(const struct ip6t_entry *e)
720 const struct xt_entry_target *t;
721 unsigned int verdict;
723 if (!unconditional(&e->ipv6))
725 t = ip6t_get_target_c(e);
726 if (strcmp(t->u.user.name, XT_STANDARD_TARGET) != 0)
/* decode the negative verdict encoding into an NF_* value */
728 verdict = ((struct xt_standard_target *)t)->verdict;
729 verdict = -verdict - 1;
730 return verdict == NF_DROP || verdict == NF_ACCEPT;
/* Bounds/alignment check for one entry in the user blob, and record
 * hook_entry/underflow offsets that coincide with this entry; underflow
 * entries must additionally pass check_underflow().  NOTE(review):
 * several lines (returns, next_offset bounds check opener, braces) are
 * missing from this extract.
 */
734 check_entry_size_and_hooks(struct ip6t_entry *e,
735 struct xt_table_info *newinfo,
736 const unsigned char *base,
737 const unsigned char *limit,
738 const unsigned int *hook_entries,
739 const unsigned int *underflows,
740 unsigned int valid_hooks)
744 if ((unsigned long)e % __alignof__(struct ip6t_entry) != 0 ||
745 (unsigned char *)e + sizeof(struct ip6t_entry) >= limit) {
746 duprintf("Bad offset %p\n", e);
751 < sizeof(struct ip6t_entry) + sizeof(struct xt_entry_target)) {
752 duprintf("checking: element %p size %u\n",
757 /* Check hooks & underflows */
758 for (h = 0; h < NF_INET_NUMHOOKS; h++) {
759 if (!(valid_hooks & (1 << h)))
761 if ((unsigned char *)e - base == hook_entries[h])
762 newinfo->hook_entry[h] = hook_entries[h];
763 if ((unsigned char *)e - base == underflows[h]) {
764 if (!check_underflow(e)) {
765 pr_err("Underflows must be unconditional and "
766 "use the STANDARD target with "
770 newinfo->underflow[h] = underflows[h];
774 /* Clear counters and comefrom */
775 e->counters = ((struct xt_counters) { 0, 0 });
/* Fully tear down one entry: clean all matches, then run the target's
 * destructor and drop its module reference.  NOTE(review): the opening
 * brace and the "par.net = net;" assignment appear to be missing from
 * this extract.
 */
401 static void cleanup_entry(struct ip6t_entry *e, struct net *net)
782 struct xt_tgdtor_param par;
783 struct xt_entry_target *t;
784 struct xt_entry_match *ematch;
786 /* Cleanup all matches */
787 xt_ematch_foreach(ematch, e)
788 cleanup_match(ematch, net);
789 t = ip6t_get_target(e);
792 par.target = t->u.kernel.target;
793 par.targinfo = t->data;
794 par.family = NFPROTO_IPV6;
795 if (par.target->destroy != NULL)
796 par.target->destroy(&par);
797 module_put(par.target->me);
415 /* Checks and translates the user-supplied table segment (held in
/* Validate an entire replacement blob: per-entry size/hook checks, entry
 * count, all valid hooks assigned, loop-free chains (mark_source_chains),
 * full per-entry validation, then replicate the blob to every CPU.
 * NOTE(review): several lines (error-unwinding labels, returns, braces)
 * are missing from this extract.
 */
803 translate_table(struct net *net, struct xt_table_info *newinfo, void *entry0,
804 const struct ip6t_replace *repl)
806 struct ip6t_entry *iter;
810 newinfo->size = repl->size;
811 newinfo->number = repl->num_entries;
813 /* Init all hooks to impossible value. */
814 for (i = 0; i < NF_INET_NUMHOOKS; i++) {
815 newinfo->hook_entry[i] = 0xFFFFFFFF;
816 newinfo->underflow[i] = 0xFFFFFFFF;
819 duprintf("translate_table: size %u\n", newinfo->size);
821 /* Walk through entries, checking offsets. */
822 xt_entry_foreach(iter, entry0, newinfo->size) {
823 ret = check_entry_size_and_hooks(iter, newinfo, entry0,
/* every user chain head (ERROR target) deepens the jumpstack */
831 if (strcmp(ip6t_get_target(iter)->u.user.name,
832 XT_ERROR_TARGET) == 0)
833 ++newinfo->stacksize;
836 if (i != repl->num_entries) {
837 duprintf("translate_table: %u not %u entries\n",
838 i, repl->num_entries);
842 /* Check hooks all assigned */
843 for (i = 0; i < NF_INET_NUMHOOKS; i++) {
844 /* Only hooks which are valid */
845 if (!(repl->valid_hooks & (1 << i)))
847 if (newinfo->hook_entry[i] == 0xFFFFFFFF) {
848 duprintf("Invalid hook entry %u %u\n",
849 i, repl->hook_entry[i]);
852 if (newinfo->underflow[i] == 0xFFFFFFFF) {
853 duprintf("Invalid underflow %u %u\n",
854 i, repl->underflow[i]);
859 if (!mark_source_chains(newinfo, repl->valid_hooks, entry0))
862 /* Finally, each sanity check must pass */
864 xt_entry_foreach(iter, entry0, newinfo->size) {
865 ret = find_check_entry(iter, net, repl->name, repl->size);
/* error path: unwind the entries validated so far */
872 xt_entry_foreach(iter, entry0, newinfo->size) {
875 cleanup_entry(iter, net);
880 /* And one copy for every other CPU */
881 for_each_possible_cpu(i) {
882 if (newinfo->entries[i] && newinfo->entries[i] != entry0)
883 memcpy(newinfo->entries[i], entry0, newinfo->size);
/* Sum per-CPU byte/packet counters into the caller's array, using the
 * xt_recseq seqcount per CPU to obtain a consistent snapshot of each
 * entry's counter pair.  NOTE(review): loop-variable declarations and the
 * do-loop opener are missing from this extract.
 */
890 get_counters(const struct xt_table_info *t,
891 struct xt_counters counters[])
893 struct ip6t_entry *iter;
897 for_each_possible_cpu(cpu) {
898 seqcount_t *s = &per_cpu(xt_recseq, cpu);
901 xt_entry_foreach(iter, t->entries[cpu], t->size) {
/* retry until the counter pair was read without a concurrent write */
906 start = read_seqcount_begin(s);
907 bcnt = iter->counters.bcnt;
908 pcnt = iter->counters.pcnt;
909 } while (read_seqcount_retry(s, start));
911 ADD_COUNTER(counters[i], bcnt, pcnt);
/* Allocate a zeroed counter array sized for the table and fill it with an
 * atomic snapshot via get_counters().  Returns ERR_PTR(-ENOMEM) on
 * allocation failure.
 */
466 static struct xt_counters *alloc_counters(const struct xt_table *table)
919 unsigned int countersize;
920 struct xt_counters *counters;
921 const struct xt_table_info *private = table->private;
923 /* We need atomic snapshot of counters: rest doesn't change
924 (other than comefrom, which userspace doesn't care
926 countersize = sizeof(struct xt_counters) * private->number;
927 counters = vzalloc(countersize);
929 if (counters == NULL)
930 return ERR_PTR(-ENOMEM);
932 get_counters(private, counters);
/* Copy the table's rules to userspace, then patch in the snapshotted
 * counters and the user-visible match/target names entry by entry.
 * NOTE(review): several lines (error labels, frees, returns) are missing
 * from this extract.
 */
938 copy_entries_to_user(unsigned int total_size,
939 const struct xt_table *table,
940 void __user *userptr)
942 unsigned int off, num;
943 const struct ip6t_entry *e;
944 struct xt_counters *counters;
945 const struct xt_table_info *private = table->private;
947 const void *loc_cpu_entry;
949 counters = alloc_counters(table);
950 if (IS_ERR(counters))
951 return PTR_ERR(counters);
953 /* choose the copy that is on our node/cpu, ...
954 * This choice is lazy (because current thread is
955 * allowed to migrate to another cpu)
957 loc_cpu_entry = private->entries[raw_smp_processor_id()];
958 if (copy_to_user(userptr, loc_cpu_entry, total_size) != 0) {
963 /* FIXME: use iterator macros --RR */
964 /* ... then go back and fix counters and names */
965 for (off = 0, num = 0; off < total_size; off += e->next_offset, num++){
967 const struct xt_entry_match *m;
968 const struct xt_entry_target *t;
970 e = (struct ip6t_entry *)(loc_cpu_entry + off);
/* overwrite the raw counters with the consistent snapshot */
971 if (copy_to_user(userptr + off
972 + offsetof(struct ip6t_entry, counters),
974 sizeof(counters[num])) != 0) {
/* rewrite each match's kernel name into the user-visible slot */
979 for (i = sizeof(struct ip6t_entry);
980 i < e->target_offset;
981 i += m->u.match_size) {
984 if (copy_to_user(userptr + off + i
985 + offsetof(struct xt_entry_match,
987 m->u.kernel.match->name,
988 strlen(m->u.kernel.match->name)+1)
/* likewise for the target name */
995 t = ip6t_get_target_c(e);
996 if (copy_to_user(userptr + off + e->target_offset
997 + offsetof(struct xt_entry_target,
999 t->u.kernel.target->name,
1000 strlen(t->u.kernel.target->name)+1) != 0) {
514 #ifdef CONFIG_COMPAT
/* Translate a STANDARD-target verdict between compat (32-bit) and native
 * layouts by adjusting jump offsets with xt_compat_calc_jump().
 * NOTE(review): the "if (v > 0)" / "if (cv > 0)" guard lines that limit
 * the adjustment to jump offsets appear to be missing from this extract —
 * confirm against the full file.
 */
515 static void compat_standard_from_user(void *dst, const void *src)
1014 int v = *(compat_int_t *)src;
1017 v += xt_compat_calc_jump(AF_INET6, v);
1018 memcpy(dst, &v, sizeof(v));
/* Inverse direction: native verdict back to a compat_int_t. */
521 static int compat_standard_to_user(void __user *dst, const void *src)
1023 compat_int_t cv = *(int *)src;
1026 cv -= xt_compat_calc_jump(AF_INET6, cv);
1027 return copy_to_user(dst, &cv, sizeof(cv)) ? -EFAULT : 0;
/* Compute the compat/native size delta ("off") for one entry, register it
 * with the compat offset table, and shrink newinfo's size and any
 * hook/underflow offsets that lie after this entry accordingly.
 */
523 static int compat_calc_entry(const struct ip6t_entry *e,
1031 const struct xt_table_info *info,
1032 const void *base, struct xt_table_info *newinfo)
1034 const struct xt_entry_match *ematch;
1035 const struct xt_entry_target *t;
1036 unsigned int entry_offset;
/* base struct shrinkage plus each match's and the target's delta */
1039 off = sizeof(struct ip6t_entry) - sizeof(struct compat_ip6t_entry);
1040 entry_offset = (void *)e - base;
1041 xt_ematch_foreach(ematch, e)
1042 off += xt_compat_match_offset(ematch->u.kernel.match);
1043 t = ip6t_get_target_c(e);
1044 off += xt_compat_target_offset(t->u.kernel.target);
1045 newinfo->size -= off;
1046 ret = xt_compat_add_offset(AF_INET6, entry_offset, off);
1050 for (i = 0; i < NF_INET_NUMHOOKS; i++) {
1051 if (info->hook_entry[i] &&
1052 (e < (struct ip6t_entry *)(base + info->hook_entry[i])))
1053 newinfo->hook_entry[i] -= off;
1054 if (info->underflow[i] &&
1055 (e < (struct ip6t_entry *)(base + info->underflow[i])))
1056 newinfo->underflow[i] -= off;
/* Produce a compat-layout xt_table_info (sizes/offsets) for an existing
 * native table by running compat_calc_entry() over every entry of this
 * CPU's copy.
 */
544 static int compat_table_info(const struct xt_table_info *info,
1062 struct xt_table_info *newinfo)
1064 struct ip6t_entry *iter;
1065 void *loc_cpu_entry;
1068 if (!newinfo || !info)
1071 /* we dont care about newinfo->entries[] */
1072 memcpy(newinfo, info, offsetof(struct xt_table_info, entries))
1073 newinfo->initial_entries = 0;
1074 loc_cpu_entry = info->entries[raw_smp_processor_id()];
1075 xt_compat_init_offsets(AF_INET6, info->number);
1076 xt_entry_foreach(iter, loc_cpu_entry, info->size) {
1077 ret = compat_calc_entry(iter, info, loc_cpu_entry, newinfo);
/* IP6T_SO_GET_INFO handler: look up the named table (loading its module if
 * needed) and copy hook entries, underflows, entry count and size to
 * userspace; under compat, sizes are recomputed via compat_table_info().
 * NOTE(review): several lines (compat branch conditions, returns, unlocks)
 * are missing from this extract.
 */
556 static int get_info(struct net *net, void __user *user,
1086 const int *len, int compat)
1088 char name[XT_TABLE_MAXNAMELEN];
1092 if (*len != sizeof(struct ip6t_getinfo)) {
1093 duprintf("length %u != %zu\n", *len,
1094 sizeof(struct ip6t_getinfo));
1098 if (copy_from_user(name, user, sizeof(name)) != 0)
/* force NUL termination of the user-supplied table name */
1101 name[XT_TABLE_MAXNAMELEN-1] = '\0';
1102 #ifdef CONFIG_COMPAT
1104 xt_compat_lock(AF_INET6);
1106 t = try_then_request_module(xt_find_table_lock(net, AF_INET6, name),
1107 "ip6table_%s", name);
1108 if (!IS_ERR_OR_NULL(t)) {
1109 struct ip6t_getinfo info;
1110 const struct xt_table_info *private = t->private;
1111 #ifdef CONFIG_COMPAT
1112 struct xt_table_info tmp;
1115 ret = compat_table_info(private, &tmp);
1116 xt_compat_flush_offsets(AF_INET6);
1120 memset(&info, 0, sizeof(info));
1121 info.valid_hooks = t->valid_hooks;
1122 memcpy(info.hook_entry, private->hook_entry,
1123 sizeof(info.hook_entry));
1124 memcpy(info.underflow, private->underflow,
1125 sizeof(info.underflow));
1126 info.num_entries = private->number;
1127 info.size = private->size;
1128 strcpy(info.name, name);
1130 if (copy_to_user(user, &info, *len) != 0)
1138 ret = t ? PTR_ERR(t) : -ENOENT;
1139 #ifdef CONFIG_COMPAT
1141 xt_compat_unlock(AF_INET6);
/* IP6T_SO_GET_ENTRIES handler: validate the requested size against the
 * live table and copy all rules (with fixed-up names/counters) to
 * userspace via copy_entries_to_user().  NOTE(review): returns and
 * unlock lines are missing from this extract.
 */
1147 get_entries(struct net *net, struct ip6t_get_entries __user *uptr,
1151 struct ip6t_get_entries get;
1154 if (*len < sizeof(get)) {
1155 duprintf("get_entries: %u < %zu\n", *len, sizeof(get));
1158 if (copy_from_user(&get, uptr, sizeof(get)) != 0)
1160 if (*len != sizeof(struct ip6t_get_entries) + get.size) {
1161 duprintf("get_entries: %u != %zu\n",
1162 *len, sizeof(get) + get.size);
1166 t = xt_find_table_lock(net, AF_INET6, get.name);
1167 if (!IS_ERR_OR_NULL(t)) {
1168 struct xt_table_info *private = t->private;
1169 duprintf("t->private->number = %u\n", private->number);
1170 if (get.size == private->size)
1171 ret = copy_entries_to_user(private->size,
1172 t, uptr->entrytable);
1174 duprintf("get_entries: I've got %u not %u!\n",
1175 private->size, get.size);
1181 ret = t ? PTR_ERR(t) : -ENOENT;
/* Swap a validated replacement table into place: find the table (loading
 * its module if needed), replace via xt_replace_table(), adjust module
 * refcounts, snapshot the old counters for userspace, and free the old
 * table.  NOTE(review): many lines (error labels, module get/put calls,
 * returns) are missing from this extract.
 */
1187 __do_replace(struct net *net, const char *name, unsigned int valid_hooks,
1188 struct xt_table_info *newinfo, unsigned int num_counters,
1189 void __user *counters_ptr)
1193 struct xt_table_info *oldinfo;
1194 struct xt_counters *counters;
1195 const void *loc_cpu_old_entry;
1196 struct ip6t_entry *iter;
1199 counters = vzalloc(num_counters * sizeof(struct xt_counters));
1205 t = try_then_request_module(xt_find_table_lock(net, AF_INET6, name),
1206 "ip6table_%s", name);
1207 if (IS_ERR_OR_NULL(t)) {
1208 ret = t ? PTR_ERR(t) : -ENOENT;
1209 goto free_newinfo_counters_untrans;
1213 if (valid_hooks != t->valid_hooks) {
1214 duprintf("Valid hook crap: %08X vs %08X\n",
1215 valid_hooks, t->valid_hooks);
1220 oldinfo = xt_replace_table(t, num_counters, newinfo, &ret);
1224 /* Update module usage count based on number of rules */
1225 duprintf("do_replace: oldnum=%u, initnum=%u, newnum=%u\n",
1226 oldinfo->number, oldinfo->initial_entries, newinfo->number);
1227 if ((oldinfo->number > oldinfo->initial_entries) ||
1228 (newinfo->number <= oldinfo->initial_entries))
1230 if ((oldinfo->number > oldinfo->initial_entries) &&
1231 (newinfo->number <= oldinfo->initial_entries))
1234 /* Get the old counters, and synchronize with replace */
1235 get_counters(oldinfo, counters);
1237 /* Decrease module usage counts and free resource */
1238 loc_cpu_old_entry = oldinfo->entries[raw_smp_processor_id()];
1239 xt_entry_foreach(iter, loc_cpu_old_entry, oldinfo->size)
1240 cleanup_entry(iter, net);
1242 xt_free_table_info(oldinfo);
1243 if (copy_to_user(counters_ptr, counters,
1244 sizeof(struct xt_counters) * num_counters) != 0)
1253 free_newinfo_counters_untrans:
/* IP6T_SO_SET_REPLACE handler: copy the ip6t_replace header and rule blob
 * from userspace, validate/translate it, then install it via
 * __do_replace(); on failure the translated entries are cleaned up.
 * NOTE(review): several lines (returns, error checks) are missing from
 * this extract.
 */
1260 do_replace(struct net *net, const void __user *user, unsigned int len)
1263 struct ip6t_replace tmp;
1264 struct xt_table_info *newinfo;
1265 void *loc_cpu_entry;
1266 struct ip6t_entry *iter;
1268 if (copy_from_user(&tmp, user, sizeof(tmp)) != 0)
1271 /* overflow check */
1272 if (tmp.num_counters >= INT_MAX / sizeof(struct xt_counters))
1274 tmp.name[sizeof(tmp.name)-1] = 0;
1276 newinfo = xt_alloc_table_info(tmp.size);
1280 /* choose the copy that is on our node/cpu */
1281 loc_cpu_entry = newinfo->entries[raw_smp_processor_id()];
1282 if (copy_from_user(loc_cpu_entry, user + sizeof(tmp),
1288 ret = translate_table(net, newinfo, loc_cpu_entry, &tmp);
1292 duprintf("ip_tables: Translated table\n");
1294 ret = __do_replace(net, tmp.name, tmp.valid_hooks, newinfo,
1295 tmp.num_counters, tmp.counters);
1297 goto free_newinfo_untrans;
1300 free_newinfo_untrans:
1301 xt_entry_foreach(iter, loc_cpu_entry, newinfo->size)
1302 cleanup_entry(iter, net);
1304 xt_free_table_info(newinfo);
/* IP6T_SO_SET_ADD_COUNTERS handler: read a (possibly compat-layout)
 * xt_counters_info header plus counter array from userspace and add the
 * values onto the live table's per-CPU counters under the recseq
 * write-side.  NOTE(review): several lines (size/name setup, frees,
 * returns, local_bh_disable pairing) are missing from this extract.
 */
1309 do_add_counters(struct net *net, const void __user *user, unsigned int len,
1312 unsigned int i, curcpu;
1313 struct xt_counters_info tmp;
1314 struct xt_counters *paddc;
1315 unsigned int num_counters;
1320 const struct xt_table_info *private;
1322 const void *loc_cpu_entry;
1323 struct ip6t_entry *iter;
1324 unsigned int addend;
1325 #ifdef CONFIG_COMPAT
1326 struct compat_xt_counters_info compat_tmp;
1330 size = sizeof(struct compat_xt_counters_info);
1335 size = sizeof(struct xt_counters_info);
1338 if (copy_from_user(ptmp, user, size) != 0)
1341 #ifdef CONFIG_COMPAT
1343 num_counters = compat_tmp.num_counters;
1344 name = compat_tmp.name;
1348 num_counters = tmp.num_counters;
/* total length must exactly cover header + declared counter array */
1352 if (len != size + num_counters * sizeof(struct xt_counters))
1355 paddc = vmalloc(len - size);
1359 if (copy_from_user(paddc, user + size, len - size) != 0) {
1364 t = xt_find_table_lock(net, AF_INET6, name);
1365 if (IS_ERR_OR_NULL(t)) {
1366 ret = t ? PTR_ERR(t) : -ENOENT;
1372 private = t->private;
1373 if (private->number != num_counters) {
1375 goto unlock_up_free;
1379 /* Choose the copy that is on our node */
1380 curcpu = smp_processor_id();
1381 addend = xt_write_recseq_begin();
1382 loc_cpu_entry = private->entries[curcpu];
1383 xt_entry_foreach(iter, loc_cpu_entry, private->size) {
1384 ADD_COUNTER(iter->counters, paddc[i].bcnt, paddc[i].pcnt);
1387 xt_write_recseq_end(addend);
696 #ifdef CONFIG_COMPAT
/* 32-bit-userspace layout of struct ip6t_replace; entries[] is a
 * flexible trailing array of compat entries.
 */
1400 struct compat_ip6t_replace {
1401 char name[XT_TABLE_MAXNAMELEN];
1405 u32 hook_entry[NF_INET_NUMHOOKS];
1406 u32 underflow[NF_INET_NUMHOOKS];
1408 compat_uptr_t counters; /* struct xt_counters * */
1409 struct compat_ip6t_entry entries[0];
/* Serialize one native entry into the compat layout at *dstptr: copy the
 * header + snapshotted counters, convert each match and the target via
 * the xt compat helpers, then patch the shrunken target/next offsets.
 * NOTE(review): braces, early returns and the origsize assignment are
 * missing from this extract.
 */
1413 compat_copy_entry_to_user(struct ip6t_entry *e, void __user **dstptr,
1414 unsigned int *size, struct xt_counters *counters,
1417 struct xt_entry_target *t;
1418 struct compat_ip6t_entry __user *ce;
1419 u_int16_t target_offset, next_offset;
1420 compat_uint_t origsize;
1421 const struct xt_entry_match *ematch;
1425 ce = (struct compat_ip6t_entry __user *)*dstptr;
1426 if (copy_to_user(ce, e, sizeof(struct ip6t_entry)) != 0 ||
1427 copy_to_user(&ce->counters, &counters[i],
1428 sizeof(counters[i])) != 0)
1431 *dstptr += sizeof(struct compat_ip6t_entry);
1432 *size -= sizeof(struct ip6t_entry) - sizeof(struct compat_ip6t_entry);
1434 xt_ematch_foreach(ematch, e) {
1435 ret = xt_compat_match_to_user(ematch, dstptr, size);
/* offsets shrink by the bytes saved so far (origsize - *size) */
1439 target_offset = e->target_offset - (origsize - *size);
1440 t = ip6t_get_target(e);
1441 ret = xt_compat_target_to_user(t, dstptr, size);
1444 next_offset = e->next_offset - (origsize - *size);
1445 if (put_user(target_offset, &ce->target_offset) != 0 ||
1446 put_user(next_offset, &ce->next_offset) != 0)
/* Look up a match referenced by a compat entry (may load a module), stash
 * the kernel match pointer, and accumulate its compat size delta into
 * *size.  NOTE(review): braces and the final return are missing from
 * this extract.
 */
1452 compat_find_calc_match(struct xt_entry_match *m,
1454 const struct ip6t_ip6 *ipv6,
1455 unsigned int hookmask,
1458 struct xt_match *match;
1460 match = xt_request_find_match(NFPROTO_IPV6, m->u.user.name,
1461 m->u.user.revision);
1462 if (IS_ERR(match)) {
1463 duprintf("compat_check_calc_match: `%s' not found\n",
1465 return PTR_ERR(match);
1467 m->u.kernel.match = match;
1468 *size += xt_compat_match_offset(match);
/* Drop the module references taken on every match and on the target of a
 * compat entry (the counterpart of the lookups done during checking).
 * Used on error paths before the entry has been fully ->check'ed. */
1472 static void compat_release_entry(struct compat_ip6t_entry *e)
1474 struct xt_entry_target *t;
1475 struct xt_entry_match *ematch;
1477 /* Cleanup all matches */
1478 xt_ematch_foreach(ematch, e)
1479 module_put(ematch->u.kernel.match->me);
1480 t = compat_ip6t_get_target(e);
1481 module_put(t->u.kernel.target->me);
/* Validate one compat-layout entry: alignment, bounds against [base,limit),
 * minimum next_offset, match/target lookup (taking module refs), and
 * registration of this entry's kernel-vs-compat size delta with
 * xt_compat_add_offset().  Also records hook entry/underflow positions in
 * newinfo when this entry sits at one of the userspace-supplied offsets.
 * On failure, unwinds the module references taken so far.
 * NOTE(review): several lines are elided from this excerpt (returns,
 * closing braces, some locals); code kept verbatim. */
1485 check_compat_entry_size_and_hooks(struct compat_ip6t_entry *e,
1486 struct xt_table_info *newinfo,
1488 const unsigned char *base,
1489 const unsigned char *limit,
1490 const unsigned int *hook_entries,
1491 const unsigned int *underflows,
1494 struct xt_entry_match *ematch;
1495 struct xt_entry_target *t;
1496 struct xt_target *target;
1497 unsigned int entry_offset;
1501 duprintf("check_compat_entry_size_and_hooks %p\n", e);
/* Reject misaligned entries and entries whose header would overrun the blob. */
1502 if ((unsigned long)e % __alignof__(struct compat_ip6t_entry) != 0 ||
1503 (unsigned char *)e + sizeof(struct compat_ip6t_entry) >= limit) {
1504 duprintf("Bad offset %p, limit = %p\n", e, limit);
/* next_offset must at least cover the entry header plus a minimal target. */
1508 if (e->next_offset < sizeof(struct compat_ip6t_entry) +
1509 sizeof(struct compat_xt_entry_target)) {
1510 duprintf("checking: element %p size %u\n",
1515 /* For purposes of check_entry casting the compat entry is fine */
1516 ret = check_entry((struct ip6t_entry *)e, name);
/* off accumulates how much larger the kernel layout is than compat. */
1520 off = sizeof(struct ip6t_entry) - sizeof(struct compat_ip6t_entry);
1521 entry_offset = (void *)e - (void *)base;
1523 xt_ematch_foreach(ematch, e) {
1524 ret = compat_find_calc_match(ematch, name,
1525 &e->ipv6, e->comefrom, &off);
1527 goto release_matches;
1531 t = compat_ip6t_get_target(e);
/* May autoload the target module via request_module(). */
1532 target = xt_request_find_target(NFPROTO_IPV6, t->u.user.name,
1533 t->u.user.revision);
1534 if (IS_ERR(target)) {
1535 duprintf("check_compat_entry_size_and_hooks: `%s' not found\n",
1537 ret = PTR_ERR(target);
1538 goto release_matches;
1540 t->u.kernel.target = target;
1542 off += xt_compat_target_offset(target);
/* Remember this entry's size delta for later offset translation. */
1544 ret = xt_compat_add_offset(AF_INET6, entry_offset, off);
1548 /* Check hooks & underflows */
1549 for (h = 0; h < NF_INET_NUMHOOKS; h++) {
1550 if ((unsigned char *)e - base == hook_entries[h])
1551 newinfo->hook_entry[h] = hook_entries[h];
1552 if ((unsigned char *)e - base == underflows[h])
1553 newinfo->underflow[h] = underflows[h];
1556 /* Clear counters and comefrom */
1557 memset(&e->counters, 0, sizeof(e->counters));
/* Error unwind: release target ref, then refs of matches found so far. */
1562 module_put(t->u.kernel.target->me);
1564 xt_ematch_foreach(ematch, e) {
1567 module_put(ematch->u.kernel.match->me);
/* Expand one validated compat entry into kernel layout at *dstptr,
 * converting matches/target via xt_compat_*_from_user() and rebasing
 * target_offset/next_offset by the cumulative growth.  Hook entry and
 * underflow offsets past this entry are shifted by the same delta.
 * NOTE(review): some lines elided from this excerpt; code kept verbatim. */
1573 compat_copy_entry_from_user(struct compat_ip6t_entry *e, void **dstptr,
1574 unsigned int *size, const char *name,
1575 struct xt_table_info *newinfo, unsigned char *base)
1577 struct xt_entry_target *t;
1578 struct ip6t_entry *de;
1579 unsigned int origsize;
1581 struct xt_entry_match *ematch;
1585 de = (struct ip6t_entry *)*dstptr;
1586 memcpy(de, e, sizeof(struct ip6t_entry));
1587 memcpy(&de->counters, &e->counters, sizeof(e->counters));
/* Advance past the (larger) kernel header; *size grows by the delta. */
1589 *dstptr += sizeof(struct ip6t_entry);
1590 *size += sizeof(struct ip6t_entry) - sizeof(struct compat_ip6t_entry);
1592 xt_ematch_foreach(ematch, e) {
1593 ret = xt_compat_match_from_user(ematch, dstptr, size);
1597 de->target_offset = e->target_offset - (origsize - *size);
1598 t = compat_ip6t_get_target(e);
1599 xt_compat_target_from_user(t, dstptr, size);
1601 de->next_offset = e->next_offset - (origsize - *size);
/* Shift hook/underflow offsets that lie beyond this entry's position. */
1602 for (h = 0; h < NF_INET_NUMHOOKS; h++) {
1603 if ((unsigned char *)de - base < newinfo->hook_entry[h])
1604 newinfo->hook_entry[h] -= origsize - *size;
1605 if ((unsigned char *)de - base < newinfo->underflow[h])
1606 newinfo->underflow[h] -= origsize - *size;
/* Run the normal ->checkentry validation on an entry that has already
 * been expanded to kernel layout: check every match, then the target.
 * On failure, cleans up the matches that were successfully checked.
 * NOTE(review): some lines elided from this excerpt; code kept verbatim. */
1611 static int compat_check_entry(struct ip6t_entry *e, struct net *net,
1616 struct xt_mtchk_param mtpar;
1617 struct xt_entry_match *ematch;
1622 mtpar.entryinfo = &e->ipv6;
1623 mtpar.hook_mask = e->comefrom;
1624 mtpar.family = NFPROTO_IPV6;
1625 xt_ematch_foreach(ematch, e) {
1626 ret = check_match(ematch, &mtpar);
1628 goto cleanup_matches;
1632 ret = check_target(e, net, name);
1634 goto cleanup_matches;
/* Unwind: destroy only the matches already check'ed above. */
1638 xt_ematch_foreach(ematch, e) {
1641 cleanup_match(ematch, net);
/* Convert a full compat-layout ruleset blob into a kernel-layout
 * xt_table_info: two passes (size/validity check under the AF_INET6
 * compat lock, then expansion into a freshly allocated table), followed
 * by chain-loop marking and per-entry ->check.  On success, replaces
 * *pinfo/*pentry0 with the new table and frees the old one.
 * NOTE(review): a significant number of lines (error labels, returns,
 * braces) are elided from this excerpt; code kept verbatim. */
1647 translate_compat_table(struct net *net,
1649 unsigned int valid_hooks,
1650 struct xt_table_info **pinfo,
1652 unsigned int total_size,
1653 unsigned int number,
1654 unsigned int *hook_entries,
1655 unsigned int *underflows)
1658 struct xt_table_info *newinfo, *info;
1659 void *pos, *entry0, *entry1;
1660 struct compat_ip6t_entry *iter0;
1661 struct ip6t_entry *iter1;
1668 info->number = number;
1670 /* Init all hooks to impossible value. */
1671 for (i = 0; i < NF_INET_NUMHOOKS; i++) {
1672 info->hook_entry[i] = 0xFFFFFFFF;
1673 info->underflow[i] = 0xFFFFFFFF;
1676 duprintf("translate_compat_table: size %u\n", info->size);
/* Pass 1: validate every compat entry and collect size offsets. */
1678 xt_compat_lock(AF_INET6);
1679 xt_compat_init_offsets(AF_INET6, number);
1680 /* Walk through entries, checking offsets. */
1681 xt_entry_foreach(iter0, entry0, total_size) {
1682 ret = check_compat_entry_size_and_hooks(iter0, info, &size,
1684 entry0 + total_size,
1695 duprintf("translate_compat_table: %u not %u entries\n",
1700 /* Check hooks all assigned */
1701 for (i = 0; i < NF_INET_NUMHOOKS; i++) {
1702 /* Only hooks which are valid */
1703 if (!(valid_hooks & (1 << i)))
1705 if (info->hook_entry[i] == 0xFFFFFFFF) {
1706 duprintf("Invalid hook entry %u %u\n",
1707 i, hook_entries[i]);
1710 if (info->underflow[i] == 0xFFFFFFFF) {
1711 duprintf("Invalid underflow %u %u\n",
/* Pass 2: allocate the kernel-layout table and expand entries into it. */
1718 newinfo = xt_alloc_table_info(size);
1722 newinfo->number = number;
1723 for (i = 0; i < NF_INET_NUMHOOKS; i++) {
1724 newinfo->hook_entry[i] = info->hook_entry[i];
1725 newinfo->underflow[i] = info->underflow[i];
1727 entry1 = newinfo->entries[raw_smp_processor_id()];
1730 xt_entry_foreach(iter0, entry0, total_size) {
1731 ret = compat_copy_entry_from_user(iter0, &pos, &size,
1732 name, newinfo, entry1);
/* Compat offsets are no longer needed once expansion is done. */
1736 xt_compat_flush_offsets(AF_INET6);
1737 xt_compat_unlock(AF_INET6);
1742 if (!mark_source_chains(newinfo, valid_hooks, entry1))
1746 xt_entry_foreach(iter1, entry1, newinfo->size) {
1747 ret = compat_check_entry(iter1, net, name);
1751 if (strcmp(ip6t_get_target(iter1)->u.user.name,
1752 XT_ERROR_TARGET) == 0)
1753 ++newinfo->stacksize;
1757 * The first i matches need cleanup_entry (calls ->destroy)
1758 * because they had called ->check already. The other j-i
1759 * entries need only release.
1763 xt_entry_foreach(iter0, entry0, newinfo->size) {
1768 compat_release_entry(iter0);
1770 xt_entry_foreach(iter1, entry1, newinfo->size) {
1773 cleanup_entry(iter1, net);
1775 xt_free_table_info(newinfo);
1779 /* And one copy for every other CPU */
1780 for_each_possible_cpu(i)
1781 if (newinfo->entries[i] && newinfo->entries[i] != entry1)
1782 memcpy(newinfo->entries[i], entry1, newinfo->size);
1786 xt_free_table_info(info);
/* Error path: free new table and release refs held by compat entries. */
1790 xt_free_table_info(newinfo);
1792 xt_entry_foreach(iter0, entry0, total_size) {
1795 compat_release_entry(iter0);
1799 xt_compat_flush_offsets(AF_INET6);
1800 xt_compat_unlock(AF_INET6);
/* Handle IP6T_SO_SET_REPLACE from a 32-bit process: copy in the compat
 * replace header and rule blob, translate to kernel layout, then swap
 * the table in via __do_replace().  Mirrors do_replace() for native.
 * NOTE(review): some lines elided from this excerpt; code kept verbatim. */
1805 compat_do_replace(struct net *net, void __user *user, unsigned int len)
1808 struct compat_ip6t_replace tmp;
1809 struct xt_table_info *newinfo;
1810 void *loc_cpu_entry;
1811 struct ip6t_entry *iter;
1813 if (copy_from_user(&tmp, user, sizeof(tmp)) != 0)
1816 /* overflow check */
1817 if (tmp.size >= INT_MAX / num_possible_cpus())
1819 if (tmp.num_counters >= INT_MAX / sizeof(struct xt_counters))
/* Force NUL termination of the user-supplied table name. */
1821 tmp.name[sizeof(tmp.name)-1] = 0;
1823 newinfo = xt_alloc_table_info(tmp.size);
1827 /* choose the copy that is on our node/cpu */
1828 loc_cpu_entry = newinfo->entries[raw_smp_processor_id()];
1829 if (copy_from_user(loc_cpu_entry, user + sizeof(tmp),
1835 ret = translate_compat_table(net, tmp.name, tmp.valid_hooks,
1836 &newinfo, &loc_cpu_entry, tmp.size,
1837 tmp.num_entries, tmp.hook_entry,
1842 duprintf("compat_do_replace: Translated table\n");
1844 ret = __do_replace(net, tmp.name, tmp.valid_hooks, newinfo,
1845 tmp.num_counters, compat_ptr(tmp.counters));
1847 goto free_newinfo_untrans;
/* Failure after translation: destroy entries, then free the table. */
1850 free_newinfo_untrans:
1851 xt_entry_foreach(iter, loc_cpu_entry, newinfo->size)
1852 cleanup_entry(iter, net);
1854 xt_free_table_info(newinfo);
/* Compat setsockopt dispatcher: requires CAP_NET_ADMIN in the socket's
 * user namespace, then routes REPLACE / ADD_COUNTERS to the compat paths
 * (do_add_counters is called with compat=1).
 * NOTE(review): some lines elided from this excerpt; code kept verbatim. */
1859 compat_do_ip6t_set_ctl(struct sock *sk, int cmd, void __user *user,
1864 if (!ns_capable(sock_net(sk)->user_ns, CAP_NET_ADMIN))
1868 case IP6T_SO_SET_REPLACE:
1869 ret = compat_do_replace(sock_net(sk), user, len);
1872 case IP6T_SO_SET_ADD_COUNTERS:
1873 ret = do_add_counters(sock_net(sk), user, len, 1);
1877 duprintf("do_ip6t_set_ctl: unknown request %i\n", cmd);
/* 32-bit userspace layout of the IP6T_SO_GET_ENTRIES request/reply.
 * NOTE(review): excerpt elides the size field and closing brace. */
1884 struct compat_ip6t_get_entries {
1885 char name[XT_TABLE_MAXNAMELEN];
1887 struct compat_ip6t_entry entrytable[0];
/* Dump a whole table to a 32-bit user buffer: snapshot the counters,
 * then emit each entry in compat layout via compat_copy_entry_to_user().
 * NOTE(review): some lines elided from this excerpt; code kept verbatim. */
1891 compat_copy_entries_to_user(unsigned int total_size, struct xt_table *table,
1892 void __user *userptr)
1894 struct xt_counters *counters;
1895 const struct xt_table_info *private = table->private;
1899 const void *loc_cpu_entry;
1901 struct ip6t_entry *iter;
1903 counters = alloc_counters(table);
1904 if (IS_ERR(counters))
1905 return PTR_ERR(counters);
1907 /* choose the copy that is on our node/cpu, ...
1908 * This choice is lazy (because current thread is
1909 * allowed to migrate to another cpu)
1911 loc_cpu_entry = private->entries[raw_smp_processor_id()];
1914 xt_entry_foreach(iter, loc_cpu_entry, total_size) {
1915 ret = compat_copy_entry_to_user(iter, &pos,
1916 &size, counters, i++);
/* Handle IP6T_SO_GET_ENTRIES for a 32-bit caller: validate *len against
 * the compat struct plus the user-declared size, look up the table, and
 * dump its entries in compat layout under the AF_INET6 compat lock.
 * NOTE(review): some lines elided from this excerpt; code kept verbatim. */
1926 compat_get_entries(struct net *net, struct compat_ip6t_get_entries __user *uptr,
1930 struct compat_ip6t_get_entries get;
1933 if (*len < sizeof(get)) {
1934 duprintf("compat_get_entries: %u < %zu\n", *len, sizeof(get));
1938 if (copy_from_user(&get, uptr, sizeof(get)) != 0)
1941 if (*len != sizeof(struct compat_ip6t_get_entries) + get.size) {
1942 duprintf("compat_get_entries: %u != %zu\n",
1943 *len, sizeof(get) + get.size);
1947 xt_compat_lock(AF_INET6);
1948 t = xt_find_table_lock(net, AF_INET6, get.name);
1949 if (!IS_ERR_OR_NULL(t)) {
1950 const struct xt_table_info *private = t->private;
1951 struct xt_table_info info;
1952 duprintf("t->private->number = %u\n", private->number);
/* Verify the caller's size still matches the live table's compat size. */
1953 ret = compat_table_info(private, &info);
1954 if (!ret && get.size == info.size) {
1955 ret = compat_copy_entries_to_user(private->size,
1956 t, uptr->entrytable);
1958 duprintf("compat_get_entries: I've got %u not %u!\n",
1959 private->size, get.size);
1962 xt_compat_flush_offsets(AF_INET6);
1966 ret = t ? PTR_ERR(t) : -ENOENT;
1968 xt_compat_unlock(AF_INET6);
/* Forward declaration: the compat getter falls through to the native
 * do_ip6t_get_ctl() for commands that need no layout conversion. */
1972 static int do_ip6t_get_ctl(struct sock *, int, void __user *, int *);
/* Compat getsockopt dispatcher: CAP_NET_ADMIN required; INFO and ENTRIES
 * use compat-aware paths, everything else goes to the native handler.
 * NOTE(review): some lines elided from this excerpt; code kept verbatim. */
1975 compat_do_ip6t_get_ctl(struct sock *sk, int cmd, void __user *user, int *len)
1979 if (!ns_capable(sock_net(sk)->user_ns, CAP_NET_ADMIN))
1983 case IP6T_SO_GET_INFO:
1984 ret = get_info(sock_net(sk), user, len, 1);
1986 case IP6T_SO_GET_ENTRIES:
1987 ret = compat_get_entries(sock_net(sk), user, len);
1990 ret = do_ip6t_get_ctl(sk, cmd, user, len);
/* Native setsockopt dispatcher: CAP_NET_ADMIN required; REPLACE and
 * ADD_COUNTERS route to the native paths (do_add_counters compat=0).
 * NOTE(review): some lines elided from this excerpt; code kept verbatim. */
1997 do_ip6t_set_ctl(struct sock *sk, int cmd, void __user *user, unsigned int len)
2001 if (!ns_capable(sock_net(sk)->user_ns, CAP_NET_ADMIN))
2005 case IP6T_SO_SET_REPLACE:
2006 ret = do_replace(sock_net(sk), user, len);
2009 case IP6T_SO_SET_ADD_COUNTERS:
2010 ret = do_add_counters(sock_net(sk), user, len, 0);
2014 duprintf("do_ip6t_set_ctl: unknown request %i\n", cmd);
/* Native getsockopt dispatcher: CAP_NET_ADMIN required.  Handles INFO,
 * ENTRIES, and the two REVISION queries (match/target revision lookup,
 * autoloading "ip6t_<name>" modules on demand).
 * NOTE(review): some lines elided from this excerpt; code kept verbatim. */
2022 do_ip6t_get_ctl(struct sock *sk, int cmd, void __user *user, int *len)
2026 if (!ns_capable(sock_net(sk)->user_ns, CAP_NET_ADMIN))
2030 case IP6T_SO_GET_INFO:
2031 ret = get_info(sock_net(sk), user, len, 0);
2034 case IP6T_SO_GET_ENTRIES:
2035 ret = get_entries(sock_net(sk), user, len);
2038 case IP6T_SO_GET_REVISION_MATCH:
2039 case IP6T_SO_GET_REVISION_TARGET: {
2040 struct xt_get_revision rev;
2043 if (*len != sizeof(rev)) {
2047 if (copy_from_user(&rev, user, sizeof(rev)) != 0) {
/* Force NUL termination of the user-supplied extension name. */
2051 rev.name[sizeof(rev.name)-1] = 0;
2053 if (cmd == IP6T_SO_GET_REVISION_TARGET)
/* Retry the revision lookup after requesting the module by name. */
2058 try_then_request_module(xt_find_revision(AF_INET6, rev.name,
2061 "ip6t_%s", rev.name);
2066 duprintf("do_ip6t_get_ctl: unknown request %i\n", cmd);
/* Register an ip6tables table for one network namespace: allocate table
 * info, copy in the initial ruleset, translate it, and hand it to
 * xt_register_table().  Returns the live xt_table or ERR_PTR on failure.
 * NOTE(review): some lines elided from this excerpt; code kept verbatim. */
2073 struct xt_table *ip6t_register_table(struct net *net,
2074 const struct xt_table *table,
2075 const struct ip6t_replace *repl)
2078 struct xt_table_info *newinfo;
2079 struct xt_table_info bootstrap = {0};
2080 void *loc_cpu_entry;
2081 struct xt_table *new_table;
2083 newinfo = xt_alloc_table_info(repl->size);
2089 /* choose the copy on our node/cpu, but dont care about preemption */
2090 loc_cpu_entry = newinfo->entries[raw_smp_processor_id()];
2091 memcpy(loc_cpu_entry, repl->entries, repl->size);
2093 ret = translate_table(net, newinfo, loc_cpu_entry, repl);
2097 new_table = xt_register_table(net, table, &bootstrap, newinfo);
2098 if (IS_ERR(new_table)) {
2099 ret = PTR_ERR(new_table);
/* Error path: the unregistered table info must be freed here. */
2105 xt_free_table_info(newinfo);
2107 return ERR_PTR(ret);
/* Tear down a registered table: unregister it, run cleanup_entry() on
 * every rule (releasing match/target module refs), drop the table-owner
 * module ref when extra rules were loaded, and free the table info.
 * NOTE(review): some lines elided from this excerpt; code kept verbatim. */
2110 void ip6t_unregister_table(struct net *net, struct xt_table *table)
2112 struct xt_table_info *private;
2113 void *loc_cpu_entry;
/* table->me must be read before xt_unregister_table releases the table. */
2114 struct module *table_owner = table->me;
2115 struct ip6t_entry *iter;
2117 private = xt_unregister_table(table);
2119 /* Decrease module usage counts and free resources */
2120 loc_cpu_entry = private->entries[raw_smp_processor_id()];
2121 xt_entry_foreach(iter, loc_cpu_entry, private->size)
2122 cleanup_entry(iter, net);
2123 if (private->number > private->initial_entries)
2124 module_put(table_owner);
2125 xt_free_table_info(private);
2128 /* Returns 1 if the type and code is matched by the range, 0 otherwise */
2130 icmp6_type_code_match(u_int8_t test_type, u_int8_t min_code, u_int8_t max_code,
2131 u_int8_t type, u_int8_t code,
/* Match when type equals test_type and code falls in [min_code, max_code];
 * the (elided) invert flag presumably XORs the result — confirm upstream. */
2134 return (type == test_type && code >= min_code && code <= max_code)
/* xt_match handler for the built-in "icmp6" match: pull the ICMPv6
 * header out of the skb and compare type/code against the rule.
 * Non-first fragments never match; a truncated header hotdrops the
 * packet (we were asked to inspect it and cannot).
 * NOTE(review): some lines elided from this excerpt; code kept verbatim. */
2139 icmp6_match(const struct sk_buff *skb, struct xt_action_param *par)
2141 const struct icmp6hdr *ic;
2142 struct icmp6hdr _icmph;
2143 const struct ip6t_icmp *icmpinfo = par->matchinfo;
2145 /* Must not be a fragment. */
2146 if (par->fragoff != 0)
2149 ic = skb_header_pointer(skb, par->thoff, sizeof(_icmph), &_icmph);
2151 /* We've been asked to examine this packet, and we
2152 * can't. Hence, no choice but to drop.
2154 duprintf("Dropping evil ICMP tinygram.\n");
2155 par->hotdrop = true;
2159 return icmp6_type_code_match(icmpinfo->type,
2162 ic->icmp6_type, ic->icmp6_code,
2163 !!(icmpinfo->invflags&IP6T_ICMP_INV));
2166 /* Called when user tries to insert an entry of this type. */
2167 static int icmp6_checkentry(const struct xt_mtchk_param *par)
2169 const struct ip6t_icmp *icmpinfo = par->matchinfo;
2171 /* Must specify no unknown invflags */
2172 return (icmpinfo->invflags & ~IP6T_ICMP_INV) ? -EINVAL : 0;
2175 /* The built-in targets: standard (NULL) and error. */
2176 static struct xt_target ip6t_builtin_tg[] __read_mostly = {
2178 .name = XT_STANDARD_TARGET,
/* Standard target's data is just the verdict, an int. */
2179 .targetsize = sizeof(int),
2180 .family = NFPROTO_IPV6,
2181 #ifdef CONFIG_COMPAT
2182 .compatsize = sizeof(compat_int_t),
2183 .compat_from_user = compat_standard_from_user,
2184 .compat_to_user = compat_standard_to_user,
2188 .name = XT_ERROR_TARGET,
2189 .target = ip6t_error,
/* Error target carries the error name string. */
2190 .targetsize = XT_FUNCTION_MAXNAMELEN,
2191 .family = NFPROTO_IPV6,
/* Socket-option registration: wires the IP6T_SO_SET_*/IP6T_SO_GET_*
 * ranges to the native and (under CONFIG_COMPAT) compat handlers. */
2195 static struct nf_sockopt_ops ip6t_sockopts = {
2197 .set_optmin = IP6T_BASE_CTL,
2198 .set_optmax = IP6T_SO_SET_MAX+1,
2199 .set = do_ip6t_set_ctl,
2200 #ifdef CONFIG_COMPAT
2201 .compat_set = compat_do_ip6t_set_ctl,
2203 .get_optmin = IP6T_BASE_CTL,
2204 .get_optmax = IP6T_SO_GET_MAX+1,
2205 .get = do_ip6t_get_ctl,
2206 #ifdef CONFIG_COMPAT
2207 .compat_get = compat_do_ip6t_get_ctl,
2209 .owner = THIS_MODULE,
/* Built-in match table: the "icmp6" protocol match defined above.
 * NOTE(review): excerpt elides the .name initializer and braces. */
2212 static struct xt_match ip6t_builtin_mt[] __read_mostly = {
2215 .match = icmp6_match,
2216 .matchsize = sizeof(struct ip6t_icmp),
2217 .checkentry = icmp6_checkentry,
2218 .proto = IPPROTO_ICMPV6,
2219 .family = NFPROTO_IPV6,
/* Per-netns init: set up xtables proc/state for IPv6 in this namespace. */
2223 static int __net_init ip6_tables_net_init(struct net *net)
2225 return xt_proto_init(net, NFPROTO_IPV6);
/* Per-netns teardown: undo xt_proto_init() for this namespace. */
2228 static void __net_exit ip6_tables_net_exit(struct net *net)
2230 xt_proto_fini(net, NFPROTO_IPV6);
/* Hooks the per-namespace init/exit pair into the pernet subsystem. */
2233 static struct pernet_operations ip6_tables_net_ops = {
2234 .init = ip6_tables_net_init,
2235 .exit = ip6_tables_net_exit,
/* Module init: register pernet ops, built-in targets and matches, then
 * the sockopt interface — unwinding each step in reverse on failure.
 * NOTE(review): some lines (error labels, returns) elided from this
 * excerpt; code kept verbatim. */
2238 static int __init ip6_tables_init(void)
2242 ret = register_pernet_subsys(&ip6_tables_net_ops);
2246 /* No one else will be downing sem now, so we won't sleep */
2247 ret = xt_register_targets(ip6t_builtin_tg, ARRAY_SIZE(ip6t_builtin_tg));
2250 ret = xt_register_matches(ip6t_builtin_mt, ARRAY_SIZE(ip6t_builtin_mt));
2254 /* Register setsockopt */
2255 ret = nf_register_sockopt(&ip6t_sockopts);
2259 pr_info("(C) 2000-2006 Netfilter Core Team\n");
/* Error unwind in reverse registration order. */
2263 xt_unregister_matches(ip6t_builtin_mt, ARRAY_SIZE(ip6t_builtin_mt));
2265 xt_unregister_targets(ip6t_builtin_tg, ARRAY_SIZE(ip6t_builtin_tg));
2267 unregister_pernet_subsys(&ip6_tables_net_ops);
/* Module exit: unregister everything in reverse order of ip6_tables_init(). */
2272 static void __exit ip6_tables_fini(void)
2274 nf_unregister_sockopt(&ip6t_sockopts);
2276 xt_unregister_matches(ip6t_builtin_mt, ARRAY_SIZE(ip6t_builtin_mt));
2277 xt_unregister_targets(ip6t_builtin_tg, ARRAY_SIZE(ip6t_builtin_tg));
2278 unregister_pernet_subsys(&ip6_tables_net_ops);
/* Public API for table modules (ip6table_filter etc.) plus module hooks. */
2281 EXPORT_SYMBOL(ip6t_register_table);
2282 EXPORT_SYMBOL(ip6t_unregister_table);
2283 EXPORT_SYMBOL(ip6t_do_table);
2285 module_init(ip6_tables_init);
2286 module_exit(ip6_tables_fini);