2 * Packet matching code.
4 * Copyright (C) 1999 Paul `Rusty' Russell & Michael J. Neuling
8 * This program is free software; you can redistribute it and/or modify
9 * it under the terms of the GNU General Public License version 2 as
10 * published by the Free Software Foundation.
12 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
13 #include <linux/capability.h>
15 #include <linux/skbuff.h>
16 #include <linux/kmod.h>
17 #include <linux/vmalloc.h>
18 #include <linux/netdevice.h>
19 #include <linux/module.h>
20 #include <linux/poison.h>
21 #include <linux/icmpv6.h>
23 #include <net/compat.h>
24 #include <asm/uaccess.h>
25 #include <linux/mutex.h>
26 #include <linux/proc_fs.h>
27 #include <linux/err.h>
28 #include <linux/cpumask.h>
30 #include <linux/netfilter_ipv6/ip6_tables.h>
31 #include <linux/netfilter/x_tables.h>
32 #include <net/netfilter/nf_log.h>
33 #include "../../netfilter/xt_repldata.h"
35 MODULE_LICENSE("GPL");
37 MODULE_DESCRIPTION("IPv6 packet filter");
39 /*#define DEBUG_IP_FIREWALL*/
40 /*#define DEBUG_ALLOW_ALL*/ /* Useful for remote debugging */
41 /*#define DEBUG_IP_FIREWALL_USER*/
43 #ifdef DEBUG_IP_FIREWALL
44 #define dprintf(format, args...) pr_info(format , ## args)
46 #define dprintf(format, args...)
49 #ifdef DEBUG_IP_FIREWALL_USER
50 #define duprintf(format, args...) pr_info(format , ## args)
52 #define duprintf(format, args...)
55 #ifdef CONFIG_NETFILTER_DEBUG
56 #define IP_NF_ASSERT(x) WARN_ON(!(x))
58 #define IP_NF_ASSERT(x)
62 /* All the better to debug you with... */
67 void *ip6t_alloc_initial_table(const struct xt_table *info)
69 return xt_alloc_initial_table(ip6t, IP6T);
71 EXPORT_SYMBOL_GPL(ip6t_alloc_initial_table);
74 We keep a set of rules for each CPU, so we can avoid write-locking
75 them in the softirq when updating the counters and therefore
76 only need to read-lock in the softirq; doing a write_lock_bh() in user
77 context stops packets coming through and allows user context to read
78 the counters or update the rules.
80 Hence the start of any table is given by get_table() below. */
82 /* Returns whether matches rule or not. */
83 /* Performance critical - called for every packet */
85 ip6_packet_match(const struct sk_buff *skb,
88 const struct ip6t_ip6 *ip6info,
89 unsigned int *protoff,
90 int *fragoff, bool *hotdrop)
93 const struct ipv6hdr *ipv6 = ipv6_hdr(skb);
95 #define FWINV(bool, invflg) ((bool) ^ !!(ip6info->invflags & (invflg)))
97 if (FWINV(ipv6_masked_addr_cmp(&ipv6->saddr, &ip6info->smsk,
98 &ip6info->src), IP6T_INV_SRCIP) ||
99 FWINV(ipv6_masked_addr_cmp(&ipv6->daddr, &ip6info->dmsk,
100 &ip6info->dst), IP6T_INV_DSTIP)) {
101 dprintf("Source or dest mismatch.\n");
103 dprintf("SRC: %u. Mask: %u. Target: %u.%s\n", ip->saddr,
104 ipinfo->smsk.s_addr, ipinfo->src.s_addr,
105 ipinfo->invflags & IP6T_INV_SRCIP ? " (INV)" : "");
106 dprintf("DST: %u. Mask: %u. Target: %u.%s\n", ip->daddr,
107 ipinfo->dmsk.s_addr, ipinfo->dst.s_addr,
108 ipinfo->invflags & IP6T_INV_DSTIP ? " (INV)" : "");*/
112 ret = ifname_compare_aligned(indev, ip6info->iniface, ip6info->iniface_mask);
114 if (FWINV(ret != 0, IP6T_INV_VIA_IN)) {
115 dprintf("VIA in mismatch (%s vs %s).%s\n",
116 indev, ip6info->iniface,
117 ip6info->invflags&IP6T_INV_VIA_IN ?" (INV)":"");
121 ret = ifname_compare_aligned(outdev, ip6info->outiface, ip6info->outiface_mask);
123 if (FWINV(ret != 0, IP6T_INV_VIA_OUT)) {
124 dprintf("VIA out mismatch (%s vs %s).%s\n",
125 outdev, ip6info->outiface,
126 ip6info->invflags&IP6T_INV_VIA_OUT ?" (INV)":"");
130 /* ... might want to do something with class and flowlabel here ... */
132 /* look for the desired protocol header */
133 if((ip6info->flags & IP6T_F_PROTO)) {
135 unsigned short _frag_off;
137 protohdr = ipv6_find_hdr(skb, protoff, -1, &_frag_off, NULL);
143 *fragoff = _frag_off;
145 dprintf("Packet protocol %hi ?= %s%hi.\n",
147 ip6info->invflags & IP6T_INV_PROTO ? "!":"",
150 if (ip6info->proto == protohdr) {
151 if(ip6info->invflags & IP6T_INV_PROTO) {
157 /* We need match for the '-p all', too! */
158 if ((ip6info->proto != 0) &&
159 !(ip6info->invflags & IP6T_INV_PROTO))
165 /* should be ip6 safe */
167 ip6_checkentry(const struct ip6t_ip6 *ipv6)
169 if (ipv6->flags & ~IP6T_F_MASK) {
170 duprintf("Unknown flag bits set: %08X\n",
171 ipv6->flags & ~IP6T_F_MASK);
174 if (ipv6->invflags & ~IP6T_INV_MASK) {
175 duprintf("Unknown invflag bits set: %08X\n",
176 ipv6->invflags & ~IP6T_INV_MASK);
183 ip6t_error(struct sk_buff *skb, const struct xt_action_param *par)
185 net_info_ratelimited("error: `%s'\n", (const char *)par->targinfo);
/* Resolve a byte offset inside a table blob to the entry living there. */
static inline struct ip6t_entry *
get_entry(const void *base, unsigned int offset)
{
	return (struct ip6t_entry *)(base + offset);
}
196 /* All zeroes == unconditional rule. */
197 /* Mildly perf critical (only if packet tracing is on) */
198 static inline bool unconditional(const struct ip6t_ip6 *ipv6)
200 static const struct ip6t_ip6 uncond;
202 return memcmp(ipv6, &uncond, sizeof(uncond)) == 0;
/* const-correct wrapper around ip6t_get_target() for read-only callers. */
static inline const struct xt_entry_target *
ip6t_get_target_c(const struct ip6t_entry *e)
{
	return ip6t_get_target((struct ip6t_entry *)e);
}
211 #if IS_ENABLED(CONFIG_NETFILTER_XT_TARGET_TRACE)
212 /* This cries for unification! */
/* Hook number -> chain name, used when formatting TRACE log lines. */
213 static const char *const hooknames[] = {
214 [NF_INET_PRE_ROUTING] = "PREROUTING",
215 [NF_INET_LOCAL_IN] = "INPUT",
216 [NF_INET_FORWARD] = "FORWARD",
217 [NF_INET_LOCAL_OUT] = "OUTPUT",
218 [NF_INET_POST_ROUTING] = "POSTROUTING",
/* Kind of rule a traced packet stopped at; indexes comments[] below. */
221 enum nf_ip_trace_comments {
222 NF_IP6_TRACE_COMMENT_RULE,
223 NF_IP6_TRACE_COMMENT_RETURN,
224 NF_IP6_TRACE_COMMENT_POLICY,
227 static const char *const comments[] = {
228 [NF_IP6_TRACE_COMMENT_RULE] = "rule",
229 [NF_IP6_TRACE_COMMENT_RETURN] = "return",
230 [NF_IP6_TRACE_COMMENT_POLICY] = "policy",
/* Log settings used for TRACE output via nf_log_packet().
 * NOTE(review): interior initializer lines are elided in this extract
 * (embedded numbering jumps 234 -> 238). */
233 static struct nf_loginfo trace_loginfo = {
234 .type = NF_LOG_TYPE_LOG,
238 .logflags = NF_LOG_MASK,
243 /* Mildly perf critical (only if packet tracing is on) */
245 get_chainname_rulenum(const struct ip6t_entry *s, const struct ip6t_entry *e,
246 const char *hookname, const char **chainname,
247 const char **comment, unsigned int *rulenum)
249 const struct xt_standard_target *t = (void *)ip6t_get_target_c(s);
251 if (strcmp(t->target.u.kernel.target->name, XT_ERROR_TARGET) == 0) {
252 /* Head of user chain: ERROR target with chainname */
253 *chainname = t->target.data;
258 if (s->target_offset == sizeof(struct ip6t_entry) &&
259 strcmp(t->target.u.kernel.target->name,
260 XT_STANDARD_TARGET) == 0 &&
262 unconditional(&s->ipv6)) {
263 /* Tail of chains: STANDARD target (return/policy) */
264 *comment = *chainname == hookname
265 ? comments[NF_IP6_TRACE_COMMENT_POLICY]
266 : comments[NF_IP6_TRACE_COMMENT_RETURN];
275 static void trace_packet(const struct sk_buff *skb,
277 const struct net_device *in,
278 const struct net_device *out,
279 const char *tablename,
280 const struct xt_table_info *private,
281 const struct ip6t_entry *e)
283 const void *table_base;
284 const struct ip6t_entry *root;
285 const char *hookname, *chainname, *comment;
286 const struct ip6t_entry *iter;
287 unsigned int rulenum = 0;
288 struct net *net = dev_net(in ? in : out);
290 table_base = private->entries[smp_processor_id()];
291 root = get_entry(table_base, private->hook_entry[hook]);
293 hookname = chainname = hooknames[hook];
294 comment = comments[NF_IP6_TRACE_COMMENT_RULE];
296 xt_entry_foreach(iter, root, private->size - private->hook_entry[hook])
297 if (get_chainname_rulenum(iter, e, hookname,
298 &chainname, &comment, &rulenum) != 0)
301 nf_log_packet(net, AF_INET6, hook, skb, in, out, &trace_loginfo,
302 "TRACE: %s:%s:%s:%u ",
303 tablename, chainname, comment, rulenum);
307 static inline __pure struct ip6t_entry *
308 ip6t_next_entry(const struct ip6t_entry *entry)
310 return (void *)entry + entry->next_offset;
313 /* Returns one of the generic firewall policies, like NF_ACCEPT. */
/*
 * Core rule-evaluation loop: run @skb through @table at hook @hook and
 * return the resulting verdict.  Per-CPU entry copies and a per-CPU jump
 * stack implement user-defined chain calls without recursion.
 * NOTE(review): this extract elides interior lines (embedded numbering
 * jumps) — the return-type line, local declarations such as 'hook',
 * 'addend', 'origptr' and 'v', and most closing braces are not visible
 * here.  All code lines below are left byte-identical.
 */
315 ip6t_do_table(struct sk_buff *skb,
317 const struct net_device *in,
318 const struct net_device *out,
319 struct xt_table *table)
321 static const char nulldevname[IFNAMSIZ] __attribute__((aligned(sizeof(long))));
322 /* Initializing verdict to NF_DROP keeps gcc happy. */
323 unsigned int verdict = NF_DROP;
324 const char *indev, *outdev;
325 const void *table_base;
326 struct ip6t_entry *e, **jumpstack;
327 unsigned int *stackptr, origptr, cpu;
328 const struct xt_table_info *private;
329 struct xt_action_param acpar;
333 indev = in ? in->name : nulldevname;
334 outdev = out ? out->name : nulldevname;
335 /* We handle fragments by dealing with the first fragment as
336 * if it was a normal packet. All other fragments are treated
337 * normally, except that they will NEVER match rules that ask
338 * things we don't know, ie. tcp syn flag or ports). If the
339 * rule is also a fragment-specific rule, non-fragments won't
341 acpar.hotdrop = false;
344 acpar.family = NFPROTO_IPV6;
345 acpar.hooknum = hook;
347 IP_NF_ASSERT(table->valid_hooks & (1 << hook));
/* Enter the per-CPU write-recursion sequence and pick this CPU's copy of
 * the table plus its jump stack. */
350 addend = xt_write_recseq_begin();
351 private = table->private;
352 cpu = smp_processor_id();
353 table_base = private->entries[cpu];
354 jumpstack = (struct ip6t_entry **)private->jumpstack[cpu];
355 stackptr = per_cpu_ptr(private->stackptr, cpu);
/* Start at this hook's entry point in the blob. */
358 e = get_entry(table_base, private->hook_entry[hook]);
361 const struct xt_entry_target *t;
362 const struct xt_entry_match *ematch;
/* Header part mismatch: fall through to the next rule. */
366 if (!ip6_packet_match(skb, indev, outdev, &e->ipv6,
367 &acpar.thoff, &acpar.fragoff, &acpar.hotdrop)) {
369 e = ip6t_next_entry(e);
/* All extension matches must accept as well. */
373 xt_ematch_foreach(ematch, e) {
374 acpar.match = ematch->u.kernel.match;
375 acpar.matchinfo = ematch->data;
376 if (!acpar.match->match(skb, &acpar))
380 ADD_COUNTER(e->counters, skb->len, 1);
382 t = ip6t_get_target_c(e);
383 IP_NF_ASSERT(t->u.kernel.target);
385 #if IS_ENABLED(CONFIG_NETFILTER_XT_TARGET_TRACE)
386 /* The packet is traced: log it */
387 if (unlikely(skb->nf_trace))
388 trace_packet(skb, hook, in, out,
389 table->name, private, e);
391 /* Standard target? */
392 if (!t->u.kernel.target->target) {
395 v = ((struct xt_standard_target *)t)->verdict;
397 /* Pop from stack? */
398 if (v != XT_RETURN) {
/* Negative verdict encodes NF_ACCEPT/NF_DROP/... as -(v)-1. */
399 verdict = (unsigned int)(-v) - 1;
402 if (*stackptr <= origptr)
403 e = get_entry(table_base,
404 private->underflow[hook]);
406 e = ip6t_next_entry(jumpstack[--*stackptr]);
/* Plain jump (not GOTO, not fallthrough): push return address. */
409 if (table_base + v != ip6t_next_entry(e) &&
410 !(e->ipv6.flags & IP6T_F_GOTO)) {
411 if (*stackptr >= private->stacksize) {
415 jumpstack[(*stackptr)++] = e;
418 e = get_entry(table_base, v);
/* Non-standard target: invoke its handler. */
422 acpar.target = t->u.kernel.target;
423 acpar.targinfo = t->data;
425 verdict = t->u.kernel.target->target(skb, &acpar);
426 if (verdict == XT_CONTINUE)
427 e = ip6t_next_entry(e);
431 } while (!acpar.hotdrop);
435 xt_write_recseq_end(addend);
438 #ifdef DEBUG_ALLOW_ALL
447 /* Figures out from what hook each rule can be called: returns 0 if
448 there are loops. Puts hook bitmask in comefrom. */
/*
 * Depth-first walk over every valid hook's chain, detecting loops and
 * recording in each rule's 'comefrom' which hooks can reach it.  Uses
 * counters.pcnt as a temporary back-pointer stack (restored to 0 on the
 * way out) instead of recursion.
 * NOTE(review): this extract elides interior lines (embedded numbering
 * jumps) — return statements, 'continue's, loop braces and parts of the
 * backtracking logic are not visible.  Code lines left byte-identical.
 */
450 mark_source_chains(const struct xt_table_info *newinfo,
451 unsigned int valid_hooks, void *entry0)
455 /* No recursion; use packet counter to save back ptrs (reset
456 to 0 as we leave), and comefrom to save source hook bitmask */
457 for (hook = 0; hook < NF_INET_NUMHOOKS; hook++) {
458 unsigned int pos = newinfo->hook_entry[hook];
459 struct ip6t_entry *e = (struct ip6t_entry *)(entry0 + pos);
461 if (!(valid_hooks & (1 << hook)))
464 /* Set initial back pointer. */
465 e->counters.pcnt = pos;
468 const struct xt_standard_target *t
469 = (void *)ip6t_get_target_c(e);
470 int visited = e->comefrom & (1 << hook);
/* Bit NF_INET_NUMHOOKS set means the rule is on the current DFS path:
 * seeing it again is a loop. */
472 if (e->comefrom & (1 << NF_INET_NUMHOOKS)) {
473 pr_err("iptables: loop hook %u pos %u %08X.\n",
474 hook, pos, e->comefrom);
477 e->comefrom |= ((1 << hook) | (1 << NF_INET_NUMHOOKS));
479 /* Unconditional return/END. */
480 if ((e->target_offset == sizeof(struct ip6t_entry) &&
481 (strcmp(t->target.u.user.name,
482 XT_STANDARD_TARGET) == 0) &&
484 unconditional(&e->ipv6)) || visited) {
485 unsigned int oldpos, size;
487 if ((strcmp(t->target.u.user.name,
488 XT_STANDARD_TARGET) == 0) &&
489 t->verdict < -NF_MAX_VERDICT - 1) {
490 duprintf("mark_source_chains: bad "
491 "negative verdict (%i)\n",
496 /* Return: backtrack through the last
499 e->comefrom ^= (1<<NF_INET_NUMHOOKS);
500 #ifdef DEBUG_IP_FIREWALL_USER
502 & (1 << NF_INET_NUMHOOKS)) {
503 duprintf("Back unset "
/* Pop the saved back-pointer and clear the temporary. */
510 pos = e->counters.pcnt;
511 e->counters.pcnt = 0;
513 /* We're at the start. */
517 e = (struct ip6t_entry *)
519 } while (oldpos == pos + e->next_offset);
/* Move on to the rule after the one we returned to. */
522 size = e->next_offset;
523 e = (struct ip6t_entry *)
524 (entry0 + pos + size);
525 e->counters.pcnt = pos;
528 int newpos = t->verdict;
530 if (strcmp(t->target.u.user.name,
531 XT_STANDARD_TARGET) == 0 &&
533 if (newpos > newinfo->size -
534 sizeof(struct ip6t_entry)) {
535 duprintf("mark_source_chains: "
536 "bad verdict (%i)\n",
540 /* This a jump; chase it. */
541 duprintf("Jump rule %u -> %u\n",
544 /* ... this is a fallthru */
545 newpos = pos + e->next_offset;
547 e = (struct ip6t_entry *)
549 e->counters.pcnt = pos;
554 duprintf("Finished chain %u\n", hook);
559 static void cleanup_match(struct xt_entry_match *m, struct net *net)
561 struct xt_mtdtor_param par;
564 par.match = m->u.kernel.match;
565 par.matchinfo = m->data;
566 par.family = NFPROTO_IPV6;
567 if (par.match->destroy != NULL)
568 par.match->destroy(&par);
569 module_put(par.match->me);
573 check_entry(const struct ip6t_entry *e, const char *name)
575 const struct xt_entry_target *t;
577 if (!ip6_checkentry(&e->ipv6)) {
578 duprintf("ip_tables: ip check failed %p %s.\n", e, name);
582 if (e->target_offset + sizeof(struct xt_entry_target) >
586 t = ip6t_get_target_c(e);
587 if (e->target_offset + t->u.target_size > e->next_offset)
593 static int check_match(struct xt_entry_match *m, struct xt_mtchk_param *par)
595 const struct ip6t_ip6 *ipv6 = par->entryinfo;
598 par->match = m->u.kernel.match;
599 par->matchinfo = m->data;
601 ret = xt_check_match(par, m->u.match_size - sizeof(*m),
602 ipv6->proto, ipv6->invflags & IP6T_INV_PROTO);
604 duprintf("ip_tables: check failed for `%s'.\n",
612 find_check_match(struct xt_entry_match *m, struct xt_mtchk_param *par)
614 struct xt_match *match;
617 match = xt_request_find_match(NFPROTO_IPV6, m->u.user.name,
620 duprintf("find_check_match: `%s' not found\n", m->u.user.name);
621 return PTR_ERR(match);
623 m->u.kernel.match = match;
625 ret = check_match(m, par);
631 module_put(m->u.kernel.match->me);
635 static int check_target(struct ip6t_entry *e, struct net *net, const char *name)
637 struct xt_entry_target *t = ip6t_get_target(e);
638 struct xt_tgchk_param par = {
642 .target = t->u.kernel.target,
644 .hook_mask = e->comefrom,
645 .family = NFPROTO_IPV6,
649 t = ip6t_get_target(e);
650 ret = xt_check_target(&par, t->u.target_size - sizeof(*t),
651 e->ipv6.proto, e->ipv6.invflags & IP6T_INV_PROTO);
653 duprintf("ip_tables: check failed for `%s'.\n",
654 t->u.kernel.target->name);
661 find_check_entry(struct ip6t_entry *e, struct net *net, const char *name,
664 struct xt_entry_target *t;
665 struct xt_target *target;
668 struct xt_mtchk_param mtpar;
669 struct xt_entry_match *ematch;
671 ret = check_entry(e, name);
678 mtpar.entryinfo = &e->ipv6;
679 mtpar.hook_mask = e->comefrom;
680 mtpar.family = NFPROTO_IPV6;
681 xt_ematch_foreach(ematch, e) {
682 ret = find_check_match(ematch, &mtpar);
684 goto cleanup_matches;
688 t = ip6t_get_target(e);
689 target = xt_request_find_target(NFPROTO_IPV6, t->u.user.name,
691 if (IS_ERR(target)) {
692 duprintf("find_check_entry: `%s' not found\n", t->u.user.name);
693 ret = PTR_ERR(target);
694 goto cleanup_matches;
696 t->u.kernel.target = target;
698 ret = check_target(e, net, name);
703 module_put(t->u.kernel.target->me);
705 xt_ematch_foreach(ematch, e) {
708 cleanup_match(ematch, net);
713 static bool check_underflow(const struct ip6t_entry *e)
715 const struct xt_entry_target *t;
716 unsigned int verdict;
718 if (!unconditional(&e->ipv6))
720 t = ip6t_get_target_c(e);
721 if (strcmp(t->u.user.name, XT_STANDARD_TARGET) != 0)
723 verdict = ((struct xt_standard_target *)t)->verdict;
724 verdict = -verdict - 1;
725 return verdict == NF_DROP || verdict == NF_ACCEPT;
729 check_entry_size_and_hooks(struct ip6t_entry *e,
730 struct xt_table_info *newinfo,
731 const unsigned char *base,
732 const unsigned char *limit,
733 const unsigned int *hook_entries,
734 const unsigned int *underflows,
735 unsigned int valid_hooks)
739 if ((unsigned long)e % __alignof__(struct ip6t_entry) != 0 ||
740 (unsigned char *)e + sizeof(struct ip6t_entry) >= limit) {
741 duprintf("Bad offset %p\n", e);
746 < sizeof(struct ip6t_entry) + sizeof(struct xt_entry_target)) {
747 duprintf("checking: element %p size %u\n",
752 /* Check hooks & underflows */
753 for (h = 0; h < NF_INET_NUMHOOKS; h++) {
754 if (!(valid_hooks & (1 << h)))
756 if ((unsigned char *)e - base == hook_entries[h])
757 newinfo->hook_entry[h] = hook_entries[h];
758 if ((unsigned char *)e - base == underflows[h]) {
759 if (!check_underflow(e)) {
760 pr_err("Underflows must be unconditional and "
761 "use the STANDARD target with "
765 newinfo->underflow[h] = underflows[h];
769 /* Clear counters and comefrom */
770 e->counters = ((struct xt_counters) { 0, 0 });
775 static void cleanup_entry(struct ip6t_entry *e, struct net *net)
777 struct xt_tgdtor_param par;
778 struct xt_entry_target *t;
779 struct xt_entry_match *ematch;
781 /* Cleanup all matches */
782 xt_ematch_foreach(ematch, e)
783 cleanup_match(ematch, net);
784 t = ip6t_get_target(e);
787 par.target = t->u.kernel.target;
788 par.targinfo = t->data;
789 par.family = NFPROTO_IPV6;
790 if (par.target->destroy != NULL)
791 par.target->destroy(&par);
792 module_put(par.target->me);
795 /* Checks and translates the user-supplied table segment (held in
/*
 * Validate a complete replacement blob: per-entry size/hook checks, entry
 * count, all hooks assigned, loop detection (mark_source_chains), then
 * per-entry match/target resolution; finally replicate the blob to every
 * other CPU's copy.
 * NOTE(review): this extract elides interior lines (embedded numbering
 * jumps) — declarations of 'i'/'ret', error returns/labels and closing
 * braces are not visible.  Code lines left byte-identical.
 */
798 translate_table(struct net *net, struct xt_table_info *newinfo, void *entry0,
799 const struct ip6t_replace *repl)
801 struct ip6t_entry *iter;
805 newinfo->size = repl->size;
806 newinfo->number = repl->num_entries;
808 /* Init all hooks to impossible value. */
809 for (i = 0; i < NF_INET_NUMHOOKS; i++) {
810 newinfo->hook_entry[i] = 0xFFFFFFFF;
811 newinfo->underflow[i] = 0xFFFFFFFF;
814 duprintf("translate_table: size %u\n", newinfo->size);
816 /* Walk through entries, checking offsets. */
817 xt_entry_foreach(iter, entry0, newinfo->size) {
818 ret = check_entry_size_and_hooks(iter, newinfo, entry0,
/* Each ERROR target heads a user chain; stacksize counts them so the
 * jumpstack can be sized. */
826 if (strcmp(ip6t_get_target(iter)->u.user.name,
827 XT_ERROR_TARGET) == 0)
828 ++newinfo->stacksize;
831 if (i != repl->num_entries) {
832 duprintf("translate_table: %u not %u entries\n",
833 i, repl->num_entries);
837 /* Check hooks all assigned */
838 for (i = 0; i < NF_INET_NUMHOOKS; i++) {
839 /* Only hooks which are valid */
840 if (!(repl->valid_hooks & (1 << i)))
842 if (newinfo->hook_entry[i] == 0xFFFFFFFF) {
843 duprintf("Invalid hook entry %u %u\n",
844 i, repl->hook_entry[i]);
847 if (newinfo->underflow[i] == 0xFFFFFFFF) {
848 duprintf("Invalid underflow %u %u\n",
849 i, repl->underflow[i]);
854 if (!mark_source_chains(newinfo, repl->valid_hooks, entry0))
857 /* Finally, each sanity check must pass */
859 xt_entry_foreach(iter, entry0, newinfo->size) {
860 ret = find_check_entry(iter, net, repl->name, repl->size);
/* On failure: undo only the entries already checked. */
867 xt_entry_foreach(iter, entry0, newinfo->size) {
870 cleanup_entry(iter, net);
875 /* And one copy for every other CPU */
876 for_each_possible_cpu(i) {
877 if (newinfo->entries[i] && newinfo->entries[i] != entry0)
878 memcpy(newinfo->entries[i], entry0, newinfo->size);
885 get_counters(const struct xt_table_info *t,
886 struct xt_counters counters[])
888 struct ip6t_entry *iter;
892 for_each_possible_cpu(cpu) {
893 seqcount_t *s = &per_cpu(xt_recseq, cpu);
896 xt_entry_foreach(iter, t->entries[cpu], t->size) {
901 start = read_seqcount_begin(s);
902 bcnt = iter->counters.bcnt;
903 pcnt = iter->counters.pcnt;
904 } while (read_seqcount_retry(s, start));
906 ADD_COUNTER(counters[i], bcnt, pcnt);
912 static struct xt_counters *alloc_counters(const struct xt_table *table)
914 unsigned int countersize;
915 struct xt_counters *counters;
916 const struct xt_table_info *private = table->private;
918 /* We need atomic snapshot of counters: rest doesn't change
919 (other than comefrom, which userspace doesn't care
921 countersize = sizeof(struct xt_counters) * private->number;
922 counters = vzalloc(countersize);
924 if (counters == NULL)
925 return ERR_PTR(-ENOMEM);
927 get_counters(private, counters);
/*
 * Copy the table blob to userspace, then patch in the snapshotted counters
 * and replace kernel match/target pointers with their user-visible names.
 * NOTE(review): this extract elides interior lines (embedded numbering
 * jumps) — 'ret'/'i' declarations, error assignments ('ret = -EFAULT'),
 * the free/return tail and closing braces are not visible.  Code lines
 * left byte-identical.
 */
933 copy_entries_to_user(unsigned int total_size,
934 const struct xt_table *table,
935 void __user *userptr)
937 unsigned int off, num;
938 const struct ip6t_entry *e;
939 struct xt_counters *counters;
940 const struct xt_table_info *private = table->private;
942 const void *loc_cpu_entry;
944 counters = alloc_counters(table);
945 if (IS_ERR(counters))
946 return PTR_ERR(counters);
948 /* choose the copy that is on our node/cpu, ...
949 * This choice is lazy (because current thread is
950 * allowed to migrate to another cpu)
952 loc_cpu_entry = private->entries[raw_smp_processor_id()];
953 if (copy_to_user(userptr, loc_cpu_entry, total_size) != 0) {
958 /* FIXME: use iterator macros --RR */
959 /* ... then go back and fix counters and names */
960 for (off = 0, num = 0; off < total_size; off += e->next_offset, num++){
962 const struct xt_entry_match *m;
963 const struct xt_entry_target *t;
965 e = (struct ip6t_entry *)(loc_cpu_entry + off);
/* Overwrite the (kernel-internal) counters with the snapshot. */
966 if (copy_to_user(userptr + off
967 + offsetof(struct ip6t_entry, counters),
969 sizeof(counters[num])) != 0) {
/* Walk this entry's matches and write each one's canonical name. */
974 for (i = sizeof(struct ip6t_entry);
975 i < e->target_offset;
976 i += m->u.match_size) {
979 if (copy_to_user(userptr + off + i
980 + offsetof(struct xt_entry_match,
982 m->u.kernel.match->name,
983 strlen(m->u.kernel.match->name)+1)
990 t = ip6t_get_target_c(e);
991 if (copy_to_user(userptr + off + e->target_offset
992 + offsetof(struct xt_entry_target,
994 t->u.kernel.target->name,
995 strlen(t->u.kernel.target->name)+1) != 0) {
1006 #ifdef CONFIG_COMPAT
1007 static void compat_standard_from_user(void *dst, const void *src)
1009 int v = *(compat_int_t *)src;
1012 v += xt_compat_calc_jump(AF_INET6, v);
1013 memcpy(dst, &v, sizeof(v));
1016 static int compat_standard_to_user(void __user *dst, const void *src)
1018 compat_int_t cv = *(int *)src;
1021 cv -= xt_compat_calc_jump(AF_INET6, cv);
1022 return copy_to_user(dst, &cv, sizeof(cv)) ? -EFAULT : 0;
1025 static int compat_calc_entry(const struct ip6t_entry *e,
1026 const struct xt_table_info *info,
1027 const void *base, struct xt_table_info *newinfo)
1029 const struct xt_entry_match *ematch;
1030 const struct xt_entry_target *t;
1031 unsigned int entry_offset;
1034 off = sizeof(struct ip6t_entry) - sizeof(struct compat_ip6t_entry);
1035 entry_offset = (void *)e - base;
1036 xt_ematch_foreach(ematch, e)
1037 off += xt_compat_match_offset(ematch->u.kernel.match);
1038 t = ip6t_get_target_c(e);
1039 off += xt_compat_target_offset(t->u.kernel.target);
1040 newinfo->size -= off;
1041 ret = xt_compat_add_offset(AF_INET6, entry_offset, off);
1045 for (i = 0; i < NF_INET_NUMHOOKS; i++) {
1046 if (info->hook_entry[i] &&
1047 (e < (struct ip6t_entry *)(base + info->hook_entry[i])))
1048 newinfo->hook_entry[i] -= off;
1049 if (info->underflow[i] &&
1050 (e < (struct ip6t_entry *)(base + info->underflow[i])))
1051 newinfo->underflow[i] -= off;
1056 static int compat_table_info(const struct xt_table_info *info,
1057 struct xt_table_info *newinfo)
1059 struct ip6t_entry *iter;
1060 void *loc_cpu_entry;
1063 if (!newinfo || !info)
1066 /* we dont care about newinfo->entries[] */
1067 memcpy(newinfo, info, offsetof(struct xt_table_info, entries));
1068 newinfo->initial_entries = 0;
1069 loc_cpu_entry = info->entries[raw_smp_processor_id()];
1070 xt_compat_init_offsets(AF_INET6, info->number);
1071 xt_entry_foreach(iter, loc_cpu_entry, info->size) {
1072 ret = compat_calc_entry(iter, info, loc_cpu_entry, newinfo);
/*
 * IP6T_SO_GET_INFO handler: look up the named table (auto-loading its
 * module) and copy its hook entry points, underflows, size and entry count
 * to userspace.  In compat mode the sizes are first recomputed via
 * compat_table_info().
 * NOTE(review): this extract elides interior lines (embedded numbering
 * jumps) — 'ret'/'t' declarations, several error returns, 'if (compat)'
 * guards and closing braces are not visible.  Code lines left
 * byte-identical.
 */
1080 static int get_info(struct net *net, void __user *user,
1081 const int *len, int compat)
1083 char name[XT_TABLE_MAXNAMELEN];
1087 if (*len != sizeof(struct ip6t_getinfo)) {
1088 duprintf("length %u != %zu\n", *len,
1089 sizeof(struct ip6t_getinfo));
1093 if (copy_from_user(name, user, sizeof(name)) != 0)
/* Force NUL termination of the user-supplied table name. */
1096 name[XT_TABLE_MAXNAMELEN-1] = '\0';
1097 #ifdef CONFIG_COMPAT
1099 xt_compat_lock(AF_INET6);
1101 t = try_then_request_module(xt_find_table_lock(net, AF_INET6, name),
1102 "ip6table_%s", name);
1103 if (!IS_ERR_OR_NULL(t)) {
1104 struct ip6t_getinfo info;
1105 const struct xt_table_info *private = t->private;
1106 #ifdef CONFIG_COMPAT
1107 struct xt_table_info tmp;
1110 ret = compat_table_info(private, &tmp);
1111 xt_compat_flush_offsets(AF_INET6);
1115 memset(&info, 0, sizeof(info));
1116 info.valid_hooks = t->valid_hooks;
1117 memcpy(info.hook_entry, private->hook_entry,
1118 sizeof(info.hook_entry));
1119 memcpy(info.underflow, private->underflow,
1120 sizeof(info.underflow));
1121 info.num_entries = private->number;
1122 info.size = private->size;
1123 strcpy(info.name, name);
1125 if (copy_to_user(user, &info, *len) != 0)
1133 ret = t ? PTR_ERR(t) : -ENOENT;
1134 #ifdef CONFIG_COMPAT
1136 xt_compat_unlock(AF_INET6);
/*
 * IP6T_SO_GET_ENTRIES handler: validate the requested size against the
 * live table and dump its entries via copy_entries_to_user().
 * NOTE(review): this extract elides interior lines (embedded numbering
 * jumps) — 'ret'/'t' declarations, error returns, the module_put/unlock
 * tail and closing braces are not visible.  Code lines left
 * byte-identical.
 */
1142 get_entries(struct net *net, struct ip6t_get_entries __user *uptr,
1146 struct ip6t_get_entries get;
1149 if (*len < sizeof(get)) {
1150 duprintf("get_entries: %u < %zu\n", *len, sizeof(get));
1153 if (copy_from_user(&get, uptr, sizeof(get)) != 0)
1155 if (*len != sizeof(struct ip6t_get_entries) + get.size) {
1156 duprintf("get_entries: %u != %zu\n",
1157 *len, sizeof(get) + get.size);
1161 t = xt_find_table_lock(net, AF_INET6, get.name);
1162 if (!IS_ERR_OR_NULL(t)) {
1163 struct xt_table_info *private = t->private;
1164 duprintf("t->private->number = %u\n", private->number);
1165 if (get.size == private->size)
1166 ret = copy_entries_to_user(private->size,
1167 t, uptr->entrytable);
1169 duprintf("get_entries: I've got %u not %u!\n",
1170 private->size, get.size);
1176 ret = t ? PTR_ERR(t) : -ENOENT;
/*
 * Swap a validated replacement table into place: allocate the counter
 * snapshot, atomically exchange the xt_table_info, adjust the module
 * refcount, snapshot the old counters, tear down the old entries and
 * return the counters to userspace.
 * NOTE(review): this extract elides interior lines (embedded numbering
 * jumps) — 'ret'/'t' declarations, several error branches
 * (e.g. ENOMEM/EAGAIN handling, put_module labels) and closing braces are
 * not visible.  Code lines left byte-identical.
 */
1182 __do_replace(struct net *net, const char *name, unsigned int valid_hooks,
1183 struct xt_table_info *newinfo, unsigned int num_counters,
1184 void __user *counters_ptr)
1188 struct xt_table_info *oldinfo;
1189 struct xt_counters *counters;
1190 const void *loc_cpu_old_entry;
1191 struct ip6t_entry *iter;
1194 counters = vzalloc(num_counters * sizeof(struct xt_counters));
1200 t = try_then_request_module(xt_find_table_lock(net, AF_INET6, name),
1201 "ip6table_%s", name);
1202 if (IS_ERR_OR_NULL(t)) {
1203 ret = t ? PTR_ERR(t) : -ENOENT;
1204 goto free_newinfo_counters_untrans;
1208 if (valid_hooks != t->valid_hooks) {
1209 duprintf("Valid hook crap: %08X vs %08X\n",
1210 valid_hooks, t->valid_hooks);
1215 oldinfo = xt_replace_table(t, num_counters, newinfo, &ret);
1219 /* Update module usage count based on number of rules */
1220 duprintf("do_replace: oldnum=%u, initnum=%u, newnum=%u\n",
1221 oldinfo->number, oldinfo->initial_entries, newinfo->number);
1222 if ((oldinfo->number > oldinfo->initial_entries) ||
1223 (newinfo->number <= oldinfo->initial_entries))
1225 if ((oldinfo->number > oldinfo->initial_entries) &&
1226 (newinfo->number <= oldinfo->initial_entries))
1229 /* Get the old counters, and synchronize with replace */
1230 get_counters(oldinfo, counters);
1232 /* Decrease module usage counts and free resource */
1233 loc_cpu_old_entry = oldinfo->entries[raw_smp_processor_id()];
1234 xt_entry_foreach(iter, loc_cpu_old_entry, oldinfo->size)
1235 cleanup_entry(iter, net);
1237 xt_free_table_info(oldinfo);
1238 if (copy_to_user(counters_ptr, counters,
1239 sizeof(struct xt_counters) * num_counters) != 0)
1248 free_newinfo_counters_untrans:
/*
 * IP6T_SO_SET_REPLACE handler: copy the ip6t_replace header and blob from
 * userspace, validate/translate it, then install via __do_replace().
 * NOTE(review): this extract elides interior lines (embedded numbering
 * jumps) — 'ret' declaration, several error returns (EFAULT/ENOMEM
 * paths), the free_newinfo label and closing braces are not visible.
 * Code lines left byte-identical.
 */
1255 do_replace(struct net *net, const void __user *user, unsigned int len)
1258 struct ip6t_replace tmp;
1259 struct xt_table_info *newinfo;
1260 void *loc_cpu_entry;
1261 struct ip6t_entry *iter;
1263 if (copy_from_user(&tmp, user, sizeof(tmp)) != 0)
1266 /* overflow check */
1267 if (tmp.num_counters >= INT_MAX / sizeof(struct xt_counters))
/* Force NUL termination of the user-supplied table name. */
1269 tmp.name[sizeof(tmp.name)-1] = 0;
1271 newinfo = xt_alloc_table_info(tmp.size);
1275 /* choose the copy that is on our node/cpu */
1276 loc_cpu_entry = newinfo->entries[raw_smp_processor_id()];
1277 if (copy_from_user(loc_cpu_entry, user + sizeof(tmp),
1283 ret = translate_table(net, newinfo, loc_cpu_entry, &tmp);
1287 duprintf("ip_tables: Translated table\n");
1289 ret = __do_replace(net, tmp.name, tmp.valid_hooks, newinfo,
1290 tmp.num_counters, tmp.counters);
1292 goto free_newinfo_untrans;
1295 free_newinfo_untrans:
1296 xt_entry_foreach(iter, loc_cpu_entry, newinfo->size)
1297 cleanup_entry(iter, net);
1299 xt_free_table_info(newinfo);
/*
 * IP6T_SO_SET_ADD_COUNTERS handler: copy a counter array from userspace
 * (native or compat layout) and add it to the live per-CPU counters of the
 * named table, under the write-recursion seqcount.
 * NOTE(review): this extract elides interior lines (embedded numbering
 * jumps) — 'ret'/'size'/'name'/'t'/'ptmp' declarations, the compat
 * size/name selection branches, error labels and closing braces are not
 * visible.  Code lines left byte-identical.
 */
1304 do_add_counters(struct net *net, const void __user *user, unsigned int len,
1307 unsigned int i, curcpu;
1308 struct xt_counters_info tmp;
1309 struct xt_counters *paddc;
1310 unsigned int num_counters;
1315 const struct xt_table_info *private;
1317 const void *loc_cpu_entry;
1318 struct ip6t_entry *iter;
1319 unsigned int addend;
1320 #ifdef CONFIG_COMPAT
1321 struct compat_xt_counters_info compat_tmp;
1325 size = sizeof(struct compat_xt_counters_info);
1330 size = sizeof(struct xt_counters_info);
1333 if (copy_from_user(ptmp, user, size) != 0)
1336 #ifdef CONFIG_COMPAT
1338 num_counters = compat_tmp.num_counters;
1339 name = compat_tmp.name;
1343 num_counters = tmp.num_counters;
/* Total length must be exactly header + the declared counter array. */
1347 if (len != size + num_counters * sizeof(struct xt_counters))
1350 paddc = vmalloc(len - size);
1354 if (copy_from_user(paddc, user + size, len - size) != 0) {
1359 t = xt_find_table_lock(net, AF_INET6, name);
1360 if (IS_ERR_OR_NULL(t)) {
1361 ret = t ? PTR_ERR(t) : -ENOENT;
1367 private = t->private;
1368 if (private->number != num_counters) {
1370 goto unlock_up_free;
1374 /* Choose the copy that is on our node */
1375 curcpu = smp_processor_id();
1376 addend = xt_write_recseq_begin();
1377 loc_cpu_entry = private->entries[curcpu];
1378 xt_entry_foreach(iter, loc_cpu_entry, private->size) {
1379 ADD_COUNTER(iter->counters, paddc[i].bcnt, paddc[i].pcnt);
1382 xt_write_recseq_end(addend);
1394 #ifdef CONFIG_COMPAT
/* 32-bit userspace layout of struct ip6t_replace: same fields but with
 * compat-sized pointer (counters) and a compat entry array.
 * NOTE(review): interior field lines are elided in this extract
 * (embedded numbering jumps 1396 -> 1400 and 1401 -> 1403). */
1395 struct compat_ip6t_replace {
1396 char name[XT_TABLE_MAXNAMELEN];
1400 u32 hook_entry[NF_INET_NUMHOOKS];
1401 u32 underflow[NF_INET_NUMHOOKS];
1403 compat_uptr_t counters; /* struct xt_counters * */
1404 struct compat_ip6t_entry entries[0];
/*
 * Dump one native entry to a 32-bit userspace buffer: copy the header and
 * snapshotted counters, convert every match and the target to their compat
 * forms, then rewrite target_offset/next_offset for the shrunken layout.
 * Advances *dstptr and decrements *size as it goes.
 * NOTE(review): this extract elides interior lines (embedded numbering
 * jumps) — 'ret'/'i' declarations, 'origsize = *size', the -EFAULT
 * returns and closing braces are not visible.  Code lines left
 * byte-identical.
 */
1408 compat_copy_entry_to_user(struct ip6t_entry *e, void __user **dstptr,
1409 unsigned int *size, struct xt_counters *counters,
1412 struct xt_entry_target *t;
1413 struct compat_ip6t_entry __user *ce;
1414 u_int16_t target_offset, next_offset;
1415 compat_uint_t origsize;
1416 const struct xt_entry_match *ematch;
1420 ce = (struct compat_ip6t_entry __user *)*dstptr;
1421 if (copy_to_user(ce, e, sizeof(struct ip6t_entry)) != 0 ||
1422 copy_to_user(&ce->counters, &counters[i],
1423 sizeof(counters[i])) != 0)
1426 *dstptr += sizeof(struct compat_ip6t_entry);
1427 *size -= sizeof(struct ip6t_entry) - sizeof(struct compat_ip6t_entry);
1429 xt_ematch_foreach(ematch, e) {
1430 ret = xt_compat_match_to_user(ematch, dstptr, size);
/* Offsets shrink by however much the layout conversion saved so far. */
1434 target_offset = e->target_offset - (origsize - *size);
1435 t = ip6t_get_target(e);
1436 ret = xt_compat_target_to_user(t, dstptr, size);
1439 next_offset = e->next_offset - (origsize - *size);
1440 if (put_user(target_offset, &ce->target_offset) != 0 ||
1441 put_user(next_offset, &ce->next_offset) != 0)
1447 compat_find_calc_match(struct xt_entry_match *m,
1449 const struct ip6t_ip6 *ipv6,
1450 unsigned int hookmask,
1453 struct xt_match *match;
1455 match = xt_request_find_match(NFPROTO_IPV6, m->u.user.name,
1456 m->u.user.revision);
1457 if (IS_ERR(match)) {
1458 duprintf("compat_check_calc_match: `%s' not found\n",
1460 return PTR_ERR(match);
1462 m->u.kernel.match = match;
1463 *size += xt_compat_match_offset(match);
/*
 * Drop the module references taken on every match and on the target of
 * a compat entry (counterpart of the lookups done while checking it).
 * Used on error-unwind paths; does not call ->destroy.
 */
1467 static void compat_release_entry(struct compat_ip6t_entry *e)
1469 struct xt_entry_target *t;
1470 struct xt_entry_match *ematch;
1472 /* Cleanup all matches */
1473 xt_ematch_foreach(ematch, e)
1474 module_put(ematch->u.kernel.match->me);
1475 t = compat_ip6t_get_target(e);
1476 module_put(t->u.kernel.target->me);
/*
 * Validate one compat entry: alignment, bounds against [base, limit),
 * minimum size, and resolvability of all its matches and its target.
 * Also records the native-vs-compat size delta with
 * xt_compat_add_offset() and captures hook entry/underflow positions
 * into newinfo.  On failure the already-taken module refs are dropped
 * (release_matches path).
 */
1480 check_compat_entry_size_and_hooks(struct compat_ip6t_entry *e,
1481 struct xt_table_info *newinfo,
1483 const unsigned char *base,
1484 const unsigned char *limit,
1485 const unsigned int *hook_entries,
1486 const unsigned int *underflows,
1489 struct xt_entry_match *ematch;
1490 struct xt_entry_target *t;
1491 struct xt_target *target;
1492 unsigned int entry_offset;
1496 duprintf("check_compat_entry_size_and_hooks %p\n", e);
/* Reject misaligned entries and entries whose header would overrun. */
1497 if ((unsigned long)e % __alignof__(struct compat_ip6t_entry) != 0 ||
1498 (unsigned char *)e + sizeof(struct compat_ip6t_entry) >= limit) {
1499 duprintf("Bad offset %p, limit = %p\n", e, limit);
/* An entry must at least hold its own header plus a target header. */
1503 if (e->next_offset < sizeof(struct compat_ip6t_entry) +
1504 sizeof(struct compat_xt_entry_target)) {
1505 duprintf("checking: element %p size %u\n",
1510 /* For purposes of check_entry casting the compat entry is fine */
1511 ret = check_entry((struct ip6t_entry *)e, name);
/* off accumulates how much bigger the native entry will be. */
1515 off = sizeof(struct ip6t_entry) - sizeof(struct compat_ip6t_entry);
1516 entry_offset = (void *)e - (void *)base;
1518 xt_ematch_foreach(ematch, e) {
1519 ret = compat_find_calc_match(ematch, name,
1520 &e->ipv6, e->comefrom, &off);
1522 goto release_matches;
/* Resolve and pin the target module, as done for the matches above. */
1526 t = compat_ip6t_get_target(e);
1527 target = xt_request_find_target(NFPROTO_IPV6, t->u.user.name,
1528 t->u.user.revision);
1529 if (IS_ERR(target)) {
1530 duprintf("check_compat_entry_size_and_hooks: `%s' not found\n",
1532 ret = PTR_ERR(target);
1533 goto release_matches;
1535 t->u.kernel.target = target;
1537 off += xt_compat_target_offset(target);
/* Remember this entry's compat->native offset delta for later passes. */
1539 ret = xt_compat_add_offset(AF_INET6, entry_offset, off);
1543 /* Check hooks & underflows */
1544 for (h = 0; h < NF_INET_NUMHOOKS; h++) {
1545 if ((unsigned char *)e - base == hook_entries[h])
1546 newinfo->hook_entry[h] = hook_entries[h];
1547 if ((unsigned char *)e - base == underflows[h])
1548 newinfo->underflow[h] = underflows[h];
1551 /* Clear counters and comefrom */
1552 memset(&e->counters, 0, sizeof(e->counters));
/* Error unwind: undo the target ref, then the match refs. */
1557 module_put(t->u.kernel.target->me);
1559 xt_ematch_foreach(ematch, e) {
1562 module_put(ematch->u.kernel.match->me);
/*
 * Expand one already-validated compat entry into its native layout at
 * *dstptr, converting matches and target in place and fixing up
 * target_offset/next_offset plus newinfo's hook/underflow offsets for
 * the size growth.
 */
1568 compat_copy_entry_from_user(struct compat_ip6t_entry *e, void **dstptr,
1569 unsigned int *size, const char *name,
1570 struct xt_table_info *newinfo, unsigned char *base)
1572 struct xt_entry_target *t;
1573 struct ip6t_entry *de;
1574 unsigned int origsize;
1576 struct xt_entry_match *ematch;
1580 de = (struct ip6t_entry *)*dstptr;
1581 memcpy(de, e, sizeof(struct ip6t_entry));
1582 memcpy(&de->counters, &e->counters, sizeof(e->counters));
/* Native entry is larger than the compat one, hence size grows. */
1584 *dstptr += sizeof(struct ip6t_entry);
1585 *size += sizeof(struct ip6t_entry) - sizeof(struct compat_ip6t_entry);
1587 xt_ematch_foreach(ematch, e) {
1588 ret = xt_compat_match_from_user(ematch, dstptr, size);
/* (origsize - *size) is negative growth: offsets shift forward. */
1592 de->target_offset = e->target_offset - (origsize - *size);
1593 t = compat_ip6t_get_target(e);
1594 xt_compat_target_from_user(t, dstptr, size);
1596 de->next_offset = e->next_offset - (origsize - *size);
/* Shift every hook/underflow offset that lies beyond this entry. */
1597 for (h = 0; h < NF_INET_NUMHOOKS; h++) {
1598 if ((unsigned char *)de - base < newinfo->hook_entry[h])
1599 newinfo->hook_entry[h] -= origsize - *size;
1600 if ((unsigned char *)de - base < newinfo->underflow[h])
1601 newinfo->underflow[h] -= origsize - *size;
/*
 * Run the full ->checkentry validation on a translated (now native)
 * entry: every match, then the target.  On a match failure, the
 * matches checked so far are cleaned up (cleanup_match) before
 * returning the error.
 */
1606 static int compat_check_entry(struct ip6t_entry *e, struct net *net,
1611 struct xt_mtchk_param mtpar;
1612 struct xt_entry_match *ematch;
1617 mtpar.entryinfo = &e->ipv6;
1618 mtpar.hook_mask = e->comefrom;
1619 mtpar.family = NFPROTO_IPV6;
1620 xt_ematch_foreach(ematch, e) {
1621 ret = check_match(ematch, &mtpar);
1623 goto cleanup_matches;
1627 ret = check_target(e, net, name);
1629 goto cleanup_matches;
/* Unwind: destroy only the matches whose check already succeeded. */
1633 xt_ematch_foreach(ematch, e) {
1636 cleanup_match(ematch, net);
/*
 * Translate a whole compat (32-bit) ruleset into the kernel's native
 * layout.  Phases: (1) size/offset check of every compat entry under
 * xt_compat_lock, (2) verify all valid hooks got entry/underflow
 * offsets, (3) allocate the native-size xt_table_info and copy each
 * entry across, (4) mark_source_chains + per-entry ->checkentry,
 * (5) replicate the translated blob to every other CPU.  Error paths
 * release the compat entries' module refs / cleanup checked entries.
 */
1642 translate_compat_table(struct net *net,
1644 unsigned int valid_hooks,
1645 struct xt_table_info **pinfo,
1647 unsigned int total_size,
1648 unsigned int number,
1649 unsigned int *hook_entries,
1650 unsigned int *underflows)
1653 struct xt_table_info *newinfo, *info;
1654 void *pos, *entry0, *entry1;
1655 struct compat_ip6t_entry *iter0;
1656 struct ip6t_entry *iter1;
1663 info->number = number;
1665 /* Init all hooks to impossible value. */
1666 for (i = 0; i < NF_INET_NUMHOOKS; i++) {
1667 info->hook_entry[i] = 0xFFFFFFFF;
1668 info->underflow[i] = 0xFFFFFFFF;
1671 duprintf("translate_compat_table: size %u\n", info->size);
1673 xt_compat_lock(AF_INET6);
1674 xt_compat_init_offsets(AF_INET6, number);
1675 /* Walk through entries, checking offsets. */
1676 xt_entry_foreach(iter0, entry0, total_size) {
1677 ret = check_compat_entry_size_and_hooks(iter0, info, &size,
1679 entry0 + total_size,
/* Userspace-claimed entry count must match what we walked. */
1690 duprintf("translate_compat_table: %u not %u entries\n",
1695 /* Check hooks all assigned */
1696 for (i = 0; i < NF_INET_NUMHOOKS; i++) {
1697 /* Only hooks which are valid */
1698 if (!(valid_hooks & (1 << i)))
1700 if (info->hook_entry[i] == 0xFFFFFFFF) {
1701 duprintf("Invalid hook entry %u %u\n",
1702 i, hook_entries[i]);
1705 if (info->underflow[i] == 0xFFFFFFFF) {
1706 duprintf("Invalid underflow %u %u\n",
/* Phase 3: allocate native-sized table and copy entries across. */
1713 newinfo = xt_alloc_table_info(size);
1717 newinfo->number = number;
1718 for (i = 0; i < NF_INET_NUMHOOKS; i++) {
1719 newinfo->hook_entry[i] = info->hook_entry[i];
1720 newinfo->underflow[i] = info->underflow[i];
1722 entry1 = newinfo->entries[raw_smp_processor_id()];
1725 xt_entry_foreach(iter0, entry0, total_size) {
1726 ret = compat_copy_entry_from_user(iter0, &pos, &size,
1727 name, newinfo, entry1);
/* Compat offset bookkeeping is no longer needed after the copy. */
1731 xt_compat_flush_offsets(AF_INET6);
1732 xt_compat_unlock(AF_INET6);
1737 if (!mark_source_chains(newinfo, valid_hooks, entry1))
1741 xt_entry_foreach(iter1, entry1, newinfo->size) {
1742 ret = compat_check_entry(iter1, net, name);
1746 if (strcmp(ip6t_get_target(iter1)->u.user.name,
1747 XT_ERROR_TARGET) == 0)
1748 ++newinfo->stacksize;
1752 * The first i matches need cleanup_entry (calls ->destroy)
1753 * because they had called ->check already. The other j-i
1754 * entries need only release.
1758 xt_entry_foreach(iter0, entry0, newinfo->size) {
1763 compat_release_entry(iter0);
1765 xt_entry_foreach(iter1, entry1, newinfo->size) {
1768 cleanup_entry(iter1, net);
1770 xt_free_table_info(newinfo);
1774 /* And one copy for every other CPU */
1775 for_each_possible_cpu(i)
1776 if (newinfo->entries[i] && newinfo->entries[i] != entry1)
1777 memcpy(newinfo->entries[i], entry1, newinfo->size);
1781 xt_free_table_info(info);
/* Error unwind for phase-1 failures: drop refs on compat entries. */
1785 xt_free_table_info(newinfo);
1787 xt_entry_foreach(iter0, entry0, total_size) {
1790 compat_release_entry(iter0);
1794 xt_compat_flush_offsets(AF_INET6);
1795 xt_compat_unlock(AF_INET6);
/*
 * Compat setsockopt(IP6T_SO_SET_REPLACE) handler: copy the compat
 * replace header and rule blob from userspace, translate the ruleset
 * to native layout, then swap it in via __do_replace.  On failure
 * after translation, the translated entries are cleaned up.
 */
1800 compat_do_replace(struct net *net, void __user *user, unsigned int len)
1803 struct compat_ip6t_replace tmp;
1804 struct xt_table_info *newinfo;
1805 void *loc_cpu_entry;
1806 struct ip6t_entry *iter;
1808 if (copy_from_user(&tmp, user, sizeof(tmp)) != 0)
1811 /* overflow check */
1812 if (tmp.size >= INT_MAX / num_possible_cpus())
1814 if (tmp.num_counters >= INT_MAX / sizeof(struct xt_counters))
/* Defensive NUL-termination of the user-supplied table name. */
1816 tmp.name[sizeof(tmp.name)-1] = 0;
1818 newinfo = xt_alloc_table_info(tmp.size);
1822 /* choose the copy that is on our node/cpu */
1823 loc_cpu_entry = newinfo->entries[raw_smp_processor_id()];
1824 if (copy_from_user(loc_cpu_entry, user + sizeof(tmp),
1830 ret = translate_compat_table(net, tmp.name, tmp.valid_hooks,
1831 &newinfo, &loc_cpu_entry, tmp.size,
1832 tmp.num_entries, tmp.hook_entry,
1837 duprintf("compat_do_replace: Translated table\n");
1839 ret = __do_replace(net, tmp.name, tmp.valid_hooks, newinfo,
1840 tmp.num_counters, compat_ptr(tmp.counters));
1842 goto free_newinfo_untrans;
1845 free_newinfo_untrans:
1846 xt_entry_foreach(iter, loc_cpu_entry, newinfo->size)
1847 cleanup_entry(iter, net);
1849 xt_free_table_info(newinfo);
/*
 * Compat setsockopt dispatcher: requires CAP_NET_ADMIN in the socket's
 * user namespace, then routes REPLACE / ADD_COUNTERS to the compat
 * handlers (do_add_counters is told it's a compat call via its last
 * argument).
 */
1854 compat_do_ip6t_set_ctl(struct sock *sk, int cmd, void __user *user,
1859 if (!ns_capable(sock_net(sk)->user_ns, CAP_NET_ADMIN))
1863 case IP6T_SO_SET_REPLACE:
1864 ret = compat_do_replace(sock_net(sk), user, len);
1867 case IP6T_SO_SET_ADD_COUNTERS:
1868 ret = do_add_counters(sock_net(sk), user, len, 1);
1872 duprintf("do_ip6t_set_ctl: unknown request %i\n", cmd);
/* 32-bit userspace layout of the GET_ENTRIES request/reply. */
1879 struct compat_ip6t_get_entries {
1880 char name[XT_TABLE_MAXNAMELEN];
1882 struct compat_ip6t_entry entrytable[0]; /* filled in by the kernel */
/*
 * Dump a whole table to 32-bit userspace: snapshot the counters, then
 * walk this CPU's copy of the entries and emit each one in compat
 * layout via compat_copy_entry_to_user.
 */
1886 compat_copy_entries_to_user(unsigned int total_size, struct xt_table *table,
1887 void __user *userptr)
1889 struct xt_counters *counters;
1890 const struct xt_table_info *private = table->private;
1894 const void *loc_cpu_entry;
1896 struct ip6t_entry *iter;
1898 counters = alloc_counters(table);
1899 if (IS_ERR(counters))
1900 return PTR_ERR(counters);
1902 /* choose the copy that is on our node/cpu, ...
1903 * This choice is lazy (because current thread is
1904 * allowed to migrate to another cpu)
1906 loc_cpu_entry = private->entries[raw_smp_processor_id()];
1909 xt_entry_foreach(iter, loc_cpu_entry, total_size) {
1910 ret = compat_copy_entry_to_user(iter, &pos,
1911 &size, counters, i++);
/*
 * Compat getsockopt(IP6T_SO_GET_ENTRIES): validate the user buffer
 * length against the request header plus the claimed ruleset size,
 * look the table up under xt_compat_lock, and dump it with
 * compat_copy_entries_to_user if the compat size matches.
 */
1921 compat_get_entries(struct net *net, struct compat_ip6t_get_entries __user *uptr,
1925 struct compat_ip6t_get_entries get;
1928 if (*len < sizeof(get)) {
1929 duprintf("compat_get_entries: %u < %zu\n", *len, sizeof(get));
1933 if (copy_from_user(&get, uptr, sizeof(get)) != 0)
1936 if (*len != sizeof(struct compat_ip6t_get_entries) + get.size) {
1937 duprintf("compat_get_entries: %u != %zu\n",
1938 *len, sizeof(get) + get.size);
1942 xt_compat_lock(AF_INET6);
1943 t = xt_find_table_lock(net, AF_INET6, get.name);
1944 if (!IS_ERR_OR_NULL(t)) {
1945 const struct xt_table_info *private = t->private;
1946 struct xt_table_info info;
1947 duprintf("t->private->number = %u\n", private->number);
/* compat_table_info computes the compat-layout size of the table. */
1948 ret = compat_table_info(private, &info);
1949 if (!ret && get.size == info.size) {
1950 ret = compat_copy_entries_to_user(private->size,
1951 t, uptr->entrytable);
1953 duprintf("compat_get_entries: I've got %u not %u!\n",
1954 private->size, get.size);
1957 xt_compat_flush_offsets(AF_INET6);
/* t may be an ERR_PTR or NULL here; map both to an errno. */
1961 ret = t ? PTR_ERR(t) : -ENOENT;
1963 xt_compat_unlock(AF_INET6);
/* Forward declaration: the native handler is the fallback below. */
1967 static int do_ip6t_get_ctl(struct sock *, int, void __user *, int *);
/*
 * Compat getsockopt dispatcher: GET_INFO/GET_ENTRIES go to the compat
 * paths; anything else falls through to the native do_ip6t_get_ctl.
 */
1970 compat_do_ip6t_get_ctl(struct sock *sk, int cmd, void __user *user, int *len)
1974 if (!ns_capable(sock_net(sk)->user_ns, CAP_NET_ADMIN))
1978 case IP6T_SO_GET_INFO:
1979 ret = get_info(sock_net(sk), user, len, 1);
1981 case IP6T_SO_GET_ENTRIES:
1982 ret = compat_get_entries(sock_net(sk), user, len);
1985 ret = do_ip6t_get_ctl(sk, cmd, user, len);
/*
 * Native setsockopt dispatcher: CAP_NET_ADMIN gated, routes REPLACE
 * and ADD_COUNTERS (compat flag 0) to their handlers.
 */
1992 do_ip6t_set_ctl(struct sock *sk, int cmd, void __user *user, unsigned int len)
1996 if (!ns_capable(sock_net(sk)->user_ns, CAP_NET_ADMIN))
2000 case IP6T_SO_SET_REPLACE:
2001 ret = do_replace(sock_net(sk), user, len);
2004 case IP6T_SO_SET_ADD_COUNTERS:
2005 ret = do_add_counters(sock_net(sk), user, len, 0);
2009 duprintf("do_ip6t_set_ctl: unknown request %i\n", cmd);
/*
 * Native getsockopt dispatcher: GET_INFO, GET_ENTRIES, and match/
 * target revision queries.  Revision lookups may trigger a module
 * autoload ("ip6t_<name>") via try_then_request_module.
 */
2017 do_ip6t_get_ctl(struct sock *sk, int cmd, void __user *user, int *len)
2021 if (!ns_capable(sock_net(sk)->user_ns, CAP_NET_ADMIN))
2025 case IP6T_SO_GET_INFO:
2026 ret = get_info(sock_net(sk), user, len, 0);
2029 case IP6T_SO_GET_ENTRIES:
2030 ret = get_entries(sock_net(sk), user, len);
2033 case IP6T_SO_GET_REVISION_MATCH:
2034 case IP6T_SO_GET_REVISION_TARGET: {
2035 struct xt_get_revision rev;
2038 if (*len != sizeof(rev)) {
2042 if (copy_from_user(&rev, user, sizeof(rev)) != 0) {
/* Defensive NUL-termination of the user-supplied name. */
2046 rev.name[sizeof(rev.name)-1] = 0;
2048 if (cmd == IP6T_SO_GET_REVISION_TARGET)
2053 try_then_request_module(xt_find_revision(AF_INET6, rev.name,
2056 "ip6t_%s", rev.name);
2061 duprintf("do_ip6t_get_ctl: unknown request %i\n", cmd);
/*
 * Register an ip6tables table in a network namespace: allocate a
 * per-CPU xt_table_info sized for the initial ruleset, copy the
 * template entries in, translate/validate them, and hand the result
 * to xt_register_table.  Returns the live table or an ERR_PTR; the
 * table_info is freed on any failure.
 */
2068 struct xt_table *ip6t_register_table(struct net *net,
2069 const struct xt_table *table,
2070 const struct ip6t_replace *repl)
2073 struct xt_table_info *newinfo;
2074 struct xt_table_info bootstrap = {0};
2075 void *loc_cpu_entry;
2076 struct xt_table *new_table;
2078 newinfo = xt_alloc_table_info(repl->size);
2084 /* choose the copy on our node/cpu, but dont care about preemption */
2085 loc_cpu_entry = newinfo->entries[raw_smp_processor_id()];
2086 memcpy(loc_cpu_entry, repl->entries, repl->size);
2088 ret = translate_table(net, newinfo, loc_cpu_entry, repl);
2092 new_table = xt_register_table(net, table, &bootstrap, newinfo);
2093 if (IS_ERR(new_table)) {
2094 ret = PTR_ERR(new_table);
2100 xt_free_table_info(newinfo);
2102 return ERR_PTR(ret);
/*
 * Tear down a registered table: unhook it from x_tables, run
 * cleanup_entry on every rule of this CPU's copy (drops match/target
 * module refs and calls ->destroy), release the table module's ref if
 * user rules had been added beyond the initial ones, and free the
 * per-CPU table_info.
 */
2105 void ip6t_unregister_table(struct net *net, struct xt_table *table)
2107 struct xt_table_info *private;
2108 void *loc_cpu_entry;
2109 struct module *table_owner = table->me;
2110 struct ip6t_entry *iter;
2112 private = xt_unregister_table(table);
2114 /* Decrease module usage counts and free resources */
2115 loc_cpu_entry = private->entries[raw_smp_processor_id()];
2116 xt_entry_foreach(iter, loc_cpu_entry, private->size)
2117 cleanup_entry(iter, net);
2118 if (private->number > private->initial_entries)
2119 module_put(table_owner);
2120 xt_free_table_info(private);
2123 /* Returns 1 if the type and code is matched by the range, 0 otherwise */
/* The result is XORed with the invert flag by the (not shown) tail. */
2125 icmp6_type_code_match(u_int8_t test_type, u_int8_t min_code, u_int8_t max_code,
2126 u_int8_t type, u_int8_t code,
2129 return (type == test_type && code >= min_code && code <= max_code)
/*
 * xt_match handler for the built-in "icmp6" match: pull the ICMPv6
 * header out of the skb and compare type/code against the rule's
 * range (honoring the invert flag).  Fragments never match; a packet
 * too short to carry the header is hot-dropped.
 */
2134 icmp6_match(const struct sk_buff *skb, struct xt_action_param *par)
2136 const struct icmp6hdr *ic;
2137 struct icmp6hdr _icmph;
2138 const struct ip6t_icmp *icmpinfo = par->matchinfo;
2140 /* Must not be a fragment. */
2141 if (par->fragoff != 0)
2144 ic = skb_header_pointer(skb, par->thoff, sizeof(_icmph), &_icmph);
2146 /* We've been asked to examine this packet, and we
2147 * can't. Hence, no choice but to drop.
2149 duprintf("Dropping evil ICMP tinygram.\n");
2150 par->hotdrop = true;
2154 return icmp6_type_code_match(icmpinfo->type,
2157 ic->icmp6_type, ic->icmp6_code,
2158 !!(icmpinfo->invflags&IP6T_ICMP_INV));
2161 /* Called when user tries to insert an entry of this type. */
2162 static int icmp6_checkentry(const struct xt_mtchk_param *par)
2164 const struct ip6t_icmp *icmpinfo = par->matchinfo;
2166 /* Must specify no unknown invflags */
2167 return (icmpinfo->invflags & ~IP6T_ICMP_INV) ? -EINVAL : 0;
2170 /* The built-in targets: standard (NULL) and error. */
2171 static struct xt_target ip6t_builtin_tg[] __read_mostly = {
/* Standard target: verdict is a plain int (compat: compat_int_t). */
2173 .name = XT_STANDARD_TARGET,
2174 .targetsize = sizeof(int),
2175 .family = NFPROTO_IPV6,
2176 #ifdef CONFIG_COMPAT
2177 .compatsize = sizeof(compat_int_t),
2178 .compat_from_user = compat_standard_from_user,
2179 .compat_to_user = compat_standard_to_user,
/* Error target: carries a chain/error name, handled by ip6t_error. */
2183 .name = XT_ERROR_TARGET,
2184 .target = ip6t_error,
2185 .targetsize = XT_FUNCTION_MAXNAMELEN,
2186 .family = NFPROTO_IPV6,
/* sockopt registration: wires the IP6T_SO_* range to the handlers above. */
2190 static struct nf_sockopt_ops ip6t_sockopts = {
2192 .set_optmin = IP6T_BASE_CTL,
2193 .set_optmax = IP6T_SO_SET_MAX+1,
2194 .set = do_ip6t_set_ctl,
2195 #ifdef CONFIG_COMPAT
2196 .compat_set = compat_do_ip6t_set_ctl,
2198 .get_optmin = IP6T_BASE_CTL,
2199 .get_optmax = IP6T_SO_GET_MAX+1,
2200 .get = do_ip6t_get_ctl,
2201 #ifdef CONFIG_COMPAT
2202 .compat_get = compat_do_ip6t_get_ctl,
2204 .owner = THIS_MODULE,
/* Built-in match: the "icmp6" type/code matcher defined above. */
2207 static struct xt_match ip6t_builtin_mt[] __read_mostly = {
2210 .match = icmp6_match,
2211 .matchsize = sizeof(struct ip6t_icmp),
2212 .checkentry = icmp6_checkentry,
2213 .proto = IPPROTO_ICMPV6,
2214 .family = NFPROTO_IPV6,
/* Per-netns init: set up the IPv6 x_tables state (proc entries etc.). */
2218 static int __net_init ip6_tables_net_init(struct net *net)
2220 return xt_proto_init(net, NFPROTO_IPV6);
/* Per-netns teardown: mirror of ip6_tables_net_init. */
2223 static void __net_exit ip6_tables_net_exit(struct net *net)
2225 xt_proto_fini(net, NFPROTO_IPV6);
/* pernet hooks so every network namespace gets its own table state. */
2228 static struct pernet_operations ip6_tables_net_ops = {
2229 .init = ip6_tables_net_init,
2230 .exit = ip6_tables_net_exit,
/*
 * Module init: register pernet ops, the built-in targets and matches,
 * then the sockopt interface.  Each failure unwinds the registrations
 * done so far, in reverse order.
 */
2233 static int __init ip6_tables_init(void)
2237 ret = register_pernet_subsys(&ip6_tables_net_ops);
2241 /* No one else will be downing sem now, so we won't sleep */
2242 ret = xt_register_targets(ip6t_builtin_tg, ARRAY_SIZE(ip6t_builtin_tg));
2245 ret = xt_register_matches(ip6t_builtin_mt, ARRAY_SIZE(ip6t_builtin_mt));
2249 /* Register setsockopt */
2250 ret = nf_register_sockopt(&ip6t_sockopts);
2254 pr_info("(C) 2000-2006 Netfilter Core Team\n");
/* Error unwind labels (reverse order of registration). */
2258 xt_unregister_matches(ip6t_builtin_mt, ARRAY_SIZE(ip6t_builtin_mt));
2260 xt_unregister_targets(ip6t_builtin_tg, ARRAY_SIZE(ip6t_builtin_tg));
2262 unregister_pernet_subsys(&ip6_tables_net_ops);
/* Module exit: unregister everything ip6_tables_init set up, in reverse. */
2267 static void __exit ip6_tables_fini(void)
2269 nf_unregister_sockopt(&ip6t_sockopts);
2271 xt_unregister_matches(ip6t_builtin_mt, ARRAY_SIZE(ip6t_builtin_mt));
2272 xt_unregister_targets(ip6t_builtin_tg, ARRAY_SIZE(ip6t_builtin_tg));
2273 unregister_pernet_subsys(&ip6_tables_net_ops);
/* Public API for other netfilter modules, plus module entry points. */
2276 EXPORT_SYMBOL(ip6t_register_table);
2277 EXPORT_SYMBOL(ip6t_unregister_table);
2278 EXPORT_SYMBOL(ip6t_do_table);
2280 module_init(ip6_tables_init);
2281 module_exit(ip6_tables_fini);