// SPDX-License-Identifier: GPL-2.0-only
/*
 * Packet matching code.
 *
 * Copyright (C) 1999 Paul `Rusty' Russell & Michael J. Neuling
 * Copyright (C) 2000-2005 Netfilter Core Team <coreteam@netfilter.org>
 */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/cache.h>
#include <linux/capability.h>
#include <linux/skbuff.h>
#include <linux/kmod.h>
#include <linux/vmalloc.h>
#include <linux/netdevice.h>
#include <linux/module.h>
#include <linux/icmp.h>
#include <net/ip.h>
#include <net/compat.h>
#include <linux/uaccess.h>
#include <linux/mutex.h>
#include <linux/proc_fs.h>
#include <linux/err.h>
#include <linux/cpumask.h>

#include <linux/netfilter/x_tables.h>
#include <linux/netfilter_ipv4/ip_tables.h>
#include <net/netfilter/nf_log.h>
#include "../../netfilter/xt_repldata.h"
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Netfilter Core Team <coreteam@netfilter.org>");
MODULE_DESCRIPTION("IPv4 packet filter");
MODULE_ALIAS("ipt_icmp");
void *ipt_alloc_initial_table(const struct xt_table *info)
{
	return xt_alloc_initial_table(ipt, IPT);
}
EXPORT_SYMBOL_GPL(ipt_alloc_initial_table);
/* Returns whether matches rule or not. */
/* Performance critical - called for every packet */
static inline bool
ip_packet_match(const struct iphdr *ip,
		const char *indev,
		const char *outdev,
		const struct ipt_ip *ipinfo,
		int isfrag)
{
	unsigned long ret;

	if (NF_INVF(ipinfo, IPT_INV_SRCIP,
		    (ip->saddr & ipinfo->smsk.s_addr) != ipinfo->src.s_addr) ||
	    NF_INVF(ipinfo, IPT_INV_DSTIP,
		    (ip->daddr & ipinfo->dmsk.s_addr) != ipinfo->dst.s_addr))
		return false;

	ret = ifname_compare_aligned(indev, ipinfo->iniface, ipinfo->iniface_mask);
	if (NF_INVF(ipinfo, IPT_INV_VIA_IN, ret != 0))
		return false;

	ret = ifname_compare_aligned(outdev, ipinfo->outiface, ipinfo->outiface_mask);
	if (NF_INVF(ipinfo, IPT_INV_VIA_OUT, ret != 0))
		return false;

	/* Check specific protocol */
	if (ipinfo->proto &&
	    NF_INVF(ipinfo, IPT_INV_PROTO, ip->protocol != ipinfo->proto))
		return false;

	/* If we have a fragment rule but the packet is not a fragment
	 * then we return zero */
	if (NF_INVF(ipinfo, IPT_INV_FRAG,
		    (ipinfo->flags & IPT_F_FRAG) && !isfrag))
		return false;

	return true;
}
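
/* Usage sketch (illustrative, not part of the original source):
 * NF_INVF(ipinfo, flag, cond) evaluates to "cond XOR (invflags & flag)",
 * so a rule built from "iptables -A INPUT ! -s 10.0.0.0/8 ..." has
 * IPT_INV_SRCIP set and matches exactly when
 *
 *	(ip->saddr & htonl(0xff000000)) != htonl(0x0a000000)
 *
 * comes out false, i.e. the source-address test above with its sense
 * inverted.
 */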
static bool
ip_checkentry(const struct ipt_ip *ip)
{
	if (ip->flags & ~IPT_F_MASK)
		return false;
	if (ip->invflags & ~IPT_INV_MASK)
		return false;
	return true;
}
static unsigned int
ipt_error(struct sk_buff *skb, const struct xt_action_param *par)
{
	net_info_ratelimited("error: `%s'\n", (const char *)par->targinfo);

	return NF_DROP;
}
/* Performance critical */
static inline struct ipt_entry *
get_entry(const void *base, unsigned int offset)
{
	return (struct ipt_entry *)(base + offset);
}
/* All zeroes == unconditional rule. */
/* Mildly perf critical (only if packet tracing is on) */
static inline bool unconditional(const struct ipt_entry *e)
{
	static const struct ipt_ip uncond;

	return e->target_offset == sizeof(struct ipt_entry) &&
	       memcmp(&e->ip, &uncond, sizeof(uncond)) == 0;
}
/* for const-correctness */
static inline const struct xt_entry_target *
ipt_get_target_c(const struct ipt_entry *e)
{
	return ipt_get_target((struct ipt_entry *)e);
}
#if IS_ENABLED(CONFIG_NETFILTER_XT_TARGET_TRACE)
static const char *const hooknames[] = {
	[NF_INET_PRE_ROUTING]	= "PREROUTING",
	[NF_INET_LOCAL_IN]	= "INPUT",
	[NF_INET_FORWARD]	= "FORWARD",
	[NF_INET_LOCAL_OUT]	= "OUTPUT",
	[NF_INET_POST_ROUTING]	= "POSTROUTING",
};

enum nf_ip_trace_comments {
	NF_IP_TRACE_COMMENT_RULE,
	NF_IP_TRACE_COMMENT_RETURN,
	NF_IP_TRACE_COMMENT_POLICY,
};

static const char *const comments[] = {
	[NF_IP_TRACE_COMMENT_RULE]	= "rule",
	[NF_IP_TRACE_COMMENT_RETURN]	= "return",
	[NF_IP_TRACE_COMMENT_POLICY]	= "policy",
};

static const struct nf_loginfo trace_loginfo = {
	.type = NF_LOG_TYPE_LOG,
	.u = {
		.log = {
			.level = 4,
			.logflags = NF_LOG_DEFAULT_MASK,
		},
	},
};
/* Mildly perf critical (only if packet tracing is on) */
static inline int
get_chainname_rulenum(const struct ipt_entry *s, const struct ipt_entry *e,
		      const char *hookname, const char **chainname,
		      const char **comment, unsigned int *rulenum)
{
	const struct xt_standard_target *t = (void *)ipt_get_target_c(s);

	if (strcmp(t->target.u.kernel.target->name, XT_ERROR_TARGET) == 0) {
		/* Head of user chain: ERROR target with chainname */
		*chainname = t->target.data;
		(*rulenum) = 0;
	} else if (s == e) {
		(*rulenum)++;

		if (unconditional(s) &&
		    strcmp(t->target.u.kernel.target->name,
			   XT_STANDARD_TARGET) == 0 &&
		    t->verdict < 0) {
			/* Tail of chains: STANDARD target (return/policy) */
			*comment = *chainname == hookname
				? comments[NF_IP_TRACE_COMMENT_POLICY]
				: comments[NF_IP_TRACE_COMMENT_RETURN];
		}
		return 1;
	} else
		(*rulenum)++;

	return 0;
}
static void trace_packet(struct net *net,
			 const struct sk_buff *skb,
			 unsigned int hook,
			 const struct net_device *in,
			 const struct net_device *out,
			 const char *tablename,
			 const struct xt_table_info *private,
			 const struct ipt_entry *e)
{
	const struct ipt_entry *root;
	const char *hookname, *chainname, *comment;
	const struct ipt_entry *iter;
	unsigned int rulenum = 0;

	root = get_entry(private->entries, private->hook_entry[hook]);

	hookname = chainname = hooknames[hook];
	comment = comments[NF_IP_TRACE_COMMENT_RULE];

	xt_entry_foreach(iter, root, private->size - private->hook_entry[hook])
		if (get_chainname_rulenum(iter, e, hookname,
		    &chainname, &comment, &rulenum) != 0)
			break;

	nf_log_trace(net, AF_INET, hook, skb, in, out, &trace_loginfo,
		     "TRACE: %s:%s:%s:%u ",
		     tablename, chainname, comment, rulenum);
}
#endif
static inline
struct ipt_entry *ipt_next_entry(const struct ipt_entry *entry)
{
	return (void *)entry + entry->next_offset;
}
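
/* A minimal iteration sketch (illustrative, assuming a validated
 * ruleset blob of "size" bytes starting at "base" -- hypothetical
 * local names, not from this file):
 *
 *	struct ipt_entry *e = get_entry(base, 0);
 *
 *	while ((void *)e < base + size) {
 *		... inspect e->ip, ipt_get_target(e) ...
 *		e = ipt_next_entry(e);
 *	}
 *
 * The xt_entry_foreach() macro used throughout this file expands to
 * essentially this loop over next_offset-chained entries.
 */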
/* Returns one of the generic firewall policies, like NF_ACCEPT. */
unsigned int
ipt_do_table(struct sk_buff *skb,
	     const struct nf_hook_state *state,
	     struct xt_table *table)
{
	unsigned int hook = state->hook;
	static const char nulldevname[IFNAMSIZ] __attribute__((aligned(sizeof(long))));
	const struct iphdr *ip;
	/* Initializing verdict to NF_DROP keeps gcc happy. */
	unsigned int verdict = NF_DROP;
	const char *indev, *outdev;
	const void *table_base;
	struct ipt_entry *e, **jumpstack;
	unsigned int stackidx, cpu;
	const struct xt_table_info *private;
	struct xt_action_param acpar;
	unsigned int addend;

	/* Initialization */
	stackidx = 0;
	ip = ip_hdr(skb);
	indev = state->in ? state->in->name : nulldevname;
	outdev = state->out ? state->out->name : nulldevname;
	/* We handle fragments by dealing with the first fragment as
	 * if it was a normal packet. All other fragments are treated
	 * normally, except that they will NEVER match rules that ask
	 * things we don't know, ie. tcp syn flag or ports). If the
	 * rule is also a fragment-specific rule, non-fragments won't
	 * match it. */
	acpar.fragoff = ntohs(ip->frag_off) & IP_OFFSET;
	acpar.thoff   = ip_hdrlen(skb);
	acpar.hotdrop = false;
	acpar.state   = state;

	WARN_ON(!(table->valid_hooks & (1 << hook)));
	local_bh_disable();
	addend = xt_write_recseq_begin();
	private = READ_ONCE(table->private); /* Address dependency. */
	cpu        = smp_processor_id();
	table_base = private->entries;
	jumpstack  = (struct ipt_entry **)private->jumpstack[cpu];

	/* Switch to alternate jumpstack if we're being invoked via TEE.
	 * TEE issues XT_CONTINUE verdict on original skb so we must not
	 * clobber the jumpstack.
	 *
	 * For recursion via REJECT or SYNPROXY the stack will be clobbered
	 * but it is no problem since absolute verdict is issued by these.
	 */
	if (static_key_false(&xt_tee_enabled))
		jumpstack += private->stacksize * __this_cpu_read(nf_skb_duplicated);

	e = get_entry(table_base, private->hook_entry[hook]);

	do {
		const struct xt_entry_target *t;
		const struct xt_entry_match *ematch;
		struct xt_counters *counter;

		WARN_ON(!e);
		if (!ip_packet_match(ip, indev, outdev,
		    &e->ip, acpar.fragoff)) {
 no_match:
			e = ipt_next_entry(e);
			continue;
		}

		xt_ematch_foreach(ematch, e) {
			acpar.match     = ematch->u.kernel.match;
			acpar.matchinfo = ematch->data;
			if (!acpar.match->match(skb, &acpar))
				goto no_match;
		}

		counter = xt_get_this_cpu_counter(&e->counters);
		ADD_COUNTER(*counter, skb->len, 1);

		t = ipt_get_target_c(e);
		WARN_ON(!t->u.kernel.target);

#if IS_ENABLED(CONFIG_NETFILTER_XT_TARGET_TRACE)
		/* The packet is traced: log it */
		if (unlikely(skb->nf_trace))
			trace_packet(state->net, skb, hook, state->in,
				     state->out, table->name, private, e);
#endif
		/* Standard target? */
		if (!t->u.kernel.target->target) {
			int v;

			v = ((struct xt_standard_target *)t)->verdict;
			if (v < 0) {
				/* Pop from stack? */
				if (v != XT_RETURN) {
					verdict = (unsigned int)(-v) - 1;
					break;
				}
				if (stackidx == 0) {
					e = get_entry(table_base,
						      private->underflow[hook]);
				} else {
					e = jumpstack[--stackidx];
					e = ipt_next_entry(e);
				}
				continue;
			}
			if (table_base + v != ipt_next_entry(e) &&
			    !(e->ip.flags & IPT_F_GOTO)) {
				if (unlikely(stackidx >= private->stacksize)) {
					verdict = NF_DROP;
					break;
				}
				jumpstack[stackidx++] = e;
			}

			e = get_entry(table_base, v);
			continue;
		}

		acpar.target   = t->u.kernel.target;
		acpar.targinfo = t->data;

		verdict = t->u.kernel.target->target(skb, &acpar);
		if (verdict == XT_CONTINUE) {
			/* Target might have changed stuff. */
			ip = ip_hdr(skb);
			e = ipt_next_entry(e);
		} else {
			/* Verdict */
			break;
		}
	} while (!acpar.hotdrop);

	xt_write_recseq_end(addend);
	local_bh_enable();

	if (acpar.hotdrop)
		return NF_DROP;
	else
		return verdict;
}
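
/* Verdict encoding sketch (illustrative): the standard target stores
 * an absolute verdict as "-NF_<verdict> - 1", XT_RETURN for return,
 * and a non-negative byte offset into the blob for a jump.  So for
 * v < 0 above:
 *
 *	v == -NF_DROP - 1   (-1)  ->  (unsigned int)(-v) - 1 == NF_DROP   (0)
 *	v == -NF_ACCEPT - 1 (-2)  ->  (unsigned int)(-v) - 1 == NF_ACCEPT (1)
 *
 * which is exactly the conversion performed in the "Pop from stack?"
 * branch.
 */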
/* Figures out from what hook each rule can be called: returns 0 if
   there are loops.  Puts hook bitmask in comefrom. */
static int
mark_source_chains(const struct xt_table_info *newinfo,
		   unsigned int valid_hooks, void *entry0,
		   unsigned int *offsets)
{
	unsigned int hook;

	/* No recursion; use packet counter to save back ptrs (reset
	   to 0 as we leave), and comefrom to save source hook bitmask */
	for (hook = 0; hook < NF_INET_NUMHOOKS; hook++) {
		unsigned int pos = newinfo->hook_entry[hook];
		struct ipt_entry *e = entry0 + pos;

		if (!(valid_hooks & (1 << hook)))
			continue;

		/* Set initial back pointer. */
		e->counters.pcnt = pos;

		for (;;) {
			const struct xt_standard_target *t
				= (void *)ipt_get_target_c(e);
			int visited = e->comefrom & (1 << hook);

			if (e->comefrom & (1 << NF_INET_NUMHOOKS))
				return 0;

			e->comefrom |= ((1 << hook) | (1 << NF_INET_NUMHOOKS));

			/* Unconditional return/END. */
			if ((unconditional(e) &&
			     (strcmp(t->target.u.user.name,
				     XT_STANDARD_TARGET) == 0) &&
			     t->verdict < 0) || visited) {
				unsigned int oldpos, size;

				/* Return: backtrack through the last
				   big jump. */
				do {
					e->comefrom ^= (1<<NF_INET_NUMHOOKS);
					oldpos = pos;
					pos = e->counters.pcnt;
					e->counters.pcnt = 0;

					/* We're at the start. */
					if (pos == oldpos)
						goto next;

					e = entry0 + pos;
				} while (oldpos == pos + e->next_offset);

				/* Move along one */
				size = e->next_offset;
				e = entry0 + pos + size;
				if (pos + size >= newinfo->size)
					return 0;
				e->counters.pcnt = pos;
				pos += size;
			} else {
				int newpos = t->verdict;

				if (strcmp(t->target.u.user.name,
					   XT_STANDARD_TARGET) == 0 &&
				    newpos >= 0) {
					/* This a jump; chase it. */
					if (!xt_find_jump_offset(offsets, newpos,
								 newinfo->number))
						return 0;
				} else {
					/* ... this is a fallthru */
					newpos = pos + e->next_offset;
					if (newpos >= newinfo->size)
						return 0;
				}
				e = entry0 + newpos;
				e->counters.pcnt = pos;
				pos = newpos;
			}
		}
next:		;
	}
	return 1;
}
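
/* Worked example (illustrative): for a ruleset shaped like
 *
 *	chain INPUT:  rule 0: -j user1
 *	chain user1:  rule 1: -j user2
 *	chain user2:  rule 2: -j user1    <- loop
 *
 * the walk from INPUT marks each entry on the current path with the
 * (1 << NF_INET_NUMHOOKS) bit.  Re-entering rule 1 from rule 2 finds
 * that bit still set and the function returns 0; translate_table()
 * then fails the whole table load with -ELOOP.
 */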
static void cleanup_match(struct xt_entry_match *m, struct net *net)
{
	struct xt_mtdtor_param par;

	par.net       = net;
	par.match     = m->u.kernel.match;
	par.matchinfo = m->data;
	par.family    = NFPROTO_IPV4;
	if (par.match->destroy != NULL)
		par.match->destroy(&par);
	module_put(par.match->me);
}
static int
check_match(struct xt_entry_match *m, struct xt_mtchk_param *par)
{
	const struct ipt_ip *ip = par->entryinfo;

	par->match     = m->u.kernel.match;
	par->matchinfo = m->data;

	return xt_check_match(par, m->u.match_size - sizeof(*m),
			      ip->proto, ip->invflags & IPT_INV_PROTO);
}
static int
find_check_match(struct xt_entry_match *m, struct xt_mtchk_param *par)
{
	struct xt_match *match;
	int ret;

	match = xt_request_find_match(NFPROTO_IPV4, m->u.user.name,
				      m->u.user.revision);
	if (IS_ERR(match))
		return PTR_ERR(match);
	m->u.kernel.match = match;

	ret = check_match(m, par);
	if (ret)
		goto err;

	return 0;
err:
	module_put(m->u.kernel.match->me);
	return ret;
}
static int check_target(struct ipt_entry *e, struct net *net, const char *name)
{
	struct xt_entry_target *t = ipt_get_target(e);
	struct xt_tgchk_param par = {
		.net       = net,
		.table     = name,
		.entryinfo = e,
		.target    = t->u.kernel.target,
		.targinfo  = t->data,
		.hook_mask = e->comefrom,
		.family    = NFPROTO_IPV4,
	};

	return xt_check_target(&par, t->u.target_size - sizeof(*t),
			       e->ip.proto, e->ip.invflags & IPT_INV_PROTO);
}
static int
find_check_entry(struct ipt_entry *e, struct net *net, const char *name,
		 unsigned int size,
		 struct xt_percpu_counter_alloc_state *alloc_state)
{
	struct xt_entry_target *t;
	struct xt_target *target;
	int ret;
	unsigned int j;
	struct xt_mtchk_param mtpar;
	struct xt_entry_match *ematch;

	if (!xt_percpu_counter_alloc(alloc_state, &e->counters))
		return -ENOMEM;

	j = 0;
	memset(&mtpar, 0, sizeof(mtpar));
	mtpar.net	= net;
	mtpar.table     = name;
	mtpar.entryinfo = &e->ip;
	mtpar.hook_mask = e->comefrom;
	mtpar.family    = NFPROTO_IPV4;
	xt_ematch_foreach(ematch, e) {
		ret = find_check_match(ematch, &mtpar);
		if (ret != 0)
			goto cleanup_matches;
		++j;
	}

	t = ipt_get_target(e);
	target = xt_request_find_target(NFPROTO_IPV4, t->u.user.name,
					t->u.user.revision);
	if (IS_ERR(target)) {
		ret = PTR_ERR(target);
		goto cleanup_matches;
	}
	t->u.kernel.target = target;

	ret = check_target(e, net, name);
	if (ret)
		goto err;

	return 0;
 err:
	module_put(t->u.kernel.target->me);
 cleanup_matches:
	xt_ematch_foreach(ematch, e) {
		if (j-- == 0)
			break;
		cleanup_match(ematch, net);
	}

	xt_percpu_counter_free(&e->counters);

	return ret;
}
static bool check_underflow(const struct ipt_entry *e)
{
	const struct xt_entry_target *t;
	unsigned int verdict;

	if (!unconditional(e))
		return false;
	t = ipt_get_target_c(e);
	if (strcmp(t->u.user.name, XT_STANDARD_TARGET) != 0)
		return false;
	verdict = ((struct xt_standard_target *)t)->verdict;
	verdict = -verdict - 1;
	return verdict == NF_DROP || verdict == NF_ACCEPT;
}
static int
check_entry_size_and_hooks(struct ipt_entry *e,
			   struct xt_table_info *newinfo,
			   const unsigned char *base,
			   const unsigned char *limit,
			   const unsigned int *hook_entries,
			   const unsigned int *underflows,
			   unsigned int valid_hooks)
{
	unsigned int h;
	int err;

	if ((unsigned long)e % __alignof__(struct ipt_entry) != 0 ||
	    (unsigned char *)e + sizeof(struct ipt_entry) >= limit ||
	    (unsigned char *)e + e->next_offset > limit)
		return -EINVAL;

	if (e->next_offset
	    < sizeof(struct ipt_entry) + sizeof(struct xt_entry_target))
		return -EINVAL;

	if (!ip_checkentry(&e->ip))
		return -EINVAL;

	err = xt_check_entry_offsets(e, e->elems, e->target_offset,
				     e->next_offset);
	if (err)
		return err;

	/* Check hooks & underflows */
	for (h = 0; h < NF_INET_NUMHOOKS; h++) {
		if (!(valid_hooks & (1 << h)))
			continue;
		if ((unsigned char *)e - base == hook_entries[h])
			newinfo->hook_entry[h] = hook_entries[h];
		if ((unsigned char *)e - base == underflows[h]) {
			if (!check_underflow(e))
				return -EINVAL;

			newinfo->underflow[h] = underflows[h];
		}
	}

	/* Clear counters and comefrom */
	e->counters = ((struct xt_counters) { 0, 0 });
	e->comefrom = 0;
	return 0;
}
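
/* On-the-wire layout sketch (illustrative) of one variable-size entry
 * as validated above:
 *
 *	+--------------------------+ <- e, aligned to __alignof__(struct ipt_entry)
 *	| struct ipt_entry         |   fixed IP header tests in e->ip
 *	+--------------------------+ <- e->elems
 *	| 0..n struct xt_entry_match |  each u.match_size bytes
 *	+--------------------------+ <- (void *)e + e->target_offset
 *	| struct xt_entry_target   |   u.target_size bytes
 *	+--------------------------+ <- (void *)e + e->next_offset
 */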
static void
cleanup_entry(struct ipt_entry *e, struct net *net)
{
	struct xt_tgdtor_param par;
	struct xt_entry_target *t;
	struct xt_entry_match *ematch;

	/* Cleanup all matches */
	xt_ematch_foreach(ematch, e)
		cleanup_match(ematch, net);
	t = ipt_get_target(e);

	par.net      = net;
	par.target   = t->u.kernel.target;
	par.targinfo = t->data;
	par.family   = NFPROTO_IPV4;
	if (par.target->destroy != NULL)
		par.target->destroy(&par);
	module_put(par.target->me);
	xt_percpu_counter_free(&e->counters);
}
/* Checks and translates the user-supplied table segment (held in
   newinfo) */
static int
translate_table(struct net *net, struct xt_table_info *newinfo, void *entry0,
		const struct ipt_replace *repl)
{
	struct xt_percpu_counter_alloc_state alloc_state = { 0 };
	struct ipt_entry *iter;
	unsigned int *offsets;
	unsigned int i;
	int ret = 0;

	newinfo->size = repl->size;
	newinfo->number = repl->num_entries;

	/* Init all hooks to impossible value. */
	for (i = 0; i < NF_INET_NUMHOOKS; i++) {
		newinfo->hook_entry[i] = 0xFFFFFFFF;
		newinfo->underflow[i] = 0xFFFFFFFF;
	}

	offsets = xt_alloc_entry_offsets(newinfo->number);
	if (!offsets)
		return -ENOMEM;
	i = 0;
	/* Walk through entries, checking offsets. */
	xt_entry_foreach(iter, entry0, newinfo->size) {
		ret = check_entry_size_and_hooks(iter, newinfo, entry0,
						 entry0 + repl->size,
						 repl->hook_entry,
						 repl->underflow,
						 repl->valid_hooks);
		if (ret != 0)
			goto out_free;
		if (i < repl->num_entries)
			offsets[i] = (void *)iter - entry0;
		++i;
		if (strcmp(ipt_get_target(iter)->u.user.name,
		    XT_ERROR_TARGET) == 0)
			++newinfo->stacksize;
	}

	ret = -EINVAL;
	if (i != repl->num_entries)
		goto out_free;

	ret = xt_check_table_hooks(newinfo, repl->valid_hooks);
	if (ret)
		goto out_free;

	if (!mark_source_chains(newinfo, repl->valid_hooks, entry0, offsets)) {
		ret = -ELOOP;
		goto out_free;
	}
	kvfree(offsets);

	/* Finally, each sanity check must pass */
	i = 0;
	xt_entry_foreach(iter, entry0, newinfo->size) {
		ret = find_check_entry(iter, net, repl->name, repl->size,
				       &alloc_state);
		if (ret != 0)
			break;
		++i;
	}

	if (ret != 0) {
		xt_entry_foreach(iter, entry0, newinfo->size) {
			if (i-- == 0)
				break;
			cleanup_entry(iter, net);
		}
		return ret;
	}

	return ret;
 out_free:
	kvfree(offsets);
	return ret;
}
static void
get_counters(const struct xt_table_info *t,
	     struct xt_counters counters[])
{
	struct ipt_entry *iter;
	unsigned int cpu;
	unsigned int i;

	for_each_possible_cpu(cpu) {
		seqcount_t *s = &per_cpu(xt_recseq, cpu);

		i = 0;
		xt_entry_foreach(iter, t->entries, t->size) {
			struct xt_counters *tmp;
			u64 bcnt, pcnt;
			unsigned int start;

			tmp = xt_get_per_cpu_counter(&iter->counters, cpu);
			do {
				start = read_seqcount_begin(s);
				bcnt = tmp->bcnt;
				pcnt = tmp->pcnt;
			} while (read_seqcount_retry(s, start));

			ADD_COUNTER(counters[i], bcnt, pcnt);
			++i; /* macro does multi eval of i */
			cond_resched();
		}
	}
}
static void get_old_counters(const struct xt_table_info *t,
			     struct xt_counters counters[])
{
	struct ipt_entry *iter;
	unsigned int cpu, i;

	for_each_possible_cpu(cpu) {
		i = 0;
		xt_entry_foreach(iter, t->entries, t->size) {
			const struct xt_counters *tmp;

			tmp = xt_get_per_cpu_counter(&iter->counters, cpu);
			ADD_COUNTER(counters[i], tmp->bcnt, tmp->pcnt);
			++i; /* macro does multi eval of i */
		}
		cond_resched();
	}
}
static struct xt_counters *alloc_counters(const struct xt_table *table)
{
	unsigned int countersize;
	struct xt_counters *counters;
	const struct xt_table_info *private = table->private;

	/* We need atomic snapshot of counters: rest doesn't change
	   (other than comefrom, which userspace doesn't care
	   about). */
	countersize = sizeof(struct xt_counters) * private->number;
	counters = vzalloc(countersize);

	if (counters == NULL)
		return ERR_PTR(-ENOMEM);

	get_counters(private, counters);

	return counters;
}
static int
copy_entries_to_user(unsigned int total_size,
		     const struct xt_table *table,
		     void __user *userptr)
{
	unsigned int off, num;
	const struct ipt_entry *e;
	struct xt_counters *counters;
	const struct xt_table_info *private = table->private;
	int ret = 0;
	const void *loc_cpu_entry;

	counters = alloc_counters(table);
	if (IS_ERR(counters))
		return PTR_ERR(counters);

	loc_cpu_entry = private->entries;

	/* FIXME: use iterator macros --RR */
	/* ... then go back and fix counters and names */
	for (off = 0, num = 0; off < total_size; off += e->next_offset, num++){
		unsigned int i;
		const struct xt_entry_match *m;
		const struct xt_entry_target *t;

		e = loc_cpu_entry + off;
		if (copy_to_user(userptr + off, e, sizeof(*e))) {
			ret = -EFAULT;
			goto free_counters;
		}
		if (copy_to_user(userptr + off
				 + offsetof(struct ipt_entry, counters),
				 &counters[num],
				 sizeof(counters[num])) != 0) {
			ret = -EFAULT;
			goto free_counters;
		}

		for (i = sizeof(struct ipt_entry);
		     i < e->target_offset;
		     i += m->u.match_size) {
			m = (void *)e + i;

			if (xt_match_to_user(m, userptr + off + i)) {
				ret = -EFAULT;
				goto free_counters;
			}
		}

		t = ipt_get_target_c(e);
		if (xt_target_to_user(t, userptr + off + e->target_offset)) {
			ret = -EFAULT;
			goto free_counters;
		}
	}

 free_counters:
	vfree(counters);
	return ret;
}
#ifdef CONFIG_COMPAT
static void compat_standard_from_user(void *dst, const void *src)
{
	int v = *(compat_int_t *)src;

	if (v > 0)
		v += xt_compat_calc_jump(AF_INET, v);
	memcpy(dst, &v, sizeof(v));
}
static int compat_standard_to_user(void __user *dst, const void *src)
{
	compat_int_t cv = *(int *)src;

	if (cv > 0)
		cv -= xt_compat_calc_jump(AF_INET, cv);
	return copy_to_user(dst, &cv, sizeof(cv)) ? -EFAULT : 0;
}
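
/* Jump fixup sketch (illustrative): on a 64-bit kernel each translated
 * entry is larger than its 32-bit counterpart, so a jump verdict, which
 * is a byte offset into the blob, must be shifted by the accumulated
 * per-entry growth recorded via xt_compat_add_offset().
 * xt_compat_calc_jump(AF_INET, v) returns that accumulated delta at
 * offset v, hence "+=" when translating user->kernel above and "-="
 * on the way back.
 */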
static int compat_calc_entry(const struct ipt_entry *e,
			     const struct xt_table_info *info,
			     const void *base, struct xt_table_info *newinfo)
{
	const struct xt_entry_match *ematch;
	const struct xt_entry_target *t;
	unsigned int entry_offset;
	int off, i, ret;

	off = sizeof(struct ipt_entry) - sizeof(struct compat_ipt_entry);
	entry_offset = (void *)e - base;
	xt_ematch_foreach(ematch, e)
		off += xt_compat_match_offset(ematch->u.kernel.match);
	t = ipt_get_target_c(e);
	off += xt_compat_target_offset(t->u.kernel.target);
	newinfo->size -= off;
	ret = xt_compat_add_offset(AF_INET, entry_offset, off);
	if (ret)
		return ret;

	for (i = 0; i < NF_INET_NUMHOOKS; i++) {
		if (info->hook_entry[i] &&
		    (e < (struct ipt_entry *)(base + info->hook_entry[i])))
			newinfo->hook_entry[i] -= off;
		if (info->underflow[i] &&
		    (e < (struct ipt_entry *)(base + info->underflow[i])))
			newinfo->underflow[i] -= off;
	}
	return 0;
}
static int compat_table_info(const struct xt_table_info *info,
			     struct xt_table_info *newinfo)
{
	struct ipt_entry *iter;
	const void *loc_cpu_entry;
	int ret;

	if (!newinfo || !info)
		return -EINVAL;

	/* we dont care about newinfo->entries */
	memcpy(newinfo, info, offsetof(struct xt_table_info, entries));
	newinfo->initial_entries = 0;
	loc_cpu_entry = info->entries;
	ret = xt_compat_init_offsets(AF_INET, info->number);
	if (ret)
		return ret;
	xt_entry_foreach(iter, loc_cpu_entry, info->size) {
		ret = compat_calc_entry(iter, info, loc_cpu_entry, newinfo);
		if (ret != 0)
			return ret;
	}
	return 0;
}
#endif
static int get_info(struct net *net, void __user *user,
		    const int *len, int compat)
{
	char name[XT_TABLE_MAXNAMELEN];
	struct xt_table *t;
	int ret;

	if (*len != sizeof(struct ipt_getinfo))
		return -EINVAL;

	if (copy_from_user(name, user, sizeof(name)) != 0)
		return -EFAULT;

	name[XT_TABLE_MAXNAMELEN-1] = '\0';
#ifdef CONFIG_COMPAT
	if (compat)
		xt_compat_lock(AF_INET);
#endif
	t = xt_request_find_table_lock(net, AF_INET, name);
	if (!IS_ERR(t)) {
		struct ipt_getinfo info;
		const struct xt_table_info *private = t->private;
#ifdef CONFIG_COMPAT
		struct xt_table_info tmp;

		if (compat) {
			ret = compat_table_info(private, &tmp);
			xt_compat_flush_offsets(AF_INET);
			private = &tmp;
		}
#endif
		memset(&info, 0, sizeof(info));
		info.valid_hooks = t->valid_hooks;
		memcpy(info.hook_entry, private->hook_entry,
		       sizeof(info.hook_entry));
		memcpy(info.underflow, private->underflow,
		       sizeof(info.underflow));
		info.num_entries = private->number;
		info.size = private->size;
		strcpy(info.name, name);

		if (copy_to_user(user, &info, *len) != 0)
			ret = -EFAULT;
		else
			ret = 0;

		xt_table_unlock(t);
		module_put(t->me);
	} else
		ret = PTR_ERR(t);
#ifdef CONFIG_COMPAT
	if (compat)
		xt_compat_unlock(AF_INET);
#endif
	return ret;
}
static int
get_entries(struct net *net, struct ipt_get_entries __user *uptr,
	    const int *len)
{
	int ret;
	struct ipt_get_entries get;
	struct xt_table *t;

	if (*len < sizeof(get))
		return -EINVAL;
	if (copy_from_user(&get, uptr, sizeof(get)) != 0)
		return -EFAULT;
	if (*len != sizeof(struct ipt_get_entries) + get.size)
		return -EINVAL;
	get.name[sizeof(get.name) - 1] = '\0';

	t = xt_find_table_lock(net, AF_INET, get.name);
	if (!IS_ERR(t)) {
		const struct xt_table_info *private = t->private;

		if (get.size == private->size)
			ret = copy_entries_to_user(private->size,
						   t, uptr->entrytable);
		else
			ret = -EAGAIN;

		module_put(t->me);
		xt_table_unlock(t);
	} else
		ret = PTR_ERR(t);

	return ret;
}
static int
__do_replace(struct net *net, const char *name, unsigned int valid_hooks,
	     struct xt_table_info *newinfo, unsigned int num_counters,
	     void __user *counters_ptr)
{
	int ret;
	struct xt_table *t;
	struct xt_table_info *oldinfo;
	struct xt_counters *counters;
	struct ipt_entry *iter;

	ret = 0;
	counters = xt_counters_alloc(num_counters);
	if (!counters) {
		ret = -ENOMEM;
		goto out;
	}

	t = xt_request_find_table_lock(net, AF_INET, name);
	if (IS_ERR(t)) {
		ret = PTR_ERR(t);
		goto free_newinfo_counters_untrans;
	}

	/* You lied! */
	if (valid_hooks != t->valid_hooks) {
		ret = -EINVAL;
		goto put_module;
	}

	oldinfo = xt_replace_table(t, num_counters, newinfo, &ret);
	if (!oldinfo)
		goto put_module;

	/* Update module usage count based on number of rules */
	if ((oldinfo->number > oldinfo->initial_entries) ||
	    (newinfo->number <= oldinfo->initial_entries))
		module_put(t->me);
	if ((oldinfo->number > oldinfo->initial_entries) &&
	    (newinfo->number <= oldinfo->initial_entries))
		module_put(t->me);

	xt_table_unlock(t);

	get_old_counters(oldinfo, counters);

	/* Decrease module usage counts and free resource */
	xt_entry_foreach(iter, oldinfo->entries, oldinfo->size)
		cleanup_entry(iter, net);

	xt_free_table_info(oldinfo);
	if (copy_to_user(counters_ptr, counters,
			 sizeof(struct xt_counters) * num_counters) != 0) {
		/* Silent error, can't fail, new table is already in place */
		net_warn_ratelimited("iptables: counters copy to user failed while replacing table\n");
	}
	vfree(counters);
	return ret;

 put_module:
	module_put(t->me);
	xt_table_unlock(t);
 free_newinfo_counters_untrans:
	vfree(counters);
 out:
	return ret;
}
static int
do_replace(struct net *net, const void __user *user, unsigned int len)
{
	int ret;
	struct ipt_replace tmp;
	struct xt_table_info *newinfo;
	void *loc_cpu_entry;
	struct ipt_entry *iter;

	if (copy_from_user(&tmp, user, sizeof(tmp)) != 0)
		return -EFAULT;

	/* overflow check */
	if (tmp.num_counters >= INT_MAX / sizeof(struct xt_counters))
		return -ENOMEM;
	if (tmp.num_counters == 0)
		return -EINVAL;

	tmp.name[sizeof(tmp.name)-1] = 0;

	newinfo = xt_alloc_table_info(tmp.size);
	if (!newinfo)
		return -ENOMEM;

	loc_cpu_entry = newinfo->entries;
	if (copy_from_user(loc_cpu_entry, user + sizeof(tmp),
			   tmp.size) != 0) {
		ret = -EFAULT;
		goto free_newinfo;
	}

	ret = translate_table(net, newinfo, loc_cpu_entry, &tmp);
	if (ret != 0)
		goto free_newinfo;

	ret = __do_replace(net, tmp.name, tmp.valid_hooks, newinfo,
			   tmp.num_counters, tmp.counters);
	if (ret)
		goto free_newinfo_untrans;
	return 0;

 free_newinfo_untrans:
	xt_entry_foreach(iter, loc_cpu_entry, newinfo->size)
		cleanup_entry(iter, net);
 free_newinfo:
	xt_free_table_info(newinfo);
	return ret;
}
static int
do_add_counters(struct net *net, const void __user *user,
		unsigned int len, int compat)
{
	unsigned int i;
	struct xt_counters_info tmp;
	struct xt_counters *paddc;
	struct xt_table *t;
	const struct xt_table_info *private;
	int ret = 0;
	struct ipt_entry *iter;
	unsigned int addend;

	paddc = xt_copy_counters_from_user(user, len, &tmp, compat);
	if (IS_ERR(paddc))
		return PTR_ERR(paddc);

	t = xt_find_table_lock(net, AF_INET, tmp.name);
	if (IS_ERR(t)) {
		ret = PTR_ERR(t);
		goto free;
	}

	local_bh_disable();
	private = t->private;
	if (private->number != tmp.num_counters) {
		ret = -EINVAL;
		goto unlock_up_free;
	}

	i = 0;
	addend = xt_write_recseq_begin();
	xt_entry_foreach(iter, private->entries, private->size) {
		struct xt_counters *tmp;

		tmp = xt_get_this_cpu_counter(&iter->counters);
		ADD_COUNTER(*tmp, paddc[i].bcnt, paddc[i].pcnt);
		++i;
	}
	xt_write_recseq_end(addend);
 unlock_up_free:
	local_bh_enable();
	xt_table_unlock(t);
	module_put(t->me);
 free:
	vfree(paddc);

	return ret;
}
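
/* Counter update sketch (illustrative): writers fold bytes/packets into
 * the per-CPU counters under xt_write_recseq_begin()/_end() so that
 * get_counters() can detect a concurrent writer and retry its seqcount
 * read section, e.g.
 *
 *	addend = xt_write_recseq_begin();
 *	ADD_COUNTER(*ctr, bytes, packets);   (ctr->bcnt += bytes;
 *	                                      ctr->pcnt += packets)
 *	xt_write_recseq_end(addend);
 */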
#ifdef CONFIG_COMPAT
struct compat_ipt_replace {
	char			name[XT_TABLE_MAXNAMELEN];
	u32			valid_hooks;
	u32			num_entries;
	u32			size;
	u32			hook_entry[NF_INET_NUMHOOKS];
	u32			underflow[NF_INET_NUMHOOKS];
	u32			num_counters;
	compat_uptr_t		counters;	/* struct xt_counters * */
	struct compat_ipt_entry entries[0];
};
static int
compat_copy_entry_to_user(struct ipt_entry *e, void __user **dstptr,
			  unsigned int *size, struct xt_counters *counters,
			  unsigned int i)
{
	struct xt_entry_target *t;
	struct compat_ipt_entry __user *ce;
	u_int16_t target_offset, next_offset;
	compat_uint_t origsize;
	const struct xt_entry_match *ematch;
	int ret = 0;

	origsize = *size;
	ce = *dstptr;
	if (copy_to_user(ce, e, sizeof(struct ipt_entry)) != 0 ||
	    copy_to_user(&ce->counters, &counters[i],
			 sizeof(counters[i])) != 0)
		return -EFAULT;

	*dstptr += sizeof(struct compat_ipt_entry);
	*size -= sizeof(struct ipt_entry) - sizeof(struct compat_ipt_entry);

	xt_ematch_foreach(ematch, e) {
		ret = xt_compat_match_to_user(ematch, dstptr, size);
		if (ret != 0)
			return ret;
	}
	target_offset = e->target_offset - (origsize - *size);
	t = ipt_get_target(e);
	ret = xt_compat_target_to_user(t, dstptr, size);
	if (ret)
		return ret;
	next_offset = e->next_offset - (origsize - *size);
	if (put_user(target_offset, &ce->target_offset) != 0 ||
	    put_user(next_offset, &ce->next_offset) != 0)
		return -EFAULT;
	return 0;
}
static int
compat_find_calc_match(struct xt_entry_match *m,
		       const struct ipt_ip *ip,
		       int *size)
{
	struct xt_match *match;

	match = xt_request_find_match(NFPROTO_IPV4, m->u.user.name,
				      m->u.user.revision);
	if (IS_ERR(match))
		return PTR_ERR(match);

	m->u.kernel.match = match;
	*size += xt_compat_match_offset(match);
	return 0;
}
static void compat_release_entry(struct compat_ipt_entry *e)
{
	struct xt_entry_target *t;
	struct xt_entry_match *ematch;

	/* Cleanup all matches */
	xt_ematch_foreach(ematch, e)
		module_put(ematch->u.kernel.match->me);
	t = compat_ipt_get_target(e);
	module_put(t->u.kernel.target->me);
}
static int
check_compat_entry_size_and_hooks(struct compat_ipt_entry *e,
				  struct xt_table_info *newinfo,
				  unsigned int *size,
				  const unsigned char *base,
				  const unsigned char *limit)
{
	struct xt_entry_match *ematch;
	struct xt_entry_target *t;
	struct xt_target *target;
	unsigned int entry_offset;
	unsigned int j;
	int ret, off;

	if ((unsigned long)e % __alignof__(struct compat_ipt_entry) != 0 ||
	    (unsigned char *)e + sizeof(struct compat_ipt_entry) >= limit ||
	    (unsigned char *)e + e->next_offset > limit)
		return -EINVAL;

	if (e->next_offset < sizeof(struct compat_ipt_entry) +
			     sizeof(struct compat_xt_entry_target))
		return -EINVAL;

	if (!ip_checkentry(&e->ip))
		return -EINVAL;

	ret = xt_compat_check_entry_offsets(e, e->elems,
					    e->target_offset, e->next_offset);
	if (ret)
		return ret;

	off = sizeof(struct ipt_entry) - sizeof(struct compat_ipt_entry);
	entry_offset = (void *)e - (void *)base;
	j = 0;
	xt_ematch_foreach(ematch, e) {
		ret = compat_find_calc_match(ematch, &e->ip, &off);
		if (ret != 0)
			goto release_matches;
		++j;
	}

	t = compat_ipt_get_target(e);
	target = xt_request_find_target(NFPROTO_IPV4, t->u.user.name,
					t->u.user.revision);
	if (IS_ERR(target)) {
		ret = PTR_ERR(target);
		goto release_matches;
	}
	t->u.kernel.target = target;

	off += xt_compat_target_offset(target);
	*size += off;
	ret = xt_compat_add_offset(AF_INET, entry_offset, off);
	if (ret)
		goto out;

	return 0;

out:
	module_put(t->u.kernel.target->me);
release_matches:
	xt_ematch_foreach(ematch, e) {
		if (j-- == 0)
			break;
		module_put(ematch->u.kernel.match->me);
	}
	return ret;
}
static void
compat_copy_entry_from_user(struct compat_ipt_entry *e, void **dstptr,
			    unsigned int *size,
			    struct xt_table_info *newinfo, unsigned char *base)
{
	struct xt_entry_target *t;
	struct ipt_entry *de;
	unsigned int origsize;
	int h;
	struct xt_entry_match *ematch;

	origsize = *size;
	de = *dstptr;
	memcpy(de, e, sizeof(struct ipt_entry));
	memcpy(&de->counters, &e->counters, sizeof(e->counters));

	*dstptr += sizeof(struct ipt_entry);
	*size += sizeof(struct ipt_entry) - sizeof(struct compat_ipt_entry);

	xt_ematch_foreach(ematch, e)
		xt_compat_match_from_user(ematch, dstptr, size);

	de->target_offset = e->target_offset - (origsize - *size);
	t = compat_ipt_get_target(e);
	xt_compat_target_from_user(t, dstptr, size);

	de->next_offset = e->next_offset - (origsize - *size);

	for (h = 0; h < NF_INET_NUMHOOKS; h++) {
		if ((unsigned char *)de - base < newinfo->hook_entry[h])
			newinfo->hook_entry[h] -= origsize - *size;
		if ((unsigned char *)de - base < newinfo->underflow[h])
			newinfo->underflow[h] -= origsize - *size;
	}
}
static int
translate_compat_table(struct net *net,
		       struct xt_table_info **pinfo,
		       void **pentry0,
		       const struct compat_ipt_replace *compatr)
{
	unsigned int i, j;
	struct xt_table_info *newinfo, *info;
	void *pos, *entry0, *entry1;
	struct compat_ipt_entry *iter0;
	struct ipt_replace repl;
	unsigned int size;
	int ret;

	info = *pinfo;
	entry0 = *pentry0;
	size = compatr->size;
	info->number = compatr->num_entries;

	j = 0;
	xt_compat_lock(AF_INET);
	ret = xt_compat_init_offsets(AF_INET, compatr->num_entries);
	if (ret)
		goto out_unlock;
	/* Walk through entries, checking offsets. */
	xt_entry_foreach(iter0, entry0, compatr->size) {
		ret = check_compat_entry_size_and_hooks(iter0, info, &size,
							entry0,
							entry0 + compatr->size);
		if (ret != 0)
			goto out_unlock;
		++j;
	}

	ret = -EINVAL;
	if (j != compatr->num_entries)
		goto out_unlock;

	ret = -ENOMEM;
	newinfo = xt_alloc_table_info(size);
	if (!newinfo)
		goto out_unlock;

	newinfo->number = compatr->num_entries;
	for (i = 0; i < NF_INET_NUMHOOKS; i++) {
		newinfo->hook_entry[i] = compatr->hook_entry[i];
		newinfo->underflow[i] = compatr->underflow[i];
	}
	entry1 = newinfo->entries;
	pos = entry1;
	size = compatr->size;
	xt_entry_foreach(iter0, entry0, compatr->size)
		compat_copy_entry_from_user(iter0, &pos, &size,
					    newinfo, entry1);

	/* all module references in entry0 are now gone.
	 * entry1/newinfo contains a 64bit ruleset that looks exactly as
	 * generated by 64bit userspace.
	 *
	 * Call standard translate_table() to validate all hook_entrys,
	 * underflows, check for loops, etc.
	 */
	xt_compat_flush_offsets(AF_INET);
	xt_compat_unlock(AF_INET);

	memcpy(&repl, compatr, sizeof(*compatr));

	for (i = 0; i < NF_INET_NUMHOOKS; i++) {
		repl.hook_entry[i] = newinfo->hook_entry[i];
		repl.underflow[i] = newinfo->underflow[i];
	}

	repl.num_counters = 0;
	repl.counters = NULL;
	repl.size = newinfo->size;
	ret = translate_table(net, newinfo, entry1, &repl);
	if (ret)
		goto free_newinfo;

	*pinfo = newinfo;
	*pentry0 = entry1;
	xt_free_table_info(info);
	return 0;

free_newinfo:
	xt_free_table_info(newinfo);
	return ret;
out_unlock:
	xt_compat_flush_offsets(AF_INET);
	xt_compat_unlock(AF_INET);
	xt_entry_foreach(iter0, entry0, compatr->size) {
		if (j-- == 0)
			break;
		compat_release_entry(iter0);
	}
	return ret;
}
static int
compat_do_replace(struct net *net, void __user *user, unsigned int len)
{
	int ret;
	struct compat_ipt_replace tmp;
	struct xt_table_info *newinfo;
	void *loc_cpu_entry;
	struct ipt_entry *iter;

	if (copy_from_user(&tmp, user, sizeof(tmp)) != 0)
		return -EFAULT;

	/* overflow check */
	if (tmp.num_counters >= INT_MAX / sizeof(struct xt_counters))
		return -ENOMEM;
	if (tmp.num_counters == 0)
		return -EINVAL;

	tmp.name[sizeof(tmp.name)-1] = 0;

	newinfo = xt_alloc_table_info(tmp.size);
	if (!newinfo)
		return -ENOMEM;

	loc_cpu_entry = newinfo->entries;
	if (copy_from_user(loc_cpu_entry, user + sizeof(tmp),
			   tmp.size) != 0) {
		ret = -EFAULT;
		goto free_newinfo;
	}

	ret = translate_compat_table(net, &newinfo, &loc_cpu_entry, &tmp);
	if (ret != 0)
		goto free_newinfo;

	ret = __do_replace(net, tmp.name, tmp.valid_hooks, newinfo,
			   tmp.num_counters, compat_ptr(tmp.counters));
	if (ret)
		goto free_newinfo_untrans;
	return 0;

 free_newinfo_untrans:
	xt_entry_foreach(iter, loc_cpu_entry, newinfo->size)
		cleanup_entry(iter, net);
 free_newinfo:
	xt_free_table_info(newinfo);
	return ret;
}
static int
compat_do_ipt_set_ctl(struct sock *sk, int cmd, void __user *user,
		      unsigned int len)
{
	int ret;

	if (!ns_capable(sock_net(sk)->user_ns, CAP_NET_ADMIN))
		return -EPERM;

	switch (cmd) {
	case IPT_SO_SET_REPLACE:
		ret = compat_do_replace(sock_net(sk), user, len);
		break;

	case IPT_SO_SET_ADD_COUNTERS:
		ret = do_add_counters(sock_net(sk), user, len, 1);
		break;

	default:
		ret = -EINVAL;
	}

	return ret;
}
struct compat_ipt_get_entries {
	char name[XT_TABLE_MAXNAMELEN];
	compat_uint_t size;
	struct compat_ipt_entry entrytable[0];
};
static int
compat_copy_entries_to_user(unsigned int total_size, struct xt_table *table,
			    void __user *userptr)
{
	struct xt_counters *counters;
	const struct xt_table_info *private = table->private;
	void __user *pos;
	unsigned int size;
	int ret = 0;
	unsigned int i = 0;
	struct ipt_entry *iter;

	counters = alloc_counters(table);
	if (IS_ERR(counters))
		return PTR_ERR(counters);

	pos = userptr;
	size = total_size;
	xt_entry_foreach(iter, private->entries, total_size) {
		ret = compat_copy_entry_to_user(iter, &pos,
						&size, counters, i++);
		if (ret != 0)
			break;
	}

	vfree(counters);
	return ret;
}
static int
compat_get_entries(struct net *net, struct compat_ipt_get_entries __user *uptr,
		   int *len)
{
	int ret;
	struct compat_ipt_get_entries get;
	struct xt_table *t;

	if (*len < sizeof(get))
		return -EINVAL;

	if (copy_from_user(&get, uptr, sizeof(get)) != 0)
		return -EFAULT;

	if (*len != sizeof(struct compat_ipt_get_entries) + get.size)
		return -EINVAL;

	get.name[sizeof(get.name) - 1] = '\0';

	xt_compat_lock(AF_INET);
	t = xt_find_table_lock(net, AF_INET, get.name);
	if (!IS_ERR(t)) {
		const struct xt_table_info *private = t->private;
		struct xt_table_info info;

		ret = compat_table_info(private, &info);
		if (!ret && get.size == info.size)
			ret = compat_copy_entries_to_user(private->size,
							  t, uptr->entrytable);
		else if (!ret)
			ret = -EAGAIN;

		xt_compat_flush_offsets(AF_INET);
		module_put(t->me);
		xt_table_unlock(t);
	} else
		ret = PTR_ERR(t);

	xt_compat_unlock(AF_INET);
	return ret;
}
static int do_ipt_get_ctl(struct sock *, int, void __user *, int *);

static int
compat_do_ipt_get_ctl(struct sock *sk, int cmd, void __user *user, int *len)
{
	int ret;

	if (!ns_capable(sock_net(sk)->user_ns, CAP_NET_ADMIN))
		return -EPERM;

	switch (cmd) {
	case IPT_SO_GET_INFO:
		ret = get_info(sock_net(sk), user, len, 1);
		break;
	case IPT_SO_GET_ENTRIES:
		ret = compat_get_entries(sock_net(sk), user, len);
		break;
	default:
		ret = do_ipt_get_ctl(sk, cmd, user, len);
	}
	return ret;
}
#endif
static int
do_ipt_set_ctl(struct sock *sk, int cmd, void __user *user, unsigned int len)
{
	int ret;

	if (!ns_capable(sock_net(sk)->user_ns, CAP_NET_ADMIN))
		return -EPERM;

	switch (cmd) {
	case IPT_SO_SET_REPLACE:
		ret = do_replace(sock_net(sk), user, len);
		break;

	case IPT_SO_SET_ADD_COUNTERS:
		ret = do_add_counters(sock_net(sk), user, len, 0);
		break;

	default:
		ret = -EINVAL;
	}

	return ret;
}
static int
do_ipt_get_ctl(struct sock *sk, int cmd, void __user *user, int *len)
{
	int ret;

	if (!ns_capable(sock_net(sk)->user_ns, CAP_NET_ADMIN))
		return -EPERM;

	switch (cmd) {
	case IPT_SO_GET_INFO:
		ret = get_info(sock_net(sk), user, len, 0);
		break;

	case IPT_SO_GET_ENTRIES:
		ret = get_entries(sock_net(sk), user, len);
		break;

	case IPT_SO_GET_REVISION_MATCH:
	case IPT_SO_GET_REVISION_TARGET: {
		struct xt_get_revision rev;
		int target;

		if (*len != sizeof(rev)) {
			ret = -EINVAL;
			break;
		}
		if (copy_from_user(&rev, user, sizeof(rev)) != 0) {
			ret = -EFAULT;
			break;
		}
		rev.name[sizeof(rev.name)-1] = 0;

		if (cmd == IPT_SO_GET_REVISION_TARGET)
			target = 1;
		else
			target = 0;

		try_then_request_module(xt_find_revision(AF_INET, rev.name,
							 rev.revision,
							 target, &ret),
					"ipt_%s", rev.name);
		break;
	}

	default:
		ret = -EINVAL;
	}

	return ret;
}
static void __ipt_unregister_table(struct net *net, struct xt_table *table)
{
	struct xt_table_info *private;
	void *loc_cpu_entry;
	struct module *table_owner = table->me;
	struct ipt_entry *iter;

	private = xt_unregister_table(table);

	/* Decrease module usage counts and free resources */
	loc_cpu_entry = private->entries;
	xt_entry_foreach(iter, loc_cpu_entry, private->size)
		cleanup_entry(iter, net);
	if (private->number > private->initial_entries)
		module_put(table_owner);
	xt_free_table_info(private);
}
int ipt_register_table(struct net *net, const struct xt_table *table,
		       const struct ipt_replace *repl,
		       const struct nf_hook_ops *ops, struct xt_table **res)
{
	int ret;
	struct xt_table_info *newinfo;
	struct xt_table_info bootstrap = {0};
	void *loc_cpu_entry;
	struct xt_table *new_table;

	newinfo = xt_alloc_table_info(repl->size);
	if (!newinfo)
		return -ENOMEM;

	loc_cpu_entry = newinfo->entries;
	memcpy(loc_cpu_entry, repl->entries, repl->size);

	ret = translate_table(net, newinfo, loc_cpu_entry, repl);
	if (ret != 0)
		goto out_free;

	new_table = xt_register_table(net, table, &bootstrap, newinfo);
	if (IS_ERR(new_table)) {
		ret = PTR_ERR(new_table);
		goto out_free;
	}

	/* set res now, will see skbs right after nf_register_net_hooks */
	WRITE_ONCE(*res, new_table);
	if (!ops)
		return 0;

	ret = nf_register_net_hooks(net, ops, hweight32(table->valid_hooks));
	if (ret != 0) {
		__ipt_unregister_table(net, new_table);
		*res = NULL;
	}

	return ret;

out_free:
	xt_free_table_info(newinfo);
	return ret;
}
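
/* Registration sketch (illustrative; mirrors what a table module such
 * as iptable_filter does -- packet_filter, filter_ops and the res slot
 * are that module's names, not defined here):
 *
 *	struct ipt_replace *repl = ipt_alloc_initial_table(&packet_filter);
 *
 *	if (!repl)
 *		return -ENOMEM;
 *	err = ipt_register_table(net, &packet_filter, repl, filter_ops,
 *				 &net->ipv4.iptable_filter);
 *	kfree(repl);
 *	return err;
 */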
void ipt_unregister_table(struct net *net, struct xt_table *table,
			  const struct nf_hook_ops *ops)
{
	if (ops)
		nf_unregister_net_hooks(net, ops, hweight32(table->valid_hooks));
	__ipt_unregister_table(net, table);
}
/* Returns 1 if the type and code is matched by the range, 0 otherwise */
static inline bool
icmp_type_code_match(u_int8_t test_type, u_int8_t min_code, u_int8_t max_code,
		     u_int8_t type, u_int8_t code,
		     bool invert)
{
	return ((test_type == 0xFF) ||
		(type == test_type && code >= min_code && code <= max_code))
		^ invert;
}
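
/* Example (illustrative): "--icmp-type 8/0" compiles to test_type == 8
 * and min_code == max_code == 0, so an echo request (type 8, code 0)
 * matches and everything else does not; test_type == 0xFF is the
 * "any type" wildcard, and invert flips the final result.
 */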
static bool
icmp_match(const struct sk_buff *skb, struct xt_action_param *par)
{
	const struct icmphdr *ic;
	struct icmphdr _icmph;
	const struct ipt_icmp *icmpinfo = par->matchinfo;

	/* Must not be a fragment. */
	if (par->fragoff != 0)
		return false;

	ic = skb_header_pointer(skb, par->thoff, sizeof(_icmph), &_icmph);
	if (ic == NULL) {
		/* We've been asked to examine this packet, and we
		 * can't. Hence, no choice but to drop.
		 */
		par->hotdrop = true;
		return false;
	}

	return icmp_type_code_match(icmpinfo->type,
				    icmpinfo->code[0],
				    icmpinfo->code[1],
				    ic->type, ic->code,
				    !!(icmpinfo->invflags&IPT_ICMP_INV));
}
static int icmp_checkentry(const struct xt_mtchk_param *par)
{
	const struct ipt_icmp *icmpinfo = par->matchinfo;

	/* Must specify no unknown invflags */
	return (icmpinfo->invflags & ~IPT_ICMP_INV) ? -EINVAL : 0;
}
static struct xt_target ipt_builtin_tg[] __read_mostly = {
	{
		.name             = XT_STANDARD_TARGET,
		.targetsize       = sizeof(int),
		.family           = NFPROTO_IPV4,
#ifdef CONFIG_COMPAT
		.compatsize       = sizeof(compat_int_t),
		.compat_from_user = compat_standard_from_user,
		.compat_to_user   = compat_standard_to_user,
#endif
	},
	{
		.name             = XT_ERROR_TARGET,
		.target           = ipt_error,
		.targetsize       = XT_FUNCTION_MAXNAMELEN,
		.family           = NFPROTO_IPV4,
	},
};
static struct nf_sockopt_ops ipt_sockopts = {
	.pf		= PF_INET,
	.set_optmin	= IPT_BASE_CTL,
	.set_optmax	= IPT_SO_SET_MAX+1,
	.set		= do_ipt_set_ctl,
#ifdef CONFIG_COMPAT
	.compat_set	= compat_do_ipt_set_ctl,
#endif
	.get_optmin	= IPT_BASE_CTL,
	.get_optmax	= IPT_SO_GET_MAX+1,
	.get		= do_ipt_get_ctl,
#ifdef CONFIG_COMPAT
	.compat_get	= compat_do_ipt_get_ctl,
#endif
	.owner		= THIS_MODULE,
};
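
/* Userspace view (illustrative sketch): the get/set handlers above back
 * the classic iptables socket ABI, reached from a raw IPv4 socket, e.g.
 *
 *	struct ipt_getinfo info = { .name = "filter" };
 *	socklen_t len = sizeof(info);
 *	int s = socket(AF_INET, SOCK_RAW, IPPROTO_RAW);
 *
 *	getsockopt(s, IPPROTO_IP, IPT_SO_GET_INFO, &info, &len);
 *
 * which lands in do_ipt_get_ctl() -> get_info() above.
 */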
static struct xt_match ipt_builtin_mt[] __read_mostly = {
	{
		.name       = "icmp",
		.match      = icmp_match,
		.matchsize  = sizeof(struct ipt_icmp),
		.checkentry = icmp_checkentry,
		.proto      = IPPROTO_ICMP,
		.family     = NFPROTO_IPV4,
	},
};
static int __net_init ip_tables_net_init(struct net *net)
{
	return xt_proto_init(net, NFPROTO_IPV4);
}

static void __net_exit ip_tables_net_exit(struct net *net)
{
	xt_proto_fini(net, NFPROTO_IPV4);
}

static struct pernet_operations ip_tables_net_ops = {
	.init = ip_tables_net_init,
	.exit = ip_tables_net_exit,
};
static int __init ip_tables_init(void)
{
	int ret;

	ret = register_pernet_subsys(&ip_tables_net_ops);
	if (ret < 0)
		goto err1;

	/* No one else will be downing sem now, so we won't sleep */
	ret = xt_register_targets(ipt_builtin_tg, ARRAY_SIZE(ipt_builtin_tg));
	if (ret < 0)
		goto err2;
	ret = xt_register_matches(ipt_builtin_mt, ARRAY_SIZE(ipt_builtin_mt));
	if (ret < 0)
		goto err4;

	/* Register setsockopt */
	ret = nf_register_sockopt(&ipt_sockopts);
	if (ret < 0)
		goto err5;

	return 0;

err5:
	xt_unregister_matches(ipt_builtin_mt, ARRAY_SIZE(ipt_builtin_mt));
err4:
	xt_unregister_targets(ipt_builtin_tg, ARRAY_SIZE(ipt_builtin_tg));
err2:
	unregister_pernet_subsys(&ip_tables_net_ops);
err1:
	return ret;
}
static void __exit ip_tables_fini(void)
{
	nf_unregister_sockopt(&ipt_sockopts);

	xt_unregister_matches(ipt_builtin_mt, ARRAY_SIZE(ipt_builtin_mt));
	xt_unregister_targets(ipt_builtin_tg, ARRAY_SIZE(ipt_builtin_tg));
	unregister_pernet_subsys(&ip_tables_net_ops);
}
EXPORT_SYMBOL(ipt_register_table);
EXPORT_SYMBOL(ipt_unregister_table);
EXPORT_SYMBOL(ipt_do_table);
module_init(ip_tables_init);
module_exit(ip_tables_fini);