 *  ebtables.c,v 2.0, July, 2002
 *
 *  This code is strongly inspired on the iptables code which is
 *  Copyright (C) 1999 Paul `Rusty' Russell & Michael J. Neuling
 *
 *  This program is free software; you can redistribute it and/or
 *  modify it under the terms of the GNU General Public License
 *  as published by the Free Software Foundation; either version
 *  2 of the License, or (at your option) any later version.
19 #include <linux/kmod.h>
20 #include <linux/module.h>
21 #include <linux/vmalloc.h>
22 #include <linux/netfilter/x_tables.h>
23 #include <linux/netfilter_bridge/ebtables.h>
24 #include <linux/spinlock.h>
25 #include <linux/mutex.h>
26 #include <asm/uaccess.h>
27 #include <linux/smp.h>
28 #include <linux/cpumask.h>
30 /* needed for logical [in,out]-dev filtering */
31 #include "../br_private.h"
/* Noisy diagnostic for table-consistency violations; the commented-out
 * variant below silences it.  (Reconstructed: stray line-number artifacts
 * had been fused onto the macro text.) */
#define BUGPRINT(format, args...) printk("kernel msg: ebtables bug: please "\
					 "report to author: "format, ## args)
/* #define BUGPRINT(format, args...) */
 * Each cpu has its own set of counters, so there is no need for write_lock
 * in the softirq.
 * For reading or updating the counters, the user context needs to
 * lock the counters with a write_lock.
/* The size of each set of counters is altered to get cache alignment,
 * so that each CPU's per-rule counter block starts on its own cache line. */
#define SMP_ALIGN(x) (((x) + SMP_CACHE_BYTES-1) & ~(SMP_CACHE_BYTES-1))
/* byte size of one CPU's block of n counters, cache-aligned */
#define COUNTER_OFFSET(n) (SMP_ALIGN(n * sizeof(struct ebt_counter)))
/* address of the counter block of CPU 'cpu' inside array c of n counters */
#define COUNTER_BASE(c, n, cpu) ((struct ebt_counter *)(((char *)c) + \
   COUNTER_OFFSET(n) * cpu))
/* serializes table registration/replacement and counter updates */
static DEFINE_MUTEX(ebt_mutex);
55 static void ebt_standard_compat_from_user(void *dst, const void *src)
57 int v = *(compat_int_t *)src;
60 v += xt_compat_calc_jump(NFPROTO_BRIDGE, v);
61 memcpy(dst, &v, sizeof(v));
64 static int ebt_standard_compat_to_user(void __user *dst, const void *src)
66 compat_int_t cv = *(int *)src;
69 cv -= xt_compat_calc_jump(NFPROTO_BRIDGE, cv);
70 return copy_to_user(dst, &cv, sizeof(cv)) ? -EFAULT : 0;
75 static struct xt_target ebt_standard_target = {
78 .family = NFPROTO_BRIDGE,
79 .targetsize = sizeof(int),
81 .compatsize = sizeof(compat_int_t),
82 .compat_from_user = ebt_standard_compat_from_user,
83 .compat_to_user = ebt_standard_compat_to_user,
88 ebt_do_watcher(const struct ebt_entry_watcher *w, struct sk_buff *skb,
89 struct xt_target_param *par)
91 par->target = w->u.watcher;
92 par->targinfo = w->data;
93 w->u.watcher->target(skb, par);
94 /* watchers don't give a verdict */
98 static inline int ebt_do_match (struct ebt_entry_match *m,
99 const struct sk_buff *skb, struct xt_match_param *par)
101 par->match = m->u.match;
102 par->matchinfo = m->data;
103 return m->u.match->match(skb, par) ? EBT_MATCH : EBT_NOMATCH;
107 ebt_dev_check(const char *entry, const struct net_device *device)
116 devname = device->name;
117 /* 1 is the wildcard token */
118 while (entry[i] != '\0' && entry[i] != 1 && entry[i] == devname[i])
120 return (devname[i] != entry[i] && entry[i] != 1);
/* XOR a condition with its per-rule inversion flag (expects 'e' in scope) */
#define FWINV2(bool,invflg) ((bool) ^ !!(e->invflags & invflg))
124 /* process standard matches */
126 ebt_basic_match(const struct ebt_entry *e, const struct ethhdr *h,
127 const struct net_device *in, const struct net_device *out)
131 if (e->bitmask & EBT_802_3) {
132 if (FWINV2(ntohs(h->h_proto) >= 1536, EBT_IPROTO))
134 } else if (!(e->bitmask & EBT_NOPROTO) &&
135 FWINV2(e->ethproto != h->h_proto, EBT_IPROTO))
138 if (FWINV2(ebt_dev_check(e->in, in), EBT_IIN))
140 if (FWINV2(ebt_dev_check(e->out, out), EBT_IOUT))
142 if ((!in || !in->br_port) ? 0 : FWINV2(ebt_dev_check(
143 e->logical_in, in->br_port->br->dev), EBT_ILOGICALIN))
145 if ((!out || !out->br_port) ? 0 : FWINV2(ebt_dev_check(
146 e->logical_out, out->br_port->br->dev), EBT_ILOGICALOUT))
149 if (e->bitmask & EBT_SOURCEMAC) {
151 for (i = 0; i < 6; i++)
152 verdict |= (h->h_source[i] ^ e->sourcemac[i]) &
154 if (FWINV2(verdict != 0, EBT_ISOURCE) )
157 if (e->bitmask & EBT_DESTMAC) {
159 for (i = 0; i < 6; i++)
160 verdict |= (h->h_dest[i] ^ e->destmac[i]) &
162 if (FWINV2(verdict != 0, EBT_IDEST) )
169 struct ebt_entry *ebt_next_entry(const struct ebt_entry *entry)
171 return (void *)entry + entry->next_offset;
174 /* Do some firewalling */
175 unsigned int ebt_do_table (unsigned int hook, struct sk_buff *skb,
176 const struct net_device *in, const struct net_device *out,
177 struct ebt_table *table)
180 struct ebt_entry *point;
181 struct ebt_counter *counter_base, *cb_base;
182 const struct ebt_entry_target *t;
184 struct ebt_chainstack *cs;
185 struct ebt_entries *chaininfo;
187 const struct ebt_table_info *private;
188 bool hotdrop = false;
189 struct xt_match_param mtpar;
190 struct xt_target_param tgpar;
192 mtpar.family = tgpar.family = NFPROTO_BRIDGE;
193 mtpar.in = tgpar.in = in;
194 mtpar.out = tgpar.out = out;
195 mtpar.hotdrop = &hotdrop;
196 mtpar.hooknum = tgpar.hooknum = hook;
198 read_lock_bh(&table->lock);
199 private = table->private;
200 cb_base = COUNTER_BASE(private->counters, private->nentries,
202 if (private->chainstack)
203 cs = private->chainstack[smp_processor_id()];
206 chaininfo = private->hook_entry[hook];
207 nentries = private->hook_entry[hook]->nentries;
208 point = (struct ebt_entry *)(private->hook_entry[hook]->data);
209 counter_base = cb_base + private->hook_entry[hook]->counter_offset;
210 /* base for chain jumps */
211 base = private->entries;
213 while (i < nentries) {
214 if (ebt_basic_match(point, eth_hdr(skb), in, out))
217 if (EBT_MATCH_ITERATE(point, ebt_do_match, skb, &mtpar) != 0)
220 read_unlock_bh(&table->lock);
224 /* increase counter */
225 (*(counter_base + i)).pcnt++;
226 (*(counter_base + i)).bcnt += skb->len;
228 /* these should only watch: not modify, nor tell us
229 what to do with the packet */
230 EBT_WATCHER_ITERATE(point, ebt_do_watcher, skb, &tgpar);
232 t = (struct ebt_entry_target *)
233 (((char *)point) + point->target_offset);
234 /* standard target */
235 if (!t->u.target->target)
236 verdict = ((struct ebt_standard_target *)t)->verdict;
238 tgpar.target = t->u.target;
239 tgpar.targinfo = t->data;
240 verdict = t->u.target->target(skb, &tgpar);
242 if (verdict == EBT_ACCEPT) {
243 read_unlock_bh(&table->lock);
246 if (verdict == EBT_DROP) {
247 read_unlock_bh(&table->lock);
250 if (verdict == EBT_RETURN) {
252 #ifdef CONFIG_NETFILTER_DEBUG
254 BUGPRINT("RETURN on base chain");
255 /* act like this is EBT_CONTINUE */
260 /* put all the local variables right */
262 chaininfo = cs[sp].chaininfo;
263 nentries = chaininfo->nentries;
265 counter_base = cb_base +
266 chaininfo->counter_offset;
269 if (verdict == EBT_CONTINUE)
271 #ifdef CONFIG_NETFILTER_DEBUG
273 BUGPRINT("bogus standard verdict\n");
274 read_unlock_bh(&table->lock);
280 cs[sp].chaininfo = chaininfo;
281 cs[sp].e = ebt_next_entry(point);
283 chaininfo = (struct ebt_entries *) (base + verdict);
284 #ifdef CONFIG_NETFILTER_DEBUG
285 if (chaininfo->distinguisher) {
286 BUGPRINT("jump to non-chain\n");
287 read_unlock_bh(&table->lock);
291 nentries = chaininfo->nentries;
292 point = (struct ebt_entry *)chaininfo->data;
293 counter_base = cb_base + chaininfo->counter_offset;
297 point = ebt_next_entry(point);
301 /* I actually like this :) */
302 if (chaininfo->policy == EBT_RETURN)
304 if (chaininfo->policy == EBT_ACCEPT) {
305 read_unlock_bh(&table->lock);
308 read_unlock_bh(&table->lock);
312 /* If it succeeds, returns element and locks mutex */
314 find_inlist_lock_noload(struct list_head *head, const char *name, int *error,
318 struct list_head list;
319 char name[EBT_FUNCTION_MAXNAMELEN];
322 *error = mutex_lock_interruptible(mutex);
326 list_for_each_entry(e, head, list) {
327 if (strcmp(e->name, name) == 0)
/* As find_inlist_lock_noload(), but request_module("<prefix><name>") and
 * retry once if the first lookup fails. */
static void *
find_inlist_lock(struct list_head *head, const char *name, const char *prefix,
   int *error, struct mutex *mutex)
{
	return try_then_request_module(
			find_inlist_lock_noload(head, name, error, mutex),
			"%s%s", prefix, name);
}
344 static inline struct ebt_table *
345 find_table_lock(struct net *net, const char *name, int *error,
348 return find_inlist_lock(&net->xt.tables[NFPROTO_BRIDGE], name,
349 "ebtable_", error, mutex);
353 ebt_check_match(struct ebt_entry_match *m, struct xt_mtchk_param *par,
356 const struct ebt_entry *e = par->entryinfo;
357 struct xt_match *match;
358 size_t left = ((char *)e + e->watchers_offset) - (char *)m;
361 if (left < sizeof(struct ebt_entry_match) ||
362 left - sizeof(struct ebt_entry_match) < m->match_size)
365 match = try_then_request_module(xt_find_match(NFPROTO_BRIDGE,
366 m->u.name, 0), "ebt_%s", m->u.name);
368 return PTR_ERR(match);
374 par->matchinfo = m->data;
375 ret = xt_check_match(par, m->match_size,
376 e->ethproto, e->invflags & EBT_IPROTO);
378 module_put(match->me);
387 ebt_check_watcher(struct ebt_entry_watcher *w, struct xt_tgchk_param *par,
390 const struct ebt_entry *e = par->entryinfo;
391 struct xt_target *watcher;
392 size_t left = ((char *)e + e->target_offset) - (char *)w;
395 if (left < sizeof(struct ebt_entry_watcher) ||
396 left - sizeof(struct ebt_entry_watcher) < w->watcher_size)
399 watcher = try_then_request_module(
400 xt_find_target(NFPROTO_BRIDGE, w->u.name, 0),
401 "ebt_%s", w->u.name);
403 return PTR_ERR(watcher);
406 w->u.watcher = watcher;
408 par->target = watcher;
409 par->targinfo = w->data;
410 ret = xt_check_target(par, w->watcher_size,
411 e->ethproto, e->invflags & EBT_IPROTO);
413 module_put(watcher->me);
421 static int ebt_verify_pointers(const struct ebt_replace *repl,
422 struct ebt_table_info *newinfo)
424 unsigned int limit = repl->entries_size;
425 unsigned int valid_hooks = repl->valid_hooks;
426 unsigned int offset = 0;
429 for (i = 0; i < NF_BR_NUMHOOKS; i++)
430 newinfo->hook_entry[i] = NULL;
432 newinfo->entries_size = repl->entries_size;
433 newinfo->nentries = repl->nentries;
435 while (offset < limit) {
436 size_t left = limit - offset;
437 struct ebt_entry *e = (void *)newinfo->entries + offset;
439 if (left < sizeof(unsigned int))
442 for (i = 0; i < NF_BR_NUMHOOKS; i++) {
443 if ((valid_hooks & (1 << i)) == 0)
445 if ((char __user *)repl->hook_entry[i] ==
446 repl->entries + offset)
450 if (i != NF_BR_NUMHOOKS || !(e->bitmask & EBT_ENTRY_OR_ENTRIES)) {
451 if (e->bitmask != 0) {
452 /* we make userspace set this right,
453 so there is no misunderstanding */
454 BUGPRINT("EBT_ENTRY_OR_ENTRIES shouldn't be set "
455 "in distinguisher\n");
458 if (i != NF_BR_NUMHOOKS)
459 newinfo->hook_entry[i] = (struct ebt_entries *)e;
460 if (left < sizeof(struct ebt_entries))
462 offset += sizeof(struct ebt_entries);
464 if (left < sizeof(struct ebt_entry))
466 if (left < e->next_offset)
468 if (e->next_offset < sizeof(struct ebt_entry))
470 offset += e->next_offset;
473 if (offset != limit) {
474 BUGPRINT("entries_size too small\n");
478 /* check if all valid hooks have a chain */
479 for (i = 0; i < NF_BR_NUMHOOKS; i++) {
480 if (!newinfo->hook_entry[i] &&
481 (valid_hooks & (1 << i))) {
482 BUGPRINT("Valid hook without chain\n");
490 * this one is very careful, as it is the first function
491 * to parse the userspace data
494 ebt_check_entry_size_and_hooks(const struct ebt_entry *e,
495 const struct ebt_table_info *newinfo,
496 unsigned int *n, unsigned int *cnt,
497 unsigned int *totalcnt, unsigned int *udc_cnt)
501 for (i = 0; i < NF_BR_NUMHOOKS; i++) {
502 if ((void *)e == (void *)newinfo->hook_entry[i])
505 /* beginning of a new chain
506 if i == NF_BR_NUMHOOKS it must be a user defined chain */
507 if (i != NF_BR_NUMHOOKS || !e->bitmask) {
508 /* this checks if the previous chain has as many entries
511 BUGPRINT("nentries does not equal the nr of entries "
515 if (((struct ebt_entries *)e)->policy != EBT_DROP &&
516 ((struct ebt_entries *)e)->policy != EBT_ACCEPT) {
517 /* only RETURN from udc */
518 if (i != NF_BR_NUMHOOKS ||
519 ((struct ebt_entries *)e)->policy != EBT_RETURN) {
520 BUGPRINT("bad policy\n");
524 if (i == NF_BR_NUMHOOKS) /* it's a user defined chain */
526 if (((struct ebt_entries *)e)->counter_offset != *totalcnt) {
527 BUGPRINT("counter_offset != totalcnt");
530 *n = ((struct ebt_entries *)e)->nentries;
534 /* a plain old entry, heh */
535 if (sizeof(struct ebt_entry) > e->watchers_offset ||
536 e->watchers_offset > e->target_offset ||
537 e->target_offset >= e->next_offset) {
538 BUGPRINT("entry offsets not in right order\n");
541 /* this is not checked anywhere else */
542 if (e->next_offset - e->target_offset < sizeof(struct ebt_entry_target)) {
543 BUGPRINT("target size too small\n");
553 struct ebt_chainstack cs;
555 unsigned int hookmask;
559 * we need these positions to check that the jumps to a different part of the
560 * entries is a jump to the beginning of a new chain.
563 ebt_get_udc_positions(struct ebt_entry *e, struct ebt_table_info *newinfo,
564 unsigned int *n, struct ebt_cl_stack *udc)
568 /* we're only interested in chain starts */
571 for (i = 0; i < NF_BR_NUMHOOKS; i++) {
572 if (newinfo->hook_entry[i] == (struct ebt_entries *)e)
575 /* only care about udc */
576 if (i != NF_BR_NUMHOOKS)
579 udc[*n].cs.chaininfo = (struct ebt_entries *)e;
580 /* these initialisations are depended on later in check_chainloops() */
582 udc[*n].hookmask = 0;
589 ebt_cleanup_match(struct ebt_entry_match *m, struct net *net, unsigned int *i)
591 struct xt_mtdtor_param par;
593 if (i && (*i)-- == 0)
597 par.match = m->u.match;
598 par.matchinfo = m->data;
599 par.family = NFPROTO_BRIDGE;
600 if (par.match->destroy != NULL)
601 par.match->destroy(&par);
602 module_put(par.match->me);
607 ebt_cleanup_watcher(struct ebt_entry_watcher *w, struct net *net, unsigned int *i)
609 struct xt_tgdtor_param par;
611 if (i && (*i)-- == 0)
615 par.target = w->u.watcher;
616 par.targinfo = w->data;
617 par.family = NFPROTO_BRIDGE;
618 if (par.target->destroy != NULL)
619 par.target->destroy(&par);
620 module_put(par.target->me);
625 ebt_cleanup_entry(struct ebt_entry *e, struct net *net, unsigned int *cnt)
627 struct xt_tgdtor_param par;
628 struct ebt_entry_target *t;
633 if (cnt && (*cnt)-- == 0)
635 EBT_WATCHER_ITERATE(e, ebt_cleanup_watcher, net, NULL);
636 EBT_MATCH_ITERATE(e, ebt_cleanup_match, net, NULL);
637 t = (struct ebt_entry_target *)(((char *)e) + e->target_offset);
640 par.target = t->u.target;
641 par.targinfo = t->data;
642 par.family = NFPROTO_BRIDGE;
643 if (par.target->destroy != NULL)
644 par.target->destroy(&par);
645 module_put(par.target->me);
650 ebt_check_entry(struct ebt_entry *e, struct net *net,
651 const struct ebt_table_info *newinfo,
652 const char *name, unsigned int *cnt,
653 struct ebt_cl_stack *cl_s, unsigned int udc_cnt)
655 struct ebt_entry_target *t;
656 struct xt_target *target;
657 unsigned int i, j, hook = 0, hookmask = 0;
660 struct xt_mtchk_param mtpar;
661 struct xt_tgchk_param tgpar;
663 /* don't mess with the struct ebt_entries */
667 if (e->bitmask & ~EBT_F_MASK) {
668 BUGPRINT("Unknown flag for bitmask\n");
671 if (e->invflags & ~EBT_INV_MASK) {
672 BUGPRINT("Unknown flag for inv bitmask\n");
675 if ( (e->bitmask & EBT_NOPROTO) && (e->bitmask & EBT_802_3) ) {
676 BUGPRINT("NOPROTO & 802_3 not allowed\n");
679 /* what hook do we belong to? */
680 for (i = 0; i < NF_BR_NUMHOOKS; i++) {
681 if (!newinfo->hook_entry[i])
683 if ((char *)newinfo->hook_entry[i] < (char *)e)
688 /* (1 << NF_BR_NUMHOOKS) tells the check functions the rule is on
690 if (i < NF_BR_NUMHOOKS)
691 hookmask = (1 << hook) | (1 << NF_BR_NUMHOOKS);
693 for (i = 0; i < udc_cnt; i++)
694 if ((char *)(cl_s[i].cs.chaininfo) > (char *)e)
697 hookmask = (1 << hook) | (1 << NF_BR_NUMHOOKS);
699 hookmask = cl_s[i - 1].hookmask;
703 mtpar.net = tgpar.net = net;
704 mtpar.table = tgpar.table = name;
705 mtpar.entryinfo = tgpar.entryinfo = e;
706 mtpar.hook_mask = tgpar.hook_mask = hookmask;
707 mtpar.family = tgpar.family = NFPROTO_BRIDGE;
708 ret = EBT_MATCH_ITERATE(e, ebt_check_match, &mtpar, &i);
710 goto cleanup_matches;
712 ret = EBT_WATCHER_ITERATE(e, ebt_check_watcher, &tgpar, &j);
714 goto cleanup_watchers;
715 t = (struct ebt_entry_target *)(((char *)e) + e->target_offset);
716 gap = e->next_offset - e->target_offset;
718 target = try_then_request_module(
719 xt_find_target(NFPROTO_BRIDGE, t->u.name, 0),
720 "ebt_%s", t->u.name);
721 if (IS_ERR(target)) {
722 ret = PTR_ERR(target);
723 goto cleanup_watchers;
724 } else if (target == NULL) {
726 goto cleanup_watchers;
729 t->u.target = target;
730 if (t->u.target == &ebt_standard_target) {
731 if (gap < sizeof(struct ebt_standard_target)) {
732 BUGPRINT("Standard target size too big\n");
734 goto cleanup_watchers;
736 if (((struct ebt_standard_target *)t)->verdict <
737 -NUM_STANDARD_TARGETS) {
738 BUGPRINT("Invalid standard target\n");
740 goto cleanup_watchers;
742 } else if (t->target_size > gap - sizeof(struct ebt_entry_target)) {
743 module_put(t->u.target->me);
745 goto cleanup_watchers;
748 tgpar.target = target;
749 tgpar.targinfo = t->data;
750 ret = xt_check_target(&tgpar, t->target_size,
751 e->ethproto, e->invflags & EBT_IPROTO);
753 module_put(target->me);
754 goto cleanup_watchers;
759 EBT_WATCHER_ITERATE(e, ebt_cleanup_watcher, net, &j);
761 EBT_MATCH_ITERATE(e, ebt_cleanup_match, net, &i);
766 * checks for loops and sets the hook mask for udc
767 * the hook mask for udc tells us from which base chains the udc can be
768 * accessed. This mask is a parameter to the check() functions of the extensions
770 static int check_chainloops(const struct ebt_entries *chain, struct ebt_cl_stack *cl_s,
771 unsigned int udc_cnt, unsigned int hooknr, char *base)
773 int i, chain_nr = -1, pos = 0, nentries = chain->nentries, verdict;
774 const struct ebt_entry *e = (struct ebt_entry *)chain->data;
775 const struct ebt_entry_target *t;
777 while (pos < nentries || chain_nr != -1) {
778 /* end of udc, go back one 'recursion' step */
779 if (pos == nentries) {
780 /* put back values of the time when this chain was called */
781 e = cl_s[chain_nr].cs.e;
782 if (cl_s[chain_nr].from != -1)
784 cl_s[cl_s[chain_nr].from].cs.chaininfo->nentries;
786 nentries = chain->nentries;
787 pos = cl_s[chain_nr].cs.n;
788 /* make sure we won't see a loop that isn't one */
789 cl_s[chain_nr].cs.n = 0;
790 chain_nr = cl_s[chain_nr].from;
794 t = (struct ebt_entry_target *)
795 (((char *)e) + e->target_offset);
796 if (strcmp(t->u.name, EBT_STANDARD_TARGET))
798 if (e->target_offset + sizeof(struct ebt_standard_target) >
800 BUGPRINT("Standard target size too big\n");
803 verdict = ((struct ebt_standard_target *)t)->verdict;
804 if (verdict >= 0) { /* jump to another chain */
805 struct ebt_entries *hlp2 =
806 (struct ebt_entries *)(base + verdict);
807 for (i = 0; i < udc_cnt; i++)
808 if (hlp2 == cl_s[i].cs.chaininfo)
810 /* bad destination or loop */
812 BUGPRINT("bad destination\n");
819 if (cl_s[i].hookmask & (1 << hooknr))
821 /* this can't be 0, so the loop test is correct */
822 cl_s[i].cs.n = pos + 1;
824 cl_s[i].cs.e = ebt_next_entry(e);
825 e = (struct ebt_entry *)(hlp2->data);
826 nentries = hlp2->nentries;
827 cl_s[i].from = chain_nr;
829 /* this udc is accessible from the base chain for hooknr */
830 cl_s[i].hookmask |= (1 << hooknr);
834 e = ebt_next_entry(e);
840 /* do the parsing of the table/chains/entries/matches/watchers/targets, heh */
841 static int translate_table(struct net *net, const char *name,
842 struct ebt_table_info *newinfo)
844 unsigned int i, j, k, udc_cnt;
846 struct ebt_cl_stack *cl_s = NULL; /* used in the checking for chain loops */
849 while (i < NF_BR_NUMHOOKS && !newinfo->hook_entry[i])
851 if (i == NF_BR_NUMHOOKS) {
852 BUGPRINT("No valid hooks specified\n");
855 if (newinfo->hook_entry[i] != (struct ebt_entries *)newinfo->entries) {
856 BUGPRINT("Chains don't start at beginning\n");
859 /* make sure chains are ordered after each other in same order
860 as their corresponding hooks */
861 for (j = i + 1; j < NF_BR_NUMHOOKS; j++) {
862 if (!newinfo->hook_entry[j])
864 if (newinfo->hook_entry[j] <= newinfo->hook_entry[i]) {
865 BUGPRINT("Hook order must be followed\n");
871 /* do some early checkings and initialize some things */
872 i = 0; /* holds the expected nr. of entries for the chain */
873 j = 0; /* holds the up to now counted entries for the chain */
874 k = 0; /* holds the total nr. of entries, should equal
875 newinfo->nentries afterwards */
876 udc_cnt = 0; /* will hold the nr. of user defined chains (udc) */
877 ret = EBT_ENTRY_ITERATE(newinfo->entries, newinfo->entries_size,
878 ebt_check_entry_size_and_hooks, newinfo,
879 &i, &j, &k, &udc_cnt);
885 BUGPRINT("nentries does not equal the nr of entries in the "
889 if (k != newinfo->nentries) {
890 BUGPRINT("Total nentries is wrong\n");
894 /* get the location of the udc, put them in an array
895 while we're at it, allocate the chainstack */
897 /* this will get free'd in do_replace()/ebt_register_table()
898 if an error occurs */
899 newinfo->chainstack =
900 vmalloc(nr_cpu_ids * sizeof(*(newinfo->chainstack)));
901 if (!newinfo->chainstack)
903 for_each_possible_cpu(i) {
904 newinfo->chainstack[i] =
905 vmalloc(udc_cnt * sizeof(*(newinfo->chainstack[0])));
906 if (!newinfo->chainstack[i]) {
908 vfree(newinfo->chainstack[--i]);
909 vfree(newinfo->chainstack);
910 newinfo->chainstack = NULL;
915 cl_s = vmalloc(udc_cnt * sizeof(*cl_s));
918 i = 0; /* the i'th udc */
919 EBT_ENTRY_ITERATE(newinfo->entries, newinfo->entries_size,
920 ebt_get_udc_positions, newinfo, &i, cl_s);
923 BUGPRINT("i != udc_cnt\n");
929 /* Check for loops */
930 for (i = 0; i < NF_BR_NUMHOOKS; i++)
931 if (newinfo->hook_entry[i])
932 if (check_chainloops(newinfo->hook_entry[i],
933 cl_s, udc_cnt, i, newinfo->entries)) {
938 /* we now know the following (along with E=mc²):
939 - the nr of entries in each chain is right
940 - the size of the allocated space is right
941 - all valid hooks have a corresponding chain
943 - wrong data can still be on the level of a single entry
944 - could be there are jumps to places that are not the
945 beginning of a chain. This can only occur in chains that
946 are not accessible from any base chains, so we don't care. */
948 /* used to know what we need to clean up if something goes wrong */
950 ret = EBT_ENTRY_ITERATE(newinfo->entries, newinfo->entries_size,
951 ebt_check_entry, net, newinfo, name, &i, cl_s, udc_cnt);
953 EBT_ENTRY_ITERATE(newinfo->entries, newinfo->entries_size,
954 ebt_cleanup_entry, net, &i);
960 /* called under write_lock */
961 static void get_counters(const struct ebt_counter *oldcounters,
962 struct ebt_counter *counters, unsigned int nentries)
965 struct ebt_counter *counter_base;
967 /* counters of cpu 0 */
968 memcpy(counters, oldcounters,
969 sizeof(struct ebt_counter) * nentries);
971 /* add other counters to those of cpu 0 */
972 for_each_possible_cpu(cpu) {
975 counter_base = COUNTER_BASE(oldcounters, nentries, cpu);
976 for (i = 0; i < nentries; i++) {
977 counters[i].pcnt += counter_base[i].pcnt;
978 counters[i].bcnt += counter_base[i].bcnt;
983 static int do_replace_finish(struct net *net, struct ebt_replace *repl,
984 struct ebt_table_info *newinfo)
987 struct ebt_counter *counterstmp = NULL;
988 /* used to be able to unlock earlier */
989 struct ebt_table_info *table;
992 /* the user wants counters back
993 the check on the size is done later, when we have the lock */
994 if (repl->num_counters) {
995 unsigned long size = repl->num_counters * sizeof(*counterstmp);
996 counterstmp = vmalloc(size);
1001 newinfo->chainstack = NULL;
1002 ret = ebt_verify_pointers(repl, newinfo);
1004 goto free_counterstmp;
1006 ret = translate_table(net, repl->name, newinfo);
1009 goto free_counterstmp;
1011 t = find_table_lock(net, repl->name, &ret, &ebt_mutex);
1017 /* the table doesn't like it */
1018 if (t->check && (ret = t->check(newinfo, repl->valid_hooks)))
1021 if (repl->num_counters && repl->num_counters != t->private->nentries) {
1022 BUGPRINT("Wrong nr. of counters requested\n");
1027 /* we have the mutex lock, so no danger in reading this pointer */
1029 /* make sure the table can only be rmmod'ed if it contains no rules */
1030 if (!table->nentries && newinfo->nentries && !try_module_get(t->me)) {
1033 } else if (table->nentries && !newinfo->nentries)
1035 /* we need an atomic snapshot of the counters */
1036 write_lock_bh(&t->lock);
1037 if (repl->num_counters)
1038 get_counters(t->private->counters, counterstmp,
1039 t->private->nentries);
1041 t->private = newinfo;
1042 write_unlock_bh(&t->lock);
1043 mutex_unlock(&ebt_mutex);
1044 /* so, a user can change the chains while having messed up her counter
1045 allocation. Only reason why this is done is because this way the lock
1046 is held only once, while this doesn't bring the kernel into a
1048 if (repl->num_counters &&
1049 copy_to_user(repl->counters, counterstmp,
1050 repl->num_counters * sizeof(struct ebt_counter))) {
1056 /* decrease module count and free resources */
1057 EBT_ENTRY_ITERATE(table->entries, table->entries_size,
1058 ebt_cleanup_entry, net, NULL);
1060 vfree(table->entries);
1061 if (table->chainstack) {
1062 for_each_possible_cpu(i)
1063 vfree(table->chainstack[i]);
1064 vfree(table->chainstack);
1072 mutex_unlock(&ebt_mutex);
1074 EBT_ENTRY_ITERATE(newinfo->entries, newinfo->entries_size,
1075 ebt_cleanup_entry, net, NULL);
1078 /* can be initialized in translate_table() */
1079 if (newinfo->chainstack) {
1080 for_each_possible_cpu(i)
1081 vfree(newinfo->chainstack[i]);
1082 vfree(newinfo->chainstack);
1087 /* replace the table */
1088 static int do_replace(struct net *net, const void __user *user,
1091 int ret, countersize;
1092 struct ebt_table_info *newinfo;
1093 struct ebt_replace tmp;
1095 if (copy_from_user(&tmp, user, sizeof(tmp)) != 0)
1098 if (len != sizeof(tmp) + tmp.entries_size) {
1099 BUGPRINT("Wrong len argument\n");
1103 if (tmp.entries_size == 0) {
1104 BUGPRINT("Entries_size never zero\n");
1107 /* overflow check */
1108 if (tmp.nentries >= ((INT_MAX - sizeof(struct ebt_table_info)) /
1109 NR_CPUS - SMP_CACHE_BYTES) / sizeof(struct ebt_counter))
1111 if (tmp.num_counters >= INT_MAX / sizeof(struct ebt_counter))
1114 countersize = COUNTER_OFFSET(tmp.nentries) * nr_cpu_ids;
1115 newinfo = vmalloc(sizeof(*newinfo) + countersize);
1120 memset(newinfo->counters, 0, countersize);
1122 newinfo->entries = vmalloc(tmp.entries_size);
1123 if (!newinfo->entries) {
1128 newinfo->entries, tmp.entries, tmp.entries_size) != 0) {
1129 BUGPRINT("Couldn't copy entries from userspace\n");
1134 ret = do_replace_finish(net, &tmp, newinfo);
1138 vfree(newinfo->entries);
1145 ebt_register_table(struct net *net, const struct ebt_table *input_table)
1147 struct ebt_table_info *newinfo;
1148 struct ebt_table *t, *table;
1149 struct ebt_replace_kernel *repl;
1150 int ret, i, countersize;
1153 if (input_table == NULL || (repl = input_table->table) == NULL ||
1154 repl->entries == 0 || repl->entries_size == 0 ||
1155 repl->counters != NULL || input_table->private != NULL) {
1156 BUGPRINT("Bad table data for ebt_register_table!!!\n");
1157 return ERR_PTR(-EINVAL);
1160 /* Don't add one table to multiple lists. */
1161 table = kmemdup(input_table, sizeof(struct ebt_table), GFP_KERNEL);
1167 countersize = COUNTER_OFFSET(repl->nentries) * nr_cpu_ids;
1168 newinfo = vmalloc(sizeof(*newinfo) + countersize);
1173 p = vmalloc(repl->entries_size);
1177 memcpy(p, repl->entries, repl->entries_size);
1178 newinfo->entries = p;
1180 newinfo->entries_size = repl->entries_size;
1181 newinfo->nentries = repl->nentries;
1184 memset(newinfo->counters, 0, countersize);
1186 /* fill in newinfo and parse the entries */
1187 newinfo->chainstack = NULL;
1188 for (i = 0; i < NF_BR_NUMHOOKS; i++) {
1189 if ((repl->valid_hooks & (1 << i)) == 0)
1190 newinfo->hook_entry[i] = NULL;
1192 newinfo->hook_entry[i] = p +
1193 ((char *)repl->hook_entry[i] - repl->entries);
1195 ret = translate_table(net, repl->name, newinfo);
1197 BUGPRINT("Translate_table failed\n");
1198 goto free_chainstack;
1201 if (table->check && table->check(newinfo, table->valid_hooks)) {
1202 BUGPRINT("The table doesn't like its own initial data, lol\n");
1203 return ERR_PTR(-EINVAL);
1206 table->private = newinfo;
1207 rwlock_init(&table->lock);
1208 ret = mutex_lock_interruptible(&ebt_mutex);
1210 goto free_chainstack;
1212 list_for_each_entry(t, &net->xt.tables[NFPROTO_BRIDGE], list) {
1213 if (strcmp(t->name, table->name) == 0) {
1215 BUGPRINT("Table name already exists\n");
1220 /* Hold a reference count if the chains aren't empty */
1221 if (newinfo->nentries && !try_module_get(table->me)) {
1225 list_add(&table->list, &net->xt.tables[NFPROTO_BRIDGE]);
1226 mutex_unlock(&ebt_mutex);
1229 mutex_unlock(&ebt_mutex);
1231 if (newinfo->chainstack) {
1232 for_each_possible_cpu(i)
1233 vfree(newinfo->chainstack[i]);
1234 vfree(newinfo->chainstack);
1236 vfree(newinfo->entries);
1242 return ERR_PTR(ret);
1245 void ebt_unregister_table(struct net *net, struct ebt_table *table)
1250 BUGPRINT("Request to unregister NULL table!!!\n");
1253 mutex_lock(&ebt_mutex);
1254 list_del(&table->list);
1255 mutex_unlock(&ebt_mutex);
1256 EBT_ENTRY_ITERATE(table->private->entries, table->private->entries_size,
1257 ebt_cleanup_entry, net, NULL);
1258 if (table->private->nentries)
1259 module_put(table->me);
1260 vfree(table->private->entries);
1261 if (table->private->chainstack) {
1262 for_each_possible_cpu(i)
1263 vfree(table->private->chainstack[i]);
1264 vfree(table->private->chainstack);
1266 vfree(table->private);
1270 /* userspace just supplied us with counters */
1271 static int do_update_counters(struct net *net, const char *name,
1272 struct ebt_counter __user *counters,
1273 unsigned int num_counters,
1274 const void __user *user, unsigned int len)
1277 struct ebt_counter *tmp;
1278 struct ebt_table *t;
1280 if (num_counters == 0)
1283 tmp = vmalloc(num_counters * sizeof(*tmp));
1287 t = find_table_lock(net, name, &ret, &ebt_mutex);
1291 if (num_counters != t->private->nentries) {
1292 BUGPRINT("Wrong nr of counters\n");
1297 if (copy_from_user(tmp, counters, num_counters * sizeof(*counters))) {
1302 /* we want an atomic add of the counters */
1303 write_lock_bh(&t->lock);
1305 /* we add to the counters of the first cpu */
1306 for (i = 0; i < num_counters; i++) {
1307 t->private->counters[i].pcnt += tmp[i].pcnt;
1308 t->private->counters[i].bcnt += tmp[i].bcnt;
1311 write_unlock_bh(&t->lock);
1314 mutex_unlock(&ebt_mutex);
1320 static int update_counters(struct net *net, const void __user *user,
1323 struct ebt_replace hlp;
1325 if (copy_from_user(&hlp, user, sizeof(hlp)))
1328 if (len != sizeof(hlp) + hlp.num_counters * sizeof(struct ebt_counter))
1331 return do_update_counters(net, hlp.name, hlp.counters,
1332 hlp.num_counters, user, len);
1335 static inline int ebt_make_matchname(const struct ebt_entry_match *m,
1336 const char *base, char __user *ubase)
1338 char __user *hlp = ubase + ((char *)m - base);
1339 if (copy_to_user(hlp, m->u.match->name, EBT_FUNCTION_MAXNAMELEN))
1344 static inline int ebt_make_watchername(const struct ebt_entry_watcher *w,
1345 const char *base, char __user *ubase)
1347 char __user *hlp = ubase + ((char *)w - base);
1348 if (copy_to_user(hlp , w->u.watcher->name, EBT_FUNCTION_MAXNAMELEN))
1354 ebt_make_names(struct ebt_entry *e, const char *base, char __user *ubase)
1358 const struct ebt_entry_target *t;
1360 if (e->bitmask == 0)
1363 hlp = ubase + (((char *)e + e->target_offset) - base);
1364 t = (struct ebt_entry_target *)(((char *)e) + e->target_offset);
1366 ret = EBT_MATCH_ITERATE(e, ebt_make_matchname, base, ubase);
1369 ret = EBT_WATCHER_ITERATE(e, ebt_make_watchername, base, ubase);
1372 if (copy_to_user(hlp, t->u.target->name, EBT_FUNCTION_MAXNAMELEN))
/* Snapshot the table's counters and copy them to userspace.
 * num_counters == 0 means the caller does not want counters at all;
 * otherwise it must equal the table's entry count.  The per-cpu counter
 * sets (see SMP comment at top of file) are summed into a temporary
 * kernel buffer under the table write lock, so the packet path cannot
 * update them mid-copy.
 */
1377 static int copy_counters_to_user(struct ebt_table *t,
1378 const struct ebt_counter *oldcounters,
1379 void __user *user, unsigned int num_counters,
1380 unsigned int nentries)
1382 struct ebt_counter *counterstmp;
1385 /* userspace might not need the counters */
1386 if (num_counters == 0)
1389 if (num_counters != nentries) {
1390 BUGPRINT("Num_counters wrong\n");
/* stage the summed counters in kernel memory first */
1394 counterstmp = vmalloc(nentries * sizeof(*counterstmp));
/* block counter updates while get_counters() folds the per-cpu sets */
1398 write_lock_bh(&t->lock);
1399 get_counters(oldcounters, counterstmp, nentries);
1400 write_unlock_bh(&t->lock);
1402 if (copy_to_user(user, counterstmp,
1403 nentries * sizeof(struct ebt_counter)))
1409 /* called with ebt_mutex locked */
/* Handle EBT_SO_GET_ENTRIES / EBT_SO_GET_INIT_ENTRIES: copy counters,
 * the entries blob, and the extension names out to userspace.
 * GET_ENTRIES reads the live ruleset (t->private); otherwise the
 * as-registered template (t->table) is used.  The user-supplied sizes
 * must match the kernel's exactly or the request is rejected.
 */
1410 static int copy_everything_to_user(struct ebt_table *t, void __user *user,
1411 const int *len, int cmd)
1413 struct ebt_replace tmp;
1414 const struct ebt_counter *oldcounters;
1415 unsigned int entries_size, nentries;
1419 if (cmd == EBT_SO_GET_ENTRIES) {
1420 entries_size = t->private->entries_size;
1421 nentries = t->private->nentries;
1422 entries = t->private->entries;
1423 oldcounters = t->private->counters;
1425 entries_size = t->table->entries_size;
1426 nentries = t->table->nentries;
1427 entries = t->table->entries;
1428 oldcounters = t->table->counters;
1431 if (copy_from_user(&tmp, user, sizeof(tmp)))
/* *len must cover header + entries (+ counters only when requested) */
1434 if (*len != sizeof(struct ebt_replace) + entries_size +
1435 (tmp.num_counters? nentries * sizeof(struct ebt_counter): 0))
1438 if (tmp.nentries != nentries) {
1439 BUGPRINT("Nentries wrong\n");
1443 if (tmp.entries_size != entries_size) {
1444 BUGPRINT("Wrong size\n");
1448 ret = copy_counters_to_user(t, oldcounters, tmp.counters,
1449 tmp.num_counters, nentries);
1453 if (copy_to_user(tmp.entries, entries, entries_size)) {
1454 BUGPRINT("Couldn't copy entries to userspace\n");
1457 /* set the match/watcher/target names right */
1458 return EBT_ENTRY_ITERATE(entries, entries_size,
1459 ebt_make_names, entries, tmp.entries);
/* setsockopt() dispatcher for ebtables: requires CAP_NET_ADMIN and
 * routes EBT_SO_SET_* commands to the table-replace or counter-update
 * handlers, resolving the target netns from the calling socket.
 */
1462 static int do_ebt_set_ctl(struct sock *sk,
1463 int cmd, void __user *user, unsigned int len)
1467 if (!capable(CAP_NET_ADMIN))
1471 case EBT_SO_SET_ENTRIES:
1472 ret = do_replace(sock_net(sk), user, len);
1474 case EBT_SO_SET_COUNTERS:
1475 ret = update_counters(sock_net(sk), user, len);
/* getsockopt() dispatcher for ebtables.  Looks the named table up under
 * ebt_mutex (find_table_lock leaves the mutex held) and answers:
 *   EBT_SO_GET_(INIT_)INFO    - sizes/hooks only, into an ebt_replace
 *   EBT_SO_GET_(INIT_)ENTRIES - the full ruleset via
 *                               copy_everything_to_user()
 * The INIT_ variants report the as-registered table (t->table) rather
 * than the live ruleset (t->private).
 */
1483 static int do_ebt_get_ctl(struct sock *sk, int cmd, void __user *user, int *len)
1486 struct ebt_replace tmp;
1487 struct ebt_table *t;
1489 if (!capable(CAP_NET_ADMIN))
1492 if (copy_from_user(&tmp, user, sizeof(tmp)))
1495 t = find_table_lock(sock_net(sk), tmp.name, &ret, &ebt_mutex);
1500 case EBT_SO_GET_INFO:
1501 case EBT_SO_GET_INIT_INFO:
1502 if (*len != sizeof(struct ebt_replace)){
1504 mutex_unlock(&ebt_mutex);
1507 if (cmd == EBT_SO_GET_INFO) {
1508 tmp.nentries = t->private->nentries;
1509 tmp.entries_size = t->private->entries_size;
1510 tmp.valid_hooks = t->valid_hooks;
1512 tmp.nentries = t->table->nentries;
1513 tmp.entries_size = t->table->entries_size;
1514 tmp.valid_hooks = t->table->valid_hooks;
/* drop the mutex before the (possibly faulting) copy back to userspace */
1516 mutex_unlock(&ebt_mutex);
1517 if (copy_to_user(user, &tmp, *len) != 0){
1518 BUGPRINT("c2u Didn't work\n");
1525 case EBT_SO_GET_ENTRIES:
1526 case EBT_SO_GET_INIT_ENTRIES:
1527 ret = copy_everything_to_user(t, user, len, cmd);
1528 mutex_unlock(&ebt_mutex);
1532 mutex_unlock(&ebt_mutex);
1539 #ifdef CONFIG_COMPAT
1540 /* 32 bit-userspace compatibility definitions. */
/* 32-bit layout of struct ebt_replace: identical field order, but all
 * userspace pointers are compat_uptr_t (32-bit) instead of native
 * pointers, so sizes and offsets differ from the kernel struct.
 */
1541 struct compat_ebt_replace {
1542 char name[EBT_TABLE_MAXNAMELEN];
1543 compat_uint_t valid_hooks;
1544 compat_uint_t nentries;
1545 compat_uint_t entries_size;
1546 /* start of the chains */
1547 compat_uptr_t hook_entry[NF_BR_NUMHOOKS];
1548 /* nr of counters userspace expects back */
1549 compat_uint_t num_counters;
1550 /* where the kernel will put the old counters. */
1551 compat_uptr_t counters;
1552 compat_uptr_t entries;
1555 /* struct ebt_entry_match, _target and _watcher have same layout */
/* 32-bit on-the-wire header preceding each match/watcher/target blob:
 * extension name, payload size, then the payload itself (flexible
 * array).
 */
1556 struct compat_ebt_entry_mwt {
1558 char name[EBT_FUNCTION_MAXNAMELEN];
1561 compat_uint_t match_size;
1562 compat_uint_t data[0];
1565 /* account for possible padding between match_size and ->data */
/* Returns how many bytes larger the kernel (64-bit-aligned)
 * match/watcher/target header is than the 32-bit compat header; the
 * BUILD_BUG_ON guarantees the difference is never negative.
 */
1566 static int ebt_compat_entry_padsize(void)
1568 BUILD_BUG_ON(XT_ALIGN(sizeof(struct ebt_entry_match)) <
1569 COMPAT_XT_ALIGN(sizeof(struct compat_ebt_entry_mwt)));
1570 return (int) XT_ALIGN(sizeof(struct ebt_entry_match)) -
1571 COMPAT_XT_ALIGN(sizeof(struct compat_ebt_entry_mwt));
/* Size delta between the compat (32-bit) and kernel payload of a match.
 * Normally delegated to the generic x_tables helper; ebt_among is the
 * exception documented below because its matchsize is dynamic.
 */
1574 static int ebt_compat_match_offset(const struct xt_match *match,
1575 unsigned int userlen)
1578 * ebt_among needs special handling. The kernel .matchsize is
1579 * set to -1 at registration time; at runtime an EBT_ALIGN()ed
1580 * value is expected.
1581 * Example: userspace sends 4500, ebt_among.c wants 4504.
1583 if (unlikely(match->matchsize == -1))
1584 return XT_ALIGN(userlen) - COMPAT_XT_ALIGN(userlen);
1585 return xt_compat_match_offset(match);
/* Convert one kernel match blob to compat layout at *dstptr: write the
 * extension name and the shrunken size (msize), then let the extension
 * translate its payload (compat_to_user) or memcpy it unchanged.
 * *size is reduced by the header padding plus the payload delta.
 */
1588 static int compat_match_to_user(struct ebt_entry_match *m, void __user **dstptr,
1591 const struct xt_match *match = m->u.match;
1592 struct compat_ebt_entry_mwt __user *cm = *dstptr;
1593 int off = ebt_compat_match_offset(match, m->match_size);
1594 compat_uint_t msize = m->match_size - off;
1596 BUG_ON(off >= m->match_size);
1598 if (copy_to_user(cm->u.name, match->name,
1599 strlen(match->name) + 1) || put_user(msize, &cm->match_size))
/* extension-specific 64->32 payload translation, if provided */
1602 if (match->compat_to_user) {
1603 if (match->compat_to_user(cm->data, m->data))
1605 } else if (copy_to_user(cm->data, m->data, msize))
1608 *size -= ebt_compat_entry_padsize() + off;
/* Counterpart of compat_match_to_user() for targets: shrink the blob by
 * the generic xt target offset, write name + compat size, then
 * translate or copy the payload.  Also reused for watchers, whose
 * layout matches targets (see compat_watcher_to_user).
 */
1614 static int compat_target_to_user(struct ebt_entry_target *t,
1615 void __user **dstptr,
1618 const struct xt_target *target = t->u.target;
1619 struct compat_ebt_entry_mwt __user *cm = *dstptr;
1620 int off = xt_compat_target_offset(target);
1621 compat_uint_t tsize = t->target_size - off;
1623 BUG_ON(off >= t->target_size);
1625 if (copy_to_user(cm->u.name, target->name,
1626 strlen(target->name) + 1) || put_user(tsize, &cm->match_size))
1629 if (target->compat_to_user) {
1630 if (target->compat_to_user(cm->data, t->data))
1632 } else if (copy_to_user(cm->data, t->data, tsize))
1635 *size -= ebt_compat_entry_padsize() + off;
/* Watchers share the ebt_entry_target layout, so reuse the target
 * converter via a cast.
 */
1641 static int compat_watcher_to_user(struct ebt_entry_watcher *w,
1642 void __user **dstptr,
1645 return compat_target_to_user((struct ebt_entry_target *)w,
/* Copy one ebt_entry (or chain header) to userspace in compat layout.
 * Chain headers (bitmask == 0) are copied verbatim; real entries are
 * copied and then their matches, watchers and target are each shrunk by
 * the per-extension compat offset.  Because the blob shrinks, the
 * entry's watchers/target/next offsets are recomputed from how much
 * smaller the output has become so far (origsize - *size) and patched
 * into the userspace copy at the end.
 */
1649 static int compat_copy_entry_to_user(struct ebt_entry *e, void __user **dstptr,
1652 struct ebt_entry_target *t;
1653 struct ebt_entry __user *ce;
1654 u32 watchers_offset, target_offset, next_offset;
1655 compat_uint_t origsize;
1658 if (e->bitmask == 0) {
1659 if (*size < sizeof(struct ebt_entries))
1661 if (copy_to_user(*dstptr, e, sizeof(struct ebt_entries)))
1664 *dstptr += sizeof(struct ebt_entries);
1665 *size -= sizeof(struct ebt_entries);
1669 if (*size < sizeof(*ce))
1672 ce = (struct ebt_entry __user *)*dstptr;
1673 if (copy_to_user(ce, e, sizeof(*ce)))
1677 *dstptr += sizeof(*ce);
1679 ret = EBT_MATCH_ITERATE(e, compat_match_to_user, dstptr, size);
/* offsets shrink by exactly the number of bytes saved so far */
1682 watchers_offset = e->watchers_offset - (origsize - *size);
1684 ret = EBT_WATCHER_ITERATE(e, compat_watcher_to_user, dstptr, size);
1687 target_offset = e->target_offset - (origsize - *size);
1689 t = (struct ebt_entry_target *) ((char *) e + e->target_offset);
1691 ret = compat_target_to_user(t, dstptr, size);
1694 next_offset = e->next_offset - (origsize - *size);
/* patch the adjusted offsets into the already-copied userspace entry */
1696 if (put_user(watchers_offset, &ce->watchers_offset) ||
1697 put_user(target_offset, &ce->target_offset) ||
1698 put_user(next_offset, &ce->next_offset))
1701 *size -= sizeof(*ce);
1705 static int compat_calc_match(struct ebt_entry_match *m, int *off)
1707 *off += ebt_compat_match_offset(m->u.match, m->match_size);
1708 *off += ebt_compat_entry_padsize();
1712 static int compat_calc_watcher(struct ebt_entry_watcher *w, int *off)
1714 *off += xt_compat_target_offset(w->u.watcher);
1715 *off += ebt_compat_entry_padsize();
/* For one kernel entry, compute 'off' = how many bytes the compat form
 * is smaller (matches + watchers + target deltas plus padding), record
 * the (entry_offset, off) pair in the x_tables compat jump table, and
 * shrink newinfo->entries_size plus any hook start offsets that lie
 * beyond this entry.
 */
1719 static int compat_calc_entry(const struct ebt_entry *e,
1720 const struct ebt_table_info *info,
1722 struct compat_ebt_replace *newinfo)
1724 const struct ebt_entry_target *t;
1725 unsigned int entry_offset;
/* chain headers are the same size in both layouts: nothing to adjust */
1728 if (e->bitmask == 0)
1732 entry_offset = (void *)e - base;
1734 EBT_MATCH_ITERATE(e, compat_calc_match, &off);
1735 EBT_WATCHER_ITERATE(e, compat_calc_watcher, &off);
1737 t = (const struct ebt_entry_target *) ((char *) e + e->target_offset);
1739 off += xt_compat_target_offset(t->u.target);
1740 off += ebt_compat_entry_padsize();
1742 newinfo->entries_size -= off;
1744 ret = xt_compat_add_offset(NFPROTO_BRIDGE, entry_offset, off);
1748 for (i = 0; i < NF_BR_NUMHOOKS; i++) {
1749 const void *hookptr = info->hook_entry[i];
/* NOTE(review): the (base - hookptr) pointer arithmetic below is
 * subtle; presumably it decides whether hook i starts after this
 * entry and therefore needs its offset shrunk -- verify against the
 * x_tables compat layer before changing.
 */
1750 if (info->hook_entry[i] &&
1751 (e < (struct ebt_entry *)(base - hookptr))) {
1752 newinfo->hook_entry[i] -= off;
1753 pr_debug("0x%08X -> 0x%08X\n",
1754 newinfo->hook_entry[i] + off,
1755 newinfo->hook_entry[i]);
/* Fill newinfo with the compat-sized view of a kernel table: start from
 * the kernel entries_size and let compat_calc_entry() subtract each
 * entry's shrinkage and fix up the hook offsets.
 */
1763 static int compat_table_info(const struct ebt_table_info *info,
1764 struct compat_ebt_replace *newinfo)
1766 unsigned int size = info->entries_size;
1767 const void *entries = info->entries;
1769 newinfo->entries_size = size;
1771 return EBT_ENTRY_ITERATE(entries, size, compat_calc_entry, info,
/* Compat counterpart of copy_everything_to_user(): answer
 * EBT_SO_GET_(INIT_)ENTRIES for a 32-bit caller.  First compute the
 * compat-shrunk sizes via compat_table_info(), validate them against
 * the user-supplied *len, copy counters, then stream every entry
 * through compat_copy_entry_to_user().
 */
1775 static int compat_copy_everything_to_user(struct ebt_table *t,
1776 void __user *user, int *len, int cmd)
1778 struct compat_ebt_replace repl, tmp;
1779 struct ebt_counter *oldcounters;
1780 struct ebt_table_info tinfo;
1784 memset(&tinfo, 0, sizeof(tinfo));
/* GET_ENTRIES reads the live ruleset, otherwise the registered template */
1786 if (cmd == EBT_SO_GET_ENTRIES) {
1787 tinfo.entries_size = t->private->entries_size;
1788 tinfo.nentries = t->private->nentries;
1789 tinfo.entries = t->private->entries;
1790 oldcounters = t->private->counters;
1792 tinfo.entries_size = t->table->entries_size;
1793 tinfo.nentries = t->table->nentries;
1794 tinfo.entries = t->table->entries;
1795 oldcounters = t->table->counters;
1798 if (copy_from_user(&tmp, user, sizeof(tmp)))
1801 if (tmp.nentries != tinfo.nentries ||
1802 (tmp.num_counters && tmp.num_counters != tinfo.nentries))
/* repl gets the compat-adjusted entries_size for the length check */
1805 memcpy(&repl, &tmp, sizeof(repl));
1806 if (cmd == EBT_SO_GET_ENTRIES)
1807 ret = compat_table_info(t->private, &repl);
1809 ret = compat_table_info(&tinfo, &repl);
1813 if (*len != sizeof(tmp) + repl.entries_size +
1814 (tmp.num_counters? tinfo.nentries * sizeof(struct ebt_counter): 0)) {
1815 pr_err("wrong size: *len %d, entries_size %u, replsz %d\n",
1816 *len, tinfo.entries_size, repl.entries_size);
1820 /* userspace might not need the counters */
1821 ret = copy_counters_to_user(t, oldcounters, compat_ptr(tmp.counters),
1822 tmp.num_counters, tinfo.nentries);
1826 pos = compat_ptr(tmp.entries);
1827 return EBT_ENTRY_ITERATE(tinfo.entries, tinfo.entries_size,
1828 compat_copy_entry_to_user, &pos, &tmp.entries_size);
/* Translation cursor used while expanding a 32-bit entries blob into
 * 64-bit kernel layout (see compat_copy_entries).  Kernel and user
 * offsets advance at different rates because the kernel form is larger;
 * with buf_kern_start == NULL the walkers only count sizes.
 */
1831 struct ebt_entries_buf_state {
1832 char *buf_kern_start; /* kernel buffer to copy (translated) data to */
1833 u32 buf_kern_len; /* total size of kernel buffer */
1834 u32 buf_kern_offset; /* amount of data copied so far */
1835 u32 buf_user_offset; /* read position in userspace buffer */
1838 static int ebt_buf_count(struct ebt_entries_buf_state *state, unsigned int sz)
1840 state->buf_kern_offset += sz;
1841 return state->buf_kern_offset >= sz ? 0 : -EINVAL;
/* Append sz bytes of translated data to the kernel buffer and advance
 * both cursors.  In the sizing pass (buf_kern_start == NULL) nothing is
 * copied, only the offsets are advanced.
 */
1844 static int ebt_buf_add(struct ebt_entries_buf_state *state,
1845 void *data, unsigned int sz)
1847 if (state->buf_kern_start == NULL)
/* second pass must fit: the first pass computed buf_kern_len */
1850 BUG_ON(state->buf_kern_offset + sz > state->buf_kern_len);
1852 memcpy(state->buf_kern_start + state->buf_kern_offset, data, sz);
/* user data was consumed, unlike ebt_buf_add_pad() */
1855 state->buf_user_offset += sz;
1856 return ebt_buf_count(state, sz);
1859 static int ebt_buf_add_pad(struct ebt_entries_buf_state *state, unsigned int sz)
1861 char *b = state->buf_kern_start;
1863 BUG_ON(b && state->buf_kern_offset > state->buf_kern_len);
1865 if (b != NULL && sz > 0)
1866 memset(b + state->buf_kern_offset, 0, sz);
1867 /* do not adjust ->buf_user_offset here, we added kernel-side padding */
1868 return ebt_buf_count(state, sz);
/* Translate one compat match/watcher/target payload into kernel layout
 * at the current buffer position: look the extension module up by name
 * (loading it on demand), run its compat_from_user hook or memcpy the
 * payload, zero-pad up to the kernel alignment, and register the size
 * delta with xt_compat_add_offset().  Returns the number of kernel
 * bytes consumed (off + match_size) or a negative errno.
 */
1877 static int compat_mtw_from_user(struct compat_ebt_entry_mwt *mwt,
1878 enum compat_mwt compat_mwt,
1879 struct ebt_entries_buf_state *state,
1880 const unsigned char *base)
1882 char name[EBT_FUNCTION_MAXNAMELEN];
1883 struct xt_match *match;
1884 struct xt_target *wt;
1886 int off, pad = 0, ret = 0;
1887 unsigned int size_kern, entry_offset, match_size = mwt->match_size;
/* copy the name out: the user blob's name field may not be terminated */
1889 strlcpy(name, mwt->u.name, sizeof(name));
1891 if (state->buf_kern_start)
1892 dst = state->buf_kern_start + state->buf_kern_offset;
1894 entry_offset = (unsigned char *) mwt - base;
1895 switch (compat_mwt) {
1896 case EBT_COMPAT_MATCH:
1897 match = try_then_request_module(xt_find_match(NFPROTO_BRIDGE,
1898 name, 0), "ebt_%s", name);
1902 return PTR_ERR(match);
1904 off = ebt_compat_match_offset(match, match_size);
1906 if (match->compat_from_user)
1907 match->compat_from_user(dst, mwt->data);
1909 memcpy(dst, mwt->data, match_size);
/* matchsize == -1 marks dynamic-size extensions (ebt_among) */
1912 size_kern = match->matchsize;
1913 if (unlikely(size_kern == -1))
1914 size_kern = match_size;
1915 module_put(match->me);
1917 case EBT_COMPAT_WATCHER: /* fallthrough */
1918 case EBT_COMPAT_TARGET:
1919 wt = try_then_request_module(xt_find_target(NFPROTO_BRIDGE,
1920 name, 0), "ebt_%s", name);
1925 off = xt_compat_target_offset(wt);
1928 if (wt->compat_from_user)
1929 wt->compat_from_user(dst, mwt->data);
1931 memcpy(dst, mwt->data, match_size);
1934 size_kern = wt->targetsize;
/* remember the cumulative 32->64 growth for offset fixups later */
1940 ret = xt_compat_add_offset(NFPROTO_BRIDGE, entry_offset,
1941 off + ebt_compat_entry_padsize());
1946 state->buf_kern_offset += match_size + off;
1947 state->buf_user_offset += match_size;
1948 pad = XT_ALIGN(size_kern) - size_kern;
1950 if (pad > 0 && dst) {
1951 BUG_ON(state->buf_kern_len <= pad);
1952 BUG_ON(state->buf_kern_offset - (match_size + off) + size_kern > state->buf_kern_len - pad);
1953 memset(dst + size_kern, 0, pad);
1955 return off + match_size;
/* Walk a run of compat match/watcher/target blobs occupying size_left
 * bytes: for each one, copy its header into the kernel buffer, insert
 * the header padding, translate the payload via compat_mtw_from_user(),
 * and (on the copy pass) patch the kernel header's match_size to the
 * grown kernel payload size.
 */
1959 * return size of all matches, watchers or target, including necessary
1960 * alignment and padding.
1962 static int ebt_size_mwt(struct compat_ebt_entry_mwt *match32,
1963 unsigned int size_left, enum compat_mwt type,
1964 struct ebt_entries_buf_state *state, const void *base)
1972 buf = (char *) match32;
1974 while (size_left >= sizeof(*match32)) {
1975 struct ebt_entry_match *match_kern;
/* remember where this blob's kernel header lands, to fix its size below */
1978 match_kern = (struct ebt_entry_match *) state->buf_kern_start;
1981 tmp = state->buf_kern_start + state->buf_kern_offset;
1982 match_kern = (struct ebt_entry_match *) tmp;
1984 ret = ebt_buf_add(state, buf, sizeof(*match32));
1987 size_left -= sizeof(*match32);
1989 /* add padding before match->data (if any) */
1990 ret = ebt_buf_add_pad(state, ebt_compat_entry_padsize());
/* user-declared payload size must fit in the remaining blob */
1994 if (match32->match_size > size_left)
1997 size_left -= match32->match_size;
1999 ret = compat_mtw_from_user(match32, type, state, base);
2003 BUG_ON(ret < match32->match_size);
2004 growth += ret - match32->match_size;
2005 growth += ebt_compat_entry_padsize();
2007 buf += sizeof(*match32);
2008 buf += match32->match_size;
2011 match_kern->match_size = ret;
/* targets are always last: nothing may follow them in the entry */
2013 WARN_ON(type == EBT_COMPAT_TARGET && size_left);
2014 match32 = (struct compat_ebt_entry_mwt *) buf;
/* Iterate over every compat watcher blob of entry 'e', i.e. the region
 * between watchers_offset and target_offset, invoking fn(watcher, args)
 * for each.  A non-zero callback return aborts the walk; landing past
 * target_offset marks the blob as corrupt.  (No inline comments below:
 * the backslash continuations forbid them.)
 */
2020 #define EBT_COMPAT_WATCHER_ITERATE(e, fn, args...) \
2024 struct compat_ebt_entry_mwt *__watcher; \
2026 for (__i = e->watchers_offset; \
2027 __i < (e)->target_offset; \
2028 __i += __watcher->watcher_size + \
2029 sizeof(struct compat_ebt_entry_mwt)) { \
2030 __watcher = (void *)(e) + __i; \
2031 __ret = fn(__watcher , ## args); \
2036 if (__i != (e)->target_offset) \
/* Iterate over every compat match blob of entry 'e', i.e. the region
 * between the fixed ebt_entry header and watchers_offset, invoking
 * fn(match, args) for each.  Same abort/corruption semantics as
 * EBT_COMPAT_WATCHER_ITERATE above.
 */
2042 #define EBT_COMPAT_MATCH_ITERATE(e, fn, args...) \
2046 struct compat_ebt_entry_mwt *__match; \
2048 for (__i = sizeof(struct ebt_entry); \
2049 __i < (e)->watchers_offset; \
2050 __i += __match->match_size + \
2051 sizeof(struct compat_ebt_entry_mwt)) { \
2052 __match = (void *)(e) + __i; \
2053 __ret = fn(__match , ## args); \
2058 if (__i != (e)->watchers_offset) \
2064 /* called for all ebt_entry structures. */
/* Translate a single compat ebt_entry (or chain header) into the kernel
 * buffer: copy the fixed part, then the matches, watchers and target
 * regions via ebt_size_mwt(), growing each as needed.  On the copy pass
 * the entry's watchers/target/next offsets stored in the kernel buffer
 * are rewritten to the grown values.  *total tracks how much of the
 * user blob remains unconsumed.
 */
2065 static int size_entry_mwt(struct ebt_entry *entry, const unsigned char *base,
2066 unsigned int *total,
2067 struct ebt_entries_buf_state *state)
2069 unsigned int i, j, startoff, new_offset = 0;
2070 /* stores match/watchers/targets & offset of next struct ebt_entry: */
2071 unsigned int offsets[4];
2072 unsigned int *offsets_update = NULL;
2076 if (*total < sizeof(struct ebt_entries))
/* chain headers need no translation: same layout in both ABIs */
2079 if (!entry->bitmask) {
2080 *total -= sizeof(struct ebt_entries);
2081 return ebt_buf_add(state, entry, sizeof(struct ebt_entries));
2083 if (*total < sizeof(*entry) || entry->next_offset < sizeof(*entry))
2086 startoff = state->buf_user_offset;
2087 /* pull in most part of ebt_entry, it does not need to be changed. */
2088 ret = ebt_buf_add(state, entry,
2089 offsetof(struct ebt_entry, watchers_offset));
2093 offsets[0] = sizeof(struct ebt_entry); /* matches come first */
2094 memcpy(&offsets[1], &entry->watchers_offset,
2095 sizeof(offsets) - sizeof(offsets[0]));
2097 if (state->buf_kern_start) {
/* copy pass: remember where the three offsets land so we can patch them */
2098 buf_start = state->buf_kern_start + state->buf_kern_offset;
2099 offsets_update = (unsigned int *) buf_start;
2101 ret = ebt_buf_add(state, &offsets[1],
2102 sizeof(offsets) - sizeof(offsets[0]));
2105 buf_start = (char *) entry;
2107 * 0: matches offset, always follows ebt_entry.
2108 * 1: watchers offset, from ebt_entry structure
2109 * 2: target offset, from ebt_entry structure
2110 * 3: next ebt_entry offset, from ebt_entry structure
2112 * offsets are relative to beginning of struct ebt_entry (i.e., 0).
2114 for (i = 0, j = 1 ; j < 4 ; j++, i++) {
2115 struct compat_ebt_entry_mwt *match32;
2117 char *buf = buf_start;
2119 buf = buf_start + offsets[i];
/* offsets must be monotonically increasing within the entry */
2120 if (offsets[i] > offsets[j])
2123 match32 = (struct compat_ebt_entry_mwt *) buf;
2124 size = offsets[j] - offsets[i];
2125 ret = ebt_size_mwt(match32, size, i, state, base);
2129 if (offsets_update && new_offset) {
2130 pr_debug("ebtables: change offset %d to %d\n",
2131 offsets_update[i], offsets[j] + new_offset);
2132 offsets_update[i] = offsets[j] + new_offset;
2136 startoff = state->buf_user_offset - startoff;
2138 BUG_ON(*total < startoff);
2144 * repl->entries_size is the size of the ebt_entry blob in userspace.
2145 * It might need more memory when copied to a 64 bit kernel in case
2146 * userspace is 32-bit. So, first task: find out how much memory is needed.
2148 * Called before validation is performed.
/* Runs the size_entry_mwt() walk over the whole blob.  With
 * state->buf_kern_start == NULL this is a pure sizing pass; with a
 * buffer attached it performs the actual 32->64 translation.  Returns
 * the kernel-side size consumed, or a negative errno.
 */
2150 static int compat_copy_entries(unsigned char *data, unsigned int size_user,
2151 struct ebt_entries_buf_state *state)
2153 unsigned int size_remaining = size_user;
2156 ret = EBT_ENTRY_ITERATE(data, size_user, size_entry_mwt, data,
2157 &size_remaining, state);
/* a well-formed blob is consumed exactly; leftovers indicate corruption */
2161 WARN_ON(size_remaining);
2162 return state->buf_kern_offset;
/* Read a compat_ebt_replace from userspace and widen it into a native
 * struct ebt_replace: the leading fields are layout-identical and
 * memcpy'd, while hook_entry[], counters and entries pointers must be
 * converted individually via compat_ptr().  Also sanity-limits
 * nentries/num_counters so later size arithmetic cannot overflow.
 */
2166 static int compat_copy_ebt_replace_from_user(struct ebt_replace *repl,
2167 void __user *user, unsigned int len)
2169 struct compat_ebt_replace tmp;
2172 if (len < sizeof(tmp))
2175 if (copy_from_user(&tmp, user, sizeof(tmp)))
2178 if (len != sizeof(tmp) + tmp.entries_size)
2181 if (tmp.entries_size == 0)
/* overflow guards for the countersize / counter-array computations */
2184 if (tmp.nentries >= ((INT_MAX - sizeof(struct ebt_table_info)) /
2185 NR_CPUS - SMP_CACHE_BYTES) / sizeof(struct ebt_counter))
2187 if (tmp.num_counters >= INT_MAX / sizeof(struct ebt_counter))
2190 memcpy(repl, &tmp, offsetof(struct ebt_replace, hook_entry));
2192 /* starting with hook_entry, 32 vs. 64 bit structures are different */
2193 for (i = 0; i < NF_BR_NUMHOOKS; i++)
2194 repl->hook_entry[i] = compat_ptr(tmp.hook_entry[i]);
2196 repl->num_counters = tmp.num_counters;
2197 repl->counters = compat_ptr(tmp.counters);
2198 repl->entries = compat_ptr(tmp.entries);
/* Compat EBT_SO_SET_ENTRIES: replace a table from a 32-bit userspace
 * blob.  Widens the replace header, copies the user entries in, then
 * runs compat_copy_entries() twice -- once without a buffer to size the
 * 64-bit form, once into a freshly allocated buffer to translate it --
 * relocates the hook entry pointers via the compat jump table, and
 * finally hands the translated table to do_replace_finish().
 */
2202 static int compat_do_replace(struct net *net, void __user *user,
2205 int ret, i, countersize, size64;
2206 struct ebt_table_info *newinfo;
2207 struct ebt_replace tmp;
2208 struct ebt_entries_buf_state state;
2211 ret = compat_copy_ebt_replace_from_user(&tmp, user, len);
2213 /* try real handler in case userland supplied needed padding */
2214 if (ret == -EINVAL && do_replace(net, user, len) == 0)
2219 countersize = COUNTER_OFFSET(tmp.nentries) * nr_cpu_ids;
2220 newinfo = vmalloc(sizeof(*newinfo) + countersize);
2225 memset(newinfo->counters, 0, countersize);
2227 memset(&state, 0, sizeof(state));
/* first land the raw 32-bit blob in kernel memory */
2229 newinfo->entries = vmalloc(tmp.entries_size);
2230 if (!newinfo->entries) {
2235 newinfo->entries, tmp.entries, tmp.entries_size) != 0) {
2240 entries_tmp = newinfo->entries;
/* the compat offset table is global per family: serialize access */
2242 xt_compat_lock(NFPROTO_BRIDGE);
/* pass 1: sizing only (state has no kernel buffer attached yet) */
2244 ret = compat_copy_entries(entries_tmp, tmp.entries_size, &state);
2248 pr_debug("tmp.entries_size %d, kern off %d, user off %d delta %d\n",
2249 tmp.entries_size, state.buf_kern_offset, state.buf_user_offset,
2250 xt_compat_calc_jump(NFPROTO_BRIDGE, tmp.entries_size));
2253 newinfo->entries = vmalloc(size64);
2254 if (!newinfo->entries) {
2260 memset(&state, 0, sizeof(state));
2261 state.buf_kern_start = newinfo->entries;
2262 state.buf_kern_len = size64;
/* pass 2: same walk, now actually translating into the 64-bit buffer */
2264 ret = compat_copy_entries(entries_tmp, tmp.entries_size, &state);
2265 BUG_ON(ret < 0); /* parses same data again */
2268 tmp.entries_size = size64;
/* shift each hook start by the cumulative growth before its offset */
2270 for (i = 0; i < NF_BR_NUMHOOKS; i++) {
2271 char __user *usrptr;
2272 if (tmp.hook_entry[i]) {
2274 usrptr = (char __user *) tmp.hook_entry[i];
2275 delta = usrptr - tmp.entries;
2276 usrptr += xt_compat_calc_jump(NFPROTO_BRIDGE, delta);
2277 tmp.hook_entry[i] = (struct ebt_entries __user *)usrptr;
2281 xt_compat_flush_offsets(NFPROTO_BRIDGE);
2282 xt_compat_unlock(NFPROTO_BRIDGE);
2284 ret = do_replace_finish(net, &tmp, newinfo);
2288 vfree(newinfo->entries);
/* error path: drop the offset table before releasing the compat lock */
2293 xt_compat_flush_offsets(NFPROTO_BRIDGE);
2294 xt_compat_unlock(NFPROTO_BRIDGE);
/* Compat EBT_SO_SET_COUNTERS: same contract as update_counters() but
 * reads a compat_ebt_replace header and converts the counters pointer
 * with compat_ptr().
 */
2298 static int compat_update_counters(struct net *net, void __user *user,
2301 struct compat_ebt_replace hlp;
2303 if (copy_from_user(&hlp, user, sizeof(hlp)))
2306 /* try real handler in case userland supplied needed padding */
2307 if (len != sizeof(hlp) + hlp.num_counters * sizeof(struct ebt_counter))
2308 return update_counters(net, user, len);
2310 return do_update_counters(net, hlp.name, compat_ptr(hlp.counters),
2311 hlp.num_counters, user, len);
/* Compat setsockopt() dispatcher: mirrors do_ebt_set_ctl() but routes
 * to the compat replace/counter handlers.  Still gated on
 * CAP_NET_ADMIN.
 */
2314 static int compat_do_ebt_set_ctl(struct sock *sk,
2315 int cmd, void __user *user, unsigned int len)
2319 if (!capable(CAP_NET_ADMIN))
2323 case EBT_SO_SET_ENTRIES:
2324 ret = compat_do_replace(sock_net(sk), user, len);
2326 case EBT_SO_SET_COUNTERS:
2327 ret = compat_update_counters(sock_net(sk), user, len);
/* Compat getsockopt() dispatcher.  INFO replies are recomputed through
 * compat_table_info() so the reported sizes match the 32-bit layout;
 * ENTRIES replies first attempt the native path (see comment below)
 * and otherwise stream through compat_copy_everything_to_user().  The
 * compat lock guards the shared per-family offset table.
 */
2335 static int compat_do_ebt_get_ctl(struct sock *sk, int cmd,
2336 void __user *user, int *len)
2339 struct compat_ebt_replace tmp;
2340 struct ebt_table *t;
2342 if (!capable(CAP_NET_ADMIN))
2345 /* try real handler in case userland supplied needed padding */
2346 if ((cmd == EBT_SO_GET_INFO ||
2347 cmd == EBT_SO_GET_INIT_INFO) && *len != sizeof(tmp))
2348 return do_ebt_get_ctl(sk, cmd, user, len);
2350 if (copy_from_user(&tmp, user, sizeof(tmp)))
2353 t = find_table_lock(sock_net(sk), tmp.name, &ret, &ebt_mutex);
2357 xt_compat_lock(NFPROTO_BRIDGE);
2359 case EBT_SO_GET_INFO:
2360 tmp.nentries = t->private->nentries;
/* entries_size shrinks to the compat view inside compat_table_info() */
2361 ret = compat_table_info(t->private, &tmp);
2364 tmp.valid_hooks = t->valid_hooks;
2366 if (copy_to_user(user, &tmp, *len) != 0) {
2372 case EBT_SO_GET_INIT_INFO:
2373 tmp.nentries = t->table->nentries;
2374 tmp.entries_size = t->table->entries_size;
2375 tmp.valid_hooks = t->table->valid_hooks;
2377 if (copy_to_user(user, &tmp, *len) != 0) {
2383 case EBT_SO_GET_ENTRIES:
2384 case EBT_SO_GET_INIT_ENTRIES:
2386 * try real handler first in case of userland-side padding.
2387 * in case we are dealing with an 'ordinary' 32 bit binary
2388 * without 64bit compatibility padding, this will fail right
2389 * after copy_from_user when the *len argument is validated.
2391 * the compat_ variant needs to do one pass over the kernel
2392 * data set to adjust for size differences before it the check.
2394 if (copy_everything_to_user(t, user, len, cmd) == 0)
2397 ret = compat_copy_everything_to_user(t, user, len, cmd);
2403 xt_compat_flush_offsets(NFPROTO_BRIDGE);
2404 xt_compat_unlock(NFPROTO_BRIDGE);
2405 mutex_unlock(&ebt_mutex);
/* Registration table wiring the EBT_SO_* sockopt ranges to the handlers
 * above; the compat_* hooks are installed only when CONFIG_COMPAT is
 * enabled.
 */
2410 static struct nf_sockopt_ops ebt_sockopts =
2413 .set_optmin = EBT_BASE_CTL,
2414 .set_optmax = EBT_SO_SET_MAX + 1,
2415 .set = do_ebt_set_ctl,
2416 #ifdef CONFIG_COMPAT
2417 .compat_set = compat_do_ebt_set_ctl,
2419 .get_optmin = EBT_BASE_CTL,
2420 .get_optmax = EBT_SO_GET_MAX + 1,
2421 .get = do_ebt_get_ctl,
2422 #ifdef CONFIG_COMPAT
2423 .compat_get = compat_do_ebt_get_ctl,
2425 .owner = THIS_MODULE,
/* Module init: register the standard target with x_tables, then the
 * sockopt interface; the target registration is rolled back if the
 * sockopt registration fails.
 */
2428 static int __init ebtables_init(void)
2432 ret = xt_register_target(&ebt_standard_target);
2435 ret = nf_register_sockopt(&ebt_sockopts);
2437 xt_unregister_target(&ebt_standard_target);
2441 printk(KERN_INFO "Ebtables v2.0 registered\n");
/* Module exit: unregister in reverse order of ebtables_init(). */
2445 static void __exit ebtables_fini(void)
2447 nf_unregister_sockopt(&ebt_sockopts);
2448 xt_unregister_target(&ebt_standard_target);
2449 printk(KERN_INFO "Ebtables v2.0 unregistered\n");
/* Public API for table modules (ebtable_filter, ebtable_nat, ...) and
 * standard module wiring.
 */
2452 EXPORT_SYMBOL(ebt_register_table);
2453 EXPORT_SYMBOL(ebt_unregister_table);
2454 EXPORT_SYMBOL(ebt_do_table);
2455 module_init(ebtables_init);
2456 module_exit(ebtables_fini);
2457 MODULE_LICENSE("GPL");