net/ipv6/netfilter/ip6_tables.c
1 // SPDX-License-Identifier: GPL-2.0-only
2 /*
3  * Packet matching code.
4  *
5  * Copyright (C) 1999 Paul `Rusty' Russell & Michael J. Neuling
6  * Copyright (C) 2000-2005 Netfilter Core Team <coreteam@netfilter.org>
7  * Copyright (c) 2006-2010 Patrick McHardy <kaber@trash.net>
8  */
9
10 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
11
12 #include <linux/kernel.h>
13 #include <linux/capability.h>
14 #include <linux/in.h>
15 #include <linux/skbuff.h>
16 #include <linux/kmod.h>
17 #include <linux/vmalloc.h>
18 #include <linux/netdevice.h>
19 #include <linux/module.h>
20 #include <linux/poison.h>
21 #include <net/ipv6.h>
22 #include <net/compat.h>
23 #include <linux/uaccess.h>
24 #include <linux/mutex.h>
25 #include <linux/proc_fs.h>
26 #include <linux/err.h>
27 #include <linux/cpumask.h>
28
29 #include <linux/netfilter_ipv6/ip6_tables.h>
30 #include <linux/netfilter/x_tables.h>
31 #include <net/netfilter/nf_log.h>
32 #include "../../netfilter/xt_repldata.h"
33
34 MODULE_LICENSE("GPL");
35 MODULE_AUTHOR("Netfilter Core Team <coreteam@netfilter.org>");
36 MODULE_DESCRIPTION("IPv6 packet filter");
37
38 void *ip6t_alloc_initial_table(const struct xt_table *info)
39 {
40         return xt_alloc_initial_table(ip6t, IP6T);
41 }
42 EXPORT_SYMBOL_GPL(ip6t_alloc_initial_table);
43
44 /* Returns whether the packet matches the rule or not. */
45 /* Performance critical - called for every packet */
46 static inline bool
47 ip6_packet_match(const struct sk_buff *skb,
48                  const char *indev,
49                  const char *outdev,
50                  const struct ip6t_ip6 *ip6info,
51                  unsigned int *protoff,
52                  u16 *fragoff, bool *hotdrop)
53 {
54         unsigned long ret;
55         const struct ipv6hdr *ipv6 = ipv6_hdr(skb);
56
57         if (NF_INVF(ip6info, IP6T_INV_SRCIP,
58                     ipv6_masked_addr_cmp(&ipv6->saddr, &ip6info->smsk,
59                                          &ip6info->src)) ||
60             NF_INVF(ip6info, IP6T_INV_DSTIP,
61                     ipv6_masked_addr_cmp(&ipv6->daddr, &ip6info->dmsk,
62                                          &ip6info->dst)))
63                 return false;
64
65         ret = ifname_compare_aligned(indev, ip6info->iniface, ip6info->iniface_mask);
66
67         if (NF_INVF(ip6info, IP6T_INV_VIA_IN, ret != 0))
68                 return false;
69
70         ret = ifname_compare_aligned(outdev, ip6info->outiface, ip6info->outiface_mask);
71
72         if (NF_INVF(ip6info, IP6T_INV_VIA_OUT, ret != 0))
73                 return false;
74
75 /* ... might want to do something with class and flowlabel here ... */
76
77         /* look for the desired protocol header */
78         if (ip6info->flags & IP6T_F_PROTO) {
79                 int protohdr;
80                 unsigned short _frag_off;
81
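                /* Find the transport header: *protoff is set to its offset and
                 * _frag_off becomes nonzero for non-first fragments.  If the
                 * lookup fails and this is not a later fragment, the extension
                 * header chain is malformed, so the packet is hotdropped. */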
82                 protohdr = ipv6_find_hdr(skb, protoff, -1, &_frag_off, NULL);
83                 if (protohdr < 0) {
84                         if (_frag_off == 0)
85                                 *hotdrop = true;
86                         return false;
87                 }
88                 *fragoff = _frag_off;
89
90                 if (ip6info->proto == protohdr) {
91                         if (ip6info->invflags & IP6T_INV_PROTO)
92                                 return false;
93
94                         return true;
95                 }
96
97                 /* We need a match for '-p all', too! */
98                 if ((ip6info->proto != 0) &&
99                         !(ip6info->invflags & IP6T_INV_PROTO))
100                         return false;
101         }
102         return true;
103 }
104
105 /* should be ip6 safe */
106 static bool
107 ip6_checkentry(const struct ip6t_ip6 *ipv6)
108 {
109         if (ipv6->flags & ~IP6T_F_MASK)
110                 return false;
111         if (ipv6->invflags & ~IP6T_INV_MASK)
112                 return false;
113
114         return true;
115 }
116
117 static unsigned int
118 ip6t_error(struct sk_buff *skb, const struct xt_action_param *par)
119 {
120         net_info_ratelimited("error: `%s'\n", (const char *)par->targinfo);
121
122         return NF_DROP;
123 }
124
125 static inline struct ip6t_entry *
126 get_entry(const void *base, unsigned int offset)
127 {
128         return (struct ip6t_entry *)(base + offset);
129 }
130
131 /* All zeroes == unconditional rule. */
132 /* Mildly perf critical (only if packet tracing is on) */
133 static inline bool unconditional(const struct ip6t_entry *e)
134 {
135         static const struct ip6t_ip6 uncond;
136
137         return e->target_offset == sizeof(struct ip6t_entry) &&
138                memcmp(&e->ipv6, &uncond, sizeof(uncond)) == 0;
139 }
140
141 static inline const struct xt_entry_target *
142 ip6t_get_target_c(const struct ip6t_entry *e)
143 {
144         return ip6t_get_target((struct ip6t_entry *)e);
145 }
146
147 #if IS_ENABLED(CONFIG_NETFILTER_XT_TARGET_TRACE)
148 /* This cries for unification! */
149 static const char *const hooknames[] = {
150         [NF_INET_PRE_ROUTING]           = "PREROUTING",
151         [NF_INET_LOCAL_IN]              = "INPUT",
152         [NF_INET_FORWARD]               = "FORWARD",
153         [NF_INET_LOCAL_OUT]             = "OUTPUT",
154         [NF_INET_POST_ROUTING]          = "POSTROUTING",
155 };
156
157 enum nf_ip_trace_comments {
158         NF_IP6_TRACE_COMMENT_RULE,
159         NF_IP6_TRACE_COMMENT_RETURN,
160         NF_IP6_TRACE_COMMENT_POLICY,
161 };
162
163 static const char *const comments[] = {
164         [NF_IP6_TRACE_COMMENT_RULE]     = "rule",
165         [NF_IP6_TRACE_COMMENT_RETURN]   = "return",
166         [NF_IP6_TRACE_COMMENT_POLICY]   = "policy",
167 };
168
169 static const struct nf_loginfo trace_loginfo = {
170         .type = NF_LOG_TYPE_LOG,
171         .u = {
172                 .log = {
173                         .level = LOGLEVEL_WARNING,
174                         .logflags = NF_LOG_DEFAULT_MASK,
175                 },
176         },
177 };
178
179 /* Mildly perf critical (only if packet tracing is on) */
180 static inline int
181 get_chainname_rulenum(const struct ip6t_entry *s, const struct ip6t_entry *e,
182                       const char *hookname, const char **chainname,
183                       const char **comment, unsigned int *rulenum)
184 {
185         const struct xt_standard_target *t = (void *)ip6t_get_target_c(s);
186
187         if (strcmp(t->target.u.kernel.target->name, XT_ERROR_TARGET) == 0) {
188                 /* Head of user chain: ERROR target with chainname */
189                 *chainname = t->target.data;
190                 (*rulenum) = 0;
191         } else if (s == e) {
192                 (*rulenum)++;
193
194                 if (unconditional(s) &&
195                     strcmp(t->target.u.kernel.target->name,
196                            XT_STANDARD_TARGET) == 0 &&
197                     t->verdict < 0) {
198                         /* Tail of chains: STANDARD target (return/policy) */
199                         *comment = *chainname == hookname
200                                 ? comments[NF_IP6_TRACE_COMMENT_POLICY]
201                                 : comments[NF_IP6_TRACE_COMMENT_RETURN];
202                 }
203                 return 1;
204         } else
205                 (*rulenum)++;
206
207         return 0;
208 }
209
210 static void trace_packet(struct net *net,
211                          const struct sk_buff *skb,
212                          unsigned int hook,
213                          const struct net_device *in,
214                          const struct net_device *out,
215                          const char *tablename,
216                          const struct xt_table_info *private,
217                          const struct ip6t_entry *e)
218 {
219         const struct ip6t_entry *root;
220         const char *hookname, *chainname, *comment;
221         const struct ip6t_entry *iter;
222         unsigned int rulenum = 0;
223
224         root = get_entry(private->entries, private->hook_entry[hook]);
225
226         hookname = chainname = hooknames[hook];
227         comment = comments[NF_IP6_TRACE_COMMENT_RULE];
228
229         xt_entry_foreach(iter, root, private->size - private->hook_entry[hook])
230                 if (get_chainname_rulenum(iter, e, hookname,
231                     &chainname, &comment, &rulenum) != 0)
232                         break;
233
234         nf_log_trace(net, AF_INET6, hook, skb, in, out, &trace_loginfo,
235                      "TRACE: %s:%s:%s:%u ",
236                      tablename, chainname, comment, rulenum);
237 }
238 #endif
239
240 static inline struct ip6t_entry *
241 ip6t_next_entry(const struct ip6t_entry *entry)
242 {
243         return (void *)entry + entry->next_offset;
244 }
245
246 /* Returns one of the generic firewall policies, like NF_ACCEPT. */
247 unsigned int
248 ip6t_do_table(void *priv, struct sk_buff *skb,
249               const struct nf_hook_state *state)
250 {
251         const struct xt_table *table = priv;
252         unsigned int hook = state->hook;
253         static const char nulldevname[IFNAMSIZ] __attribute__((aligned(sizeof(long))));
254         /* Initializing verdict to NF_DROP keeps gcc happy. */
255         unsigned int verdict = NF_DROP;
256         const char *indev, *outdev;
257         const void *table_base;
258         struct ip6t_entry *e, **jumpstack;
259         unsigned int stackidx, cpu;
260         const struct xt_table_info *private;
261         struct xt_action_param acpar;
262         unsigned int addend;
263
264         /* Initialization */
265         stackidx = 0;
266         indev = state->in ? state->in->name : nulldevname;
267         outdev = state->out ? state->out->name : nulldevname;
268         /* We handle fragments by dealing with the first fragment as
269          * if it was a normal packet.  All other fragments are treated
270          * normally, except that they will NEVER match rules that ask
271          * things we don't know (ie. TCP SYN flag or ports).  If the
272          * rule is also a fragment-specific rule, non-fragments won't
273          * match it. */
274         acpar.fragoff = 0;
275         acpar.hotdrop = false;
276         acpar.state   = state;
277
278         WARN_ON(!(table->valid_hooks & (1 << hook)));
279
280         local_bh_disable();
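        /* Enter the per-cpu xt_recseq write section: get_counters() and
         * xt_replace_table() use this sequence counter to detect packets
         * that are still traversing the table on this CPU. */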
281         addend = xt_write_recseq_begin();
282         private = READ_ONCE(table->private); /* Address dependency. */
283         cpu        = smp_processor_id();
284         table_base = private->entries;
285         jumpstack  = (struct ip6t_entry **)private->jumpstack[cpu];
286
287         /* Switch to alternate jumpstack if we're being invoked via TEE.
288          * TEE issues XT_CONTINUE verdict on original skb so we must not
289          * clobber the jumpstack.
290          *
291          * For recursion via REJECT or SYNPROXY the stack will be clobbered
292          * but it is no problem since absolute verdict is issued by these.
293          */
294         if (static_key_false(&xt_tee_enabled))
295                 jumpstack += private->stacksize * __this_cpu_read(nf_skb_duplicated);
296
297         e = get_entry(table_base, private->hook_entry[hook]);
298
299         do {
300                 const struct xt_entry_target *t;
301                 const struct xt_entry_match *ematch;
302                 struct xt_counters *counter;
303
304                 WARN_ON(!e);
305                 acpar.thoff = 0;
306                 if (!ip6_packet_match(skb, indev, outdev, &e->ipv6,
307                     &acpar.thoff, &acpar.fragoff, &acpar.hotdrop)) {
308  no_match:
309                         e = ip6t_next_entry(e);
310                         continue;
311                 }
312
313                 xt_ematch_foreach(ematch, e) {
314                         acpar.match     = ematch->u.kernel.match;
315                         acpar.matchinfo = ematch->data;
316                         if (!acpar.match->match(skb, &acpar))
317                                 goto no_match;
318                 }
319
320                 counter = xt_get_this_cpu_counter(&e->counters);
321                 ADD_COUNTER(*counter, skb->len, 1);
322
323                 t = ip6t_get_target_c(e);
324                 WARN_ON(!t->u.kernel.target);
325
326 #if IS_ENABLED(CONFIG_NETFILTER_XT_TARGET_TRACE)
327                 /* The packet is traced: log it */
328                 if (unlikely(skb->nf_trace))
329                         trace_packet(state->net, skb, hook, state->in,
330                                      state->out, table->name, private, e);
331 #endif
332                 /* Standard target? */
333                 if (!t->u.kernel.target->target) {
334                         int v;
335
336                         v = ((struct xt_standard_target *)t)->verdict;
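                        /* A negative verdict is a built-in result encoded as
                         * -(NF_x + 1) (ACCEPT, DROP, QUEUE) or XT_RETURN;
                         * anything >= 0 is a jump offset into the table. */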
337                         if (v < 0) {
338                                 /* Pop from stack? */
339                                 if (v != XT_RETURN) {
340                                         verdict = (unsigned int)(-v) - 1;
341                                         break;
342                                 }
343                                 if (stackidx == 0)
344                                         e = get_entry(table_base,
345                                             private->underflow[hook]);
346                                 else
347                                         e = ip6t_next_entry(jumpstack[--stackidx]);
348                                 continue;
349                         }
350                         if (table_base + v != ip6t_next_entry(e) &&
351                             !(e->ipv6.flags & IP6T_F_GOTO)) {
352                                 if (unlikely(stackidx >= private->stacksize)) {
353                                         verdict = NF_DROP;
354                                         break;
355                                 }
356                                 jumpstack[stackidx++] = e;
357                         }
358
359                         e = get_entry(table_base, v);
360                         continue;
361                 }
362
363                 acpar.target   = t->u.kernel.target;
364                 acpar.targinfo = t->data;
365
366                 verdict = t->u.kernel.target->target(skb, &acpar);
367                 if (verdict == XT_CONTINUE)
368                         e = ip6t_next_entry(e);
369                 else
370                         /* Verdict */
371                         break;
372         } while (!acpar.hotdrop);
373
374         xt_write_recseq_end(addend);
375         local_bh_enable();
376
377         if (acpar.hotdrop)
378                 return NF_DROP;
379         else return verdict;
380 }
381
382 /* Figures out from what hook each rule can be called: returns 0 if
383    there are loops.  Puts hook bitmask in comefrom. */
384 static int
385 mark_source_chains(const struct xt_table_info *newinfo,
386                    unsigned int valid_hooks, void *entry0,
387                    unsigned int *offsets)
388 {
389         unsigned int hook;
390
391         /* No recursion; use packet counter to save back ptrs (reset
392            to 0 as we leave), and comefrom to save source hook bitmask */
393         for (hook = 0; hook < NF_INET_NUMHOOKS; hook++) {
394                 unsigned int pos = newinfo->hook_entry[hook];
395                 struct ip6t_entry *e = entry0 + pos;
396
397                 if (!(valid_hooks & (1 << hook)))
398                         continue;
399
400                 /* Set initial back pointer. */
401                 e->counters.pcnt = pos;
402
403                 for (;;) {
404                         const struct xt_standard_target *t
405                                 = (void *)ip6t_get_target_c(e);
406                         int visited = e->comefrom & (1 << hook);
407
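                        /* Bit NF_INET_NUMHOOKS in comefrom marks entries on the
                         * current walk; seeing it again means the ruleset loops. */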
408                         if (e->comefrom & (1 << NF_INET_NUMHOOKS))
409                                 return 0;
410
411                         e->comefrom |= ((1 << hook) | (1 << NF_INET_NUMHOOKS));
412
413                         /* Unconditional return/END. */
414                         if ((unconditional(e) &&
415                              (strcmp(t->target.u.user.name,
416                                      XT_STANDARD_TARGET) == 0) &&
417                              t->verdict < 0) || visited) {
418                                 unsigned int oldpos, size;
419
420                                 /* Return: backtrack through the last
421                                    big jump. */
422                                 do {
423                                         e->comefrom ^= (1<<NF_INET_NUMHOOKS);
424                                         oldpos = pos;
425                                         pos = e->counters.pcnt;
426                                         e->counters.pcnt = 0;
427
428                                         /* We're at the start. */
429                                         if (pos == oldpos)
430                                                 goto next;
431
432                                         e = entry0 + pos;
433                                 } while (oldpos == pos + e->next_offset);
434
435                                 /* Move along one */
436                                 size = e->next_offset;
437                                 e = entry0 + pos + size;
438                                 if (pos + size >= newinfo->size)
439                                         return 0;
440                                 e->counters.pcnt = pos;
441                                 pos += size;
442                         } else {
443                                 int newpos = t->verdict;
444
445                                 if (strcmp(t->target.u.user.name,
446                                            XT_STANDARD_TARGET) == 0 &&
447                                     newpos >= 0) {
448                                         /* This a jump; chase it. */
449                                         if (!xt_find_jump_offset(offsets, newpos,
450                                                                  newinfo->number))
451                                                 return 0;
452                                 } else {
453                                         /* ... this is a fallthru */
454                                         newpos = pos + e->next_offset;
455                                         if (newpos >= newinfo->size)
456                                                 return 0;
457                                 }
458                                 e = entry0 + newpos;
459                                 e->counters.pcnt = pos;
460                                 pos = newpos;
461                         }
462                 }
463 next:           ;
464         }
465         return 1;
466 }
467
468 static void cleanup_match(struct xt_entry_match *m, struct net *net)
469 {
470         struct xt_mtdtor_param par;
471
472         par.net       = net;
473         par.match     = m->u.kernel.match;
474         par.matchinfo = m->data;
475         par.family    = NFPROTO_IPV6;
476         if (par.match->destroy != NULL)
477                 par.match->destroy(&par);
478         module_put(par.match->me);
479 }
480
481 static int check_match(struct xt_entry_match *m, struct xt_mtchk_param *par)
482 {
483         const struct ip6t_ip6 *ipv6 = par->entryinfo;
484
485         par->match     = m->u.kernel.match;
486         par->matchinfo = m->data;
487
488         return xt_check_match(par, m->u.match_size - sizeof(*m),
489                               ipv6->proto, ipv6->invflags & IP6T_INV_PROTO);
490 }
491
492 static int
493 find_check_match(struct xt_entry_match *m, struct xt_mtchk_param *par)
494 {
495         struct xt_match *match;
496         int ret;
497
498         match = xt_request_find_match(NFPROTO_IPV6, m->u.user.name,
499                                       m->u.user.revision);
500         if (IS_ERR(match))
501                 return PTR_ERR(match);
502
503         m->u.kernel.match = match;
504
505         ret = check_match(m, par);
506         if (ret)
507                 goto err;
508
509         return 0;
510 err:
511         module_put(m->u.kernel.match->me);
512         return ret;
513 }
514
515 static int check_target(struct ip6t_entry *e, struct net *net, const char *name)
516 {
517         struct xt_entry_target *t = ip6t_get_target(e);
518         struct xt_tgchk_param par = {
519                 .net       = net,
520                 .table     = name,
521                 .entryinfo = e,
522                 .target    = t->u.kernel.target,
523                 .targinfo  = t->data,
524                 .hook_mask = e->comefrom,
525                 .family    = NFPROTO_IPV6,
526         };
527
528         return xt_check_target(&par, t->u.target_size - sizeof(*t),
529                                e->ipv6.proto,
530                                e->ipv6.invflags & IP6T_INV_PROTO);
531 }
532
533 static int
534 find_check_entry(struct ip6t_entry *e, struct net *net, const char *name,
535                  unsigned int size,
536                  struct xt_percpu_counter_alloc_state *alloc_state)
537 {
538         struct xt_entry_target *t;
539         struct xt_target *target;
540         int ret;
541         unsigned int j;
542         struct xt_mtchk_param mtpar;
543         struct xt_entry_match *ematch;
544
545         if (!xt_percpu_counter_alloc(alloc_state, &e->counters))
546                 return -ENOMEM;
547
548         j = 0;
549         memset(&mtpar, 0, sizeof(mtpar));
550         mtpar.net       = net;
551         mtpar.table     = name;
552         mtpar.entryinfo = &e->ipv6;
553         mtpar.hook_mask = e->comefrom;
554         mtpar.family    = NFPROTO_IPV6;
555         xt_ematch_foreach(ematch, e) {
556                 ret = find_check_match(ematch, &mtpar);
557                 if (ret != 0)
558                         goto cleanup_matches;
559                 ++j;
560         }
561
562         t = ip6t_get_target(e);
563         target = xt_request_find_target(NFPROTO_IPV6, t->u.user.name,
564                                         t->u.user.revision);
565         if (IS_ERR(target)) {
566                 ret = PTR_ERR(target);
567                 goto cleanup_matches;
568         }
569         t->u.kernel.target = target;
570
571         ret = check_target(e, net, name);
572         if (ret)
573                 goto err;
574         return 0;
575  err:
576         module_put(t->u.kernel.target->me);
577  cleanup_matches:
578         xt_ematch_foreach(ematch, e) {
579                 if (j-- == 0)
580                         break;
581                 cleanup_match(ematch, net);
582         }
583
584         xt_percpu_counter_free(&e->counters);
585
586         return ret;
587 }
588
589 static bool check_underflow(const struct ip6t_entry *e)
590 {
591         const struct xt_entry_target *t;
592         unsigned int verdict;
593
594         if (!unconditional(e))
595                 return false;
596         t = ip6t_get_target_c(e);
597         if (strcmp(t->u.user.name, XT_STANDARD_TARGET) != 0)
598                 return false;
599         verdict = ((struct xt_standard_target *)t)->verdict;
600         verdict = -verdict - 1;
601         return verdict == NF_DROP || verdict == NF_ACCEPT;
602 }
603
604 static int
605 check_entry_size_and_hooks(struct ip6t_entry *e,
606                            struct xt_table_info *newinfo,
607                            const unsigned char *base,
608                            const unsigned char *limit,
609                            const unsigned int *hook_entries,
610                            const unsigned int *underflows,
611                            unsigned int valid_hooks)
612 {
613         unsigned int h;
614         int err;
615
616         if ((unsigned long)e % __alignof__(struct ip6t_entry) != 0 ||
617             (unsigned char *)e + sizeof(struct ip6t_entry) >= limit ||
618             (unsigned char *)e + e->next_offset > limit)
619                 return -EINVAL;
620
621         if (e->next_offset
622             < sizeof(struct ip6t_entry) + sizeof(struct xt_entry_target))
623                 return -EINVAL;
624
625         if (!ip6_checkentry(&e->ipv6))
626                 return -EINVAL;
627
628         err = xt_check_entry_offsets(e, e->elems, e->target_offset,
629                                      e->next_offset);
630         if (err)
631                 return err;
632
633         /* Check hooks & underflows */
634         for (h = 0; h < NF_INET_NUMHOOKS; h++) {
635                 if (!(valid_hooks & (1 << h)))
636                         continue;
637                 if ((unsigned char *)e - base == hook_entries[h])
638                         newinfo->hook_entry[h] = hook_entries[h];
639                 if ((unsigned char *)e - base == underflows[h]) {
640                         if (!check_underflow(e))
641                                 return -EINVAL;
642
643                         newinfo->underflow[h] = underflows[h];
644                 }
645         }
646
647         /* Clear counters and comefrom */
648         e->counters = ((struct xt_counters) { 0, 0 });
649         e->comefrom = 0;
650         return 0;
651 }
652
653 static void cleanup_entry(struct ip6t_entry *e, struct net *net)
654 {
655         struct xt_tgdtor_param par;
656         struct xt_entry_target *t;
657         struct xt_entry_match *ematch;
658
659         /* Cleanup all matches */
660         xt_ematch_foreach(ematch, e)
661                 cleanup_match(ematch, net);
662         t = ip6t_get_target(e);
663
664         par.net      = net;
665         par.target   = t->u.kernel.target;
666         par.targinfo = t->data;
667         par.family   = NFPROTO_IPV6;
668         if (par.target->destroy != NULL)
669                 par.target->destroy(&par);
670         module_put(par.target->me);
671         xt_percpu_counter_free(&e->counters);
672 }
673
674 /* Checks and translates the user-supplied table segment (held in
675    newinfo) */
676 static int
677 translate_table(struct net *net, struct xt_table_info *newinfo, void *entry0,
678                 const struct ip6t_replace *repl)
679 {
680         struct xt_percpu_counter_alloc_state alloc_state = { 0 };
681         struct ip6t_entry *iter;
682         unsigned int *offsets;
683         unsigned int i;
684         int ret = 0;
685
686         newinfo->size = repl->size;
687         newinfo->number = repl->num_entries;
688
689         /* Init all hooks to impossible value. */
690         for (i = 0; i < NF_INET_NUMHOOKS; i++) {
691                 newinfo->hook_entry[i] = 0xFFFFFFFF;
692                 newinfo->underflow[i] = 0xFFFFFFFF;
693         }
694
695         offsets = xt_alloc_entry_offsets(newinfo->number);
696         if (!offsets)
697                 return -ENOMEM;
698         i = 0;
699         /* Walk through entries, checking offsets. */
700         xt_entry_foreach(iter, entry0, newinfo->size) {
701                 ret = check_entry_size_and_hooks(iter, newinfo, entry0,
702                                                  entry0 + repl->size,
703                                                  repl->hook_entry,
704                                                  repl->underflow,
705                                                  repl->valid_hooks);
706                 if (ret != 0)
707                         goto out_free;
708                 if (i < repl->num_entries)
709                         offsets[i] = (void *)iter - entry0;
710                 ++i;
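                /* Every user-defined chain starts with an ERROR target carrying
                 * the chain name, so this count is used to size the per-cpu
                 * jumpstack. */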
711                 if (strcmp(ip6t_get_target(iter)->u.user.name,
712                     XT_ERROR_TARGET) == 0)
713                         ++newinfo->stacksize;
714         }
715
716         ret = -EINVAL;
717         if (i != repl->num_entries)
718                 goto out_free;
719
720         ret = xt_check_table_hooks(newinfo, repl->valid_hooks);
721         if (ret)
722                 goto out_free;
723
724         if (!mark_source_chains(newinfo, repl->valid_hooks, entry0, offsets)) {
725                 ret = -ELOOP;
726                 goto out_free;
727         }
728         kvfree(offsets);
729
730         /* Finally, each sanity check must pass */
731         i = 0;
732         xt_entry_foreach(iter, entry0, newinfo->size) {
733                 ret = find_check_entry(iter, net, repl->name, repl->size,
734                                        &alloc_state);
735                 if (ret != 0)
736                         break;
737                 ++i;
738         }
739
740         if (ret != 0) {
741                 xt_entry_foreach(iter, entry0, newinfo->size) {
742                         if (i-- == 0)
743                                 break;
744                         cleanup_entry(iter, net);
745                 }
746                 return ret;
747         }
748
749         return ret;
750  out_free:
751         kvfree(offsets);
752         return ret;
753 }
754
755 static void
756 get_counters(const struct xt_table_info *t,
757              struct xt_counters counters[])
758 {
759         struct ip6t_entry *iter;
760         unsigned int cpu;
761         unsigned int i;
762
763         for_each_possible_cpu(cpu) {
764                 seqcount_t *s = &per_cpu(xt_recseq, cpu);
765
766                 i = 0;
767                 xt_entry_foreach(iter, t->entries, t->size) {
768                         struct xt_counters *tmp;
769                         u64 bcnt, pcnt;
770                         unsigned int start;
771
772                         tmp = xt_get_per_cpu_counter(&iter->counters, cpu);
773                         do {
774                                 start = read_seqcount_begin(s);
775                                 bcnt = tmp->bcnt;
776                                 pcnt = tmp->pcnt;
777                         } while (read_seqcount_retry(s, start));
778
779                         ADD_COUNTER(counters[i], bcnt, pcnt);
780                         ++i;
781                         cond_resched();
782                 }
783         }
784 }
785
786 static void get_old_counters(const struct xt_table_info *t,
787                              struct xt_counters counters[])
788 {
789         struct ip6t_entry *iter;
790         unsigned int cpu, i;
791
792         for_each_possible_cpu(cpu) {
793                 i = 0;
794                 xt_entry_foreach(iter, t->entries, t->size) {
795                         const struct xt_counters *tmp;
796
797                         tmp = xt_get_per_cpu_counter(&iter->counters, cpu);
798                         ADD_COUNTER(counters[i], tmp->bcnt, tmp->pcnt);
799                         ++i;
800                 }
801                 cond_resched();
802         }
803 }
804
805 static struct xt_counters *alloc_counters(const struct xt_table *table)
806 {
807         unsigned int countersize;
808         struct xt_counters *counters;
809         const struct xt_table_info *private = table->private;
810
811         /* We need an atomic snapshot of counters: the rest doesn't change
812            (other than comefrom, which userspace doesn't care
813            about). */
814         countersize = sizeof(struct xt_counters) * private->number;
815         counters = vzalloc(countersize);
816
817         if (counters == NULL)
818                 return ERR_PTR(-ENOMEM);
819
820         get_counters(private, counters);
821
822         return counters;
823 }
824
825 static int
826 copy_entries_to_user(unsigned int total_size,
827                      const struct xt_table *table,
828                      void __user *userptr)
829 {
830         unsigned int off, num;
831         const struct ip6t_entry *e;
832         struct xt_counters *counters;
833         const struct xt_table_info *private = table->private;
834         int ret = 0;
835         const void *loc_cpu_entry;
836
837         counters = alloc_counters(table);
838         if (IS_ERR(counters))
839                 return PTR_ERR(counters);
840
841         loc_cpu_entry = private->entries;
842
843         /* FIXME: use iterator macros --RR */
844         /* ... then go back and fix counters and names */
845         for (off = 0, num = 0; off < total_size; off += e->next_offset, num++){
846                 unsigned int i;
847                 const struct xt_entry_match *m;
848                 const struct xt_entry_target *t;
849
850                 e = loc_cpu_entry + off;
851                 if (copy_to_user(userptr + off, e, sizeof(*e))) {
852                         ret = -EFAULT;
853                         goto free_counters;
854                 }
855                 if (copy_to_user(userptr + off
856                                  + offsetof(struct ip6t_entry, counters),
857                                  &counters[num],
858                                  sizeof(counters[num])) != 0) {
859                         ret = -EFAULT;
860                         goto free_counters;
861                 }
862
863                 for (i = sizeof(struct ip6t_entry);
864                      i < e->target_offset;
865                      i += m->u.match_size) {
866                         m = (void *)e + i;
867
868                         if (xt_match_to_user(m, userptr + off + i)) {
869                                 ret = -EFAULT;
870                                 goto free_counters;
871                         }
872                 }
873
874                 t = ip6t_get_target_c(e);
875                 if (xt_target_to_user(t, userptr + off + e->target_offset)) {
876                         ret = -EFAULT;
877                         goto free_counters;
878                 }
879         }
880
881  free_counters:
882         vfree(counters);
883         return ret;
884 }
885
886 #ifdef CONFIG_NETFILTER_XTABLES_COMPAT
887 static void compat_standard_from_user(void *dst, const void *src)
888 {
889         int v = *(compat_int_t *)src;
890
891         if (v > 0)
892                 v += xt_compat_calc_jump(AF_INET6, v);
893         memcpy(dst, &v, sizeof(v));
894 }
895
896 static int compat_standard_to_user(void __user *dst, const void *src)
897 {
898         compat_int_t cv = *(int *)src;
899
900         if (cv > 0)
901                 cv -= xt_compat_calc_jump(AF_INET6, cv);
902         return copy_to_user(dst, &cv, sizeof(cv)) ? -EFAULT : 0;
903 }
904
905 static int compat_calc_entry(const struct ip6t_entry *e,
906                              const struct xt_table_info *info,
907                              const void *base, struct xt_table_info *newinfo)
908 {
909         const struct xt_entry_match *ematch;
910         const struct xt_entry_target *t;
911         unsigned int entry_offset;
912         int off, i, ret;
913
914         off = sizeof(struct ip6t_entry) - sizeof(struct compat_ip6t_entry);
915         entry_offset = (void *)e - base;
916         xt_ematch_foreach(ematch, e)
917                 off += xt_compat_match_offset(ematch->u.kernel.match);
918         t = ip6t_get_target_c(e);
919         off += xt_compat_target_offset(t->u.kernel.target);
920         newinfo->size -= off;
921         ret = xt_compat_add_offset(AF_INET6, entry_offset, off);
922         if (ret)
923                 return ret;
924
925         for (i = 0; i < NF_INET_NUMHOOKS; i++) {
926                 if (info->hook_entry[i] &&
927                     (e < (struct ip6t_entry *)(base + info->hook_entry[i])))
928                         newinfo->hook_entry[i] -= off;
929                 if (info->underflow[i] &&
930                     (e < (struct ip6t_entry *)(base + info->underflow[i])))
931                         newinfo->underflow[i] -= off;
932         }
933         return 0;
934 }
935
936 static int compat_table_info(const struct xt_table_info *info,
937                              struct xt_table_info *newinfo)
938 {
939         struct ip6t_entry *iter;
940         const void *loc_cpu_entry;
941         int ret;
942
943         if (!newinfo || !info)
944                 return -EINVAL;
945
946         /* we don't care about newinfo->entries */
947         memcpy(newinfo, info, offsetof(struct xt_table_info, entries));
948         newinfo->initial_entries = 0;
949         loc_cpu_entry = info->entries;
950         ret = xt_compat_init_offsets(AF_INET6, info->number);
951         if (ret)
952                 return ret;
953         xt_entry_foreach(iter, loc_cpu_entry, info->size) {
954                 ret = compat_calc_entry(iter, info, loc_cpu_entry, newinfo);
955                 if (ret != 0)
956                         return ret;
957         }
958         return 0;
959 }
960 #endif
961
962 static int get_info(struct net *net, void __user *user, const int *len)
963 {
964         char name[XT_TABLE_MAXNAMELEN];
965         struct xt_table *t;
966         int ret;
967
968         if (*len != sizeof(struct ip6t_getinfo))
969                 return -EINVAL;
970
971         if (copy_from_user(name, user, sizeof(name)) != 0)
972                 return -EFAULT;
973
974         name[XT_TABLE_MAXNAMELEN-1] = '\0';
975 #ifdef CONFIG_NETFILTER_XTABLES_COMPAT
976         if (in_compat_syscall())
977                 xt_compat_lock(AF_INET6);
978 #endif
979         t = xt_request_find_table_lock(net, AF_INET6, name);
980         if (!IS_ERR(t)) {
981                 struct ip6t_getinfo info;
982                 const struct xt_table_info *private = t->private;
983 #ifdef CONFIG_NETFILTER_XTABLES_COMPAT
984                 struct xt_table_info tmp;
985
986                 if (in_compat_syscall()) {
987                         ret = compat_table_info(private, &tmp);
988                         xt_compat_flush_offsets(AF_INET6);
989                         private = &tmp;
990                 }
991 #endif
992                 memset(&info, 0, sizeof(info));
993                 info.valid_hooks = t->valid_hooks;
994                 memcpy(info.hook_entry, private->hook_entry,
995                        sizeof(info.hook_entry));
996                 memcpy(info.underflow, private->underflow,
997                        sizeof(info.underflow));
998                 info.num_entries = private->number;
999                 info.size = private->size;
1000                 strcpy(info.name, name);
1001
1002                 if (copy_to_user(user, &info, *len) != 0)
1003                         ret = -EFAULT;
1004                 else
1005                         ret = 0;
1006
1007                 xt_table_unlock(t);
1008                 module_put(t->me);
1009         } else
1010                 ret = PTR_ERR(t);
1011 #ifdef CONFIG_NETFILTER_XTABLES_COMPAT
1012         if (in_compat_syscall())
1013                 xt_compat_unlock(AF_INET6);
1014 #endif
1015         return ret;
1016 }
1017
1018 static int
1019 get_entries(struct net *net, struct ip6t_get_entries __user *uptr,
1020             const int *len)
1021 {
1022         int ret;
1023         struct ip6t_get_entries get;
1024         struct xt_table *t;
1025
1026         if (*len < sizeof(get))
1027                 return -EINVAL;
1028         if (copy_from_user(&get, uptr, sizeof(get)) != 0)
1029                 return -EFAULT;
1030         if (*len != sizeof(struct ip6t_get_entries) + get.size)
1031                 return -EINVAL;
1032
1033         get.name[sizeof(get.name) - 1] = '\0';
1034
1035         t = xt_find_table_lock(net, AF_INET6, get.name);
1036         if (!IS_ERR(t)) {
1037                 struct xt_table_info *private = t->private;
1038                 if (get.size == private->size)
1039                         ret = copy_entries_to_user(private->size,
1040                                                    t, uptr->entrytable);
1041                 else
1042                         ret = -EAGAIN;
1043
1044                 module_put(t->me);
1045                 xt_table_unlock(t);
1046         } else
1047                 ret = PTR_ERR(t);
1048
1049         return ret;
1050 }
1051
1052 static int
1053 __do_replace(struct net *net, const char *name, unsigned int valid_hooks,
1054              struct xt_table_info *newinfo, unsigned int num_counters,
1055              void __user *counters_ptr)
1056 {
1057         int ret;
1058         struct xt_table *t;
1059         struct xt_table_info *oldinfo;
1060         struct xt_counters *counters;
1061         struct ip6t_entry *iter;
1062
1063         counters = xt_counters_alloc(num_counters);
1064         if (!counters) {
1065                 ret = -ENOMEM;
1066                 goto out;
1067         }
1068
1069         t = xt_request_find_table_lock(net, AF_INET6, name);
1070         if (IS_ERR(t)) {
1071                 ret = PTR_ERR(t);
1072                 goto free_newinfo_counters_untrans;
1073         }
1074
1075         /* You lied! */
1076         if (valid_hooks != t->valid_hooks) {
1077                 ret = -EINVAL;
1078                 goto put_module;
1079         }
1080
1081         oldinfo = xt_replace_table(t, num_counters, newinfo, &ret);
1082         if (!oldinfo)
1083                 goto put_module;
1084
1085         /* Update module usage count based on number of rules */
1086         if ((oldinfo->number > oldinfo->initial_entries) ||
1087             (newinfo->number <= oldinfo->initial_entries))
1088                 module_put(t->me);
1089         if ((oldinfo->number > oldinfo->initial_entries) &&
1090             (newinfo->number <= oldinfo->initial_entries))
1091                 module_put(t->me);
1092
1093         xt_table_unlock(t);
1094
1095         get_old_counters(oldinfo, counters);
1096
1097         /* Decrease module usage counts and free resource */
1098         xt_entry_foreach(iter, oldinfo->entries, oldinfo->size)
1099                 cleanup_entry(iter, net);
1100
1101         xt_free_table_info(oldinfo);
1102         if (copy_to_user(counters_ptr, counters,
1103                          sizeof(struct xt_counters) * num_counters) != 0) {
1104                 /* Silent error, can't fail, new table is already in place */
1105                 net_warn_ratelimited("ip6tables: counters copy to user failed while replacing table\n");
1106         }
1107         vfree(counters);
1108         return 0;
1109
1110  put_module:
1111         module_put(t->me);
1112         xt_table_unlock(t);
1113  free_newinfo_counters_untrans:
1114         vfree(counters);
1115  out:
1116         return ret;
1117 }
1118
1119 static int
1120 do_replace(struct net *net, sockptr_t arg, unsigned int len)
1121 {
1122         int ret;
1123         struct ip6t_replace tmp;
1124         struct xt_table_info *newinfo;
1125         void *loc_cpu_entry;
1126         struct ip6t_entry *iter;
1127
1128         if (copy_from_sockptr(&tmp, arg, sizeof(tmp)) != 0)
1129                 return -EFAULT;
1130
1131         /* overflow check */
1132         if (tmp.num_counters >= INT_MAX / sizeof(struct xt_counters))
1133                 return -ENOMEM;
1134         if (tmp.num_counters == 0)
1135                 return -EINVAL;
1136
1137         tmp.name[sizeof(tmp.name)-1] = 0;
1138
1139         newinfo = xt_alloc_table_info(tmp.size);
1140         if (!newinfo)
1141                 return -ENOMEM;
1142
1143         loc_cpu_entry = newinfo->entries;
1144         if (copy_from_sockptr_offset(loc_cpu_entry, arg, sizeof(tmp),
1145                         tmp.size) != 0) {
1146                 ret = -EFAULT;
1147                 goto free_newinfo;
1148         }
1149
1150         ret = translate_table(net, newinfo, loc_cpu_entry, &tmp);
1151         if (ret != 0)
1152                 goto free_newinfo;
1153
1154         ret = __do_replace(net, tmp.name, tmp.valid_hooks, newinfo,
1155                            tmp.num_counters, tmp.counters);
1156         if (ret)
1157                 goto free_newinfo_untrans;
1158         return 0;
1159
1160  free_newinfo_untrans:
1161         xt_entry_foreach(iter, loc_cpu_entry, newinfo->size)
1162                 cleanup_entry(iter, net);
1163  free_newinfo:
1164         xt_free_table_info(newinfo);
1165         return ret;
1166 }
1167
1168 static int
1169 do_add_counters(struct net *net, sockptr_t arg, unsigned int len)
1170 {
1171         unsigned int i;
1172         struct xt_counters_info tmp;
1173         struct xt_counters *paddc;
1174         struct xt_table *t;
1175         const struct xt_table_info *private;
1176         int ret = 0;
1177         struct ip6t_entry *iter;
1178         unsigned int addend;
1179
1180         paddc = xt_copy_counters(arg, len, &tmp);
1181         if (IS_ERR(paddc))
1182                 return PTR_ERR(paddc);
1183         t = xt_find_table_lock(net, AF_INET6, tmp.name);
1184         if (IS_ERR(t)) {
1185                 ret = PTR_ERR(t);
1186                 goto free;
1187         }
1188
1189         local_bh_disable();
1190         private = t->private;
1191         if (private->number != tmp.num_counters) {
1192                 ret = -EINVAL;
1193                 goto unlock_up_free;
1194         }
1195
1196         i = 0;
1197         addend = xt_write_recseq_begin();
1198         xt_entry_foreach(iter, private->entries, private->size) {
1199                 struct xt_counters *tmp;
1200
1201                 tmp = xt_get_this_cpu_counter(&iter->counters);
1202                 ADD_COUNTER(*tmp, paddc[i].bcnt, paddc[i].pcnt);
1203                 ++i;
1204         }
1205         xt_write_recseq_end(addend);
1206  unlock_up_free:
1207         local_bh_enable();
1208         xt_table_unlock(t);
1209         module_put(t->me);
1210  free:
1211         vfree(paddc);
1212
1213         return ret;
1214 }
1215
1216 #ifdef CONFIG_NETFILTER_XTABLES_COMPAT
1217 struct compat_ip6t_replace {
1218         char                    name[XT_TABLE_MAXNAMELEN];
1219         u32                     valid_hooks;
1220         u32                     num_entries;
1221         u32                     size;
1222         u32                     hook_entry[NF_INET_NUMHOOKS];
1223         u32                     underflow[NF_INET_NUMHOOKS];
1224         u32                     num_counters;
1225         compat_uptr_t           counters;       /* struct xt_counters * */
1226         struct compat_ip6t_entry entries[];
1227 };
1228
1229 static int
1230 compat_copy_entry_to_user(struct ip6t_entry *e, void __user **dstptr,
1231                           unsigned int *size, struct xt_counters *counters,
1232                           unsigned int i)
1233 {
1234         struct xt_entry_target *t;
1235         struct compat_ip6t_entry __user *ce;
1236         u_int16_t target_offset, next_offset;
1237         compat_uint_t origsize;
1238         const struct xt_entry_match *ematch;
1239         int ret = 0;
1240
1241         origsize = *size;
1242         ce = *dstptr;
1243         if (copy_to_user(ce, e, sizeof(struct ip6t_entry)) != 0 ||
1244             copy_to_user(&ce->counters, &counters[i],
1245             sizeof(counters[i])) != 0)
1246                 return -EFAULT;
1247
1248         *dstptr += sizeof(struct compat_ip6t_entry);
1249         *size -= sizeof(struct ip6t_entry) - sizeof(struct compat_ip6t_entry);
1250
1251         xt_ematch_foreach(ematch, e) {
1252                 ret = xt_compat_match_to_user(ematch, dstptr, size);
1253                 if (ret != 0)
1254                         return ret;
1255         }
1256         target_offset = e->target_offset - (origsize - *size);
1257         t = ip6t_get_target(e);
1258         ret = xt_compat_target_to_user(t, dstptr, size);
1259         if (ret)
1260                 return ret;
1261         next_offset = e->next_offset - (origsize - *size);
1262         if (put_user(target_offset, &ce->target_offset) != 0 ||
1263             put_user(next_offset, &ce->next_offset) != 0)
1264                 return -EFAULT;
1265         return 0;
1266 }
1267
1268 static int
1269 compat_find_calc_match(struct xt_entry_match *m,
1270                        const struct ip6t_ip6 *ipv6,
1271                        int *size)
1272 {
1273         struct xt_match *match;
1274
1275         match = xt_request_find_match(NFPROTO_IPV6, m->u.user.name,
1276                                       m->u.user.revision);
1277         if (IS_ERR(match))
1278                 return PTR_ERR(match);
1279
1280         m->u.kernel.match = match;
1281         *size += xt_compat_match_offset(match);
1282         return 0;
1283 }
1284
1285 static void compat_release_entry(struct compat_ip6t_entry *e)
1286 {
1287         struct xt_entry_target *t;
1288         struct xt_entry_match *ematch;
1289
1290         /* Cleanup all matches */
1291         xt_ematch_foreach(ematch, e)
1292                 module_put(ematch->u.kernel.match->me);
1293         t = compat_ip6t_get_target(e);
1294         module_put(t->u.kernel.target->me);
1295 }
1296
1297 static int
1298 check_compat_entry_size_and_hooks(struct compat_ip6t_entry *e,
1299                                   struct xt_table_info *newinfo,
1300                                   unsigned int *size,
1301                                   const unsigned char *base,
1302                                   const unsigned char *limit)
1303 {
1304         struct xt_entry_match *ematch;
1305         struct xt_entry_target *t;
1306         struct xt_target *target;
1307         unsigned int entry_offset;
1308         unsigned int j;
1309         int ret, off;
1310
1311         if ((unsigned long)e % __alignof__(struct compat_ip6t_entry) != 0 ||
1312             (unsigned char *)e + sizeof(struct compat_ip6t_entry) >= limit ||
1313             (unsigned char *)e + e->next_offset > limit)
1314                 return -EINVAL;
1315
1316         if (e->next_offset < sizeof(struct compat_ip6t_entry) +
1317                              sizeof(struct compat_xt_entry_target))
1318                 return -EINVAL;
1319
1320         if (!ip6_checkentry(&e->ipv6))
1321                 return -EINVAL;
1322
1323         ret = xt_compat_check_entry_offsets(e, e->elems,
1324                                             e->target_offset, e->next_offset);
1325         if (ret)
1326                 return ret;
1327
1328         off = sizeof(struct ip6t_entry) - sizeof(struct compat_ip6t_entry);
1329         entry_offset = (void *)e - (void *)base;
1330         j = 0;
1331         xt_ematch_foreach(ematch, e) {
1332                 ret = compat_find_calc_match(ematch, &e->ipv6, &off);
1333                 if (ret != 0)
1334                         goto release_matches;
1335                 ++j;
1336         }
1337
1338         t = compat_ip6t_get_target(e);
1339         target = xt_request_find_target(NFPROTO_IPV6, t->u.user.name,
1340                                         t->u.user.revision);
1341         if (IS_ERR(target)) {
1342                 ret = PTR_ERR(target);
1343                 goto release_matches;
1344         }
1345         t->u.kernel.target = target;
1346
1347         off += xt_compat_target_offset(target);
1348         *size += off;
1349         ret = xt_compat_add_offset(AF_INET6, entry_offset, off);
1350         if (ret)
1351                 goto out;
1352
1353         return 0;
1354
1355 out:
1356         module_put(t->u.kernel.target->me);
1357 release_matches:
1358         xt_ematch_foreach(ematch, e) {
1359                 if (j-- == 0)
1360                         break;
1361                 module_put(ematch->u.kernel.match->me);
1362         }
1363         return ret;
1364 }
1365
1366 static void
1367 compat_copy_entry_from_user(struct compat_ip6t_entry *e, void **dstptr,
1368                             unsigned int *size,
1369                             struct xt_table_info *newinfo, unsigned char *base)
1370 {
1371         struct xt_entry_target *t;
1372         struct ip6t_entry *de;
1373         unsigned int origsize;
1374         int h;
1375         struct xt_entry_match *ematch;
1376
1377         origsize = *size;
1378         de = *dstptr;
1379         memcpy(de, e, sizeof(struct ip6t_entry));
1380         memcpy(&de->counters, &e->counters, sizeof(e->counters));
1381
1382         *dstptr += sizeof(struct ip6t_entry);
1383         *size += sizeof(struct ip6t_entry) - sizeof(struct compat_ip6t_entry);
1384
1385         xt_ematch_foreach(ematch, e)
1386                 xt_compat_match_from_user(ematch, dstptr, size);
1387
1388         de->target_offset = e->target_offset - (origsize - *size);
1389         t = compat_ip6t_get_target(e);
1390         xt_compat_target_from_user(t, dstptr, size);
1391
1392         de->next_offset = e->next_offset - (origsize - *size);
1393         for (h = 0; h < NF_INET_NUMHOOKS; h++) {
1394                 if ((unsigned char *)de - base < newinfo->hook_entry[h])
1395                         newinfo->hook_entry[h] -= origsize - *size;
1396                 if ((unsigned char *)de - base < newinfo->underflow[h])
1397                         newinfo->underflow[h] -= origsize - *size;
1398         }
1399 }
1400
1401 static int
1402 translate_compat_table(struct net *net,
1403                        struct xt_table_info **pinfo,
1404                        void **pentry0,
1405                        const struct compat_ip6t_replace *compatr)
1406 {
1407         unsigned int i, j;
1408         struct xt_table_info *newinfo, *info;
1409         void *pos, *entry0, *entry1;
1410         struct compat_ip6t_entry *iter0;
1411         struct ip6t_replace repl;
1412         unsigned int size;
1413         int ret;
1414
1415         info = *pinfo;
1416         entry0 = *pentry0;
1417         size = compatr->size;
1418         info->number = compatr->num_entries;
1419
1420         j = 0;
1421         xt_compat_lock(AF_INET6);
1422         ret = xt_compat_init_offsets(AF_INET6, compatr->num_entries);
1423         if (ret)
1424                 goto out_unlock;
1425         /* Walk through entries, checking offsets. */
1426         xt_entry_foreach(iter0, entry0, compatr->size) {
1427                 ret = check_compat_entry_size_and_hooks(iter0, info, &size,
1428                                                         entry0,
1429                                                         entry0 + compatr->size);
1430                 if (ret != 0)
1431                         goto out_unlock;
1432                 ++j;
1433         }
1434
1435         ret = -EINVAL;
1436         if (j != compatr->num_entries)
1437                 goto out_unlock;
1438
1439         ret = -ENOMEM;
1440         newinfo = xt_alloc_table_info(size);
1441         if (!newinfo)
1442                 goto out_unlock;
1443
1444         memset(newinfo->entries, 0, size);
1445
1446         newinfo->number = compatr->num_entries;
1447         for (i = 0; i < NF_INET_NUMHOOKS; i++) {
1448                 newinfo->hook_entry[i] = compatr->hook_entry[i];
1449                 newinfo->underflow[i] = compatr->underflow[i];
1450         }
1451         entry1 = newinfo->entries;
1452         pos = entry1;
1453         size = compatr->size;
1454         xt_entry_foreach(iter0, entry0, compatr->size)
1455                 compat_copy_entry_from_user(iter0, &pos, &size,
1456                                             newinfo, entry1);
1457
1458         /* all module references in entry0 are now gone. */
1459         xt_compat_flush_offsets(AF_INET6);
1460         xt_compat_unlock(AF_INET6);
1461
1462         memcpy(&repl, compatr, sizeof(*compatr));
1463
1464         for (i = 0; i < NF_INET_NUMHOOKS; i++) {
1465                 repl.hook_entry[i] = newinfo->hook_entry[i];
1466                 repl.underflow[i] = newinfo->underflow[i];
1467         }
1468
1469         repl.num_counters = 0;
1470         repl.counters = NULL;
1471         repl.size = newinfo->size;
1472         ret = translate_table(net, newinfo, entry1, &repl);
1473         if (ret)
1474                 goto free_newinfo;
1475
1476         *pinfo = newinfo;
1477         *pentry0 = entry1;
1478         xt_free_table_info(info);
1479         return 0;
1480
1481 free_newinfo:
1482         xt_free_table_info(newinfo);
1483         return ret;
1484 out_unlock:
1485         xt_compat_flush_offsets(AF_INET6);
1486         xt_compat_unlock(AF_INET6);
1487         xt_entry_foreach(iter0, entry0, compatr->size) {
1488                 if (j-- == 0)
1489                         break;
1490                 compat_release_entry(iter0);
1491         }
1492         return ret;
1493 }
1494
1495 static int
1496 compat_do_replace(struct net *net, sockptr_t arg, unsigned int len)
1497 {
1498         int ret;
1499         struct compat_ip6t_replace tmp;
1500         struct xt_table_info *newinfo;
1501         void *loc_cpu_entry;
1502         struct ip6t_entry *iter;
1503
1504         if (copy_from_sockptr(&tmp, arg, sizeof(tmp)) != 0)
1505                 return -EFAULT;
1506
1507         /* overflow check */
1508         if (tmp.num_counters >= INT_MAX / sizeof(struct xt_counters))
1509                 return -ENOMEM;
1510         if (tmp.num_counters == 0)
1511                 return -EINVAL;
1512
1513         tmp.name[sizeof(tmp.name)-1] = 0;
1514
1515         newinfo = xt_alloc_table_info(tmp.size);
1516         if (!newinfo)
1517                 return -ENOMEM;
1518
1519         loc_cpu_entry = newinfo->entries;
1520         if (copy_from_sockptr_offset(loc_cpu_entry, arg, sizeof(tmp),
1521                         tmp.size) != 0) {
1522                 ret = -EFAULT;
1523                 goto free_newinfo;
1524         }
1525
1526         ret = translate_compat_table(net, &newinfo, &loc_cpu_entry, &tmp);
1527         if (ret != 0)
1528                 goto free_newinfo;
1529
1530         ret = __do_replace(net, tmp.name, tmp.valid_hooks, newinfo,
1531                            tmp.num_counters, compat_ptr(tmp.counters));
1532         if (ret)
1533                 goto free_newinfo_untrans;
1534         return 0;
1535
1536  free_newinfo_untrans:
1537         xt_entry_foreach(iter, loc_cpu_entry, newinfo->size)
1538                 cleanup_entry(iter, net);
1539  free_newinfo:
1540         xt_free_table_info(newinfo);
1541         return ret;
1542 }
1543
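/*
 * 32-bit layout of struct ip6t_get_entries, as seen by compat userspace
 * issuing IP6T_SO_GET_ENTRIES.
 */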
1544 struct compat_ip6t_get_entries {
1545         char name[XT_TABLE_MAXNAMELEN];
1546         compat_uint_t size;
1547         struct compat_ip6t_entry entrytable[];
1548 };
1549
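/*
 * Dump the table's rules in 32-bit form: snapshot the counters, then
 * convert each entry with compat_copy_entry_to_user().
 */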
1550 static int
1551 compat_copy_entries_to_user(unsigned int total_size, struct xt_table *table,
1552                             void __user *userptr)
1553 {
1554         struct xt_counters *counters;
1555         const struct xt_table_info *private = table->private;
1556         void __user *pos;
1557         unsigned int size;
1558         int ret = 0;
1559         unsigned int i = 0;
1560         struct ip6t_entry *iter;
1561
1562         counters = alloc_counters(table);
1563         if (IS_ERR(counters))
1564                 return PTR_ERR(counters);
1565
1566         pos = userptr;
1567         size = total_size;
1568         xt_entry_foreach(iter, private->entries, total_size) {
1569                 ret = compat_copy_entry_to_user(iter, &pos,
1570                                                 &size, counters, i++);
1571                 if (ret != 0)
1572                         break;
1573         }
1574
1575         vfree(counters);
1576         return ret;
1577 }
1578
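/*
 * Compat handler for IP6T_SO_GET_ENTRIES: validate the requested size
 * against compat_table_info() and, if it matches, copy the converted
 * ruleset back to userspace.
 */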
1579 static int
1580 compat_get_entries(struct net *net, struct compat_ip6t_get_entries __user *uptr,
1581                    int *len)
1582 {
1583         int ret;
1584         struct compat_ip6t_get_entries get;
1585         struct xt_table *t;
1586
1587         if (*len < sizeof(get))
1588                 return -EINVAL;
1589
1590         if (copy_from_user(&get, uptr, sizeof(get)) != 0)
1591                 return -EFAULT;
1592
1593         if (*len != sizeof(struct compat_ip6t_get_entries) + get.size)
1594                 return -EINVAL;
1595
1596         get.name[sizeof(get.name) - 1] = '\0';
1597
1598         xt_compat_lock(AF_INET6);
1599         t = xt_find_table_lock(net, AF_INET6, get.name);
1600         if (!IS_ERR(t)) {
1601                 const struct xt_table_info *private = t->private;
1602                 struct xt_table_info info;
1603                 ret = compat_table_info(private, &info);
1604                 if (!ret && get.size == info.size)
1605                         ret = compat_copy_entries_to_user(private->size,
1606                                                           t, uptr->entrytable);
1607                 else if (!ret)
1608                         ret = -EAGAIN;
1609
1610                 xt_compat_flush_offsets(AF_INET6);
1611                 module_put(t->me);
1612                 xt_table_unlock(t);
1613         } else
1614                 ret = PTR_ERR(t);
1615
1616         xt_compat_unlock(AF_INET6);
1617         return ret;
1618 }
1619 #endif
1620
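/*
 * setsockopt() handler wired up through ip6t_sockopts below; requires
 * CAP_NET_ADMIN in the socket's user namespace and dispatches on the
 * IP6T_SO_SET_* command.
 */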
1621 static int
1622 do_ip6t_set_ctl(struct sock *sk, int cmd, sockptr_t arg, unsigned int len)
1623 {
1624         int ret;
1625
1626         if (!ns_capable(sock_net(sk)->user_ns, CAP_NET_ADMIN))
1627                 return -EPERM;
1628
1629         switch (cmd) {
1630         case IP6T_SO_SET_REPLACE:
1631 #ifdef CONFIG_NETFILTER_XTABLES_COMPAT
1632                 if (in_compat_syscall())
1633                         ret = compat_do_replace(sock_net(sk), arg, len);
1634                 else
1635 #endif
1636                         ret = do_replace(sock_net(sk), arg, len);
1637                 break;
1638
1639         case IP6T_SO_SET_ADD_COUNTERS:
1640                 ret = do_add_counters(sock_net(sk), arg, len);
1641                 break;
1642
1643         default:
1644                 ret = -EINVAL;
1645         }
1646
1647         return ret;
1648 }
1649
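/*
 * getsockopt() handler: table info, rule dumps and match/target revision
 * queries. Rule dumps take the 32-bit conversion path when
 * in_compat_syscall(); revision queries autoload "ip6t_<name>" modules
 * on demand.
 */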
1650 static int
1651 do_ip6t_get_ctl(struct sock *sk, int cmd, void __user *user, int *len)
1652 {
1653         int ret;
1654
1655         if (!ns_capable(sock_net(sk)->user_ns, CAP_NET_ADMIN))
1656                 return -EPERM;
1657
1658         switch (cmd) {
1659         case IP6T_SO_GET_INFO:
1660                 ret = get_info(sock_net(sk), user, len);
1661                 break;
1662
1663         case IP6T_SO_GET_ENTRIES:
1664 #ifdef CONFIG_NETFILTER_XTABLES_COMPAT
1665                 if (in_compat_syscall())
1666                         ret = compat_get_entries(sock_net(sk), user, len);
1667                 else
1668 #endif
1669                         ret = get_entries(sock_net(sk), user, len);
1670                 break;
1671
1672         case IP6T_SO_GET_REVISION_MATCH:
1673         case IP6T_SO_GET_REVISION_TARGET: {
1674                 struct xt_get_revision rev;
1675                 int target;
1676
1677                 if (*len != sizeof(rev)) {
1678                         ret = -EINVAL;
1679                         break;
1680                 }
1681                 if (copy_from_user(&rev, user, sizeof(rev)) != 0) {
1682                         ret = -EFAULT;
1683                         break;
1684                 }
1685                 rev.name[sizeof(rev.name)-1] = 0;
1686
1687                 if (cmd == IP6T_SO_GET_REVISION_TARGET)
1688                         target = 1;
1689                 else
1690                         target = 0;
1691
1692                 try_then_request_module(xt_find_revision(AF_INET6, rev.name,
1693                                                          rev.revision,
1694                                                          target, &ret),
1695                                         "ip6t_%s", rev.name);
1696                 break;
1697         }
1698
1699         default:
1700                 ret = -EINVAL;
1701         }
1702
1703         return ret;
1704 }
1705
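/*
 * Common teardown: detach the table from the xt core, run cleanup_entry()
 * on every rule to drop match/target references, put the table module's
 * extra reference if user-loaded rules had taken one, and free the
 * table info.
 */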
1706 static void __ip6t_unregister_table(struct net *net, struct xt_table *table)
1707 {
1708         struct xt_table_info *private;
1709         void *loc_cpu_entry;
1710         struct module *table_owner = table->me;
1711         struct ip6t_entry *iter;
1712
1713         private = xt_unregister_table(table);
1714
1715         /* Decrease module usage counts and free resources */
1716         loc_cpu_entry = private->entries;
1717         xt_entry_foreach(iter, loc_cpu_entry, private->size)
1718                 cleanup_entry(iter, net);
1719         if (private->number > private->initial_entries)
1720                 module_put(table_owner);
1721         xt_free_table_info(private);
1722 }
1723
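/*
 * Register a table in one network namespace: translate the initial
 * ruleset, register it with the xt core and, unless template_ops is NULL
 * (in which case the caller attaches its own hooks), register one
 * netfilter hook per bit set in table->valid_hooks.
 */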
1724 int ip6t_register_table(struct net *net, const struct xt_table *table,
1725                         const struct ip6t_replace *repl,
1726                         const struct nf_hook_ops *template_ops)
1727 {
1728         struct nf_hook_ops *ops;
1729         unsigned int num_ops;
1730         int ret, i;
1731         struct xt_table_info *newinfo;
1732         struct xt_table_info bootstrap = {0};
1733         void *loc_cpu_entry;
1734         struct xt_table *new_table;
1735
1736         newinfo = xt_alloc_table_info(repl->size);
1737         if (!newinfo)
1738                 return -ENOMEM;
1739
1740         loc_cpu_entry = newinfo->entries;
1741         memcpy(loc_cpu_entry, repl->entries, repl->size);
1742
1743         ret = translate_table(net, newinfo, loc_cpu_entry, repl);
1744         if (ret != 0) {
1745                 xt_free_table_info(newinfo);
1746                 return ret;
1747         }
1748
1749         new_table = xt_register_table(net, table, &bootstrap, newinfo);
1750         if (IS_ERR(new_table)) {
1751                 struct ip6t_entry *iter;
1752
1753                 xt_entry_foreach(iter, loc_cpu_entry, newinfo->size)
1754                         cleanup_entry(iter, net);
1755                 xt_free_table_info(newinfo);
1756                 return PTR_ERR(new_table);
1757         }
1758
1759         if (!template_ops)
1760                 return 0;
1761
1762         num_ops = hweight32(table->valid_hooks);
1763         if (num_ops == 0) {
1764                 ret = -EINVAL;
1765                 goto out_free;
1766         }
1767
1768         ops = kmemdup(template_ops, sizeof(*ops) * num_ops, GFP_KERNEL);
1769         if (!ops) {
1770                 ret = -ENOMEM;
1771                 goto out_free;
1772         }
1773
1774         for (i = 0; i < num_ops; i++)
1775                 ops[i].priv = new_table;
1776
1777         new_table->ops = ops;
1778
1779         ret = nf_register_net_hooks(net, ops, num_ops);
1780         if (ret != 0)
1781                 goto out_free;
1782
1783         return ret;
1784
1785 out_free:
1786         __ip6t_unregister_table(net, new_table);
1787         return ret;
1788 }
1789
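/*
 * Table removal is split in two phases: the pre_exit step unregisters the
 * netfilter hooks so no new packets can enter the table, and the exit
 * step frees the rules and the table itself.
 */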
1790 void ip6t_unregister_table_pre_exit(struct net *net, const char *name)
1791 {
1792         struct xt_table *table = xt_find_table(net, NFPROTO_IPV6, name);
1793
1794         if (table)
1795                 nf_unregister_net_hooks(net, table->ops, hweight32(table->valid_hooks));
1796 }
1797
1798 void ip6t_unregister_table_exit(struct net *net, const char *name)
1799 {
1800         struct xt_table *table = xt_find_table(net, NFPROTO_IPV6, name);
1801
1802         if (table)
1803                 __ip6t_unregister_table(net, table);
1804 }
1805
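/*
 * Illustrative only (not part of this file): a table module such as
 * ip6table_filter typically registers its table per netns roughly like
 * this, where "packet_filter" and "filter_ops" stand in for the module's
 * own xt_table and nf_hook_ops definitions:
 *
 *      static int __net_init ip6table_filter_table_init(struct net *net)
 *      {
 *              struct ip6t_replace *repl;
 *              int err;
 *
 *              repl = ip6t_alloc_initial_table(&packet_filter);
 *              if (repl == NULL)
 *                      return -ENOMEM;
 *              err = ip6t_register_table(net, &packet_filter, repl,
 *                                        filter_ops);
 *              kfree(repl);
 *              return err;
 *      }
 *
 * and tears it down from its pernet pre_exit/exit callbacks with
 * ip6t_unregister_table_pre_exit() / ip6t_unregister_table_exit().
 */
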
1806 /* The built-in targets: standard (NULL) and error. */
1807 static struct xt_target ip6t_builtin_tg[] __read_mostly = {
1808         {
1809                 .name             = XT_STANDARD_TARGET,
1810                 .targetsize       = sizeof(int),
1811                 .family           = NFPROTO_IPV6,
1812 #ifdef CONFIG_NETFILTER_XTABLES_COMPAT
1813                 .compatsize       = sizeof(compat_int_t),
1814                 .compat_from_user = compat_standard_from_user,
1815                 .compat_to_user   = compat_standard_to_user,
1816 #endif
1817         },
1818         {
1819                 .name             = XT_ERROR_TARGET,
1820                 .target           = ip6t_error,
1821                 .targetsize       = XT_FUNCTION_MAXNAMELEN,
1822                 .family           = NFPROTO_IPV6,
1823         },
1824 };
1825
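/* Wire the set/get handlers above into the IP6T_* sockopt ranges. */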
1826 static struct nf_sockopt_ops ip6t_sockopts = {
1827         .pf             = PF_INET6,
1828         .set_optmin     = IP6T_BASE_CTL,
1829         .set_optmax     = IP6T_SO_SET_MAX+1,
1830         .set            = do_ip6t_set_ctl,
1831         .get_optmin     = IP6T_BASE_CTL,
1832         .get_optmax     = IP6T_SO_GET_MAX+1,
1833         .get            = do_ip6t_get_ctl,
1834         .owner          = THIS_MODULE,
1835 };
1836
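/*
 * Per-netns init/exit: xt_proto_init()/xt_proto_fini() set up and tear
 * down the xt core's NFPROTO_IPV6 per-namespace state (the
 * /proc/net/ip6_tables_* listings).
 */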
1837 static int __net_init ip6_tables_net_init(struct net *net)
1838 {
1839         return xt_proto_init(net, NFPROTO_IPV6);
1840 }
1841
1842 static void __net_exit ip6_tables_net_exit(struct net *net)
1843 {
1844         xt_proto_fini(net, NFPROTO_IPV6);
1845 }
1846
1847 static struct pernet_operations ip6_tables_net_ops = {
1848         .init = ip6_tables_net_init,
1849         .exit = ip6_tables_net_exit,
1850 };
1851
1852 static int __init ip6_tables_init(void)
1853 {
1854         int ret;
1855
1856         ret = register_pernet_subsys(&ip6_tables_net_ops);
1857         if (ret < 0)
1858                 goto err1;
1859
1860         /* Nothing else can be holding the xt lock this early, so this won't block */
1861         ret = xt_register_targets(ip6t_builtin_tg, ARRAY_SIZE(ip6t_builtin_tg));
1862         if (ret < 0)
1863                 goto err2;
1864
1865         /* Register setsockopt */
1866         ret = nf_register_sockopt(&ip6t_sockopts);
1867         if (ret < 0)
1868                 goto err4;
1869
1870         return 0;
1871
1872 err4:
1873         xt_unregister_targets(ip6t_builtin_tg, ARRAY_SIZE(ip6t_builtin_tg));
1874 err2:
1875         unregister_pernet_subsys(&ip6_tables_net_ops);
1876 err1:
1877         return ret;
1878 }
1879
1880 static void __exit ip6_tables_fini(void)
1881 {
1882         nf_unregister_sockopt(&ip6t_sockopts);
1883
1884         xt_unregister_targets(ip6t_builtin_tg, ARRAY_SIZE(ip6t_builtin_tg));
1885         unregister_pernet_subsys(&ip6_tables_net_ops);
1886 }
1887
1888 EXPORT_SYMBOL(ip6t_register_table);
1889 EXPORT_SYMBOL(ip6t_unregister_table_pre_exit);
1890 EXPORT_SYMBOL(ip6t_unregister_table_exit);
1891 EXPORT_SYMBOL(ip6t_do_table);
1892
1893 module_init(ip6_tables_init);
1894 module_exit(ip6_tables_fini);