Git Repo - linux.git/commitdiff
Merge git://git.kernel.org/pub/scm/linux/kernel/git/pablo/nf
authorDavid S. Miller <[email protected]>
Sun, 6 Sep 2015 00:36:30 +0000 (17:36 -0700)
committerDavid S. Miller <[email protected]>
Sun, 6 Sep 2015 04:57:42 +0000 (21:57 -0700)
Conflicts:
include/net/netfilter/nf_conntrack.h

The conflict was an overlap between changing the type of the zone
argument to nf_ct_tmpl_alloc() and exporting nf_ct_tmpl_free().

Pablo Neira Ayuso says:

====================
Netfilter fixes for net

The following patchset contains Netfilter fixes for net, they are:

1) Oneliner to restore maps in nf_tables since we support addressing registers
   at 32 bits level.

2) Restore previous default behaviour in bridge netfilter when CONFIG_IPV6=n,
   oneliner from Bernhard Thaler.

3) Out of bound access in ipset hash:net* set types, reported by Dave Jones'
   KASan utility, patch from Jozsef Kadlecsik.

4) Fix ipset compilation with gcc 4.4.7 related to C99 initialization of
   unnamed unions, patch from Elad Raz.

5) Add a workaround to address inconsistent endianness in the res_id field of
   nfnetlink batch messages, reported by Florian Westphal.

6) Fix error paths of CT/synproxy since the conntrack template was moved to use
   kmalloc, patch from Daniel Borkmann.

All of them look good to me to reach 4.2, I can route this to -stable myself
too, just let me know what you prefer.
====================

Signed-off-by: David S. Miller <[email protected]>
1  2 
include/net/netfilter/nf_conntrack.h
net/netfilter/nf_conntrack_core.c
net/netfilter/nf_synproxy_core.c
net/netfilter/xt_CT.c

index f5e23c6dee8bcbcc66705a4d5cefdaef311eb98b,4023c4ce260f0ed55ad30907667e2e135ddfde24..e8ad46834df87453e1335bccb7e80dda2ba9fbe5
@@@ -250,12 -250,8 +250,12 @@@ void nf_ct_untracked_status_or(unsigne
  void nf_ct_iterate_cleanup(struct net *net,
                           int (*iter)(struct nf_conn *i, void *data),
                           void *data, u32 portid, int report);
 +
 +struct nf_conntrack_zone;
 +
  void nf_conntrack_free(struct nf_conn *ct);
 -struct nf_conn *nf_conntrack_alloc(struct net *net, u16 zone,
 +struct nf_conn *nf_conntrack_alloc(struct net *net,
 +                                 const struct nf_conntrack_zone *zone,
                                   const struct nf_conntrack_tuple *orig,
                                   const struct nf_conntrack_tuple *repl,
                                   gfp_t gfp);
@@@ -295,9 -291,8 +295,10 @@@ extern unsigned int nf_conntrack_max
  extern unsigned int nf_conntrack_hash_rnd;
  void init_nf_conntrack_hash_rnd(void);
  
 -struct nf_conn *nf_ct_tmpl_alloc(struct net *net, u16 zone, gfp_t flags);
 +struct nf_conn *nf_ct_tmpl_alloc(struct net *net,
 +                               const struct nf_conntrack_zone *zone,
 +                               gfp_t flags);
+ void nf_ct_tmpl_free(struct nf_conn *tmpl);
  
  #define NF_CT_STAT_INC(net, count)      __this_cpu_inc((net)->ct.stat->count)
  #define NF_CT_STAT_INC_ATOMIC(net, count) this_cpu_inc((net)->ct.stat->count)
index eedf0495f11f5eb93c4bce1e2ffbb1b04cc273b3,0625a42df108849a9fb2deaaf92037e93a24eb9a..c09d6c7198f60d809b36783ca1de43646025c876
@@@ -126,7 -126,7 +126,7 @@@ EXPORT_PER_CPU_SYMBOL(nf_conntrack_untr
  unsigned int nf_conntrack_hash_rnd __read_mostly;
  EXPORT_SYMBOL_GPL(nf_conntrack_hash_rnd);
  
 -static u32 hash_conntrack_raw(const struct nf_conntrack_tuple *tuple, u16 zone)
 +static u32 hash_conntrack_raw(const struct nf_conntrack_tuple *tuple)
  {
        unsigned int n;
  
         * three bytes manually.
         */
        n = (sizeof(tuple->src) + sizeof(tuple->dst.u3)) / sizeof(u32);
 -      return jhash2((u32 *)tuple, n, zone ^ nf_conntrack_hash_rnd ^
 +      return jhash2((u32 *)tuple, n, nf_conntrack_hash_rnd ^
                      (((__force __u16)tuple->dst.u.all << 16) |
                      tuple->dst.protonum));
  }
@@@ -151,15 -151,15 +151,15 @@@ static u32 hash_bucket(u32 hash, const 
  }
  
  static u_int32_t __hash_conntrack(const struct nf_conntrack_tuple *tuple,
 -                                u16 zone, unsigned int size)
 +                                unsigned int size)
  {
 -      return __hash_bucket(hash_conntrack_raw(tuple, zone), size);
 +      return __hash_bucket(hash_conntrack_raw(tuple), size);
  }
  
 -static inline u_int32_t hash_conntrack(const struct net *net, u16 zone,
 +static inline u_int32_t hash_conntrack(const struct net *net,
                                       const struct nf_conntrack_tuple *tuple)
  {
 -      return __hash_conntrack(tuple, zone, net->ct.htable_size);
 +      return __hash_conntrack(tuple, net->ct.htable_size);
  }
  
  bool
@@@ -288,9 -288,7 +288,9 @@@ static void nf_ct_del_from_dying_or_unc
  }
  
  /* Released via destroy_conntrack() */
 -struct nf_conn *nf_ct_tmpl_alloc(struct net *net, u16 zone, gfp_t flags)
 +struct nf_conn *nf_ct_tmpl_alloc(struct net *net,
 +                               const struct nf_conntrack_zone *zone,
 +                               gfp_t flags)
  {
        struct nf_conn *tmpl;
  
        tmpl->status = IPS_TEMPLATE;
        write_pnet(&tmpl->ct_net, net);
  
 -#ifdef CONFIG_NF_CONNTRACK_ZONES
 -      if (zone) {
 -              struct nf_conntrack_zone *nf_ct_zone;
 +      if (nf_ct_zone_add(tmpl, flags, zone) < 0)
 +              goto out_free;
  
 -              nf_ct_zone = nf_ct_ext_add(tmpl, NF_CT_EXT_ZONE, flags);
 -              if (!nf_ct_zone)
 -                      goto out_free;
 -              nf_ct_zone->id = zone;
 -      }
 -#endif
        atomic_set(&tmpl->ct_general.use, 0);
  
        return tmpl;
 -#ifdef CONFIG_NF_CONNTRACK_ZONES
  out_free:
        kfree(tmpl);
        return NULL;
 -#endif
  }
  EXPORT_SYMBOL_GPL(nf_ct_tmpl_alloc);
  
static void nf_ct_tmpl_free(struct nf_conn *tmpl)
+ void nf_ct_tmpl_free(struct nf_conn *tmpl)
  {
        nf_ct_ext_destroy(tmpl);
        nf_ct_ext_free(tmpl);
        kfree(tmpl);
  }
+ EXPORT_SYMBOL_GPL(nf_ct_tmpl_free);
  
  static void
  destroy_conntrack(struct nf_conntrack *nfct)
@@@ -366,6 -374,7 +367,6 @@@ static void nf_ct_delete_from_lists(str
  {
        struct net *net = nf_ct_net(ct);
        unsigned int hash, reply_hash;
 -      u16 zone = nf_ct_zone(ct);
        unsigned int sequence;
  
        nf_ct_helper_destroy(ct);
        local_bh_disable();
        do {
                sequence = read_seqcount_begin(&net->ct.generation);
 -              hash = hash_conntrack(net, zone,
 +              hash = hash_conntrack(net,
                                      &ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple);
 -              reply_hash = hash_conntrack(net, zone,
 +              reply_hash = hash_conntrack(net,
                                           &ct->tuplehash[IP_CT_DIR_REPLY].tuple);
        } while (nf_conntrack_double_lock(net, hash, reply_hash, sequence));
  
@@@ -423,8 -432,8 +424,8 @@@ static void death_by_timeout(unsigned l
  
  static inline bool
  nf_ct_key_equal(struct nf_conntrack_tuple_hash *h,
 -                      const struct nf_conntrack_tuple *tuple,
 -                      u16 zone)
 +              const struct nf_conntrack_tuple *tuple,
 +              const struct nf_conntrack_zone *zone)
  {
        struct nf_conn *ct = nf_ct_tuplehash_to_ctrack(h);
  
         * so we need to check that the conntrack is confirmed
         */
        return nf_ct_tuple_equal(tuple, &h->tuple) &&
 -              nf_ct_zone(ct) == zone &&
 -              nf_ct_is_confirmed(ct);
 +             nf_ct_zone_equal(ct, zone, NF_CT_DIRECTION(h)) &&
 +             nf_ct_is_confirmed(ct);
  }
  
  /*
   *   and recheck nf_ct_tuple_equal(tuple, &h->tuple)
   */
  static struct nf_conntrack_tuple_hash *
 -____nf_conntrack_find(struct net *net, u16 zone,
 +____nf_conntrack_find(struct net *net, const struct nf_conntrack_zone *zone,
                      const struct nf_conntrack_tuple *tuple, u32 hash)
  {
        struct nf_conntrack_tuple_hash *h;
@@@ -478,7 -487,7 +479,7 @@@ begin
  
  /* Find a connection corresponding to a tuple. */
  static struct nf_conntrack_tuple_hash *
 -__nf_conntrack_find_get(struct net *net, u16 zone,
 +__nf_conntrack_find_get(struct net *net, const struct nf_conntrack_zone *zone,
                        const struct nf_conntrack_tuple *tuple, u32 hash)
  {
        struct nf_conntrack_tuple_hash *h;
@@@ -505,11 -514,11 +506,11 @@@ begin
  }
  
  struct nf_conntrack_tuple_hash *
 -nf_conntrack_find_get(struct net *net, u16 zone,
 +nf_conntrack_find_get(struct net *net, const struct nf_conntrack_zone *zone,
                      const struct nf_conntrack_tuple *tuple)
  {
        return __nf_conntrack_find_get(net, zone, tuple,
 -                                     hash_conntrack_raw(tuple, zone));
 +                                     hash_conntrack_raw(tuple));
  }
  EXPORT_SYMBOL_GPL(nf_conntrack_find_get);
  
@@@ -528,11 -537,11 +529,11 @@@ static void __nf_conntrack_hash_insert(
  int
  nf_conntrack_hash_check_insert(struct nf_conn *ct)
  {
 +      const struct nf_conntrack_zone *zone;
        struct net *net = nf_ct_net(ct);
        unsigned int hash, reply_hash;
        struct nf_conntrack_tuple_hash *h;
        struct hlist_nulls_node *n;
 -      u16 zone;
        unsigned int sequence;
  
        zone = nf_ct_zone(ct);
        local_bh_disable();
        do {
                sequence = read_seqcount_begin(&net->ct.generation);
 -              hash = hash_conntrack(net, zone,
 +              hash = hash_conntrack(net,
                                      &ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple);
 -              reply_hash = hash_conntrack(net, zone,
 +              reply_hash = hash_conntrack(net,
                                           &ct->tuplehash[IP_CT_DIR_REPLY].tuple);
        } while (nf_conntrack_double_lock(net, hash, reply_hash, sequence));
  
        hlist_nulls_for_each_entry(h, n, &net->ct.hash[hash], hnnode)
                if (nf_ct_tuple_equal(&ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple,
                                      &h->tuple) &&
 -                  zone == nf_ct_zone(nf_ct_tuplehash_to_ctrack(h)))
 +                  nf_ct_zone_equal(nf_ct_tuplehash_to_ctrack(h), zone,
 +                                   NF_CT_DIRECTION(h)))
                        goto out;
        hlist_nulls_for_each_entry(h, n, &net->ct.hash[reply_hash], hnnode)
                if (nf_ct_tuple_equal(&ct->tuplehash[IP_CT_DIR_REPLY].tuple,
                                      &h->tuple) &&
 -                  zone == nf_ct_zone(nf_ct_tuplehash_to_ctrack(h)))
 +                  nf_ct_zone_equal(nf_ct_tuplehash_to_ctrack(h), zone,
 +                                   NF_CT_DIRECTION(h)))
                        goto out;
  
        add_timer(&ct->timeout);
@@@ -582,7 -589,6 +583,7 @@@ EXPORT_SYMBOL_GPL(nf_conntrack_hash_che
  int
  __nf_conntrack_confirm(struct sk_buff *skb)
  {
 +      const struct nf_conntrack_zone *zone;
        unsigned int hash, reply_hash;
        struct nf_conntrack_tuple_hash *h;
        struct nf_conn *ct;
        struct hlist_nulls_node *n;
        enum ip_conntrack_info ctinfo;
        struct net *net;
 -      u16 zone;
        unsigned int sequence;
  
        ct = nf_ct_get(skb, &ctinfo);
                /* reuse the hash saved before */
                hash = *(unsigned long *)&ct->tuplehash[IP_CT_DIR_REPLY].hnnode.pprev;
                hash = hash_bucket(hash, net);
 -              reply_hash = hash_conntrack(net, zone,
 +              reply_hash = hash_conntrack(net,
                                           &ct->tuplehash[IP_CT_DIR_REPLY].tuple);
  
        } while (nf_conntrack_double_lock(net, hash, reply_hash, sequence));
        hlist_nulls_for_each_entry(h, n, &net->ct.hash[hash], hnnode)
                if (nf_ct_tuple_equal(&ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple,
                                      &h->tuple) &&
 -                  zone == nf_ct_zone(nf_ct_tuplehash_to_ctrack(h)))
 +                  nf_ct_zone_equal(nf_ct_tuplehash_to_ctrack(h), zone,
 +                                   NF_CT_DIRECTION(h)))
                        goto out;
        hlist_nulls_for_each_entry(h, n, &net->ct.hash[reply_hash], hnnode)
                if (nf_ct_tuple_equal(&ct->tuplehash[IP_CT_DIR_REPLY].tuple,
                                      &h->tuple) &&
 -                  zone == nf_ct_zone(nf_ct_tuplehash_to_ctrack(h)))
 +                  nf_ct_zone_equal(nf_ct_tuplehash_to_ctrack(h), zone,
 +                                   NF_CT_DIRECTION(h)))
                        goto out;
  
        /* Timer relative to confirmation time, not original
@@@ -703,14 -708,11 +704,14 @@@ nf_conntrack_tuple_taken(const struct n
                         const struct nf_conn *ignored_conntrack)
  {
        struct net *net = nf_ct_net(ignored_conntrack);
 +      const struct nf_conntrack_zone *zone;
        struct nf_conntrack_tuple_hash *h;
        struct hlist_nulls_node *n;
        struct nf_conn *ct;
 -      u16 zone = nf_ct_zone(ignored_conntrack);
 -      unsigned int hash = hash_conntrack(net, zone, tuple);
 +      unsigned int hash;
 +
 +      zone = nf_ct_zone(ignored_conntrack);
 +      hash = hash_conntrack(net, tuple);
  
        /* Disable BHs the entire time since we need to disable them at
         * least once for the stats anyway.
                ct = nf_ct_tuplehash_to_ctrack(h);
                if (ct != ignored_conntrack &&
                    nf_ct_tuple_equal(tuple, &h->tuple) &&
 -                  nf_ct_zone(ct) == zone) {
 +                  nf_ct_zone_equal(ct, zone, NF_CT_DIRECTION(h))) {
                        NF_CT_STAT_INC(net, found);
                        rcu_read_unlock_bh();
                        return 1;
@@@ -809,8 -811,7 +810,8 @@@ void init_nf_conntrack_hash_rnd(void
  }
  
  static struct nf_conn *
 -__nf_conntrack_alloc(struct net *net, u16 zone,
 +__nf_conntrack_alloc(struct net *net,
 +                   const struct nf_conntrack_zone *zone,
                     const struct nf_conntrack_tuple *orig,
                     const struct nf_conntrack_tuple *repl,
                     gfp_t gfp, u32 hash)
        if (unlikely(!nf_conntrack_hash_rnd)) {
                init_nf_conntrack_hash_rnd();
                /* recompute the hash as nf_conntrack_hash_rnd is initialized */
 -              hash = hash_conntrack_raw(orig, zone);
 +              hash = hash_conntrack_raw(orig);
        }
  
        /* We don't want any race condition at early drop stage */
         * SLAB_DESTROY_BY_RCU.
         */
        ct = kmem_cache_alloc(net->ct.nf_conntrack_cachep, gfp);
 -      if (ct == NULL) {
 -              atomic_dec(&net->ct.count);
 -              return ERR_PTR(-ENOMEM);
 -      }
 +      if (ct == NULL)
 +              goto out;
 +
        spin_lock_init(&ct->lock);
        ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple = *orig;
        ct->tuplehash[IP_CT_DIR_ORIGINAL].hnnode.pprev = NULL;
        memset(&ct->__nfct_init_offset[0], 0,
               offsetof(struct nf_conn, proto) -
               offsetof(struct nf_conn, __nfct_init_offset[0]));
 -#ifdef CONFIG_NF_CONNTRACK_ZONES
 -      if (zone) {
 -              struct nf_conntrack_zone *nf_ct_zone;
  
 -              nf_ct_zone = nf_ct_ext_add(ct, NF_CT_EXT_ZONE, GFP_ATOMIC);
 -              if (!nf_ct_zone)
 -                      goto out_free;
 -              nf_ct_zone->id = zone;
 -      }
 -#endif
 +      if (zone && nf_ct_zone_add(ct, GFP_ATOMIC, zone) < 0)
 +              goto out_free;
 +
        /* Because we use RCU lookups, we set ct_general.use to zero before
         * this is inserted in any list.
         */
        atomic_set(&ct->ct_general.use, 0);
        return ct;
 -
 -#ifdef CONFIG_NF_CONNTRACK_ZONES
  out_free:
 -      atomic_dec(&net->ct.count);
        kmem_cache_free(net->ct.nf_conntrack_cachep, ct);
 +out:
 +      atomic_dec(&net->ct.count);
        return ERR_PTR(-ENOMEM);
 -#endif
  }
  
 -struct nf_conn *nf_conntrack_alloc(struct net *net, u16 zone,
 +struct nf_conn *nf_conntrack_alloc(struct net *net,
 +                                 const struct nf_conntrack_zone *zone,
                                   const struct nf_conntrack_tuple *orig,
                                   const struct nf_conntrack_tuple *repl,
                                   gfp_t gfp)
@@@ -915,9 -924,8 +916,9 @@@ init_conntrack(struct net *net, struct 
        struct nf_conntrack_tuple repl_tuple;
        struct nf_conntrack_ecache *ecache;
        struct nf_conntrack_expect *exp = NULL;
 -      u16 zone = tmpl ? nf_ct_zone(tmpl) : NF_CT_DEFAULT_ZONE;
 +      const struct nf_conntrack_zone *zone;
        struct nf_conn_timeout *timeout_ext;
 +      struct nf_conntrack_zone tmp;
        unsigned int *timeouts;
  
        if (!nf_ct_invert_tuple(&repl_tuple, tuple, l3proto, l4proto)) {
                return NULL;
        }
  
 +      zone = nf_ct_zone_tmpl(tmpl, skb, &tmp);
        ct = __nf_conntrack_alloc(net, zone, tuple, &repl_tuple, GFP_ATOMIC,
                                  hash);
        if (IS_ERR(ct))
@@@ -1020,11 -1027,10 +1021,11 @@@ resolve_normal_ct(struct net *net, stru
                  int *set_reply,
                  enum ip_conntrack_info *ctinfo)
  {
 +      const struct nf_conntrack_zone *zone;
        struct nf_conntrack_tuple tuple;
        struct nf_conntrack_tuple_hash *h;
 +      struct nf_conntrack_zone tmp;
        struct nf_conn *ct;
 -      u16 zone = tmpl ? nf_ct_zone(tmpl) : NF_CT_DEFAULT_ZONE;
        u32 hash;
  
        if (!nf_ct_get_tuple(skb, skb_network_offset(skb),
        }
  
        /* look for tuple match */
 -      hash = hash_conntrack_raw(&tuple, zone);
 +      zone = nf_ct_zone_tmpl(tmpl, skb, &tmp);
 +      hash = hash_conntrack_raw(&tuple);
        h = __nf_conntrack_find_get(net, zone, &tuple, hash);
        if (!h) {
                h = init_conntrack(net, tmpl, &tuple, l3proto, l4proto,
@@@ -1592,7 -1597,8 +1593,7 @@@ int nf_conntrack_set_hashsize(const cha
                                        struct nf_conntrack_tuple_hash, hnnode);
                        ct = nf_ct_tuplehash_to_ctrack(h);
                        hlist_nulls_del_rcu(&h->hnnode);
 -                      bucket = __hash_conntrack(&h->tuple, nf_ct_zone(ct),
 -                                                hashsize);
 +                      bucket = __hash_conntrack(&h->tuple, hashsize);
                        hlist_nulls_add_head_rcu(&h->hnnode, &hash[bucket]);
                }
        }
index 888b9558415eb23bedd78b80f1bb458763331e68,d6ee8f8b19b63aaf30a2a2c9cf8a2a327063e58e..c8a4a48bced988a29cd19df06a00117ea026c6ad
  #include <linux/netfilter/x_tables.h>
  #include <linux/netfilter/xt_tcpudp.h>
  #include <linux/netfilter/xt_SYNPROXY.h>
 +
  #include <net/netfilter/nf_conntrack.h>
  #include <net/netfilter/nf_conntrack_extend.h>
  #include <net/netfilter/nf_conntrack_seqadj.h>
  #include <net/netfilter/nf_conntrack_synproxy.h>
 +#include <net/netfilter/nf_conntrack_zones.h>
  
  int synproxy_net_id;
  EXPORT_SYMBOL_GPL(synproxy_net_id);
@@@ -188,7 -186,7 +188,7 @@@ unsigned int synproxy_tstamp_adjust(str
                                    const struct nf_conn_synproxy *synproxy)
  {
        unsigned int optoff, optend;
 -      u32 *ptr, old;
 +      __be32 *ptr, old;
  
        if (synproxy->tsoff == 0)
                return 1;
                        if (op[0] == TCPOPT_TIMESTAMP &&
                            op[1] == TCPOLEN_TIMESTAMP) {
                                if (CTINFO2DIR(ctinfo) == IP_CT_DIR_REPLY) {
 -                                      ptr = (u32 *)&op[2];
 +                                      ptr = (__be32 *)&op[2];
                                        old = *ptr;
                                        *ptr = htonl(ntohl(*ptr) -
                                                     synproxy->tsoff);
                                } else {
 -                                      ptr = (u32 *)&op[6];
 +                                      ptr = (__be32 *)&op[6];
                                        old = *ptr;
                                        *ptr = htonl(ntohl(*ptr) +
                                                     synproxy->tsoff);
                                }
                                inet_proto_csum_replace4(&th->check, skb,
 -                                                       old, *ptr, 0);
 +                                                       old, *ptr, false);
                                return 1;
                        }
                        optoff += op[1];
@@@ -354,7 -352,7 +354,7 @@@ static int __net_init synproxy_net_init
        struct nf_conn *ct;
        int err = -ENOMEM;
  
 -      ct = nf_ct_tmpl_alloc(net, 0, GFP_KERNEL);
 +      ct = nf_ct_tmpl_alloc(net, &nf_ct_zone_dflt, GFP_KERNEL);
        if (!ct)
                goto err1;
  
  err3:
        free_percpu(snet->stats);
  err2:
-       nf_conntrack_free(ct);
+       nf_ct_tmpl_free(ct);
  err1:
        return err;
  }
diff --combined net/netfilter/xt_CT.c
index 8e524898ccea234a2b5cae3bdfaf2cd72d023238,f3377ce1ff18e8f454ec30af9b5c4be010fd198a..faf32d888198a72a50c293312c014bcb63747654
@@@ -181,23 -181,9 +181,23 @@@ out
  #endif
  }
  
 +static u16 xt_ct_flags_to_dir(const struct xt_ct_target_info_v1 *info)
 +{
 +      switch (info->flags & (XT_CT_ZONE_DIR_ORIG |
 +                             XT_CT_ZONE_DIR_REPL)) {
 +      case XT_CT_ZONE_DIR_ORIG:
 +              return NF_CT_ZONE_DIR_ORIG;
 +      case XT_CT_ZONE_DIR_REPL:
 +              return NF_CT_ZONE_DIR_REPL;
 +      default:
 +              return NF_CT_DEFAULT_ZONE_DIR;
 +      }
 +}
 +
  static int xt_ct_tg_check(const struct xt_tgchk_param *par,
                          struct xt_ct_target_info_v1 *info)
  {
 +      struct nf_conntrack_zone zone;
        struct nf_conn *ct;
        int ret = -EOPNOTSUPP;
  
        }
  
  #ifndef CONFIG_NF_CONNTRACK_ZONES
 -      if (info->zone)
 +      if (info->zone || info->flags & (XT_CT_ZONE_DIR_ORIG |
 +                                       XT_CT_ZONE_DIR_REPL |
 +                                       XT_CT_ZONE_MARK))
                goto err1;
  #endif
  
        if (ret < 0)
                goto err1;
  
 -      ct = nf_ct_tmpl_alloc(par->net, info->zone, GFP_KERNEL);
 +      memset(&zone, 0, sizeof(zone));
 +      zone.id = info->zone;
 +      zone.dir = xt_ct_flags_to_dir(info);
 +      if (info->flags & XT_CT_ZONE_MARK)
 +              zone.flags |= NF_CT_FLAG_MARK;
 +
 +      ct = nf_ct_tmpl_alloc(par->net, &zone, GFP_KERNEL);
        if (!ct) {
                ret = -ENOMEM;
                goto err2;
@@@ -255,7 -233,7 +255,7 @@@ out
        return 0;
  
  err3:
-       nf_conntrack_free(ct);
+       nf_ct_tmpl_free(ct);
  err2:
        nf_ct_l3proto_module_put(par->family);
  err1:
This page took 0.108714 seconds and 4 git commands to generate.