 * xt_hashlimit - Netfilter module to limit the number of packets per time
 * separately for each hashbucket (sourceip/sourceport/dstip/dstport)
 *
 * Copyright © CC Computer Consultants GmbH, 2007 - 2008
 *
 * Development of this code was funded by Astaro AG, http://www.astaro.com/
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/module.h>
#include <linux/spinlock.h>
#include <linux/random.h>
#include <linux/jhash.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/list.h>
#include <linux/skbuff.h>
#if IS_ENABLED(CONFIG_IP6_NF_IPTABLES)
#include <linux/ipv6.h>
#endif
#include <net/net_namespace.h>
#include <net/netns/generic.h>

#include <linux/netfilter/x_tables.h>
#include <linux/netfilter_ipv4/ip_tables.h>
#include <linux/netfilter_ipv6/ip6_tables.h>
#include <linux/netfilter/xt_hashlimit.h>
#include <linux/mutex.h>
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Xtables: per hash-bucket rate-limit match");
MODULE_ALIAS("ipt_hashlimit");
MODULE_ALIAS("ip6t_hashlimit");
struct hashlimit_net {
        struct hlist_head htables;
        struct proc_dir_entry *ipt_hashlimit;
        struct proc_dir_entry *ip6t_hashlimit;
};

static unsigned int hashlimit_net_id;

static inline struct hashlimit_net *hashlimit_pernet(struct net *net)
{
        return net_generic(net, hashlimit_net_id);
}
/* need to declare this at the top */
static const struct file_operations dl_file_ops_v1;
static const struct file_operations dl_file_ops;
struct dsthash_dst {
        union {
                struct {
                        __be32 src;
                        __be32 dst;
                } ip;
#if IS_ENABLED(CONFIG_IP6_NF_IPTABLES)
                struct {
                        __be32 src[4];
                        __be32 dst[4];
                } ip6;
#endif
        };
        __be16 src_port;
        __be16 dst_port;
};

struct dsthash_ent {
        /* static / read-only parts in the beginning */
        struct hlist_node node;
        struct dsthash_dst dst;

        /* modified structure members in the end */
        spinlock_t lock;
        unsigned long expires;          /* precalculated expiry time */
        struct {
                unsigned long prev;     /* last modification */
                u_int64_t credit;
                u_int64_t credit_cap, cost;
        } rateinfo;
        struct rcu_head rcu;
};
struct xt_hashlimit_htable {
        struct hlist_node node;         /* global list of all htables */
        int use;
        u_int8_t family;
        bool rnd_initialized;

        struct hashlimit_cfg2 cfg;      /* config */

        /* used internally */
        spinlock_t lock;                /* lock for list_head */
        u_int32_t rnd;                  /* random seed for hash */
        unsigned int count;             /* number of entries in table */
        struct delayed_work gc_work;

        /* seq_file stuff */
        struct proc_dir_entry *pde;
        const char *name;
        struct net *net;

        struct hlist_head hash[0];      /* hashtable itself */
};
static int
cfg_copy(struct hashlimit_cfg2 *to, void *from, int revision)
{
        if (revision == 1) {
                struct hashlimit_cfg1 *cfg = (struct hashlimit_cfg1 *)from;

                to->mode = cfg->mode;
                to->avg = cfg->avg;
                to->burst = cfg->burst;
                to->size = cfg->size;
                to->max = cfg->max;
                to->gc_interval = cfg->gc_interval;
                to->expire = cfg->expire;
                to->srcmask = cfg->srcmask;
                to->dstmask = cfg->dstmask;
        } else if (revision == 2) {
                memcpy(to, from, sizeof(struct hashlimit_cfg2));
        } else {
                return -EINVAL;
        }

        return 0;
}
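/* cfg_copy() above copies a revision-1 config field by field into the
 * common, wider hashlimit_cfg2 layout so that the rest of the module
 * only has to handle one config format; a revision-2 config is already
 * in that layout and can be copied wholesale.
 */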
static DEFINE_MUTEX(hashlimit_mutex);   /* protects htables list */
static struct kmem_cache *hashlimit_cachep __read_mostly;
static inline bool dst_cmp(const struct dsthash_ent *ent,
                           const struct dsthash_dst *b)
{
        return !memcmp(&ent->dst, b, sizeof(ent->dst));
}
static u_int32_t
hash_dst(const struct xt_hashlimit_htable *ht, const struct dsthash_dst *dst)
{
        u_int32_t hash = jhash2((const u32 *)dst,
                                sizeof(*dst)/sizeof(u32),
                                ht->rnd);
        /*
         * Instead of returning hash % ht->cfg.size (implying a divide)
         * we return the high 32 bits of (hash * ht->cfg.size), which
         * gives results in [0, cfg.size-1] with the same hash
         * distribution, but using a multiply, which is less expensive
         * than a divide.
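         *
         * Worked example (illustrative numbers, not from the original
         * source): with cfg.size = 8192, reciprocal_scale() computes
         * (u32)(((u64)hash * 8192) >> 32); a hash of 0xC0000000 (3/4 of
         * the 32-bit range) maps to bucket 6144, i.e. 3/4 of 8192.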
         */
        return reciprocal_scale(hash, ht->cfg.size);
}
static struct dsthash_ent *
dsthash_find(const struct xt_hashlimit_htable *ht,
             const struct dsthash_dst *dst)
{
        struct dsthash_ent *ent;
        u_int32_t hash = hash_dst(ht, dst);

        if (!hlist_empty(&ht->hash[hash])) {
                hlist_for_each_entry_rcu(ent, &ht->hash[hash], node)
                        if (dst_cmp(ent, dst)) {
                                spin_lock(&ent->lock);
                                return ent;
                        }
        }
        return NULL;
}
/* allocate dsthash_ent, initialize dst, put in htable and lock it */
static struct dsthash_ent *
dsthash_alloc_init(struct xt_hashlimit_htable *ht,
                   const struct dsthash_dst *dst, bool *race)
{
        struct dsthash_ent *ent;

        spin_lock(&ht->lock);

        /* Two or more packets may race to create the same entry in the
         * hashtable, double-check whether this packet lost the race.
         */
        ent = dsthash_find(ht, dst);
        if (ent != NULL) {
                spin_unlock(&ht->lock);
                *race = true;
                return ent;
        }

        /* initialize hash with a random value when we allocate
         * the first hashtable entry */
        if (unlikely(!ht->rnd_initialized)) {
                get_random_bytes(&ht->rnd, sizeof(ht->rnd));
                ht->rnd_initialized = true;
        }

        if (ht->cfg.max && ht->count >= ht->cfg.max) {
                /* FIXME: do something. question is what.. */
                net_err_ratelimited("max count of %u reached\n", ht->cfg.max);
                ent = NULL;
        } else
                ent = kmem_cache_alloc(hashlimit_cachep, GFP_ATOMIC);
        if (ent) {
                memcpy(&ent->dst, dst, sizeof(ent->dst));
                spin_lock_init(&ent->lock);

                spin_lock(&ent->lock);
                hlist_add_head_rcu(&ent->node, &ht->hash[hash_dst(ht, dst)]);
                ht->count++;
        }
        spin_unlock(&ht->lock);
        return ent;
}
static void dsthash_free_rcu(struct rcu_head *head)
{
        struct dsthash_ent *ent = container_of(head, struct dsthash_ent, rcu);

        kmem_cache_free(hashlimit_cachep, ent);
}
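/* Entries are traversed under rcu_read_lock_bh() in the match path, so
 * an unlinked entry must not go back to the slab until a BH grace
 * period has elapsed; hence the call_rcu_bh() indirection below instead
 * of an immediate kmem_cache_free().
 */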
static inline void
dsthash_free(struct xt_hashlimit_htable *ht, struct dsthash_ent *ent)
{
        hlist_del_rcu(&ent->node);
        call_rcu_bh(&ent->rcu, dsthash_free_rcu);
        ht->count--;
}
static void htable_gc(struct work_struct *work);
static int htable_create(struct net *net, struct hashlimit_cfg2 *cfg,
                         const char *name, u_int8_t family,
                         struct xt_hashlimit_htable **out_hinfo,
                         int revision)
{
        struct hashlimit_net *hashlimit_net = hashlimit_pernet(net);
        struct xt_hashlimit_htable *hinfo;
        unsigned int size, i;
        int ret;

        if (cfg->size) {
                size = cfg->size;
        } else {
                size = (totalram_pages << PAGE_SHIFT) / 16384 /
                       sizeof(struct list_head);
                if (totalram_pages > 1024 * 1024 * 1024 / PAGE_SIZE)
                        size = 8192;
                if (size < 16)
                        size = 16;
        }
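        /*
         * Sizing sketch (illustrative numbers): the default aims at
         * roughly one bucket per 16 KiB of RAM.  On a 64-bit machine
         * with 512 MiB of RAM and a 16-byte struct list_head this is
         * 512 MiB / 16384 / 16 = 2048 buckets; boxes with more than
         * 1 GiB of RAM are clamped to 8192 buckets.
         */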
        /* FIXME: don't use vmalloc() here or anywhere else -HW */
        hinfo = vmalloc(sizeof(struct xt_hashlimit_htable) +
                        sizeof(struct list_head) * size);
        if (hinfo == NULL)
                return -ENOMEM;
        *out_hinfo = hinfo;

        /* copy match config into hashtable config */
        ret = cfg_copy(&hinfo->cfg, (void *)cfg, 2);
        if (ret)
                return ret;

        hinfo->cfg.size = size;
        if (hinfo->cfg.max == 0)
                hinfo->cfg.max = 8 * hinfo->cfg.size;
        else if (hinfo->cfg.max < hinfo->cfg.size)
                hinfo->cfg.max = hinfo->cfg.size;

        for (i = 0; i < hinfo->cfg.size; i++)
                INIT_HLIST_HEAD(&hinfo->hash[i]);

        hinfo->use = 1;
        hinfo->count = 0;
        hinfo->family = family;
        hinfo->rnd_initialized = false;
        hinfo->name = kstrdup(name, GFP_KERNEL);
        if (hinfo->name == NULL) {
                vfree(hinfo);
                return -ENOMEM;
        }
        spin_lock_init(&hinfo->lock);

        hinfo->pde = proc_create_data(name, 0,
                (family == NFPROTO_IPV4) ?
                hashlimit_net->ipt_hashlimit : hashlimit_net->ip6t_hashlimit,
                (revision == 1) ? &dl_file_ops_v1 : &dl_file_ops,
                hinfo);
        if (hinfo->pde == NULL) {
                kfree(hinfo->name);
                vfree(hinfo);
                return -ENOMEM;
        }
        hinfo->net = net;

        INIT_DEFERRABLE_WORK(&hinfo->gc_work, htable_gc);
        queue_delayed_work(system_power_efficient_wq, &hinfo->gc_work,
                           msecs_to_jiffies(hinfo->cfg.gc_interval));

        hlist_add_head(&hinfo->node, &hashlimit_net->htables);

        return 0;
}
static bool select_all(const struct xt_hashlimit_htable *ht,
                       const struct dsthash_ent *he)
{
        return true;
}

static bool select_gc(const struct xt_hashlimit_htable *ht,
                      const struct dsthash_ent *he)
{
        return time_after_eq(jiffies, he->expires);
}
static void htable_selective_cleanup(struct xt_hashlimit_htable *ht,
                                     bool (*select)(const struct xt_hashlimit_htable *ht,
                                                    const struct dsthash_ent *he))
{
        unsigned int i;

        for (i = 0; i < ht->cfg.size; i++) {
                struct dsthash_ent *dh;
                struct hlist_node *n;

                spin_lock_bh(&ht->lock);
                hlist_for_each_entry_safe(dh, n, &ht->hash[i], node) {
                        if ((*select)(ht, dh))
                                dsthash_free(ht, dh);
                }
                spin_unlock_bh(&ht->lock);
        }
}
static void htable_gc(struct work_struct *work)
{
        struct xt_hashlimit_htable *ht;

        ht = container_of(work, struct xt_hashlimit_htable, gc_work.work);

        htable_selective_cleanup(ht, select_gc);

        queue_delayed_work(system_power_efficient_wq,
                           &ht->gc_work, msecs_to_jiffies(ht->cfg.gc_interval));
}
static void htable_remove_proc_entry(struct xt_hashlimit_htable *hinfo)
{
        struct hashlimit_net *hashlimit_net = hashlimit_pernet(hinfo->net);
        struct proc_dir_entry *parent;

        if (hinfo->family == NFPROTO_IPV4)
                parent = hashlimit_net->ipt_hashlimit;
        else
                parent = hashlimit_net->ip6t_hashlimit;

        if (parent != NULL)
                remove_proc_entry(hinfo->name, parent);
}
static void htable_destroy(struct xt_hashlimit_htable *hinfo)
{
        cancel_delayed_work_sync(&hinfo->gc_work);
        htable_remove_proc_entry(hinfo);
        htable_selective_cleanup(hinfo, select_all);
        kfree(hinfo->name);
        vfree(hinfo);
}
static struct xt_hashlimit_htable *htable_find_get(struct net *net,
                                                   const char *name,
                                                   u_int8_t family)
{
        struct hashlimit_net *hashlimit_net = hashlimit_pernet(net);
        struct xt_hashlimit_htable *hinfo;

        hlist_for_each_entry(hinfo, &hashlimit_net->htables, node) {
                if (!strcmp(name, hinfo->name) &&
                    hinfo->family == family) {
                        hinfo->use++;
                        return hinfo;
                }
        }
        return NULL;
}
static void htable_put(struct xt_hashlimit_htable *hinfo)
{
        mutex_lock(&hashlimit_mutex);
        if (--hinfo->use == 0) {
                hlist_del(&hinfo->node);
                htable_destroy(hinfo);
        }
        mutex_unlock(&hashlimit_mutex);
}
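/* The use count above is only manipulated with hashlimit_mutex held
 * (htable_find_get() is likewise called under the mutex, see the
 * checkentry path below), so a plain int suffices; the final
 * htable_put() unlinks the table from the per-netns list and tears it
 * down.
 */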
/* The algorithm used is the Simple Token Bucket Filter (TBF)
 * see net/sched/sch_tbf.c in the linux source tree
 */

/* Rusty: This is my (non-mathematically-inclined) understanding of
   this algorithm.  The `average rate' in jiffies becomes your initial
   amount of credit `credit' and the most credit you can ever have
   `credit_cap'.  The `peak rate' becomes the cost of passing the
   test, `cost'.

   `prev' tracks the last packet hit: you gain one credit per jiffy.
   If your credit balance ever exceeds the cap, the extra credit is
   discarded.  Every time the match passes, you lose `cost' credits;
   if you don't have that many, the test fails.

   See Alexey's formal explanation in net/sched/sch_tbf.c.

   To get the maximum range, we multiply by this factor (i.e. you get N
   credits per jiffy).  We want to allow a rate as low as 1 per day
   (the slowest rate the userspace tool allows), which means
   CREDITS_PER_JIFFY*HZ*60*60*24 < 2^32, i.e.
*/
#define MAX_CPJ_v1 (0xFFFFFFFF / (HZ*60*60*24))
#define MAX_CPJ (0xFFFFFFFFFFFFFFFFULL / (HZ*60*60*24))
/* Repeated shift and or gives us all 1s, final shift and add 1 gives
 * us the power of 2 below the theoretical max, so GCC simply does a
 * shift. */
#define _POW2_BELOW2(x) ((x)|((x)>>1))
#define _POW2_BELOW4(x) (_POW2_BELOW2(x)|_POW2_BELOW2((x)>>2))
#define _POW2_BELOW8(x) (_POW2_BELOW4(x)|_POW2_BELOW4((x)>>4))
#define _POW2_BELOW16(x) (_POW2_BELOW8(x)|_POW2_BELOW8((x)>>8))
#define _POW2_BELOW32(x) (_POW2_BELOW16(x)|_POW2_BELOW16((x)>>16))
#define _POW2_BELOW64(x) (_POW2_BELOW32(x)|_POW2_BELOW32((x)>>32))
#define POW2_BELOW32(x) ((_POW2_BELOW32(x)>>1) + 1)
#define POW2_BELOW64(x) ((_POW2_BELOW64(x)>>1) + 1)
#define CREDITS_PER_JIFFY POW2_BELOW64(MAX_CPJ)
#define CREDITS_PER_JIFFY_v1 POW2_BELOW32(MAX_CPJ_v1)
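/* Worked example of the macros above (illustrative, assuming HZ=1000):
 * MAX_CPJ_v1 = 0xFFFFFFFF / (1000*60*60*24) = 49.  The repeated
 * shift-and-or turns 49 (0b110001) into 63 (0b111111), and the final
 * shift-and-add-1 gives POW2_BELOW32(49) = 32, so a v1 entry earns 32
 * credits per jiffy, the power of 2 below the theoretical maximum.
 */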
/* in byte mode, the lowest possible rate is one packet/second.
 * credit_cap is used as a counter that tells us how many times we can
 * refill the "credits available" counter when it becomes empty.
 */
#define MAX_CPJ_BYTES (0xFFFFFFFF / HZ)
#define CREDITS_PER_JIFFY_BYTES POW2_BELOW32(MAX_CPJ_BYTES)
static u32 xt_hashlimit_len_to_chunks(u32 len)
{
        return (len >> XT_HASHLIMIT_BYTE_SHIFT) + 1;
}
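/* xt_hashlimit_len_to_chunks() above prices a packet in chunks of
 * 2^XT_HASHLIMIT_BYTE_SHIFT bytes (the constant comes from
 * xt_hashlimit.h): a len-byte packet costs (len >> SHIFT) + 1 chunks,
 * so even a zero-length packet is charged one chunk.
 */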
/* Precision saver. */
static u64 user2credits(u64 user, int revision)
{
        if (revision == 1) {
                /* If multiplying would overflow... */
                if (user > 0xFFFFFFFF / (HZ*CREDITS_PER_JIFFY_v1))
                        /* Divide first. */
                        return div64_u64(user, XT_HASHLIMIT_SCALE)
                                * HZ * CREDITS_PER_JIFFY_v1;

                return div64_u64(user * HZ * CREDITS_PER_JIFFY_v1,
                                 XT_HASHLIMIT_SCALE);
        } else {
                if (user > 0xFFFFFFFFFFFFFFFFULL / (HZ*CREDITS_PER_JIFFY))
                        return div64_u64(user, XT_HASHLIMIT_SCALE_v2)
                                * HZ * CREDITS_PER_JIFFY;

                return div64_u64(user * HZ * CREDITS_PER_JIFFY,
                                 XT_HASHLIMIT_SCALE_v2);
        }
}
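/* Worked example for user2credits() (illustrative, assuming HZ=1000,
 * hence CREDITS_PER_JIFFY_v1 = 32, and XT_HASHLIMIT_SCALE = 10000):
 * userspace encodes a v1 rate of 5/sec as avg = 10000/5 = 2000, and
 * user2credits(2000, 1) takes the non-overflow branch and returns
 * 2000 * 1000 * 32 / 10000 = 6400 credits, exactly one fifth of the
 * 32 * 1000 = 32000 credits an entry gains per second.
 */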
static u32 user2credits_byte(u32 user)
{
        u64 us = user;

        us *= HZ * CREDITS_PER_JIFFY_BYTES;
        return (u32) (us >> 32);
}
static void rateinfo_recalc(struct dsthash_ent *dh, unsigned long now,
                            u32 mode, int revision)
{
        unsigned long delta = now - dh->rateinfo.prev;
        u64 cap, cpj;

        if (delta == 0)
                return;

        dh->rateinfo.prev = now;

        if (mode & XT_HASHLIMIT_BYTES) {
                u64 tmp = dh->rateinfo.credit;
                dh->rateinfo.credit += CREDITS_PER_JIFFY_BYTES * delta;
                cap = CREDITS_PER_JIFFY_BYTES * HZ;
                if (tmp >= dh->rateinfo.credit) {/* overflow */
                        dh->rateinfo.credit = cap;
                        return;
                }
        } else {
                cpj = (revision == 1) ?
                        CREDITS_PER_JIFFY_v1 : CREDITS_PER_JIFFY;
                dh->rateinfo.credit += delta * cpj;
                cap = dh->rateinfo.credit_cap;
        }
        if (dh->rateinfo.credit > cap)
                dh->rateinfo.credit = cap;
}
static void rateinfo_init(struct dsthash_ent *dh,
                          struct xt_hashlimit_htable *hinfo, int revision)
{
        dh->rateinfo.prev = jiffies;
        if (hinfo->cfg.mode & XT_HASHLIMIT_BYTES) {
                dh->rateinfo.credit = CREDITS_PER_JIFFY_BYTES * HZ;
                dh->rateinfo.cost = user2credits_byte(hinfo->cfg.avg);
                dh->rateinfo.credit_cap = hinfo->cfg.burst;
        } else {
                dh->rateinfo.credit = user2credits(hinfo->cfg.avg *
                                                   hinfo->cfg.burst, revision);
                dh->rateinfo.cost = user2credits(hinfo->cfg.avg, revision);
                dh->rateinfo.credit_cap = dh->rateinfo.credit;
        }
}
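/* Continuing the example (HZ=1000, rate 5/sec, burst 5, illustrative):
 * rateinfo_init() above starts the entry with credit = credit_cap =
 * user2credits(2000 * 5, 1) = 32000 and cost = 6400, so a burst of 5
 * packets can pass back-to-back before the bucket must refill at 32
 * credits per jiffy.
 */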
static inline __be32 maskl(__be32 a, unsigned int l)
{
        return l ? htonl(ntohl(a) & ~0 << (32 - l)) : 0;
}
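/* maskl() above keeps the most significant l bits of an IPv4 address,
 * e.g. masking 192.168.1.77 with l = 24 yields 192.168.1.0, while
 * l = 0 maps every address to 0.0.0.0 so all of them share one slot.
 */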
#if IS_ENABLED(CONFIG_IP6_NF_IPTABLES)
static void hashlimit_ipv6_mask(__be32 *i, unsigned int p)
{
        switch (p) {
        case 0 ... 31:
                i[0] = maskl(i[0], p);
                i[1] = i[2] = i[3] = 0;
                break;
        case 32 ... 63:
                i[1] = maskl(i[1], p - 32);
                i[2] = i[3] = 0;
                break;
        case 64 ... 95:
                i[2] = maskl(i[2], p - 64);
                i[3] = 0;
                break;
        case 96 ... 127:
                i[3] = maskl(i[3], p - 96);
                break;
        case 128:
                break;
        }
}
#endif
static int
hashlimit_init_dst(const struct xt_hashlimit_htable *hinfo,
                   struct dsthash_dst *dst,
                   const struct sk_buff *skb, unsigned int protoff)
{
        __be16 _ports[2], *ports;
        u8 nexthdr;
        int poff;

        memset(dst, 0, sizeof(*dst));

        switch (hinfo->family) {
        case NFPROTO_IPV4:
                if (hinfo->cfg.mode & XT_HASHLIMIT_HASH_DIP)
                        dst->ip.dst = maskl(ip_hdr(skb)->daddr,
                                            hinfo->cfg.dstmask);
                if (hinfo->cfg.mode & XT_HASHLIMIT_HASH_SIP)
                        dst->ip.src = maskl(ip_hdr(skb)->saddr,
                                            hinfo->cfg.srcmask);

                if (!(hinfo->cfg.mode &
                      (XT_HASHLIMIT_HASH_DPT | XT_HASHLIMIT_HASH_SPT)))
                        return 0;
                nexthdr = ip_hdr(skb)->protocol;
                break;
#if IS_ENABLED(CONFIG_IP6_NF_IPTABLES)
        case NFPROTO_IPV6:
        {
                __be16 frag_off;

                if (hinfo->cfg.mode & XT_HASHLIMIT_HASH_DIP) {
                        memcpy(&dst->ip6.dst, &ipv6_hdr(skb)->daddr,
                               sizeof(dst->ip6.dst));
                        hashlimit_ipv6_mask(dst->ip6.dst, hinfo->cfg.dstmask);
                }
                if (hinfo->cfg.mode & XT_HASHLIMIT_HASH_SIP) {
                        memcpy(&dst->ip6.src, &ipv6_hdr(skb)->saddr,
                               sizeof(dst->ip6.src));
                        hashlimit_ipv6_mask(dst->ip6.src, hinfo->cfg.srcmask);
                }

                if (!(hinfo->cfg.mode &
                      (XT_HASHLIMIT_HASH_DPT | XT_HASHLIMIT_HASH_SPT)))
                        return 0;
                nexthdr = ipv6_hdr(skb)->nexthdr;
                protoff = ipv6_skip_exthdr(skb, sizeof(struct ipv6hdr),
                                           &nexthdr, &frag_off);
                if ((int)protoff < 0)
                        return -1;
                break;
        }
#endif
        default:
                BUG();
                return 0;
        }

        poff = proto_ports_offset(nexthdr);
        if (poff >= 0) {
                ports = skb_header_pointer(skb, protoff + poff, sizeof(_ports),
                                           &_ports);
        } else {
                _ports[0] = _ports[1] = 0;
                ports = _ports;
        }
        if (!ports)
                return -1;
        if (hinfo->cfg.mode & XT_HASHLIMIT_HASH_SPT)
                dst->src_port = ports[0];
        if (hinfo->cfg.mode & XT_HASHLIMIT_HASH_DPT)
                dst->dst_port = ports[1];
        return 0;
}
static u32 hashlimit_byte_cost(unsigned int len, struct dsthash_ent *dh)
{
        u64 tmp = xt_hashlimit_len_to_chunks(len);

        tmp = tmp * dh->rateinfo.cost;

        if (unlikely(tmp > CREDITS_PER_JIFFY_BYTES * HZ))
                tmp = CREDITS_PER_JIFFY_BYTES * HZ;

        if (dh->rateinfo.credit < tmp && dh->rateinfo.credit_cap) {
                dh->rateinfo.credit_cap--;
                dh->rateinfo.credit = CREDITS_PER_JIFFY_BYTES * HZ;
        }
        return (u32) tmp;
}
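/* Sketch of the byte-mode accounting above: the packet's chunk count
 * times the per-chunk cost is capped at one second's worth of credits;
 * when the bucket cannot cover it and credit_cap (the refill counter
 * set up in rateinfo_init()) is non-zero, one refill is consumed and
 * the bucket is topped back up to CREDITS_PER_JIFFY_BYTES * HZ.
 */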
static bool
hashlimit_mt_common(const struct sk_buff *skb, struct xt_action_param *par,
                    struct xt_hashlimit_htable *hinfo,
                    const struct hashlimit_cfg2 *cfg, int revision)
{
        unsigned long now = jiffies;
        struct dsthash_ent *dh;
        struct dsthash_dst dst;
        bool race = false;
        u64 cost;

        if (hashlimit_init_dst(hinfo, &dst, skb, par->thoff) < 0)
                goto hotdrop;

        rcu_read_lock_bh();
        dh = dsthash_find(hinfo, &dst);
        if (dh == NULL) {
                dh = dsthash_alloc_init(hinfo, &dst, &race);
                if (dh == NULL) {
                        rcu_read_unlock_bh();
                        goto hotdrop;
                } else if (race) {
                        /* Already got an entry, update expiration timeout */
                        dh->expires = now + msecs_to_jiffies(hinfo->cfg.expire);
                        rateinfo_recalc(dh, now, hinfo->cfg.mode, revision);
                } else {
                        dh->expires = jiffies + msecs_to_jiffies(hinfo->cfg.expire);
                        rateinfo_init(dh, hinfo, revision);
                }
        } else {
                /* update expiration timeout */
                dh->expires = now + msecs_to_jiffies(hinfo->cfg.expire);
                rateinfo_recalc(dh, now, hinfo->cfg.mode, revision);
        }

        if (cfg->mode & XT_HASHLIMIT_BYTES)
                cost = hashlimit_byte_cost(skb->len, dh);
        else
                cost = dh->rateinfo.cost;

        if (dh->rateinfo.credit >= cost) {
                /* below the limit */
                dh->rateinfo.credit -= cost;
                spin_unlock(&dh->lock);
                rcu_read_unlock_bh();
                return !(cfg->mode & XT_HASHLIMIT_INVERT);
        }

        spin_unlock(&dh->lock);
        rcu_read_unlock_bh();
        /* default match is underlimit - so over the limit, we need to invert */
        return cfg->mode & XT_HASHLIMIT_INVERT;

 hotdrop:
        par->hotdrop = true;
        return false;
}
static bool
hashlimit_mt_v1(const struct sk_buff *skb, struct xt_action_param *par)
{
        const struct xt_hashlimit_mtinfo1 *info = par->matchinfo;
        struct xt_hashlimit_htable *hinfo = info->hinfo;
        struct hashlimit_cfg2 cfg = {};
        int ret;

        ret = cfg_copy(&cfg, (void *)&info->cfg, 1);
        if (ret)
                return ret;

        return hashlimit_mt_common(skb, par, hinfo, &cfg, 1);
}
static bool
hashlimit_mt(const struct sk_buff *skb, struct xt_action_param *par)
{
        const struct xt_hashlimit_mtinfo2 *info = par->matchinfo;
        struct xt_hashlimit_htable *hinfo = info->hinfo;

        return hashlimit_mt_common(skb, par, hinfo, &info->cfg, 2);
}
static int hashlimit_mt_check_common(const struct xt_mtchk_param *par,
                                     struct xt_hashlimit_htable **hinfo,
                                     struct hashlimit_cfg2 *cfg,
                                     const char *name, int revision)
{
        struct net *net = par->net;
        int ret;

        if (cfg->gc_interval == 0 || cfg->expire == 0)
                return -EINVAL;
        if (par->family == NFPROTO_IPV4) {
                if (cfg->srcmask > 32 || cfg->dstmask > 32)
                        return -EINVAL;
        } else {
                if (cfg->srcmask > 128 || cfg->dstmask > 128)
                        return -EINVAL;
        }

        if (cfg->mode & ~XT_HASHLIMIT_ALL) {
                pr_info("Unknown mode mask %X, kernel too old?\n",
                        cfg->mode);
                return -EINVAL;
        }

        /* Check for overflow. */
        if (cfg->mode & XT_HASHLIMIT_BYTES) {
                if (user2credits_byte(cfg->avg) == 0) {
                        pr_info("overflow, rate too high: %llu\n", cfg->avg);
                        return -EINVAL;
                }
        } else if (cfg->burst == 0 ||
                   user2credits(cfg->avg * cfg->burst, revision) <
                   user2credits(cfg->avg, revision)) {
                pr_info("overflow, try lower: %llu/%llu\n",
                        cfg->avg, cfg->burst);
                return -ERANGE;
        }

        mutex_lock(&hashlimit_mutex);
        *hinfo = htable_find_get(net, name, par->family);
        if (*hinfo == NULL) {
                ret = htable_create(net, cfg, name, par->family,
                                    hinfo, revision);
                if (ret < 0) {
                        mutex_unlock(&hashlimit_mutex);
                        return ret;
                }
        }
        mutex_unlock(&hashlimit_mutex);

        return 0;
}
static int hashlimit_mt_check_v1(const struct xt_mtchk_param *par)
{
        struct xt_hashlimit_mtinfo1 *info = par->matchinfo;
        struct hashlimit_cfg2 cfg = {};
        int ret;

        if (info->name[sizeof(info->name) - 1] != '\0')
                return -EINVAL;

        ret = cfg_copy(&cfg, (void *)&info->cfg, 1);
        if (ret)
                return ret;

        return hashlimit_mt_check_common(par, &info->hinfo,
                                         &cfg, info->name, 1);
}
static int hashlimit_mt_check(const struct xt_mtchk_param *par)
{
        struct xt_hashlimit_mtinfo2 *info = par->matchinfo;

        if (info->name[sizeof(info->name) - 1] != '\0')
                return -EINVAL;

        return hashlimit_mt_check_common(par, &info->hinfo, &info->cfg,
                                         info->name, 2);
}
static void hashlimit_mt_destroy_v1(const struct xt_mtdtor_param *par)
{
        const struct xt_hashlimit_mtinfo1 *info = par->matchinfo;

        htable_put(info->hinfo);
}

static void hashlimit_mt_destroy(const struct xt_mtdtor_param *par)
{
        const struct xt_hashlimit_mtinfo2 *info = par->matchinfo;

        htable_put(info->hinfo);
}
static struct xt_match hashlimit_mt_reg[] __read_mostly = {
        {
                .name       = "hashlimit",
                .revision   = 1,
                .family     = NFPROTO_IPV4,
                .match      = hashlimit_mt_v1,
                .matchsize  = sizeof(struct xt_hashlimit_mtinfo1),
                .checkentry = hashlimit_mt_check_v1,
                .destroy    = hashlimit_mt_destroy_v1,
                .me         = THIS_MODULE,
        },
        {
                .name       = "hashlimit",
                .revision   = 2,
                .family     = NFPROTO_IPV4,
                .match      = hashlimit_mt,
                .matchsize  = sizeof(struct xt_hashlimit_mtinfo2),
                .checkentry = hashlimit_mt_check,
                .destroy    = hashlimit_mt_destroy,
                .me         = THIS_MODULE,
        },
#if IS_ENABLED(CONFIG_IP6_NF_IPTABLES)
        {
                .name       = "hashlimit",
                .revision   = 1,
                .family     = NFPROTO_IPV6,
                .match      = hashlimit_mt_v1,
                .matchsize  = sizeof(struct xt_hashlimit_mtinfo1),
                .checkentry = hashlimit_mt_check_v1,
                .destroy    = hashlimit_mt_destroy_v1,
                .me         = THIS_MODULE,
        },
        {
                .name       = "hashlimit",
                .revision   = 2,
                .family     = NFPROTO_IPV6,
                .match      = hashlimit_mt,
                .matchsize  = sizeof(struct xt_hashlimit_mtinfo2),
                .checkentry = hashlimit_mt_check,
                .destroy    = hashlimit_mt_destroy,
                .me         = THIS_MODULE,
        },
#endif
};
static void *dl_seq_start(struct seq_file *s, loff_t *pos)
        __acquires(htable->lock)
{
        struct xt_hashlimit_htable *htable = s->private;
        unsigned int *bucket;

        spin_lock_bh(&htable->lock);
        if (*pos >= htable->cfg.size)
                return NULL;

        bucket = kmalloc(sizeof(unsigned int), GFP_ATOMIC);
        if (!bucket)
                return ERR_PTR(-ENOMEM);

        *bucket = *pos;
        return bucket;
}
static void *dl_seq_next(struct seq_file *s, void *v, loff_t *pos)
{
        struct xt_hashlimit_htable *htable = s->private;
        unsigned int *bucket = (unsigned int *)v;

        *pos = ++(*bucket);
        if (*pos >= htable->cfg.size) {
                kfree(v);
                return NULL;
        }
        return bucket;
}
static void dl_seq_stop(struct seq_file *s, void *v)
        __releases(htable->lock)
{
        struct xt_hashlimit_htable *htable = s->private;
        unsigned int *bucket = (unsigned int *)v;

        if (!IS_ERR(bucket))
                kfree(bucket);
        spin_unlock_bh(&htable->lock);
}
static void dl_seq_print(struct dsthash_ent *ent, u_int8_t family,
                         struct seq_file *s)
{
        switch (family) {
        case NFPROTO_IPV4:
                seq_printf(s, "%ld %pI4:%u->%pI4:%u %llu %llu %llu\n",
                           (long)(ent->expires - jiffies)/HZ,
                           &ent->dst.ip.src,
                           ntohs(ent->dst.src_port),
                           &ent->dst.ip.dst,
                           ntohs(ent->dst.dst_port),
                           ent->rateinfo.credit, ent->rateinfo.credit_cap,
                           ent->rateinfo.cost);
                break;
#if IS_ENABLED(CONFIG_IP6_NF_IPTABLES)
        case NFPROTO_IPV6:
                seq_printf(s, "%ld %pI6:%u->%pI6:%u %llu %llu %llu\n",
                           (long)(ent->expires - jiffies)/HZ,
                           &ent->dst.ip6.src,
                           ntohs(ent->dst.src_port),
                           &ent->dst.ip6.dst,
                           ntohs(ent->dst.dst_port),
                           ent->rateinfo.credit, ent->rateinfo.credit_cap,
                           ent->rateinfo.cost);
                break;
#endif
        default:
                BUG();
        }
}
static int dl_seq_real_show_v1(struct dsthash_ent *ent, u_int8_t family,
                               struct seq_file *s)
{
        const struct xt_hashlimit_htable *ht = s->private;

        spin_lock(&ent->lock);
        /* recalculate to show accurate numbers */
        rateinfo_recalc(ent, jiffies, ht->cfg.mode, 1);

        dl_seq_print(ent, family, s);

        spin_unlock(&ent->lock);
        return seq_has_overflowed(s);
}
static int dl_seq_real_show(struct dsthash_ent *ent, u_int8_t family,
                            struct seq_file *s)
{
        const struct xt_hashlimit_htable *ht = s->private;

        spin_lock(&ent->lock);
        /* recalculate to show accurate numbers */
        rateinfo_recalc(ent, jiffies, ht->cfg.mode, 2);

        dl_seq_print(ent, family, s);

        spin_unlock(&ent->lock);
        return seq_has_overflowed(s);
}
static int dl_seq_show_v1(struct seq_file *s, void *v)
{
        struct xt_hashlimit_htable *htable = s->private;
        unsigned int *bucket = (unsigned int *)v;
        struct dsthash_ent *ent;

        if (!hlist_empty(&htable->hash[*bucket])) {
                hlist_for_each_entry(ent, &htable->hash[*bucket], node)
                        if (dl_seq_real_show_v1(ent, htable->family, s))
                                return -1;
        }
        return 0;
}
static int dl_seq_show(struct seq_file *s, void *v)
{
        struct xt_hashlimit_htable *htable = s->private;
        unsigned int *bucket = (unsigned int *)v;
        struct dsthash_ent *ent;

        if (!hlist_empty(&htable->hash[*bucket])) {
                hlist_for_each_entry(ent, &htable->hash[*bucket], node)
                        if (dl_seq_real_show(ent, htable->family, s))
                                return -1;
        }
        return 0;
}
static const struct seq_operations dl_seq_ops_v1 = {
        .start = dl_seq_start,
        .next  = dl_seq_next,
        .stop  = dl_seq_stop,
        .show  = dl_seq_show_v1
};
static const struct seq_operations dl_seq_ops = {
        .start = dl_seq_start,
        .next  = dl_seq_next,
        .stop  = dl_seq_stop,
        .show  = dl_seq_show
};
static int dl_proc_open_v1(struct inode *inode, struct file *file)
{
        int ret = seq_open(file, &dl_seq_ops_v1);

        if (!ret) {
                struct seq_file *sf = file->private_data;

                sf->private = PDE_DATA(inode);
        }
        return ret;
}
static int dl_proc_open(struct inode *inode, struct file *file)
{
        int ret = seq_open(file, &dl_seq_ops);

        if (!ret) {
                struct seq_file *sf = file->private_data;

                sf->private = PDE_DATA(inode);
        }
        return ret;
}
static const struct file_operations dl_file_ops_v1 = {
        .owner   = THIS_MODULE,
        .open    = dl_proc_open_v1,
        .read    = seq_read,
        .llseek  = seq_lseek,
        .release = seq_release
};
static const struct file_operations dl_file_ops = {
        .owner   = THIS_MODULE,
        .open    = dl_proc_open,
        .read    = seq_read,
        .llseek  = seq_lseek,
        .release = seq_release
};
static int __net_init hashlimit_proc_net_init(struct net *net)
{
        struct hashlimit_net *hashlimit_net = hashlimit_pernet(net);

        hashlimit_net->ipt_hashlimit = proc_mkdir("ipt_hashlimit", net->proc_net);
        if (!hashlimit_net->ipt_hashlimit)
                return -ENOMEM;
#if IS_ENABLED(CONFIG_IP6_NF_IPTABLES)
        hashlimit_net->ip6t_hashlimit = proc_mkdir("ip6t_hashlimit", net->proc_net);
        if (!hashlimit_net->ip6t_hashlimit) {
                remove_proc_entry("ipt_hashlimit", net->proc_net);
                return -ENOMEM;
        }
#endif
        return 0;
}
static void __net_exit hashlimit_proc_net_exit(struct net *net)
{
        struct xt_hashlimit_htable *hinfo;
        struct hashlimit_net *hashlimit_net = hashlimit_pernet(net);

        /* hashlimit_net_exit() is called before hashlimit_mt_destroy().
         * Make sure that the parent ipt_hashlimit and ip6t_hashlimit proc
         * entries are empty before trying to remove them.
         */
        mutex_lock(&hashlimit_mutex);
        hlist_for_each_entry(hinfo, &hashlimit_net->htables, node)
                htable_remove_proc_entry(hinfo);
        hashlimit_net->ipt_hashlimit = NULL;
        hashlimit_net->ip6t_hashlimit = NULL;
        mutex_unlock(&hashlimit_mutex);

        remove_proc_entry("ipt_hashlimit", net->proc_net);
#if IS_ENABLED(CONFIG_IP6_NF_IPTABLES)
        remove_proc_entry("ip6t_hashlimit", net->proc_net);
#endif
}
static int __net_init hashlimit_net_init(struct net *net)
{
        struct hashlimit_net *hashlimit_net = hashlimit_pernet(net);

        INIT_HLIST_HEAD(&hashlimit_net->htables);
        return hashlimit_proc_net_init(net);
}
static void __net_exit hashlimit_net_exit(struct net *net)
{
        hashlimit_proc_net_exit(net);
}
static struct pernet_operations hashlimit_net_ops = {
        .init = hashlimit_net_init,
        .exit = hashlimit_net_exit,
        .id   = &hashlimit_net_id,
        .size = sizeof(struct hashlimit_net),
};
static int __init hashlimit_mt_init(void)
{
        int err;

        err = register_pernet_subsys(&hashlimit_net_ops);
        if (err < 0)
                return err;
        err = xt_register_matches(hashlimit_mt_reg,
                                  ARRAY_SIZE(hashlimit_mt_reg));
        if (err < 0)
                goto err1;

        err = -ENOMEM;
        hashlimit_cachep = kmem_cache_create("xt_hashlimit",
                                             sizeof(struct dsthash_ent), 0, 0,
                                             NULL);
        if (!hashlimit_cachep) {
                pr_warn("unable to create slab cache\n");
                goto err2;
        }
        return 0;

err2:
        xt_unregister_matches(hashlimit_mt_reg, ARRAY_SIZE(hashlimit_mt_reg));
err1:
        unregister_pernet_subsys(&hashlimit_net_ops);
        return err;
}
static void __exit hashlimit_mt_exit(void)
{
        xt_unregister_matches(hashlimit_mt_reg, ARRAY_SIZE(hashlimit_mt_reg));
        unregister_pernet_subsys(&hashlimit_net_ops);

        rcu_barrier_bh();
        kmem_cache_destroy(hashlimit_cachep);
}
module_init(hashlimit_mt_init);
module_exit(hashlimit_mt_exit);