/*
 * xt_HMARK - Netfilter module to set mark by means of hashing
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 as published by
 * the Free Software Foundation.
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/module.h>
#include <linux/skbuff.h>
#include <linux/icmp.h>

#include <linux/netfilter/x_tables.h>
#include <linux/netfilter/xt_HMARK.h>

#include <net/ip.h>
#if IS_ENABLED(CONFIG_NF_CONNTRACK)
#include <net/netfilter/nf_conntrack.h>
#endif
#if IS_ENABLED(CONFIG_IP6_NF_IPTABLES)
#include <net/ipv6.h>
#include <linux/netfilter_ipv6/ip6_tables.h>
#endif

MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Xtables: packet marking using hash calculation");
MODULE_ALIAS("ipt_HMARK");
MODULE_ALIAS("ip6t_HMARK");

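/* Fields that feed the hash: masked addresses, normalized ports/SPI and the
 * layer 4 protocol number.
 */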
struct hmark_tuple {
        __be32                  src;
        __be32                  dst;
        union hmark_ports       uports;
        u8                      proto;
};

static inline __be32 hmark_addr6_mask(const __be32 *addr32, const __be32 *mask)
{
        return (addr32[0] & mask[0]) ^
               (addr32[1] & mask[1]) ^
               (addr32[2] & mask[2]) ^
               (addr32[3] & mask[3]);
}

static inline __be32
hmark_addr_mask(int l3num, const __be32 *addr32, const __be32 *mask)
{
        switch (l3num) {
        case AF_INET:
                return *addr32 & *mask;
        case AF_INET6:
                return hmark_addr6_mask(addr32, mask);
        }
        return 0;
}

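/* Apply the configured port mask/set and order the two ports so that both
 * directions of a flow produce the same 32-bit value.
 */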
static inline void hmark_swap_ports(union hmark_ports *uports,
                                    const struct xt_hmark_info *info)
{
        union hmark_ports hp;
        u16 src, dst;

        hp.b32 = (uports->b32 & info->port_mask.b32) | info->port_set.b32;
        src = ntohs(hp.b16.src);
        dst = ntohs(hp.b16.dst);

        if (dst > src)
                uports->v32 = (dst << 16) | src;
        else
                uports->v32 = (src << 16) | dst;
}

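/* Fill the tuple from the conntrack entry attached to the skb; returns -1 if
 * no conntrack entry is available so the caller leaves the mark untouched.
 */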
static int
hmark_ct_set_htuple(const struct sk_buff *skb, struct hmark_tuple *t,
                    const struct xt_hmark_info *info)
{
#if IS_ENABLED(CONFIG_NF_CONNTRACK)
        enum ip_conntrack_info ctinfo;
        struct nf_conn *ct = nf_ct_get(skb, &ctinfo);
        struct nf_conntrack_tuple *otuple;
        struct nf_conntrack_tuple *rtuple;

        if (ct == NULL)
                return -1;

        otuple = &ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple;
        rtuple = &ct->tuplehash[IP_CT_DIR_REPLY].tuple;

        t->src = hmark_addr_mask(otuple->src.l3num, otuple->src.u3.ip6,
                                 info->src_mask.ip6);
        t->dst = hmark_addr_mask(otuple->src.l3num, rtuple->src.u3.ip6,
                                 info->dst_mask.ip6);

        if (info->flags & XT_HMARK_FLAG(XT_HMARK_METHOD_L3))
                return 0;

        t->proto = nf_ct_protonum(ct);
        if (t->proto != IPPROTO_ICMP) {
                t->uports.b16.src = otuple->src.u.all;
                t->uports.b16.dst = rtuple->src.u.all;
                hmark_swap_ports(&t->uports, info);
        }
        return 0;
#else
        return -1;
#endif
}

/* This hash function is endian independent, to ensure consistent hashing if
 * the cluster is composed of big and little endian systems. */
static inline u32
hmark_hash(struct hmark_tuple *t, const struct xt_hmark_info *info)
{
        u32 hash;
        u32 src = ntohl(t->src);
        u32 dst = ntohl(t->dst);

        /* Order the addresses so both flow directions hash identically */
        if (dst < src)
                swap(src, dst);

        hash = jhash_3words(src, dst, t->uports.v32, info->hashrnd);
        hash = hash ^ (t->proto & info->proto_mask);

        return reciprocal_scale(hash, info->hmodulus) + info->hoffset;
}

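/* Copy the 32 bits holding the transport ports (or the SPI for ESP/AH) at
 * the given offset into the tuple and normalize them via hmark_swap_ports().
 */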
static void
hmark_set_tuple_ports(const struct sk_buff *skb, unsigned int nhoff,
                      struct hmark_tuple *t, const struct xt_hmark_info *info)
{
        int protoff;

        protoff = proto_ports_offset(t->proto);
        if (protoff < 0)
                return;

        nhoff += protoff;
        if (skb_copy_bits(skb, nhoff, &t->uports, sizeof(t->uports)) < 0)
                return;

        hmark_swap_ports(&t->uports, info);
}

#if IS_ENABLED(CONFIG_IP6_NF_IPTABLES)
static int get_inner6_hdr(const struct sk_buff *skb, int *offset)
{
        struct icmp6hdr *icmp6h, _ih6;

        icmp6h = skb_header_pointer(skb, *offset, sizeof(_ih6), &_ih6);
        if (icmp6h == NULL)
                return 0;

        /* ICMPv6 error messages (type < 128) embed the offending packet */
        if (icmp6h->icmp6_type && icmp6h->icmp6_type < 128) {
                *offset += sizeof(struct icmp6hdr);
                return 1;
        }
        return 0;
}

static int
hmark_pkt_set_htuple_ipv6(const struct sk_buff *skb, struct hmark_tuple *t,
                          const struct xt_hmark_info *info)
{
        struct ipv6hdr *ip6, _ip6;
        int flag = IP6_FH_F_AUTH;
        unsigned int nhoff = 0;
        u16 fragoff = 0;
        int nexthdr;

        ip6 = (struct ipv6hdr *) (skb->data + skb_network_offset(skb));
        nexthdr = ipv6_find_hdr(skb, &nhoff, -1, &fragoff, &flag);
        if (nexthdr < 0)
                return 0;
        /* No need to check for icmp errors on fragments */
        if ((flag & IP6_FH_F_FRAG) || (nexthdr != IPPROTO_ICMPV6))
                goto noicmp;
        /* Use inner header in case of ICMP errors */
        if (get_inner6_hdr(skb, &nhoff)) {
                ip6 = skb_header_pointer(skb, nhoff, sizeof(_ip6), &_ip6);
                if (ip6 == NULL)
                        return -1;
                /* If AH present, use SPI like in ESP. */
                flag = IP6_FH_F_AUTH;
                nexthdr = ipv6_find_hdr(skb, &nhoff, -1, &fragoff, &flag);
                if (nexthdr < 0)
                        return -1;
        }
noicmp:
        t->src = hmark_addr6_mask(ip6->saddr.s6_addr32, info->src_mask.ip6);
        t->dst = hmark_addr6_mask(ip6->daddr.s6_addr32, info->dst_mask.ip6);

        if (info->flags & XT_HMARK_FLAG(XT_HMARK_METHOD_L3))
                return 0;

        t->proto = nexthdr;
        if (t->proto == IPPROTO_ICMPV6)
                return 0;

        if (flag & IP6_FH_F_FRAG)
                return 0;

        hmark_set_tuple_ports(skb, nhoff, t, info);
        return 0;
}

static unsigned int
hmark_tg_v6(struct sk_buff *skb, const struct xt_action_param *par)
{
        const struct xt_hmark_info *info = par->targinfo;
        struct hmark_tuple t;

        memset(&t, 0, sizeof(struct hmark_tuple));

        if (info->flags & XT_HMARK_FLAG(XT_HMARK_CT)) {
                if (hmark_ct_set_htuple(skb, &t, info) < 0)
                        return XT_CONTINUE;
        } else {
                if (hmark_pkt_set_htuple_ipv6(skb, &t, info) < 0)
                        return XT_CONTINUE;
        }

        skb->mark = hmark_hash(&t, info);
        return XT_CONTINUE;
}
#endif

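/* IPv4 counterpart of get_inner6_hdr(): advance the offset past an ICMP
 * error header so hashing is done on the embedded original packet.
 */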
static int get_inner_hdr(const struct sk_buff *skb, int iphsz, int *nhoff)
{
        const struct icmphdr *icmph;
        struct icmphdr _ih;

        /* Not enough header? */
        icmph = skb_header_pointer(skb, *nhoff + iphsz, sizeof(_ih), &_ih);
        if (icmph == NULL || icmph->type > NR_ICMP_TYPES)
                return 0;

        /* Error message? */
        if (icmph->type != ICMP_DEST_UNREACH &&
            icmph->type != ICMP_SOURCE_QUENCH &&
            icmph->type != ICMP_TIME_EXCEEDED &&
            icmph->type != ICMP_PARAMETERPROB &&
            icmph->type != ICMP_REDIRECT)
                return 0;

        *nhoff += iphsz + sizeof(_ih);
        return 1;
}

static int
hmark_pkt_set_htuple_ipv4(const struct sk_buff *skb, struct hmark_tuple *t,
                          const struct xt_hmark_info *info)
{
        struct iphdr *ip, _ip;
        int nhoff = skb_network_offset(skb);

        ip = (struct iphdr *) (skb->data + nhoff);
        if (ip->protocol == IPPROTO_ICMP) {
                /* Use inner header in case of ICMP errors */
                if (get_inner_hdr(skb, ip->ihl * 4, &nhoff)) {
                        ip = skb_header_pointer(skb, nhoff, sizeof(_ip), &_ip);
                        if (ip == NULL)
                                return -1;
                }
        }

        t->src = ip->saddr & info->src_mask.ip;
        t->dst = ip->daddr & info->dst_mask.ip;

        if (info->flags & XT_HMARK_FLAG(XT_HMARK_METHOD_L3))
                return 0;

        t->proto = ip->protocol;

        /* ICMP has no ports, skip */
        if (t->proto == IPPROTO_ICMP)
                return 0;

        /* follow-up fragments don't contain ports, skip all fragments */
        if (ip->frag_off & htons(IP_MF | IP_OFFSET))
                return 0;

        hmark_set_tuple_ports(skb, (ip->ihl * 4) + nhoff, t, info);
        return 0;
}

static unsigned int
hmark_tg_v4(struct sk_buff *skb, const struct xt_action_param *par)
{
        const struct xt_hmark_info *info = par->targinfo;
        struct hmark_tuple t;

        memset(&t, 0, sizeof(struct hmark_tuple));

        if (info->flags & XT_HMARK_FLAG(XT_HMARK_CT)) {
                if (hmark_ct_set_htuple(skb, &t, info) < 0)
                        return XT_CONTINUE;
        } else {
                if (hmark_pkt_set_htuple_ipv4(skb, &t, info) < 0)
                        return XT_CONTINUE;
        }

        skb->mark = hmark_hash(&t, info);
        return XT_CONTINUE;
}

static int hmark_tg_check(const struct xt_tgchk_param *par)
{
        const struct xt_hmark_info *info = par->targinfo;
        const char *errmsg = "proto mask must be zero with L3 mode";

        if (!info->hmodulus)
                return -EINVAL;

        if (info->proto_mask &&
            (info->flags & XT_HMARK_FLAG(XT_HMARK_METHOD_L3)))
                goto err;

        if (info->flags & XT_HMARK_FLAG(XT_HMARK_SPI_MASK) &&
            (info->flags & (XT_HMARK_FLAG(XT_HMARK_SPORT_MASK) |
                            XT_HMARK_FLAG(XT_HMARK_DPORT_MASK))))
                return -EINVAL;

        if (info->flags & XT_HMARK_FLAG(XT_HMARK_SPI) &&
            (info->flags & (XT_HMARK_FLAG(XT_HMARK_SPORT) |
                            XT_HMARK_FLAG(XT_HMARK_DPORT)))) {
                errmsg = "spi-set and port-set can't be combined";
                goto err;
        }
        return 0;
err:
        pr_info_ratelimited("%s\n", errmsg);
        return -EINVAL;
}

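/*
 * Illustrative userspace usage (not part of this module): a rule of roughly
 * this shape spreads flows across marks 100..102, so that both directions of
 * a connection receive the same mark:
 *
 *   iptables -t mangle -A PREROUTING -j HMARK \
 *            --hmark-tuple src,dst,sport,dport,proto \
 *            --hmark-mod 3 --hmark-offset 100 --hmark-rnd 0xfeedcafe
 *
 * Option names follow the iptables HMARK extension; see
 * iptables-extensions(8) for the exact syntax on a given system.
 */
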
static struct xt_target hmark_tg_reg[] __read_mostly = {
        {
                .name           = "HMARK",
                .revision       = 0,
                .family         = NFPROTO_IPV4,
                .target         = hmark_tg_v4,
                .targetsize     = sizeof(struct xt_hmark_info),
                .checkentry     = hmark_tg_check,
                .me             = THIS_MODULE,
        },
#if IS_ENABLED(CONFIG_IP6_NF_IPTABLES)
        {
                .name           = "HMARK",
                .revision       = 0,
                .family         = NFPROTO_IPV6,
                .target         = hmark_tg_v6,
                .targetsize     = sizeof(struct xt_hmark_info),
                .checkentry     = hmark_tg_check,
                .me             = THIS_MODULE,
        },
#endif
};

static int __init hmark_tg_init(void)
{
        return xt_register_targets(hmark_tg_reg, ARRAY_SIZE(hmark_tg_reg));
}

static void __exit hmark_tg_exit(void)
{
        xt_unregister_targets(hmark_tg_reg, ARRAY_SIZE(hmark_tg_reg));
}

module_init(hmark_tg_init);
module_exit(hmark_tg_exit);