// SPDX-License-Identifier: GPL-2.0-only
/*
 * Development of this code funded by Astaro AG (http://www.astaro.com/)
 */

#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/netlink.h>
#include <linux/netfilter.h>
#include <linux/if_arp.h>
#include <linux/netfilter/nf_tables.h>
#include <net/netfilter/nf_tables_core.h>
#include <net/netfilter/nf_tables_offload.h>
#include <net/netfilter/nf_tables.h>

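/* Expression state for the generic comparison: the constant right-hand
 * operand, the source register holding the left-hand side, the compare
 * length in bytes and the comparison operator.
 */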
struct nft_cmp_expr {
	struct nft_data		data;
	u8			sreg;
	u8			len;
	enum nft_cmp_ops	op:8;
};

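/* Compare the source register against the constant operand. Register
 * contents are kept in network byte order, so memcmp() gives a
 * lexicographic comparison that also makes LT/GT behave correctly on
 * big-endian packet data. On mismatch, rule evaluation stops with
 * NFT_BREAK.
 */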
void nft_cmp_eval(const struct nft_expr *expr,
		  struct nft_regs *regs,
		  const struct nft_pktinfo *pkt)
{
	const struct nft_cmp_expr *priv = nft_expr_priv(expr);
	int d;

	d = memcmp(&regs->data[priv->sreg], &priv->data, priv->len);
	switch (priv->op) {
	case NFT_CMP_EQ:
		if (d != 0)
			goto mismatch;
		break;
	case NFT_CMP_NEQ:
		if (d == 0)
			goto mismatch;
		break;
	case NFT_CMP_LT:
		if (d == 0)
			goto mismatch;
		fallthrough;
	case NFT_CMP_LTE:
		if (d > 0)
			goto mismatch;
		break;
	case NFT_CMP_GT:
		if (d == 0)
			goto mismatch;
		fallthrough;
	case NFT_CMP_GTE:
		if (d < 0)
			goto mismatch;
		break;
	}
	return;

mismatch:
	regs->verdict.code = NFT_BREAK;
}

static const struct nla_policy nft_cmp_policy[NFTA_CMP_MAX + 1] = {
	[NFTA_CMP_SREG]		= { .type = NLA_U32 },
	[NFTA_CMP_OP]		= { .type = NLA_U32 },
	[NFTA_CMP_DATA]		= { .type = NLA_NESTED },
};

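/* Parse the constant operand, then validate and resolve the source
 * register against the parsed data length.
 */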
static int nft_cmp_init(const struct nft_ctx *ctx, const struct nft_expr *expr,
			const struct nlattr * const tb[])
{
	struct nft_cmp_expr *priv = nft_expr_priv(expr);
	struct nft_data_desc desc = {
		.type	= NFT_DATA_VALUE,
		.size	= sizeof(priv->data),
	};
	int err;

	err = nft_data_init(NULL, &priv->data, &desc, tb[NFTA_CMP_DATA]);
	if (err < 0)
		return err;

	err = nft_parse_register_load(tb[NFTA_CMP_SREG], &priv->sreg, desc.len);
	if (err < 0)
		return err;

	priv->op  = ntohl(nla_get_be32(tb[NFTA_CMP_OP]));
	priv->len = desc.len;

	return 0;
}

static int nft_cmp_dump(struct sk_buff *skb,
			const struct nft_expr *expr, bool reset)
{
	const struct nft_cmp_expr *priv = nft_expr_priv(expr);

	if (nft_dump_register(skb, NFTA_CMP_SREG, priv->sreg))
		goto nla_put_failure;
	if (nla_put_be32(skb, NFTA_CMP_OP, htonl(priv->op)))
		goto nla_put_failure;

	if (nft_data_dump(skb, NFTA_CMP_DATA, &priv->data,
			  NFT_DATA_VALUE, priv->len) < 0)
		goto nla_put_failure;
	return 0;

nla_put_failure:
	return -1;
}

union nft_cmp_offload_data {
	u16	val16;
	u32	val32;
	u64	val64;
};

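/* Flow offload match keys are in host byte order while nft data is big
 * endian: convert 2-, 4- and 8-byte values before installing them.
 */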
static void nft_payload_n2h(union nft_cmp_offload_data *data,
			    const u8 *val, u32 len)
{
	switch (len) {
	case 2:
		data->val16 = ntohs(*((__be16 *)val));
		break;
	case 4:
		data->val32 = ntohl(*((__be32 *)val));
		break;
	case 8:
		data->val64 = be64_to_cpu(*((__be64 *)val));
		break;
	default:
		WARN_ON_ONCE(1);
		break;
	}
}

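/* Translate an EQ comparison into a flow rule key/mask pair. Other
 * operators, and lengths exceeding the backing match field, cannot be
 * expressed in hardware and are rejected with -EOPNOTSUPP.
 */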
static int __nft_cmp_offload(struct nft_offload_ctx *ctx,
			     struct nft_flow_rule *flow,
			     const struct nft_cmp_expr *priv)
{
	struct nft_offload_reg *reg = &ctx->regs[priv->sreg];
	union nft_cmp_offload_data _data, _datamask;
	u8 *mask = (u8 *)&flow->match.mask;
	u8 *key = (u8 *)&flow->match.key;
	u8 *data, *datamask;

	if (priv->op != NFT_CMP_EQ || priv->len > reg->len)
		return -EOPNOTSUPP;

	if (reg->flags & NFT_OFFLOAD_F_NETWORK2HOST) {
		nft_payload_n2h(&_data, (u8 *)&priv->data, reg->len);
		nft_payload_n2h(&_datamask, (u8 *)&reg->mask, reg->len);
		data = (u8 *)&_data;
		datamask = (u8 *)&_datamask;
	} else {
		data = (u8 *)&priv->data;
		datamask = (u8 *)&reg->mask;
	}

	memcpy(key + reg->offset, data, reg->len);
	memcpy(mask + reg->offset, datamask, reg->len);

	flow->match.dissector.used_keys |= BIT(reg->key);
	flow->match.dissector.offset[reg->key] = reg->base_offset;

	if (reg->key == FLOW_DISSECTOR_KEY_META &&
	    reg->offset == offsetof(struct nft_flow_key, meta.ingress_iftype) &&
	    nft_reg_load16(priv->data.data) != ARPHRD_ETHER)
		return -EOPNOTSUPP;

	nft_offload_update_dependency(ctx, &priv->data, reg->len);

	return 0;
}

static int nft_cmp_offload(struct nft_offload_ctx *ctx,
			   struct nft_flow_rule *flow,
			   const struct nft_expr *expr)
{
	const struct nft_cmp_expr *priv = nft_expr_priv(expr);

	return __nft_cmp_offload(ctx, flow, priv);
}

static const struct nft_expr_ops nft_cmp_ops = {
	.type		= &nft_cmp_type,
	.size		= NFT_EXPR_SIZE(sizeof(struct nft_cmp_expr)),
	.eval		= nft_cmp_eval,
	.init		= nft_cmp_init,
	.dump		= nft_cmp_dump,
	.reduce		= NFT_REDUCE_READONLY,
	.offload	= nft_cmp_offload,
};

/* Calculate the mask for the nft_cmp_fast expression. On big endian the
 * mask needs to include the *upper* bytes when interpreting that data as
 * something smaller than the full u32, therefore a cpu_to_le32 is done.
 */
static u32 nft_cmp_fast_mask(unsigned int len)
{
	__le32 mask = cpu_to_le32(~0U >> (sizeof_field(struct nft_cmp_fast_expr,
						       data) * BITS_PER_BYTE - len));

	return (__force u32)mask;
}

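/* nft_cmp_fast covers EQ/NEQ comparisons of values up to 32 bits wide:
 * the constant is pre-masked at init time so that evaluation reduces to
 * a single masked word comparison.
 */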
static int nft_cmp_fast_init(const struct nft_ctx *ctx,
			     const struct nft_expr *expr,
			     const struct nlattr * const tb[])
{
	struct nft_cmp_fast_expr *priv = nft_expr_priv(expr);
	struct nft_data data;
	struct nft_data_desc desc = {
		.type	= NFT_DATA_VALUE,
		.size	= sizeof(data),
	};
	int err;

	err = nft_data_init(NULL, &data, &desc, tb[NFTA_CMP_DATA]);
	if (err < 0)
		return err;

	err = nft_parse_register_load(tb[NFTA_CMP_SREG], &priv->sreg, desc.len);
	if (err < 0)
		return err;

	desc.len *= BITS_PER_BYTE;

	priv->mask = nft_cmp_fast_mask(desc.len);
	priv->data = data.data[0] & priv->mask;
	priv->len  = desc.len;
	priv->inv  = ntohl(nla_get_be32(tb[NFTA_CMP_OP])) != NFT_CMP_EQ;

	return 0;
}

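/* Offload reuses the generic path: expand the fast expression back into
 * a full nft_cmp_expr and hand it to __nft_cmp_offload().
 */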
static int nft_cmp_fast_offload(struct nft_offload_ctx *ctx,
				struct nft_flow_rule *flow,
				const struct nft_expr *expr)
{
	const struct nft_cmp_fast_expr *priv = nft_expr_priv(expr);
	struct nft_cmp_expr cmp = {
		.data	= {
			.data	= {
				[0] = priv->data,
			},
		},
		.sreg	= priv->sreg,
		.len	= priv->len / BITS_PER_BYTE,
		.op	= priv->inv ? NFT_CMP_NEQ : NFT_CMP_EQ,
	};

	return __nft_cmp_offload(ctx, flow, &cmp);
}

static int nft_cmp_fast_dump(struct sk_buff *skb,
			     const struct nft_expr *expr, bool reset)
{
	const struct nft_cmp_fast_expr *priv = nft_expr_priv(expr);
	enum nft_cmp_ops op = priv->inv ? NFT_CMP_NEQ : NFT_CMP_EQ;
	struct nft_data data;

	if (nft_dump_register(skb, NFTA_CMP_SREG, priv->sreg))
		goto nla_put_failure;
	if (nla_put_be32(skb, NFTA_CMP_OP, htonl(op)))
		goto nla_put_failure;

	data.data[0] = priv->data;
	if (nft_data_dump(skb, NFTA_CMP_DATA, &data,
			  NFT_DATA_VALUE, priv->len / BITS_PER_BYTE) < 0)
		goto nla_put_failure;
	return 0;

nla_put_failure:
	return -1;
}

const struct nft_expr_ops nft_cmp_fast_ops = {
	.type		= &nft_cmp_type,
	.size		= NFT_EXPR_SIZE(sizeof(struct nft_cmp_fast_expr)),
	.eval		= NULL,	/* inlined */
	.init		= nft_cmp_fast_init,
	.dump		= nft_cmp_fast_dump,
	.reduce		= NFT_REDUCE_READONLY,
	.offload	= nft_cmp_fast_offload,
};

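/* Helpers to build the comparison mask for up to 128 bits of data:
 * whole 32-bit words are set to all-ones, a trailing partial word gets
 * the little-endian-adjusted partial mask, and unused words are cleared.
 */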
static u32 nft_cmp_mask(u32 bitlen)
{
	return (__force u32)cpu_to_le32(~0U >> (sizeof(u32) * BITS_PER_BYTE - bitlen));
}

static void nft_cmp16_fast_mask(struct nft_data *data, unsigned int bitlen)
{
	int len = bitlen / BITS_PER_BYTE;
	int i, words = len / sizeof(u32);

	for (i = 0; i < words; i++) {
		data->data[i] = 0xffffffff;
		bitlen -= sizeof(u32) * BITS_PER_BYTE;
	}

	if (len % sizeof(u32))
		data->data[i++] = nft_cmp_mask(bitlen);

	for (; i < 4; i++)
		data->data[i] = 0;
}

static int nft_cmp16_fast_init(const struct nft_ctx *ctx,
			       const struct nft_expr *expr,
			       const struct nlattr * const tb[])
{
	struct nft_cmp16_fast_expr *priv = nft_expr_priv(expr);
	struct nft_data_desc desc = {
		.type	= NFT_DATA_VALUE,
		.size	= sizeof(priv->data),
	};
	int err;

	err = nft_data_init(NULL, &priv->data, &desc, tb[NFTA_CMP_DATA]);
	if (err < 0)
		return err;

	err = nft_parse_register_load(tb[NFTA_CMP_SREG], &priv->sreg, desc.len);
	if (err < 0)
		return err;

	nft_cmp16_fast_mask(&priv->mask, desc.len * BITS_PER_BYTE);
	priv->inv = ntohl(nla_get_be32(tb[NFTA_CMP_OP])) != NFT_CMP_EQ;
	priv->len = desc.len;

	return 0;
}

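/* As with nft_cmp_fast, offload goes through the generic helper. */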
static int nft_cmp16_fast_offload(struct nft_offload_ctx *ctx,
				  struct nft_flow_rule *flow,
				  const struct nft_expr *expr)
{
	const struct nft_cmp16_fast_expr *priv = nft_expr_priv(expr);
	struct nft_cmp_expr cmp = {
		.data	= priv->data,
		.sreg	= priv->sreg,
		.len	= priv->len,
		.op	= priv->inv ? NFT_CMP_NEQ : NFT_CMP_EQ,
	};

	return __nft_cmp_offload(ctx, flow, &cmp);
}

static int nft_cmp16_fast_dump(struct sk_buff *skb,
			       const struct nft_expr *expr, bool reset)
{
	const struct nft_cmp16_fast_expr *priv = nft_expr_priv(expr);
	enum nft_cmp_ops op = priv->inv ? NFT_CMP_NEQ : NFT_CMP_EQ;

	if (nft_dump_register(skb, NFTA_CMP_SREG, priv->sreg))
		goto nla_put_failure;
	if (nla_put_be32(skb, NFTA_CMP_OP, htonl(op)))
		goto nla_put_failure;

	if (nft_data_dump(skb, NFTA_CMP_DATA, &priv->data,
			  NFT_DATA_VALUE, priv->len) < 0)
		goto nla_put_failure;
	return 0;

nla_put_failure:
	return -1;
}

const struct nft_expr_ops nft_cmp16_fast_ops = {
	.type		= &nft_cmp_type,
	.size		= NFT_EXPR_SIZE(sizeof(struct nft_cmp16_fast_expr)),
	.eval		= NULL,	/* inlined */
	.init		= nft_cmp16_fast_init,
	.dump		= nft_cmp16_fast_dump,
	.reduce		= NFT_REDUCE_READONLY,
	.offload	= nft_cmp16_fast_offload,
};

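/* Choose the cheapest implementation for the rule: EQ/NEQ on at most
 * four bytes uses nft_cmp_fast, EQ/NEQ on at most 16 bytes held in a
 * suitably aligned register uses nft_cmp16_fast, and everything else
 * falls back to the generic memcmp()-based expression.
 */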
static const struct nft_expr_ops *
nft_cmp_select_ops(const struct nft_ctx *ctx, const struct nlattr * const tb[])
{
	struct nft_data data;
	struct nft_data_desc desc = {
		.type	= NFT_DATA_VALUE,
		.size	= sizeof(data),
	};
	enum nft_cmp_ops op;
	u8 sreg;
	int err;

	if (tb[NFTA_CMP_SREG] == NULL ||
	    tb[NFTA_CMP_OP] == NULL ||
	    tb[NFTA_CMP_DATA] == NULL)
		return ERR_PTR(-EINVAL);

	op = ntohl(nla_get_be32(tb[NFTA_CMP_OP]));
	switch (op) {
	case NFT_CMP_EQ:
	case NFT_CMP_NEQ:
	case NFT_CMP_LT:
	case NFT_CMP_LTE:
	case NFT_CMP_GT:
	case NFT_CMP_GTE:
		break;
	default:
		return ERR_PTR(-EINVAL);
	}

	err = nft_data_init(NULL, &data, &desc, tb[NFTA_CMP_DATA]);
	if (err < 0)
		return ERR_PTR(err);

	sreg = ntohl(nla_get_be32(tb[NFTA_CMP_SREG]));

	if (op == NFT_CMP_EQ || op == NFT_CMP_NEQ) {
		if (desc.len <= sizeof(u32))
			return &nft_cmp_fast_ops;
		else if (desc.len <= sizeof(data) &&
			 ((sreg >= NFT_REG_1 && sreg <= NFT_REG_4) ||
			  (sreg >= NFT_REG32_00 && sreg <= NFT_REG32_12 && sreg % 2 == 0)))
			return &nft_cmp16_fast_ops;
	}
	return &nft_cmp_ops;
}

struct nft_expr_type nft_cmp_type __read_mostly = {
	.name		= "cmp",
	.select_ops	= nft_cmp_select_ops,
	.policy		= nft_cmp_policy,
	.maxattr	= NFTA_CMP_MAX,
	.owner		= THIS_MODULE,
};