// SPDX-License-Identifier: GPL-2.0-only
/*
 * Development of this code funded by Astaro AG (http://www.astaro.com/)
 */

#include <linux/kernel.h>
#include <linux/if_vlan.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/netlink.h>
#include <linux/netfilter.h>
#include <linux/netfilter/nf_tables.h>
#include <net/netfilter/nf_tables_core.h>
#include <net/netfilter/nf_tables.h>
#include <net/netfilter/nf_tables_offload.h>
/* For layer 4 checksum field offset. */
#include <linux/tcp.h>
#include <linux/udp.h>
#include <net/gre.h>
#include <linux/icmpv6.h>
#include <linux/ip.h>
#include <linux/ipv6.h>
#include <net/sctp/checksum.h>
static bool nft_payload_rebuild_vlan_hdr(const struct sk_buff *skb, int mac_off,
					 struct vlan_ethhdr *veth)
{
	if (skb_copy_bits(skb, mac_off, veth, ETH_HLEN))
		return false;

	veth->h_vlan_proto = skb->vlan_proto;
	veth->h_vlan_TCI = htons(skb_vlan_tag_get(skb));
	veth->h_vlan_encapsulated_proto = skb->protocol;

	return true;
}
/* add vlan header into the user buffer if the tag was removed by offloads */
static bool
nft_payload_copy_vlan(u32 *d, const struct sk_buff *skb, u8 offset, u8 len)
{
	int mac_off = skb_mac_header(skb) - skb->data;
	u8 *vlanh, *dst_u8 = (u8 *) d;
	struct vlan_ethhdr veth;
	u8 vlan_hlen = 0;

	if ((skb->protocol == htons(ETH_P_8021AD) ||
	     skb->protocol == htons(ETH_P_8021Q)) &&
	    offset >= VLAN_ETH_HLEN && offset < VLAN_ETH_HLEN + VLAN_HLEN)
		vlan_hlen += VLAN_HLEN;

	vlanh = (u8 *) &veth;
	if (offset < VLAN_ETH_HLEN + vlan_hlen) {
		u8 ethlen = len;

		if (vlan_hlen &&
		    skb_copy_bits(skb, mac_off, &veth, VLAN_ETH_HLEN) < 0)
			return false;
		else if (!nft_payload_rebuild_vlan_hdr(skb, mac_off, &veth))
			return false;

		if (offset + len > VLAN_ETH_HLEN + vlan_hlen)
			ethlen -= offset + len - VLAN_ETH_HLEN - vlan_hlen;

		memcpy(dst_u8, vlanh + offset - vlan_hlen, ethlen);

		len -= ethlen;
		if (len == 0)
			return true;

		dst_u8 += ethlen;
		offset = ETH_HLEN + vlan_hlen;
	} else {
		offset -= VLAN_HLEN + vlan_hlen;
	}

	return skb_copy_bits(skb, offset + mac_off, dst_u8, len) == 0;
}
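/* Example (illustrative): with a single 802.1Q tag stripped into skb
 * metadata by RX offload, a 2-byte read at byte offset 16 of the tagged
 * frame (h_vlan_encapsulated_proto) is served from the rebuilt
 * vlan_ethhdr above, while a read at offset >= VLAN_ETH_HLEN falls
 * through to skb_copy_bits() with the offset shifted back by VLAN_HLEN,
 * since the on-wire data no longer contains the 4-byte tag.
 */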
static int __nft_payload_inner_offset(struct nft_pktinfo *pkt)
{
	unsigned int thoff = nft_thoff(pkt);

	if (!(pkt->flags & NFT_PKTINFO_L4PROTO) || pkt->fragoff)
		return -1;

	switch (pkt->tprot) {
	case IPPROTO_UDP:
		pkt->inneroff = thoff + sizeof(struct udphdr);
		break;
	case IPPROTO_TCP: {
		struct tcphdr *th, _tcph;

		th = skb_header_pointer(pkt->skb, thoff, sizeof(_tcph), &_tcph);
		if (!th)
			return -1;

		pkt->inneroff = thoff + __tcp_hdrlen(th);
		}
		break;
	case IPPROTO_GRE: {
		u32 offset = sizeof(struct gre_base_hdr);
		struct gre_base_hdr *gre, _gre;
		__be16 version;

		gre = skb_header_pointer(pkt->skb, thoff, sizeof(_gre), &_gre);
		if (!gre)
			return -1;

		version = gre->flags & GRE_VERSION;
		switch (version) {
		case GRE_VERSION_0:
			if (gre->flags & GRE_ROUTING)
				return -1;

			if (gre->flags & GRE_CSUM) {
				offset += sizeof_field(struct gre_full_hdr, csum) +
					  sizeof_field(struct gre_full_hdr, reserved1);
			}
			if (gre->flags & GRE_KEY)
				offset += sizeof_field(struct gre_full_hdr, key);

			if (gre->flags & GRE_SEQ)
				offset += sizeof_field(struct gre_full_hdr, seq);
			break;
		default:
			return -1;
		}

		pkt->inneroff = thoff + offset;
		}
		break;
	case IPPROTO_IPIP:
		pkt->inneroff = thoff;
		break;
	default:
		return -1;
	}

	pkt->flags |= NFT_PKTINFO_INNER;

	return 0;
}
int nft_payload_inner_offset(const struct nft_pktinfo *pkt)
{
	if (!(pkt->flags & NFT_PKTINFO_INNER) &&
	    __nft_payload_inner_offset((struct nft_pktinfo *)pkt) < 0)
		return -1;

	return pkt->inneroff;
}
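/* The inner offset is computed at most once per packet: the result is
 * cached in pkt->inneroff and NFT_PKTINFO_INNER marks it valid, so
 * multiple inner-header expressions in one rule share a single parse.
 */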
void nft_payload_eval(const struct nft_expr *expr,
		      struct nft_regs *regs,
		      const struct nft_pktinfo *pkt)
{
	const struct nft_payload *priv = nft_expr_priv(expr);
	const struct sk_buff *skb = pkt->skb;
	u32 *dest = &regs->data[priv->dreg];
	int offset;

	/* Zero the trailing destination register bytes for short loads. */
	if (priv->len % NFT_REG32_SIZE)
		dest[priv->len / NFT_REG32_SIZE] = 0;

	switch (priv->base) {
	case NFT_PAYLOAD_LL_HEADER:
		if (!skb_mac_header_was_set(skb))
			goto err;

		if (skb_vlan_tag_present(skb)) {
			if (!nft_payload_copy_vlan(dest, skb,
						   priv->offset, priv->len))
				goto err;
			return;
		}
		offset = skb_mac_header(skb) - skb->data;
		break;
	case NFT_PAYLOAD_NETWORK_HEADER:
		offset = skb_network_offset(skb);
		break;
	case NFT_PAYLOAD_TRANSPORT_HEADER:
		if (!(pkt->flags & NFT_PKTINFO_L4PROTO) || pkt->fragoff)
			goto err;
		offset = nft_thoff(pkt);
		break;
	case NFT_PAYLOAD_INNER_HEADER:
		offset = nft_payload_inner_offset(pkt);
		if (offset < 0)
			goto err;
		break;
	default:
		WARN_ON_ONCE(1);
		goto err;
	}
	offset += priv->offset;

	if (skb_copy_bits(skb, offset, dest, priv->len) < 0)
		goto err;
	return;
err:
	regs->verdict.code = NFT_BREAK;
}
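/* Example (illustrative): a rule such as "tcp dport 80" compiles to
 * this expression with base NFT_PAYLOAD_TRANSPORT_HEADER, offset 2 and
 * len 2 (struct tcphdr, dest), followed by a cmp against htons(80).
 * NFT_BREAK simply makes the rule mismatch when the header is absent
 * or cannot be read.
 */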
static const struct nla_policy nft_payload_policy[NFTA_PAYLOAD_MAX + 1] = {
	[NFTA_PAYLOAD_SREG]		= { .type = NLA_U32 },
	[NFTA_PAYLOAD_DREG]		= { .type = NLA_U32 },
	[NFTA_PAYLOAD_BASE]		= { .type = NLA_U32 },
	[NFTA_PAYLOAD_OFFSET]		= NLA_POLICY_MAX(NLA_BE32, 255),
	[NFTA_PAYLOAD_LEN]		= NLA_POLICY_MAX(NLA_BE32, 255),
	[NFTA_PAYLOAD_CSUM_TYPE]	= { .type = NLA_U32 },
	[NFTA_PAYLOAD_CSUM_OFFSET]	= NLA_POLICY_MAX(NLA_BE32, 255),
	[NFTA_PAYLOAD_CSUM_FLAGS]	= { .type = NLA_U32 },
};
static int nft_payload_init(const struct nft_ctx *ctx,
			    const struct nft_expr *expr,
			    const struct nlattr * const tb[])
{
	struct nft_payload *priv = nft_expr_priv(expr);

	priv->base   = ntohl(nla_get_be32(tb[NFTA_PAYLOAD_BASE]));
	priv->offset = ntohl(nla_get_be32(tb[NFTA_PAYLOAD_OFFSET]));
	priv->len    = ntohl(nla_get_be32(tb[NFTA_PAYLOAD_LEN]));

	return nft_parse_register_store(ctx, tb[NFTA_PAYLOAD_DREG],
					&priv->dreg, NULL, NFT_DATA_VALUE,
					priv->len);
}
static int nft_payload_dump(struct sk_buff *skb,
			    const struct nft_expr *expr, bool reset)
{
	const struct nft_payload *priv = nft_expr_priv(expr);

	if (nft_dump_register(skb, NFTA_PAYLOAD_DREG, priv->dreg) ||
	    nla_put_be32(skb, NFTA_PAYLOAD_BASE, htonl(priv->base)) ||
	    nla_put_be32(skb, NFTA_PAYLOAD_OFFSET, htonl(priv->offset)) ||
	    nla_put_be32(skb, NFTA_PAYLOAD_LEN, htonl(priv->len)))
		goto nla_put_failure;
	return 0;

nla_put_failure:
	return -1;
}
static bool nft_payload_reduce(struct nft_regs_track *track,
			       const struct nft_expr *expr)
{
	const struct nft_payload *priv = nft_expr_priv(expr);
	const struct nft_payload *payload;

	if (!nft_reg_track_cmp(track, expr, priv->dreg)) {
		nft_reg_track_update(track, expr, priv->dreg, priv->len);
		return false;
	}

	payload = nft_expr_priv(track->regs[priv->dreg].selector);
	if (priv->base != payload->base ||
	    priv->offset != payload->offset ||
	    priv->len != payload->len) {
		nft_reg_track_update(track, expr, priv->dreg, priv->len);
		return false;
	}

	if (!track->regs[priv->dreg].bitwise)
		return true;

	return nft_expr_reduce_bitwise(track, expr);
}
static bool nft_payload_offload_mask(struct nft_offload_reg *reg,
				     u32 priv_len, u32 field_len)
{
	unsigned int remainder, delta, k;
	struct nft_data mask = {};
	__be32 remainder_mask;

	if (priv_len == field_len) {
		memset(&reg->mask, 0xff, priv_len);
		return true;
	} else if (priv_len > field_len) {
		return false;
	}

	memset(&mask, 0xff, field_len);
	remainder = priv_len % sizeof(u32);
	if (remainder) {
		k = priv_len / sizeof(u32);
		delta = field_len - priv_len;
		remainder_mask = htonl(~((1 << (delta * BITS_PER_BYTE)) - 1));
		mask.data[k] = (__force u32)remainder_mask;
	}

	memcpy(&reg->mask, &mask, field_len);

	return true;
}
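/* Worked example: matching 3 bytes of a 4-byte field gives
 * remainder = 3, k = 0 and delta = 1, so remainder_mask becomes
 * htonl(0xffffff00): the three wire-order bytes covered by the match
 * stay 0xff and the trailing, unmatched byte is cleared.
 */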
static int nft_payload_offload_ll(struct nft_offload_ctx *ctx,
				  struct nft_flow_rule *flow,
				  const struct nft_payload *priv)
{
	struct nft_offload_reg *reg = &ctx->regs[priv->dreg];

	switch (priv->offset) {
	case offsetof(struct ethhdr, h_source):
		if (!nft_payload_offload_mask(reg, priv->len, ETH_ALEN))
			return -EOPNOTSUPP;

		NFT_OFFLOAD_MATCH(FLOW_DISSECTOR_KEY_ETH_ADDRS, eth_addrs,
				  src, ETH_ALEN, reg);
		break;
	case offsetof(struct ethhdr, h_dest):
		if (!nft_payload_offload_mask(reg, priv->len, ETH_ALEN))
			return -EOPNOTSUPP;

		NFT_OFFLOAD_MATCH(FLOW_DISSECTOR_KEY_ETH_ADDRS, eth_addrs,
				  dst, ETH_ALEN, reg);
		break;
	case offsetof(struct ethhdr, h_proto):
		if (!nft_payload_offload_mask(reg, priv->len, sizeof(__be16)))
			return -EOPNOTSUPP;

		NFT_OFFLOAD_MATCH(FLOW_DISSECTOR_KEY_BASIC, basic,
				  n_proto, sizeof(__be16), reg);
		nft_offload_set_dependency(ctx, NFT_OFFLOAD_DEP_NETWORK);
		break;
	case offsetof(struct vlan_ethhdr, h_vlan_TCI):
		if (!nft_payload_offload_mask(reg, priv->len, sizeof(__be16)))
			return -EOPNOTSUPP;

		NFT_OFFLOAD_MATCH_FLAGS(FLOW_DISSECTOR_KEY_VLAN, vlan,
					vlan_tci, sizeof(__be16), reg,
					NFT_OFFLOAD_F_NETWORK2HOST);
		break;
	case offsetof(struct vlan_ethhdr, h_vlan_encapsulated_proto):
		if (!nft_payload_offload_mask(reg, priv->len, sizeof(__be16)))
			return -EOPNOTSUPP;

		NFT_OFFLOAD_MATCH(FLOW_DISSECTOR_KEY_VLAN, vlan,
				  vlan_tpid, sizeof(__be16), reg);
		nft_offload_set_dependency(ctx, NFT_OFFLOAD_DEP_NETWORK);
		break;
	case offsetof(struct vlan_ethhdr, h_vlan_TCI) + sizeof(struct vlan_hdr):
		if (!nft_payload_offload_mask(reg, priv->len, sizeof(__be16)))
			return -EOPNOTSUPP;

		NFT_OFFLOAD_MATCH_FLAGS(FLOW_DISSECTOR_KEY_CVLAN, cvlan,
					vlan_tci, sizeof(__be16), reg,
					NFT_OFFLOAD_F_NETWORK2HOST);
		break;
	case offsetof(struct vlan_ethhdr, h_vlan_encapsulated_proto) +
	     sizeof(struct vlan_hdr):
		if (!nft_payload_offload_mask(reg, priv->len, sizeof(__be16)))
			return -EOPNOTSUPP;

		NFT_OFFLOAD_MATCH(FLOW_DISSECTOR_KEY_CVLAN, cvlan,
				  vlan_tpid, sizeof(__be16), reg);
		nft_offload_set_dependency(ctx, NFT_OFFLOAD_DEP_NETWORK);
		break;
	default:
		return -EOPNOTSUPP;
	}

	return 0;
}
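/* The two trailing cases handle QinQ frames: the same TCI/proto fields
 * located one VLAN header (4 bytes) deeper map to the CVLAN flow
 * dissector key instead of the outer VLAN one.
 */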
static int nft_payload_offload_ip(struct nft_offload_ctx *ctx,
				  struct nft_flow_rule *flow,
				  const struct nft_payload *priv)
{
	struct nft_offload_reg *reg = &ctx->regs[priv->dreg];

	switch (priv->offset) {
	case offsetof(struct iphdr, saddr):
		if (!nft_payload_offload_mask(reg, priv->len,
					      sizeof(struct in_addr)))
			return -EOPNOTSUPP;

		NFT_OFFLOAD_MATCH(FLOW_DISSECTOR_KEY_IPV4_ADDRS, ipv4, src,
				  sizeof(struct in_addr), reg);
		nft_flow_rule_set_addr_type(flow, FLOW_DISSECTOR_KEY_IPV4_ADDRS);
		break;
	case offsetof(struct iphdr, daddr):
		if (!nft_payload_offload_mask(reg, priv->len,
					      sizeof(struct in_addr)))
			return -EOPNOTSUPP;

		NFT_OFFLOAD_MATCH(FLOW_DISSECTOR_KEY_IPV4_ADDRS, ipv4, dst,
				  sizeof(struct in_addr), reg);
		nft_flow_rule_set_addr_type(flow, FLOW_DISSECTOR_KEY_IPV4_ADDRS);
		break;
	case offsetof(struct iphdr, protocol):
		if (!nft_payload_offload_mask(reg, priv->len, sizeof(__u8)))
			return -EOPNOTSUPP;

		NFT_OFFLOAD_MATCH(FLOW_DISSECTOR_KEY_BASIC, basic, ip_proto,
				  sizeof(__u8), reg);
		nft_offload_set_dependency(ctx, NFT_OFFLOAD_DEP_TRANSPORT);
		break;
	default:
		return -EOPNOTSUPP;
	}

	return 0;
}
static int nft_payload_offload_ip6(struct nft_offload_ctx *ctx,
				   struct nft_flow_rule *flow,
				   const struct nft_payload *priv)
{
	struct nft_offload_reg *reg = &ctx->regs[priv->dreg];

	switch (priv->offset) {
	case offsetof(struct ipv6hdr, saddr):
		if (!nft_payload_offload_mask(reg, priv->len,
					      sizeof(struct in6_addr)))
			return -EOPNOTSUPP;

		NFT_OFFLOAD_MATCH(FLOW_DISSECTOR_KEY_IPV6_ADDRS, ipv6, src,
				  sizeof(struct in6_addr), reg);
		nft_flow_rule_set_addr_type(flow, FLOW_DISSECTOR_KEY_IPV6_ADDRS);
		break;
	case offsetof(struct ipv6hdr, daddr):
		if (!nft_payload_offload_mask(reg, priv->len,
					      sizeof(struct in6_addr)))
			return -EOPNOTSUPP;

		NFT_OFFLOAD_MATCH(FLOW_DISSECTOR_KEY_IPV6_ADDRS, ipv6, dst,
				  sizeof(struct in6_addr), reg);
		nft_flow_rule_set_addr_type(flow, FLOW_DISSECTOR_KEY_IPV6_ADDRS);
		break;
	case offsetof(struct ipv6hdr, nexthdr):
		if (!nft_payload_offload_mask(reg, priv->len, sizeof(__u8)))
			return -EOPNOTSUPP;

		NFT_OFFLOAD_MATCH(FLOW_DISSECTOR_KEY_BASIC, basic, ip_proto,
				  sizeof(__u8), reg);
		nft_offload_set_dependency(ctx, NFT_OFFLOAD_DEP_TRANSPORT);
		break;
	default:
		return -EOPNOTSUPP;
	}

	return 0;
}
static int nft_payload_offload_nh(struct nft_offload_ctx *ctx,
				  struct nft_flow_rule *flow,
				  const struct nft_payload *priv)
{
	int err;

	switch (ctx->dep.l3num) {
	case htons(ETH_P_IP):
		err = nft_payload_offload_ip(ctx, flow, priv);
		break;
	case htons(ETH_P_IPV6):
		err = nft_payload_offload_ip6(ctx, flow, priv);
		break;
	default:
		return -EOPNOTSUPP;
	}

	return err;
}
static int nft_payload_offload_tcp(struct nft_offload_ctx *ctx,
				   struct nft_flow_rule *flow,
				   const struct nft_payload *priv)
{
	struct nft_offload_reg *reg = &ctx->regs[priv->dreg];

	switch (priv->offset) {
	case offsetof(struct tcphdr, source):
		if (!nft_payload_offload_mask(reg, priv->len, sizeof(__be16)))
			return -EOPNOTSUPP;

		NFT_OFFLOAD_MATCH(FLOW_DISSECTOR_KEY_PORTS, tp, src,
				  sizeof(__be16), reg);
		break;
	case offsetof(struct tcphdr, dest):
		if (!nft_payload_offload_mask(reg, priv->len, sizeof(__be16)))
			return -EOPNOTSUPP;

		NFT_OFFLOAD_MATCH(FLOW_DISSECTOR_KEY_PORTS, tp, dst,
				  sizeof(__be16), reg);
		break;
	default:
		return -EOPNOTSUPP;
	}

	return 0;
}
static int nft_payload_offload_udp(struct nft_offload_ctx *ctx,
				   struct nft_flow_rule *flow,
				   const struct nft_payload *priv)
{
	struct nft_offload_reg *reg = &ctx->regs[priv->dreg];

	switch (priv->offset) {
	case offsetof(struct udphdr, source):
		if (!nft_payload_offload_mask(reg, priv->len, sizeof(__be16)))
			return -EOPNOTSUPP;

		NFT_OFFLOAD_MATCH(FLOW_DISSECTOR_KEY_PORTS, tp, src,
				  sizeof(__be16), reg);
		break;
	case offsetof(struct udphdr, dest):
		if (!nft_payload_offload_mask(reg, priv->len, sizeof(__be16)))
			return -EOPNOTSUPP;

		NFT_OFFLOAD_MATCH(FLOW_DISSECTOR_KEY_PORTS, tp, dst,
				  sizeof(__be16), reg);
		break;
	default:
		return -EOPNOTSUPP;
	}

	return 0;
}
static int nft_payload_offload_th(struct nft_offload_ctx *ctx,
				  struct nft_flow_rule *flow,
				  const struct nft_payload *priv)
{
	int err;

	switch (ctx->dep.protonum) {
	case IPPROTO_TCP:
		err = nft_payload_offload_tcp(ctx, flow, priv);
		break;
	case IPPROTO_UDP:
		err = nft_payload_offload_udp(ctx, flow, priv);
		break;
	default:
		return -EOPNOTSUPP;
	}

	return err;
}
static int nft_payload_offload(struct nft_offload_ctx *ctx,
			       struct nft_flow_rule *flow,
			       const struct nft_expr *expr)
{
	const struct nft_payload *priv = nft_expr_priv(expr);
	int err;

	switch (priv->base) {
	case NFT_PAYLOAD_LL_HEADER:
		err = nft_payload_offload_ll(ctx, flow, priv);
		break;
	case NFT_PAYLOAD_NETWORK_HEADER:
		err = nft_payload_offload_nh(ctx, flow, priv);
		break;
	case NFT_PAYLOAD_TRANSPORT_HEADER:
		err = nft_payload_offload_th(ctx, flow, priv);
		break;
	default:
		err = -EOPNOTSUPP;
		break;
	}

	return err;
}
static const struct nft_expr_ops nft_payload_ops = {
	.type		= &nft_payload_type,
	.size		= NFT_EXPR_SIZE(sizeof(struct nft_payload)),
	.eval		= nft_payload_eval,
	.init		= nft_payload_init,
	.dump		= nft_payload_dump,
	.reduce		= nft_payload_reduce,
	.offload	= nft_payload_offload,
};
const struct nft_expr_ops nft_payload_fast_ops = {
	.type		= &nft_payload_type,
	.size		= NFT_EXPR_SIZE(sizeof(struct nft_payload)),
	.eval		= nft_payload_eval,
	.init		= nft_payload_init,
	.dump		= nft_payload_dump,
	.reduce		= nft_payload_reduce,
	.offload	= nft_payload_offload,
};
void nft_payload_inner_eval(const struct nft_expr *expr, struct nft_regs *regs,
			    const struct nft_pktinfo *pkt,
			    struct nft_inner_tun_ctx *tun_ctx)
{
	const struct nft_payload *priv = nft_expr_priv(expr);
	const struct sk_buff *skb = pkt->skb;
	u32 *dest = &regs->data[priv->dreg];
	int offset;

	if (priv->len % NFT_REG32_SIZE)
		dest[priv->len / NFT_REG32_SIZE] = 0;

	switch (priv->base) {
	case NFT_PAYLOAD_TUN_HEADER:
		if (!(tun_ctx->flags & NFT_PAYLOAD_CTX_INNER_TUN))
			goto err;

		offset = tun_ctx->inner_tunoff;
		break;
	case NFT_PAYLOAD_LL_HEADER:
		if (!(tun_ctx->flags & NFT_PAYLOAD_CTX_INNER_LL))
			goto err;

		offset = tun_ctx->inner_lloff;
		break;
	case NFT_PAYLOAD_NETWORK_HEADER:
		if (!(tun_ctx->flags & NFT_PAYLOAD_CTX_INNER_NH))
			goto err;

		offset = tun_ctx->inner_nhoff;
		break;
	case NFT_PAYLOAD_TRANSPORT_HEADER:
		if (!(tun_ctx->flags & NFT_PAYLOAD_CTX_INNER_TH))
			goto err;

		offset = tun_ctx->inner_thoff;
		break;
	default:
		WARN_ON_ONCE(1);
		goto err;
	}
	offset += priv->offset;

	if (skb_copy_bits(skb, offset, dest, priv->len) < 0)
		goto err;

	return;
err:
	regs->verdict.code = NFT_BREAK;
}
static int nft_payload_inner_init(const struct nft_ctx *ctx,
				  const struct nft_expr *expr,
				  const struct nlattr * const tb[])
{
	struct nft_payload *priv = nft_expr_priv(expr);
	u32 base;

	base = ntohl(nla_get_be32(tb[NFTA_PAYLOAD_BASE]));
	switch (base) {
	case NFT_PAYLOAD_TUN_HEADER:
	case NFT_PAYLOAD_LL_HEADER:
	case NFT_PAYLOAD_NETWORK_HEADER:
	case NFT_PAYLOAD_TRANSPORT_HEADER:
		break;
	default:
		return -EOPNOTSUPP;
	}

	priv->base   = base;
	priv->offset = ntohl(nla_get_be32(tb[NFTA_PAYLOAD_OFFSET]));
	priv->len    = ntohl(nla_get_be32(tb[NFTA_PAYLOAD_LEN]));

	return nft_parse_register_store(ctx, tb[NFTA_PAYLOAD_DREG],
					&priv->dreg, NULL, NFT_DATA_VALUE,
					priv->len);
}
static const struct nft_expr_ops nft_payload_inner_ops = {
	.type		= &nft_payload_type,
	.size		= NFT_EXPR_SIZE(sizeof(struct nft_payload)),
	.init		= nft_payload_inner_init,
	.dump		= nft_payload_dump,
	/* direct call to nft_payload_inner_eval(). */
};
static inline void nft_csum_replace(__sum16 *sum, __wsum fsum, __wsum tsum)
{
	*sum = csum_fold(csum_add(csum_sub(~csum_unfold(*sum), fsum), tsum));
	if (*sum == 0)
		*sum = CSUM_MANGLED_0;
}
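/* This is roughly the incremental checksum update of RFC 1624, eqn. 3:
 * HC' = ~(~HC + ~m + m'), with fsum/tsum the partial sums over the old
 * and new bytes. A zero result is stored as CSUM_MANGLED_0 (0xffff) so
 * a mangled UDP checksum never turns into the "no checksum" value.
 */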
static bool nft_payload_udp_checksum(struct sk_buff *skb, unsigned int thoff)
{
	struct udphdr *uh, _uh;

	uh = skb_header_pointer(skb, thoff, sizeof(_uh), &_uh);
	if (!uh)
		return false;

	return (__force bool)uh->check;
}
static int nft_payload_l4csum_offset(const struct nft_pktinfo *pkt,
				     struct sk_buff *skb,
				     unsigned int *l4csum_offset)
{
	if (pkt->fragoff)
		return -1;

	switch (pkt->tprot) {
	case IPPROTO_TCP:
		*l4csum_offset = offsetof(struct tcphdr, check);
		break;
	case IPPROTO_UDP:
		if (!nft_payload_udp_checksum(skb, nft_thoff(pkt)))
			return -1;
		fallthrough;
	case IPPROTO_UDPLITE:
		*l4csum_offset = offsetof(struct udphdr, check);
		break;
	case IPPROTO_ICMPV6:
		*l4csum_offset = offsetof(struct icmp6hdr, icmp6_cksum);
		break;
	default:
		return -1;
	}

	*l4csum_offset += nft_thoff(pkt);
	return 0;
}
static int nft_payload_csum_sctp(struct sk_buff *skb, int offset)
{
	struct sctphdr *sh;

	if (skb_ensure_writable(skb, offset + sizeof(*sh)))
		return -1;

	sh = (struct sctphdr *)(skb->data + offset);
	sh->checksum = sctp_compute_cksum(skb, offset);
	skb->ip_summed = CHECKSUM_UNNECESSARY;
	return 0;
}
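/* SCTP uses CRC32c rather than the 16-bit internet checksum, so no
 * incremental fixup is possible: the checksum is recomputed over the
 * whole packet from 'offset' onwards.
 */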
static int nft_payload_l4csum_update(const struct nft_pktinfo *pkt,
				     struct sk_buff *skb,
				     __wsum fsum, __wsum tsum)
{
	unsigned int l4csum_offset;
	__sum16 sum;

	/* If we cannot determine the layer 4 checksum offset or this packet
	 * doesn't require layer 4 checksum recalculation, skip this packet.
	 */
	if (nft_payload_l4csum_offset(pkt, skb, &l4csum_offset) < 0)
		return 0;

	if (skb_copy_bits(skb, l4csum_offset, &sum, sizeof(sum)) < 0)
		return -1;

	/* Checksum mangling for an arbitrary amount of bytes, based on
	 * inet_proto_csum_replace*() functions.
	 */
	if (skb->ip_summed != CHECKSUM_PARTIAL) {
		nft_csum_replace(&sum, fsum, tsum);
		if (skb->ip_summed == CHECKSUM_COMPLETE) {
			skb->csum = ~csum_add(csum_sub(~(skb->csum), fsum),
					      tsum);
		}
	} else {
		sum = ~csum_fold(csum_add(csum_sub(csum_unfold(sum), fsum),
					  tsum));
	}

	if (skb_ensure_writable(skb, l4csum_offset + sizeof(sum)) ||
	    skb_store_bits(skb, l4csum_offset, &sum, sizeof(sum)) < 0)
		return -1;

	return 0;
}
static int nft_payload_csum_inet(struct sk_buff *skb, const u32 *src,
				 __wsum fsum, __wsum tsum, int csum_offset)
{
	__sum16 sum;

	if (skb_copy_bits(skb, csum_offset, &sum, sizeof(sum)) < 0)
		return -1;

	nft_csum_replace(&sum, fsum, tsum);
	if (skb_ensure_writable(skb, csum_offset + sizeof(sum)) ||
	    skb_store_bits(skb, csum_offset, &sum, sizeof(sum)) < 0)
		return -1;

	return 0;
}
struct nft_payload_set {
	enum nft_payload_bases	base:8;
	u8			offset;
	u8			len;
	u8			sreg;
	u8			csum_type;
	u8			csum_offset;
	u8			csum_flags;
};
static void nft_payload_set_eval(const struct nft_expr *expr,
				 struct nft_regs *regs,
				 const struct nft_pktinfo *pkt)
{
	const struct nft_payload_set *priv = nft_expr_priv(expr);
	struct sk_buff *skb = pkt->skb;
	const u32 *src = &regs->data[priv->sreg];
	int offset, csum_offset;
	__wsum fsum, tsum;

	switch (priv->base) {
	case NFT_PAYLOAD_LL_HEADER:
		if (!skb_mac_header_was_set(skb))
			goto err;
		offset = skb_mac_header(skb) - skb->data;
		break;
	case NFT_PAYLOAD_NETWORK_HEADER:
		offset = skb_network_offset(skb);
		break;
	case NFT_PAYLOAD_TRANSPORT_HEADER:
		if (!(pkt->flags & NFT_PKTINFO_L4PROTO) || pkt->fragoff)
			goto err;
		offset = nft_thoff(pkt);
		break;
	case NFT_PAYLOAD_INNER_HEADER:
		offset = nft_payload_inner_offset(pkt);
		if (offset < 0)
			goto err;
		break;
	default:
		WARN_ON_ONCE(1);
		goto err;
	}

	csum_offset = offset + priv->csum_offset;
	offset += priv->offset;

	if ((priv->csum_type == NFT_PAYLOAD_CSUM_INET || priv->csum_flags) &&
	    ((priv->base != NFT_PAYLOAD_TRANSPORT_HEADER &&
	      priv->base != NFT_PAYLOAD_INNER_HEADER) ||
	     skb->ip_summed != CHECKSUM_PARTIAL)) {
		fsum = skb_checksum(skb, offset, priv->len, 0);
		tsum = csum_partial(src, priv->len, 0);

		if (priv->csum_type == NFT_PAYLOAD_CSUM_INET &&
		    nft_payload_csum_inet(skb, src, fsum, tsum, csum_offset))
			goto err;

		if (priv->csum_flags &&
		    nft_payload_l4csum_update(pkt, skb, fsum, tsum) < 0)
			goto err;
	}

	if (skb_ensure_writable(skb, max(offset + priv->len, 0)) ||
	    skb_store_bits(skb, offset, src, priv->len) < 0)
		goto err;

	if (priv->csum_type == NFT_PAYLOAD_CSUM_SCTP &&
	    pkt->tprot == IPPROTO_SCTP &&
	    skb->ip_summed != CHECKSUM_PARTIAL) {
		if (pkt->fragoff == 0 &&
		    nft_payload_csum_sctp(skb, nft_thoff(pkt)))
			goto err;
	}

	return;
err:
	regs->verdict.code = NFT_BREAK;
}
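/* Example (illustrative): "ip ttl set 64" becomes a payload set
 * expression with base NFT_PAYLOAD_NETWORK_HEADER, offset 8, len 1
 * (struct iphdr, ttl), csum_type NFT_PAYLOAD_CSUM_INET and csum_offset
 * 10 (struct iphdr, check), so the IPv4 header checksum is fixed up
 * incrementally right alongside the store.
 */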
static int nft_payload_set_init(const struct nft_ctx *ctx,
				const struct nft_expr *expr,
				const struct nlattr * const tb[])
{
	struct nft_payload_set *priv = nft_expr_priv(expr);
	u32 csum_offset, csum_type = NFT_PAYLOAD_CSUM_NONE;
	int err;

	priv->base   = ntohl(nla_get_be32(tb[NFTA_PAYLOAD_BASE]));
	priv->offset = ntohl(nla_get_be32(tb[NFTA_PAYLOAD_OFFSET]));
	priv->len    = ntohl(nla_get_be32(tb[NFTA_PAYLOAD_LEN]));

	if (tb[NFTA_PAYLOAD_CSUM_TYPE])
		csum_type = ntohl(nla_get_be32(tb[NFTA_PAYLOAD_CSUM_TYPE]));
	if (tb[NFTA_PAYLOAD_CSUM_OFFSET]) {
		err = nft_parse_u32_check(tb[NFTA_PAYLOAD_CSUM_OFFSET], U8_MAX,
					  &csum_offset);
		if (err < 0)
			return err;

		priv->csum_offset = csum_offset;
	}
	if (tb[NFTA_PAYLOAD_CSUM_FLAGS]) {
		u32 flags;

		flags = ntohl(nla_get_be32(tb[NFTA_PAYLOAD_CSUM_FLAGS]));
		if (flags & ~NFT_PAYLOAD_L4CSUM_PSEUDOHDR)
			return -EINVAL;

		priv->csum_flags = flags;
	}

	switch (csum_type) {
	case NFT_PAYLOAD_CSUM_NONE:
	case NFT_PAYLOAD_CSUM_INET:
		break;
	case NFT_PAYLOAD_CSUM_SCTP:
		if (priv->base != NFT_PAYLOAD_TRANSPORT_HEADER)
			return -EINVAL;

		if (priv->csum_offset != offsetof(struct sctphdr, checksum))
			return -EINVAL;
		break;
	default:
		return -EOPNOTSUPP;
	}
	priv->csum_type = csum_type;

	return nft_parse_register_load(tb[NFTA_PAYLOAD_SREG], &priv->sreg,
				       priv->len);
}
static int nft_payload_set_dump(struct sk_buff *skb,
				const struct nft_expr *expr, bool reset)
{
	const struct nft_payload_set *priv = nft_expr_priv(expr);

	if (nft_dump_register(skb, NFTA_PAYLOAD_SREG, priv->sreg) ||
	    nla_put_be32(skb, NFTA_PAYLOAD_BASE, htonl(priv->base)) ||
	    nla_put_be32(skb, NFTA_PAYLOAD_OFFSET, htonl(priv->offset)) ||
	    nla_put_be32(skb, NFTA_PAYLOAD_LEN, htonl(priv->len)) ||
	    nla_put_be32(skb, NFTA_PAYLOAD_CSUM_TYPE, htonl(priv->csum_type)) ||
	    nla_put_be32(skb, NFTA_PAYLOAD_CSUM_OFFSET,
			 htonl(priv->csum_offset)) ||
	    nla_put_be32(skb, NFTA_PAYLOAD_CSUM_FLAGS, htonl(priv->csum_flags)))
		goto nla_put_failure;
	return 0;

nla_put_failure:
	return -1;
}
static bool nft_payload_set_reduce(struct nft_regs_track *track,
				   const struct nft_expr *expr)
{
	int i;

	for (i = 0; i < NFT_REG32_NUM; i++) {
		if (!track->regs[i].selector)
			continue;

		if (track->regs[i].selector->ops != &nft_payload_ops &&
		    track->regs[i].selector->ops != &nft_payload_fast_ops)
			continue;

		__nft_reg_track_cancel(track, i);
	}

	return false;
}
static const struct nft_expr_ops nft_payload_set_ops = {
	.type		= &nft_payload_type,
	.size		= NFT_EXPR_SIZE(sizeof(struct nft_payload_set)),
	.eval		= nft_payload_set_eval,
	.init		= nft_payload_set_init,
	.dump		= nft_payload_set_dump,
	.reduce		= nft_payload_set_reduce,
};
static const struct nft_expr_ops *
nft_payload_select_ops(const struct nft_ctx *ctx,
		       const struct nlattr * const tb[])
{
	enum nft_payload_bases base;
	unsigned int offset, len;
	int err;

	if (tb[NFTA_PAYLOAD_BASE] == NULL ||
	    tb[NFTA_PAYLOAD_OFFSET] == NULL ||
	    tb[NFTA_PAYLOAD_LEN] == NULL)
		return ERR_PTR(-EINVAL);

	base = ntohl(nla_get_be32(tb[NFTA_PAYLOAD_BASE]));
	switch (base) {
	case NFT_PAYLOAD_LL_HEADER:
	case NFT_PAYLOAD_NETWORK_HEADER:
	case NFT_PAYLOAD_TRANSPORT_HEADER:
	case NFT_PAYLOAD_INNER_HEADER:
		break;
	default:
		return ERR_PTR(-EOPNOTSUPP);
	}

	if (tb[NFTA_PAYLOAD_SREG] != NULL) {
		if (tb[NFTA_PAYLOAD_DREG] != NULL)
			return ERR_PTR(-EINVAL);
		return &nft_payload_set_ops;
	}

	if (tb[NFTA_PAYLOAD_DREG] == NULL)
		return ERR_PTR(-EINVAL);

	err = nft_parse_u32_check(tb[NFTA_PAYLOAD_OFFSET], U8_MAX, &offset);
	if (err < 0)
		return ERR_PTR(err);

	err = nft_parse_u32_check(tb[NFTA_PAYLOAD_LEN], U8_MAX, &len);
	if (err < 0)
		return ERR_PTR(err);

	if (len <= 4 && is_power_of_2(len) && IS_ALIGNED(offset, len) &&
	    base != NFT_PAYLOAD_LL_HEADER && base != NFT_PAYLOAD_INNER_HEADER)
		return &nft_payload_fast_ops;
	else
		return &nft_payload_ops;
}
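/* Example: an aligned 1, 2 or 4 byte load from the network or transport
 * header (say "ip protocol": offset 9, len 1) selects
 * nft_payload_fast_ops; link-layer and inner-header loads always take
 * the generic path, since they may need VLAN tag rebuilding or inner
 * tunnel offset computation.
 */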
struct nft_expr_type nft_payload_type __read_mostly = {
	.name		= "payload",
	.select_ops	= nft_payload_select_ops,
	.inner_ops	= &nft_payload_inner_ops,
	.policy		= nft_payload_policy,
	.maxattr	= NFTA_PAYLOAD_MAX,
	.owner		= THIS_MODULE,
};