1 // SPDX-License-Identifier: GPL-2.0
2 /* xfrm_iptfs: IPTFS encapsulation support
6 * Copyright (c) 2022, LabN Consulting, L.L.C.
10 #include <linux/kernel.h>
11 #include <linux/icmpv6.h>
12 #include <linux/skbuff_ref.h>
15 #include <net/ip6_route.h>
16 #include <net/inet_ecn.h>
19 #include <crypto/aead.h>
21 #include "xfrm_inout.h"
22 #include "trace_iptfs.h"
24 /* IPTFS encap (header) values. */
25 #define IPTFS_SUBTYPE_BASIC 0
26 #define IPTFS_SUBTYPE_CC 1
28 /* ----------------------------------------------- */
29 /* IP-TFS default SA values (tunnel egress/dir-in) */
30 /* ----------------------------------------------- */
33 * define IPTFS_DEFAULT_DROP_TIME_USECS - default drop time
35 * The default IPTFS drop time in microseconds. The drop time is the amount of
36 * time before a missing out-of-order IPTFS tunnel packet is considered lost.
37 * See also the reorder window.
41 #define IPTFS_DEFAULT_DROP_TIME_USECS 1000000
44 * define IPTFS_DEFAULT_REORDER_WINDOW - default reorder window size
46 * The default IPTFS reorder window size. The reorder window size dictates the
47 * maximum number of IPTFS tunnel packets in a sequence that may arrive out of
50 * Default 3. (tcp folks suggested)
52 #define IPTFS_DEFAULT_REORDER_WINDOW 3
54 /* ------------------------------------------------ */
55 /* IPTFS default SA values (tunnel ingress/dir-out) */
56 /* ------------------------------------------------ */
59 * define IPTFS_DEFAULT_INIT_DELAY_USECS - default initial output delay
61 * The initial output delay is the amount of time prior to servicing the output
62 * queue after queueing the first packet on said queue. This applies anytime the
63 * output queue was previously empty.
67 #define IPTFS_DEFAULT_INIT_DELAY_USECS 0
70 * define IPTFS_DEFAULT_MAX_QUEUE_SIZE - default max output queue size.
72 * The default IPTFS max output queue size in octets. The output queue is where
73 * received packets destined for output over an IPTFS tunnel are stored prior to
74 * being output in aggregated/fragmented form over the IPTFS tunnel.
78 #define IPTFS_DEFAULT_MAX_QUEUE_SIZE (1024 * 10240)
80 /* Assumed: skb->head is cache aligned.
82 * L2 Header resv: Arrange for cacheline to start at skb->data - 16 to keep the
83 * to-be-pushed L2 header in the same cacheline as resulting `skb->data` (i.e.,
84 * the L3 header). If cacheline size is > 64 then skb->data + pushed L2 will all
85 * be in a single cacheline if we simply reserve 64 bytes.
87 * L3 Header resv: For L3+L2 headers (i.e., skb->data points at the IPTFS payload)
88 * we want `skb->data` to be cacheline aligned and all pushed L2L3 headers will
89 * be in their own cacheline[s]. 128 works for cachelines up to 128 bytes, for
90 * any larger cacheline sizes the pushed headers will simply share the cacheline
91 * with the start of the IPTFS payload (skb->data).
93 #define XFRM_IPTFS_MIN_L3HEADROOM 128
94 #define XFRM_IPTFS_MIN_L2HEADROOM (L1_CACHE_BYTES > 64 ? 64 : 64 + 16)
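
/* For illustration only: a minimal sketch of how the two reservation values
 * above are meant to be combined (the real selection is done in
 * iptfs_alloc_skb() below); the helper name and arguments are hypothetical.
 */
static inline u32 iptfs_example_headroom(bool l3resv, u32 dev_resv, u32 xfrm_hlen)
{
	u32 resv;

	if (!l3resv)
		/* rx/egress: only an L2 header will be pushed */
		return XFRM_IPTFS_MIN_L2HEADROOM;

	/* tx/ingress: device L2 space plus xfrm L3 overhead, bumped to the
	 * minimum and rounded up to a whole cacheline.
	 */
	resv = dev_resv + xfrm_hlen;
	resv = max(resv, (u32)XFRM_IPTFS_MIN_L3HEADROOM);
	return L1_CACHE_ALIGN(resv);
}
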
96 /* Min to try to share outer iptfs skb data vs copying into new skb */
97 #define IPTFS_PKT_SHARE_MIN 129
99 #define NSECS_IN_USEC 1000
101 #define IPTFS_HRTIMER_MODE HRTIMER_MODE_REL_SOFT
104 * struct xfrm_iptfs_config - configuration for the IPTFS tunnel.
105 * @pkt_size: size of the outer IP packet. 0 to use interface and MTU discovery,
106 * otherwise the user specified value.
107 * @max_queue_size: The maximum number of octets allowed to be queued to be sent
108 * over the IPTFS SA. The queue size is measured as the size of all the
110 * @reorder_win_size: the number of slots in the reorder window, thus the number of
111 * packets that may arrive out of order.
112 * @dont_frag: true to inhibit fragmenting across IPTFS outer packets.
114 struct xfrm_iptfs_config {
115 u32 pkt_size; /* outer_packet_size or 0 */
116 u32 max_queue_size; /* octets */
117 u16 reorder_win_size;
127 * struct xfrm_iptfs_data - mode specific xfrm state.
128 * @cfg: IPTFS tunnel config.
129 * @x: owning SA (xfrm_state).
130 * @queue: queued user packets to send.
131 * @queue_size: number of octets on queue (sum of packet sizes).
132 * @ecn_queue_size: octets above which to ECN CE mark.
133 * @init_delay_ns: nanoseconds to wait to send initial IPTFS packet.
134 * @iptfs_timer: output timer.
135 * @iptfs_settime: time the output timer was set.
136 * @payload_mtu: max payload size.
137 * @w_seq_set: true after first seq received.
138 * @w_wantseq: waiting for this seq number as next to process (in order).
139 * @w_saved: the saved buf array (reorder window).
140 * @w_savedlen: the saved len (not size).
141 * @drop_lock: lock to protect reorder queue.
142 * @drop_timer: timer for considering next packet lost.
143 * @drop_time_ns: timer interval in nanoseconds.
144 * @ra_newskb: new pkt being reassembled.
145 * @ra_wantseq: expected next sequence for reassembly.
146 * @ra_runt: last pkt bytes from very end of last skb.
147 * @ra_runtlen: size of ra_runt.
149 struct xfrm_iptfs_data {
150 struct xfrm_iptfs_config cfg;
152 /* Ingress User Input */
153 struct xfrm_state *x; /* owning state */
154 struct sk_buff_head queue; /* output queue */
156 u32 queue_size; /* octets */
157 u32 ecn_queue_size; /* octets above which ECN mark */
158 u64 init_delay_ns; /* nanoseconds */
159 struct hrtimer iptfs_timer; /* output timer */
160 time64_t iptfs_settime; /* time timer was set */
161 u32 payload_mtu; /* max payload size */
163 /* Tunnel input reordering */
164 bool w_seq_set; /* true after first seq received */
165 u64 w_wantseq; /* expected next sequence */
166 struct skb_wseq *w_saved; /* the saved buf array */
167 u32 w_savedlen; /* the saved len (not size) */
168 spinlock_t drop_lock;
169 struct hrtimer drop_timer;
172 /* Tunnel input reassembly */
173 struct sk_buff *ra_newskb; /* new pkt being reassembled */
174 u64 ra_wantseq; /* expected next sequence */
175 u8 ra_runt[6]; /* last pkt bytes from last skb */
176 u8 ra_runtlen; /* count of ra_runt */
179 static u32 __iptfs_get_inner_mtu(struct xfrm_state *x, int outer_mtu);
180 static enum hrtimer_restart iptfs_delay_timer(struct hrtimer *me);
181 static enum hrtimer_restart iptfs_drop_timer(struct hrtimer *me);
183 /* ================= */
184 /* Utility Functions */
185 /* ================= */
187 #ifdef TRACEPOINTS_ENABLED
188 static u32 __trace_ip_proto(struct iphdr *iph)
190 if (iph->version == 4)
191 return iph->protocol;
192 return ((struct ipv6hdr *)iph)->nexthdr;
195 static u32 __trace_ip_proto_seq(struct iphdr *iph)
200 if (iph->version == 4) {
201 nexthdr = (void *)(iph + 1);
202 protocol = iph->protocol;
203 } else if (iph->version == 6) {
204 nexthdr = (void *)(((struct ipv6hdr *)(iph)) + 1);
205 protocol = ((struct ipv6hdr *)(iph))->nexthdr;
209 return ntohs(((struct icmphdr *)nexthdr)->un.echo.sequence);
211 return ntohs(((struct icmp6hdr *)nexthdr)->icmp6_sequence);
213 return ntohl(((struct tcphdr *)nexthdr)->seq);
215 return ntohs(((struct udphdr *)nexthdr)->source);
220 #endif /*TRACEPOINTS_ENABLED*/
222 static u64 __esp_seq(struct sk_buff *skb)
224 u64 seq = ntohl(XFRM_SKB_CB(skb)->seq.input.low);
226 return seq | (u64)ntohl(XFRM_SKB_CB(skb)->seq.input.hi) << 32;
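
/* For illustration: the 64-bit ESN above is just hi:lo. A hypothetical helper
 * with explicit host-order arguments:
 */
static inline u64 iptfs_example_esn(u32 hi, u32 lo)
{
	/* e.g. hi = 1, lo = 5 yields 0x100000005ULL */
	return ((u64)hi << 32) | lo;
}
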
229 /* ======================= */
230 /* IPTFS SK_BUFF Functions */
231 /* ======================= */
234 * iptfs_alloc_skb() - Allocate a new `skb`.
235 * @tpl: the skb to copy required meta-data from.
236 * @len: the linear length of the head data, zero is fine.
237 * @l3resv: true if skb reserve needs to support pushing L3 headers
239 * A new `skb` is allocated and required meta-data is copied from `tpl`, the
240 * head data is sized to `len` + reserved space set according to the @l3resv
243 * When @l3resv is false, resv is XFRM_IPTFS_MIN_L2HEADROOM, which arranges for
244 * `skb->data - 16` to be cacheline aligned, a good guess that places the
245 * to-be-pushed L2 header at the start of a cacheline.
247 * Otherwise, @l3resv is true and resv is set to the correct reserved space for
248 * dst->dev plus the calculated L3 overhead for the xfrm dst or
249 * XFRM_IPTFS_MIN_L3HEADROOM whichever is larger. This is then cache aligned so
250 * that all the headers will commonly fall in a cacheline when possible.
252 * l3resv=true is used on tunnel ingress (tx), because we need to reserve for
253 * the new IPTFS packet (i.e., L2+L3 headers). On tunnel egress (rx) the data
254 * being copied into the skb includes the user L3 headers already so we only
255 * need to reserve for L2.
257 * Return: the new skb or NULL.
259 static struct sk_buff *iptfs_alloc_skb(struct sk_buff *tpl, u32 len, bool l3resv)
265 resv = XFRM_IPTFS_MIN_L2HEADROOM;
267 struct dst_entry *dst = skb_dst(tpl);
269 resv = LL_RESERVED_SPACE(dst->dev) + dst->header_len;
270 resv = max(resv, XFRM_IPTFS_MIN_L3HEADROOM);
271 resv = L1_CACHE_ALIGN(resv);
274 skb = alloc_skb(len + resv, GFP_ATOMIC | __GFP_NOWARN);
278 skb_reserve(skb, resv);
281 /* xfrm_input resume needs dev and xfrm ext from tunnel pkt */
283 __skb_ext_copy(skb, tpl);
286 /* dropped by xfrm_input, used by xfrm_output */
287 skb_dst_copy(skb, tpl);
293 * iptfs_skb_head_to_frag() - initialize a skb_frag_t based on skb head data
294 * @skb: skb with the head data
295 * @frag: frag to initialize
297 static void iptfs_skb_head_to_frag(const struct sk_buff *skb, skb_frag_t *frag)
299 struct page *page = virt_to_head_page(skb->data);
300 unsigned char *addr = (unsigned char *)page_address(page);
302 skb_frag_fill_page_desc(frag, page, skb->data - addr, skb_headlen(skb));
306 * struct iptfs_skb_frag_walk - use to track a walk through fragments
307 * @fragi: current fragment index
308 * @past: length of data in fragments before @fragi
309 * @total: length of data in all fragments
310 * @nr_frags: number of fragments present in array
311 * @initial_offset: the value passed in to skb_prepare_frag_walk()
312 * @frags: the page fragments inc. room for head page
313 * @pp_recycle: copy of skb->pp_recycle
315 struct iptfs_skb_frag_walk {
321 skb_frag_t frags[MAX_SKB_FRAGS + 1];
326 * iptfs_skb_prepare_frag_walk() - initialize a frag walk over an skb.
327 * @skb: the skb to walk.
328 * @initial_offset: start the walk @initial_offset into the skb.
329 * @walk: the walk to initialize
331 * Future calls to iptfs_skb_add_frags() will expect the @offset value to be
332 * at least @initial_offset.
334 static void iptfs_skb_prepare_frag_walk(struct sk_buff *skb, u32 initial_offset,
335 struct iptfs_skb_frag_walk *walk)
337 struct skb_shared_info *shinfo = skb_shinfo(skb);
338 skb_frag_t *frag, *from;
341 walk->initial_offset = initial_offset;
346 walk->pp_recycle = skb->pp_recycle;
348 if (skb->head_frag) {
349 if (initial_offset >= skb_headlen(skb)) {
350 initial_offset -= skb_headlen(skb);
352 frag = &walk->frags[walk->nr_frags++];
353 iptfs_skb_head_to_frag(skb, frag);
354 frag->offset += initial_offset;
355 frag->len -= initial_offset;
356 walk->total += frag->len;
360 initial_offset -= skb_headlen(skb);
363 for (i = 0; i < shinfo->nr_frags; i++) {
364 from = &shinfo->frags[i];
365 if (initial_offset >= from->len) {
366 initial_offset -= from->len;
369 frag = &walk->frags[walk->nr_frags++];
371 if (initial_offset) {
372 frag->offset += initial_offset;
373 frag->len -= initial_offset;
376 walk->total += frag->len;
380 static u32 iptfs_skb_reset_frag_walk(struct iptfs_skb_frag_walk *walk,
383 /* Adjust offset to refer to internal walk values */
384 offset -= walk->initial_offset;
386 /* Get to the correct fragment for offset */
387 while (offset < walk->past) {
388 walk->past -= walk->frags[--walk->fragi].len;
389 if (offset >= walk->past)
392 while (offset >= walk->past + walk->frags[walk->fragi].len)
393 walk->past += walk->frags[walk->fragi++].len;
395 /* offset now relative to this current frag */
396 offset -= walk->past;
401 * iptfs_skb_can_add_frags() - check if ok to add frags from walk to skb
402 * @skb: skb to check for adding frags to
403 * @walk: the walk that will be used as source for frags.
404 * @offset: offset from beginning of original skb to start from.
405 * @len: amount of data to add frag references to in @skb.
407 * Return: true if ok to add frags.
409 static bool iptfs_skb_can_add_frags(const struct sk_buff *skb,
410 struct iptfs_skb_frag_walk *walk,
413 struct skb_shared_info *shinfo = skb_shinfo(skb);
414 u32 fragi, nr_frags, fraglen;
416 if (skb_has_frag_list(skb) || skb->pp_recycle != walk->pp_recycle)
419 /* Make offset relative to current frag after setting that */
420 offset = iptfs_skb_reset_frag_walk(walk, offset);
422 /* Verify we have array space for the fragments we need to add */
424 nr_frags = shinfo->nr_frags;
425 while (len && fragi < walk->nr_frags) {
426 skb_frag_t *frag = &walk->frags[fragi];
433 if (++nr_frags > MAX_SKB_FRAGS)
440 /* We may not copy all @len but what we have will fit. */
445 * iptfs_skb_add_frags() - add a range of fragment references into an skb
446 * @skb: skb to add references into
447 * @walk: the walk to add referenced fragments from.
448 * @offset: offset from beginning of original skb to start from.
449 * @len: amount of data to add frag references to in @skb.
451 * iptfs_skb_can_add_frags() should be called before this function to verify
452 * that the destination @skb is compatible with the walk and has space in the
453 * array for the to be added frag references.
455 * Return: The number of bytes not added to @skb b/c we reached the end of the
456 * walk before adding all of @len.
458 static int iptfs_skb_add_frags(struct sk_buff *skb,
459 struct iptfs_skb_frag_walk *walk, u32 offset,
462 struct skb_shared_info *shinfo = skb_shinfo(skb);
465 if (!walk->nr_frags || offset >= walk->total + walk->initial_offset)
468 /* make offset relative to current frag after setting that */
469 offset = iptfs_skb_reset_frag_walk(walk, offset);
471 while (len && walk->fragi < walk->nr_frags) {
472 skb_frag_t *frag = &walk->frags[walk->fragi];
473 skb_frag_t *tofrag = &shinfo->frags[shinfo->nr_frags];
477 tofrag->offset += offset;
478 tofrag->len -= offset;
481 __skb_frag_ref(tofrag);
484 /* see if we are done */
485 fraglen = tofrag->len;
489 skb->data_len += len;
492 /* advance to next source fragment */
493 len -= fraglen; /* careful, use dst bv_len */
494 skb->len += fraglen; /* careful, " " " */
495 skb->data_len += fraglen; /* careful, " " " */
496 walk->past += frag->len; /* careful, use src bv_len */
502 /* ================================== */
503 /* IPTFS Trace Event Definitions */
504 /* ================================== */
506 #define CREATE_TRACE_POINTS
507 #include "trace_iptfs.h"
509 /* ================================== */
510 /* IPTFS Receiving (egress) Functions */
511 /* ================================== */
514 * iptfs_pskb_add_frags() - Create and add frags into a new sk_buff.
515 * @tpl: template to create new skb from.
516 * @walk: The source for fragments to add.
517 * @off: The offset into @walk to add frags from, also used with @st and
519 * @len: The length of data to add covering frags from @walk into the new skb.
520 *       This must be >= @copy_len.
521 * @st: The sequence state to copy from into the new head skb.
522 * @copy_len: Copy @copy_len bytes from @st at offset @off into the new skb
525 * Create a new sk_buff `skb` using the template @tpl. Copy @copy_len bytes from
526 * @st into the new skb linear space, and then add shared fragments from the
527 * frag walk for the remaining @len of data (i.e., @len - @copy_len bytes).
529 * Return: The newly allocated sk_buff `skb` or NULL if an error occurs.
531 static struct sk_buff *
532 iptfs_pskb_add_frags(struct sk_buff *tpl, struct iptfs_skb_frag_walk *walk,
533 u32 off, u32 len, struct skb_seq_state *st, u32 copy_len)
537 skb = iptfs_alloc_skb(tpl, copy_len, false);
541 /* this should not normally be happening */
542 if (!iptfs_skb_can_add_frags(skb, walk, off + copy_len,
549 skb_copy_seq_read(st, off, skb_put(skb, copy_len), copy_len)) {
550 XFRM_INC_STATS(dev_net(st->root_skb->dev),
551 LINUX_MIB_XFRMINERROR);
556 iptfs_skb_add_frags(skb, walk, off + copy_len, len - copy_len);
561 * iptfs_pskb_extract_seq() - Create and load data into a new sk_buff.
562 * @skblen: the total data size for `skb`.
563 * @st: The source for the rest of the data to copy into `skb`.
564 * @off: The offset into @st to copy data from.
565 * @len: The length of data to copy from @st into `skb`. This must be <=
568 * Create a new sk_buff `skb` with @skblen of packet data space. Then, using
569 * the seq functions, copy @len bytes from @st into `skb` starting from
570 * @off.
572 * It is an error for @len to be greater than the amount of data left in @st.
574 * Return: The newly allocated sk_buff `skb` or NULL if an error occurs.
576 static struct sk_buff *
577 iptfs_pskb_extract_seq(u32 skblen, struct skb_seq_state *st, u32 off, int len)
579 struct sk_buff *skb = iptfs_alloc_skb(st->root_skb, skblen, false);
583 if (skb_copy_seq_read(st, off, skb_put(skb, len), len)) {
584 XFRM_INC_STATS(dev_net(st->root_skb->dev), LINUX_MIB_XFRMINERROR);
592 * iptfs_input_save_runt() - save data in xtfs runt space.
594 * @seq: the current sequence
596 * @len: length of packet data
598 * Save the small (`len`) start of a fragmented packet in `buf` in the xtfs data
601 static void iptfs_input_save_runt(struct xfrm_iptfs_data *xtfs, u64 seq,
604 memcpy(xtfs->ra_runt, buf, len);
606 xtfs->ra_runtlen = len;
607 xtfs->ra_wantseq = seq + 1;
611 * __iptfs_iphlen() - return the v4/v6 header length using packet data.
612 * @data: pointer at octet with version nibble
614 * The version data has been checked to be valid (i.e., either 4 or 6).
616 * Return: the IP header size based on the IP version.
618 static u32 __iptfs_iphlen(u8 *data)
620 struct iphdr *iph = (struct iphdr *)data;
622 if (iph->version == 0x4)
624 return sizeof(struct ipv6hdr);
628 * __iptfs_iplen() - return the v4/v6 length using packet data.
629 * @data: pointer to ip (v4/v6) packet header
631 * Grab the IPv4 or IPv6 length value in the start of the inner packet header
632 * pointed to by `data`. Assumes data len is enough for the length field only.
634 * The version data has been checked to be valid (i.e., either 4 or 6).
636 * Return: the length value.
638 static u32 __iptfs_iplen(u8 *data)
640 struct iphdr *iph = (struct iphdr *)data;
642 if (iph->version == 0x4)
643 return ntohs(iph->tot_len);
644 return ntohs(((struct ipv6hdr *)iph)->payload_len) +
645 sizeof(struct ipv6hdr);
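
/* For illustration: why 6 octets (the size of ra_runt) is always enough to
 * learn the inner packet's total length. The IPv4 tot_len field ends at
 * offset 4 and the IPv6 payload_len field ends at offset 6, so peeking 6
 * octets suffices for __iptfs_iplen() on either version. The helper below is
 * hypothetical and only documents that invariant.
 */
static inline u32 iptfs_example_min_len_peek(void)
{
	return max(offsetof(struct iphdr, tot_len) + sizeof(__be16),
		   offsetof(struct ipv6hdr, payload_len) + sizeof(__be16));
}
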
649 * iptfs_complete_inner_skb() - finish preparing the inner packet for gro recv.
651 * @skb: the inner packet
653 * Finish the standard xfrm processing on the inner packet prior to sending back
654 * through gro_cells_receive. We do this separately b/c we are building a list
655 * of packets in the hopes that one day a list will be taken by
658 static void iptfs_complete_inner_skb(struct xfrm_state *x, struct sk_buff *skb)
660 skb_reset_network_header(skb);
662 /* The packet is going back through gro_cells_receive no need to
665 skb_reset_transport_header(skb);
667 /* Packet already has checksum value set. */
668 skb->ip_summed = CHECKSUM_NONE;
670 /* Our skb will contain the header data copied from the outer packet
671 * which contained the start of this inner packet. This is true both
672 * when we allocate a new skb and when we reuse the existing skb.
674 if (ip_hdr(skb)->version == 0x4) {
675 struct iphdr *iph = ip_hdr(skb);
677 if (x->props.flags & XFRM_STATE_DECAP_DSCP)
678 ipv4_copy_dscp(XFRM_MODE_SKB_CB(skb)->tos, iph);
679 if (!(x->props.flags & XFRM_STATE_NOECN))
680 if (INET_ECN_is_ce(XFRM_MODE_SKB_CB(skb)->tos))
683 skb->protocol = htons(ETH_P_IP);
685 struct ipv6hdr *iph = ipv6_hdr(skb);
687 if (x->props.flags & XFRM_STATE_DECAP_DSCP)
688 ipv6_copy_dscp(XFRM_MODE_SKB_CB(skb)->tos, iph);
689 if (!(x->props.flags & XFRM_STATE_NOECN))
690 if (INET_ECN_is_ce(XFRM_MODE_SKB_CB(skb)->tos))
691 IP6_ECN_set_ce(skb, iph);
693 skb->protocol = htons(ETH_P_IPV6);
697 static void __iptfs_reassem_done(struct xfrm_iptfs_data *xtfs, bool free)
699 assert_spin_locked(&xtfs->drop_lock);
701 /* We don't care if it works; locking takes care of things */
702 hrtimer_try_to_cancel(&xtfs->drop_timer);
704 kfree_skb(xtfs->ra_newskb);
705 xtfs->ra_newskb = NULL;
709 * iptfs_reassem_abort() - In-progress packet is aborted, free the state.
712 static void iptfs_reassem_abort(struct xfrm_iptfs_data *xtfs)
714 __iptfs_reassem_done(xtfs, true);
718 * iptfs_reassem_done() - In-progress packet is complete, clear the state.
721 static void iptfs_reassem_done(struct xfrm_iptfs_data *xtfs)
723 __iptfs_reassem_done(xtfs, false);
727 * iptfs_reassem_cont() - Continue the reassembly of an inner packet.
729 * @seq: sequence of current packet
730 * @st: seq read stat for current packet
731 * @skb: current packet
732 * @data: offset into sequential packet data
733 * @blkoff: packet blkoff value
734 * @list: list of skbs to enqueue completed packet on
736 * Process an IPTFS payload that has a non-zero `blkoff` or when we are
737 * expecting the continuation b/c we have a runt or in-progress packet.
739 * Return: the new data offset to continue processing from.
741 static u32 iptfs_reassem_cont(struct xfrm_iptfs_data *xtfs, u64 seq,
742 struct skb_seq_state *st, struct sk_buff *skb,
743 u32 data, u32 blkoff, struct list_head *list)
745 struct iptfs_skb_frag_walk _fragwalk;
746 struct iptfs_skb_frag_walk *fragwalk = NULL;
747 struct sk_buff *newskb = xtfs->ra_newskb;
748 u32 remaining = skb->len - data;
749 u32 runtlen = xtfs->ra_runtlen;
750 u32 copylen, fraglen, ipremain, iphlen, iphremain, rrem;
752 /* Handle packet fragment we aren't expecting */
753 if (!runtlen && !xtfs->ra_newskb)
754 return data + min(blkoff, remaining);
756 /* Important to remember that input to this function is an ordered
757 * packet stream (unless the user disabled the reorder window). Thus if
758 * we are waiting for, and expecting the next packet so we can continue
759 * assembly, a newer sequence number indicates older ones are not coming
760 * (or if they do should be ignored). Technically we can receive older
761 * ones when the reorder window is disabled; however, the user should
762 * have disabled fragmentation in this case, and regardless we don't
765 * blkoff could be zero if the stream is messed up (or it's an all pad
766 * insertion), so be careful to handle that case in each of the below
769 /* Too old case: This can happen when the reorder window is disabled so
770 * ordering isn't actually guaranteed.
772 if (seq < xtfs->ra_wantseq)
773 return data + remaining;
775 /* Too new case: We missed what we wanted; clean up. */
776 if (seq > xtfs->ra_wantseq) {
777 XFRM_INC_STATS(xs_net(xtfs->x), LINUX_MIB_XFRMINIPTFSERROR);
782 if ((*skb->data & 0xF0) != 0) {
783 XFRM_INC_STATS(xs_net(xtfs->x),
784 LINUX_MIB_XFRMINIPTFSERROR);
787 /* Handle all pad case, advance expected sequence number.
791 /* will end parsing */
792 return data + remaining;
796 /* Regardless of what happens we're done with the runt */
797 xtfs->ra_runtlen = 0;
799 /* The start of this inner packet was at the very end of the last
800 * iptfs payload which didn't include enough for the ip header
801 * length field. We must have *at least* that now.
803 rrem = sizeof(xtfs->ra_runt) - runtlen;
804 if (remaining < rrem || blkoff < rrem) {
805 XFRM_INC_STATS(xs_net(xtfs->x),
806 LINUX_MIB_XFRMINIPTFSERROR);
810 /* fill in the runt data */
811 if (skb_copy_seq_read(st, data, &xtfs->ra_runt[runtlen],
813 XFRM_INC_STATS(xs_net(xtfs->x),
814 LINUX_MIB_XFRMINBUFFERERROR);
818 /* We have enough data to get the ip length value now,
819 * allocate an in progress skb
821 ipremain = __iptfs_iplen(xtfs->ra_runt);
822 if (ipremain < sizeof(xtfs->ra_runt)) {
823 /* length has to be at least runtsize large */
824 XFRM_INC_STATS(xs_net(xtfs->x),
825 LINUX_MIB_XFRMINIPTFSERROR);
829 /* For the runt case we don't attempt sharing currently. NOTE:
830 * Currently, this IPTFS implementation will not create runts.
833 newskb = iptfs_alloc_skb(skb, ipremain, false);
835 XFRM_INC_STATS(xs_net(xtfs->x), LINUX_MIB_XFRMINERROR);
838 xtfs->ra_newskb = newskb;
840 /* Copy the runt data into the buffer, but leave data
841 * pointers the same as normal non-runt case. The extra `rrem`
842 * recopied bytes are basically cacheline free. Allows using
843 * same logic below to complete.
845 memcpy(skb_put(newskb, runtlen), xtfs->ra_runt,
846 sizeof(xtfs->ra_runt));
849 /* Continue reassembling the packet */
850 ipremain = __iptfs_iplen(newskb->data);
851 iphlen = __iptfs_iphlen(newskb->data);
853 ipremain -= newskb->len;
854 if (blkoff < ipremain) {
855 /* Corrupt data, we don't have enough to complete the packet */
856 XFRM_INC_STATS(xs_net(xtfs->x), LINUX_MIB_XFRMINIPTFSERROR);
860 /* We want the IP header in linear space */
861 if (newskb->len < iphlen) {
862 iphremain = iphlen - newskb->len;
863 if (blkoff < iphremain) {
864 XFRM_INC_STATS(xs_net(xtfs->x),
865 LINUX_MIB_XFRMINIPTFSERROR);
868 fraglen = min(blkoff, remaining);
869 copylen = min(fraglen, iphremain);
870 if (skb_copy_seq_read(st, data, skb_put(newskb, copylen),
872 XFRM_INC_STATS(xs_net(xtfs->x),
873 LINUX_MIB_XFRMINBUFFERERROR);
876 /* this is a silly condition that might occur anyway */
877 if (copylen < iphremain) {
879 return data + fraglen;
881 /* update data and things derived from it */
884 remaining -= copylen;
888 fraglen = min(blkoff, remaining);
889 copylen = min(fraglen, ipremain);
891 /* If we may have the opportunity to share, prepare a fragwalk. */
892 if (!skb_has_frag_list(skb) && !skb_has_frag_list(newskb) &&
893 (skb->head_frag || skb->len == skb->data_len) &&
894 skb->pp_recycle == newskb->pp_recycle) {
895 fragwalk = &_fragwalk;
896 iptfs_skb_prepare_frag_walk(skb, data, fragwalk);
899 /* Try share then copy. */
901 iptfs_skb_can_add_frags(newskb, fragwalk, data, copylen)) {
902 iptfs_skb_add_frags(newskb, fragwalk, data, copylen);
904 /* copy fragment data into newskb */
905 if (skb_copy_seq_read(st, data, skb_put(newskb, copylen),
907 XFRM_INC_STATS(xs_net(xtfs->x),
908 LINUX_MIB_XFRMINBUFFERERROR);
913 if (copylen < ipremain) {
916 /* We are done with packet reassembly! */
917 iptfs_reassem_done(xtfs);
918 iptfs_complete_inner_skb(xtfs->x, newskb);
919 list_add_tail(&newskb->list, list);
922 /* will continue on to new data block or end */
923 return data + fraglen;
926 if (xtfs->ra_newskb) {
927 iptfs_reassem_abort(xtfs);
929 xtfs->ra_runtlen = 0;
930 xtfs->ra_wantseq = 0;
932 /* skip past fragment, maybe to end */
933 return data + min(blkoff, remaining);
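
/* For illustration: the continuation accounting used above. blkoff is the
 * offset from the start of this payload's data block to the first octet of
 * the next fresh inner packet, so at most min(blkoff, remaining) octets
 * belong to the in-progress packet. A hypothetical helper with example
 * values:
 */
static inline u32 iptfs_example_cont_bytes(u32 blkoff, u32 remaining)
{
	/* e.g. blkoff = 200, remaining = 1400: 200 octets continue the
	 * in-progress packet and parsing resumes at the fresh packet;
	 * blkoff = 1500, remaining = 1400: the whole payload is
	 * continuation and reassembly waits for the next payload.
	 */
	return min(blkoff, remaining);
}
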
936 static bool __input_process_payload(struct xfrm_state *x, u32 data,
937 struct skb_seq_state *skbseq,
938 struct list_head *sublist)
940 u8 hbytes[sizeof(struct ipv6hdr)];
941 struct iptfs_skb_frag_walk _fragwalk;
942 struct iptfs_skb_frag_walk *fragwalk = NULL;
943 struct sk_buff *defer, *first_skb, *next, *skb;
944 const unsigned char *old_mac;
945 struct xfrm_iptfs_data *xtfs;
948 u32 first_iplen, iphlen, iplen, remaining, tail;
954 skb = skbseq->root_skb;
958 seq = __esp_seq(skb);
960 /* Save the old mac header if set */
961 old_mac = skb_mac_header_was_set(skb) ? skb_mac_header(skb) : NULL;
966 while (data < tail) {
969 /* Gather information on the next data block.
970 * `data` points to the start of the data block.
972 remaining = tail - data;
974 /* try and copy enough bytes to read length from ipv4/ipv6 */
975 iphlen = min_t(u32, remaining, 6);
976 if (skb_copy_seq_read(skbseq, data, hbytes, iphlen)) {
977 XFRM_INC_STATS(net, LINUX_MIB_XFRMINBUFFERERROR);
981 iph = (struct iphdr *)hbytes;
982 if (iph->version == 0x4) {
983 /* must have at least tot_len field present */
985 /* save the bytes we have, advance data and exit */
986 iptfs_input_save_runt(xtfs, seq, hbytes,
992 iplen = be16_to_cpu(iph->tot_len);
993 iphlen = iph->ihl << 2;
994 protocol = cpu_to_be16(ETH_P_IP);
995 XFRM_MODE_SKB_CB(skbseq->root_skb)->tos = iph->tos;
996 } else if (iph->version == 0x6) {
997 /* must have at least payload_len field present */
999 /* save the bytes we have, advance data and exit */
1000 iptfs_input_save_runt(xtfs, seq, hbytes,
1006 iplen = be16_to_cpu(((struct ipv6hdr *)hbytes)->payload_len);
1007 iplen += sizeof(struct ipv6hdr);
1008 iphlen = sizeof(struct ipv6hdr);
1009 protocol = cpu_to_be16(ETH_P_IPV6);
1010 XFRM_MODE_SKB_CB(skbseq->root_skb)->tos =
1011 ipv6_get_dsfield((struct ipv6hdr *)iph);
1012 } else if (iph->version == 0x0) {
1017 XFRM_INC_STATS(net, LINUX_MIB_XFRMINBUFFERERROR);
1021 if (unlikely(skbseq->stepped_offset)) {
1022 /* We need to reset our seq read, it can't backup at
1025 struct sk_buff *save = skbseq->root_skb;
1027 skb_abort_seq_read(skbseq);
1028 skb_prepare_seq_read(save, data, tail, skbseq);
1035 first_iplen = iplen;
1038 /* We are going to skip over `data` bytes to reach the
1039 * start of the IP header of `iphlen` len for `iplen`
1043 if (skb_has_frag_list(skb)) {
1046 } else if (data + iphlen <= skb_headlen(skb) &&
1047 /* make sure our header is 32-bit aligned? */
1048 /* ((uintptr_t)(skb->data + data) & 0x3) == 0 && */
1049 skb_tailroom(skb) + tail - data >= iplen) {
1050 /* Reuse the received skb.
1052 * We have enough headlen to pull past any
1053 * initial fragment data, leaving at least the
1054 * IP header in the linear buffer space.
1056 * For linear buffer space we only require that
1057 * linear buffer space is large enough to
1058 * eventually hold the entire reassembled
1059 * packet (by including tailroom in the check).
1061 * For non-linear tailroom is 0 and so we only
1062 * re-use if the entire packet is present
1065 * NOTE: there are many more options for
1066 * sharing, KISS for now. Also, this can produce
1067 * skb's with the IP header unaligned to 32
1068 * bits. If that ends up being a problem then a
1069 * check should be added to the conditional
1070 * above that the header lies on a 32-bit
1073 skb_pull(skb, data);
1075 /* our range just changed */
1078 remaining = skb->len;
1080 skb->protocol = protocol;
1081 skb_mac_header_rebuild(skb);
1083 eth_hdr(skb)->h_proto = skb->protocol;
1085 /* all pointers could have changed, so reset the walk */
1086 skb_abort_seq_read(skbseq);
1087 skb_prepare_seq_read(skb, data, tail, skbseq);
1088 } else if (skb->head_frag &&
1089 /* We have the IP header right now */
1090 remaining >= iphlen) {
1091 fragwalk = &_fragwalk;
1092 iptfs_skb_prepare_frag_walk(skb, data, fragwalk);
1096 /* We couldn't reuse the input skb so allocate a
1103 /* Don't trim `first_skb` until the end as we are
1104 * walking that data now.
1108 capturelen = min(iplen, remaining);
1111 /* Large enough to be worth sharing */
1112 iplen < IPTFS_PKT_SHARE_MIN ||
1113 /* Have IP header + some data to share. */
1114 capturelen <= iphlen ||
1115 /* Try creating skb and adding frags */
1116 !(skb = iptfs_pskb_add_frags(first_skb, fragwalk,
1119 skb = iptfs_pskb_extract_seq(iplen, skbseq, data, capturelen);
1122 /* skip to next packet or done */
1127 skb->protocol = protocol;
1129 /* rebuild the mac header */
1130 skb_set_mac_header(skb, -first_skb->mac_len);
1131 memcpy(skb_mac_header(skb), old_mac, first_skb->mac_len);
1132 eth_hdr(skb)->h_proto = skb->protocol;
1138 if (skb->len < iplen) {
1139 /* Start reassembly */
1140 spin_lock(&xtfs->drop_lock);
1142 xtfs->ra_newskb = skb;
1143 xtfs->ra_wantseq = seq + 1;
1144 if (!hrtimer_is_queued(&xtfs->drop_timer)) {
1145 /* softirq blocked lest the timer fire and interrupt us */
1146 hrtimer_start(&xtfs->drop_timer,
1148 IPTFS_HRTIMER_MODE);
1151 spin_unlock(&xtfs->drop_lock);
1156 iptfs_complete_inner_skb(x, skb);
1157 list_add_tail(&skb->list, sublist);
1161 /* this should not happen from the above code */
1162 XFRM_INC_STATS(net, LINUX_MIB_XFRMINIPTFSERROR);
1164 if (first_skb && first_iplen && !defer && first_skb != xtfs->ra_newskb) {
1165 /* first_skb is queued b/c !defer and not partial */
1166 if (pskb_trim(first_skb, first_iplen)) {
1167 /* error trimming */
1168 list_del(&first_skb->list);
1171 first_skb->ip_summed = CHECKSUM_NONE;
1174 /* Send the packets! */
1175 list_for_each_entry_safe(skb, next, sublist, list) {
1176 skb_list_del_init(skb);
1177 if (xfrm_input(skb, 0, 0, -2))
1181 skb = skbseq->root_skb;
1182 skb_abort_seq_read(skbseq);
1186 } else if (!first_skb) {
1187 /* skb is the original passed in skb, but we didn't get far
1188 * enough to process it as the first_skb. If we had, it would
1189 * either have been saved in ra_newskb, trimmed and sent on as an
1190 * skb, or placed in defer to be freed.
1198 * iptfs_input_ordered() - handle next in order IPTFS payload.
1200 * @skb: current packet
1202 * Process the IPTFS payload in `skb` and consume it afterwards.
1204 static void iptfs_input_ordered(struct xfrm_state *x, struct sk_buff *skb)
1206 struct ip_iptfs_cc_hdr iptcch;
1207 struct skb_seq_state skbseq;
1208 struct list_head sublist; /* rename this it's just a list */
1209 struct xfrm_iptfs_data *xtfs;
1210 struct ip_iptfs_hdr *ipth;
1212 u32 blkoff, data, remaining;
1213 bool consumed = false;
1216 xtfs = x->mode_data;
1219 seq = __esp_seq(skb);
1221 /* Large enough to hold both types of header */
1222 ipth = (struct ip_iptfs_hdr *)&iptcch;
1224 skb_prepare_seq_read(skb, 0, skb->len, &skbseq);
1226 /* Get the IPTFS header and validate it */
1228 if (skb_copy_seq_read(&skbseq, 0, ipth, sizeof(*ipth))) {
1229 XFRM_INC_STATS(net, LINUX_MIB_XFRMINBUFFERERROR);
1232 data = sizeof(*ipth);
1234 trace_iptfs_egress_recv(skb, xtfs, be16_to_cpu(ipth->block_offset));
1236 /* Set data past the basic header */
1237 if (ipth->subtype == IPTFS_SUBTYPE_CC) {
1238 /* Copy the rest of the CC header */
1239 remaining = sizeof(iptcch) - sizeof(*ipth);
1240 if (skb_copy_seq_read(&skbseq, data, ipth + 1, remaining)) {
1241 XFRM_INC_STATS(net, LINUX_MIB_XFRMINBUFFERERROR);
1245 } else if (ipth->subtype != IPTFS_SUBTYPE_BASIC) {
1246 XFRM_INC_STATS(net, LINUX_MIB_XFRMINHDRERROR);
1250 if (ipth->flags != 0) {
1251 XFRM_INC_STATS(net, LINUX_MIB_XFRMINHDRERROR);
1255 INIT_LIST_HEAD(&sublist);
1257 /* Handle fragment at start of payload, and/or waiting reassembly. */
1259 blkoff = ntohs(ipth->block_offset);
1260 /* quick check before locking; rechecked under the lock below */
1261 if (blkoff || xtfs->ra_runtlen || xtfs->ra_newskb) {
1262 spin_lock(&xtfs->drop_lock);
1264 /* check again after lock */
1265 if (blkoff || xtfs->ra_runtlen || xtfs->ra_newskb) {
1266 data = iptfs_reassem_cont(xtfs, seq, &skbseq, skb, data,
1270 spin_unlock(&xtfs->drop_lock);
1274 consumed = __input_process_payload(x, data, &skbseq, &sublist);
1277 skb = skbseq.root_skb;
1278 skb_abort_seq_read(&skbseq);
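
/* For illustration: the receive-side checks applied to the IPTFS header
 * above, collected into one hypothetical validator. Only the fields already
 * used above (subtype, flags, block_offset) are assumed.
 */
static inline bool iptfs_example_hdr_ok(const struct ip_iptfs_hdr *ipth)
{
	/* subtype selects basic vs congestion-control header format */
	if (ipth->subtype != IPTFS_SUBTYPE_BASIC &&
	    ipth->subtype != IPTFS_SUBTYPE_CC)
		return false;
	/* no flags are accepted here, so the flags octet must be zero */
	return ipth->flags == 0;
}
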
1283 /* ------------------------------- */
1284 /* Input (Egress) Re-ordering Code */
1285 /* ------------------------------- */
1287 static void __vec_shift(struct xfrm_iptfs_data *xtfs, u32 shift)
1289 u32 savedlen = xtfs->w_savedlen;
1291 if (shift > savedlen)
1293 if (shift != savedlen)
1294 memcpy(xtfs->w_saved, xtfs->w_saved + shift,
1295 (savedlen - shift) * sizeof(*xtfs->w_saved));
1296 memset(xtfs->w_saved + savedlen - shift, 0,
1297 shift * sizeof(*xtfs->w_saved));
1298 xtfs->w_savedlen -= shift;
1301 static void __reorder_past(struct xfrm_iptfs_data *xtfs, struct sk_buff *inskb,
1302 struct list_head *freelist)
1304 list_add_tail(&inskb->list, freelist);
1307 static u32 __reorder_drop(struct xfrm_iptfs_data *xtfs, struct list_head *list)
1310 struct skb_wseq *s, *se;
1311 const u32 savedlen = xtfs->w_savedlen;
1312 time64_t now = ktime_get_raw_fast_ns();
1316 if (xtfs->w_saved[0].drop_time > now)
1321 /* Keep flushing packets until we reach a drop time greater than now. */
1325 /* Walking past empty slots until we reach a packet */
1326 for (; s < se && !s->skb; s++) {
1327 if (s->drop_time > now)
1330 /* Sending packets until we hit another empty slot. */
1331 for (; s < se && s->skb; scount++, s++)
1332 list_add_tail(&s->skb->list, list);
1336 count = s - xtfs->w_saved;
1338 xtfs->w_wantseq += count;
1340 /* Shift handled slots plus final empty slot into slot 0. */
1341 __vec_shift(xtfs, count);
1344 if (xtfs->w_savedlen) {
1346 /* Drifting is OK */
1347 hrtimer_start(&xtfs->drop_timer,
1348 xtfs->w_saved[0].drop_time - now,
1349 IPTFS_HRTIMER_MODE);
1354 static void __reorder_this(struct xfrm_iptfs_data *xtfs, struct sk_buff *inskb,
1355 struct list_head *list)
1357 struct skb_wseq *s, *se;
1358 const u32 savedlen = xtfs->w_savedlen;
1361 /* Got what we wanted. */
1362 list_add_tail(&inskb->list, list);
1367 /* Flush remaining consecutive packets. */
1369 /* Keep sending until we hit another missed pkt. */
1370 for (s = xtfs->w_saved, se = s + savedlen; s < se && s->skb; s++)
1371 list_add_tail(&s->skb->list, list);
1372 count = s - xtfs->w_saved;
1374 xtfs->w_wantseq += count;
1376 /* Shift handled slots plus final empty slot into slot 0. */
1377 __vec_shift(xtfs, count + 1);
1380 /* Set the slot's drop time and all the empty slots below it until reaching a
1381 * filled slot which will already be set.
1383 static void iptfs_set_window_drop_times(struct xfrm_iptfs_data *xtfs, int index)
1385 const u32 savedlen = xtfs->w_savedlen;
1386 struct skb_wseq *s = xtfs->w_saved;
1389 assert_spin_locked(&xtfs->drop_lock);
1391 if (savedlen > index + 1) {
1392 /* we are below another, our drop time and the timer are already set */
1395 /* we are the most future so get a new drop time. */
1396 drop_time = ktime_get_raw_fast_ns();
1397 drop_time += xtfs->drop_time_ns;
1399 /* Walk back through the array setting drop times as we go */
1400 s[index].drop_time = drop_time;
1401 while (index-- > 0 && !s[index].skb)
1402 s[index].drop_time = drop_time;
1404 /* If we walked all the way back, schedule the drop timer if needed */
1405 if (index == -1 && !hrtimer_is_queued(&xtfs->drop_timer))
1406 hrtimer_start(&xtfs->drop_timer, xtfs->drop_time_ns,
1407 IPTFS_HRTIMER_MODE);
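
/* For illustration: because iptfs_set_window_drop_times() only takes a new
 * timestamp for the most future slot and copies it backwards over empty
 * slots, drop times are non-decreasing across the window, i.e.
 * D(n-1) <= D(n). A hypothetical check of that invariant:
 */
static inline bool iptfs_example_drop_times_sorted(const struct skb_wseq *s,
						   u32 savedlen)
{
	u32 i;

	for (i = 1; i < savedlen; i++)
		if (s[i - 1].drop_time > s[i].drop_time)
			return false;
	return true;
}
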
1410 static void __reorder_future_fits(struct xfrm_iptfs_data *xtfs,
1411 struct sk_buff *inskb,
1412 struct list_head *freelist)
1414 const u64 inseq = __esp_seq(inskb);
1415 const u64 wantseq = xtfs->w_wantseq;
1416 const u64 distance = inseq - wantseq;
1417 const u32 savedlen = xtfs->w_savedlen;
1418 const u32 index = distance - 1;
1420 /* Handle future sequence number received which fits in the window.
1422 * We know we don't have the seq we want so we won't be able to flush
1426 /* slot count is 4, saved size is 3, savedlen is 2
1428 * "window boundary" is based on the fixed window size
1429 * distance is also slot number
1430 * index is an array index (i.e., - 1 of slot)
1431 * : : - implicit NULL after array len
1433 * +--------- used length (savedlen == 2)
1434 * | +----- array size (nslots - 1 == 3)
1435 * | | + window boundary (nslots == 4)
1438 * 0 1 2 3 | slot number
1439 * --- 0 1 2 | array index
1440 * [-] [b] : :| array
1442 * "2" "3" "4" *5*| seq numbers
1444 * We receive seq number 5
1445 * distance == 3 [inseq(5) - w_wantseq(2)]
1446 * index == 2 [distance(3) - 1]
1449 if (xtfs->w_saved[index].skb) {
1450 /* a dup of a future */
1451 list_add_tail(&inskb->list, freelist);
1455 xtfs->w_saved[index].skb = inskb;
1456 xtfs->w_savedlen = max(savedlen, index + 1);
1457 iptfs_set_window_drop_times(xtfs, index);
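
/* For illustration: the placement math used above, with the same
 * nslots = reorder_win_size + 1 convention. A hypothetical helper matching
 * the worked example in the comment (wantseq 2, inseq 5, nslots 4 gives
 * distance 3, which fits, and array index 2).
 */
static inline u32 iptfs_example_window_index(u64 wantseq, u64 inseq)
{
	/* only valid when the sequence fits: 0 < inseq - wantseq < nslots */
	return (u32)(inseq - wantseq) - 1;
}
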
1460 static void __reorder_future_shifts(struct xfrm_iptfs_data *xtfs,
1461 struct sk_buff *inskb,
1462 struct list_head *list)
1464 const u32 nslots = xtfs->cfg.reorder_win_size + 1;
1465 const u64 inseq = __esp_seq(inskb);
1466 u32 savedlen = xtfs->w_savedlen;
1467 u64 wantseq = xtfs->w_wantseq;
1468 struct skb_wseq *wnext;
1469 struct sk_buff *slot0;
1470 u32 beyond, shifting, slot;
1473 /* Handle future sequence number received.
1475 * IMPORTANT: we are at least advancing w_wantseq (i.e., wantseq) by 1
1476 * b/c we are beyond the window boundary.
1478 * We know we don't have the wantseq so that counts as a drop.
1481 /* example: slot count is 4, array size is 3, savedlen is 2, slot 0 is
1482 * the missing sequence number.
1484 * the final slot at savedlen (index savedlen - 1) is always occupied.
1486 * beyond is "beyond array size" not savedlen.
1488 * +--------- array length (savedlen == 2)
1489 * | +----- array size (nslots - 1 == 3)
1490 * | | +- window boundary (nslots == 4)
1493 * 0 1 2 3 | slot number
1494 * --- 0 1 2 | array index
1495 * [b] [c] : :| array
1497 * "2" "3" "4" "5"|*6* seq numbers
1499 * We receive seq number 6
1500 * distance == 4 [inseq(6) - w_wantseq(2)]
1501 * newslot == distance
1502 * index == 3 [distance(4) - 1]
1503 * beyond == 1 [newslot(4) - lastslot((nslots(4) - 1))]
1504 * shifting == 1 [min(savedlen(2), beyond(1))]
1505 * slot0_skb == [b], and should match w_wantseq
1507 * +--- window boundary (nslots == 4)
1508 * 0 1 2 3 | 4 slot number
1509 * --- 0 1 2 | 3 array index
1510 * [b] : : : :| array
1511 * "2" "3" "4" "5" *6* seq numbers
1513 * We receive seq number 6
1514 * distance == 4 [inseq(6) - w_wantseq(2)]
1515 * newslot == distance
1516 * index == 3 [distance(4) - 1]
1517 * beyond == 1 [newslot(4) - lastslot((nslots(4) - 1))]
1518 * shifting == 1 [min(savedlen(1), beyond(1))]
1519 * slot0_skb == [b] and should match w_wantseq
1521 * +-- window boundary (nslots == 4)
1522 * 0 1 2 3 | 4 5 6 slot number
1523 * --- 0 1 2 | 3 4 5 array index
1524 * [-] [c] : :| array
1525 * "2" "3" "4" "5" "6" "7" *8* seq numbers
1527 * savedlen = 2, beyond = 3
1528 * iter 1: slot0 == NULL, missed++, lastdrop = 2 (2+1-1), slot0 = [-]
1529 * iter 2: slot0 == NULL, missed++, lastdrop = 3 (2+2-1), slot0 = [c]
1530 * 2 < 3, extra = 1 (3-2), missed += extra, lastdrop = 4 (2+2+1-1)
1532 * We receive seq number 8
1533 * distance == 6 [inseq(8) - w_wantseq(2)]
1534 * newslot == distance
1535 * index == 5 [distance(6) - 1]
1536 * beyond == 3 [newslot(6) - lastslot((nslots(4) - 1))]
1537 * shifting == 2 [min(savedlen(2), beyond(3))]
1539 * slot0_skb == NULL changed from [b] when "savedlen < beyond" is true.
1542 /* Now send any packets that are being shifted out of saved, and account
1543 * for missing packets that are exiting the window as we shift it.
1546 distance = inseq - wantseq;
1547 beyond = distance - (nslots - 1);
1549 /* If savedlen > beyond we are shifting some, else all. */
1550 shifting = min(savedlen, beyond);
1552 /* slot0 tracks the skb that shifts out of the saved array into window slot 0 */
1554 wnext = xtfs->w_saved;
1555 for (slot = 1; slot <= shifting; slot++, wnext++) {
1556 /* handle what was in slot0 before we occupy it */
1558 list_add_tail(&slot0->list, list);
1563 /* slot0 is now either NULL (in which case it's what we are now waiting
1564 * for), or a buf which we need to handle as if we had received it;
1565 * however, we may be advancing past that buffer as well.
1568 /* Handle case where we need to shift more than we had saved, slot0 will
1569 * be NULL iff savedlen is 0, otherwise slot0 will always be
1570 * non-NULL b/c we shifted the final element, which is always set if
1571 * there is any saved, into slot0.
1573 if (savedlen < beyond) {
1575 list_add_tail(&slot0->list, list);
1577 /* slot0 has had an empty slot pushed into it */
1580 /* Remove the entries */
1581 __vec_shift(xtfs, beyond);
1583 /* Advance want seq */
1584 xtfs->w_wantseq += beyond;
1586 /* Process drops here when implementing congestion control */
1588 /* We've shifted. plug the packet in at the end. */
1589 xtfs->w_savedlen = nslots - 1;
1590 xtfs->w_saved[xtfs->w_savedlen - 1].skb = inskb;
1591 iptfs_set_window_drop_times(xtfs, xtfs->w_savedlen - 1);
1593 /* if we don't have a slot0 then we must wait for it */
1597 /* If slot0, seq must match new want seq */
1599 /* slot0 is valid, treat like we received expected. */
1600 __reorder_this(xtfs, slot0, list);
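
/* For illustration: the shift accounting used above. With the comment's
 * worked example (nslots 4, wantseq 2, savedlen 2, arriving inseq 8) this
 * hypothetical helper yields distance 6, beyond 3 and shifting 2.
 */
static inline void iptfs_example_shift_amounts(u64 wantseq, u64 inseq,
					       u32 nslots, u32 savedlen,
					       u32 *beyond, u32 *shifting)
{
	u64 distance = inseq - wantseq;

	/* how far past the window boundary the new sequence lands */
	*beyond = distance - (nslots - 1);
	/* saved entries walked out; the rest of the shift covers empty slots */
	*shifting = min(savedlen, *beyond);
}
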
1603 /* Receive a new packet into the reorder window. Return a list of ordered
1604 * packets from the window.
1606 static void iptfs_input_reorder(struct xfrm_iptfs_data *xtfs,
1607 struct sk_buff *inskb, struct list_head *list,
1608 struct list_head *freelist)
1610 const u32 nslots = xtfs->cfg.reorder_win_size + 1;
1611 u64 inseq = __esp_seq(inskb);
1614 assert_spin_locked(&xtfs->drop_lock);
1616 if (unlikely(!xtfs->w_seq_set)) {
1617 xtfs->w_seq_set = true;
1618 xtfs->w_wantseq = inseq;
1620 wantseq = xtfs->w_wantseq;
1622 if (likely(inseq == wantseq))
1623 __reorder_this(xtfs, inskb, list);
1624 else if (inseq < wantseq)
1625 __reorder_past(xtfs, inskb, freelist);
1626 else if ((inseq - wantseq) < nslots)
1627 __reorder_future_fits(xtfs, inskb, freelist);
1629 __reorder_future_shifts(xtfs, inskb, list);
1633 * iptfs_drop_timer() - Handle drop timer expiry.
1636 * This is similar to our input function.
1638 * The drop timer is set when we start an in progress reassembly, and also when
1639 * we save a future packet in the window saved array.
1641 * NOTE: packets in the save window always have newer (larger) drop times as
1642 * they get further in the future, i.e. for:
1644 * if slots (S0, S1, ... Sn) and `Dn` is the drop time for slot `Sn`,
1645 * then D(n-1) <= D(n).
1647 * So, regardless of why the timer is firing we can always discard any in-progress
1648 * fragment; either it's the reassembly timer, or slot 0 is going to be
1649 * dropped as S0 must have the earliest drop time, and slot 0 holds the
1650 * continuation fragment of the in-progress packet.
1652 * Returns HRTIMER_NORESTART.
1654 static enum hrtimer_restart iptfs_drop_timer(struct hrtimer *me)
1656 struct sk_buff *skb, *next;
1657 struct list_head list;
1658 struct xfrm_iptfs_data *xtfs;
1659 struct xfrm_state *x;
1662 xtfs = container_of(me, typeof(*xtfs), drop_timer);
1665 INIT_LIST_HEAD(&list);
1667 spin_lock(&xtfs->drop_lock);
1669 /* Drop any in progress packet */
1670 skb = xtfs->ra_newskb;
1671 xtfs->ra_newskb = NULL;
1673 /* Now drop as many packets as we should from the reordering window
1676 count = xtfs->w_savedlen ? __reorder_drop(xtfs, &list) : 0;
1678 spin_unlock(&xtfs->drop_lock);
1681 kfree_skb_reason(skb, SKB_DROP_REASON_FRAG_REASM_TIMEOUT);
1684 list_for_each_entry_safe(skb, next, &list, list) {
1685 skb_list_del_init(skb);
1686 iptfs_input_ordered(x, skb);
1690 return HRTIMER_NORESTART;
1694 * iptfs_input() - handle receipt of iptfs payload
1698 * We have an IPTFS payload; order it if needed, then process newly in-order
1701 * Return: -EINPROGRESS to inform xfrm_input to stop processing the skb.
1703 static int iptfs_input(struct xfrm_state *x, struct sk_buff *skb)
1705 struct list_head freelist, list;
1706 struct xfrm_iptfs_data *xtfs = x->mode_data;
1707 struct sk_buff *next;
1709 /* Fast path for no reorder window. */
1710 if (xtfs->cfg.reorder_win_size == 0) {
1711 iptfs_input_ordered(x, skb);
1715 /* Fetch list of in-order packets from the reordering window as well as
1716 * a list of buffers we need to now free.
1718 INIT_LIST_HEAD(&list);
1719 INIT_LIST_HEAD(&freelist);
1721 spin_lock(&xtfs->drop_lock);
1722 iptfs_input_reorder(xtfs, skb, &list, &freelist);
1723 spin_unlock(&xtfs->drop_lock);
1725 list_for_each_entry_safe(skb, next, &list, list) {
1726 skb_list_del_init(skb);
1727 iptfs_input_ordered(x, skb);
1730 list_for_each_entry_safe(skb, next, &freelist, list) {
1731 skb_list_del_init(skb);
1735 /* We always have dealt with the input SKB, either we are re-using it,
1736 * or we have freed it. Return EINPROGRESS so that xfrm_input stops
1739 return -EINPROGRESS;
1742 /* ================================= */
1743 /* IPTFS Sending (ingress) Functions */
1744 /* ================================= */
1746 /* ------------------------- */
1747 /* Enqueue to send functions */
1748 /* ------------------------- */
1751 * iptfs_enqueue() - enqueue packet if ok to send.
1755 * Return: true if packet enqueued.
1757 static bool iptfs_enqueue(struct xfrm_iptfs_data *xtfs, struct sk_buff *skb)
1759 u64 newsz = xtfs->queue_size + skb->len;
1762 assert_spin_locked(&xtfs->x->lock);
1764 if (newsz > xtfs->cfg.max_queue_size)
1767 /* Set ECN CE if we are above our ECN queue threshold */
1768 if (newsz > xtfs->ecn_queue_size) {
1770 if (iph->version == 4)
1772 else if (iph->version == 6)
1773 IP6_ECN_set_ce(skb, ipv6_hdr(skb));
1776 __skb_queue_tail(&xtfs->queue, skb);
1777 xtfs->queue_size += skb->len;
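
/* For illustration: the admission policy implemented above. Past
 * max_queue_size the packet is dropped; past the smaller ecn_queue_size
 * threshold it is still queued but ECN CE marked. The enum and helper are
 * hypothetical.
 */
enum iptfs_example_admit { IPTFS_EX_QUEUE, IPTFS_EX_QUEUE_CE, IPTFS_EX_DROP };

static inline enum iptfs_example_admit
iptfs_example_classify(u64 newsz, u32 max_queue_size, u32 ecn_queue_size)
{
	if (newsz > max_queue_size)
		return IPTFS_EX_DROP;
	if (newsz > ecn_queue_size)
		return IPTFS_EX_QUEUE_CE;
	return IPTFS_EX_QUEUE;
}
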
1781 static int iptfs_get_cur_pmtu(struct xfrm_state *x, struct xfrm_iptfs_data *xtfs,
1782 struct sk_buff *skb)
1784 struct xfrm_dst *xdst = (struct xfrm_dst *)skb_dst(skb);
1785 u32 payload_mtu = xtfs->payload_mtu;
1786 u32 pmtu = __iptfs_get_inner_mtu(x, xdst->child_mtu_cached);
1788 if (payload_mtu && payload_mtu < pmtu)
1794 static int iptfs_is_too_big(struct sock *sk, struct sk_buff *skb, u32 pmtu)
1796 if (skb->len <= pmtu)
1799 /* We only send ICMP too big if the user has configured us as
1803 XFRM_INC_STATS(dev_net(skb->dev), LINUX_MIB_XFRMOUTERROR);
1806 xfrm_local_error(skb, pmtu);
1807 else if (ip_hdr(skb)->version == 4)
1808 icmp_send(skb, ICMP_DEST_UNREACH, ICMP_FRAG_NEEDED, htonl(pmtu));
1810 icmpv6_send(skb, ICMPV6_PKT_TOOBIG, 0, pmtu);
1815 /* IPv4/IPv6 packet ingress to IPTFS tunnel, arrange to send in IPTFS payload
1816 * (i.e., aggregating or fragmenting as appropriate).
1817 * This is set in dst->output for an SA.
1819 static int iptfs_output_collect(struct net *net, struct sock *sk, struct sk_buff *skb)
1821 struct dst_entry *dst = skb_dst(skb);
1822 struct xfrm_state *x = dst->xfrm;
1823 struct xfrm_iptfs_data *xtfs = x->mode_data;
1824 struct sk_buff *segs, *nskb;
1829 /* We have hooked into dst_entry->output which means we have skipped the
1830 * protocol specific netfilter (see xfrm4_output, xfrm6_output).
1831 * when our timer runs we will end up calling xfrm_output directly on
1832 * the encapsulated traffic.
1834 * For both cases this is the NF_INET_POST_ROUTING hook which allows
1835 * changing the skb->dst entry which then may not be xfrm based anymore
1836 * in which case a REROUTED flag is set and dst_output is called.
1838 * For IPv6 we are also skipping fragmentation handling for local
1839 * sockets, which may or may not be good depending on our tunnel DF
1840 * setting. Normally with fragmentation supported we want to skip this
1844 if (xtfs->cfg.dont_frag)
1845 pmtu = iptfs_get_cur_pmtu(x, xtfs, skb);
1847 /* Break apart GSO skbs. If the queue is nearing full then we want the
1848 * accounting and queuing to be based on the individual packets not on the
1849 * aggregate GSO buffer.
1851 was_gso = skb_is_gso(skb);
1855 segs = skb_gso_segment(skb, 0);
1856 if (IS_ERR_OR_NULL(segs)) {
1857 XFRM_INC_STATS(net, LINUX_MIB_XFRMOUTERROR);
1860 return PTR_ERR(segs);
1867 /* We can be running on multiple cores and from the network softirq or
1868 * from user context depending on where the packet is coming from.
1870 spin_lock_bh(&x->lock);
1872 skb_list_walk_safe(segs, skb, nskb) {
1873 skb_mark_not_on_list(skb);
1875 /* Once we drop due to no queue space we continue to drop the
1876 * rest of the packets from that GRO.
1880 trace_iptfs_no_queue_space(skb, xtfs, pmtu, was_gso);
1881 XFRM_INC_STATS(net, LINUX_MIB_XFRMOUTNOQSPACE);
1882 kfree_skb_reason(skb, SKB_DROP_REASON_FULL_RING);
1886 /* If the user indicated no iptfs fragmenting check before
1889 if (xtfs->cfg.dont_frag && iptfs_is_too_big(sk, skb, pmtu)) {
1890 trace_iptfs_too_big(skb, xtfs, pmtu, was_gso);
1891 kfree_skb_reason(skb, SKB_DROP_REASON_PKT_TOO_BIG);
1895 /* Enqueue to send in tunnel */
1896 ok = iptfs_enqueue(xtfs, skb);
1900 trace_iptfs_enqueue(skb, xtfs, pmtu, was_gso);
1903 /* Start a delay timer if we don't have one yet */
1904 if (!hrtimer_is_queued(&xtfs->iptfs_timer)) {
1905 hrtimer_start(&xtfs->iptfs_timer, xtfs->init_delay_ns, IPTFS_HRTIMER_MODE);
1906 xtfs->iptfs_settime = ktime_get_raw_fast_ns();
1907 trace_iptfs_timer_start(xtfs, xtfs->init_delay_ns);
1910 spin_unlock_bh(&x->lock);
1914 /* -------------------------- */
1915 /* Dequeue and send functions */
1916 /* -------------------------- */
1918 static void iptfs_output_prepare_skb(struct sk_buff *skb, u32 blkoff)
1920 struct ip_iptfs_hdr *h;
1921 size_t hsz = sizeof(*h);
1923 /* now reset values to be pointing at the rest of the packets */
1924 h = skb_push(skb, hsz);
1927 h->block_offset = htons(blkoff);
1929 /* network_header currently points at the inner IP packet;
1930 * move it to the iptfs header
1932 skb->transport_header = skb->network_header;
1933 skb->network_header -= hsz;
1935 IPCB(skb)->flags |= IPSKB_XFRM_TUNNEL_SIZE;
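
/* For illustration: the block_offset values produced on the send side for a
 * fragmented inner packet of P octets with M octets of payload space per
 * outer packet (see iptfs_copy_create_frags() below). The initial outer
 * packet starts a fresh inner packet so its blkoff is 0; the k-th
 * continuation carries blkoff = P - k * M, the octets of the inner packet
 * still outstanding, so the final fragment's blkoff equals its own length.
 * Hypothetical helper, full-sized fragments assumed.
 */
static inline u32 iptfs_example_frag_blkoff(u32 inner_len, u32 payload_mtu, u32 k)
{
	return k ? inner_len - k * payload_mtu : 0;
}
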
1939 * iptfs_copy_create_frag() - create an inner fragment skb.
1940 * @st: The source packet data.
1941 * @offset: offset in @st of the new fragment data.
1942 * @copy_len: the amount of data to copy from @st.
1944 * Create a new skb holding a single IPTFS inner packet fragment. @copy_len must
1945 * not be greater than the max fragment size.
1947 * Return: the new fragment skb or an ERR_PTR().
1949 static struct sk_buff *iptfs_copy_create_frag(struct skb_seq_state *st, u32 offset, u32 copy_len)
1951 struct sk_buff *src = st->root_skb;
1952 struct sk_buff *skb;
1955 skb = iptfs_alloc_skb(src, copy_len, true);
1957 return ERR_PTR(-ENOMEM);
1959 /* Now copy `copy_len` data from src */
1960 err = skb_copy_seq_read(st, offset, skb_put(skb, copy_len), copy_len);
1963 return ERR_PTR(err);
1970 * iptfs_copy_create_frags() - create and send N-1 fragments of a larger skb.
1971 * @skbp: the source packet skb (IN), skb holding the last fragment in
1972 * the fragment stream (OUT).
1973 * @xtfs: IPTFS SA state.
1974 * @mtu: the max IPTFS fragment size.
1976 * This function is responsible for fragmenting a larger inner packet into a
1977 * sequence of IPTFS payload packets. The last fragment is returned rather than
1978 * being sent so that the caller can append more inner packets (aggregation) if
1981 * Return: 0 on success or a negative error code on failure
1983 static int iptfs_copy_create_frags(struct sk_buff **skbp, struct xfrm_iptfs_data *xtfs, u32 mtu)
1985 struct skb_seq_state skbseq;
1986 struct list_head sublist;
1987 struct sk_buff *skb = *skbp;
1988 struct sk_buff *nskb = *skbp;
1989 u32 copy_len, offset;
1990 u32 to_copy = skb->len - mtu;
1994 INIT_LIST_HEAD(&sublist);
1996 skb_prepare_seq_read(skb, 0, skb->len, &skbseq);
1998 /* A trimmed `skb` will be sent as the first fragment, later. */
2000 to_copy = skb->len - offset;
2002 /* Send all but last fragment to allow agg. append */
2003 trace_iptfs_first_fragmenting(nskb, mtu, to_copy, NULL);
2004 list_add_tail(&nskb->list, &sublist);
2006 /* FUTURE: if the packet has an odd/non-aligning length we could
2007 * send less data in the penultimate fragment so that the last
2008 * fragment then ends on an aligned boundary.
2010 copy_len = min(to_copy, mtu);
2011 nskb = iptfs_copy_create_frag(&skbseq, offset, copy_len);
2013 XFRM_INC_STATS(xs_net(xtfs->x), LINUX_MIB_XFRMOUTERROR);
2014 skb_abort_seq_read(&skbseq);
2015 err = PTR_ERR(nskb);
2019 iptfs_output_prepare_skb(nskb, to_copy);
2021 to_copy -= copy_len;
2024 skb_abort_seq_read(&skbseq);
2026 /* return last fragment that will be unsent (or NULL) */
2029 trace_iptfs_first_final_fragment(nskb, mtu, blkoff, NULL);
2031 /* trim the original skb to MTU */
2033 err = pskb_trim(skb, mtu);
2036 /* Free all frags. Don't bother sending a partial packet we will
2040 list_for_each_entry_safe(skb, nskb, &sublist, list) {
2041 skb_list_del_init(skb);
2047 /* prepare the initial fragment with an iptfs header */
2048 iptfs_output_prepare_skb(skb, 0);
2050 /* Send all but last fragment, if we fail to send a fragment then free
2051 * the rest -- no point in sending a packet that can't be reassembled.
2053 list_for_each_entry_safe(skb, nskb, &sublist, list) {
2054 skb_list_del_init(skb);
2056 err = xfrm_output(NULL, skb);
2066 * iptfs_first_skb() - handle the first dequeued inner packet for output
2067 * @skbp: the source packet skb (IN), skb holding the last fragment in
2068 * the fragment stream (OUT).
2069 * @xtfs: IPTFS SA state.
2070 * @mtu: the max IPTFS fragment size.
2072 * This function is responsible for fragmenting a larger inner packet into a
2073 * sequence of IPTFS payload packets.
2075 * The last fragment is returned rather than being sent so that the caller can
2076 * append more inner packets (aggregation) if there is room.
2078 * Return: 0 on success or a negative error code on failure
2080 static int iptfs_first_skb(struct sk_buff **skbp, struct xfrm_iptfs_data *xtfs, u32 mtu)
2082 struct sk_buff *skb = *skbp;
2085 /* Classic ESP skips the don't fragment ICMP error if DF is clear on
2086 * the inner packet or ignore_df is set. Otherwise it will send an ICMP
2087 * or local error if the inner packet won't fit its MTU.
2089 * With IPTFS we do not care about the inner packet DF bit. If the
2090 * tunnel is configured to "don't fragment" we error back if things
2091 * don't fit in our max packet size. Otherwise we iptfs-fragment as
2095 /* The opportunity for HW offload has ended */
2096 if (skb->ip_summed == CHECKSUM_PARTIAL) {
2097 err = skb_checksum_help(skb);
2102 /* We've split gso up before queuing */
2104 trace_iptfs_first_dequeue(skb, mtu, 0, ip_hdr(skb));
2106 /* Consider the buffer Tx'd and no longer owned */
2109 /* Simple case -- it fits. `mtu` accounted for all the overhead
2110 * including the basic IPTFS header.
2112 if (skb->len <= mtu) {
2113 iptfs_output_prepare_skb(skb, 0);
2117 return iptfs_copy_create_frags(skbp, xtfs, mtu);
2120 static struct sk_buff **iptfs_rehome_fraglist(struct sk_buff **nextp, struct sk_buff *child)
2124 /* It might be possible to account for a frag list in addition to page
2125 * fragments if that is a valid state to be in. The page fragments' size
2126 * should remain counted in data_len so that only the frag_list size is
2127 * removed; this must be done above as well.
2129 *nextp = skb_shinfo(child)->frag_list;
2131 fllen += (*nextp)->len;
2132 nextp = &(*nextp)->next;
2134 skb_frag_list_init(child);
2135 child->len -= fllen;
2136 child->data_len -= fllen;
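/* Illustration only, with hypothetical skbs g1/g2: un-nesting splices the
 * child's own frag_list members into the parent's frag_list right after the
 * child, leaving one flat chain.
 *
 *   before: parent frag_list: ... -> child (tail)
 *           child  frag_list: g1 -> g2
 *
 *   after:  parent frag_list: ... -> child -> g1 -> g2
 *           child  frag_list: empty; child->len and child->data_len reduced
 *           by len(g1) + len(g2)
 */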
2141 static void iptfs_consume_frags(struct sk_buff *to, struct sk_buff *from)
2143 struct skb_shared_info *fromi = skb_shinfo(from);
2144 struct skb_shared_info *toi = skb_shinfo(to);
2145 unsigned int new_truesize;
2147 /* If we have data in a head page, grab it */
2148 if (!skb_headlen(from)) {
2149 new_truesize = SKB_TRUESIZE(skb_end_offset(from));
2151 iptfs_skb_head_to_frag(from, &toi->frags[toi->nr_frags]);
2152 skb_frag_ref(to, toi->nr_frags++);
2153 new_truesize = SKB_DATA_ALIGN(sizeof(struct sk_buff));
2156 /* Move any other page fragments rather than copy */
2157 memcpy(&toi->frags[toi->nr_frags], fromi->frags,
2158 sizeof(fromi->frags[0]) * fromi->nr_frags);
2159 toi->nr_frags += fromi->nr_frags;
2160 fromi->nr_frags = 0;
2163 to->truesize += from->truesize - new_truesize;
2164 from->truesize = new_truesize;
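/* Worked example with hypothetical numbers: if `from` arrived with a truesize
 * of 2304 and carries data in its (page-backed) head, that head becomes a page
 * fragment of `to`, `from` keeps only SKB_DATA_ALIGN(sizeof(struct sk_buff))
 * (roughly 256 octets on a typical 64-bit build), and `to` absorbs the
 * remaining ~2048 octets of truesize, so total accounted memory is unchanged.
 */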
2166 /* We are done with this SKB */
2170 static void iptfs_output_queued(struct xfrm_state *x, struct sk_buff_head *list)
2172 struct xfrm_iptfs_data *xtfs = x->mode_data;
2173 struct sk_buff *skb, *skb2, **nextp;
2174 struct skb_shared_info *shi, *shi2;
2176 /* If we are fragmenting due to a large inner packet we will output all
2177 * the outer IPTFS packets required to contain the fragments of the
2178 * single large inner packet. These outer packets need to be sent
2179 * consecutively (ESP seq-wise). Since this output function is always
2180 * running from a timer we do not need a lock to provide this guarantee.
2181 * We will output our packets consecutively before the timer is allowed
2182 * to run again on some other CPU.
2185 while ((skb = __skb_dequeue(list))) {
2186 u32 mtu = iptfs_get_cur_pmtu(x, xtfs, skb);
2187 bool share_ok = true;
2190 /* protocol comes to us cleared sometimes */
2191 skb->protocol = x->outer_mode.family == AF_INET ? htons(ETH_P_IP) :
2194 if (skb->len > mtu && xtfs->cfg.dont_frag) {
2195 /* We handle this case before enqueueing so we are only
2196 * here b/c the MTU changed after we enqueued but before we
2197 * dequeued; just drop these.
2199 XFRM_INC_STATS(xs_net(x), LINUX_MIB_XFRMOUTERROR);
2201 trace_iptfs_first_toobig(skb, mtu, 0, ip_hdr(skb));
2202 kfree_skb_reason(skb, SKB_DROP_REASON_PKT_TOO_BIG);
2206 /* Convert first inner packet into an outer IPTFS packet,
2207 * dealing with any fragmentation into multiple outer packets
2210 if (iptfs_first_skb(&skb, xtfs, mtu))
2213 /* If fragmentation was required the returned skb is the last
2214 * IPTFS fragment in the chain, and its IPTFS header blkoff has
2215 * been set just past the end of the fragment data.
2217 * In either case the space remaining to send more inner packet
2218 * data is `mtu` - (skb->len - sizeof iptfs header). This is b/c
2219 * the `mtu` value has the basic IPTFS header len accounted for,
2220 * and we added that header to the skb so it is a part of
2221 * skb->len, thus we subtract it from the skb length.
2223 remaining = mtu - (skb->len - sizeof(struct ip_iptfs_hdr));
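/* Worked example with hypothetical numbers, assuming the 4-octet basic IPTFS
 * header: with mtu = 1400 and a first outer skb whose pushed header plus inner
 * data gives skb->len = 604, remaining = 1400 - (604 - 4) = 800 octets are
 * still available for appending further inner packets below.
 */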
2225 /* Re-home (un-nest) nested fragment lists. We need to do this
2226 * b/c we will simply be appending any following aggregated
2227 * inner packets using the frag list.
2229 shi = skb_shinfo(skb);
2230 nextp = &shi->frag_list;
2232 if (skb_has_frag_list(*nextp))
2233 nextp = iptfs_rehome_fraglist(&(*nextp)->next, *nextp);
2235 nextp = &(*nextp)->next;
2238 if (shi->frag_list || skb_cloned(skb) || skb_shared(skb))
2241 /* See if we have enough space to simply append.
2243 * NOTE: Maybe do not append if we will be mis-aligned;
2244 * SW-based endpoints will probably have to copy in this case anyway.
2247 while ((skb2 = skb_peek(list))) {
2248 trace_iptfs_ingress_nth_peek(skb2, remaining);
2249 if (skb2->len > remaining)
2252 __skb_unlink(skb2, list);
2254 /* Consider the buffer Tx'd and no longer owned */
2257 /* If we don't have a cksum in the packet we need to add
2258 * one before encapsulation.
2260 if (skb2->ip_summed == CHECKSUM_PARTIAL) {
2261 if (skb_checksum_help(skb2)) {
2262 XFRM_INC_STATS(xs_net(x), LINUX_MIB_XFRMOUTERROR);
2268 /* skb->pp_recycle is passed to __skb_frag_unref for all
2269 * frag pages so we can only share pages between skbs that have the same pp_recycle value.
2272 shi2 = skb_shinfo(skb2);
2275 (!skb2->head_frag && skb_headlen(skb2)) ||
2276 skb->pp_recycle != skb2->pp_recycle ||
2278 (shi->nr_frags + shi2->nr_frags + 1 > MAX_SKB_FRAGS)))
2282 skb->data_len += skb2->len;
2283 skb->len += skb2->len;
2284 remaining -= skb2->len;
2286 trace_iptfs_ingress_nth_add(skb2, share_ok);
2289 iptfs_consume_frags(skb, skb2);
2291 /* Append to the frag_list */
2293 nextp = &skb2->next;
2294 if (skb_has_frag_list(skb2))
2295 nextp = iptfs_rehome_fraglist(nextp,
2297 skb->truesize += skb2->truesize;
2301 xfrm_output(NULL, skb);
2305 static enum hrtimer_restart iptfs_delay_timer(struct hrtimer *me)
2307 struct sk_buff_head list;
2308 struct xfrm_iptfs_data *xtfs;
2309 struct xfrm_state *x;
2312 xtfs = container_of(me, typeof(*xtfs), iptfs_timer);
2315 /* Process all the queued packets
2317 * softirq execution order: timer > tasklet > hrtimer
2319 * Network rx will have run before us giving one last chance to queue
2320 * ingress packets for us to process and transmit.
2323 spin_lock(&x->lock);
2324 __skb_queue_head_init(&list);
2325 skb_queue_splice_init(&xtfs->queue, &list);
2326 xtfs->queue_size = 0;
2327 settime = xtfs->iptfs_settime;
2328 spin_unlock(&x->lock);
2330 /* After the above unlock, packets can begin queuing again, and the
2331 * timer can be set again, from another CPU either in softirq or user
2332 * context (not from this one since we are running at softirq level
2336 trace_iptfs_timer_expire(xtfs, (unsigned long long)(ktime_get_raw_fast_ns() - settime));
2338 iptfs_output_queued(x, &list);
2340 return HRTIMER_NORESTART;
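/* A minimal sketch (the helper name is hypothetical and this is not the call
 * site used elsewhere in this file) of how the delay timer above is typically
 * armed when the first packet lands on an empty output queue: a relative
 * soft hrtimer set to the configured initial output delay.
 */
static inline void iptfs_sketch_arm_delay_timer(struct xfrm_iptfs_data *xtfs)
{
	/* init_delay_ns defaults to 0, i.e. fire as soon as softirqs run */
	hrtimer_start(&xtfs->iptfs_timer, ns_to_ktime(xtfs->init_delay_ns),
		      IPTFS_HRTIMER_MODE);
}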
2344 * iptfs_encap_add_ipv4() - add outer encaps
2348 * This was originally taken from xfrm4_tunnel_encap_add. The reason for the
2349 * copy is that IP-TFS/AGGFRAG can have different functionality for how to set
2350 * the TOS/DSCP bits. It also sets the protocol to a different value and doesn't do
2351 * anything with inner headers as they aren't pointing into a normal IP
2352 * singleton inner packet.
2354 * Return: 0 on success or a negative error code on failure
2356 static int iptfs_encap_add_ipv4(struct xfrm_state *x, struct sk_buff *skb)
2358 struct dst_entry *dst = skb_dst(skb);
2359 struct iphdr *top_iph;
2361 skb_reset_inner_network_header(skb);
2362 skb_reset_inner_transport_header(skb);
2364 skb_set_network_header(skb, -(x->props.header_len - x->props.enc_hdr_len));
2365 skb->mac_header = skb->network_header + offsetof(struct iphdr, protocol);
2366 skb->transport_header = skb->network_header + sizeof(*top_iph);
2368 top_iph = ip_hdr(skb);
2370 top_iph->version = 4;
2371 top_iph->protocol = IPPROTO_AGGFRAG;
2373 /* As we have 0, fractional, 1 or N inner packets, there's no obviously
2374 * correct DSCP mapping to inherit. ECN should be cleared per RFC9347
2379 top_iph->frag_off = htons(IP_DF);
2380 top_iph->ttl = ip4_dst_hoplimit(xfrm_dst_child(dst));
2381 top_iph->saddr = x->props.saddr.a4;
2382 top_iph->daddr = x->id.daddr.a4;
2383 ip_select_ident(dev_net(dst->dev), skb, NULL);
2388 #if IS_ENABLED(CONFIG_IPV6)
2390 * iptfs_encap_add_ipv6() - add outer encaps
2394 * This was originally taken from xfrm6_tunnel_encap_add. The reason for the
2395 * copy is that IP-TFS/AGGFRAG can have different functionality for how to set
2396 * the flow label and TOS/DSCP bits. It also sets the protocol to a different
2397 * value and doesn't do anything with inner headers as they aren't pointing into
2398 * a normal IP singleton inner packet.
2400 * Return: 0 on success or a negative error code on failure
2402 static int iptfs_encap_add_ipv6(struct xfrm_state *x, struct sk_buff *skb)
2404 struct dst_entry *dst = skb_dst(skb);
2405 struct ipv6hdr *top_iph;
2408 skb_reset_inner_network_header(skb);
2409 skb_reset_inner_transport_header(skb);
2411 skb_set_network_header(skb, -x->props.header_len + x->props.enc_hdr_len);
2412 skb->mac_header = skb->network_header + offsetof(struct ipv6hdr, nexthdr);
2413 skb->transport_header = skb->network_header + sizeof(*top_iph);
2415 top_iph = ipv6_hdr(skb);
2416 top_iph->version = 6;
2417 top_iph->priority = 0;
2418 memset(top_iph->flow_lbl, 0, sizeof(top_iph->flow_lbl));
2419 top_iph->nexthdr = IPPROTO_AGGFRAG;
2421 /* As we have 0, fractional, 1 or N inner packets, there's no obviously
2422 * correct DSCP mapping to inherit. ECN should be cleared per RFC9347
2426 ipv6_change_dsfield(top_iph, 0, dsfield);
2428 top_iph->hop_limit = ip6_dst_hoplimit(xfrm_dst_child(dst));
2429 top_iph->saddr = *(struct in6_addr *)&x->props.saddr;
2430 top_iph->daddr = *(struct in6_addr *)&x->id.daddr;
2437 * iptfs_prepare_output() - prepare the skb for output
2441 * Return: Error value; if 0 then skb values should be as follows:
2442 * - transport_header should point at ESP header
2443 * - network_header should point at Outer IP header
2444 * - mac_header should point at protocol/nexthdr of the outer IP
2446 static int iptfs_prepare_output(struct xfrm_state *x, struct sk_buff *skb)
2448 if (x->outer_mode.family == AF_INET)
2449 return iptfs_encap_add_ipv4(x, skb);
2450 if (x->outer_mode.family == AF_INET6) {
2451 #if IS_ENABLED(CONFIG_IPV6)
2452 return iptfs_encap_add_ipv6(x, skb);
2454 return -EAFNOSUPPORT;
2460 /* ========================== */
2461 /* State Management Functions */
2462 /* ========================== */
2465 * __iptfs_get_inner_mtu() - return inner MTU with no fragmentation.
2467 * @outer_mtu: the outer mtu
2469 * Return: Correct MTU taking into account the encap overhead.
2471 static u32 __iptfs_get_inner_mtu(struct xfrm_state *x, int outer_mtu)
2473 struct crypto_aead *aead;
2477 blksize = ALIGN(crypto_aead_blocksize(aead), 4);
2478 return ((outer_mtu - x->props.header_len - crypto_aead_authsize(aead)) &
2479 ~(blksize - 1)) - 2;
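/* Worked example with hypothetical numbers: outer_mtu = 1500, a 40-octet
 * x->props.header_len, a 16-octet ICV and a 16-octet cipher block size give
 * (1500 - 40 - 16) & ~15 = 1440; subtracting the 2 octets of ESP trailer
 * (pad length + next header) leaves an inner MTU of 1438.
 */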
2483 * iptfs_get_inner_mtu() - return the inner MTU for an IPTFS xfrm.
2485 * @outer_mtu: Outer MTU for the encapsulated packet.
2487 * Return: Correct MTU taking into account the encap overhead.
2489 static u32 iptfs_get_inner_mtu(struct xfrm_state *x, int outer_mtu)
2491 struct xfrm_iptfs_data *xtfs = x->mode_data;
2493 /* If not in dont-frag mode there is no fixed inner MTU limit */
2494 if (!xtfs->cfg.dont_frag)
2495 return x->outer_mode.family == AF_INET ? IP_MAX_MTU : IP6_MAX_MTU;
2496 return __iptfs_get_inner_mtu(x, outer_mtu);
2500 * iptfs_user_init() - initialize the SA with IPTFS options from netlink.
2501 * @net: the net data
2503 * @attrs: netlink attributes
2504 * @extack: extack return data
2506 * Return: 0 on success or a negative error code on failure
2508 static int iptfs_user_init(struct net *net, struct xfrm_state *x,
2509 struct nlattr **attrs,
2510 struct netlink_ext_ack *extack)
2512 struct xfrm_iptfs_data *xtfs = x->mode_data;
2513 struct xfrm_iptfs_config *xc;
2517 xc->max_queue_size = IPTFS_DEFAULT_MAX_QUEUE_SIZE;
2518 xc->reorder_win_size = IPTFS_DEFAULT_REORDER_WINDOW;
2519 xtfs->drop_time_ns = IPTFS_DEFAULT_DROP_TIME_USECS * NSECS_IN_USEC;
2520 xtfs->init_delay_ns = IPTFS_DEFAULT_INIT_DELAY_USECS * NSECS_IN_USEC;
2522 if (attrs[XFRMA_IPTFS_DONT_FRAG])
2523 xc->dont_frag = true;
2524 if (attrs[XFRMA_IPTFS_REORDER_WINDOW])
2525 xc->reorder_win_size =
2526 nla_get_u16(attrs[XFRMA_IPTFS_REORDER_WINDOW]);
2527 /* saved array is for saving 1..N seq nums from wantseq */
2528 if (xc->reorder_win_size) {
2529 xtfs->w_saved = kcalloc(xc->reorder_win_size,
2530 sizeof(*xtfs->w_saved), GFP_KERNEL);
2531 if (!xtfs->w_saved) {
2532 NL_SET_ERR_MSG(extack, "Cannot alloc reorder window");
2536 if (attrs[XFRMA_IPTFS_PKT_SIZE]) {
2537 xc->pkt_size = nla_get_u32(attrs[XFRMA_IPTFS_PKT_SIZE]);
2538 if (!xc->pkt_size) {
2539 xtfs->payload_mtu = 0;
2540 } else if (xc->pkt_size > x->props.header_len) {
2541 xtfs->payload_mtu = xc->pkt_size - x->props.header_len;
2543 NL_SET_ERR_MSG(extack,
2544 "Packet size must be 0 or greater than IPTFS/ESP header length");
2548 if (attrs[XFRMA_IPTFS_MAX_QSIZE])
2549 xc->max_queue_size = nla_get_u32(attrs[XFRMA_IPTFS_MAX_QSIZE]);
2550 if (attrs[XFRMA_IPTFS_DROP_TIME])
2551 xtfs->drop_time_ns =
2552 (u64)nla_get_u32(attrs[XFRMA_IPTFS_DROP_TIME]) *
2554 if (attrs[XFRMA_IPTFS_INIT_DELAY])
2555 xtfs->init_delay_ns =
2556 (u64)nla_get_u32(attrs[XFRMA_IPTFS_INIT_DELAY]) * NSECS_IN_USEC;
2558 q = (u64)xc->max_queue_size * 95;
2560 xtfs->ecn_queue_size = (u32)q;
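/* Worked example: with the default max_queue_size of 1024 * 10240 octets
 * (10,485,760), the threshold computed above works out to 9,961,472 octets,
 * i.e. 95% of the maximum; once the output queue grows past this point newly
 * queued packets become candidates for ECN CE marking.
 */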
2565 static unsigned int iptfs_sa_len(const struct xfrm_state *x)
2567 struct xfrm_iptfs_data *xtfs = x->mode_data;
2568 struct xfrm_iptfs_config *xc = &xtfs->cfg;
2571 if (x->dir == XFRM_SA_DIR_IN) {
2572 l += nla_total_size(sizeof(u32)); /* drop time usec */
2573 l += nla_total_size(sizeof(xc->reorder_win_size));
2576 l += nla_total_size(0); /* dont-frag flag */
2577 l += nla_total_size(sizeof(u32)); /* init delay usec */
2578 l += nla_total_size(sizeof(xc->max_queue_size));
2579 l += nla_total_size(sizeof(xc->pkt_size));
2585 static int iptfs_copy_to_user(struct xfrm_state *x, struct sk_buff *skb)
2587 struct xfrm_iptfs_data *xtfs = x->mode_data;
2588 struct xfrm_iptfs_config *xc = &xtfs->cfg;
2592 if (x->dir == XFRM_SA_DIR_IN) {
2593 q = xtfs->drop_time_ns;
2594 do_div(q, NSECS_IN_USEC);
2595 ret = nla_put_u32(skb, XFRMA_IPTFS_DROP_TIME, q);
2599 ret = nla_put_u16(skb, XFRMA_IPTFS_REORDER_WINDOW,
2600 xc->reorder_win_size);
2602 if (xc->dont_frag) {
2603 ret = nla_put_flag(skb, XFRMA_IPTFS_DONT_FRAG);
2608 q = xtfs->init_delay_ns;
2609 do_div(q, NSECS_IN_USEC);
2610 ret = nla_put_u32(skb, XFRMA_IPTFS_INIT_DELAY, q);
2614 ret = nla_put_u32(skb, XFRMA_IPTFS_MAX_QSIZE, xc->max_queue_size);
2618 ret = nla_put_u32(skb, XFRMA_IPTFS_PKT_SIZE, xc->pkt_size);
2624 static void __iptfs_init_state(struct xfrm_state *x,
2625 struct xfrm_iptfs_data *xtfs)
2627 __skb_queue_head_init(&xtfs->queue);
2628 hrtimer_init(&xtfs->iptfs_timer, CLOCK_MONOTONIC, IPTFS_HRTIMER_MODE);
2629 xtfs->iptfs_timer.function = iptfs_delay_timer;
2631 spin_lock_init(&xtfs->drop_lock);
2632 hrtimer_init(&xtfs->drop_timer, CLOCK_MONOTONIC, IPTFS_HRTIMER_MODE);
2633 xtfs->drop_timer.function = iptfs_drop_timer;
2635 /* Modify type (esp) adjustment values */
2637 if (x->props.family == AF_INET)
2638 x->props.header_len += sizeof(struct iphdr) + sizeof(struct ip_iptfs_hdr);
2639 else if (x->props.family == AF_INET6)
2640 x->props.header_len += sizeof(struct ipv6hdr) + sizeof(struct ip_iptfs_hdr);
2641 x->props.enc_hdr_len = sizeof(struct ip_iptfs_hdr);
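/* Worked example, assuming the 4-octet basic IPTFS header: for an IPv4 outer
 * family the adjustment above adds sizeof(struct iphdr) (20) + 4 = 24 octets
 * on top of the ESP overhead already in x->props.header_len, while
 * enc_hdr_len records just the IPTFS header portion, which the encap
 * functions above subtract when placing the outer IP header.
 */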
2643 /* Always keep a module reference when x->mode_data is set */
2644 __module_get(x->mode_cbs->owner);
2646 x->mode_data = xtfs;
2650 static int iptfs_clone_state(struct xfrm_state *x, struct xfrm_state *orig)
2652 struct xfrm_iptfs_data *xtfs;
2654 xtfs = kmemdup(orig->mode_data, sizeof(*xtfs), GFP_KERNEL);
2658 x->mode_data = xtfs;
2661 xtfs->ra_newskb = NULL;
2662 if (xtfs->cfg.reorder_win_size) {
2663 xtfs->w_saved = kcalloc(xtfs->cfg.reorder_win_size,
2664 sizeof(*xtfs->w_saved), GFP_KERNEL);
2665 if (!xtfs->w_saved) {
2666 kfree_sensitive(xtfs);
2674 static int iptfs_init_state(struct xfrm_state *x)
2676 struct xfrm_iptfs_data *xtfs;
2679 /* We have arrived here from xfrm_state_clone() */
2680 xtfs = x->mode_data;
2682 xtfs = kzalloc(sizeof(*xtfs), GFP_KERNEL);
2687 __iptfs_init_state(x, xtfs);
2692 static void iptfs_destroy_state(struct xfrm_state *x)
2694 struct xfrm_iptfs_data *xtfs = x->mode_data;
2695 struct sk_buff_head list;
2696 struct skb_wseq *s, *se;
2697 struct sk_buff *skb;
2702 spin_lock_bh(&xtfs->x->lock);
2703 hrtimer_cancel(&xtfs->iptfs_timer);
2704 __skb_queue_head_init(&list);
2705 skb_queue_splice_init(&xtfs->queue, &list);
2706 spin_unlock_bh(&xtfs->x->lock);
2708 while ((skb = __skb_dequeue(&list)))
2711 spin_lock_bh(&xtfs->drop_lock);
2712 hrtimer_cancel(&xtfs->drop_timer);
2713 spin_unlock_bh(&xtfs->drop_lock);
2715 if (xtfs->ra_newskb)
2716 kfree_skb(xtfs->ra_newskb);
2718 for (s = xtfs->w_saved, se = s + xtfs->w_savedlen; s < se; s++) {
2723 kfree_sensitive(xtfs->w_saved);
2724 kfree_sensitive(xtfs);
2726 module_put(x->mode_cbs->owner);
2729 static const struct xfrm_mode_cbs iptfs_mode_cbs = {
2730 .owner = THIS_MODULE,
2731 .init_state = iptfs_init_state,
2732 .clone_state = iptfs_clone_state,
2733 .destroy_state = iptfs_destroy_state,
2734 .user_init = iptfs_user_init,
2735 .copy_to_user = iptfs_copy_to_user,
2736 .sa_len = iptfs_sa_len,
2737 .get_inner_mtu = iptfs_get_inner_mtu,
2738 .input = iptfs_input,
2739 .output = iptfs_output_collect,
2740 .prepare_output = iptfs_prepare_output,
2743 static int __init xfrm_iptfs_init(void)
2747 pr_info("xfrm_iptfs: IPsec IP-TFS tunnel mode module\n");
2749 err = xfrm_register_mode_cbs(XFRM_MODE_IPTFS, &iptfs_mode_cbs);
2751 pr_info("%s: can't register IP-TFS\n", __func__);
2756 static void __exit xfrm_iptfs_fini(void)
2758 xfrm_unregister_mode_cbs(XFRM_MODE_IPTFS);
2761 module_init(xfrm_iptfs_init);
2762 module_exit(xfrm_iptfs_fini);
2763 MODULE_LICENSE("GPL");
2764 MODULE_DESCRIPTION("IP-TFS support for xfrm ipsec tunnels");