// SPDX-License-Identifier: GPL-2.0-or-later
/* SCTP kernel implementation
 * (C) Copyright IBM Corp. 2001, 2004
 * Copyright (c) 1999-2000 Cisco, Inc.
 * Copyright (c) 1999-2001 Motorola, Inc.
 *
 * This file is part of the SCTP kernel implementation.
 *
 * These functions handle output processing.
 *
 * Please send any bug reports or fixes you make to the
 * email address(es):
 *    lksctp developers <linux-sctp@vger.kernel.org>
 *
 * Written or modified by:
 */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/wait.h>
#include <linux/time.h>
#include <linux/ipv6.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <net/inet_ecn.h>
#include <net/net_namespace.h>
#include <linux/socket.h> /* for sa_family_t */
#include <net/sctp/sctp.h>
#include <net/sctp/sm.h>
#include <net/sctp/checksum.h>
/* Forward declarations for private helpers. */
static enum sctp_xmit __sctp_packet_append_chunk(struct sctp_packet *packet,
						 struct sctp_chunk *chunk);
static enum sctp_xmit sctp_packet_can_append_data(struct sctp_packet *packet,
						  struct sctp_chunk *chunk);
static void sctp_packet_append_data(struct sctp_packet *packet,
				    struct sctp_chunk *chunk);
static enum sctp_xmit sctp_packet_will_fit(struct sctp_packet *packet,
					   struct sctp_chunk *chunk,
					   u16 chunk_len);
static void sctp_packet_reset(struct sctp_packet *packet)
{
	/* sctp_packet_transmit() relies on this to reset size to the
	 * current overhead after sending packets.
	 */
	packet->size = packet->overhead;

	packet->has_cookie_echo = 0;
	packet->has_sack = 0;
	packet->has_auth = 0;
	packet->has_data = 0;
	packet->ipfragok = 0;
}
/* Config a packet.
 * This appears to be a followup set of initializations.
 */
void sctp_packet_config(struct sctp_packet *packet, __u32 vtag,
			int ecn_capable)
{
	struct sctp_transport *tp = packet->transport;
	struct sctp_association *asoc = tp->asoc;
	struct sctp_sock *sp = NULL;
	struct sock *sk;

	pr_debug("%s: packet:%p vtag:0x%x\n", __func__, packet, vtag);
	packet->vtag = vtag;

	/* do the following jobs only once for a flush schedule */
	if (!sctp_packet_empty(packet))
		return;

	/* set packet max_size with pathmtu, then calculate overhead */
	packet->max_size = tp->pathmtu;

	if (asoc) {
		sk = asoc->base.sk;
		sp = sctp_sk(sk);
	}
	packet->overhead = sctp_mtu_payload(sp, 0, 0);
	packet->size = packet->overhead;
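	/* For illustration: sctp_mtu_payload(sp, 0, 0) returns just the
	 * header overhead, e.g. roughly 20 (IPv4 header) + 12 (SCTP common
	 * header) = 32 bytes on a plain IPv4 path, or 40 + 12 = 52 on IPv6,
	 * so a freshly configured packet starts out holding headers only.
	 */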
	if (!asoc)
		return;

	/* update dst or transport pathmtu if in need */
	if (!sctp_transport_dst_check(tp)) {
		sctp_transport_route(tp, NULL, sp);
		if (asoc->param_flags & SPP_PMTUD_ENABLE)
			sctp_assoc_sync_pmtu(asoc);
	} else if (!sctp_transport_pmtu_check(tp)) {
		if (asoc->param_flags & SPP_PMTUD_ENABLE)
			sctp_assoc_sync_pmtu(asoc);
	}

	if (asoc->pmtu_pending) {
		if (asoc->param_flags & SPP_PMTUD_ENABLE)
			sctp_assoc_sync_pmtu(asoc);
		asoc->pmtu_pending = 0;
	}

	/* If there is a prepend chunk, stick it on the list before
	 * any other chunks get appended.
	 */
	if (ecn_capable) {
		struct sctp_chunk *chunk = sctp_get_ecne_prepend(asoc);

		if (chunk)
			sctp_packet_append_chunk(packet, chunk);
	}

	if (!tp->dst)
		return;

	/* set packet max_size with gso_max_size if gso is enabled */
	rcu_read_lock();
	if (__sk_dst_get(sk) != tp->dst) {
		dst_hold(tp->dst);
		sk_setup_caps(sk, tp->dst);
	}
	packet->max_size = sk_can_gso(sk) ? tp->dst->dev->gso_max_size
					  : asoc->pathmtu;
	rcu_read_unlock();
}
/* Initialize the packet structure. */
void sctp_packet_init(struct sctp_packet *packet,
		      struct sctp_transport *transport,
		      __u16 sport, __u16 dport)
{
	pr_debug("%s: packet:%p transport:%p\n", __func__, packet, transport);

	packet->transport = transport;
	packet->source_port = sport;
	packet->destination_port = dport;
	INIT_LIST_HEAD(&packet->chunk_list);
	/* The overhead will be calculated by sctp_packet_config() */
	packet->overhead = 0;
	sctp_packet_reset(packet);
	packet->vtag = 0;
}
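/* Rough usage sketch (illustrative only; in the kernel the output-queue
 * flush code drives this API):
 *
 *	sctp_packet_init(&pkt, transport, sport, dport);
 *	sctp_packet_config(&pkt, vtag, ecn_capable);
 *	(void)sctp_packet_transmit_chunk(&pkt, chunk, 0, GFP_ATOMIC);
 *	(void)sctp_packet_transmit(&pkt, GFP_ATOMIC);
 *
 * where vtag is the peer's verification tag for the association.
 */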
/* Free a packet. */
void sctp_packet_free(struct sctp_packet *packet)
{
	struct sctp_chunk *chunk, *tmp;

	pr_debug("%s: packet:%p\n", __func__, packet);

	list_for_each_entry_safe(chunk, tmp, &packet->chunk_list, list) {
		list_del_init(&chunk->list);
		sctp_chunk_free(chunk);
	}
}
/* This routine tries to append the chunk to the offered packet. If adding
 * the chunk causes the packet to exceed the path MTU and COOKIE_ECHO chunk
 * is not present in the packet, it transmits the input packet.
 * Data can be bundled with a packet containing a COOKIE_ECHO chunk as long
 * as it can fit in the packet, but any more data that does not fit in this
 * packet can be sent only after receiving the COOKIE_ACK.
 */
enum sctp_xmit sctp_packet_transmit_chunk(struct sctp_packet *packet,
					  struct sctp_chunk *chunk,
					  int one_packet, gfp_t gfp)
{
	enum sctp_xmit retval;

	pr_debug("%s: packet:%p size:%zu chunk:%p size:%d\n", __func__,
		 packet, packet->size, chunk, chunk->skb ? chunk->skb->len : -1);

	switch ((retval = (sctp_packet_append_chunk(packet, chunk)))) {
	case SCTP_XMIT_PMTU_FULL:
		if (!packet->has_cookie_echo) {
			int error = 0;

			error = sctp_packet_transmit(packet, gfp);
			if (error < 0)
				chunk->skb->sk->sk_err = -error;

			/* If we have an empty packet, then we can NOT ever
			 * return PMTU_FULL.
			 */
			if (!one_packet)
				retval = sctp_packet_append_chunk(packet,
								  chunk);
		}
		break;

	case SCTP_XMIT_RWND_FULL:
	case SCTP_XMIT_OK:
	case SCTP_XMIT_DELAY:
		break;
	}

	return retval;
}
/* Try to bundle an auth chunk into the packet. */
static enum sctp_xmit sctp_packet_bundle_auth(struct sctp_packet *pkt,
					      struct sctp_chunk *chunk)
{
	struct sctp_association *asoc = pkt->transport->asoc;
	enum sctp_xmit retval = SCTP_XMIT_OK;
	struct sctp_chunk *auth;

	/* if we don't have an association, we can't do authentication */
	if (!asoc)
		return retval;

	/* See if this is an auth chunk we are bundling or if
	 * auth is already bundled.
	 */
	if (chunk->chunk_hdr->type == SCTP_CID_AUTH || pkt->has_auth)
		return retval;

	/* if the peer did not request this chunk to be authenticated,
	 * don't do it
	 */
	if (!chunk->auth)
		return retval;

	auth = sctp_make_auth(asoc, chunk->shkey->key_id);
	if (!auth)
		return retval;

	auth->shkey = chunk->shkey;
	sctp_auth_shkey_hold(auth->shkey);

	retval = __sctp_packet_append_chunk(pkt, auth);
	if (retval != SCTP_XMIT_OK)
		sctp_chunk_free(auth);

	return retval;
}
/* Try to bundle a SACK with the packet. */
static enum sctp_xmit sctp_packet_bundle_sack(struct sctp_packet *pkt,
					      struct sctp_chunk *chunk)
{
	enum sctp_xmit retval = SCTP_XMIT_OK;

	/* If sending DATA and haven't already bundled a SACK, try to
	 * bundle one in to the packet.
	 */
	if (sctp_chunk_is_data(chunk) && !pkt->has_sack &&
	    !pkt->has_cookie_echo) {
		struct sctp_association *asoc;
		struct timer_list *timer;
		asoc = pkt->transport->asoc;
		timer = &asoc->timers[SCTP_EVENT_TIMEOUT_SACK];

		/* If the SACK timer is running, we have a pending SACK */
		if (timer_pending(timer)) {
			struct sctp_chunk *sack;

			if (pkt->transport->sack_generation !=
			    pkt->transport->asoc->peer.sack_generation)
				return retval;

			asoc->a_rwnd = asoc->rwnd;
			sack = sctp_make_sack(asoc);
			if (sack) {
				retval = __sctp_packet_append_chunk(pkt, sack);
				if (retval != SCTP_XMIT_OK) {
					sctp_chunk_free(sack);
					goto out;
				}
				SCTP_INC_STATS(asoc->base.net,
					       SCTP_MIB_OUTCTRLCHUNKS);
				asoc->stats.octrlchunks++;
				asoc->peer.sack_needed = 0;
				if (del_timer(timer))
					sctp_association_put(asoc);
			}
		}
	}
out:
	return retval;
}
/* Append a chunk to the offered packet reporting back any inability to do
 * so.
 */
static enum sctp_xmit __sctp_packet_append_chunk(struct sctp_packet *packet,
						 struct sctp_chunk *chunk)
{
	__u16 chunk_len = SCTP_PAD4(ntohs(chunk->chunk_hdr->length));
	enum sctp_xmit retval = SCTP_XMIT_OK;
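	/* Note that chunk_len is the padded, on-the-wire size: RFC 4960
	 * rounds every chunk up to a multiple of 4 bytes, so e.g. a chunk
	 * whose length field reads 17 consumes SCTP_PAD4(17) == 20 bytes
	 * of the packet.
	 */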
	/* Check to see if this chunk will fit into the packet */
	retval = sctp_packet_will_fit(packet, chunk, chunk_len);
	if (retval != SCTP_XMIT_OK)
		goto finish;

	/* We believe that this chunk is OK to add to the packet */
	switch (chunk->chunk_hdr->type) {
	case SCTP_CID_DATA:
	case SCTP_CID_I_DATA:
		/* Account for the data being in the packet */
		sctp_packet_append_data(packet, chunk);
		/* Disallow SACK bundling after DATA. */
		packet->has_sack = 1;
		/* Disallow AUTH bundling after DATA */
		packet->has_auth = 1;
		/* Let it be known that packet has DATA in it */
		packet->has_data = 1;
		/* timestamp the chunk for rtx purposes */
		chunk->sent_at = jiffies;
		/* Mainly used for prsctp RTX policy */
		chunk->sent_count++;
		break;

	case SCTP_CID_COOKIE_ECHO:
		packet->has_cookie_echo = 1;
		break;

	case SCTP_CID_SACK:
		packet->has_sack = 1;
		if (chunk->asoc)
			chunk->asoc->stats.osacks++;
		break;

	case SCTP_CID_AUTH:
		packet->has_auth = 1;
		packet->auth = chunk;
		break;
	}

	/* It is OK to send this chunk. */
	list_add_tail(&chunk->list, &packet->chunk_list);
	packet->size += chunk_len;
	chunk->transport = packet->transport;

finish:
	return retval;
}
/* Append a chunk to the offered packet reporting back any inability to do
 * so.
 */
enum sctp_xmit sctp_packet_append_chunk(struct sctp_packet *packet,
					struct sctp_chunk *chunk)
{
	enum sctp_xmit retval = SCTP_XMIT_OK;

	pr_debug("%s: packet:%p chunk:%p\n", __func__, packet, chunk);

	/* Data chunks are special. Before seeing what else we can
	 * bundle into this packet, check to see if we are allowed to
	 * bundle this chunk.
	 */
	if (sctp_chunk_is_data(chunk)) {
		retval = sctp_packet_can_append_data(packet, chunk);
		if (retval != SCTP_XMIT_OK)
			goto finish;
	}

	/* Try to bundle AUTH chunk */
	retval = sctp_packet_bundle_auth(packet, chunk);
	if (retval != SCTP_XMIT_OK)
		goto finish;

	/* Try to bundle SACK chunk */
	retval = sctp_packet_bundle_sack(packet, chunk);
	if (retval != SCTP_XMIT_OK)
		goto finish;

	retval = __sctp_packet_append_chunk(packet, chunk);

finish:
	return retval;
}
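/* Chain a freshly packed sub-packet onto a GSO head skb: the first one is
 * hung off the head's frag_list, later ones are linked after the previous
 * sub-packet, and the head's length/truesize accounting is updated.
 */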
static void sctp_packet_gso_append(struct sk_buff *head, struct sk_buff *skb)
{
	if (SCTP_OUTPUT_CB(head)->last == head)
		skb_shinfo(head)->frag_list = skb;
	else
		SCTP_OUTPUT_CB(head)->last->next = skb;
	SCTP_OUTPUT_CB(head)->last = skb;

	head->truesize += skb->truesize;
	head->data_len += skb->len;
	head->len += skb->len;
	refcount_add(skb->truesize, &head->sk->sk_wmem_alloc);

	__skb_header_release(skb);
}
static int sctp_packet_pack(struct sctp_packet *packet,
			    struct sk_buff *head, int gso, gfp_t gfp)
{
	struct sctp_transport *tp = packet->transport;
	struct sctp_auth_chunk *auth = NULL;
	struct sctp_chunk *chunk, *tmp;
	int pkt_count = 0, pkt_size;
	struct sock *sk = head->sk;
	struct sk_buff *nskb;
	int auth_len = 0;

	if (gso) {
		skb_shinfo(head)->gso_type = sk->sk_gso_type;
		SCTP_OUTPUT_CB(head)->last = head;
	} else {
		nskb = head;
		pkt_size = packet->size;
		goto merge;
	}

	do {
		/* calculate the pkt_size and alloc nskb */
		pkt_size = packet->overhead;
		list_for_each_entry_safe(chunk, tmp, &packet->chunk_list,
					 list) {
			int padded = SCTP_PAD4(chunk->skb->len);

			if (chunk == packet->auth)
				auth_len = padded;
			else if (auth_len + padded + packet->overhead >
				 tp->pathmtu)
				return 0;
			else if (pkt_size + padded > tp->pathmtu)
				break;
			pkt_size += padded;
		}

		nskb = alloc_skb(pkt_size + MAX_HEADER, gfp);
		if (!nskb)
			return 0;
		skb_reserve(nskb, packet->overhead + MAX_HEADER);

merge:
		/* merge chunks into nskb and append nskb into head list */
		pkt_size -= packet->overhead;
		list_for_each_entry_safe(chunk, tmp, &packet->chunk_list, list) {
			int padding;

			list_del_init(&chunk->list);
			if (sctp_chunk_is_data(chunk)) {
				if (!sctp_chunk_retransmitted(chunk) &&
				    !tp->rto_pending) {
					chunk->rtt_in_progress = 1;
					tp->rto_pending = 1;
				}
			}

			padding = SCTP_PAD4(chunk->skb->len) - chunk->skb->len;
			if (padding)
				skb_put_zero(chunk->skb, padding);

			if (chunk == packet->auth)
				auth = (struct sctp_auth_chunk *)
						skb_tail_pointer(nskb);

			skb_put_data(nskb, chunk->skb->data, chunk->skb->len);

			pr_debug("*** Chunk:%p[%s] %s 0x%x, length:%d, chunk->skb->len:%d, rtt_in_progress:%d\n",
				 chunk,
				 sctp_cname(SCTP_ST_CHUNK(chunk->chunk_hdr->type)),
				 chunk->has_tsn ? "TSN" : "No TSN",
				 chunk->has_tsn ? ntohl(chunk->subh.data_hdr->tsn) : 0,
				 ntohs(chunk->chunk_hdr->length), chunk->skb->len,
				 chunk->rtt_in_progress);

			pkt_size -= SCTP_PAD4(chunk->skb->len);

			if (!sctp_chunk_is_data(chunk) && chunk != packet->auth)
				sctp_chunk_free(chunk);

			if (!pkt_size)
				break;
		}

		if (auth) {
			sctp_auth_calculate_hmac(tp->asoc, nskb, auth,
						 packet->auth->shkey, gfp);
			/* free auth if no more chunks, or add it back */
			if (list_empty(&packet->chunk_list))
				sctp_chunk_free(packet->auth);
			else
				list_add(&packet->auth->list,
					 &packet->chunk_list);
		}

		if (gso)
			sctp_packet_gso_append(head, nskb);

		pkt_count++;
	} while (!list_empty(&packet->chunk_list));

	if (gso) {
		memset(head->cb, 0, max(sizeof(struct inet_skb_parm),
					sizeof(struct inet6_skb_parm)));
		skb_shinfo(head)->gso_segs = pkt_count;
		skb_shinfo(head)->gso_size = GSO_BY_FRAGS;
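		/* GSO_BY_FRAGS tells the segmentation code to split this skb
		 * along the frag_list boundaries built by
		 * sctp_packet_gso_append(), i.e. one sub-packet per fragment,
		 * rather than at a fixed gso_size.
		 */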
		goto chksum;
	}

	if (sctp_checksum_disable)
		return 1;

	if (!(tp->dst->dev->features & NETIF_F_SCTP_CRC) ||
	    dst_xfrm(tp->dst) || packet->ipfragok || tp->encap_port) {
		struct sctphdr *sh =
			(struct sctphdr *)skb_transport_header(head);

		sh->checksum = sctp_compute_cksum(head, 0);
	} else {
chksum:
		head->ip_summed = CHECKSUM_PARTIAL;
		head->csum_not_inet = 1;
		head->csum_start = skb_transport_header(head) - head->head;
		head->csum_offset = offsetof(struct sctphdr, checksum);
	}

	return pkt_count;
}
/* All packets are sent to the network through this function from
 * sctp_outq_tail().
 *
 * The return value is always 0 for now.
 */
int sctp_packet_transmit(struct sctp_packet *packet, gfp_t gfp)
{
	struct sctp_transport *tp = packet->transport;
	struct sctp_association *asoc = tp->asoc;
	struct sctp_chunk *chunk, *tmp;
	int pkt_count, gso = 0;
	struct sk_buff *head;
	struct sctphdr *sh;
	struct sock *sk;

	pr_debug("%s: packet:%p\n", __func__, packet);
	if (list_empty(&packet->chunk_list))
		return 0;
	chunk = list_entry(packet->chunk_list.next, struct sctp_chunk, list);
	sk = chunk->skb->sk;

	/* check gso */
	if (packet->size > tp->pathmtu && !packet->ipfragok) {
		if (!sk_can_gso(sk)) {
			pr_err_once("Trying to GSO but underlying device doesn't support it.");
			goto out;
		}
		gso = 1;
	}

	/* alloc head skb */
	head = alloc_skb((gso ? packet->overhead : packet->size) +
			 MAX_HEADER, gfp);
	if (!head)
		goto out;
	skb_reserve(head, packet->overhead + MAX_HEADER);
	skb_set_owner_w(head, sk);

	/* set sctp header */
	sh = skb_push(head, sizeof(struct sctphdr));
	skb_reset_transport_header(head);
	sh->source = htons(packet->source_port);
	sh->dest = htons(packet->destination_port);
	sh->vtag = htonl(packet->vtag);
	sh->checksum = 0;

	/* drop packet if no dst */
	if (!tp->dst) {
		IP_INC_STATS(sock_net(sk), IPSTATS_MIB_OUTNOROUTES);
		kfree_skb(head);
		goto out;
	}

	/* pack up chunks */
	pkt_count = sctp_packet_pack(packet, head, gso, gfp);
	if (!pkt_count) {
		kfree_skb(head);
		goto out;
	}
	pr_debug("***sctp_transmit_packet*** skb->len:%d\n", head->len);

	/* start autoclose timer */
	if (packet->has_data && sctp_state(asoc, ESTABLISHED) &&
	    asoc->timeouts[SCTP_EVENT_TIMEOUT_AUTOCLOSE]) {
		struct timer_list *timer =
			&asoc->timers[SCTP_EVENT_TIMEOUT_AUTOCLOSE];
		unsigned long timeout =
			asoc->timeouts[SCTP_EVENT_TIMEOUT_AUTOCLOSE];

		if (!mod_timer(timer, jiffies + timeout))
			sctp_association_hold(asoc);
	}

	/* sctp xmit */
	tp->af_specific->ecn_capable(sk);
	asoc->stats.opackets += pkt_count;
	if (asoc->peer.last_sent_to != tp)
		asoc->peer.last_sent_to = tp;
	head->ignore_df = packet->ipfragok;
	if (tp->dst_pending_confirm)
		skb_set_dst_pending_confirm(head, 1);
	/* neighbour should be confirmed on successful transmission or
	 * positive error
	 */
	if (tp->af_specific->sctp_xmit(head, tp) >= 0 &&
	    tp->dst_pending_confirm)
		tp->dst_pending_confirm = 0;

out:
	list_for_each_entry_safe(chunk, tmp, &packet->chunk_list, list) {
		list_del_init(&chunk->list);
		if (!sctp_chunk_is_data(chunk))
			sctp_chunk_free(chunk);
	}
	sctp_packet_reset(packet);
	return 0;
}
/********************************************************************
 * 2nd Level Abstractions
 ********************************************************************/
/* This private function checks to see if a chunk can be added */
static enum sctp_xmit sctp_packet_can_append_data(struct sctp_packet *packet,
						  struct sctp_chunk *chunk)
{
	size_t datasize, rwnd, inflight, flight_size;
	struct sctp_transport *transport = packet->transport;
	struct sctp_association *asoc = transport->asoc;
	struct sctp_outq *q = &asoc->outqueue;

	/* RFC 2960 6.1  Transmission of DATA Chunks
	 *
	 * A) At any given time, the data sender MUST NOT transmit new data to
	 * any destination transport address if its peer's rwnd indicates
	 * that the peer has no buffer space (i.e. rwnd is 0, see Section
	 * 6.2.1). However, regardless of the value of rwnd (including if it
	 * is 0), the data sender can always have one DATA chunk in flight to
	 * the receiver if allowed by cwnd (see rule B below). This rule
	 * allows the sender to probe for a change in rwnd that the sender
	 * missed due to the SACK having been lost in transit from the data
	 * receiver to the data sender.
	 */

	rwnd = asoc->peer.rwnd;
	inflight = q->outstanding_bytes;
	flight_size = transport->flight_size;

	datasize = sctp_data_size(chunk);

	if (datasize > rwnd && inflight > 0)
		/* We have (at least) one data chunk in flight,
		 * so we can't fall back to rule 6.1 B).
		 */
		return SCTP_XMIT_RWND_FULL;

	/* RFC 2960 6.1  Transmission of DATA Chunks
	 *
	 * B) At any given time, the sender MUST NOT transmit new data
	 * to a given transport address if it has cwnd or more bytes
	 * of data outstanding to that transport address.
	 */
	/* RFC 7.2.4 & the Implementers Guide 2.8.
	 *
	 * When a Fast Retransmit is being performed the sender SHOULD
	 * ignore the value of cwnd and SHOULD NOT delay retransmission.
	 */
	if (chunk->fast_retransmit != SCTP_NEED_FRTX &&
	    flight_size >= transport->cwnd)
		return SCTP_XMIT_RWND_FULL;

	/* Nagle's algorithm to solve small-packet problem:
	 * Inhibit the sending of new chunks when new outgoing data arrives
	 * if any previously transmitted data on the connection remains
	 * unacknowledged.
	 */
	if ((sctp_sk(asoc->base.sk)->nodelay || inflight == 0) &&
	    !asoc->force_delay)
		/* Nothing unacked */
		return SCTP_XMIT_OK;

	if (!sctp_packet_empty(packet))
		/* Append to packet */
		return SCTP_XMIT_OK;

	if (!sctp_state(asoc, ESTABLISHED))
		return SCTP_XMIT_OK;

	/* Check whether this chunk and all the rest of pending data will fit
	 * or delay in hopes of bundling a full sized packet.
	 */
	if (chunk->skb->len + q->out_qlen > transport->pathmtu -
	    packet->overhead - sctp_datachk_len(&chunk->asoc->stream) - 4)
		/* Enough data queued to fill a packet */
		return SCTP_XMIT_OK;

	/* Don't delay large message writes that may have been fragmented */
	if (!chunk->msg->can_delay)
		return SCTP_XMIT_OK;
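	/* For example: with Nagle in effect (no SCTP_NODELAY set), data
	 * already outstanding on the association, an established state and
	 * only a small, delayable user write queued, we fall through to the
	 * SCTP_XMIT_DELAY below in the hope of later bundling a full-MTU
	 * packet.
	 */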
	/* Defer until all data acked or packet full */
	return SCTP_XMIT_DELAY;
}
/* This private function does management things when adding DATA chunk */
static void sctp_packet_append_data(struct sctp_packet *packet,
				    struct sctp_chunk *chunk)
{
	struct sctp_transport *transport = packet->transport;
	size_t datasize = sctp_data_size(chunk);
	struct sctp_association *asoc = transport->asoc;
	u32 rwnd = asoc->peer.rwnd;

	/* Keep track of how many bytes are in flight over this transport. */
	transport->flight_size += datasize;

	/* Keep track of how many bytes are in flight to the receiver. */
	asoc->outqueue.outstanding_bytes += datasize;
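	/* For example, if the peer last advertised an rwnd of 10000 bytes and
	 * we append a 1200-byte DATA chunk, the local view kept below drops
	 * to 8800; once outstanding data reaches the advertised window the
	 * view is clamped to zero.
	 */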
	/* Update our view of the receiver's rwnd. */
	if (datasize < rwnd)
		rwnd -= datasize;
	else
		rwnd = 0;

	asoc->peer.rwnd = rwnd;
	sctp_chunk_assign_tsn(chunk);
	asoc->stream.si->assign_number(chunk);
}
static enum sctp_xmit sctp_packet_will_fit(struct sctp_packet *packet,
					   struct sctp_chunk *chunk,
					   u16 chunk_len)
{
	enum sctp_xmit retval = SCTP_XMIT_OK;
	size_t psize, pmtu, maxsize;

	/* Don't bundle in this packet if this chunk's auth key doesn't
	 * match other chunks already enqueued on this packet. Also,
	 * don't bundle the chunk with auth key if other chunks in this
	 * packet don't have auth key.
	 */
	if ((packet->auth && chunk->shkey != packet->auth->shkey) ||
	    (!packet->auth && chunk->shkey &&
	     chunk->chunk_hdr->type != SCTP_CID_AUTH))
		return SCTP_XMIT_PMTU_FULL;

	psize = packet->size;
	if (packet->transport->asoc)
		pmtu = packet->transport->asoc->pathmtu;
	else
		pmtu = packet->transport->pathmtu;

	/* Decide if we need to fragment or resubmit later. */
	if (psize + chunk_len > pmtu) {
		/* It's OK to fragment at IP level if any one of the following
		 * is true:
		 *	1. The packet is empty (meaning this chunk is greater
		 *	   than the MTU)
		 *	2. The packet doesn't have any data in it yet and data
		 *	   requires authentication.
		 */
		if (sctp_packet_empty(packet) ||
		    (!packet->has_data && chunk->auth)) {
			/* We no longer do re-fragmentation.
			 * Just fragment at the IP layer, if we
			 * actually hit this condition
			 */
			packet->ipfragok = 1;
			goto out;
		}

		/* Similarly, if this chunk was built before a PMTU
		 * reduction, we have to fragment it at IP level now. So
		 * if the packet already contains something, we need to
		 * flush.
		 */
		maxsize = pmtu - packet->overhead;
		if (packet->auth)
			maxsize -= SCTP_PAD4(packet->auth->skb->len);
		if (chunk_len > maxsize)
			retval = SCTP_XMIT_PMTU_FULL;

		/* It is also okay to fragment if the chunk we are
		 * adding is a control chunk, but only if current packet
		 * is not a GSO one otherwise it causes fragmentation of
		 * a large frame. So in this case we allow the
		 * fragmentation by forcing it to be in a new packet.
		 */
		if (!sctp_chunk_is_data(chunk) && packet->has_data)
			retval = SCTP_XMIT_PMTU_FULL;

		if (psize + chunk_len > packet->max_size)
			/* Hit GSO/PMTU limit, gotta flush */
			retval = SCTP_XMIT_PMTU_FULL;

		if (!packet->transport->burst_limited &&
		    psize + chunk_len > (packet->transport->cwnd >> 1))
			/* Do not allow a single GSO packet to use more
			 * than half of cwnd.
			 */
			retval = SCTP_XMIT_PMTU_FULL;

		if (packet->transport->burst_limited &&
		    psize + chunk_len > (packet->transport->burst_limited >> 1))
			/* Do not allow a single GSO packet to use more
			 * than half of original cwnd.
			 */
			retval = SCTP_XMIT_PMTU_FULL;

		/* Otherwise it will fit in the GSO packet */
	}

out:
	return retval;
}