// SPDX-License-Identifier: GPL-2.0-or-later
/* SCTP kernel implementation
 * (C) Copyright IBM Corp. 2001, 2004
 * Copyright (c) 1999-2000 Cisco, Inc.
 * Copyright (c) 1999-2001 Motorola, Inc.
 *
 * This file is part of the SCTP kernel implementation
 *
 * These functions handle output processing.
 *
 * Please send any bug reports or fixes you make to the
 * email address(es):
 *    lksctp developers <linux-sctp@vger.kernel.org>
 *
 * Written or modified by:
 *    La Monte H.P. Yarroll <piggy@acm.org>
 *    Karl Knutson          <karl@athena.chicago.il.us>
 *    Jon Grimm             <jgrimm@austin.ibm.com>
 *    Sridhar Samudrala     <sri@us.ibm.com>
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/wait.h>
#include <linux/time.h>
#include <linux/ip.h>
#include <linux/ipv6.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <net/inet_ecn.h>
#include <net/ip.h>
#include <net/icmp.h>
#include <net/net_namespace.h>

#include <linux/socket.h> /* for sa_family_t */
#include <net/sock.h>

#include <net/sctp/sctp.h>
#include <net/sctp/sm.h>
#include <net/sctp/checksum.h>

/* Forward declarations for private helpers. */
static enum sctp_xmit __sctp_packet_append_chunk(struct sctp_packet *packet,
						 struct sctp_chunk *chunk);
static enum sctp_xmit sctp_packet_can_append_data(struct sctp_packet *packet,
						  struct sctp_chunk *chunk);
static void sctp_packet_append_data(struct sctp_packet *packet,
				    struct sctp_chunk *chunk);
static enum sctp_xmit sctp_packet_will_fit(struct sctp_packet *packet,
					   struct sctp_chunk *chunk,
					   u16 chunk_len);

static void sctp_packet_reset(struct sctp_packet *packet)
{
	/* sctp_packet_transmit() relies on this to reset size to the
	 * current overhead after sending packets.
	 */
	packet->size = packet->overhead;

	packet->has_cookie_echo = 0;
	packet->has_sack = 0;
	packet->has_data = 0;
	packet->has_auth = 0;
	packet->ipfragok = 0;
	packet->auth = NULL;
}
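
/* Note: the reset above clears only the per-packet bookkeeping (the size
 * and the has_cookie_echo/has_sack/has_data/has_auth/ipfragok flags); the
 * transport, ports and computed overhead are preserved so the structure
 * can be reused for the next flush.
 */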

/* Config a packet.
 * This appears to be a followup set of initializations.
 */
void sctp_packet_config(struct sctp_packet *packet, __u32 vtag,
			int ecn_capable)
{
	struct sctp_transport *tp = packet->transport;
	struct sctp_association *asoc = tp->asoc;
	struct sctp_sock *sp = NULL;
	struct sock *sk;

	pr_debug("%s: packet:%p vtag:0x%x\n", __func__, packet, vtag);
	packet->vtag = vtag;

	/* do the following jobs only once for a flush schedule */
	if (!sctp_packet_empty(packet))
		return;

	/* set packet max_size with pathmtu, then calculate overhead */
	packet->max_size = tp->pathmtu;

	if (asoc) {
		sk = asoc->base.sk;
		sp = sctp_sk(sk);
	}
	packet->overhead = sctp_mtu_payload(sp, 0, 0);
	packet->size = packet->overhead;

	if (!asoc)
		return;

	/* update dst or transport pathmtu if in need */
	if (!sctp_transport_dst_check(tp)) {
		sctp_transport_route(tp, NULL, sp);
		if (asoc->param_flags & SPP_PMTUD_ENABLE)
			sctp_assoc_sync_pmtu(asoc);
	} else if (!sctp_transport_pmtu_check(tp)) {
		if (asoc->param_flags & SPP_PMTUD_ENABLE)
			sctp_assoc_sync_pmtu(asoc);
	}

	if (asoc->pmtu_pending) {
		if (asoc->param_flags & SPP_PMTUD_ENABLE)
			sctp_assoc_sync_pmtu(asoc);
		asoc->pmtu_pending = 0;
	}

	/* If there is a prepend chunk, stick it on the list before
	 * any other chunks get appended.
	 */
	if (ecn_capable) {
		struct sctp_chunk *chunk = sctp_get_ecne_prepend(asoc);

		if (chunk)
			sctp_packet_append_chunk(packet, chunk);
	}

	if (!tp->dst)
		return;

	/* set packet max_size with gso_max_size if gso is enabled */
	rcu_read_lock();
	if (__sk_dst_get(sk) != tp->dst) {
		dst_hold(tp->dst);
		sk_setup_caps(sk, tp->dst);
	}
	packet->max_size = sk_can_gso(sk) ? tp->dst->dev->gso_max_size
					  : asoc->pathmtu;
	rcu_read_unlock();
}
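
/* Note: when GSO is available the packet may grow up to the device's
 * gso_max_size and is later cut back into PMTU-sized sub-frames by
 * sctp_packet_pack(); without GSO, max_size stays at the pathmtu chosen
 * above.
 */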

/* Initialize the packet structure. */
void sctp_packet_init(struct sctp_packet *packet,
		      struct sctp_transport *transport,
		      __u16 sport, __u16 dport)
{
	pr_debug("%s: packet:%p transport:%p\n", __func__, packet, transport);

	packet->transport = transport;
	packet->source_port = sport;
	packet->destination_port = dport;
	INIT_LIST_HEAD(&packet->chunk_list);
	/* The overhead will be calculated by sctp_packet_config() */
	packet->overhead = 0;
	sctp_packet_reset(packet);
	packet->vtag = 0;
}
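
/* A rough sketch of the expected calling sequence, as driven by the
 * outqueue flushing code (error handling and locking omitted):
 *
 *	sctp_packet_init(&pkt, transport, sport, dport);
 *	sctp_packet_config(&pkt, vtag, ecn_capable);
 *	sctp_packet_transmit_chunk(&pkt, chunk, 0, GFP_ATOMIC);
 *	sctp_packet_transmit(&pkt, GFP_ATOMIC);
 *	sctp_packet_free(&pkt);
 */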

/* Free a packet. */
void sctp_packet_free(struct sctp_packet *packet)
{
	struct sctp_chunk *chunk, *tmp;

	pr_debug("%s: packet:%p\n", __func__, packet);

	list_for_each_entry_safe(chunk, tmp, &packet->chunk_list, list) {
		list_del_init(&chunk->list);
		sctp_chunk_free(chunk);
	}
}

/* This routine tries to append the chunk to the offered packet. If adding
 * the chunk causes the packet to exceed the path MTU and a COOKIE_ECHO
 * chunk is not present in the packet, it transmits the input packet.
 * Data can be bundled with a packet containing a COOKIE_ECHO chunk as long
 * as it can fit in the packet, but any more data that does not fit in this
 * packet can be sent only after receiving the COOKIE_ACK.
 */
enum sctp_xmit sctp_packet_transmit_chunk(struct sctp_packet *packet,
					  struct sctp_chunk *chunk,
					  int one_packet, gfp_t gfp)
{
	enum sctp_xmit retval;

	pr_debug("%s: packet:%p size:%zu chunk:%p size:%d\n", __func__,
		 packet, packet->size, chunk, chunk->skb ? chunk->skb->len : -1);

	switch ((retval = (sctp_packet_append_chunk(packet, chunk)))) {
	case SCTP_XMIT_PMTU_FULL:
		if (!packet->has_cookie_echo) {
			int error = 0;

			error = sctp_packet_transmit(packet, gfp);
			if (error < 0)
				chunk->skb->sk->sk_err = -error;

			/* If we have an empty packet, then we can NOT ever
			 * return PMTU_FULL.
			 */
			if (!one_packet)
				retval = sctp_packet_append_chunk(packet,
								  chunk);
		}
		break;

	case SCTP_XMIT_RWND_FULL:
	case SCTP_XMIT_OK:
	case SCTP_XMIT_DELAY:
		break;
	}

	return retval;
}
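
/* Note: on SCTP_XMIT_PMTU_FULL the packet is flushed onto the wire and,
 * unless the caller asked for a single packet, the chunk is retried
 * against the now-empty packet. RWND_FULL and DELAY are passed straight
 * back for the caller to handle.
 */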

/* Try to bundle an auth chunk into the packet. */
static enum sctp_xmit sctp_packet_bundle_auth(struct sctp_packet *pkt,
					      struct sctp_chunk *chunk)
{
	struct sctp_association *asoc = pkt->transport->asoc;
	enum sctp_xmit retval = SCTP_XMIT_OK;
	struct sctp_chunk *auth;

	/* if we don't have an association, we can't do authentication */
	if (!asoc)
		return retval;

	/* See if this is an auth chunk we are bundling or if
	 * auth is already bundled.
	 */
	if (chunk->chunk_hdr->type == SCTP_CID_AUTH || pkt->has_auth)
		return retval;

	/* if the peer did not request this chunk to be authenticated,
	 * don't do it
	 */
	if (!chunk->auth)
		return retval;

	auth = sctp_make_auth(asoc, chunk->shkey->key_id);
	if (!auth)
		return retval;

	auth->shkey = chunk->shkey;
	sctp_auth_shkey_hold(auth->shkey);

	retval = __sctp_packet_append_chunk(pkt, auth);

	if (retval != SCTP_XMIT_OK)
		sctp_chunk_free(auth);

	return retval;
}
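
/* Note: the AUTH chunk is built on demand here and is owned by the packet
 * once appended; if the append fails (for example, it would not fit), it
 * is freed immediately instead of being leaked.
 */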

/* Try to bundle a SACK with the packet. */
static enum sctp_xmit sctp_packet_bundle_sack(struct sctp_packet *pkt,
					      struct sctp_chunk *chunk)
{
	enum sctp_xmit retval = SCTP_XMIT_OK;

	/* If sending DATA and haven't already bundled a SACK, try to
	 * bundle one in to the packet.
	 */
	if (sctp_chunk_is_data(chunk) && !pkt->has_sack &&
	    !pkt->has_cookie_echo) {
		struct sctp_association *asoc;
		struct timer_list *timer;
		asoc = pkt->transport->asoc;
		timer = &asoc->timers[SCTP_EVENT_TIMEOUT_SACK];

		/* If the SACK timer is running, we have a pending SACK */
		if (timer_pending(timer)) {
			struct sctp_chunk *sack;

			if (pkt->transport->sack_generation !=
			    pkt->transport->asoc->peer.sack_generation)
				return retval;

			asoc->a_rwnd = asoc->rwnd;
			sack = sctp_make_sack(asoc);
			if (sack) {
				retval = __sctp_packet_append_chunk(pkt, sack);
				if (retval != SCTP_XMIT_OK) {
					sctp_chunk_free(sack);
					goto out;
				}
				asoc->peer.sack_needed = 0;
				if (del_timer(timer))
					sctp_association_put(asoc);
			}
		}
	}
out:
	return retval;
}
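
/* Note: the sack_generation comparison above appears to exist for
 * multi-homing: a transport whose generation lags the association's
 * current one skips bundling rather than emit a stale SACK.
 */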

/* Append a chunk to the offered packet reporting back any inability to do
 * so.
 */
static enum sctp_xmit __sctp_packet_append_chunk(struct sctp_packet *packet,
						 struct sctp_chunk *chunk)
{
	__u16 chunk_len = SCTP_PAD4(ntohs(chunk->chunk_hdr->length));
	enum sctp_xmit retval = SCTP_XMIT_OK;

	/* Check to see if this chunk will fit into the packet */
	retval = sctp_packet_will_fit(packet, chunk, chunk_len);
	if (retval != SCTP_XMIT_OK)
		goto finish;

	/* We believe that this chunk is OK to add to the packet */
	switch (chunk->chunk_hdr->type) {
	case SCTP_CID_DATA:
	case SCTP_CID_I_DATA:
		/* Account for the data being in the packet */
		sctp_packet_append_data(packet, chunk);
		/* Disallow SACK bundling after DATA. */
		packet->has_sack = 1;
		/* Disallow AUTH bundling after DATA */
		packet->has_auth = 1;
		/* Let it be known that the packet has DATA in it */
		packet->has_data = 1;
		/* timestamp the chunk for rtx purposes */
		chunk->sent_at = jiffies;
		/* Mainly used for prsctp RTX policy */
		chunk->sent_count++;
		break;
	case SCTP_CID_COOKIE_ECHO:
		packet->has_cookie_echo = 1;
		break;

	case SCTP_CID_SACK:
		packet->has_sack = 1;
		if (chunk->asoc)
			chunk->asoc->stats.osacks++;
		break;

	case SCTP_CID_AUTH:
		packet->has_auth = 1;
		packet->auth = chunk;
		break;
	}

	/* It is OK to send this chunk. */
	list_add_tail(&chunk->list, &packet->chunk_list);
	packet->size += chunk_len;
	chunk->transport = packet->transport;

finish:
	return retval;
}

/* Append a chunk to the offered packet reporting back any inability to do
 * so.
 */
enum sctp_xmit sctp_packet_append_chunk(struct sctp_packet *packet,
					struct sctp_chunk *chunk)
{
	enum sctp_xmit retval = SCTP_XMIT_OK;

	pr_debug("%s: packet:%p chunk:%p\n", __func__, packet, chunk);

	/* Data chunks are special.  Before seeing what else we can
	 * bundle into this packet, check to see if we are allowed to
	 * send this DATA.
	 */
	if (sctp_chunk_is_data(chunk)) {
		retval = sctp_packet_can_append_data(packet, chunk);
		if (retval != SCTP_XMIT_OK)
			goto finish;
	}

	/* Try to bundle AUTH chunk */
	retval = sctp_packet_bundle_auth(packet, chunk);
	if (retval != SCTP_XMIT_OK)
		goto finish;

	/* Try to bundle SACK chunk */
	retval = sctp_packet_bundle_sack(packet, chunk);
	if (retval != SCTP_XMIT_OK)
		goto finish;

	retval = __sctp_packet_append_chunk(packet, chunk);

finish:
	return retval;
}
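
/* Note: the bundling order above matters: AUTH is appended first so it
 * precedes the chunks it will cover, and a pending SACK is piggybacked
 * ahead of the DATA chunk that triggered this call.
 */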

static void sctp_packet_gso_append(struct sk_buff *head, struct sk_buff *skb)
{
	if (SCTP_OUTPUT_CB(head)->last == head)
		skb_shinfo(head)->frag_list = skb;
	else
		SCTP_OUTPUT_CB(head)->last->next = skb;
	SCTP_OUTPUT_CB(head)->last = skb;

	head->truesize += skb->truesize;
	head->data_len += skb->len;
	head->len += skb->len;
	refcount_add(skb->truesize, &head->sk->sk_wmem_alloc);

	__skb_header_release(skb);
}
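
/* Note: sub-frames are chained on the head skb's frag_list and their
 * len/truesize accounted to the head, so the whole GSO frame is charged
 * to the socket's write allocation as a single unit.
 */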

static int sctp_packet_pack(struct sctp_packet *packet,
			    struct sk_buff *head, int gso, gfp_t gfp)
{
	struct sctp_transport *tp = packet->transport;
	struct sctp_auth_chunk *auth = NULL;
	struct sctp_chunk *chunk, *tmp;
	int pkt_count = 0, pkt_size;
	struct sock *sk = head->sk;
	struct sk_buff *nskb;
	int auth_len = 0;

	if (gso) {
		skb_shinfo(head)->gso_type = sk->sk_gso_type;
		SCTP_OUTPUT_CB(head)->last = head;
	} else {
		nskb = head;
		pkt_size = packet->size;
		goto merge;
	}

	do {
		/* calculate the pkt_size and alloc nskb */
		pkt_size = packet->overhead;
		list_for_each_entry_safe(chunk, tmp, &packet->chunk_list,
					 list) {
			int padded = SCTP_PAD4(chunk->skb->len);

			if (chunk == packet->auth)
				auth_len = padded;
			else if (auth_len + padded + packet->overhead >
				 tp->pathmtu)
				return 0;
			else if (pkt_size + padded > tp->pathmtu)
				break;
			pkt_size += padded;
		}
		nskb = alloc_skb(pkt_size + MAX_HEADER, gfp);
		if (!nskb)
			return 0;
		skb_reserve(nskb, packet->overhead + MAX_HEADER);

merge:
		/* merge chunks into nskb and append nskb into head list */
		pkt_size -= packet->overhead;
		list_for_each_entry_safe(chunk, tmp, &packet->chunk_list, list) {
			int padding;

			list_del_init(&chunk->list);
			if (sctp_chunk_is_data(chunk)) {
				if (!sctp_chunk_retransmitted(chunk) &&
				    !tp->rto_pending) {
					chunk->rtt_in_progress = 1;
					tp->rto_pending = 1;
				}
			}

			padding = SCTP_PAD4(chunk->skb->len) - chunk->skb->len;
			if (padding)
				skb_put_zero(chunk->skb, padding);

			if (chunk == packet->auth)
				auth = (struct sctp_auth_chunk *)
						skb_tail_pointer(nskb);

			skb_put_data(nskb, chunk->skb->data, chunk->skb->len);

			pr_debug("*** Chunk:%p[%s] %s 0x%x, length:%d, chunk->skb->len:%d, rtt_in_progress:%d\n",
				 chunk,
				 sctp_cname(SCTP_ST_CHUNK(chunk->chunk_hdr->type)),
				 chunk->has_tsn ? "TSN" : "No TSN",
				 chunk->has_tsn ? ntohl(chunk->subh.data_hdr->tsn) : 0,
				 ntohs(chunk->chunk_hdr->length), chunk->skb->len,
				 chunk->rtt_in_progress);

			pkt_size -= SCTP_PAD4(chunk->skb->len);

			if (!sctp_chunk_is_data(chunk) && chunk != packet->auth)
				sctp_chunk_free(chunk);

			if (!pkt_size)
				break;
		}

		if (auth) {
			sctp_auth_calculate_hmac(tp->asoc, nskb, auth,
						 packet->auth->shkey, gfp);
			/* free auth if no more chunks, or add it back */
			if (list_empty(&packet->chunk_list))
				sctp_chunk_free(packet->auth);
			else
				list_add(&packet->auth->list,
					 &packet->chunk_list);
		}

		if (gso)
			sctp_packet_gso_append(head, nskb);

		pkt_count++;
	} while (!list_empty(&packet->chunk_list));

	if (gso) {
		memset(head->cb, 0, max(sizeof(struct inet_skb_parm),
					sizeof(struct inet6_skb_parm)));
		skb_shinfo(head)->gso_segs = pkt_count;
		skb_shinfo(head)->gso_size = GSO_BY_FRAGS;
		rcu_read_lock();
		if (skb_dst(head) != tp->dst) {
			dst_hold(tp->dst);
			sk_setup_caps(sk, tp->dst);
		}
		rcu_read_unlock();
		goto chksum;
	}

	if (sctp_checksum_disable)
		return 1;

	if (!(skb_dst(head)->dev->features & NETIF_F_SCTP_CRC) ||
	    dst_xfrm(skb_dst(head)) || packet->ipfragok) {
		struct sctphdr *sh =
			(struct sctphdr *)skb_transport_header(head);

		sh->checksum = sctp_compute_cksum(head, 0);
	} else {
chksum:
		head->ip_summed = CHECKSUM_PARTIAL;
		head->csum_not_inet = 1;
		head->csum_start = skb_transport_header(head) - head->head;
		head->csum_offset = offsetof(struct sctphdr, checksum);
	}

	return pkt_count;
}
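
/* Note: sctp_packet_pack() returns 0 on failure (allocation, or an
 * AUTH-covered bundle that cannot fit the path MTU) and otherwise the
 * number of packets built. On the GSO path the CRC32c is deferred via
 * CHECKSUM_PARTIAL; otherwise it is computed here unless the device
 * offloads it.
 */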

/* All packets are sent to the network through this function from
 * sctp_outq_tail().
 *
 * The return value is always 0 for now.
 */
int sctp_packet_transmit(struct sctp_packet *packet, gfp_t gfp)
{
	struct sctp_transport *tp = packet->transport;
	struct sctp_association *asoc = tp->asoc;
	struct sctp_chunk *chunk, *tmp;
	int pkt_count, gso = 0;
	struct dst_entry *dst;
	struct sk_buff *head;
	struct sctphdr *sh;
	struct sock *sk;

	pr_debug("%s: packet:%p\n", __func__, packet);
	if (list_empty(&packet->chunk_list))
		return 0;
	chunk = list_entry(packet->chunk_list.next, struct sctp_chunk, list);
	sk = chunk->skb->sk;

	/* check gso */
	if (packet->size > tp->pathmtu && !packet->ipfragok) {
		if (!sk_can_gso(sk)) {
			pr_err_once("Trying to GSO but underlying device doesn't support it.");
			goto out;
		}
		gso = 1;
	}

	/* alloc head skb */
	head = alloc_skb((gso ? packet->overhead : packet->size) +
			 MAX_HEADER, gfp);
	if (!head)
		goto out;
	skb_reserve(head, packet->overhead + MAX_HEADER);
	skb_set_owner_w(head, sk);

	/* set sctp header */
	sh = skb_push(head, sizeof(struct sctphdr));
	skb_reset_transport_header(head);
	sh->source = htons(packet->source_port);
	sh->dest = htons(packet->destination_port);
	sh->vtag = htonl(packet->vtag);
	sh->checksum = 0;

	/* drop packet if no dst */
	dst = dst_clone(tp->dst);
	if (!dst) {
		IP_INC_STATS(sock_net(sk), IPSTATS_MIB_OUTNOROUTES);
		kfree_skb(head);
		goto out;
	}
	skb_dst_set(head, dst);

	/* pack up chunks */
	pkt_count = sctp_packet_pack(packet, head, gso, gfp);
	if (!pkt_count) {
		kfree_skb(head);
		goto out;
	}
	pr_debug("***sctp_transmit_packet*** skb->len:%d\n", head->len);

	/* start autoclose timer */
	if (packet->has_data && sctp_state(asoc, ESTABLISHED) &&
	    asoc->timeouts[SCTP_EVENT_TIMEOUT_AUTOCLOSE]) {
		struct timer_list *timer =
			&asoc->timers[SCTP_EVENT_TIMEOUT_AUTOCLOSE];
		unsigned long timeout =
			asoc->timeouts[SCTP_EVENT_TIMEOUT_AUTOCLOSE];

		if (!mod_timer(timer, jiffies + timeout))
			sctp_association_hold(asoc);
	}

	/* sctp xmit */
	tp->af_specific->ecn_capable(sk);
	if (asoc) {
		asoc->stats.opackets += pkt_count;
		if (asoc->peer.last_sent_to != tp)
			asoc->peer.last_sent_to = tp;
	}
	head->ignore_df = packet->ipfragok;
	if (tp->dst_pending_confirm)
		skb_set_dst_pending_confirm(head, 1);
	/* neighbour should be confirmed on successful transmission or
	 * positive error
	 */
	if (tp->af_specific->sctp_xmit(head, tp) >= 0 &&
	    tp->dst_pending_confirm)
		tp->dst_pending_confirm = 0;

out:
	list_for_each_entry_safe(chunk, tmp, &packet->chunk_list, list) {
		list_del_init(&chunk->list);
		if (!sctp_chunk_is_data(chunk))
			sctp_chunk_free(chunk);
	}
	sctp_packet_reset(packet);
	return 0;
}
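
/* Note: the cleanup at the out label frees only control chunks still on
 * the list; DATA chunks stay referenced by the outqueue and are recovered
 * through the retransmission machinery instead.
 */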

/********************************************************************
 * 2nd Level Abstractions
 ********************************************************************/

/* This private function checks to see if a chunk can be added */
static enum sctp_xmit sctp_packet_can_append_data(struct sctp_packet *packet,
						  struct sctp_chunk *chunk)
{
	size_t datasize, rwnd, inflight, flight_size;
	struct sctp_transport *transport = packet->transport;
	struct sctp_association *asoc = transport->asoc;
	struct sctp_outq *q = &asoc->outqueue;

	/* RFC 2960 6.1  Transmission of DATA Chunks
	 *
	 * A) At any given time, the data sender MUST NOT transmit new data to
	 * any destination transport address if its peer's rwnd indicates
	 * that the peer has no buffer space (i.e. rwnd is 0, see Section
	 * 6.2.1).  However, regardless of the value of rwnd (including if it
	 * is 0), the data sender can always have one DATA chunk in flight to
	 * the receiver if allowed by cwnd (see rule B below).  This rule
	 * allows the sender to probe for a change in rwnd that the sender
	 * missed due to the SACK having been lost in transit from the data
	 * receiver to the data sender.
	 */

	rwnd = asoc->peer.rwnd;
	inflight = q->outstanding_bytes;
	flight_size = transport->flight_size;

	datasize = sctp_data_size(chunk);

	if (datasize > rwnd && inflight > 0)
		/* We have (at least) one data chunk in flight,
		 * so we can't fall back to rule 6.1 B).
		 */
		return SCTP_XMIT_RWND_FULL;

	/* RFC 2960 6.1  Transmission of DATA Chunks
	 *
	 * B) At any given time, the sender MUST NOT transmit new data
	 * to a given transport address if it has cwnd or more bytes
	 * of data outstanding to that transport address.
	 */
	/* RFC 7.2.4 & the Implementers Guide 2.8.
	 *
	 * 3) ...
	 * When a Fast Retransmit is being performed the sender SHOULD
	 * ignore the value of cwnd and SHOULD NOT delay retransmission.
	 */
	if (chunk->fast_retransmit != SCTP_NEED_FRTX &&
	    flight_size >= transport->cwnd)
		return SCTP_XMIT_RWND_FULL;

	/* Nagle's algorithm to solve small-packet problem:
	 * Inhibit the sending of new chunks when new outgoing data arrives
	 * if any previously transmitted data on the connection remains
	 * unacknowledged.
	 */

	if ((sctp_sk(asoc->base.sk)->nodelay || inflight == 0) &&
	    !asoc->force_delay)
		/* Nothing unacked */
		return SCTP_XMIT_OK;

	if (!sctp_packet_empty(packet))
		/* Append to packet */
		return SCTP_XMIT_OK;

	if (!sctp_state(asoc, ESTABLISHED))
		return SCTP_XMIT_OK;

	/* Check whether this chunk and all the rest of pending data will fit
	 * or delay in hopes of bundling a full sized packet.
	 */
	if (chunk->skb->len + q->out_qlen > transport->pathmtu -
	    packet->overhead - sctp_datachk_len(&chunk->asoc->stream) - 4)
		/* Enough data queued to fill a packet */
		return SCTP_XMIT_OK;

	/* Don't delay large message writes that may have been fragmented */
	if (!chunk->msg->can_delay)
		return SCTP_XMIT_OK;

	/* Defer until all data acked or packet full */
	return SCTP_XMIT_DELAY;
}
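
/* Note: the checks above apply, in order: the rwnd rule (RFC 2960 6.1 A),
 * the cwnd rule (6.1 B, relaxed during fast retransmit), and a
 * Nagle-style delay that only triggers for small writes on an otherwise
 * idle association.
 */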

/* This private function does the accounting when adding a DATA chunk */
static void sctp_packet_append_data(struct sctp_packet *packet,
				    struct sctp_chunk *chunk)
{
	struct sctp_transport *transport = packet->transport;
	size_t datasize = sctp_data_size(chunk);
	struct sctp_association *asoc = transport->asoc;
	u32 rwnd = asoc->peer.rwnd;

	/* Keep track of how many bytes are in flight over this transport. */
	transport->flight_size += datasize;

	/* Keep track of how many bytes are in flight to the receiver. */
	asoc->outqueue.outstanding_bytes += datasize;

	/* Update our view of the receiver's rwnd. */
	if (datasize < rwnd)
		rwnd -= datasize;
	else
		rwnd = 0;

	asoc->peer.rwnd = rwnd;
	sctp_chunk_assign_tsn(chunk);
	asoc->stream.si->assign_number(chunk);
}
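
/* Note: the rwnd bookkeeping above clamps at zero rather than
 * underflowing, and TSN/stream sequence numbers are assigned only at this
 * point, once the chunk is definitely being packed.
 */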

static enum sctp_xmit sctp_packet_will_fit(struct sctp_packet *packet,
					   struct sctp_chunk *chunk,
					   u16 chunk_len)
{
	enum sctp_xmit retval = SCTP_XMIT_OK;
	size_t psize, pmtu, maxsize;

	/* Don't bundle in this packet if this chunk's auth key doesn't
	 * match other chunks already enqueued on this packet. Also,
	 * don't bundle the chunk with auth key if other chunks in this
	 * packet don't have auth key.
	 */
	if ((packet->auth && chunk->shkey != packet->auth->shkey) ||
	    (!packet->auth && chunk->shkey &&
	     chunk->chunk_hdr->type != SCTP_CID_AUTH))
		return SCTP_XMIT_PMTU_FULL;

	psize = packet->size;
	if (packet->transport->asoc)
		pmtu = packet->transport->asoc->pathmtu;
	else
		pmtu = packet->transport->pathmtu;

	/* Decide if we need to fragment or resubmit later. */
	if (psize + chunk_len > pmtu) {
		/* It's OK to fragment at IP level if any one of the following
		 * is true:
		 *	1. The packet is empty (meaning this chunk is greater
		 *	   than the MTU)
		 *	2. The packet doesn't have any data in it yet and data
		 *	   requires authentication.
		 */
		if (sctp_packet_empty(packet) ||
		    (!packet->has_data && chunk->auth)) {
			/* We no longer do re-fragmentation.
			 * Just fragment at the IP layer, if we
			 * actually hit this condition
			 */
			packet->ipfragok = 1;
			goto out;
		}

		/* Similarly, if this chunk was built before a PMTU
		 * reduction, we have to fragment it at IP level now. So
		 * if the packet already contains something, we need to
		 * flush.
		 */
		maxsize = pmtu - packet->overhead;
		if (packet->auth)
			maxsize -= SCTP_PAD4(packet->auth->skb->len);
		if (chunk_len > maxsize)
			retval = SCTP_XMIT_PMTU_FULL;

		/* It is also okay to fragment if the chunk we are
		 * adding is a control chunk, but only if current packet
		 * is not a GSO one otherwise it causes fragmentation of
		 * a large frame. So in this case we allow the
		 * fragmentation by forcing it to be in a new packet.
		 */
		if (!sctp_chunk_is_data(chunk) && packet->has_data)
			retval = SCTP_XMIT_PMTU_FULL;

		if (psize + chunk_len > packet->max_size)
			/* Hit GSO/PMTU limit, gotta flush */
			retval = SCTP_XMIT_PMTU_FULL;

		if (!packet->transport->burst_limited &&
		    psize + chunk_len > (packet->transport->cwnd >> 1))
			/* Do not allow a single GSO packet to use more
			 * than half of cwnd.
			 */
			retval = SCTP_XMIT_PMTU_FULL;

		if (packet->transport->burst_limited &&
		    psize + chunk_len > (packet->transport->burst_limited >> 1))
			/* Do not allow a single GSO packet to use more
			 * than half of original cwnd.
			 */
			retval = SCTP_XMIT_PMTU_FULL;

		/* Otherwise it will fit in the GSO packet */
	}

out:
	return retval;
}