// SPDX-License-Identifier: GPL-2.0-or-later
/* SCTP kernel implementation
 * (C) Copyright Red Hat Inc. 2017
 *
 * This file is part of the SCTP kernel implementation.
 *
 * These functions implement SCTP stream message interleaving, mostly
 * covering the processing of I-DATA and I-FORWARD-TSN chunks.
 *
 * Please send any bug reports or fixes you make to the
 * email address(es):
 *    lksctp developers <linux-sctp@vger.kernel.org>
 *
 * Written or modified by:
 *    Xin Long <lucien.xin@gmail.com>
 */

#include <net/busy_poll.h>
#include <net/sctp/sctp.h>
#include <net/sctp/sm.h>
#include <net/sctp/ulpevent.h>
#include <linux/sctp.h>

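/* Build an empty I-DATA chunk for one fragment of a user message: only
 * the stream id is filled in here; the MID and the PPID/FSN union are
 * assigned later by sctp_chunk_assign_mid(), once the whole message
 * has been fragmented.
 */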
static struct sctp_chunk *sctp_make_idatafrag_empty(
					const struct sctp_association *asoc,
					const struct sctp_sndrcvinfo *sinfo,
					int len, __u8 flags, gfp_t gfp)
{
	struct sctp_chunk *retval;
	struct sctp_idatahdr dp;

	memset(&dp, 0, sizeof(dp));
	dp.stream = htons(sinfo->sinfo_stream);

	if (sinfo->sinfo_flags & SCTP_UNORDERED)
		flags |= SCTP_DATA_UNORDERED;

	retval = sctp_make_idata(asoc, flags, sizeof(dp) + len, gfp);
	if (!retval)
		return NULL;

	retval->subh.idata_hdr = sctp_addto_chunk(retval, sizeof(dp), &dp);
	memcpy(&retval->sinfo, sinfo, sizeof(struct sctp_sndrcvinfo));

	return retval;
}

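/* Assign the MID (and per-fragment FSN) to every fragment of a message.
 * The first fragment's PPID/FSN field carries the PPID; later fragments
 * carry an FSN counted from 0.  Non-last fragments only peek at the
 * stream's next MID; the last fragment consumes it.
 *
 * Illustrative example (hypothetical values): a message split into
 * three fragments on a stream whose next outbound MID is 5 gets
 *
 *	FIRST:  mid = 5 (peek), ppid = sinfo_ppid
 *	MIDDLE: mid = 5 (peek), fsn = 0
 *	LAST:   mid = 5 (next; the stream advances to 6), fsn = 1
 */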
static void sctp_chunk_assign_mid(struct sctp_chunk *chunk)
{
	struct sctp_stream *stream;
	struct sctp_chunk *lchunk;
	__u32 cfsn = 0;
	__u16 sid;

	if (chunk->has_mid)
		return;

	sid = sctp_chunk_stream_no(chunk);
	stream = &chunk->asoc->stream;

	list_for_each_entry(lchunk, &chunk->msg->chunks, frag_list) {
		struct sctp_idatahdr *hdr;
		__u32 mid;

		lchunk->has_mid = 1;

		hdr = lchunk->subh.idata_hdr;

		if (lchunk->chunk_hdr->flags & SCTP_DATA_FIRST_FRAG)
			hdr->ppid = lchunk->sinfo.sinfo_ppid;
		else
			hdr->fsn = htonl(cfsn++);

		if (lchunk->chunk_hdr->flags & SCTP_DATA_UNORDERED) {
			mid = lchunk->chunk_hdr->flags & SCTP_DATA_LAST_FRAG ?
				sctp_mid_uo_next(stream, out, sid) :
				sctp_mid_uo_peek(stream, out, sid);
		} else {
			mid = lchunk->chunk_hdr->flags & SCTP_DATA_LAST_FRAG ?
				sctp_mid_next(stream, out, sid) :
				sctp_mid_peek(stream, out, sid);
		}
		hdr->mid = htonl(mid);
	}
}

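/* Receive-side validation: an ordered DATA/I-DATA chunk is acceptable
 * only if its SSN/MID is not behind the next one expected on its
 * stream, otherwise it could never be delivered in order.
 */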
static bool sctp_validate_data(struct sctp_chunk *chunk)
{
	struct sctp_stream *stream;
	__u16 sid, ssn;

	if (chunk->chunk_hdr->type != SCTP_CID_DATA)
		return false;

	if (chunk->chunk_hdr->flags & SCTP_DATA_UNORDERED)
		return true;

	stream = &chunk->asoc->stream;
	sid = sctp_chunk_stream_no(chunk);
	ssn = ntohs(chunk->subh.data_hdr->ssn);

	return !SSN_lt(ssn, sctp_ssn_peek(stream, in, sid));
}

static bool sctp_validate_idata(struct sctp_chunk *chunk)
{
	struct sctp_stream *stream;
	__u32 mid;
	__u16 sid;

	if (chunk->chunk_hdr->type != SCTP_CID_I_DATA)
		return false;

	if (chunk->chunk_hdr->flags & SCTP_DATA_UNORDERED)
		return true;

	stream = &chunk->asoc->stream;
	sid = sctp_chunk_stream_no(chunk);
	mid = ntohl(chunk->subh.idata_hdr->mid);

	return !MID_lt(mid, sctp_mid_peek(stream, in, sid));
}

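/* Insert an event into the reassembly queue, which is kept sorted by
 * (stream, MID, FSN), with a message's FIRST fragment ahead of its
 * MIDDLE/LAST fragments.  The two checks against the tail are a fast
 * path for the common in-order arrival case.
 */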
static void sctp_intl_store_reasm(struct sctp_ulpq *ulpq,
				  struct sctp_ulpevent *event)
{
	struct sctp_ulpevent *cevent;
	struct sk_buff *pos, *loc;

	pos = skb_peek_tail(&ulpq->reasm);
	if (!pos) {
		__skb_queue_tail(&ulpq->reasm, sctp_event2skb(event));
		return;
	}

	/* Fast path: the new event belongs right after the tail. */
	cevent = sctp_skb2event(pos);

	if (event->stream == cevent->stream &&
	    event->mid == cevent->mid &&
	    (cevent->msg_flags & SCTP_DATA_FIRST_FRAG ||
	     (!(event->msg_flags & SCTP_DATA_FIRST_FRAG) &&
	      event->fsn > cevent->fsn))) {
		__skb_queue_tail(&ulpq->reasm, sctp_event2skb(event));
		return;
	}

	if ((event->stream == cevent->stream &&
	     MID_lt(cevent->mid, event->mid)) ||
	    event->stream > cevent->stream) {
		__skb_queue_tail(&ulpq->reasm, sctp_event2skb(event));
		return;
	}

	/* Slow path: find the first queued event that should follow ours. */
	loc = NULL;
	skb_queue_walk(&ulpq->reasm, pos) {
		cevent = sctp_skb2event(pos);

		if (event->stream < cevent->stream ||
		    (event->stream == cevent->stream &&
		     MID_lt(event->mid, cevent->mid))) {
			loc = pos;
			break;
		}
		if (event->stream == cevent->stream &&
		    event->mid == cevent->mid &&
		    !(cevent->msg_flags & SCTP_DATA_FIRST_FRAG) &&
		    (event->msg_flags & SCTP_DATA_FIRST_FRAG ||
		     event->fsn < cevent->fsn)) {
			loc = pos;
			break;
		}
	}

	if (!loc) {
		__skb_queue_tail(&ulpq->reasm, sctp_event2skb(event));
		return;
	}
	__skb_queue_before(&ulpq->reasm, loc, sctp_event2skb(event));
}

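/* Try to extend a partially delivered message: collect the fragments
 * of sin->mid that continue exactly at sin->fsn and hand them to the
 * ULP; a LAST fragment closes the message (MSG_EOR) and ends partial
 * delivery mode on the stream.
 */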
static struct sctp_ulpevent *sctp_intl_retrieve_partial(
						struct sctp_ulpq *ulpq,
						struct sctp_ulpevent *event)
{
	struct sk_buff *first_frag = NULL;
	struct sk_buff *last_frag = NULL;
	struct sctp_ulpevent *retval = NULL;
	struct sctp_stream_in *sin;
	struct sk_buff *pos;
	__u32 next_fsn = 0;
	int is_last = 0;

	sin = sctp_stream_in(&ulpq->asoc->stream, event->stream);

	skb_queue_walk(&ulpq->reasm, pos) {
		struct sctp_ulpevent *cevent = sctp_skb2event(pos);

		if (cevent->stream < event->stream)
			continue;

		if (cevent->stream > event->stream ||
		    cevent->mid != sin->mid)
			break;

		switch (cevent->msg_flags & SCTP_DATA_FRAG_MASK) {
		case SCTP_DATA_FIRST_FRAG:
			goto out;
		case SCTP_DATA_MIDDLE_FRAG:
			if (!first_frag) {
				if (cevent->fsn == sin->fsn) {
					first_frag = pos;
					last_frag = pos;
					next_fsn = cevent->fsn + 1;
				}
			} else if (cevent->fsn == next_fsn) {
				last_frag = pos;
				next_fsn++;
			} else {
				goto out;
			}
			break;
		case SCTP_DATA_LAST_FRAG:
			if (!first_frag) {
				if (cevent->fsn == sin->fsn) {
					first_frag = pos;
					last_frag = pos;
					next_fsn = 0;
					is_last = 1;
				}
			} else if (cevent->fsn == next_fsn) {
				last_frag = pos;
				next_fsn = 0;
				is_last = 1;
			}
			goto out;
		default:
			goto out;
		}
	}

out:
	if (!first_frag)
		return NULL;

	retval = sctp_make_reassembled_event(ulpq->asoc->base.net, &ulpq->reasm,
					     first_frag, last_frag);
	if (retval) {
		sin->fsn = next_fsn;
		if (is_last) {
			retval->msg_flags |= MSG_EOR;
			sin->pd_mode = 0;
		}
	}

	return retval;
}

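/* Look for a complete message (FIRST..LAST with consecutive FSNs) for
 * event->mid.  If it is not complete but at least pd_point bytes of
 * its head are queued, start partial delivery instead, so one large
 * message cannot pin the receive buffer indefinitely.
 */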
static struct sctp_ulpevent *sctp_intl_retrieve_reassembled(
						struct sctp_ulpq *ulpq,
						struct sctp_ulpevent *event)
{
	struct sctp_association *asoc = ulpq->asoc;
	struct sk_buff *pos, *first_frag = NULL;
	struct sctp_ulpevent *retval = NULL;
	struct sk_buff *pd_first = NULL;
	struct sk_buff *pd_last = NULL;
	struct sctp_stream_in *sin;
	__u32 next_fsn = 0;
	__u32 pd_point = 0;
	__u32 pd_len = 0;
	__u32 mid = 0;

	sin = sctp_stream_in(&ulpq->asoc->stream, event->stream);

	skb_queue_walk(&ulpq->reasm, pos) {
		struct sctp_ulpevent *cevent = sctp_skb2event(pos);

		if (cevent->stream < event->stream)
			continue;
		if (cevent->stream > event->stream)
			break;

		if (MID_lt(cevent->mid, event->mid))
			continue;
		if (MID_lt(event->mid, cevent->mid))
			break;

		switch (cevent->msg_flags & SCTP_DATA_FRAG_MASK) {
		case SCTP_DATA_FIRST_FRAG:
			if (cevent->mid == sin->mid) {
				pd_first = pos;
				pd_last = pos;
				pd_len = pos->len;
			}

			first_frag = pos;
			next_fsn = 0;
			mid = cevent->mid;
			break;

		case SCTP_DATA_MIDDLE_FRAG:
			if (first_frag && cevent->mid == mid &&
			    cevent->fsn == next_fsn) {
				next_fsn++;
				if (pd_first) {
					pd_last = pos;
					pd_len += pos->len;
				}
			} else {
				first_frag = NULL;
			}
			break;

		case SCTP_DATA_LAST_FRAG:
			if (first_frag && cevent->mid == mid &&
			    cevent->fsn == next_fsn)
				goto found;
			else
				first_frag = NULL;
			break;
		}
	}

	if (!pd_first)
		goto out;

	pd_point = sctp_sk(asoc->base.sk)->pd_point;
	if (pd_point && pd_point <= pd_len) {
		retval = sctp_make_reassembled_event(asoc->base.net,
						     &ulpq->reasm,
						     pd_first, pd_last);
		if (retval) {
			sin->fsn = next_fsn;
			sin->pd_mode = 1;
		}
	}
	goto out;

found:
	retval = sctp_make_reassembled_event(asoc->base.net, &ulpq->reasm,
					     first_frag, pos);
	if (retval)
		retval->msg_flags |= MSG_EOR;

out:
	return retval;
}

static struct sctp_ulpevent *sctp_intl_reasm(struct sctp_ulpq *ulpq,
					     struct sctp_ulpevent *event)
{
	struct sctp_ulpevent *retval = NULL;
	struct sctp_stream_in *sin;

	if (SCTP_DATA_NOT_FRAG == (event->msg_flags & SCTP_DATA_FRAG_MASK)) {
		event->msg_flags |= MSG_EOR;
		return event;
	}

	sctp_intl_store_reasm(ulpq, event);

	sin = sctp_stream_in(&ulpq->asoc->stream, event->stream);
	if (sin->pd_mode && event->mid == sin->mid &&
	    event->fsn == sin->fsn)
		retval = sctp_intl_retrieve_partial(ulpq, event);

	if (!retval)
		retval = sctp_intl_retrieve_reassembled(ulpq, event);

	return retval;
}

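/* The lobby holds fully reassembled but out-of-order messages; like
 * the reassembly queue it is kept sorted, here by (stream, MID) only.
 */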
static void sctp_intl_store_ordered(struct sctp_ulpq *ulpq,
				    struct sctp_ulpevent *event)
{
	struct sctp_ulpevent *cevent;
	struct sk_buff *pos, *loc;

	pos = skb_peek_tail(&ulpq->lobby);
	if (!pos) {
		__skb_queue_tail(&ulpq->lobby, sctp_event2skb(event));
		return;
	}

	cevent = (struct sctp_ulpevent *)pos->cb;
	if (event->stream == cevent->stream &&
	    MID_lt(cevent->mid, event->mid)) {
		__skb_queue_tail(&ulpq->lobby, sctp_event2skb(event));
		return;
	}

	if (event->stream > cevent->stream) {
		__skb_queue_tail(&ulpq->lobby, sctp_event2skb(event));
		return;
	}

	loc = NULL;
	skb_queue_walk(&ulpq->lobby, pos) {
		cevent = (struct sctp_ulpevent *)pos->cb;

		if (cevent->stream > event->stream) {
			loc = pos;
			break;
		}
		if (cevent->stream == event->stream &&
		    MID_lt(event->mid, cevent->mid)) {
			loc = pos;
			break;
		}
	}

	if (!loc) {
		__skb_queue_tail(&ulpq->lobby, sctp_event2skb(event));
		return;
	}
	__skb_queue_before(&ulpq->lobby, loc, sctp_event2skb(event));
}

static void sctp_intl_retrieve_ordered(struct sctp_ulpq *ulpq,
				       struct sctp_ulpevent *event)
{
	struct sk_buff_head *event_list;
	struct sctp_stream *stream;
	struct sk_buff *pos, *tmp;
	__u16 sid = event->stream;

	stream = &ulpq->asoc->stream;
	/* The event is already on the caller's temporary list, so its
	 * skb->prev links back to that list head; followers are appended
	 * to the same list.
	 */
	event_list = (struct sk_buff_head *)sctp_event2skb(event)->prev;

	sctp_skb_for_each(pos, &ulpq->lobby, tmp) {
		struct sctp_ulpevent *cevent = (struct sctp_ulpevent *)pos->cb;

		if (cevent->stream > sid)
			break;

		if (cevent->stream < sid)
			continue;

		if (cevent->mid != sctp_mid_peek(stream, in, sid))
			break;

		sctp_mid_next(stream, in, sid);

		__skb_unlink(pos, &ulpq->lobby);

		__skb_queue_tail(event_list, pos);
	}
}

static struct sctp_ulpevent *sctp_intl_order(struct sctp_ulpq *ulpq,
					     struct sctp_ulpevent *event)
{
	struct sctp_stream *stream;
	__u16 sid;

	stream = &ulpq->asoc->stream;
	sid = event->stream;

	if (event->mid != sctp_mid_peek(stream, in, sid)) {
		sctp_intl_store_ordered(ulpq, event);
		return NULL;
	}

	sctp_mid_next(stream, in, sid);

	sctp_intl_retrieve_ordered(ulpq, event);

	return event;
}

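/* Move a list of events to the socket receive queue and wake the
 * reader, honouring socket shutdown state and the ULP's event
 * subscriptions.
 */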
static int sctp_enqueue_event(struct sctp_ulpq *ulpq,
			      struct sk_buff_head *skb_list)
{
	struct sock *sk = ulpq->asoc->base.sk;
	struct sctp_sock *sp = sctp_sk(sk);
	struct sctp_ulpevent *event;
	struct sk_buff *skb;

	skb = __skb_peek(skb_list);
	event = sctp_skb2event(skb);

	if (sk->sk_shutdown & RCV_SHUTDOWN &&
	    (sk->sk_shutdown & SEND_SHUTDOWN ||
	     !sctp_ulpevent_is_notification(event)))
		goto out_free;

	if (!sctp_ulpevent_is_notification(event)) {
		sk_mark_napi_id(sk, skb);
		sk_incoming_cpu_update(sk);
	}

	if (!sctp_ulpevent_is_enabled(event, ulpq->asoc->subscribe))
		goto out_free;

	skb_queue_splice_tail_init(skb_list, &sk->sk_receive_queue);

	if (!sp->data_ready_signalled) {
		sp->data_ready_signalled = 1;
		sk->sk_data_ready(sk);
	}

	return 1;

out_free:
	sctp_queue_purge_ulpevents(skb_list);

	return 0;
}

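/* The *_uo functions below mirror the ordered ones above for unordered
 * I-DATA: they work on ulpq->reasm_uo and track progress in the
 * stream's mid_uo/fsn_uo/pd_mode_uo fields.  There is no lobby stage,
 * since unordered messages skip MID-based reordering.
 */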
static void sctp_intl_store_reasm_uo(struct sctp_ulpq *ulpq,
				     struct sctp_ulpevent *event)
{
	struct sctp_ulpevent *cevent;
	struct sk_buff *pos;

	pos = skb_peek_tail(&ulpq->reasm_uo);
	if (!pos) {
		__skb_queue_tail(&ulpq->reasm_uo, sctp_event2skb(event));
		return;
	}

	/* Fast path: the new event belongs right after the tail. */
	cevent = sctp_skb2event(pos);

	if (event->stream == cevent->stream &&
	    event->mid == cevent->mid &&
	    (cevent->msg_flags & SCTP_DATA_FIRST_FRAG ||
	     (!(event->msg_flags & SCTP_DATA_FIRST_FRAG) &&
	      event->fsn > cevent->fsn))) {
		__skb_queue_tail(&ulpq->reasm_uo, sctp_event2skb(event));
		return;
	}

	if ((event->stream == cevent->stream &&
	     MID_lt(cevent->mid, event->mid)) ||
	    event->stream > cevent->stream) {
		__skb_queue_tail(&ulpq->reasm_uo, sctp_event2skb(event));
		return;
	}

	skb_queue_walk(&ulpq->reasm_uo, pos) {
		cevent = sctp_skb2event(pos);

		if (event->stream < cevent->stream ||
		    (event->stream == cevent->stream &&
		     MID_lt(event->mid, cevent->mid)))
			break;

		if (event->stream == cevent->stream &&
		    event->mid == cevent->mid &&
		    !(cevent->msg_flags & SCTP_DATA_FIRST_FRAG) &&
		    (event->msg_flags & SCTP_DATA_FIRST_FRAG ||
		     event->fsn < cevent->fsn))
			break;
	}

	__skb_queue_before(&ulpq->reasm_uo, pos, sctp_event2skb(event));
}

static struct sctp_ulpevent *sctp_intl_retrieve_partial_uo(
						struct sctp_ulpq *ulpq,
						struct sctp_ulpevent *event)
{
	struct sk_buff *first_frag = NULL;
	struct sk_buff *last_frag = NULL;
	struct sctp_ulpevent *retval = NULL;
	struct sctp_stream_in *sin;
	struct sk_buff *pos;
	__u32 next_fsn = 0;
	int is_last = 0;

	sin = sctp_stream_in(&ulpq->asoc->stream, event->stream);

	skb_queue_walk(&ulpq->reasm_uo, pos) {
		struct sctp_ulpevent *cevent = sctp_skb2event(pos);

		if (cevent->stream < event->stream)
			continue;
		if (cevent->stream > event->stream)
			break;

		if (MID_lt(cevent->mid, sin->mid_uo))
			continue;
		if (MID_lt(sin->mid_uo, cevent->mid))
			break;

		switch (cevent->msg_flags & SCTP_DATA_FRAG_MASK) {
		case SCTP_DATA_FIRST_FRAG:
			goto out;
		case SCTP_DATA_MIDDLE_FRAG:
			if (!first_frag) {
				if (cevent->fsn == sin->fsn_uo) {
					first_frag = pos;
					last_frag = pos;
					next_fsn = cevent->fsn + 1;
				}
			} else if (cevent->fsn == next_fsn) {
				last_frag = pos;
				next_fsn++;
			} else {
				goto out;
			}
			break;
		case SCTP_DATA_LAST_FRAG:
			if (!first_frag) {
				if (cevent->fsn == sin->fsn_uo) {
					first_frag = pos;
					last_frag = pos;
					next_fsn = 0;
					is_last = 1;
				}
			} else if (cevent->fsn == next_fsn) {
				last_frag = pos;
				next_fsn = 0;
				is_last = 1;
			}
			goto out;
		default:
			goto out;
		}
	}

out:
	if (!first_frag)
		return NULL;

	retval = sctp_make_reassembled_event(ulpq->asoc->base.net,
					     &ulpq->reasm_uo, first_frag,
					     last_frag);
	if (retval) {
		sin->fsn_uo = next_fsn;
		if (is_last) {
			retval->msg_flags |= MSG_EOR;
			sin->pd_mode_uo = 0;
		}
	}

	return retval;
}

static struct sctp_ulpevent *sctp_intl_retrieve_reassembled_uo(
						struct sctp_ulpq *ulpq,
						struct sctp_ulpevent *event)
{
	struct sctp_association *asoc = ulpq->asoc;
	struct sk_buff *pos, *first_frag = NULL;
	struct sctp_ulpevent *retval = NULL;
	struct sk_buff *pd_first = NULL;
	struct sk_buff *pd_last = NULL;
	struct sctp_stream_in *sin;
	__u32 next_fsn = 0;
	__u32 pd_point = 0;
	__u32 pd_len = 0;
	__u32 mid = 0;

	sin = sctp_stream_in(&ulpq->asoc->stream, event->stream);

	skb_queue_walk(&ulpq->reasm_uo, pos) {
		struct sctp_ulpevent *cevent = sctp_skb2event(pos);

		if (cevent->stream < event->stream)
			continue;
		if (cevent->stream > event->stream)
			break;

		if (MID_lt(cevent->mid, event->mid))
			continue;
		if (MID_lt(event->mid, cevent->mid))
			break;

		switch (cevent->msg_flags & SCTP_DATA_FRAG_MASK) {
		case SCTP_DATA_FIRST_FRAG:
			if (!sin->pd_mode_uo) {
				sin->mid_uo = cevent->mid;
				pd_first = pos;
				pd_last = pos;
				pd_len = pos->len;
			}

			first_frag = pos;
			next_fsn = 0;
			mid = cevent->mid;
			break;

		case SCTP_DATA_MIDDLE_FRAG:
			if (first_frag && cevent->mid == mid &&
			    cevent->fsn == next_fsn) {
				next_fsn++;
				if (pd_first) {
					pd_last = pos;
					pd_len += pos->len;
				}
			} else {
				first_frag = NULL;
			}
			break;

		case SCTP_DATA_LAST_FRAG:
			if (first_frag && cevent->mid == mid &&
			    cevent->fsn == next_fsn)
				goto found;
			else
				first_frag = NULL;
			break;
		}
	}

	if (!pd_first)
		goto out;

	pd_point = sctp_sk(asoc->base.sk)->pd_point;
	if (pd_point && pd_point <= pd_len) {
		retval = sctp_make_reassembled_event(asoc->base.net,
						     &ulpq->reasm_uo,
						     pd_first, pd_last);
		if (retval) {
			sin->fsn_uo = next_fsn;
			sin->pd_mode_uo = 1;
		}
	}
	goto out;

found:
	retval = sctp_make_reassembled_event(asoc->base.net, &ulpq->reasm_uo,
					     first_frag, pos);
	if (retval)
		retval->msg_flags |= MSG_EOR;

out:
	return retval;
}

static struct sctp_ulpevent *sctp_intl_reasm_uo(struct sctp_ulpq *ulpq,
						struct sctp_ulpevent *event)
{
	struct sctp_ulpevent *retval = NULL;
	struct sctp_stream_in *sin;

	if (SCTP_DATA_NOT_FRAG == (event->msg_flags & SCTP_DATA_FRAG_MASK)) {
		event->msg_flags |= MSG_EOR;
		return event;
	}

	sctp_intl_store_reasm_uo(ulpq, event);

	sin = sctp_stream_in(&ulpq->asoc->stream, event->stream);
	if (sin->pd_mode_uo && event->mid == sin->mid_uo &&
	    event->fsn == sin->fsn_uo)
		retval = sctp_intl_retrieve_partial_uo(ulpq, event);

	if (!retval)
		retval = sctp_intl_retrieve_reassembled_uo(ulpq, event);

	return retval;
}

static struct sctp_ulpevent *sctp_intl_retrieve_first_uo(struct sctp_ulpq *ulpq)
{
	struct sctp_stream_in *csin, *sin = NULL;
	struct sk_buff *first_frag = NULL;
	struct sk_buff *last_frag = NULL;
	struct sctp_ulpevent *retval;
	struct sk_buff *pos;
	__u32 next_fsn = 0;
	__u16 sid = 0;

	skb_queue_walk(&ulpq->reasm_uo, pos) {
		struct sctp_ulpevent *cevent = sctp_skb2event(pos);

		csin = sctp_stream_in(&ulpq->asoc->stream, cevent->stream);
		if (csin->pd_mode_uo)
			continue;

		switch (cevent->msg_flags & SCTP_DATA_FRAG_MASK) {
		case SCTP_DATA_FIRST_FRAG:
			if (first_frag)
				goto out;
			first_frag = pos;
			last_frag = pos;
			next_fsn = 0;
			sin = csin;
			sid = cevent->stream;
			sin->mid_uo = cevent->mid;
			break;
		case SCTP_DATA_MIDDLE_FRAG:
			if (!first_frag)
				break;
			if (cevent->stream == sid &&
			    cevent->mid == sin->mid_uo &&
			    cevent->fsn == next_fsn) {
				next_fsn++;
				last_frag = pos;
			} else {
				goto out;
			}
			break;
		case SCTP_DATA_LAST_FRAG:
			if (first_frag)
				goto out;
			break;
		default:
			break;
		}
	}

	if (!first_frag)
		return NULL;

out:
	retval = sctp_make_reassembled_event(ulpq->asoc->base.net,
					     &ulpq->reasm_uo, first_frag,
					     last_frag);
	if (retval) {
		sin->fsn_uo = next_fsn;
		sin->pd_mode_uo = 1;
	}

	return retval;
}

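/* Entry point for a received I-DATA chunk: build the event, pick up
 * MID and PPID/FSN from the I-DATA header, reassemble (ordered or
 * unordered), order by MID where required, and finally enqueue to the
 * socket.  Returns 1 if a message boundary (MSG_EOR) was delivered,
 * 0 if not, or a negative error.
 */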
static int sctp_ulpevent_idata(struct sctp_ulpq *ulpq,
			       struct sctp_chunk *chunk, gfp_t gfp)
{
	struct sctp_ulpevent *event;
	struct sk_buff_head temp;
	int event_eor = 0;

	event = sctp_ulpevent_make_rcvmsg(chunk->asoc, chunk, gfp);
	if (!event)
		return -ENOMEM;

	event->mid = ntohl(chunk->subh.idata_hdr->mid);
	if (event->msg_flags & SCTP_DATA_FIRST_FRAG)
		event->ppid = chunk->subh.idata_hdr->ppid;
	else
		event->fsn = ntohl(chunk->subh.idata_hdr->fsn);

	if (!(event->msg_flags & SCTP_DATA_UNORDERED)) {
		event = sctp_intl_reasm(ulpq, event);
		if (!event)
			return 0;

		skb_queue_head_init(&temp);
		__skb_queue_tail(&temp, sctp_event2skb(event));

		if (event->msg_flags & MSG_EOR)
			event = sctp_intl_order(ulpq, event);
	} else {
		event = sctp_intl_reasm_uo(ulpq, event);
		if (!event)
			return 0;

		skb_queue_head_init(&temp);
		__skb_queue_tail(&temp, sctp_event2skb(event));
	}

	if (event) {
		event_eor = (event->msg_flags & MSG_EOR) ? 1 : 0;
		sctp_enqueue_event(ulpq, &temp);
	}

	return event_eor;
}

static struct sctp_ulpevent *sctp_intl_retrieve_first(struct sctp_ulpq *ulpq)
{
	struct sctp_stream_in *csin, *sin = NULL;
	struct sk_buff *first_frag = NULL;
	struct sk_buff *last_frag = NULL;
	struct sctp_ulpevent *retval;
	struct sk_buff *pos;
	__u32 next_fsn = 0;
	__u16 sid = 0;

	skb_queue_walk(&ulpq->reasm, pos) {
		struct sctp_ulpevent *cevent = sctp_skb2event(pos);

		csin = sctp_stream_in(&ulpq->asoc->stream, cevent->stream);
		if (csin->pd_mode)
			continue;

		switch (cevent->msg_flags & SCTP_DATA_FRAG_MASK) {
		case SCTP_DATA_FIRST_FRAG:
			if (first_frag)
				goto out;
			if (cevent->mid == csin->mid) {
				first_frag = pos;
				last_frag = pos;
				next_fsn = 0;
				sin = csin;
				sid = cevent->stream;
			}
			break;
		case SCTP_DATA_MIDDLE_FRAG:
			if (!first_frag)
				break;
			if (cevent->stream == sid &&
			    cevent->mid == sin->mid &&
			    cevent->fsn == next_fsn) {
				next_fsn++;
				last_frag = pos;
			} else {
				goto out;
			}
			break;
		case SCTP_DATA_LAST_FRAG:
			if (first_frag)
				goto out;
			break;
		default:
			break;
		}
	}

	if (!first_frag)
		return NULL;

out:
	retval = sctp_make_reassembled_event(ulpq->asoc->base.net,
					     &ulpq->reasm, first_frag,
					     last_frag);
	if (retval) {
		sin->fsn = next_fsn;
		sin->pd_mode = 1;
	}

	return retval;
}

static void sctp_intl_start_pd(struct sctp_ulpq *ulpq, gfp_t gfp)
{
	struct sctp_ulpevent *event;
	struct sk_buff_head temp;

	if (!skb_queue_empty(&ulpq->reasm)) {
		do {
			event = sctp_intl_retrieve_first(ulpq);
			if (event) {
				skb_queue_head_init(&temp);
				__skb_queue_tail(&temp, sctp_event2skb(event));
				sctp_enqueue_event(ulpq, &temp);
			}
		} while (event);
	}

	if (!skb_queue_empty(&ulpq->reasm_uo)) {
		do {
			event = sctp_intl_retrieve_first_uo(ulpq);
			if (event) {
				skb_queue_head_init(&temp);
				__skb_queue_tail(&temp, sctp_event2skb(event));
				sctp_enqueue_event(ulpq, &temp);
			}
		} while (event);
	}
}

static void sctp_renege_events(struct sctp_ulpq *ulpq, struct sctp_chunk *chunk,
			       gfp_t gfp)
{
	struct sctp_association *asoc = ulpq->asoc;
	__u32 freed = 0;
	__u16 needed;

	needed = ntohs(chunk->chunk_hdr->length) -
		 sizeof(struct sctp_idata_chunk);

	if (skb_queue_empty(&asoc->base.sk->sk_receive_queue)) {
		freed = sctp_ulpq_renege_list(ulpq, &ulpq->lobby, needed);
		if (freed < needed)
			freed += sctp_ulpq_renege_list(ulpq, &ulpq->reasm,
						       needed);
		if (freed < needed)
			freed += sctp_ulpq_renege_list(ulpq, &ulpq->reasm_uo,
						       needed);
	}

	if (freed >= needed && sctp_ulpevent_idata(ulpq, chunk, gfp) <= 0)
		sctp_intl_start_pd(ulpq, gfp);
}

static void sctp_intl_stream_abort_pd(struct sctp_ulpq *ulpq, __u16 sid,
				      __u32 mid, __u16 flags, gfp_t gfp)
{
	struct sock *sk = ulpq->asoc->base.sk;
	struct sctp_ulpevent *ev = NULL;

	if (!sctp_ulpevent_type_enabled(ulpq->asoc->subscribe,
					SCTP_PARTIAL_DELIVERY_EVENT))
		return;

	ev = sctp_ulpevent_make_pdapi(ulpq->asoc, SCTP_PARTIAL_DELIVERY_ABORTED,
				      sid, mid, flags, gfp);
	if (ev) {
		struct sctp_sock *sp = sctp_sk(sk);

		__skb_queue_tail(&sk->sk_receive_queue, sctp_event2skb(ev));

		if (!sp->data_ready_signalled) {
			sp->data_ready_signalled = 1;
			sk->sk_data_ready(sk);
		}
	}
}

static void sctp_intl_reap_ordered(struct sctp_ulpq *ulpq, __u16 sid)
{
	struct sctp_stream *stream = &ulpq->asoc->stream;
	struct sctp_ulpevent *cevent, *event = NULL;
	struct sk_buff_head *lobby = &ulpq->lobby;
	struct sk_buff *pos, *tmp;
	struct sk_buff_head temp;
	__u16 csid;
	__u32 cmid;

	skb_queue_head_init(&temp);
	sctp_skb_for_each(pos, lobby, tmp) {
		cevent = (struct sctp_ulpevent *)pos->cb;
		csid = cevent->stream;
		cmid = cevent->mid;

		if (csid > sid)
			break;

		if (csid < sid)
			continue;

		if (!MID_lt(cmid, sctp_mid_peek(stream, in, csid)))
			break;

		__skb_unlink(pos, lobby);
		if (!event)
			event = sctp_skb2event(pos);

		__skb_queue_tail(&temp, pos);
	}

	if (!event && pos != (struct sk_buff *)lobby) {
		cevent = (struct sctp_ulpevent *)pos->cb;
		csid = cevent->stream;
		cmid = cevent->mid;

		if (csid == sid && cmid == sctp_mid_peek(stream, in, csid)) {
			sctp_mid_next(stream, in, csid);
			__skb_unlink(pos, lobby);
			__skb_queue_tail(&temp, pos);
			event = sctp_skb2event(pos);
		}
	}

	if (event) {
		sctp_intl_retrieve_ordered(ulpq, event);
		sctp_enqueue_event(ulpq, &temp);
	}
}

static void sctp_intl_abort_pd(struct sctp_ulpq *ulpq, gfp_t gfp)
{
	struct sctp_stream *stream = &ulpq->asoc->stream;
	__u16 sid;

	for (sid = 0; sid < stream->incnt; sid++) {
		struct sctp_stream_in *sin = SCTP_SI(stream, sid);
		__u32 mid;

		if (sin->pd_mode_uo) {
			sin->pd_mode_uo = 0;

			mid = sin->mid_uo;
			sctp_intl_stream_abort_pd(ulpq, sid, mid, 0x1, gfp);
		}

		if (sin->pd_mode) {
			sin->pd_mode = 0;

			mid = sin->mid;
			sctp_intl_stream_abort_pd(ulpq, sid, mid, 0, gfp);
			sctp_mid_skip(stream, in, sid, mid);

			sctp_intl_reap_ordered(ulpq, sid);
		}
	}

	/* intl abort pd happens only when all data needs to be cleaned */
	sctp_ulpq_flush(ulpq);
}

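/* I-FORWARD-TSN generation.  sctp_get_skip_pos() returns the index of
 * an existing (stream, flags) entry in the skip list, or nskips if a
 * new slot is needed, so each stream keeps only its newest abandoned
 * MID.  E.g. (hypothetical values) abandoning MIDs 3 and then 4 on
 * stream 2 leaves the single skip entry {stream 2, mid 4}.
 */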
static inline int sctp_get_skip_pos(struct sctp_ifwdtsn_skip *skiplist,
				    int nskips, __be16 stream, __u8 flags)
{
	int i;

	for (i = 0; i < nskips; i++)
		if (skiplist[i].stream == stream &&
		    skiplist[i].flags == flags)
			return i;

	return i;
}

#define SCTP_FTSN_U_BIT	0x1
static void sctp_generate_iftsn(struct sctp_outq *q, __u32 ctsn)
{
	struct sctp_ifwdtsn_skip ftsn_skip_arr[10];
	struct sctp_association *asoc = q->asoc;
	struct sctp_chunk *ftsn_chunk = NULL;
	struct list_head *lchunk, *temp;
	int nskips = 0, skip_pos;
	struct sctp_chunk *chunk;
	__u32 tsn;

	if (!asoc->peer.prsctp_capable)
		return;

	if (TSN_lt(asoc->adv_peer_ack_point, ctsn))
		asoc->adv_peer_ack_point = ctsn;

	/* Go through the abandoned chunks. */
	list_for_each_safe(lchunk, temp, &q->abandoned) {
		chunk = list_entry(lchunk, struct sctp_chunk, transmitted_list);
		tsn = ntohl(chunk->subh.data_hdr->tsn);

		if (TSN_lte(tsn, ctsn)) {
			list_del_init(lchunk);
			sctp_chunk_free(chunk);
		} else if (TSN_lte(tsn, asoc->adv_peer_ack_point + 1)) {
			__be16 sid = chunk->subh.idata_hdr->stream;
			__be32 mid = chunk->subh.idata_hdr->mid;
			__u8 flags = 0;

			if (chunk->chunk_hdr->flags & SCTP_DATA_UNORDERED)
				flags |= SCTP_FTSN_U_BIT;

			asoc->adv_peer_ack_point = tsn;
			skip_pos = sctp_get_skip_pos(&ftsn_skip_arr[0], nskips,
						     sid, flags);
			ftsn_skip_arr[skip_pos].stream = sid;
			ftsn_skip_arr[skip_pos].reserved = 0;
			ftsn_skip_arr[skip_pos].flags = flags;
			ftsn_skip_arr[skip_pos].mid = mid;
			if (skip_pos == nskips)
				nskips++;
			if (nskips == 10)
				break;
		} else {
			break;
		}
	}

	if (asoc->adv_peer_ack_point > ctsn)
		ftsn_chunk = sctp_make_ifwdtsn(asoc, asoc->adv_peer_ack_point,
					       nskips, &ftsn_skip_arr[0]);
	if (ftsn_chunk) {
		list_add_tail(&ftsn_chunk->list, &q->control_chunk_list);
		SCTP_INC_STATS(asoc->base.net, SCTP_MIB_OUTCTRLCHUNKS);
	}
}

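/* Walk the skip entries of an I-FORWARD-TSN chunk.  The bound keeps
 * 'pos' inside the chunk even for a malformed length: iteration stops
 * once fewer than sizeof(struct sctp_ifwdtsn_skip) bytes remain.
 */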
#define _sctp_walk_ifwdtsn(pos, chunk, end) \
	for (pos = (void *)(chunk->subh.ifwdtsn_hdr + 1); \
	     (void *)pos <= (void *)(chunk->subh.ifwdtsn_hdr + 1) + (end) - \
			    sizeof(struct sctp_ifwdtsn_skip); pos++)

#define sctp_walk_ifwdtsn(pos, ch) \
	_sctp_walk_ifwdtsn((pos), (ch), ntohs((ch)->chunk_hdr->length) - \
					sizeof(struct sctp_ifwdtsn_chunk))

static bool sctp_validate_fwdtsn(struct sctp_chunk *chunk)
{
	struct sctp_fwdtsn_skip *skip;
	__u16 incnt;

	if (chunk->chunk_hdr->type != SCTP_CID_FWD_TSN)
		return false;

	incnt = chunk->asoc->stream.incnt;
	sctp_walk_fwdtsn(skip, chunk)
		if (ntohs(skip->stream) >= incnt)
			return false;

	return true;
}

static bool sctp_validate_iftsn(struct sctp_chunk *chunk)
{
	struct sctp_ifwdtsn_skip *skip;
	__u16 incnt;

	if (chunk->chunk_hdr->type != SCTP_CID_I_FWD_TSN)
		return false;

	incnt = chunk->asoc->stream.incnt;
	sctp_walk_ifwdtsn(skip, chunk)
		if (ntohs(skip->stream) >= incnt)
			return false;

	return true;
}

static void sctp_report_fwdtsn(struct sctp_ulpq *ulpq, __u32 ftsn)
{
	/* Move the Cumulative TSN Ack point ahead. */
	sctp_tsnmap_skip(&ulpq->asoc->peer.tsn_map, ftsn);
	/* Purge the fragmentation queue. */
	sctp_ulpq_reasm_flushtsn(ulpq, ftsn);
	/* Abort any in-progress partial delivery. */
	sctp_ulpq_abort_pd(ulpq, GFP_ATOMIC);
}

static void sctp_intl_reasm_flushtsn(struct sctp_ulpq *ulpq, __u32 ftsn)
{
	struct sk_buff *pos, *tmp;

	skb_queue_walk_safe(&ulpq->reasm, pos, tmp) {
		struct sctp_ulpevent *event = sctp_skb2event(pos);
		__u32 tsn = event->tsn;

		if (TSN_lte(tsn, ftsn)) {
			__skb_unlink(pos, &ulpq->reasm);
			sctp_ulpevent_free(event);
		}
	}

	skb_queue_walk_safe(&ulpq->reasm_uo, pos, tmp) {
		struct sctp_ulpevent *event = sctp_skb2event(pos);
		__u32 tsn = event->tsn;

		if (TSN_lte(tsn, ftsn)) {
			__skb_unlink(pos, &ulpq->reasm_uo);
			sctp_ulpevent_free(event);
		}
	}
}

static void sctp_report_iftsn(struct sctp_ulpq *ulpq, __u32 ftsn)
{
	/* Move the Cumulative TSN Ack point ahead. */
	sctp_tsnmap_skip(&ulpq->asoc->peer.tsn_map, ftsn);
	/* Purge the fragmentation queue. */
	sctp_intl_reasm_flushtsn(ulpq, ftsn);
	/* Abort only when it's for all data. */
	if (ftsn == sctp_tsnmap_get_max_tsn_seen(&ulpq->asoc->peer.tsn_map))
		sctp_intl_abort_pd(ulpq, GFP_ATOMIC);
}

static void sctp_handle_fwdtsn(struct sctp_ulpq *ulpq, struct sctp_chunk *chunk)
{
	struct sctp_fwdtsn_skip *skip;

	/* Walk through all the skipped SSNs. */
	sctp_walk_fwdtsn(skip, chunk)
		sctp_ulpq_skip(ulpq, ntohs(skip->stream), ntohs(skip->ssn));
}

static void sctp_intl_skip(struct sctp_ulpq *ulpq, __u16 sid, __u32 mid,
			   __u8 flags)
{
	struct sctp_stream_in *sin = sctp_stream_in(&ulpq->asoc->stream, sid);
	struct sctp_stream *stream = &ulpq->asoc->stream;

	if (flags & SCTP_FTSN_U_BIT) {
		if (sin->pd_mode_uo && MID_lt(sin->mid_uo, mid)) {
			sin->pd_mode_uo = 0;
			sctp_intl_stream_abort_pd(ulpq, sid, mid, 0x1,
						  GFP_ATOMIC);
		}
		return;
	}

	if (MID_lt(mid, sctp_mid_peek(stream, in, sid)))
		return;

	if (sin->pd_mode) {
		sin->pd_mode = 0;
		sctp_intl_stream_abort_pd(ulpq, sid, mid, 0x0, GFP_ATOMIC);
	}

	sctp_mid_skip(stream, in, sid, mid);

	sctp_intl_reap_ordered(ulpq, sid);
}

static void sctp_handle_iftsn(struct sctp_ulpq *ulpq, struct sctp_chunk *chunk)
{
	struct sctp_ifwdtsn_skip *skip;

	/* Walk through all the skipped MIDs and abort stream pd if possible */
	sctp_walk_ifwdtsn(skip, chunk)
		sctp_intl_skip(ulpq, ntohs(skip->stream),
			       ntohl(skip->mid), skip->flags);
}

static int do_ulpq_tail_event(struct sctp_ulpq *ulpq, struct sctp_ulpevent *event)
{
	struct sk_buff_head temp;

	skb_queue_head_init(&temp);
	__skb_queue_tail(&temp, sctp_event2skb(event));
	return sctp_ulpq_tail_event(ulpq, &temp);
}

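/* Two ops tables implement the same interface: interleave_0 maps it to
 * the classic DATA/FORWARD-TSN paths, interleave_1 to the I-DATA
 * routines above.  Callers never branch on intl_capable themselves;
 * they dispatch through the table, e.g. (illustrative call only):
 *
 *	asoc->stream.si->ulpevent_data(ulpq, chunk, gfp);
 */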
static struct sctp_stream_interleave sctp_stream_interleave_0 = {
	.data_chunk_len = sizeof(struct sctp_data_chunk),
	.ftsn_chunk_len = sizeof(struct sctp_fwdtsn_chunk),
	/* DATA process functions */
	.make_datafrag = sctp_make_datafrag_empty,
	.assign_number = sctp_chunk_assign_ssn,
	.validate_data = sctp_validate_data,
	.ulpevent_data = sctp_ulpq_tail_data,
	.enqueue_event = do_ulpq_tail_event,
	.renege_events = sctp_ulpq_renege,
	.start_pd = sctp_ulpq_partial_delivery,
	.abort_pd = sctp_ulpq_abort_pd,
	/* FORWARD-TSN process functions */
	.generate_ftsn = sctp_generate_fwdtsn,
	.validate_ftsn = sctp_validate_fwdtsn,
	.report_ftsn = sctp_report_fwdtsn,
	.handle_ftsn = sctp_handle_fwdtsn,
};

static int do_sctp_enqueue_event(struct sctp_ulpq *ulpq,
				 struct sctp_ulpevent *event)
{
	struct sk_buff_head temp;

	skb_queue_head_init(&temp);
	__skb_queue_tail(&temp, sctp_event2skb(event));
	return sctp_enqueue_event(ulpq, &temp);
}

static struct sctp_stream_interleave sctp_stream_interleave_1 = {
	.data_chunk_len = sizeof(struct sctp_idata_chunk),
	.ftsn_chunk_len = sizeof(struct sctp_ifwdtsn_chunk),
	/* I-DATA process functions */
	.make_datafrag = sctp_make_idatafrag_empty,
	.assign_number = sctp_chunk_assign_mid,
	.validate_data = sctp_validate_idata,
	.ulpevent_data = sctp_ulpevent_idata,
	.enqueue_event = do_sctp_enqueue_event,
	.renege_events = sctp_renege_events,
	.start_pd = sctp_intl_start_pd,
	.abort_pd = sctp_intl_abort_pd,
	/* I-FORWARD-TSN process functions */
	.generate_ftsn = sctp_generate_iftsn,
	.validate_ftsn = sctp_validate_iftsn,
	.report_ftsn = sctp_report_iftsn,
	.handle_ftsn = sctp_handle_iftsn,
};

void sctp_stream_interleave_init(struct sctp_stream *stream)
{
	struct sctp_association *asoc;

	asoc = container_of(stream, struct sctp_association, stream);
	stream->si = asoc->peer.intl_capable ? &sctp_stream_interleave_1
					     : &sctp_stream_interleave_0;
}