// SPDX-License-Identifier: GPL-2.0-or-later
/* SCTP kernel implementation
 * (C) Copyright IBM Corp. 2001, 2004
 * Copyright (c) 1999-2000 Cisco, Inc.
 * Copyright (c) 1999-2001 Motorola, Inc.
 *
 * This file is part of the SCTP kernel implementation
 *
 * These functions handle output processing.
 *
 * Please send any bug reports or fixes you make to the
 * email address(es):
 *    lksctp developers <[email protected]>
 *
 * Written or modified by:
 *    La Monte H.P. Yarroll <[email protected]>
 *    Karl Knutson <[email protected]>
 *    Jon Grimm <[email protected]>
 *    Sridhar Samudrala <[email protected]>
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/wait.h>
#include <linux/time.h>
#include <linux/ip.h>
#include <linux/ipv6.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <net/inet_ecn.h>
#include <net/ip.h>
#include <net/icmp.h>
#include <net/net_namespace.h>

#include <linux/socket.h> /* for sa_family_t */
#include <net/sock.h>

#include <net/sctp/sctp.h>
#include <net/sctp/sm.h>
#include <net/sctp/checksum.h>

/* Forward declarations for private helpers. */
static enum sctp_xmit __sctp_packet_append_chunk(struct sctp_packet *packet,
						 struct sctp_chunk *chunk);
static enum sctp_xmit sctp_packet_can_append_data(struct sctp_packet *packet,
						  struct sctp_chunk *chunk);
static void sctp_packet_append_data(struct sctp_packet *packet,
				    struct sctp_chunk *chunk);
static enum sctp_xmit sctp_packet_will_fit(struct sctp_packet *packet,
					   struct sctp_chunk *chunk,
					   u16 chunk_len);

static void sctp_packet_reset(struct sctp_packet *packet)
{
	/* sctp_packet_transmit() relies on this to reset size to the
	 * current overhead after sending packets.
	 */
	packet->size = packet->overhead;

	packet->has_cookie_echo = 0;
	packet->has_sack = 0;
	packet->has_data = 0;
	packet->has_auth = 0;
	packet->ipfragok = 0;
	packet->auth = NULL;
}

/* Config a packet.
 * This appears to be a followup set of initializations.
 */
void sctp_packet_config(struct sctp_packet *packet, __u32 vtag,
			int ecn_capable)
{
	struct sctp_transport *tp = packet->transport;
	struct sctp_association *asoc = tp->asoc;
	struct sctp_sock *sp = NULL;
	struct sock *sk;

	pr_debug("%s: packet:%p vtag:0x%x\n", __func__, packet, vtag);
	packet->vtag = vtag;

	/* do the following jobs only once for a flush schedule */
	if (!sctp_packet_empty(packet))
		return;

	/* set packet max_size with pathmtu, then calculate overhead */
	packet->max_size = tp->pathmtu;

	if (asoc) {
		sk = asoc->base.sk;
		sp = sctp_sk(sk);
	}
	packet->overhead = sctp_mtu_payload(sp, 0, 0);
	packet->size = packet->overhead;

	if (!asoc)
		return;

	/* update dst or transport pathmtu if in need */
	if (!sctp_transport_dst_check(tp)) {
		sctp_transport_route(tp, NULL, sp);
		if (asoc->param_flags & SPP_PMTUD_ENABLE)
			sctp_assoc_sync_pmtu(asoc);
	} else if (!sctp_transport_pmtu_check(tp)) {
		if (asoc->param_flags & SPP_PMTUD_ENABLE)
			sctp_assoc_sync_pmtu(asoc);
	}

	if (asoc->pmtu_pending) {
		if (asoc->param_flags & SPP_PMTUD_ENABLE)
			sctp_assoc_sync_pmtu(asoc);
		asoc->pmtu_pending = 0;
	}

	/* If there is a prepend chunk, stick it on the list before
	 * any other chunks get appended.
	 */
	if (ecn_capable) {
		struct sctp_chunk *chunk = sctp_get_ecne_prepend(asoc);

		if (chunk)
			sctp_packet_append_chunk(packet, chunk);
	}

	if (!tp->dst)
		return;

	/* set packet max_size with gso_max_size if gso is enabled */
	rcu_read_lock();
	if (__sk_dst_get(sk) != tp->dst) {
		dst_hold(tp->dst);
		sk_setup_caps(sk, tp->dst);
	}
	packet->max_size = sk_can_gso(sk) ? tp->dst->dev->gso_max_size
					  : asoc->pathmtu;
	rcu_read_unlock();
}

/* Initialize the packet structure. */
void sctp_packet_init(struct sctp_packet *packet,
		      struct sctp_transport *transport,
		      __u16 sport, __u16 dport)
{
	pr_debug("%s: packet:%p transport:%p\n", __func__, packet, transport);

	packet->transport = transport;
	packet->source_port = sport;
	packet->destination_port = dport;
	INIT_LIST_HEAD(&packet->chunk_list);
	/* The overhead will be calculated by sctp_packet_config() */
	packet->overhead = 0;
	sctp_packet_reset(packet);
	packet->vtag = 0;
}

/* Free a packet. */
void sctp_packet_free(struct sctp_packet *packet)
{
	struct sctp_chunk *chunk, *tmp;

	pr_debug("%s: packet:%p\n", __func__, packet);

	list_for_each_entry_safe(chunk, tmp, &packet->chunk_list, list) {
		list_del_init(&chunk->list);
		sctp_chunk_free(chunk);
	}
}

/* This routine tries to append the chunk to the offered packet. If adding
 * the chunk causes the packet to exceed the path MTU and COOKIE_ECHO chunk
 * is not present in the packet, it transmits the input packet.
 * Data can be bundled with a packet containing a COOKIE_ECHO chunk as long
 * as it can fit in the packet, but any more data that does not fit in this
 * packet can be sent only after receiving the COOKIE_ACK.
 */
enum sctp_xmit sctp_packet_transmit_chunk(struct sctp_packet *packet,
					  struct sctp_chunk *chunk,
					  int one_packet, gfp_t gfp)
{
	enum sctp_xmit retval;

	pr_debug("%s: packet:%p size:%zu chunk:%p size:%d\n", __func__,
		 packet, packet->size, chunk, chunk->skb ? chunk->skb->len : -1);

	switch ((retval = (sctp_packet_append_chunk(packet, chunk)))) {
	case SCTP_XMIT_PMTU_FULL:
		if (!packet->has_cookie_echo) {
			int error = 0;

			error = sctp_packet_transmit(packet, gfp);
			if (error < 0)
				chunk->skb->sk->sk_err = -error;

			/* If we have an empty packet, then we can NOT ever
			 * return PMTU_FULL.
			 */
			if (!one_packet)
				retval = sctp_packet_append_chunk(packet,
								  chunk);
		}
		break;

	case SCTP_XMIT_RWND_FULL:
	case SCTP_XMIT_OK:
	case SCTP_XMIT_DELAY:
		break;
	}

	return retval;
}

/* Try to bundle an auth chunk into the packet. */
static enum sctp_xmit sctp_packet_bundle_auth(struct sctp_packet *pkt,
					      struct sctp_chunk *chunk)
{
	struct sctp_association *asoc = pkt->transport->asoc;
	enum sctp_xmit retval = SCTP_XMIT_OK;
	struct sctp_chunk *auth;

	/* if we don't have an association, we can't do authentication */
	if (!asoc)
		return retval;

	/* See if this is an auth chunk we are bundling or if
	 * auth is already bundled.
	 */
	if (chunk->chunk_hdr->type == SCTP_CID_AUTH || pkt->has_auth)
		return retval;

	/* if the peer did not request this chunk to be authenticated,
	 * don't do it
	 */
	if (!chunk->auth)
		return retval;

	auth = sctp_make_auth(asoc, chunk->shkey->key_id);
	if (!auth)
		return retval;

	auth->shkey = chunk->shkey;
	sctp_auth_shkey_hold(auth->shkey);

	retval = __sctp_packet_append_chunk(pkt, auth);

	if (retval != SCTP_XMIT_OK)
		sctp_chunk_free(auth);

	return retval;
}

/* Try to bundle a SACK with the packet. */
static enum sctp_xmit sctp_packet_bundle_sack(struct sctp_packet *pkt,
					      struct sctp_chunk *chunk)
{
	enum sctp_xmit retval = SCTP_XMIT_OK;

	/* If sending DATA and haven't already bundled a SACK, try to
	 * bundle one in to the packet.
	 */
	if (sctp_chunk_is_data(chunk) && !pkt->has_sack &&
	    !pkt->has_cookie_echo) {
		struct sctp_association *asoc;
		struct timer_list *timer;
		asoc = pkt->transport->asoc;
		timer = &asoc->timers[SCTP_EVENT_TIMEOUT_SACK];

		/* If the SACK timer is running, we have a pending SACK */
		if (timer_pending(timer)) {
			struct sctp_chunk *sack;

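			/* Bundle the SACK only if this transport's SACK
			 * generation matches the association's, i.e. its
			 * view of the SACK state is current; otherwise
			 * skip bundling and leave it to the SACK timer.
			 */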
			if (pkt->transport->sack_generation !=
			    pkt->transport->asoc->peer.sack_generation)
				return retval;

			asoc->a_rwnd = asoc->rwnd;
			sack = sctp_make_sack(asoc);
			if (sack) {
				retval = __sctp_packet_append_chunk(pkt, sack);
				if (retval != SCTP_XMIT_OK) {
					sctp_chunk_free(sack);
					goto out;
				}
				SCTP_INC_STATS(asoc->base.net,
					       SCTP_MIB_OUTCTRLCHUNKS);
				asoc->stats.octrlchunks++;
				asoc->peer.sack_needed = 0;
				if (del_timer(timer))
					sctp_association_put(asoc);
			}
		}
	}
out:
	return retval;
}


/* Append a chunk to the offered packet reporting back any inability to do
 * so.
 */
static enum sctp_xmit __sctp_packet_append_chunk(struct sctp_packet *packet,
						 struct sctp_chunk *chunk)
{
	__u16 chunk_len = SCTP_PAD4(ntohs(chunk->chunk_hdr->length));
	enum sctp_xmit retval = SCTP_XMIT_OK;

	/* Check to see if this chunk will fit into the packet */
	retval = sctp_packet_will_fit(packet, chunk, chunk_len);
	if (retval != SCTP_XMIT_OK)
		goto finish;

	/* We believe that this chunk is OK to add to the packet */
	switch (chunk->chunk_hdr->type) {
	case SCTP_CID_DATA:
	case SCTP_CID_I_DATA:
		/* Account for the data being in the packet */
		sctp_packet_append_data(packet, chunk);
		/* Disallow SACK bundling after DATA. */
		packet->has_sack = 1;
		/* Disallow AUTH bundling after DATA */
		packet->has_auth = 1;
		/* Let it be known that the packet has DATA in it */
		packet->has_data = 1;
		/* timestamp the chunk for rtx purposes */
		chunk->sent_at = jiffies;
		/* Mainly used for prsctp RTX policy */
		chunk->sent_count++;
		break;
	case SCTP_CID_COOKIE_ECHO:
		packet->has_cookie_echo = 1;
		break;

	case SCTP_CID_SACK:
		packet->has_sack = 1;
		if (chunk->asoc)
			chunk->asoc->stats.osacks++;
		break;

	case SCTP_CID_AUTH:
		packet->has_auth = 1;
		packet->auth = chunk;
		break;
	}

	/* It is OK to send this chunk. */
	list_add_tail(&chunk->list, &packet->chunk_list);
	packet->size += chunk_len;
	chunk->transport = packet->transport;
finish:
	return retval;
}

/* Append a chunk to the offered packet reporting back any inability to do
 * so.
 */
enum sctp_xmit sctp_packet_append_chunk(struct sctp_packet *packet,
					struct sctp_chunk *chunk)
{
	enum sctp_xmit retval = SCTP_XMIT_OK;

	pr_debug("%s: packet:%p chunk:%p\n", __func__, packet, chunk);

	/* Data chunks are special. Before seeing what else we can
	 * bundle into this packet, check to see if we are allowed to
	 * send this DATA.
	 */
	if (sctp_chunk_is_data(chunk)) {
		retval = sctp_packet_can_append_data(packet, chunk);
		if (retval != SCTP_XMIT_OK)
			goto finish;
	}

	/* Try to bundle AUTH chunk */
	retval = sctp_packet_bundle_auth(packet, chunk);
	if (retval != SCTP_XMIT_OK)
		goto finish;

	/* Try to bundle SACK chunk */
	retval = sctp_packet_bundle_sack(packet, chunk);
	if (retval != SCTP_XMIT_OK)
		goto finish;

	retval = __sctp_packet_append_chunk(packet, chunk);

finish:
	return retval;
}

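/* Chain a freshly packed sub-packet onto the GSO head skb's frag_list.
 * Each sub-packet stays within the path MTU; since sctp_packet_pack()
 * sets gso_size to GSO_BY_FRAGS, segmentation later walks this frag_list
 * and emits every fragment as its own SCTP packet.
 */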
static void sctp_packet_gso_append(struct sk_buff *head, struct sk_buff *skb)
{
	if (SCTP_OUTPUT_CB(head)->last == head)
		skb_shinfo(head)->frag_list = skb;
	else
		SCTP_OUTPUT_CB(head)->last->next = skb;
	SCTP_OUTPUT_CB(head)->last = skb;

	head->truesize += skb->truesize;
	head->data_len += skb->len;
	head->len += skb->len;
	refcount_add(skb->truesize, &head->sk->sk_wmem_alloc);

	__skb_header_release(skb);
}

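/* Pack the chunks queued on the packet into wire format behind the SCTP
 * header already prepared in 'head'.  Without GSO everything is copied
 * straight into 'head'; with GSO each path-MTU-sized group of chunks gets
 * its own skb chained via sctp_packet_gso_append().  AUTH HMACs are filled
 * in per sub-packet, and the checksum is either computed here or left for
 * offload.  Returns the number of packed (sub-)packets, or 0 on failure.
 */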
static int sctp_packet_pack(struct sctp_packet *packet,
			    struct sk_buff *head, int gso, gfp_t gfp)
{
	struct sctp_transport *tp = packet->transport;
	struct sctp_auth_chunk *auth = NULL;
	struct sctp_chunk *chunk, *tmp;
	int pkt_count = 0, pkt_size;
	struct sock *sk = head->sk;
	struct sk_buff *nskb;
	int auth_len = 0;

	if (gso) {
		skb_shinfo(head)->gso_type = sk->sk_gso_type;
		SCTP_OUTPUT_CB(head)->last = head;
	} else {
		nskb = head;
		pkt_size = packet->size;
		goto merge;
	}

	do {
		/* calculate the pkt_size and alloc nskb */
		pkt_size = packet->overhead;
		list_for_each_entry_safe(chunk, tmp, &packet->chunk_list,
					 list) {
			int padded = SCTP_PAD4(chunk->skb->len);

			if (chunk == packet->auth)
				auth_len = padded;
			else if (auth_len + padded + packet->overhead >
				 tp->pathmtu)
				return 0;
			else if (pkt_size + padded > tp->pathmtu)
				break;
			pkt_size += padded;
		}
		nskb = alloc_skb(pkt_size + MAX_HEADER, gfp);
		if (!nskb)
			return 0;
		skb_reserve(nskb, packet->overhead + MAX_HEADER);

merge:
		/* merge chunks into nskb and append nskb into head list */
		pkt_size -= packet->overhead;
		list_for_each_entry_safe(chunk, tmp, &packet->chunk_list, list) {
			int padding;

			list_del_init(&chunk->list);
			if (sctp_chunk_is_data(chunk)) {
				if (!sctp_chunk_retransmitted(chunk) &&
				    !tp->rto_pending) {
					chunk->rtt_in_progress = 1;
					tp->rto_pending = 1;
				}
			}

			padding = SCTP_PAD4(chunk->skb->len) - chunk->skb->len;
			if (padding)
				skb_put_zero(chunk->skb, padding);

			if (chunk == packet->auth)
				auth = (struct sctp_auth_chunk *)
						skb_tail_pointer(nskb);

			skb_put_data(nskb, chunk->skb->data, chunk->skb->len);

			pr_debug("*** Chunk:%p[%s] %s 0x%x, length:%d, chunk->skb->len:%d, rtt_in_progress:%d\n",
				 chunk,
				 sctp_cname(SCTP_ST_CHUNK(chunk->chunk_hdr->type)),
				 chunk->has_tsn ? "TSN" : "No TSN",
				 chunk->has_tsn ? ntohl(chunk->subh.data_hdr->tsn) : 0,
				 ntohs(chunk->chunk_hdr->length), chunk->skb->len,
				 chunk->rtt_in_progress);

			pkt_size -= SCTP_PAD4(chunk->skb->len);

			if (!sctp_chunk_is_data(chunk) && chunk != packet->auth)
				sctp_chunk_free(chunk);

			if (!pkt_size)
				break;
		}

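		/* SCTP-AUTH: the sub-packet is fully assembled, so fill in
		 * the HMAC of the AUTH chunk copied in above (per RFC 4895
		 * it covers the AUTH chunk and the chunks bundled after it).
		 */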
		if (auth) {
			sctp_auth_calculate_hmac(tp->asoc, nskb, auth,
						 packet->auth->shkey, gfp);
			/* free auth if no more chunks, or add it back */
			if (list_empty(&packet->chunk_list))
				sctp_chunk_free(packet->auth);
			else
				list_add(&packet->auth->list,
					 &packet->chunk_list);
		}

		if (gso)
			sctp_packet_gso_append(head, nskb);

		pkt_count++;
	} while (!list_empty(&packet->chunk_list));

	if (gso) {
		memset(head->cb, 0, max(sizeof(struct inet_skb_parm),
					sizeof(struct inet6_skb_parm)));
		skb_shinfo(head)->gso_segs = pkt_count;
		skb_shinfo(head)->gso_size = GSO_BY_FRAGS;
		goto chksum;
	}

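	/* Checksum handling: the GSO path above jumps straight to the
	 * CHECKSUM_PARTIAL setup so the CRC32c gets resolved later, per
	 * segment.  For plain packets, compute the CRC32c here unless the
	 * device offers NETIF_F_SCTP_CRC offload and nothing on the way
	 * down (xfrm, IP fragmentation, UDP encapsulation) would invalidate
	 * a deferred checksum; sctp_checksum_disable skips it entirely.
	 */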
	if (sctp_checksum_disable)
		return 1;

	if (!(tp->dst->dev->features & NETIF_F_SCTP_CRC) ||
	    dst_xfrm(tp->dst) || packet->ipfragok || tp->encap_port) {
		struct sctphdr *sh =
			(struct sctphdr *)skb_transport_header(head);

		sh->checksum = sctp_compute_cksum(head, 0);
	} else {
chksum:
		head->ip_summed = CHECKSUM_PARTIAL;
		head->csum_not_inet = 1;
		head->csum_start = skb_transport_header(head) - head->head;
		head->csum_offset = offsetof(struct sctphdr, checksum);
	}

	return pkt_count;
}

/* All packets are sent to the network through this function from
 * sctp_outq_tail().
 *
 * The return value is always 0 for now.
 */
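/* Roughly: pick GSO vs. plain based on packet size against the path MTU,
 * allocate the head skb, push the SCTP common header, refresh the socket's
 * cached route, pack the queued chunks via sctp_packet_pack(), arm the
 * autoclose timer if DATA was sent, and hand the result to the address
 * family's sctp_xmit().  Control chunks left on the list are freed on exit.
 */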
int sctp_packet_transmit(struct sctp_packet *packet, gfp_t gfp)
{
	struct sctp_transport *tp = packet->transport;
	struct sctp_association *asoc = tp->asoc;
	struct sctp_chunk *chunk, *tmp;
	int pkt_count, gso = 0;
	struct sk_buff *head;
	struct sctphdr *sh;
	struct sock *sk;

	pr_debug("%s: packet:%p\n", __func__, packet);
	if (list_empty(&packet->chunk_list))
		return 0;
	chunk = list_entry(packet->chunk_list.next, struct sctp_chunk, list);
	sk = chunk->skb->sk;

	/* check gso */
	if (packet->size > tp->pathmtu && !packet->ipfragok) {
		if (!sk_can_gso(sk)) {
			pr_err_once("Trying to GSO but underlying device doesn't support it.");
			goto out;
		}
		gso = 1;
	}

	/* alloc head skb */
	head = alloc_skb((gso ? packet->overhead : packet->size) +
			 MAX_HEADER, gfp);
	if (!head)
		goto out;
	skb_reserve(head, packet->overhead + MAX_HEADER);
	skb_set_owner_w(head, sk);

	/* set sctp header */
	sh = skb_push(head, sizeof(struct sctphdr));
	skb_reset_transport_header(head);
	sh->source = htons(packet->source_port);
	sh->dest = htons(packet->destination_port);
	sh->vtag = htonl(packet->vtag);
	sh->checksum = 0;

	/* drop packet if no dst */
	if (!tp->dst) {
		IP_INC_STATS(sock_net(sk), IPSTATS_MIB_OUTNOROUTES);
		kfree_skb(head);
		goto out;
	}

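	/* Cache the transport's route on the socket (and refresh the
	 * socket's GSO/checksum capabilities) if it isn't cached already.
	 */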
	rcu_read_lock();
	if (__sk_dst_get(sk) != tp->dst) {
		dst_hold(tp->dst);
		sk_setup_caps(sk, tp->dst);
	}
	rcu_read_unlock();

	/* pack up chunks */
	pkt_count = sctp_packet_pack(packet, head, gso, gfp);
	if (!pkt_count) {
		kfree_skb(head);
		goto out;
	}
	pr_debug("***sctp_transmit_packet*** skb->len:%d\n", head->len);

	/* start autoclose timer */
	if (packet->has_data && sctp_state(asoc, ESTABLISHED) &&
	    asoc->timeouts[SCTP_EVENT_TIMEOUT_AUTOCLOSE]) {
		struct timer_list *timer =
			&asoc->timers[SCTP_EVENT_TIMEOUT_AUTOCLOSE];
		unsigned long timeout =
			asoc->timeouts[SCTP_EVENT_TIMEOUT_AUTOCLOSE];

		if (!mod_timer(timer, jiffies + timeout))
			sctp_association_hold(asoc);
	}

	/* sctp xmit */
	tp->af_specific->ecn_capable(sk);
	if (asoc) {
		asoc->stats.opackets += pkt_count;
		if (asoc->peer.last_sent_to != tp)
			asoc->peer.last_sent_to = tp;
	}
	head->ignore_df = packet->ipfragok;
	if (tp->dst_pending_confirm)
		skb_set_dst_pending_confirm(head, 1);
	/* neighbour should be confirmed on successful transmission or
	 * positive error
	 */
	if (tp->af_specific->sctp_xmit(head, tp) >= 0 &&
	    tp->dst_pending_confirm)
		tp->dst_pending_confirm = 0;

out:
	list_for_each_entry_safe(chunk, tmp, &packet->chunk_list, list) {
		list_del_init(&chunk->list);
		if (!sctp_chunk_is_data(chunk))
			sctp_chunk_free(chunk);
	}
	sctp_packet_reset(packet);
	return 0;
}

/********************************************************************
 * 2nd Level Abstractions
 ********************************************************************/

/* This private function checks to see if a chunk can be added */
static enum sctp_xmit sctp_packet_can_append_data(struct sctp_packet *packet,
						  struct sctp_chunk *chunk)
{
	size_t datasize, rwnd, inflight, flight_size;
	struct sctp_transport *transport = packet->transport;
	struct sctp_association *asoc = transport->asoc;
	struct sctp_outq *q = &asoc->outqueue;

	/* RFC 2960 6.1 Transmission of DATA Chunks
	 *
	 * A) At any given time, the data sender MUST NOT transmit new data to
	 * any destination transport address if its peer's rwnd indicates
	 * that the peer has no buffer space (i.e. rwnd is 0, see Section
	 * 6.2.1). However, regardless of the value of rwnd (including if it
	 * is 0), the data sender can always have one DATA chunk in flight to
	 * the receiver if allowed by cwnd (see rule B below). This rule
	 * allows the sender to probe for a change in rwnd that the sender
	 * missed due to the SACK having been lost in transit from the data
	 * receiver to the data sender.
	 */

	rwnd = asoc->peer.rwnd;
	inflight = q->outstanding_bytes;
	flight_size = transport->flight_size;

	datasize = sctp_data_size(chunk);

	if (datasize > rwnd && inflight > 0)
		/* We have (at least) one data chunk in flight,
		 * so we can't fall back to rule 6.1 B).
		 */
		return SCTP_XMIT_RWND_FULL;

	/* RFC 2960 6.1 Transmission of DATA Chunks
	 *
	 * B) At any given time, the sender MUST NOT transmit new data
	 * to a given transport address if it has cwnd or more bytes
	 * of data outstanding to that transport address.
	 */
	/* RFC 2960 7.2.4 & the Implementers Guide 2.8.
	 *
	 * 3) ...
	 * When a Fast Retransmit is being performed the sender SHOULD
	 * ignore the value of cwnd and SHOULD NOT delay retransmission.
	 */
	if (chunk->fast_retransmit != SCTP_NEED_FRTX &&
	    flight_size >= transport->cwnd)
		return SCTP_XMIT_RWND_FULL;

	/* Nagle's algorithm to solve small-packet problem:
	 * Inhibit the sending of new chunks when new outgoing data arrives
	 * if any previously transmitted data on the connection remains
	 * unacknowledged.
	 */

	if ((sctp_sk(asoc->base.sk)->nodelay || inflight == 0) &&
	    !asoc->force_delay)
		/* Nothing unacked */
		return SCTP_XMIT_OK;

	if (!sctp_packet_empty(packet))
		/* Append to packet */
		return SCTP_XMIT_OK;

	if (!sctp_state(asoc, ESTABLISHED))
		return SCTP_XMIT_OK;

	/* Check whether this chunk and all the rest of pending data will fit
	 * or delay in hopes of bundling a full sized packet.
	 */
	if (chunk->skb->len + q->out_qlen > transport->pathmtu -
	    packet->overhead - sctp_datachk_len(&chunk->asoc->stream) - 4)
		/* Enough data queued to fill a packet */
		return SCTP_XMIT_OK;

	/* Don't delay large message writes that may have been fragmented */
	if (!chunk->msg->can_delay)
		return SCTP_XMIT_OK;

	/* Defer until all data acked or packet full */
	return SCTP_XMIT_DELAY;
}

/* This private function does the bookkeeping when adding a DATA chunk */
static void sctp_packet_append_data(struct sctp_packet *packet,
				    struct sctp_chunk *chunk)
{
	struct sctp_transport *transport = packet->transport;
	size_t datasize = sctp_data_size(chunk);
	struct sctp_association *asoc = transport->asoc;
	u32 rwnd = asoc->peer.rwnd;

	/* Keep track of how many bytes are in flight over this transport. */
	transport->flight_size += datasize;

	/* Keep track of how many bytes are in flight to the receiver. */
	asoc->outqueue.outstanding_bytes += datasize;

	/* Update our view of the receiver's rwnd. */
	if (datasize < rwnd)
		rwnd -= datasize;
	else
		rwnd = 0;

	asoc->peer.rwnd = rwnd;
	sctp_chunk_assign_tsn(chunk);
	asoc->stream.si->assign_number(chunk);
}

754 | ||
86b36f2a XL |
755 | static enum sctp_xmit sctp_packet_will_fit(struct sctp_packet *packet, |
756 | struct sctp_chunk *chunk, | |
757 | u16 chunk_len) | |
e83963b7 | 758 | { |
86b36f2a | 759 | enum sctp_xmit retval = SCTP_XMIT_OK; |
7303a147 | 760 | size_t psize, pmtu, maxsize; |
e83963b7 | 761 | |
1b1e0bc9 XL |
762 | /* Don't bundle in this packet if this chunk's auth key doesn't |
763 | * match other chunks already enqueued on this packet. Also, | |
764 | * don't bundle the chunk with auth key if other chunks in this | |
765 | * packet don't have auth key. | |
766 | */ | |
767 | if ((packet->auth && chunk->shkey != packet->auth->shkey) || | |
768 | (!packet->auth && chunk->shkey && | |
769 | chunk->chunk_hdr->type != SCTP_CID_AUTH)) | |
770 | return SCTP_XMIT_PMTU_FULL; | |
771 | ||
e83963b7 | 772 | psize = packet->size; |
90017acc MRL |
773 | if (packet->transport->asoc) |
774 | pmtu = packet->transport->asoc->pathmtu; | |
775 | else | |
776 | pmtu = packet->transport->pathmtu; | |
e83963b7 VY |
777 | |
778 | /* Decide if we need to fragment or resubmit later. */ | |
90017acc MRL |
779 | if (psize + chunk_len > pmtu) { |
780 | /* It's OK to fragment at IP level if any one of the following | |
e83963b7 | 781 | * is true: |
90017acc MRL |
782 | * 1. The packet is empty (meaning this chunk is greater |
783 | * the MTU) | |
784 | * 2. The packet doesn't have any data in it yet and data | |
785 | * requires authentication. | |
e83963b7 | 786 | */ |
90017acc | 787 | if (sctp_packet_empty(packet) || |
e83963b7 VY |
788 | (!packet->has_data && chunk->auth)) { |
789 | /* We no longer do re-fragmentation. | |
790 | * Just fragment at the IP layer, if we | |
791 | * actually hit this condition | |
792 | */ | |
793 | packet->ipfragok = 1; | |
90017acc | 794 | goto out; |
e83963b7 | 795 | } |
90017acc | 796 | |
7303a147 MRL |
797 | /* Similarly, if this chunk was built before a PMTU |
798 | * reduction, we have to fragment it at IP level now. So | |
799 | * if the packet already contains something, we need to | |
800 | * flush. | |
801 | */ | |
802 | maxsize = pmtu - packet->overhead; | |
803 | if (packet->auth) | |
e2f036a9 | 804 | maxsize -= SCTP_PAD4(packet->auth->skb->len); |
7303a147 MRL |
805 | if (chunk_len > maxsize) |
806 | retval = SCTP_XMIT_PMTU_FULL; | |
807 | ||
90017acc MRL |
808 | /* It is also okay to fragment if the chunk we are |
809 | * adding is a control chunk, but only if current packet | |
810 | * is not a GSO one otherwise it causes fragmentation of | |
811 | * a large frame. So in this case we allow the | |
812 | * fragmentation by forcing it to be in a new packet. | |
813 | */ | |
814 | if (!sctp_chunk_is_data(chunk) && packet->has_data) | |
815 | retval = SCTP_XMIT_PMTU_FULL; | |
816 | ||
817 | if (psize + chunk_len > packet->max_size) | |
818 | /* Hit GSO/PMTU limit, gotta flush */ | |
819 | retval = SCTP_XMIT_PMTU_FULL; | |
820 | ||
821 | if (!packet->transport->burst_limited && | |
822 | psize + chunk_len > (packet->transport->cwnd >> 1)) | |
823 | /* Do not allow a single GSO packet to use more | |
824 | * than half of cwnd. | |
825 | */ | |
826 | retval = SCTP_XMIT_PMTU_FULL; | |
827 | ||
828 | if (packet->transport->burst_limited && | |
829 | psize + chunk_len > (packet->transport->burst_limited >> 1)) | |
830 | /* Do not allow a single GSO packet to use more | |
831 | * than half of original cwnd. | |
832 | */ | |
833 | retval = SCTP_XMIT_PMTU_FULL; | |
834 | /* Otherwise it will fit in the GSO packet */ | |
e83963b7 | 835 | } |
1da177e4 | 836 | |
90017acc | 837 | out: |
1da177e4 LT |
838 | return retval; |
839 | } |