Commit | Line | Data |
---|---|---|
60c778b2 | 1 | /* SCTP kernel implementation |
1da177e4 LT |
2 | * (C) Copyright IBM Corp. 2001, 2004 |
3 | * Copyright (c) 1999 Cisco, Inc. | |
4 | * Copyright (c) 1999-2001 Motorola, Inc. | |
5 | * | |
60c778b2 | 6 | * This file is part of the SCTP kernel implementation |
1da177e4 LT |
7 | * |
8 | * These functions work with the state functions in sctp_sm_statefuns.c | |
9 | * to implement the state operations. These functions implement the | |
10 | * steps which require modifying existing data structures. | |
11 | * | |
60c778b2 | 12 | * This SCTP implementation is free software; |
1da177e4 LT |
13 | * you can redistribute it and/or modify it under the terms of |
14 | * the GNU General Public License as published by | |
15 | * the Free Software Foundation; either version 2, or (at your option) | |
16 | * any later version. | |
17 | * | |
60c778b2 | 18 | * This SCTP implementation is distributed in the hope that it |
1da177e4 LT |
19 | * will be useful, but WITHOUT ANY WARRANTY; without even the implied |
20 | * ************************ | |
21 | * warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. | |
22 | * See the GNU General Public License for more details. | |
23 | * | |
24 | * You should have received a copy of the GNU General Public License | |
4b2f13a2 JK |
25 | * along with GNU CC; see the file COPYING. If not, see |
26 | * <http://www.gnu.org/licenses/>. | |
1da177e4 LT |
27 | * |
28 | * Please send any bug reports or fixes you make to the | |
29 | * email address(es): | |
91705c61 | 30 | * lksctp developers <[email protected]> |
1da177e4 | 31 | * |
1da177e4 LT |
32 | * Written or modified by: |
33 | * La Monte H.P. Yarroll <[email protected]> | |
34 | * Karl Knutson <[email protected]> | |
35 | * Jon Grimm <[email protected]> | |
36 | * Hui Huang <[email protected]> | |
37 | * Dajiang Zhang <[email protected]> | |
38 | * Daisy Chang <[email protected]> | |
39 | * Sridhar Samudrala <[email protected]> | |
40 | * Ardelle Fan <[email protected]> | |
1da177e4 LT |
41 | */ |
42 | ||
145ce502 JP |
43 | #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt |
44 | ||
1da177e4 LT |
45 | #include <linux/skbuff.h> |
46 | #include <linux/types.h> | |
47 | #include <linux/socket.h> | |
48 | #include <linux/ip.h> | |
5a0e3ad6 | 49 | #include <linux/gfp.h> |
1da177e4 LT |
50 | #include <net/sock.h> |
51 | #include <net/sctp/sctp.h> | |
52 | #include <net/sctp/sm.h> | |
53 | ||
54 | static int sctp_cmd_interpreter(sctp_event_t event_type, | |
55 | sctp_subtype_t subtype, | |
56 | sctp_state_t state, | |
57 | struct sctp_endpoint *ep, | |
58 | struct sctp_association *asoc, | |
59 | void *event_arg, | |
d808ad9a | 60 | sctp_disposition_t status, |
1da177e4 | 61 | sctp_cmd_seq_t *commands, |
dd0fc66f | 62 | gfp_t gfp); |
1da177e4 LT |
63 | static int sctp_side_effects(sctp_event_t event_type, sctp_subtype_t subtype, |
64 | sctp_state_t state, | |
65 | struct sctp_endpoint *ep, | |
649621e3 | 66 | struct sctp_association **asoc, |
1da177e4 LT |
67 | void *event_arg, |
68 | sctp_disposition_t status, | |
69 | sctp_cmd_seq_t *commands, | |
dd0fc66f | 70 | gfp_t gfp); |
1da177e4 LT |
71 | |
72 | /******************************************************************** | |
73 | * Helper functions | |
74 | ********************************************************************/ | |
75 | ||
76 | /* A helper function for delayed processing of INET ECN CE bit. */ | |
d808ad9a | 77 | static void sctp_do_ecn_ce_work(struct sctp_association *asoc, |
1da177e4 LT |
78 | __u32 lowest_tsn) |
79 | { | |
80 | /* Save the TSN away for comparison when we receive CWR */ | |
81 | ||
82 | asoc->last_ecne_tsn = lowest_tsn; | |
83 | asoc->need_ecne = 1; | |
84 | } | |
85 | ||
86 | /* Helper function for delayed processing of SCTP ECNE chunk. */ | |
87 | /* RFC 2960 Appendix A | |
88 | * | |
89 | * RFC 2481 details a specific bit for a sender to send in | |
90 | * the header of its next outbound TCP segment to indicate to | |
91 | * its peer that it has reduced its congestion window. This | |
92 | * is termed the CWR bit. For SCTP the same indication is made | |
93 | * by including the CWR chunk. This chunk contains one data | |
94 | * element, i.e. the TSN number that was sent in the ECNE chunk. | |
95 | * This element represents the lowest TSN number in the datagram | |
96 | * that was originally marked with the CE bit. | |
97 | */ | |
98 | static struct sctp_chunk *sctp_do_ecn_ecne_work(struct sctp_association *asoc, | |
99 | __u32 lowest_tsn, | |
100 | struct sctp_chunk *chunk) | |
101 | { | |
102 | struct sctp_chunk *repl; | |
103 | ||
104 | /* Our previously transmitted packet ran into some congestion | |
105 | * so we should take action by reducing cwnd and ssthresh | |
106 | * and then ACK our peer that we've done so by |
107 | * sending a CWR. | |
108 | */ | |
109 | ||
110 | /* First, try to determine if we want to actually lower | |
111 | * our cwnd variables. Only lower them if the ECNE looks more | |
112 | * recent than the last response. | |
113 | */ | |
114 | if (TSN_lt(asoc->last_cwr_tsn, lowest_tsn)) { | |
115 | struct sctp_transport *transport; | |
116 | ||
117 | /* Find which transport's congestion variables | |
118 | * need to be adjusted. | |
119 | */ | |
120 | transport = sctp_assoc_lookup_tsn(asoc, lowest_tsn); | |
121 | ||
122 | /* Update the congestion variables. */ | |
123 | if (transport) | |
124 | sctp_transport_lower_cwnd(transport, | |
125 | SCTP_LOWER_CWND_ECNE); | |
126 | asoc->last_cwr_tsn = lowest_tsn; | |
127 | } | |
128 | ||
129 | /* Always try to quiet the other end. In case of lost CWR, | |
130 | * resend last_cwr_tsn. | |
131 | */ | |
132 | repl = sctp_make_cwr(asoc, asoc->last_cwr_tsn, chunk); | |
133 | ||
134 | /* If we run out of memory, it will look like a lost CWR. We'll | |
135 | * get back in sync eventually. | |
136 | */ | |
137 | return repl; | |
138 | } | |
139 | ||
140 | /* Helper function to do delayed processing of ECN CWR chunk. */ | |
141 | static void sctp_do_ecn_cwr_work(struct sctp_association *asoc, | |
142 | __u32 lowest_tsn) | |
143 | { | |
144 | /* Turn off ECNE getting auto-prepended to every outgoing | |
145 | * packet | |
146 | */ | |
147 | asoc->need_ecne = 0; | |
148 | } | |
149 | ||
150 | /* Generate SACK if necessary. We call this at the end of a packet. */ | |
151 | static int sctp_gen_sack(struct sctp_association *asoc, int force, | |
152 | sctp_cmd_seq_t *commands) | |
153 | { | |
154 | __u32 ctsn, max_tsn_seen; | |
155 | struct sctp_chunk *sack; | |
52ccb8e9 | 156 | struct sctp_transport *trans = asoc->peer.last_data_from; |
1da177e4 LT |
157 | int error = 0; |
158 | ||
d808ad9a | 159 | if (force || |
52ccb8e9 FF |
160 | (!trans && (asoc->param_flags & SPP_SACKDELAY_DISABLE)) || |
161 | (trans && (trans->param_flags & SPP_SACKDELAY_DISABLE))) | |
1da177e4 LT |
162 | asoc->peer.sack_needed = 1; |
163 | ||
164 | ctsn = sctp_tsnmap_get_ctsn(&asoc->peer.tsn_map); | |
165 | max_tsn_seen = sctp_tsnmap_get_max_tsn_seen(&asoc->peer.tsn_map); | |
166 | ||
167 | /* From 12.2 Parameters necessary per association (i.e. the TCB): | |
168 | * | |
169 | * Ack State : This flag indicates if the next received packet | |
170 | * : is to be responded to with a SACK. ... | |
171 | * : When DATA chunks are out of order, SACK's | |
172 | * : are not delayed (see Section 6). | |
173 | * | |
174 | * [This is actually not mentioned in Section 6, but we | |
175 | * implement it here anyway. --piggy] | |
176 | */ | |
d808ad9a | 177 | if (max_tsn_seen != ctsn) |
1da177e4 LT |
178 | asoc->peer.sack_needed = 1; |
179 | ||
180 | /* From 6.2 Acknowledgement on Reception of DATA Chunks: | |
181 | * | |
182 | * Section 4.2 of [RFC2581] SHOULD be followed. Specifically, | |
183 | * an acknowledgement SHOULD be generated for at least every | |
184 | * second packet (not every second DATA chunk) received, and | |
185 | * SHOULD be generated within 200 ms of the arrival of any | |
186 | * unacknowledged DATA chunk. ... | |
187 | */ | |
188 | if (!asoc->peer.sack_needed) { | |
d364d927 | 189 | asoc->peer.sack_cnt++; |
52ccb8e9 FF |
190 | |
191 | /* Set the SACK delay timeout based on the | |
192 | * SACK delay for the last transport | |
193 | * data was received from, or the default | |
194 | * for the association. | |
195 | */ | |
d364d927 WY |
196 | if (trans) { |
197 | /* We will need a SACK for the next packet. */ | |
198 | if (asoc->peer.sack_cnt >= trans->sackfreq - 1) | |
199 | asoc->peer.sack_needed = 1; | |
200 | ||
d808ad9a | 201 | asoc->timeouts[SCTP_EVENT_TIMEOUT_SACK] = |
52ccb8e9 | 202 | trans->sackdelay; |
d364d927 WY |
203 | } else { |
204 | /* We will need a SACK for the next packet. */ | |
205 | if (asoc->peer.sack_cnt >= asoc->sackfreq - 1) | |
206 | asoc->peer.sack_needed = 1; | |
207 | ||
d808ad9a | 208 | asoc->timeouts[SCTP_EVENT_TIMEOUT_SACK] = |
52ccb8e9 | 209 | asoc->sackdelay; |
d364d927 | 210 | } |
52ccb8e9 FF |
211 | |
212 | /* Restart the SACK timer. */ | |
213 | sctp_add_cmd_sf(commands, SCTP_CMD_TIMER_RESTART, | |
214 | SCTP_TO(SCTP_EVENT_TIMEOUT_SACK)); | |
1da177e4 | 215 | } else { |
07b4d6a1 MRL |
216 | __u32 old_a_rwnd = asoc->a_rwnd; |
217 | ||
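/* Advertise the full current receive window in this SACK; the old
 * a_rwnd is kept so it can be restored if building the SACK fails below.
 */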
90f2f531 | 218 | asoc->a_rwnd = asoc->rwnd; |
1da177e4 | 219 | sack = sctp_make_sack(asoc); |
07b4d6a1 MRL |
220 | if (!sack) { |
221 | asoc->a_rwnd = old_a_rwnd; | |
1da177e4 | 222 | goto nomem; |
07b4d6a1 | 223 | } |
1da177e4 LT |
224 | |
225 | asoc->peer.sack_needed = 0; | |
d364d927 | 226 | asoc->peer.sack_cnt = 0; |
1da177e4 | 227 | |
732ba35e | 228 | sctp_add_cmd_sf(commands, SCTP_CMD_REPLY, SCTP_CHUNK(sack)); |
1da177e4 LT |
229 | |
230 | /* Stop the SACK timer. */ | |
231 | sctp_add_cmd_sf(commands, SCTP_CMD_TIMER_STOP, | |
232 | SCTP_TO(SCTP_EVENT_TIMEOUT_SACK)); | |
233 | } | |
52ccb8e9 | 234 | |
1da177e4 LT |
235 | return error; |
236 | nomem: | |
237 | error = -ENOMEM; | |
238 | return error; | |
239 | } | |
240 | ||
241 | /* When the T3-RTX timer expires, it calls this function to create the | |
242 | * relevant state machine event. | |
243 | */ | |
244 | void sctp_generate_t3_rtx_event(unsigned long peer) | |
245 | { | |
246 | int error; | |
247 | struct sctp_transport *transport = (struct sctp_transport *) peer; | |
248 | struct sctp_association *asoc = transport->asoc; | |
635682a1 KH |
249 | struct sock *sk = asoc->base.sk; |
250 | struct net *net = sock_net(sk); | |
1da177e4 LT |
251 | |
252 | /* Check whether a task is in the sock. */ | |
253 | ||
635682a1 KH |
254 | bh_lock_sock(sk); |
255 | if (sock_owned_by_user(sk)) { | |
bb33381d | 256 | pr_debug("%s: sock is busy\n", __func__); |
1da177e4 LT |
257 | |
258 | /* Try again later. */ | |
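/* (HZ/20 is roughly 50ms.  mod_timer() returns 0 when the timer was
 * not already pending, so an extra reference is taken on the transport
 * for the newly armed timer.)
 */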
259 | if (!mod_timer(&transport->T3_rtx_timer, jiffies + (HZ/20))) | |
260 | sctp_transport_hold(transport); | |
261 | goto out_unlock; | |
262 | } | |
263 | ||
1da177e4 | 264 | /* Run through the state machine. */ |
55e26eb9 | 265 | error = sctp_do_sm(net, SCTP_EVENT_T_TIMEOUT, |
1da177e4 LT |
266 | SCTP_ST_TIMEOUT(SCTP_EVENT_TIMEOUT_T3_RTX), |
267 | asoc->state, | |
268 | asoc->ep, asoc, | |
269 | transport, GFP_ATOMIC); | |
270 | ||
271 | if (error) | |
635682a1 | 272 | sk->sk_err = -error; |
1da177e4 LT |
273 | |
274 | out_unlock: | |
635682a1 | 275 | bh_unlock_sock(sk); |
1da177e4 LT |
276 | sctp_transport_put(transport); |
277 | } | |
278 | ||
279 | /* This is a generic interface for producing timeout events. It works |
280 | * for timeouts which use the association as their parameter. | |
281 | */ | |
282 | static void sctp_generate_timeout_event(struct sctp_association *asoc, | |
283 | sctp_event_timeout_t timeout_type) | |
284 | { | |
635682a1 KH |
285 | struct sock *sk = asoc->base.sk; |
286 | struct net *net = sock_net(sk); | |
1da177e4 LT |
287 | int error = 0; |
288 | ||
635682a1 KH |
289 | bh_lock_sock(sk); |
290 | if (sock_owned_by_user(sk)) { | |
bb33381d DB |
291 | pr_debug("%s: sock is busy: timer %d\n", __func__, |
292 | timeout_type); | |
1da177e4 LT |
293 | |
294 | /* Try again later. */ | |
295 | if (!mod_timer(&asoc->timers[timeout_type], jiffies + (HZ/20))) | |
296 | sctp_association_hold(asoc); | |
297 | goto out_unlock; | |
298 | } | |
299 | ||
300 | /* Is this association really dead and just waiting around for | |
301 | * the timer to let go of the reference? | |
302 | */ | |
303 | if (asoc->base.dead) | |
304 | goto out_unlock; | |
305 | ||
306 | /* Run through the state machine. */ | |
55e26eb9 | 307 | error = sctp_do_sm(net, SCTP_EVENT_T_TIMEOUT, |
1da177e4 LT |
308 | SCTP_ST_TIMEOUT(timeout_type), |
309 | asoc->state, asoc->ep, asoc, | |
310 | (void *)timeout_type, GFP_ATOMIC); | |
311 | ||
312 | if (error) | |
635682a1 | 313 | sk->sk_err = -error; |
1da177e4 LT |
314 | |
315 | out_unlock: | |
635682a1 | 316 | bh_unlock_sock(sk); |
1da177e4 LT |
317 | sctp_association_put(asoc); |
318 | } | |
319 | ||
320 | static void sctp_generate_t1_cookie_event(unsigned long data) | |
321 | { | |
322 | struct sctp_association *asoc = (struct sctp_association *) data; | |
323 | sctp_generate_timeout_event(asoc, SCTP_EVENT_TIMEOUT_T1_COOKIE); | |
324 | } | |
325 | ||
326 | static void sctp_generate_t1_init_event(unsigned long data) | |
327 | { | |
328 | struct sctp_association *asoc = (struct sctp_association *) data; | |
329 | sctp_generate_timeout_event(asoc, SCTP_EVENT_TIMEOUT_T1_INIT); | |
330 | } | |
331 | ||
332 | static void sctp_generate_t2_shutdown_event(unsigned long data) | |
333 | { | |
334 | struct sctp_association *asoc = (struct sctp_association *) data; | |
335 | sctp_generate_timeout_event(asoc, SCTP_EVENT_TIMEOUT_T2_SHUTDOWN); | |
336 | } | |
337 | ||
338 | static void sctp_generate_t4_rto_event(unsigned long data) | |
339 | { | |
340 | struct sctp_association *asoc = (struct sctp_association *) data; | |
341 | sctp_generate_timeout_event(asoc, SCTP_EVENT_TIMEOUT_T4_RTO); | |
342 | } | |
343 | ||
344 | static void sctp_generate_t5_shutdown_guard_event(unsigned long data) | |
345 | { | |
d808ad9a YH |
346 | struct sctp_association *asoc = (struct sctp_association *)data; |
347 | sctp_generate_timeout_event(asoc, | |
1da177e4 LT |
348 | SCTP_EVENT_TIMEOUT_T5_SHUTDOWN_GUARD); |
349 | ||
350 | } /* sctp_generate_t5_shutdown_guard_event() */ | |
351 | ||
352 | static void sctp_generate_autoclose_event(unsigned long data) | |
353 | { | |
354 | struct sctp_association *asoc = (struct sctp_association *) data; | |
355 | sctp_generate_timeout_event(asoc, SCTP_EVENT_TIMEOUT_AUTOCLOSE); | |
356 | } | |
357 | ||
358 | /* Generate a heartbeat event. If the sock is busy, reschedule. Make |
359 | * sure that the transport is still valid. | |
360 | */ | |
361 | void sctp_generate_heartbeat_event(unsigned long data) | |
362 | { | |
363 | int error = 0; | |
364 | struct sctp_transport *transport = (struct sctp_transport *) data; | |
365 | struct sctp_association *asoc = transport->asoc; | |
635682a1 KH |
366 | struct sock *sk = asoc->base.sk; |
367 | struct net *net = sock_net(sk); | |
ba6f5e33 | 368 | u32 elapsed, timeout; |
1da177e4 | 369 | |
635682a1 KH |
370 | bh_lock_sock(sk); |
371 | if (sock_owned_by_user(sk)) { | |
bb33381d | 372 | pr_debug("%s: sock is busy\n", __func__); |
1da177e4 LT |
373 | |
374 | /* Try again later. */ | |
375 | if (!mod_timer(&transport->hb_timer, jiffies + (HZ/20))) | |
376 | sctp_transport_hold(transport); | |
377 | goto out_unlock; | |
378 | } | |
379 | ||
ba6f5e33 MRL |
380 | /* Check if we should still send the heartbeat or reschedule */ |
381 | elapsed = jiffies - transport->last_time_sent; | |
382 | timeout = sctp_transport_timeout(transport); | |
383 | if (elapsed < timeout) { | |
384 | elapsed = timeout - elapsed; | |
385 | if (!mod_timer(&transport->hb_timer, jiffies + elapsed)) | |
386 | sctp_transport_hold(transport); | |
387 | goto out_unlock; | |
388 | } | |
389 | ||
55e26eb9 | 390 | error = sctp_do_sm(net, SCTP_EVENT_T_TIMEOUT, |
1da177e4 LT |
391 | SCTP_ST_TIMEOUT(SCTP_EVENT_TIMEOUT_HEARTBEAT), |
392 | asoc->state, asoc->ep, asoc, | |
393 | transport, GFP_ATOMIC); | |
394 | ||
f05940e6 | 395 | if (error) |
635682a1 | 396 | sk->sk_err = -error; |
1da177e4 LT |
397 | |
398 | out_unlock: | |
635682a1 | 399 | bh_unlock_sock(sk); |
1da177e4 LT |
400 | sctp_transport_put(transport); |
401 | } | |
402 | ||
50b5d6ad VY |
403 | /* Handle the timeout of the ICMP protocol unreachable timer. Trigger |
404 | * the correct state machine transition that will close the association. | |
405 | */ | |
406 | void sctp_generate_proto_unreach_event(unsigned long data) | |
407 | { | |
408 | struct sctp_transport *transport = (struct sctp_transport *) data; | |
409 | struct sctp_association *asoc = transport->asoc; | |
635682a1 KH |
410 | struct sock *sk = asoc->base.sk; |
411 | struct net *net = sock_net(sk); | |
cb3f837b | 412 | |
635682a1 KH |
413 | bh_lock_sock(sk); |
414 | if (sock_owned_by_user(sk)) { | |
bb33381d | 415 | pr_debug("%s: sock is busy\n", __func__); |
50b5d6ad VY |
416 | |
417 | /* Try again later. */ | |
418 | if (!mod_timer(&transport->proto_unreach_timer, | |
419 | jiffies + (HZ/20))) | |
420 | sctp_association_hold(asoc); | |
421 | goto out_unlock; | |
422 | } | |
423 | ||
424 | /* Is this structure just waiting around to actually |
425 | * get destroyed? | |
426 | */ | |
427 | if (asoc->base.dead) | |
428 | goto out_unlock; | |
429 | ||
55e26eb9 | 430 | sctp_do_sm(net, SCTP_EVENT_T_OTHER, |
50b5d6ad VY |
431 | SCTP_ST_OTHER(SCTP_EVENT_ICMP_PROTO_UNREACH), |
432 | asoc->state, asoc->ep, asoc, transport, GFP_ATOMIC); | |
433 | ||
434 | out_unlock: | |
635682a1 | 435 | bh_unlock_sock(sk); |
50b5d6ad VY |
436 | sctp_association_put(asoc); |
437 | } | |
438 | ||
439 | ||
1da177e4 LT |
440 | /* Inject a SACK Timeout event into the state machine. */ |
441 | static void sctp_generate_sack_event(unsigned long data) | |
442 | { | |
443 | struct sctp_association *asoc = (struct sctp_association *) data; | |
444 | sctp_generate_timeout_event(asoc, SCTP_EVENT_TIMEOUT_SACK); | |
445 | } | |
446 | ||
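/* Per-association timeout dispatch table, indexed by sctp_event_timeout_t.
 * The NULL slots are timeouts with no per-association handler here
 * (e.g. T3-RTX and HEARTBEAT are driven by per-transport timers above).
 */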
447 | sctp_timer_event_t *sctp_timer_events[SCTP_NUM_TIMEOUT_TYPES] = { | |
448 | NULL, | |
449 | sctp_generate_t1_cookie_event, | |
450 | sctp_generate_t1_init_event, | |
451 | sctp_generate_t2_shutdown_event, | |
452 | NULL, | |
453 | sctp_generate_t4_rto_event, | |
454 | sctp_generate_t5_shutdown_guard_event, | |
1e7d3d90 | 455 | NULL, |
1da177e4 LT |
456 | sctp_generate_sack_event, |
457 | sctp_generate_autoclose_event, | |
458 | }; | |
459 | ||
460 | ||
461 | /* RFC 2960 8.2 Path Failure Detection | |
462 | * | |
463 | * When its peer endpoint is multi-homed, an endpoint should keep an |
464 | * error counter for each of the destination transport addresses of the | |
465 | * peer endpoint. | |
466 | * | |
467 | * Each time the T3-rtx timer expires on any address, or when a | |
468 | * HEARTBEAT sent to an idle address is not acknowledged within an RTO, |
469 | * the error counter of that destination address will be incremented. | |
470 | * When the value in the error counter exceeds the protocol parameter | |
471 | * 'Path.Max.Retrans' of that destination address, the endpoint should | |
472 | * mark the destination transport address as inactive, and a | |
473 | * notification SHOULD be sent to the upper layer. | |
474 | * | |
475 | */ | |
5aa93bcf NH |
476 | static void sctp_do_8_2_transport_strike(sctp_cmd_seq_t *commands, |
477 | struct sctp_association *asoc, | |
7e99013a VY |
478 | struct sctp_transport *transport, |
479 | int is_hb) | |
1da177e4 | 480 | { |
566178f8 ZY |
481 | struct net *net = sock_net(asoc->base.sk); |
482 | ||
1da177e4 LT |
483 | /* The check for association's overall error counter exceeding the |
484 | * threshold is done in the state function. | |
485 | */ | |
b9f84786 VY |
486 | /* We are here due to a timer expiration. If the timer was |
487 | * not a HEARTBEAT, then normal error tracking is done. | |
488 | * If the timer was a heartbeat, we only increment error counts | |
489 | * when we already have an outstanding HEARTBEAT that has not | |
490 | * been acknowledged. | |
25985edc | 491 | * Additionally, some transport states inhibit error increments. |
ad8fec17 | 492 | */ |
b9f84786 | 493 | if (!is_hb) { |
ad8fec17 | 494 | asoc->overall_error_count++; |
b9f84786 VY |
495 | if (transport->state != SCTP_INACTIVE) |
496 | transport->error_count++; | |
497 | } else if (transport->hb_sent) { | |
498 | if (transport->state != SCTP_UNCONFIRMED) | |
499 | asoc->overall_error_count++; | |
500 | if (transport->state != SCTP_INACTIVE) | |
501 | transport->error_count++; | |
502 | } | |
1da177e4 | 503 | |
5aa93bcf | 504 | /* If the transport error count is greater than the pf_retrans |
7cce3b75 | 505 | * threshold, and less than pathmaxrxt, and if the current state |
8c2eab90 KH |
506 | * is SCTP_ACTIVE, then mark this transport as Partially Failed, |
507 | * see SCTP Quick Failover Draft, section 5.1 | |
5aa93bcf | 508 | */ |
566178f8 ZY |
509 | if (net->sctp.pf_enable && |
510 | (transport->state == SCTP_ACTIVE) && | |
5aa93bcf NH |
511 | (asoc->pf_retrans < transport->pathmaxrxt) && |
512 | (transport->error_count > asoc->pf_retrans)) { | |
513 | ||
514 | sctp_assoc_control_transport(asoc, transport, | |
515 | SCTP_TRANSPORT_PF, | |
516 | 0); | |
517 | ||
518 | /* Update the hb timer to resend a heartbeat every rto */ | |
ba6f5e33 | 519 | sctp_transport_reset_hb_timer(transport); |
5aa93bcf NH |
520 | } |
521 | ||
3f7a87d2 | 522 | if (transport->state != SCTP_INACTIVE && |
b9f84786 | 523 | (transport->error_count > transport->pathmaxrxt)) { |
bb33381d DB |
524 | pr_debug("%s: association:%p transport addr:%pISpc failed\n", |
525 | __func__, asoc, &transport->ipaddr.sa); | |
526 | ||
1da177e4 LT |
527 | sctp_assoc_control_transport(asoc, transport, |
528 | SCTP_TRANSPORT_DOWN, | |
529 | SCTP_FAILED_THRESHOLD); | |
530 | } | |
531 | ||
532 | /* E2) For the destination address for which the timer | |
533 | * expires, set RTO <- RTO * 2 ("back off the timer"). The | |
534 | * maximum value discussed in rule C7 above (RTO.max) may be | |
535 | * used to provide an upper bound to this doubling operation. | |
faee47cd VY |
536 | * |
537 | * Special Case: the first HB doesn't trigger exponential backoff. | |
3ad2f3fb | 538 | * The first unacknowledged HB triggers it. We do this with a flag |
faee47cd | 539 | * that indicates that we have an outstanding HB. |
1da177e4 | 540 | */ |
7e99013a | 541 | if (!is_hb || transport->hb_sent) { |
faee47cd | 542 | transport->rto = min((transport->rto * 2), transport->asoc->rto_max); |
196d6759 | 543 | sctp_max_rto(asoc, transport); |
faee47cd | 544 | } |
1da177e4 LT |
545 | } |
546 | ||
547 | /* Worker routine to handle INIT command failure. */ | |
548 | static void sctp_cmd_init_failed(sctp_cmd_seq_t *commands, | |
549 | struct sctp_association *asoc, | |
95c96174 | 550 | unsigned int error) |
1da177e4 LT |
551 | { |
552 | struct sctp_ulpevent *event; | |
553 | ||
cb3f837b | 554 | event = sctp_ulpevent_make_assoc_change(asoc, 0, SCTP_CANT_STR_ASSOC, |
a5a35e76 | 555 | (__u16)error, 0, 0, NULL, |
1da177e4 LT |
556 | GFP_ATOMIC); |
557 | ||
558 | if (event) | |
559 | sctp_add_cmd_sf(commands, SCTP_CMD_EVENT_ULP, | |
560 | SCTP_ULPEVENT(event)); | |
561 | ||
562 | sctp_add_cmd_sf(commands, SCTP_CMD_NEW_STATE, | |
563 | SCTP_STATE(SCTP_STATE_CLOSED)); | |
564 | ||
565 | /* SEND_FAILED sent later when cleaning up the association. */ | |
566 | asoc->outqueue.error = error; | |
567 | sctp_add_cmd_sf(commands, SCTP_CMD_DELETE_TCB, SCTP_NULL()); | |
568 | } | |
569 | ||
570 | /* Worker routine to handle SCTP_CMD_ASSOC_FAILED. */ | |
571 | static void sctp_cmd_assoc_failed(sctp_cmd_seq_t *commands, | |
572 | struct sctp_association *asoc, | |
573 | sctp_event_t event_type, | |
574 | sctp_subtype_t subtype, | |
575 | struct sctp_chunk *chunk, | |
95c96174 | 576 | unsigned int error) |
1da177e4 LT |
577 | { |
578 | struct sctp_ulpevent *event; | |
de4594a5 | 579 | struct sctp_chunk *abort; |
1da177e4 LT |
580 | /* Cancel any partial delivery in progress. */ |
581 | sctp_ulpq_abort_pd(&asoc->ulpq, GFP_ATOMIC); | |
582 | ||
a5a35e76 VY |
583 | if (event_type == SCTP_EVENT_T_CHUNK && subtype.chunk == SCTP_CID_ABORT) |
584 | event = sctp_ulpevent_make_assoc_change(asoc, 0, SCTP_COMM_LOST, | |
585 | (__u16)error, 0, 0, chunk, | |
586 | GFP_ATOMIC); | |
587 | else | |
588 | event = sctp_ulpevent_make_assoc_change(asoc, 0, SCTP_COMM_LOST, | |
589 | (__u16)error, 0, 0, NULL, | |
1da177e4 LT |
590 | GFP_ATOMIC); |
591 | if (event) | |
592 | sctp_add_cmd_sf(commands, SCTP_CMD_EVENT_ULP, | |
593 | SCTP_ULPEVENT(event)); | |
594 | ||
de4594a5 NH |
595 | if (asoc->overall_error_count >= asoc->max_retrans) { |
596 | abort = sctp_make_violation_max_retrans(asoc, chunk); | |
597 | if (abort) | |
598 | sctp_add_cmd_sf(commands, SCTP_CMD_REPLY, | |
599 | SCTP_CHUNK(abort)); | |
600 | } | |
601 | ||
1da177e4 LT |
602 | sctp_add_cmd_sf(commands, SCTP_CMD_NEW_STATE, |
603 | SCTP_STATE(SCTP_STATE_CLOSED)); | |
604 | ||
1da177e4 LT |
605 | /* SEND_FAILED sent later when cleaning up the association. */ |
606 | asoc->outqueue.error = error; | |
607 | sctp_add_cmd_sf(commands, SCTP_CMD_DELETE_TCB, SCTP_NULL()); | |
608 | } | |
609 | ||
610 | /* Process an init chunk (may be real INIT/INIT-ACK or an embedded INIT | |
611 | * inside the cookie. In reality, this is only used for INIT-ACK processing | |
612 | * since all other cases use "temporary" associations and can do all | |
613 | * their work in statefuns directly. | |
614 | */ | |
615 | static int sctp_cmd_process_init(sctp_cmd_seq_t *commands, | |
616 | struct sctp_association *asoc, | |
617 | struct sctp_chunk *chunk, | |
3182cd84 | 618 | sctp_init_chunk_t *peer_init, |
dd0fc66f | 619 | gfp_t gfp) |
1da177e4 LT |
620 | { |
621 | int error; | |
622 | ||
623 | /* We only process the init as a side effect in a single |
624 | * case. This is when we process the INIT-ACK. If we | |
625 | * fail during INIT processing (due to malloc problems), | |
626 | * just return the error and stop processing the stack. | |
627 | */ | |
de6becdc | 628 | if (!sctp_process_init(asoc, chunk, sctp_source(chunk), peer_init, gfp)) |
1da177e4 LT |
629 | error = -ENOMEM; |
630 | else | |
631 | error = 0; | |
632 | ||
633 | return error; | |
634 | } | |
635 | ||
636 | /* Helper function to break out starting up of heartbeat timers. */ | |
637 | static void sctp_cmd_hb_timers_start(sctp_cmd_seq_t *cmds, | |
638 | struct sctp_association *asoc) | |
639 | { | |
640 | struct sctp_transport *t; | |
1da177e4 LT |
641 | |
642 | /* Start a heartbeat timer for each transport on the association. | |
643 | * Hold a reference on the transport to make sure none of |
644 | * the needed data structures go away. | |
645 | */ | |
ba6f5e33 MRL |
646 | list_for_each_entry(t, &asoc->peer.transport_addr_list, transports) |
647 | sctp_transport_reset_hb_timer(t); | |
1da177e4 LT |
648 | } |
649 | ||
650 | static void sctp_cmd_hb_timers_stop(sctp_cmd_seq_t *cmds, | |
651 | struct sctp_association *asoc) | |
652 | { | |
653 | struct sctp_transport *t; | |
1da177e4 LT |
654 | |
655 | /* Stop all heartbeat timers. */ | |
656 | ||
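/* del_timer() returns true only if the timer was still pending; in
 * that case drop the reference that the pending timer was holding.
 */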
9dbc15f0 RD |
657 | list_for_each_entry(t, &asoc->peer.transport_addr_list, |
658 | transports) { | |
1da177e4 LT |
659 | if (del_timer(&t->hb_timer)) |
660 | sctp_transport_put(t); | |
661 | } | |
662 | } | |
663 | ||
664 | /* Helper function to stop any pending T3-RTX timers */ | |
665 | static void sctp_cmd_t3_rtx_timers_stop(sctp_cmd_seq_t *cmds, | |
d808ad9a | 666 | struct sctp_association *asoc) |
1da177e4 LT |
667 | { |
668 | struct sctp_transport *t; | |
1da177e4 | 669 | |
9dbc15f0 RD |
670 | list_for_each_entry(t, &asoc->peer.transport_addr_list, |
671 | transports) { | |
25cc4ae9 | 672 | if (del_timer(&t->T3_rtx_timer)) |
1da177e4 | 673 | sctp_transport_put(t); |
1da177e4 LT |
674 | } |
675 | } | |
676 | ||
677 | ||
1da177e4 LT |
678 | /* Helper function to handle the reception of a HEARTBEAT ACK. */ |
679 | static void sctp_cmd_transport_on(sctp_cmd_seq_t *cmds, | |
680 | struct sctp_association *asoc, | |
681 | struct sctp_transport *t, | |
682 | struct sctp_chunk *chunk) | |
683 | { | |
684 | sctp_sender_hb_info_t *hbinfo; | |
34d2d89f | 685 | int was_unconfirmed = 0; |
1da177e4 LT |
686 | |
687 | /* 8.3 Upon the receipt of the HEARTBEAT ACK, the sender of the | |
688 | * HEARTBEAT should clear the error counter of the destination | |
689 | * transport address to which the HEARTBEAT was sent. | |
1da177e4 LT |
690 | */ |
691 | t->error_count = 0; | |
f8d96052 TG |
692 | |
693 | /* | |
694 | * Although RFC4960 specifies that the overall error count must | |
695 | * be cleared when a HEARTBEAT ACK is received, we make an | |
696 | * exception while in SHUTDOWN PENDING. If the peer keeps its | |
697 | * window shut forever, we may never be able to transmit our | |
698 | * outstanding data and rely on the retransmission limit being reached |
699 | * to shut down the association. |
700 | */ | |
f648f807 | 701 | if (t->asoc->state < SCTP_STATE_SHUTDOWN_PENDING) |
f8d96052 | 702 | t->asoc->overall_error_count = 0; |
1da177e4 | 703 | |
faee47cd VY |
704 | /* Clear the hb_sent flag to signal that we had a good |
705 | * acknowledgement. | |
706 | */ | |
707 | t->hb_sent = 0; | |
708 | ||
1da177e4 LT |
709 | /* Mark the destination transport address as active if it is not so |
710 | * marked. | |
711 | */ | |
34d2d89f MH |
712 | if ((t->state == SCTP_INACTIVE) || (t->state == SCTP_UNCONFIRMED)) { |
713 | was_unconfirmed = 1; | |
1da177e4 LT |
714 | sctp_assoc_control_transport(asoc, t, SCTP_TRANSPORT_UP, |
715 | SCTP_HEARTBEAT_SUCCESS); | |
34d2d89f | 716 | } |
1da177e4 | 717 | |
5aa93bcf NH |
718 | if (t->state == SCTP_PF) |
719 | sctp_assoc_control_transport(asoc, t, SCTP_TRANSPORT_UP, | |
720 | SCTP_HEARTBEAT_SUCCESS); | |
721 | ||
8c2f414a DB |
722 | /* HB-ACK was received for the proper HB. Consider this |
723 | * forward progress. | |
724 | */ | |
725 | if (t->dst) | |
726 | dst_confirm(t->dst); | |
727 | ||
1da177e4 LT |
728 | /* The receiver of the HEARTBEAT ACK should also perform an |
729 | * RTT measurement for that destination transport address | |
730 | * using the time value carried in the HEARTBEAT ACK chunk. | |
e533ca16 VY |
731 | * If the transport's rto_pending variable has been cleared, |
732 | * it was most likely due to a retransmit. However, we want | |
733 | * to re-enable it to properly update the rto. | |
1da177e4 | 734 | */ |
e533ca16 VY |
735 | if (t->rto_pending == 0) |
736 | t->rto_pending = 1; | |
737 | ||
1da177e4 LT |
738 | hbinfo = (sctp_sender_hb_info_t *) chunk->skb->data; |
739 | sctp_transport_update_rto(t, (jiffies - hbinfo->sent_at)); | |
ad8fec17 SS |
740 | |
741 | /* Update the heartbeat timer. */ | |
ba6f5e33 | 742 | sctp_transport_reset_hb_timer(t); |
34d2d89f MH |
743 | |
744 | if (was_unconfirmed && asoc->peer.transport_count == 1) | |
745 | sctp_transport_immediate_rtx(t); | |
1da177e4 LT |
746 | } |
747 | ||
1da177e4 LT |
748 | |
749 | /* Helper function to process the SACK command. */ |
750 | static int sctp_cmd_process_sack(sctp_cmd_seq_t *cmds, | |
751 | struct sctp_association *asoc, | |
edfee033 | 752 | struct sctp_chunk *chunk) |
1da177e4 | 753 | { |
2e3216cd | 754 | int err = 0; |
1da177e4 | 755 | |
edfee033 | 756 | if (sctp_outq_sack(&asoc->outqueue, chunk)) { |
55e26eb9 EB |
757 | struct net *net = sock_net(asoc->base.sk); |
758 | ||
1da177e4 | 759 | /* There are no more TSNs awaiting SACK. */ |
55e26eb9 | 760 | err = sctp_do_sm(net, SCTP_EVENT_T_OTHER, |
1da177e4 LT |
761 | SCTP_ST_OTHER(SCTP_EVENT_NO_PENDING_TSN), |
762 | asoc->state, asoc->ep, asoc, NULL, | |
763 | GFP_ATOMIC); | |
1da177e4 LT |
764 | } |
765 | ||
766 | return err; | |
767 | } | |
768 | ||
769 | /* Helper function to set the timeout value for T2-SHUTDOWN timer and to set | |
770 | * the transport for a shutdown chunk. | |
771 | */ | |
d808ad9a | 772 | static void sctp_cmd_setup_t2(sctp_cmd_seq_t *cmds, |
1da177e4 LT |
773 | struct sctp_association *asoc, |
774 | struct sctp_chunk *chunk) | |
775 | { | |
776 | struct sctp_transport *t; | |
777 | ||
c17b02b3 VY |
778 | if (chunk->transport) |
779 | t = chunk->transport; | |
780 | else { | |
781 | t = sctp_assoc_choose_alter_transport(asoc, | |
9919b455 | 782 | asoc->shutdown_last_sent_to); |
c17b02b3 VY |
783 | chunk->transport = t; |
784 | } | |
1da177e4 LT |
785 | asoc->shutdown_last_sent_to = t; |
786 | asoc->timeouts[SCTP_EVENT_TIMEOUT_T2_SHUTDOWN] = t->rto; | |
1da177e4 LT |
787 | } |
788 | ||
789 | /* Helper function to change the state of an association. */ | |
d808ad9a | 790 | static void sctp_cmd_new_state(sctp_cmd_seq_t *cmds, |
1da177e4 LT |
791 | struct sctp_association *asoc, |
792 | sctp_state_t state) | |
793 | { | |
794 | struct sock *sk = asoc->base.sk; | |
795 | ||
796 | asoc->state = state; | |
797 | ||
bb33381d | 798 | pr_debug("%s: asoc:%p[%s]\n", __func__, asoc, sctp_state_tbl[state]); |
3f7a87d2 | 799 | |
1da177e4 | 800 | if (sctp_style(sk, TCP)) { |
3f7a87d2 | 801 | /* Change the sk->sk_state of a TCP-style socket that has |
af901ca1 | 802 | * successfully completed a connect() call. |
1da177e4 LT |
803 | */ |
804 | if (sctp_state(asoc, ESTABLISHED) && sctp_sstate(sk, CLOSED)) | |
805 | sk->sk_state = SCTP_SS_ESTABLISHED; | |
806 | ||
807 | /* Set the RCV_SHUTDOWN flag when a SHUTDOWN is received. */ | |
808 | if (sctp_state(asoc, SHUTDOWN_RECEIVED) && | |
d46e416c XL |
809 | sctp_sstate(sk, ESTABLISHED)) { |
810 | sk->sk_state = SCTP_SS_CLOSING; | |
1da177e4 | 811 | sk->sk_shutdown |= RCV_SHUTDOWN; |
d46e416c | 812 | } |
1da177e4 LT |
813 | } |
814 | ||
3f7a87d2 FF |
815 | if (sctp_state(asoc, COOKIE_WAIT)) { |
816 | /* Reset init timeouts since they may have been | |
817 | * increased due to timer expirations. | |
818 | */ | |
819 | asoc->timeouts[SCTP_EVENT_TIMEOUT_T1_INIT] = | |
1e7d3d90 | 820 | asoc->rto_initial; |
3f7a87d2 | 821 | asoc->timeouts[SCTP_EVENT_TIMEOUT_T1_COOKIE] = |
1e7d3d90 | 822 | asoc->rto_initial; |
3f7a87d2 FF |
823 | } |
824 | ||
1da177e4 LT |
825 | if (sctp_state(asoc, ESTABLISHED) || |
826 | sctp_state(asoc, CLOSED) || | |
827 | sctp_state(asoc, SHUTDOWN_RECEIVED)) { | |
828 | /* Wake up any processes waiting in the asoc's wait queue in | |
829 | * sctp_wait_for_connect() or sctp_wait_for_sndbuf(). | |
d808ad9a | 830 | */ |
1da177e4 LT |
831 | if (waitqueue_active(&asoc->wait)) |
832 | wake_up_interruptible(&asoc->wait); | |
833 | ||
834 | /* Wake up any processes waiting in the sk's sleep queue of | |
835 | * a TCP-style or UDP-style peeled-off socket in | |
836 | * sctp_wait_for_accept() or sctp_wait_for_packet(). | |
837 | * For a UDP-style socket, the waiters are woken up by the | |
838 | * notifications. | |
839 | */ | |
840 | if (!sctp_style(sk, UDP)) | |
841 | sk->sk_state_change(sk); | |
842 | } | |
843 | } | |
844 | ||
845 | /* Helper function to delete an association. */ | |
846 | static void sctp_cmd_delete_tcb(sctp_cmd_seq_t *cmds, | |
847 | struct sctp_association *asoc) | |
848 | { | |
849 | struct sock *sk = asoc->base.sk; | |
850 | ||
851 | /* If it is a non-temporary association belonging to a TCP-style | |
d808ad9a | 852 | * listening socket that is not closed, do not free it so that accept() |
1da177e4 | 853 | * can pick it up later. |
d808ad9a | 854 | */ |
1da177e4 LT |
855 | if (sctp_style(sk, TCP) && sctp_sstate(sk, LISTENING) && |
856 | (!asoc->temp) && (sk->sk_shutdown != SHUTDOWN_MASK)) | |
857 | return; | |
858 | ||
1da177e4 LT |
859 | sctp_association_free(asoc); |
860 | } | |
861 | ||
862 | /* | |
863 | * ADDIP Section 4.1 ASCONF Chunk Procedures | |
864 | * A4) Start a T-4 RTO timer, using the RTO value of the selected | |
865 | * destination address (we use active path instead of primary path just | |
d808ad9a | 866 | * because primary path may be inactive). |
1da177e4 LT |
867 | */ |
868 | static void sctp_cmd_setup_t4(sctp_cmd_seq_t *cmds, | |
869 | struct sctp_association *asoc, | |
870 | struct sctp_chunk *chunk) | |
871 | { | |
872 | struct sctp_transport *t; | |
873 | ||
9919b455 | 874 | t = sctp_assoc_choose_alter_transport(asoc, chunk->transport); |
1da177e4 LT |
875 | asoc->timeouts[SCTP_EVENT_TIMEOUT_T4_RTO] = t->rto; |
876 | chunk->transport = t; | |
877 | } | |
878 | ||
d808ad9a | 879 | /* Process an incoming Operation Error Chunk. */ |
1da177e4 LT |
880 | static void sctp_cmd_process_operr(sctp_cmd_seq_t *cmds, |
881 | struct sctp_association *asoc, | |
882 | struct sctp_chunk *chunk) | |
883 | { | |
1da177e4 | 884 | struct sctp_errhdr *err_hdr; |
3df26787 WY |
885 | struct sctp_ulpevent *ev; |
886 | ||
887 | while (chunk->chunk_end > chunk->skb->data) { | |
888 | err_hdr = (struct sctp_errhdr *)(chunk->skb->data); | |
889 | ||
890 | ev = sctp_ulpevent_make_remote_error(asoc, chunk, 0, | |
891 | GFP_ATOMIC); | |
892 | if (!ev) | |
893 | return; | |
894 | ||
895 | sctp_ulpq_tail_event(&asoc->ulpq, ev); | |
896 | ||
897 | switch (err_hdr->cause) { | |
898 | case SCTP_ERROR_UNKNOWN_CHUNK: | |
899 | { | |
900 | sctp_chunkhdr_t *unk_chunk_hdr; | |
901 | ||
902 | unk_chunk_hdr = (sctp_chunkhdr_t *)err_hdr->variable; | |
903 | switch (unk_chunk_hdr->type) { | |
904 | /* ADDIP 4.1 A9) If the peer responds to an ASCONF with | |
905 | * an ERROR chunk reporting that it did not recognize |
906 | * the ASCONF chunk type, the sender of the ASCONF MUST | |
907 | * NOT send any further ASCONF chunks and MUST stop its | |
908 | * T-4 timer. | |
909 | */ | |
910 | case SCTP_CID_ASCONF: | |
911 | if (asoc->peer.asconf_capable == 0) | |
912 | break; | |
913 | ||
914 | asoc->peer.asconf_capable = 0; | |
915 | sctp_add_cmd_sf(cmds, SCTP_CMD_TIMER_STOP, | |
1da177e4 | 916 | SCTP_TO(SCTP_EVENT_TIMEOUT_T4_RTO)); |
3df26787 WY |
917 | break; |
918 | default: | |
919 | break; | |
920 | } | |
1da177e4 | 921 | break; |
3df26787 | 922 | } |
1da177e4 LT |
923 | default: |
924 | break; | |
925 | } | |
1da177e4 LT |
926 | } |
927 | } | |
928 | ||
929 | /* Process variable FWDTSN chunk information. */ | |
d808ad9a | 930 | static void sctp_cmd_process_fwdtsn(struct sctp_ulpq *ulpq, |
1da177e4 LT |
931 | struct sctp_chunk *chunk) |
932 | { | |
933 | struct sctp_fwdtsn_skip *skip; | |
934 | /* Walk through all the skipped SSNs */ | |
935 | sctp_walk_fwdtsn(skip, chunk) { | |
936 | sctp_ulpq_skip(ulpq, ntohs(skip->stream), ntohs(skip->ssn)); | |
937 | } | |
1da177e4 LT |
938 | } |
939 | ||
d808ad9a | 940 | /* Helper function to remove the association's non-primary peer |
1da177e4 | 941 | * transports. |
d808ad9a | 942 | */ |
1da177e4 LT |
943 | static void sctp_cmd_del_non_primary(struct sctp_association *asoc) |
944 | { | |
945 | struct sctp_transport *t; | |
946 | struct list_head *pos; | |
947 | struct list_head *temp; | |
948 | ||
949 | list_for_each_safe(pos, temp, &asoc->peer.transport_addr_list) { | |
950 | t = list_entry(pos, struct sctp_transport, transports); | |
5f242a13 | 951 | if (!sctp_cmp_addr_exact(&t->ipaddr, |
d808ad9a | 952 | &asoc->peer.primary_addr)) { |
73e67420 | 953 | sctp_assoc_rm_peer(asoc, t); |
1da177e4 LT |
954 | } |
955 | } | |
1da177e4 LT |
956 | } |
957 | ||
8de8c873 SS |
958 | /* Helper function to set sk_err on a 1-1 style socket. */ |
959 | static void sctp_cmd_set_sk_err(struct sctp_association *asoc, int error) | |
960 | { | |
961 | struct sock *sk = asoc->base.sk; | |
962 | ||
963 | if (!sctp_style(sk, UDP)) | |
964 | sk->sk_err = error; | |
965 | } | |
966 | ||
07d93967 VY |
967 | /* Helper function to generate an association change event */ |
968 | static void sctp_cmd_assoc_change(sctp_cmd_seq_t *commands, | |
969 | struct sctp_association *asoc, | |
970 | u8 state) | |
971 | { | |
972 | struct sctp_ulpevent *ev; | |
973 | ||
974 | ev = sctp_ulpevent_make_assoc_change(asoc, 0, state, 0, | |
975 | asoc->c.sinit_num_ostreams, | |
976 | asoc->c.sinit_max_instreams, | |
977 | NULL, GFP_ATOMIC); | |
978 | if (ev) | |
979 | sctp_ulpq_tail_event(&asoc->ulpq, ev); | |
980 | } | |
981 | ||
982 | /* Helper function to generate an adaptation indication event */ | |
983 | static void sctp_cmd_adaptation_ind(sctp_cmd_seq_t *commands, | |
984 | struct sctp_association *asoc) | |
985 | { | |
986 | struct sctp_ulpevent *ev; | |
987 | ||
988 | ev = sctp_ulpevent_make_adaptation_indication(asoc, GFP_ATOMIC); | |
989 | ||
990 | if (ev) | |
991 | sctp_ulpq_tail_event(&asoc->ulpq, ev); | |
992 | } | |
993 | ||
96cd0d3d VY |
994 | |
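/* T1 (INIT/COOKIE-ECHO) timer backoff: once the transport last sent to
 * has been tried more than init_cycle + 1 times, double the timeout,
 * capped at max_init_timeo, and count another cycle.
 */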
995 | static void sctp_cmd_t1_timer_update(struct sctp_association *asoc, | |
996 | sctp_event_timeout_t timer, | |
997 | char *name) | |
998 | { | |
999 | struct sctp_transport *t; | |
1000 | ||
1001 | t = asoc->init_last_sent_to; | |
1002 | asoc->init_err_counter++; | |
1003 | ||
1004 | if (t->init_sent_count > (asoc->init_cycle + 1)) { | |
1005 | asoc->timeouts[timer] *= 2; | |
1006 | if (asoc->timeouts[timer] > asoc->max_init_timeo) { | |
1007 | asoc->timeouts[timer] = asoc->max_init_timeo; | |
1008 | } | |
1009 | asoc->init_cycle++; | |
bb33381d DB |
1010 | |
1011 | pr_debug("%s: T1[%s] timeout adjustment init_err_counter:%d" | |
1012 | " cycle:%d timeout:%ld\n", __func__, name, | |
1013 | asoc->init_err_counter, asoc->init_cycle, | |
1014 | asoc->timeouts[timer]); | |
96cd0d3d VY |
1015 | } |
1016 | ||
1017 | } | |
1018 | ||
9c5c62be VY |
1019 | /* Send the whole message, chunk by chunk, to the outqueue. |
1020 | * This way the whole message is queued up and bundling is |
1021 | * encouraged for small fragments. | |
1022 | */ | |
66388f2c XL |
1023 | static void sctp_cmd_send_msg(struct sctp_association *asoc, |
1024 | struct sctp_datamsg *msg, gfp_t gfp) | |
9c5c62be VY |
1025 | { |
1026 | struct sctp_chunk *chunk; | |
9c5c62be | 1027 | |
66388f2c XL |
1028 | list_for_each_entry(chunk, &msg->chunks, frag_list) |
1029 | sctp_outq_tail(&asoc->outqueue, chunk, gfp); | |
9c5c62be VY |
1030 | } |
1031 | ||
1032 | ||
c0786693 VY |
1033 | /* Send the next ASCONF packet currently stored in the association. |
1034 | * This happens after the ASCONF_ACK was successfully processed. |
1035 | */ | |
1036 | static void sctp_cmd_send_asconf(struct sctp_association *asoc) | |
1037 | { | |
55e26eb9 EB |
1038 | struct net *net = sock_net(asoc->base.sk); |
1039 | ||
c0786693 VY |
1040 | /* Send the next asconf chunk from the addip chunk |
1041 | * queue. | |
1042 | */ | |
1043 | if (!list_empty(&asoc->addip_chunk_list)) { | |
1044 | struct list_head *entry = asoc->addip_chunk_list.next; | |
1045 | struct sctp_chunk *asconf = list_entry(entry, | |
1046 | struct sctp_chunk, list); | |
1047 | list_del_init(entry); | |
1048 | ||
1049 | /* Hold the chunk until an ASCONF_ACK is received. */ | |
1050 | sctp_chunk_hold(asconf); | |
55e26eb9 | 1051 | if (sctp_primitive_ASCONF(net, asoc, asconf)) |
c0786693 VY |
1052 | sctp_chunk_free(asconf); |
1053 | else | |
1054 | asoc->addip_last_asconf = asconf; | |
1055 | } | |
1056 | } | |
1057 | ||
9c5c62be | 1058 | |
1da177e4 LT |
1059 | /* These three macros allow us to pull the debugging code out of the |
1060 | * main flow of sctp_do_sm() to keep attention focused on the real | |
1061 | * functionality there. | |
1062 | */ | |
bb33381d DB |
1063 | #define debug_pre_sfn() \ |
1064 | pr_debug("%s[pre-fn]: ep:%p, %s, %s, asoc:%p[%s], %s\n", __func__, \ | |
1065 | ep, sctp_evttype_tbl[event_type], (*debug_fn)(subtype), \ | |
1066 | asoc, sctp_state_tbl[state], state_fn->name) | |
1067 | ||
1068 | #define debug_post_sfn() \ | |
1069 | pr_debug("%s[post-fn]: asoc:%p, status:%s\n", __func__, asoc, \ | |
1070 | sctp_status_tbl[status]) | |
1071 | ||
1072 | #define debug_post_sfx() \ | |
1073 | pr_debug("%s[post-sfx]: error:%d, asoc:%p[%s]\n", __func__, error, \ | |
1074 | asoc, sctp_state_tbl[(asoc && sctp_id2assoc(ep->base.sk, \ | |
1075 | sctp_assoc2id(asoc))) ? asoc->state : SCTP_STATE_CLOSED]) | |
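/* Note: debug_post_sfx() re-validates the association via sctp_id2assoc()
 * because the side effects may have freed it; if so, CLOSED is reported.
 */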
1da177e4 LT |
1076 | |
1077 | /* | |
1078 | * This is the master state machine processing function. | |
1079 | * | |
1080 | * If you want to understand all of lksctp, this is a | |
1081 | * good place to start. | |
1082 | */ | |
55e26eb9 | 1083 | int sctp_do_sm(struct net *net, sctp_event_t event_type, sctp_subtype_t subtype, |
1da177e4 LT |
1084 | sctp_state_t state, |
1085 | struct sctp_endpoint *ep, | |
1086 | struct sctp_association *asoc, | |
1087 | void *event_arg, | |
dd0fc66f | 1088 | gfp_t gfp) |
1da177e4 LT |
1089 | { |
1090 | sctp_cmd_seq_t commands; | |
1091 | const sctp_sm_table_entry_t *state_fn; | |
1092 | sctp_disposition_t status; | |
1093 | int error = 0; | |
1094 | typedef const char *(printfn_t)(sctp_subtype_t); | |
1da177e4 LT |
1095 | static printfn_t *table[] = { |
1096 | NULL, sctp_cname, sctp_tname, sctp_oname, sctp_pname, | |
1097 | }; | |
1098 | printfn_t *debug_fn __attribute__ ((unused)) = table[event_type]; | |
1099 | ||
1100 | /* Look up the state function, run it, and then process the | |
1101 | * side effects. These three steps are the heart of lksctp. | |
1102 | */ | |
55e26eb9 | 1103 | state_fn = sctp_sm_lookup_event(net, event_type, state, subtype); |
1da177e4 LT |
1104 | |
1105 | sctp_init_cmd_seq(&commands); | |
1106 | ||
bb33381d | 1107 | debug_pre_sfn(); |
131334d0 | 1108 | status = state_fn->fn(net, ep, asoc, subtype, event_arg, &commands); |
bb33381d | 1109 | debug_post_sfn(); |
1da177e4 LT |
1110 | |
1111 | error = sctp_side_effects(event_type, subtype, state, | |
649621e3 | 1112 | ep, &asoc, event_arg, status, |
1da177e4 | 1113 | &commands, gfp); |
bb33381d | 1114 | debug_post_sfx(); |
1da177e4 LT |
1115 | |
1116 | return error; | |
1117 | } | |
1118 | ||
1da177e4 LT |
1119 | /***************************************************************** |
1120 | * This is the master state function side effect processing function. |
1121 | *****************************************************************/ | |
1122 | static int sctp_side_effects(sctp_event_t event_type, sctp_subtype_t subtype, | |
1123 | sctp_state_t state, | |
1124 | struct sctp_endpoint *ep, | |
649621e3 | 1125 | struct sctp_association **asoc, |
1da177e4 LT |
1126 | void *event_arg, |
1127 | sctp_disposition_t status, | |
1128 | sctp_cmd_seq_t *commands, | |
dd0fc66f | 1129 | gfp_t gfp) |
1da177e4 LT |
1130 | { |
1131 | int error; | |
1132 | ||
1133 | /* FIXME - Most of the dispositions left today would be categorized | |
1134 | * as "exceptional" dispositions. For those dispositions, it | |
1135 | * may not be proper to run through any of the commands at all. | |
1136 | * For example, the command interpreter might be run only with | |
1137 | * disposition SCTP_DISPOSITION_CONSUME. | |
1138 | */ | |
1139 | if (0 != (error = sctp_cmd_interpreter(event_type, subtype, state, | |
649621e3 | 1140 | ep, *asoc, |
1da177e4 LT |
1141 | event_arg, status, |
1142 | commands, gfp))) | |
1143 | goto bail; | |
1144 | ||
1145 | switch (status) { | |
1146 | case SCTP_DISPOSITION_DISCARD: | |
bb33381d DB |
1147 | pr_debug("%s: ignored sctp protocol event - state:%d, " |
1148 | "event_type:%d, event_id:%d\n", __func__, state, | |
1149 | event_type, subtype.chunk); | |
1da177e4 LT |
1150 | break; |
1151 | ||
1152 | case SCTP_DISPOSITION_NOMEM: | |
1153 | /* We ran out of memory, so we need to discard this | |
1154 | * packet. | |
1155 | */ | |
1156 | /* BUG--we should now recover some memory, probably by | |
1157 | * reneging... | |
1158 | */ | |
1159 | error = -ENOMEM; | |
1160 | break; | |
1161 | ||
d808ad9a | 1162 | case SCTP_DISPOSITION_DELETE_TCB: |
649621e3 | 1163 | case SCTP_DISPOSITION_ABORT: |
1da177e4 | 1164 | /* This should now be a command. */ |
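/* The association has normally been freed by the commands run above;
 * clear the caller's pointer so it is not touched again
 * (see debug_post_sfx() in sctp_do_sm()).
 */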
649621e3 | 1165 | *asoc = NULL; |
1da177e4 LT |
1166 | break; |
1167 | ||
1168 | case SCTP_DISPOSITION_CONSUME: | |
1da177e4 LT |
1169 | /* |
1170 | * We should no longer have much work to do here as the | |
1171 | * real work has been done as explicit commands above. | |
1172 | */ | |
1173 | break; | |
1174 | ||
1175 | case SCTP_DISPOSITION_VIOLATION: | |
e87cc472 JP |
1176 | net_err_ratelimited("protocol violation state %d chunkid %d\n", |
1177 | state, subtype.chunk); | |
1da177e4 LT |
1178 | break; |
1179 | ||
1180 | case SCTP_DISPOSITION_NOT_IMPL: | |
145ce502 JP |
1181 | pr_warn("unimplemented feature in state %d, event_type %d, event_id %d\n", |
1182 | state, event_type, subtype.chunk); | |
1da177e4 LT |
1183 | break; |
1184 | ||
1185 | case SCTP_DISPOSITION_BUG: | |
145ce502 | 1186 | pr_err("bug in state %d, event_type %d, event_id %d\n", |
1da177e4 LT |
1187 | state, event_type, subtype.chunk); |
1188 | BUG(); | |
1189 | break; | |
1190 | ||
1191 | default: | |
145ce502 | 1192 | pr_err("impossible disposition %d in state %d, event_type %d, event_id %d\n", |
1da177e4 LT |
1193 | status, state, event_type, subtype.chunk); |
1194 | BUG(); | |
1195 | break; | |
3ff50b79 | 1196 | } |
1da177e4 LT |
1197 | |
1198 | bail: | |
1199 | return error; | |
1200 | } | |
1201 | ||
1202 | /******************************************************************** | |
1203 | * 2nd Level Abstractions | |
1204 | ********************************************************************/ | |
1205 | ||
1206 | /* This is the side-effect interpreter. */ | |
1207 | static int sctp_cmd_interpreter(sctp_event_t event_type, | |
1208 | sctp_subtype_t subtype, | |
1209 | sctp_state_t state, | |
1210 | struct sctp_endpoint *ep, | |
1211 | struct sctp_association *asoc, | |
1212 | void *event_arg, | |
d808ad9a | 1213 | sctp_disposition_t status, |
1da177e4 | 1214 | sctp_cmd_seq_t *commands, |
dd0fc66f | 1215 | gfp_t gfp) |
1da177e4 | 1216 | { |
fb586f25 MRL |
1217 | struct sock *sk = ep->base.sk; |
1218 | struct sctp_sock *sp = sctp_sk(sk); | |
1da177e4 LT |
1219 | int error = 0; |
1220 | int force; | |
1221 | sctp_cmd_t *cmd; | |
1222 | struct sctp_chunk *new_obj; | |
1223 | struct sctp_chunk *chunk = NULL; | |
1224 | struct sctp_packet *packet; | |
1da177e4 LT |
1225 | struct timer_list *timer; |
1226 | unsigned long timeout; | |
1227 | struct sctp_transport *t; | |
1228 | struct sctp_sackhdr sackh; | |
1229 | int local_cork = 0; | |
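/* local_cork records whether this interpreter run corked the outqueue
 * itself, so it knows to uncork before the association is replaced or
 * deleted.
 */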
1230 | ||
1231 | if (SCTP_EVENT_T_TIMEOUT != event_type) | |
ea110733 | 1232 | chunk = event_arg; |
1da177e4 LT |
1233 | |
1234 | /* Note: This whole file is a huge candidate for rework. | |
1235 | * For example, each command could either have its own handler, so | |
1236 | * the loop would look like: | |
1237 | * while (cmds) | |
1238 | * cmd->handle(x, y, z) | |
1239 | * --jgrimm | |
1240 | */ | |
1241 | while (NULL != (cmd = sctp_next_cmd(commands))) { | |
1242 | switch (cmd->verb) { | |
1243 | case SCTP_CMD_NOP: | |
1244 | /* Do nothing. */ | |
1245 | break; | |
1246 | ||
1247 | case SCTP_CMD_NEW_ASOC: | |
1248 | /* Register a new association. */ | |
1249 | if (local_cork) { | |
cea8768f | 1250 | sctp_outq_uncork(&asoc->outqueue, gfp); |
1da177e4 LT |
1251 | local_cork = 0; |
1252 | } | |
f9e42b85 | 1253 | |
1da177e4 | 1254 | /* Register with the endpoint. */ |
f9e42b85 DB |
1255 | asoc = cmd->obj.asoc; |
1256 | BUG_ON(asoc->peer.primary_path == NULL); | |
1da177e4 | 1257 | sctp_endpoint_add_asoc(ep, asoc); |
1da177e4 LT |
1258 | break; |
1259 | ||
1260 | case SCTP_CMD_UPDATE_ASSOC: | |
b26ddd81 | 1261 | sctp_assoc_update(asoc, cmd->obj.asoc); |
1da177e4 LT |
1262 | break; |
1263 | ||
1264 | case SCTP_CMD_PURGE_OUTQUEUE: | |
1265 | sctp_outq_teardown(&asoc->outqueue); | |
1266 | break; | |
1267 | ||
d808ad9a | 1268 | case SCTP_CMD_DELETE_TCB: |
1da177e4 | 1269 | if (local_cork) { |
cea8768f | 1270 | sctp_outq_uncork(&asoc->outqueue, gfp); |
1da177e4 LT |
1271 | local_cork = 0; |
1272 | } | |
1273 | /* Delete the current association. */ | |
1274 | sctp_cmd_delete_tcb(commands, asoc); | |
1275 | asoc = NULL; | |
1276 | break; | |
1277 | ||
1278 | case SCTP_CMD_NEW_STATE: | |
1279 | /* Enter a new state. */ | |
1280 | sctp_cmd_new_state(commands, asoc, cmd->obj.state); | |
1281 | break; | |
1282 | ||
1283 | case SCTP_CMD_REPORT_TSN: | |
1284 | /* Record the arrival of a TSN. */ | |
8e1ee18c | 1285 | error = sctp_tsnmap_mark(&asoc->peer.tsn_map, |
4244854d | 1286 | cmd->obj.u32, NULL); |
1da177e4 LT |
1287 | break; |
1288 | ||
1289 | case SCTP_CMD_REPORT_FWDTSN: | |
1290 | /* Move the Cumulative TSN Ack ahead. */ |
1291 | sctp_tsnmap_skip(&asoc->peer.tsn_map, cmd->obj.u32); | |
1292 | ||
ea2dfb37 VY |
1293 | /* purge the fragmentation queue */ |
1294 | sctp_ulpq_reasm_flushtsn(&asoc->ulpq, cmd->obj.u32); | |
1295 | ||
1da177e4 LT |
1296 | /* Abort any in progress partial delivery. */ |
1297 | sctp_ulpq_abort_pd(&asoc->ulpq, GFP_ATOMIC); | |
1298 | break; | |
1299 | ||
1300 | case SCTP_CMD_PROCESS_FWDTSN: | |
b26ddd81 | 1301 | sctp_cmd_process_fwdtsn(&asoc->ulpq, cmd->obj.chunk); |
d808ad9a | 1302 | break; |
1da177e4 LT |
1303 | |
1304 | case SCTP_CMD_GEN_SACK: | |
1305 | /* Generate a Selective ACK. | |
1306 | * The argument tells us whether to just count | |
1307 | * the packet and MAYBE generate a SACK, or | |
1308 | * force a SACK out. | |
1309 | */ | |
1310 | force = cmd->obj.i32; | |
1311 | error = sctp_gen_sack(asoc, force, commands); | |
1312 | break; | |
1313 | ||
1314 | case SCTP_CMD_PROCESS_SACK: | |
1315 | /* Process an inbound SACK. */ | |
1316 | error = sctp_cmd_process_sack(commands, asoc, | |
b26ddd81 | 1317 | cmd->obj.chunk); |
1da177e4 LT |
1318 | break; |
1319 | ||
1320 | case SCTP_CMD_GEN_INIT_ACK: | |
1321 | /* Generate an INIT ACK chunk. */ | |
1322 | new_obj = sctp_make_init_ack(asoc, chunk, GFP_ATOMIC, | |
1323 | 0); | |
1324 | if (!new_obj) | |
1325 | goto nomem; | |
1326 | ||
1327 | sctp_add_cmd_sf(commands, SCTP_CMD_REPLY, | |
1328 | SCTP_CHUNK(new_obj)); | |
1329 | break; | |
1330 | ||
1331 | case SCTP_CMD_PEER_INIT: | |
1332 | /* Process a unified INIT from the peer. | |
1333 | * Note: Only used during INIT-ACK processing. If | |
1334 | * there is an error just return to the outer |
1335 | * layer which will bail. | |
1336 | */ | |
1337 | error = sctp_cmd_process_init(commands, asoc, chunk, | |
b26ddd81 | 1338 | cmd->obj.init, gfp); |
1da177e4 LT |
1339 | break; |
1340 | ||
1341 | case SCTP_CMD_GEN_COOKIE_ECHO: | |
1342 | /* Generate a COOKIE ECHO chunk. */ | |
1343 | new_obj = sctp_make_cookie_echo(asoc, chunk); | |
1344 | if (!new_obj) { | |
b26ddd81 NH |
1345 | if (cmd->obj.chunk) |
1346 | sctp_chunk_free(cmd->obj.chunk); | |
1da177e4 LT |
1347 | goto nomem; |
1348 | } | |
1349 | sctp_add_cmd_sf(commands, SCTP_CMD_REPLY, | |
1350 | SCTP_CHUNK(new_obj)); | |
1351 | ||
1352 | /* If there is an ERROR chunk to be sent along with | |
1353 | * the COOKIE_ECHO, send it, too. | |
1354 | */ | |
b26ddd81 | 1355 | if (cmd->obj.chunk) |
1da177e4 | 1356 | sctp_add_cmd_sf(commands, SCTP_CMD_REPLY, |
b26ddd81 | 1357 | SCTP_CHUNK(cmd->obj.chunk)); |
1da177e4 | 1358 | |
96cd0d3d VY |
1359 | if (new_obj->transport) { |
1360 | new_obj->transport->init_sent_count++; | |
1361 | asoc->init_last_sent_to = new_obj->transport; | |
1362 | } | |
1363 | ||
1da177e4 | 1364 | /* FIXME - Eventually come up with a cleaner way to |
d808ad9a YH |
1365 | * enable COOKIE-ECHO + DATA bundling during |
1366 | * multihoming stale cookie scenarios, the following | |
1367 | * command plays with asoc->peer.retran_path to | |
1368 | * avoid the problem of sending the COOKIE-ECHO and | |
1369 | * DATA in different paths, which could result | |
1370 | * in the association being ABORTed if the DATA chunk | |
1da177e4 LT |
1371 | * is processed first by the server. Checking the |
1372 | * init error counter simply causes this command | |
1373 | * to be executed only during failed attempts of | |
1374 | * association establishment. | |
1375 | */ | |
3f7a87d2 FF |
1376 | if ((asoc->peer.retran_path != |
1377 | asoc->peer.primary_path) && | |
1378 | (asoc->init_err_counter > 0)) { | |
1379 | sctp_add_cmd_sf(commands, | |
d808ad9a | 1380 | SCTP_CMD_FORCE_PRIM_RETRAN, |
1da177e4 LT |
1381 | SCTP_NULL()); |
1382 | } | |
1383 | ||
1384 | break; | |
1385 | ||
1386 | case SCTP_CMD_GEN_SHUTDOWN: | |
1387 | /* Generate SHUTDOWN when in SHUTDOWN_SENT state. | |
1388 | * Reset error counts. | |
1389 | */ | |
1390 | asoc->overall_error_count = 0; | |
1391 | ||
1392 | /* Generate a SHUTDOWN chunk. */ | |
1393 | new_obj = sctp_make_shutdown(asoc, chunk); | |
1394 | if (!new_obj) | |
1395 | goto nomem; | |
1396 | sctp_add_cmd_sf(commands, SCTP_CMD_REPLY, | |
1397 | SCTP_CHUNK(new_obj)); | |
1398 | break; | |
1399 | ||
1400 | case SCTP_CMD_CHUNK_ULP: | |
1401 | /* Send a chunk to the sockets layer. */ | |
bb33381d DB |
1402 | pr_debug("%s: sm_sideff: chunk_up:%p, ulpq:%p\n", |
1403 | __func__, cmd->obj.chunk, &asoc->ulpq); | |
1404 | ||
b26ddd81 | 1405 | sctp_ulpq_tail_data(&asoc->ulpq, cmd->obj.chunk, |
1da177e4 LT |
1406 | GFP_ATOMIC); |
1407 | break; | |
1408 | ||
1409 | case SCTP_CMD_EVENT_ULP: | |
1410 | /* Send a notification to the sockets layer. */ | |
bb33381d DB |
1411 | pr_debug("%s: sm_sideff: event_up:%p, ulpq:%p\n", |
1412 | __func__, cmd->obj.ulpevent, &asoc->ulpq); | |
1413 | ||
b26ddd81 | 1414 | sctp_ulpq_tail_event(&asoc->ulpq, cmd->obj.ulpevent); |
1da177e4 LT |
1415 | break; |
1416 | ||
1417 | case SCTP_CMD_REPLY: | |
1418 | /* If the caller has not already corked, cork now. */ | |
1419 | if (!asoc->outqueue.cork) { | |
1420 | sctp_outq_cork(&asoc->outqueue); | |
1421 | local_cork = 1; | |
1422 | } | |
1423 | /* Send a chunk to our peer. */ | |
83dbc3d4 | 1424 | sctp_outq_tail(&asoc->outqueue, cmd->obj.chunk, gfp); |
1da177e4 LT |
1425 | break; |
1426 | ||
1427 | case SCTP_CMD_SEND_PKT: | |
1428 | /* Send a full packet to our peer. */ | |
b26ddd81 | 1429 | packet = cmd->obj.packet; |
cea8768f | 1430 | sctp_packet_transmit(packet, gfp); |
1da177e4 LT |
1431 | sctp_ootb_pkt_free(packet); |
1432 | break; | |
1433 | ||
b6157d8e VY |
1434 | case SCTP_CMD_T1_RETRAN: |
1435 | /* Mark a transport for retransmission (T1 timer expiry). */ | |
1436 | sctp_retransmit(&asoc->outqueue, cmd->obj.transport, | |
1437 | SCTP_RTXR_T1_RTX); | |
1438 | break; | |
1439 | ||
1da177e4 LT |
1440 | case SCTP_CMD_RETRAN: |
1441 | /* Mark a transport for retransmission (T3-rtx timer expiry). */ | |
1442 | sctp_retransmit(&asoc->outqueue, cmd->obj.transport, | |
1443 | SCTP_RTXR_T3_RTX); | |
1444 | break; | |
1445 | ||
1da177e4 LT |
1446 | case SCTP_CMD_ECN_CE: |
1447 | /* Do delayed CE processing. */ | |
1448 | sctp_do_ecn_ce_work(asoc, cmd->obj.u32); | |
1449 | break; | |
1450 | ||
1451 | case SCTP_CMD_ECN_ECNE: | |
1452 | /* Do delayed ECNE processing. */ | |
1453 | new_obj = sctp_do_ecn_ecne_work(asoc, cmd->obj.u32, | |
1454 | chunk); | |
1455 | if (new_obj) | |
1456 | sctp_add_cmd_sf(commands, SCTP_CMD_REPLY, | |
1457 | SCTP_CHUNK(new_obj)); | |
1458 | break; | |
1459 | ||
1460 | case SCTP_CMD_ECN_CWR: | |
1461 | /* Do delayed CWR processing. */ | |
1462 | sctp_do_ecn_cwr_work(asoc, cmd->obj.u32); | |
1463 | break; | |
1464 | ||
1465 | case SCTP_CMD_SETUP_T2: | |
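| /* Set the transport and T2-SHUTDOWN timeout for the outgoing SHUTDOWN chunk. */ | |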
b26ddd81 | 1466 | sctp_cmd_setup_t2(commands, asoc, cmd->obj.chunk); |
1da177e4 LT |
1467 | break; |
1468 | ||
f8d96052 TG |
1469 | case SCTP_CMD_TIMER_START_ONCE: |
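| /* Start the timer only if it is not already running. */ | |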
1470 | timer = &asoc->timers[cmd->obj.to]; | |
1471 | ||
1472 | if (timer_pending(timer)) | |
1473 | break; | |
1474 | /* fall through */ | |
1475 | ||
1da177e4 LT |
1476 | case SCTP_CMD_TIMER_START: |
1477 | timer = &asoc->timers[cmd->obj.to]; | |
1478 | timeout = asoc->timeouts[cmd->obj.to]; | |
09a62660 | 1479 | BUG_ON(!timeout); |
1da177e4 LT |
1480 | |
1481 | timer->expires = jiffies + timeout; | |
1482 | sctp_association_hold(asoc); | |
1483 | add_timer(timer); | |
1484 | break; | |
1485 | ||
1486 | case SCTP_CMD_TIMER_RESTART: | |
1487 | timer = &asoc->timers[cmd->obj.to]; | |
1488 | timeout = asoc->timeouts[cmd->obj.to]; | |
1489 | if (!mod_timer(timer, jiffies + timeout)) | |
1490 | sctp_association_hold(asoc); | |
1491 | break; | |
1492 | ||
1493 | case SCTP_CMD_TIMER_STOP: | |
1494 | timer = &asoc->timers[cmd->obj.to]; | |
25cc4ae9 | 1495 | if (del_timer(timer)) |
1da177e4 LT |
1496 | sctp_association_put(asoc); |
1497 | break; | |
1498 | ||
3f7a87d2 | 1499 | case SCTP_CMD_INIT_CHOOSE_TRANSPORT: |
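| /* Pick the transport on which to (re)send the INIT, remember it, and make it the primary path. */ | |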
b26ddd81 | 1500 | chunk = cmd->obj.chunk; |
9919b455 WY |
1501 | t = sctp_assoc_choose_alter_transport(asoc, |
1502 | asoc->init_last_sent_to); | |
3f7a87d2 FF |
1503 | asoc->init_last_sent_to = t; |
1504 | chunk->transport = t; | |
1505 | t->init_sent_count++; | |
e0e9db17 VY |
1506 | /* Set the new transport as primary */ |
1507 | sctp_assoc_set_primary(asoc, t); | |
3f7a87d2 FF |
1508 | break; |
1509 | ||
1da177e4 LT |
1510 | case SCTP_CMD_INIT_RESTART: |
1511 | /* Do the needed accounting and updates | |
1512 | * associated with restarting an initialization | |
3f7a87d2 FF |
1513 | * timer. Only multiply the timeout by two if |
1514 | * all transports have been tried at the current | |
1515 | * timeout. | |
1516 | */ | |
96cd0d3d VY |
1517 | sctp_cmd_t1_timer_update(asoc, |
1518 | SCTP_EVENT_TIMEOUT_T1_INIT, | |
1519 | "INIT"); | |
3f7a87d2 FF |
1520 | |
1521 | sctp_add_cmd_sf(commands, SCTP_CMD_TIMER_RESTART, | |
1522 | SCTP_TO(SCTP_EVENT_TIMEOUT_T1_INIT)); | |
1523 | break; | |
1524 | ||
1525 | case SCTP_CMD_COOKIEECHO_RESTART: | |
1526 | /* Do the needed accounting and updates | |
1527 | * associated with restarting the COOKIE-ECHO (T1-COOKIE) | |
1528 | * timer. Only multiply the timeout by two if | |
1529 | * all transports have been tried at the current | |
1530 | * timeout. | |
1da177e4 | 1531 | */ |
96cd0d3d VY |
1532 | sctp_cmd_t1_timer_update(asoc, |
1533 | SCTP_EVENT_TIMEOUT_T1_COOKIE, | |
1534 | "COOKIE"); | |
1da177e4 LT |
1535 | |
1536 | /* If we've sent any data bundled with | |
1537 | * COOKIE-ECHO we need to resend. | |
1538 | */ | |
9dbc15f0 RD |
1539 | list_for_each_entry(t, &asoc->peer.transport_addr_list, |
1540 | transports) { | |
b6157d8e VY |
1541 | sctp_retransmit_mark(&asoc->outqueue, t, |
1542 | SCTP_RTXR_T1_RTX); | |
1da177e4 LT |
1543 | } |
1544 | ||
1545 | sctp_add_cmd_sf(commands, | |
1546 | SCTP_CMD_TIMER_RESTART, | |
3f7a87d2 | 1547 | SCTP_TO(SCTP_EVENT_TIMEOUT_T1_COOKIE)); |
1da177e4 LT |
1548 | break; |
1549 | ||
1550 | case SCTP_CMD_INIT_FAILED: | |
dc251b2b | 1551 | sctp_cmd_init_failed(commands, asoc, cmd->obj.err); |
1da177e4 LT |
1552 | break; |
1553 | ||
1554 | case SCTP_CMD_ASSOC_FAILED: | |
1555 | sctp_cmd_assoc_failed(commands, asoc, event_type, | |
5be291fe | 1556 | subtype, chunk, cmd->obj.err); |
1da177e4 LT |
1557 | break; |
1558 | ||
3f7a87d2 FF |
1559 | case SCTP_CMD_INIT_COUNTER_INC: |
1560 | asoc->init_err_counter++; | |
1da177e4 LT |
1561 | break; |
1562 | ||
3f7a87d2 FF |
1563 | case SCTP_CMD_INIT_COUNTER_RESET: |
1564 | asoc->init_err_counter = 0; | |
1565 | asoc->init_cycle = 0; | |
96cd0d3d VY |
1566 | list_for_each_entry(t, &asoc->peer.transport_addr_list, |
1567 | transports) { | |
1568 | t->init_sent_count = 0; | |
1569 | } | |
1da177e4 LT |
1570 | break; |
1571 | ||
1572 | case SCTP_CMD_REPORT_DUP: | |
1573 | sctp_tsnmap_mark_dup(&asoc->peer.tsn_map, | |
1574 | cmd->obj.u32); | |
1575 | break; | |
1576 | ||
1577 | case SCTP_CMD_REPORT_BAD_TAG: | |
bb33381d | 1578 | pr_debug("%s: vtag mismatch!\n", __func__); |
1da177e4 LT |
1579 | break; |
1580 | ||
1581 | case SCTP_CMD_STRIKE: | |
1582 | /* Mark one strike against a transport. */ | |
5aa93bcf NH |
1583 | sctp_do_8_2_transport_strike(commands, asoc, |
1584 | cmd->obj.transport, 0); | |
7e99013a VY |
1585 | break; |
1586 | ||
1587 | case SCTP_CMD_TRANSPORT_IDLE: | |
1588 | t = cmd->obj.transport; | |
1589 | sctp_transport_lower_cwnd(t, SCTP_LOWER_CWND_INACTIVE); | |
1da177e4 LT |
1590 | break; |
1591 | ||
7e99013a | 1592 | case SCTP_CMD_TRANSPORT_HB_SENT: |
1da177e4 | 1593 | t = cmd->obj.transport; |
5aa93bcf NH |
1594 | sctp_do_8_2_transport_strike(commands, asoc, |
1595 | t, 1); | |
7e99013a | 1596 | t->hb_sent = 1; |
1da177e4 LT |
1597 | break; |
1598 | ||
1599 | case SCTP_CMD_TRANSPORT_ON: | |
1600 | t = cmd->obj.transport; | |
1601 | sctp_cmd_transport_on(commands, asoc, t, chunk); | |
1602 | break; | |
1603 | ||
1604 | case SCTP_CMD_HB_TIMERS_START: | |
1605 | sctp_cmd_hb_timers_start(commands, asoc); | |
1606 | break; | |
1607 | ||
1608 | case SCTP_CMD_HB_TIMER_UPDATE: | |
1609 | t = cmd->obj.transport; | |
ba6f5e33 | 1610 | sctp_transport_reset_hb_timer(t); |
1da177e4 LT |
1611 | break; |
1612 | ||
1613 | case SCTP_CMD_HB_TIMERS_STOP: | |
1614 | sctp_cmd_hb_timers_stop(commands, asoc); | |
1615 | break; | |
1616 | ||
1617 | case SCTP_CMD_REPORT_ERROR: | |
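| /* Latch an error code to be returned by the interpreter. */ | |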
1618 | error = cmd->obj.error; | |
1619 | break; | |
1620 | ||
1621 | case SCTP_CMD_PROCESS_CTSN: | |
1622 | /* Dummy up a SACK for processing. */ | |
2178eda8 | 1623 | sackh.cum_tsn_ack = cmd->obj.be32; |
d4d6fb57 VY |
1624 | sackh.a_rwnd = asoc->peer.rwnd + |
1625 | asoc->outqueue.outstanding_bytes; | |
1da177e4 LT |
1626 | sackh.num_gap_ack_blocks = 0; |
1627 | sackh.num_dup_tsns = 0; | |
f6e80abe | 1628 | chunk->subh.sack_hdr = &sackh; |
1da177e4 | 1629 | sctp_add_cmd_sf(commands, SCTP_CMD_PROCESS_SACK, |
f6e80abe | 1630 | SCTP_CHUNK(chunk)); |
1da177e4 LT |
1631 | break; |
1632 | ||
1633 | case SCTP_CMD_DISCARD_PACKET: | |
2e3216cd VY |
1634 | /* We need to discard the whole packet. |
1635 | * Uncork the queue since there might be | |
1636 | * responses pending. | |
1637 | */ | |
1da177e4 | 1638 | chunk->pdiscard = 1; |
2e3216cd | 1639 | if (asoc) { |
cea8768f | 1640 | sctp_outq_uncork(&asoc->outqueue, gfp); |
2e3216cd VY |
1641 | local_cork = 0; |
1642 | } | |
1da177e4 LT |
1643 | break; |
1644 | ||
1645 | case SCTP_CMD_RTO_PENDING: | |
1646 | t = cmd->obj.transport; | |
1647 | t->rto_pending = 1; | |
1648 | break; | |
1649 | ||
1650 | case SCTP_CMD_PART_DELIVER: | |
b26ddd81 | 1651 | sctp_ulpq_partial_delivery(&asoc->ulpq, GFP_ATOMIC); |
1da177e4 LT |
1652 | break; |
1653 | ||
1654 | case SCTP_CMD_RENEGE: | |
b26ddd81 | 1655 | sctp_ulpq_renege(&asoc->ulpq, cmd->obj.chunk, |
1da177e4 LT |
1656 | GFP_ATOMIC); |
1657 | break; | |
1658 | ||
1659 | case SCTP_CMD_SETUP_T4: | |
b26ddd81 | 1660 | sctp_cmd_setup_t4(commands, asoc, cmd->obj.chunk); |
1da177e4 LT |
1661 | break; |
1662 | ||
1663 | case SCTP_CMD_PROCESS_OPERR: | |
1664 | sctp_cmd_process_operr(commands, asoc, chunk); | |
1665 | break; | |
1666 | case SCTP_CMD_CLEAR_INIT_TAG: | |
1667 | asoc->peer.i.init_tag = 0; | |
1668 | break; | |
1669 | case SCTP_CMD_DEL_NON_PRIMARY: | |
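| /* Remove every peer transport except the primary path. */ | |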
1670 | sctp_cmd_del_non_primary(asoc); | |
1671 | break; | |
1672 | case SCTP_CMD_T3_RTX_TIMERS_STOP: | |
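| /* Stop the T3-rtx timer on every transport. */ | |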
1673 | sctp_cmd_t3_rtx_timers_stop(commands, asoc); | |
1674 | break; | |
1675 | case SCTP_CMD_FORCE_PRIM_RETRAN: | |
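| /* Flush the queue while retransmissions are temporarily forced onto the primary path. */ | |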
1676 | t = asoc->peer.retran_path; | |
1677 | asoc->peer.retran_path = asoc->peer.primary_path; | |
83dbc3d4 | 1678 | sctp_outq_uncork(&asoc->outqueue, gfp); |
1da177e4 LT |
1679 | local_cork = 0; |
1680 | asoc->peer.retran_path = t; | |
1681 | break; | |
8de8c873 SS |
1682 | case SCTP_CMD_SET_SK_ERR: |
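| /* Report the error on the association's socket. */ | |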
1683 | sctp_cmd_set_sk_err(asoc, cmd->obj.error); | |
1684 | break; | |
07d93967 VY |
1685 | case SCTP_CMD_ASSOC_CHANGE: |
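| /* Deliver an SCTP_ASSOC_CHANGE notification to the user. */ | |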
1686 | sctp_cmd_assoc_change(commands, asoc, | |
1687 | cmd->obj.u8); | |
1688 | break; | |
1689 | case SCTP_CMD_ADAPTATION_IND: | |
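| /* Deliver an SCTP_ADAPTATION_INDICATION notification to the user. */ | |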
1690 | sctp_cmd_adaptation_ind(commands, asoc); | |
1691 | break; | |
1692 | ||
730fc3d0 VY |
1693 | case SCTP_CMD_ASSOC_SHKEY: |
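| /* Compute and install the association's active SCTP-AUTH shared key. */ | |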
1694 | error = sctp_auth_asoc_init_active_key(asoc, | |
1695 | GFP_ATOMIC); | |
1696 | break; | |
f4ad85ca GJ |
1697 | case SCTP_CMD_UPDATE_INITTAG: |
1698 | asoc->peer.i.init_tag = cmd->obj.u32; | |
1699 | break; | |
9c5c62be VY |
1700 | case SCTP_CMD_SEND_MSG: |
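| /* Queue every chunk of a user message for transmission, corking the queue first if needed. */ | |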
1701 | if (!asoc->outqueue.cork) { | |
1702 | sctp_outq_cork(&asoc->outqueue); | |
1703 | local_cork = 1; | |
1704 | } | |
66388f2c | 1705 | sctp_cmd_send_msg(asoc, cmd->obj.msg, gfp); |
9c5c62be | 1706 | break; |
c0786693 VY |
1707 | case SCTP_CMD_SEND_NEXT_ASCONF: |
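| /* Send the next queued ASCONF chunk, if any is waiting. */ | |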
1708 | sctp_cmd_send_asconf(asoc); | |
1709 | break; | |
a000c01e WY |
1710 | case SCTP_CMD_PURGE_ASCONF_QUEUE: |
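| /* Free any outstanding and queued ASCONF chunks. */ | |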
1711 | sctp_asconf_queue_teardown(asoc); | |
1712 | break; | |
d5ccd496 MM |
1713 | |
1714 | case SCTP_CMD_SET_ASOC: | |
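| /* Switch the association that the remaining commands operate on. */ | |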
1715 | asoc = cmd->obj.asoc; | |
1716 | break; | |
1717 | ||
1da177e4 | 1718 | default: |
b26ddd81 NH |
1719 | pr_warn("Impossible command: %u\n", |
1720 | cmd->verb); | |
1da177e4 | 1721 | break; |
3ff50b79 SH |
1722 | } |
1723 | ||
1da177e4 LT |
1724 | if (error) |
1725 | break; | |
1726 | } | |
1727 | ||
1728 | out: | |
2e3216cd VY |
1729 | /* If this is in response to a received chunk, wait until |
1730 | * we are done with the packet to open the queue so that we don't | |
1731 | * send multiple packets in response to a single request. | |
1732 | */ | |
1733 | if (asoc && SCTP_EVENT_T_CHUNK == event_type && chunk) { | |
1734 | if (chunk->end_of_packet || chunk->singleton) | |
83dbc3d4 | 1735 | sctp_outq_uncork(&asoc->outqueue, gfp); |
2e3216cd | 1736 | } else if (local_cork) |
83dbc3d4 | 1737 | sctp_outq_uncork(&asoc->outqueue, gfp); |
fb586f25 | 1738 | |
0970f5b3 MRL |
1739 | if (sp->data_ready_signalled) |
1740 | sp->data_ready_signalled = 0; | |
1741 | ||
1da177e4 LT |
1742 | return error; |
1743 | nomem: | |
1744 | error = -ENOMEM; | |
1745 | goto out; | |
1746 | } | |
1747 |