Commit | Line | Data |
---|---|---|
60c778b2 | 1 | /* SCTP kernel implementation |
1da177e4 LT |
2 | * (C) Copyright IBM Corp. 2001, 2004 |
3 | * Copyright (c) 1999 Cisco, Inc. | |
4 | * Copyright (c) 1999-2001 Motorola, Inc. | |
5 | * | |
60c778b2 | 6 | * This file is part of the SCTP kernel implementation |
1da177e4 LT |
7 | * |
8 | * These functions work with the state functions in sctp_sm_statefuns.c | |
9 | * to implement the state operations. These functions implement the | |
10 | * steps which require modifying existing data structures. | |
11 | * | |
60c778b2 | 12 | * This SCTP implementation is free software; |
1da177e4 LT |
13 | * you can redistribute it and/or modify it under the terms of |
14 | * the GNU General Public License as published by | |
15 | * the Free Software Foundation; either version 2, or (at your option) | |
16 | * any later version. | |
17 | * | |
60c778b2 | 18 | * This SCTP implementation is distributed in the hope that it |
1da177e4 LT |
19 | * will be useful, but WITHOUT ANY WARRANTY; without even the implied |
20 | * ************************ | |
21 | * warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. | |
22 | * See the GNU General Public License for more details. | |
23 | * | |
24 | * You should have received a copy of the GNU General Public License | |
25 | * along with GNU CC; see the file COPYING. If not, write to | |
26 | * the Free Software Foundation, 59 Temple Place - Suite 330, | |
27 | * Boston, MA 02111-1307, USA. | |
28 | * | |
29 | * Please send any bug reports or fixes you make to the | |
30 | * email address(es): | |
31 | * lksctp developers <[email protected]> | |
32 | * | |
33 | * Or submit a bug report through the following website: | |
34 | * http://www.sf.net/projects/lksctp | |
35 | * | |
36 | * Written or modified by: | |
37 | * La Monte H.P. Yarroll <[email protected]> | |
38 | * Karl Knutson <[email protected]> | |
39 | * Jon Grimm <[email protected]> | |
40 | * Hui Huang <[email protected]> | |
41 | * Dajiang Zhang <[email protected]> | |
42 | * Daisy Chang <[email protected]> | |
43 | * Sridhar Samudrala <[email protected]> | |
44 | * Ardelle Fan <[email protected]> | |
45 | * | |
46 | * Any bugs reported given to us we will try to fix... any fixes shared will | |
47 | * be incorporated into the next SCTP release. | |
48 | */ | |
49 | ||
50 | #include <linux/skbuff.h> | |
51 | #include <linux/types.h> | |
52 | #include <linux/socket.h> | |
53 | #include <linux/ip.h> | |
54 | #include <net/sock.h> | |
55 | #include <net/sctp/sctp.h> | |
56 | #include <net/sctp/sm.h> | |
57 | ||
58 | static int sctp_cmd_interpreter(sctp_event_t event_type, | |
59 | sctp_subtype_t subtype, | |
60 | sctp_state_t state, | |
61 | struct sctp_endpoint *ep, | |
62 | struct sctp_association *asoc, | |
63 | void *event_arg, | |
d808ad9a | 64 | sctp_disposition_t status, |
1da177e4 | 65 | sctp_cmd_seq_t *commands, |
dd0fc66f | 66 | gfp_t gfp); |
1da177e4 LT |
67 | static int sctp_side_effects(sctp_event_t event_type, sctp_subtype_t subtype, |
68 | sctp_state_t state, | |
69 | struct sctp_endpoint *ep, | |
70 | struct sctp_association *asoc, | |
71 | void *event_arg, | |
72 | sctp_disposition_t status, | |
73 | sctp_cmd_seq_t *commands, | |
dd0fc66f | 74 | gfp_t gfp); |
1da177e4 LT |
75 | |
76 | /******************************************************************** | |
77 | * Helper functions | |
78 | ********************************************************************/ | |
79 | ||
80 | /* A helper function for delayed processing of INET ECN CE bit. */ | |
d808ad9a | 81 | static void sctp_do_ecn_ce_work(struct sctp_association *asoc, |
1da177e4 LT |
82 | __u32 lowest_tsn) |
83 | { | |
84 | /* Save the TSN away for comparison when we receive CWR */ | |
85 | ||
86 | asoc->last_ecne_tsn = lowest_tsn; | |
87 | asoc->need_ecne = 1; | |
88 | } | |
89 | ||
90 | /* Helper function for delayed processing of SCTP ECNE chunk. */ | |
91 | /* RFC 2960 Appendix A | |
92 | * | |
93 | * RFC 2481 details a specific bit for a sender to send in | |
94 | * the header of its next outbound TCP segment to indicate to | |
95 | * its peer that it has reduced its congestion window. This | |
96 | * is termed the CWR bit. For SCTP the same indication is made | |
97 | * by including the CWR chunk. This chunk contains one data | |
98 | * element, i.e. the TSN number that was sent in the ECNE chunk. | |
99 | * This element represents the lowest TSN number in the datagram | |
100 | * that was originally marked with the CE bit. | |
101 | */ | |
102 | static struct sctp_chunk *sctp_do_ecn_ecne_work(struct sctp_association *asoc, | |
103 | __u32 lowest_tsn, | |
104 | struct sctp_chunk *chunk) | |
105 | { | |
106 | struct sctp_chunk *repl; | |
107 | ||
108 | /* Our previously transmitted packet ran into some congestion | |
109 | * so we should take action by reducing cwnd and ssthresh | |
110 | * and then ACK our peer that we've done so by | |
111 | * sending a CWR. | |
112 | */ | |
113 | ||
114 | /* First, try to determine if we want to actually lower | |
115 | * our cwnd variables. Only lower them if the ECNE looks more | |
116 | * recent than the last response. | |
117 | */ | |
118 | if (TSN_lt(asoc->last_cwr_tsn, lowest_tsn)) { | |
119 | struct sctp_transport *transport; | |
120 | ||
121 | /* Find which transport's congestion variables | |
122 | * need to be adjusted. | |
123 | */ | |
124 | transport = sctp_assoc_lookup_tsn(asoc, lowest_tsn); | |
125 | ||
126 | /* Update the congestion variables. */ | |
127 | if (transport) | |
128 | sctp_transport_lower_cwnd(transport, | |
129 | SCTP_LOWER_CWND_ECNE); | |
130 | asoc->last_cwr_tsn = lowest_tsn; | |
131 | } | |
132 | ||
133 | /* Always try to quiet the other end. In case of lost CWR, | |
134 | * resend last_cwr_tsn. | |
135 | */ | |
136 | repl = sctp_make_cwr(asoc, asoc->last_cwr_tsn, chunk); | |
137 | ||
138 | /* If we run out of memory, it will look like a lost CWR. We'll | |
139 | * get back in sync eventually. | |
140 | */ | |
141 | return repl; | |
142 | } | |
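The CWR reply built above is only produced when the command interpreter later in this file dispatches SCTP_CMD_ECN_ECNE. A minimal sketch of how a state function in sctp_sm_statefuns.c would queue that work is shown below; it is illustrative only, the helper name is hypothetical, and SCTP_U32() is assumed to be the argument constructor from the command header.

```c
/* Illustrative sketch (hypothetical helper, not taken from this file): a
 * state function that has parsed an ECNE chunk defers the cwnd/CWR work to
 * the side-effect interpreter by queueing a command.
 */
static void sctp_sketch_queue_ecne(sctp_cmd_seq_t *commands, __u32 lowest_tsn)
{
	sctp_add_cmd_sf(commands, SCTP_CMD_ECN_ECNE, SCTP_U32(lowest_tsn));
	/* The interpreter's SCTP_CMD_ECN_ECNE case then calls
	 * sctp_do_ecn_ecne_work() above and queues the returned CWR chunk
	 * with SCTP_CMD_REPLY.
	 */
}
```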
143 | ||
144 | /* Helper function to do delayed processing of ECN CWR chunk. */ | |
145 | static void sctp_do_ecn_cwr_work(struct sctp_association *asoc, | |
146 | __u32 lowest_tsn) | |
147 | { | |
148 | /* Turn off ECNE getting auto-prepended to every outgoing | |
149 | * packet | |
150 | */ | |
151 | asoc->need_ecne = 0; | |
152 | } | |
153 | ||
154 | /* Generate SACK if necessary. We call this at the end of a packet. */ | |
155 | static int sctp_gen_sack(struct sctp_association *asoc, int force, | |
156 | sctp_cmd_seq_t *commands) | |
157 | { | |
158 | __u32 ctsn, max_tsn_seen; | |
159 | struct sctp_chunk *sack; | |
52ccb8e9 | 160 | struct sctp_transport *trans = asoc->peer.last_data_from; |
1da177e4 LT |
161 | int error = 0; |
162 | ||
d808ad9a | 163 | if (force || |
52ccb8e9 FF |
164 | (!trans && (asoc->param_flags & SPP_SACKDELAY_DISABLE)) || |
165 | (trans && (trans->param_flags & SPP_SACKDELAY_DISABLE))) | |
1da177e4 LT |
166 | asoc->peer.sack_needed = 1; |
167 | ||
168 | ctsn = sctp_tsnmap_get_ctsn(&asoc->peer.tsn_map); | |
169 | max_tsn_seen = sctp_tsnmap_get_max_tsn_seen(&asoc->peer.tsn_map); | |
170 | ||
171 | /* From 12.2 Parameters necessary per association (i.e. the TCB): | |
172 | * | |
173 | * Ack State : This flag indicates if the next received packet | |
174 | * : is to be responded to with a SACK. ... | |
175 | * : When DATA chunks are out of order, SACKs | |
176 | * : are not delayed (see Section 6). | |
177 | * | |
178 | * [This is actually not mentioned in Section 6, but we | |
179 | * implement it here anyway. --piggy] | |
180 | */ | |
d808ad9a | 181 | if (max_tsn_seen != ctsn) |
1da177e4 LT |
182 | asoc->peer.sack_needed = 1; |
183 | ||
184 | /* From 6.2 Acknowledgement on Reception of DATA Chunks: | |
185 | * | |
186 | * Section 4.2 of [RFC2581] SHOULD be followed. Specifically, | |
187 | * an acknowledgement SHOULD be generated for at least every | |
188 | * second packet (not every second DATA chunk) received, and | |
189 | * SHOULD be generated within 200 ms of the arrival of any | |
190 | * unacknowledged DATA chunk. ... | |
191 | */ | |
192 | if (!asoc->peer.sack_needed) { | |
d364d927 | 193 | asoc->peer.sack_cnt++; |
52ccb8e9 FF |
194 | |
195 | /* Set the SACK delay timeout based on the | |
196 | * SACK delay for the last transport | |
197 | * data was received from, or the default | |
198 | * for the association. | |
199 | */ | |
d364d927 WY |
200 | if (trans) { |
201 | /* We will need a SACK for the next packet. */ | |
202 | if (asoc->peer.sack_cnt >= trans->sackfreq - 1) | |
203 | asoc->peer.sack_needed = 1; | |
204 | ||
d808ad9a | 205 | asoc->timeouts[SCTP_EVENT_TIMEOUT_SACK] = |
52ccb8e9 | 206 | trans->sackdelay; |
d364d927 WY |
207 | } else { |
208 | /* We will need a SACK for the next packet. */ | |
209 | if (asoc->peer.sack_cnt >= asoc->sackfreq - 1) | |
210 | asoc->peer.sack_needed = 1; | |
211 | ||
d808ad9a | 212 | asoc->timeouts[SCTP_EVENT_TIMEOUT_SACK] = |
52ccb8e9 | 213 | asoc->sackdelay; |
d364d927 | 214 | } |
52ccb8e9 FF |
215 | |
216 | /* Restart the SACK timer. */ | |
217 | sctp_add_cmd_sf(commands, SCTP_CMD_TIMER_RESTART, | |
218 | SCTP_TO(SCTP_EVENT_TIMEOUT_SACK)); | |
1da177e4 LT |
219 | } else { |
220 | if (asoc->a_rwnd > asoc->rwnd) | |
221 | asoc->a_rwnd = asoc->rwnd; | |
222 | sack = sctp_make_sack(asoc); | |
223 | if (!sack) | |
224 | goto nomem; | |
225 | ||
226 | asoc->peer.sack_needed = 0; | |
d364d927 | 227 | asoc->peer.sack_cnt = 0; |
1da177e4 | 228 | |
732ba35e | 229 | sctp_add_cmd_sf(commands, SCTP_CMD_REPLY, SCTP_CHUNK(sack)); |
1da177e4 LT |
230 | |
231 | /* Stop the SACK timer. */ | |
232 | sctp_add_cmd_sf(commands, SCTP_CMD_TIMER_STOP, | |
233 | SCTP_TO(SCTP_EVENT_TIMEOUT_SACK)); | |
234 | } | |
52ccb8e9 | 235 | |
1da177e4 LT |
236 | return error; |
237 | nomem: | |
238 | error = -ENOMEM; | |
239 | return error; | |
240 | } | |
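The forced-SACK decision above keys off SPP_SACKDELAY_DISABLE in the transport's and association's param_flags. For orientation, a minimal user-space sketch of how an application typically sets that flag follows; it assumes the lksctp-tools <netinet/sctp.h> header and the RFC 6458 SCTP_PEER_ADDR_PARAMS socket option, and trims error handling.

```c
/* Illustrative user-space sketch: disable delayed SACK for a whole
 * association, so sctp_gen_sack() above answers every packet immediately.
 * Assumes lksctp-tools headers; not part of the kernel sources.
 */
#include <string.h>
#include <sys/socket.h>
#include <netinet/in.h>
#include <netinet/sctp.h>

static int disable_delayed_sack(int sd, sctp_assoc_t assoc_id)
{
	struct sctp_paddrparams params;

	memset(&params, 0, sizeof(params));
	params.spp_assoc_id = assoc_id;  /* leave spp_address zeroed: apply to all paths */
	params.spp_flags = SPP_SACKDELAY_DISABLE;

	return setsockopt(sd, IPPROTO_SCTP, SCTP_PEER_ADDR_PARAMS,
			  &params, sizeof(params));
}
```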
241 | ||
242 | /* When the T3-RTX timer expires, it calls this function to create the | |
243 | * relevant state machine event. | |
244 | */ | |
245 | void sctp_generate_t3_rtx_event(unsigned long peer) | |
246 | { | |
247 | int error; | |
248 | struct sctp_transport *transport = (struct sctp_transport *) peer; | |
249 | struct sctp_association *asoc = transport->asoc; | |
250 | ||
251 | /* Check whether a task is in the sock. */ | |
252 | ||
253 | sctp_bh_lock_sock(asoc->base.sk); | |
254 | if (sock_owned_by_user(asoc->base.sk)) { | |
0dc47877 | 255 | SCTP_DEBUG_PRINTK("%s:Sock is busy.\n", __func__); |
1da177e4 LT |
256 | |
257 | /* Try again later. */ | |
258 | if (!mod_timer(&transport->T3_rtx_timer, jiffies + (HZ/20))) | |
259 | sctp_transport_hold(transport); | |
260 | goto out_unlock; | |
261 | } | |
262 | ||
263 | /* Is this transport really dead and just waiting around for | |
264 | * the timer to let go of the reference? | |
265 | */ | |
266 | if (transport->dead) | |
267 | goto out_unlock; | |
268 | ||
269 | /* Run through the state machine. */ | |
270 | error = sctp_do_sm(SCTP_EVENT_T_TIMEOUT, | |
271 | SCTP_ST_TIMEOUT(SCTP_EVENT_TIMEOUT_T3_RTX), | |
272 | asoc->state, | |
273 | asoc->ep, asoc, | |
274 | transport, GFP_ATOMIC); | |
275 | ||
276 | if (error) | |
277 | asoc->base.sk->sk_err = -error; | |
278 | ||
279 | out_unlock: | |
280 | sctp_bh_unlock_sock(asoc->base.sk); | |
281 | sctp_transport_put(transport); | |
282 | } | |
283 | ||
284 | /* This is a sa interface for producing timeout events. It works | |
285 | * for timeouts which use the association as their parameter. | |
286 | */ | |
287 | static void sctp_generate_timeout_event(struct sctp_association *asoc, | |
288 | sctp_event_timeout_t timeout_type) | |
289 | { | |
290 | int error = 0; | |
291 | ||
292 | sctp_bh_lock_sock(asoc->base.sk); | |
293 | if (sock_owned_by_user(asoc->base.sk)) { | |
294 | SCTP_DEBUG_PRINTK("%s:Sock is busy: timer %d\n", | |
0dc47877 | 295 | __func__, |
1da177e4 LT |
296 | timeout_type); |
297 | ||
298 | /* Try again later. */ | |
299 | if (!mod_timer(&asoc->timers[timeout_type], jiffies + (HZ/20))) | |
300 | sctp_association_hold(asoc); | |
301 | goto out_unlock; | |
302 | } | |
303 | ||
304 | /* Is this association really dead and just waiting around for | |
305 | * the timer to let go of the reference? | |
306 | */ | |
307 | if (asoc->base.dead) | |
308 | goto out_unlock; | |
309 | ||
310 | /* Run through the state machine. */ | |
311 | error = sctp_do_sm(SCTP_EVENT_T_TIMEOUT, | |
312 | SCTP_ST_TIMEOUT(timeout_type), | |
313 | asoc->state, asoc->ep, asoc, | |
314 | (void *)timeout_type, GFP_ATOMIC); | |
315 | ||
316 | if (error) | |
317 | asoc->base.sk->sk_err = -error; | |
318 | ||
319 | out_unlock: | |
320 | sctp_bh_unlock_sock(asoc->base.sk); | |
321 | sctp_association_put(asoc); | |
322 | } | |
323 | ||
324 | static void sctp_generate_t1_cookie_event(unsigned long data) | |
325 | { | |
326 | struct sctp_association *asoc = (struct sctp_association *) data; | |
327 | sctp_generate_timeout_event(asoc, SCTP_EVENT_TIMEOUT_T1_COOKIE); | |
328 | } | |
329 | ||
330 | static void sctp_generate_t1_init_event(unsigned long data) | |
331 | { | |
332 | struct sctp_association *asoc = (struct sctp_association *) data; | |
333 | sctp_generate_timeout_event(asoc, SCTP_EVENT_TIMEOUT_T1_INIT); | |
334 | } | |
335 | ||
336 | static void sctp_generate_t2_shutdown_event(unsigned long data) | |
337 | { | |
338 | struct sctp_association *asoc = (struct sctp_association *) data; | |
339 | sctp_generate_timeout_event(asoc, SCTP_EVENT_TIMEOUT_T2_SHUTDOWN); | |
340 | } | |
341 | ||
342 | static void sctp_generate_t4_rto_event(unsigned long data) | |
343 | { | |
344 | struct sctp_association *asoc = (struct sctp_association *) data; | |
345 | sctp_generate_timeout_event(asoc, SCTP_EVENT_TIMEOUT_T4_RTO); | |
346 | } | |
347 | ||
348 | static void sctp_generate_t5_shutdown_guard_event(unsigned long data) | |
349 | { | |
d808ad9a YH |
350 | struct sctp_association *asoc = (struct sctp_association *)data; |
351 | sctp_generate_timeout_event(asoc, | |
1da177e4 LT |
352 | SCTP_EVENT_TIMEOUT_T5_SHUTDOWN_GUARD); |
353 | ||
354 | } /* sctp_generate_t5_shutdown_guard_event() */ | |
355 | ||
356 | static void sctp_generate_autoclose_event(unsigned long data) | |
357 | { | |
358 | struct sctp_association *asoc = (struct sctp_association *) data; | |
359 | sctp_generate_timeout_event(asoc, SCTP_EVENT_TIMEOUT_AUTOCLOSE); | |
360 | } | |
361 | ||
362 | /* Generate a heart beat event. If the sock is busy, reschedule. Make | |
363 | * sure that the transport is still valid. | |
364 | */ | |
365 | void sctp_generate_heartbeat_event(unsigned long data) | |
366 | { | |
367 | int error = 0; | |
368 | struct sctp_transport *transport = (struct sctp_transport *) data; | |
369 | struct sctp_association *asoc = transport->asoc; | |
370 | ||
371 | sctp_bh_lock_sock(asoc->base.sk); | |
372 | if (sock_owned_by_user(asoc->base.sk)) { | |
0dc47877 | 373 | SCTP_DEBUG_PRINTK("%s:Sock is busy.\n", __func__); |
1da177e4 LT |
374 | |
375 | /* Try again later. */ | |
376 | if (!mod_timer(&transport->hb_timer, jiffies + (HZ/20))) | |
377 | sctp_transport_hold(transport); | |
378 | goto out_unlock; | |
379 | } | |
380 | ||
381 | /* Is this structure just waiting around for us to actually | |
382 | * get destroyed? | |
383 | */ | |
384 | if (transport->dead) | |
385 | goto out_unlock; | |
386 | ||
387 | error = sctp_do_sm(SCTP_EVENT_T_TIMEOUT, | |
388 | SCTP_ST_TIMEOUT(SCTP_EVENT_TIMEOUT_HEARTBEAT), | |
389 | asoc->state, asoc->ep, asoc, | |
390 | transport, GFP_ATOMIC); | |
391 | ||
d808ad9a | 392 | if (error) |
1da177e4 LT |
393 | asoc->base.sk->sk_err = -error; |
394 | ||
395 | out_unlock: | |
396 | sctp_bh_unlock_sock(asoc->base.sk); | |
397 | sctp_transport_put(transport); | |
398 | } | |
399 | ||
400 | /* Inject a SACK Timeout event into the state machine. */ | |
401 | static void sctp_generate_sack_event(unsigned long data) | |
402 | { | |
403 | struct sctp_association *asoc = (struct sctp_association *) data; | |
404 | sctp_generate_timeout_event(asoc, SCTP_EVENT_TIMEOUT_SACK); | |
405 | } | |
406 | ||
407 | sctp_timer_event_t *sctp_timer_events[SCTP_NUM_TIMEOUT_TYPES] = { | |
408 | NULL, | |
409 | sctp_generate_t1_cookie_event, | |
410 | sctp_generate_t1_init_event, | |
411 | sctp_generate_t2_shutdown_event, | |
412 | NULL, | |
413 | sctp_generate_t4_rto_event, | |
414 | sctp_generate_t5_shutdown_guard_event, | |
1e7d3d90 | 415 | NULL, |
1da177e4 LT |
416 | sctp_generate_sack_event, |
417 | sctp_generate_autoclose_event, | |
418 | }; | |
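The NULL slots in this table correspond to the per-transport timers (T3-RTX and HEARTBEAT), whose handlers above take a transport rather than an association. A hedged sketch of how the association-level timers get attached to these callbacks is shown below; the real wiring lives in the association initialization code and may differ in detail. It uses setup_timer(), the timer API of this kernel generation that matches the unsigned long callback signatures used here.

```c
/* Hedged sketch only (hypothetical helper): attach the association-scoped
 * callbacks from the table above to asoc->timers[].  The actual
 * initialization is done in the association setup code.
 */
static void sctp_sketch_setup_asoc_timers(struct sctp_association *asoc)
{
	int i;

	for (i = SCTP_EVENT_TIMEOUT_NONE; i < SCTP_NUM_TIMEOUT_TYPES; ++i)
		if (sctp_timer_events[i])
			setup_timer(&asoc->timers[i], sctp_timer_events[i],
				    (unsigned long)asoc);
}
```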
419 | ||
420 | ||
421 | /* RFC 2960 8.2 Path Failure Detection | |
422 | * | |
423 | * When its peer endpoint is multi-homed, an endpoint should keep an | |
424 | * error counter for each of the destination transport addresses of the | |
425 | * peer endpoint. | |
426 | * | |
427 | * Each time the T3-rtx timer expires on any address, or when a | |
428 | * HEARTBEAT sent to an idle address is not acknowledged within an RTO, | |
429 | * the error counter of that destination address will be incremented. | |
430 | * When the value in the error counter exceeds the protocol parameter | |
431 | * 'Path.Max.Retrans' of that destination address, the endpoint should | |
432 | * mark the destination transport address as inactive, and a | |
433 | * notification SHOULD be sent to the upper layer. | |
434 | * | |
435 | */ | |
436 | static void sctp_do_8_2_transport_strike(struct sctp_association *asoc, | |
7e99013a VY |
437 | struct sctp_transport *transport, |
438 | int is_hb) | |
1da177e4 LT |
439 | { |
440 | /* The check for association's overall error counter exceeding the | |
441 | * threshold is done in the state function. | |
442 | */ | |
ad8fec17 SS |
443 | /* When probing UNCONFIRMED addresses, the association overall |
444 | * error count is NOT incremented | |
445 | */ | |
446 | if (transport->state != SCTP_UNCONFIRMED) | |
447 | asoc->overall_error_count++; | |
1da177e4 | 448 | |
3f7a87d2 | 449 | if (transport->state != SCTP_INACTIVE && |
52ccb8e9 | 450 | (transport->error_count++ >= transport->pathmaxrxt)) { |
3f7a87d2 FF |
451 | SCTP_DEBUG_PRINTK_IPADDR("transport_strike:association %p", |
452 | " transport IP: port:%d failed.\n", | |
453 | asoc, | |
b3f5b3b6 AV |
454 | (&transport->ipaddr), |
455 | ntohs(transport->ipaddr.v4.sin_port)); | |
1da177e4 LT |
456 | sctp_assoc_control_transport(asoc, transport, |
457 | SCTP_TRANSPORT_DOWN, | |
458 | SCTP_FAILED_THRESHOLD); | |
459 | } | |
460 | ||
461 | /* E2) For the destination address for which the timer | |
462 | * expires, set RTO <- RTO * 2 ("back off the timer"). The | |
463 | * maximum value discussed in rule C7 above (RTO.max) may be | |
464 | * used to provide an upper bound to this doubling operation. | |
faee47cd VY |
465 | * |
466 | * Special Case: the first HB doesn't trigger exponential backoff. | |
467 | * The first unacknowledged HB triggers it. We do this with a flag | |
468 | * that indicates that we have an outstanding HB. | |
1da177e4 | 469 | */ |
7e99013a | 470 | if (!is_hb || transport->hb_sent) { |
faee47cd VY |
471 | transport->last_rto = transport->rto; |
472 | transport->rto = min((transport->rto * 2), transport->asoc->rto_max); | |
473 | } | |
1da177e4 LT |
474 | } |
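Rule E2 above doubles the RTO and clamps it at RTO.max. As a quick worked illustration (not kernel code, helper name hypothetical), with the RFC 2960 suggested defaults (RTO.Initial = 3 s, RTO.Max = 60 s) the successive values run 3, 6, 12, 24, 48, 60, 60, ... seconds; a minimal sketch of the clamp:

```c
/* Illustrative only: the E2 back-off applied above, isolated. */
static unsigned long sctp_sketch_backoff_rto(unsigned long rto,
					     unsigned long rto_max)
{
	rto *= 2;				/* "back off the timer" */
	return rto < rto_max ? rto : rto_max;	/* upper bound from rule C7 */
}
```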
475 | ||
476 | /* Worker routine to handle INIT command failure. */ | |
477 | static void sctp_cmd_init_failed(sctp_cmd_seq_t *commands, | |
478 | struct sctp_association *asoc, | |
479 | unsigned error) | |
480 | { | |
481 | struct sctp_ulpevent *event; | |
482 | ||
483 | event = sctp_ulpevent_make_assoc_change(asoc,0, SCTP_CANT_STR_ASSOC, | |
a5a35e76 | 484 | (__u16)error, 0, 0, NULL, |
1da177e4 LT |
485 | GFP_ATOMIC); |
486 | ||
487 | if (event) | |
488 | sctp_add_cmd_sf(commands, SCTP_CMD_EVENT_ULP, | |
489 | SCTP_ULPEVENT(event)); | |
490 | ||
491 | sctp_add_cmd_sf(commands, SCTP_CMD_NEW_STATE, | |
492 | SCTP_STATE(SCTP_STATE_CLOSED)); | |
493 | ||
494 | /* SEND_FAILED sent later when cleaning up the association. */ | |
495 | asoc->outqueue.error = error; | |
496 | sctp_add_cmd_sf(commands, SCTP_CMD_DELETE_TCB, SCTP_NULL()); | |
497 | } | |
498 | ||
499 | /* Worker routine to handle SCTP_CMD_ASSOC_FAILED. */ | |
500 | static void sctp_cmd_assoc_failed(sctp_cmd_seq_t *commands, | |
501 | struct sctp_association *asoc, | |
502 | sctp_event_t event_type, | |
503 | sctp_subtype_t subtype, | |
504 | struct sctp_chunk *chunk, | |
505 | unsigned error) | |
506 | { | |
507 | struct sctp_ulpevent *event; | |
508 | ||
509 | /* Cancel any partial delivery in progress. */ | |
510 | sctp_ulpq_abort_pd(&asoc->ulpq, GFP_ATOMIC); | |
511 | ||
a5a35e76 VY |
512 | if (event_type == SCTP_EVENT_T_CHUNK && subtype.chunk == SCTP_CID_ABORT) |
513 | event = sctp_ulpevent_make_assoc_change(asoc, 0, SCTP_COMM_LOST, | |
514 | (__u16)error, 0, 0, chunk, | |
515 | GFP_ATOMIC); | |
516 | else | |
517 | event = sctp_ulpevent_make_assoc_change(asoc, 0, SCTP_COMM_LOST, | |
518 | (__u16)error, 0, 0, NULL, | |
1da177e4 LT |
519 | GFP_ATOMIC); |
520 | if (event) | |
521 | sctp_add_cmd_sf(commands, SCTP_CMD_EVENT_ULP, | |
522 | SCTP_ULPEVENT(event)); | |
523 | ||
524 | sctp_add_cmd_sf(commands, SCTP_CMD_NEW_STATE, | |
525 | SCTP_STATE(SCTP_STATE_CLOSED)); | |
526 | ||
1da177e4 LT |
527 | /* SEND_FAILED sent later when cleaning up the association. */ |
528 | asoc->outqueue.error = error; | |
529 | sctp_add_cmd_sf(commands, SCTP_CMD_DELETE_TCB, SCTP_NULL()); | |
530 | } | |
531 | ||
532 | /* Process an init chunk (may be real INIT/INIT-ACK or an embedded INIT | |
533 | * inside the cookie). In reality, this is only used for INIT-ACK processing | |
534 | * since all other cases use "temporary" associations and can do all | |
535 | * their work in statefuns directly. | |
536 | */ | |
537 | static int sctp_cmd_process_init(sctp_cmd_seq_t *commands, | |
538 | struct sctp_association *asoc, | |
539 | struct sctp_chunk *chunk, | |
3182cd84 | 540 | sctp_init_chunk_t *peer_init, |
dd0fc66f | 541 | gfp_t gfp) |
1da177e4 LT |
542 | { |
543 | int error; | |
544 | ||
545 | /* We only process the init as a sideeffect in a single | |
546 | * case. This is when we process the INIT-ACK. If we | |
547 | * fail during INIT processing (due to malloc problems), | |
548 | * just return the error and stop processing the stack. | |
549 | */ | |
550 | if (!sctp_process_init(asoc, chunk->chunk_hdr->type, | |
551 | sctp_source(chunk), peer_init, gfp)) | |
552 | error = -ENOMEM; | |
553 | else | |
554 | error = 0; | |
555 | ||
556 | return error; | |
557 | } | |
558 | ||
559 | /* Helper function to break out starting up of heartbeat timers. */ | |
560 | static void sctp_cmd_hb_timers_start(sctp_cmd_seq_t *cmds, | |
561 | struct sctp_association *asoc) | |
562 | { | |
563 | struct sctp_transport *t; | |
1da177e4 LT |
564 | |
565 | /* Start a heartbeat timer for each transport on the association. | |
566 | * Hold a reference on the transport to make sure none of | |
567 | * the needed data structures go away. | |
568 | */ | |
9dbc15f0 | 569 | list_for_each_entry(t, &asoc->peer.transport_addr_list, transports) { |
1da177e4 LT |
570 | |
571 | if (!mod_timer(&t->hb_timer, sctp_transport_timeout(t))) | |
572 | sctp_transport_hold(t); | |
573 | } | |
574 | } | |
575 | ||
576 | static void sctp_cmd_hb_timers_stop(sctp_cmd_seq_t *cmds, | |
577 | struct sctp_association *asoc) | |
578 | { | |
579 | struct sctp_transport *t; | |
1da177e4 LT |
580 | |
581 | /* Stop all heartbeat timers. */ | |
582 | ||
9dbc15f0 RD |
583 | list_for_each_entry(t, &asoc->peer.transport_addr_list, |
584 | transports) { | |
1da177e4 LT |
585 | if (del_timer(&t->hb_timer)) |
586 | sctp_transport_put(t); | |
587 | } | |
588 | } | |
589 | ||
590 | /* Helper function to stop any pending T3-RTX timers */ | |
591 | static void sctp_cmd_t3_rtx_timers_stop(sctp_cmd_seq_t *cmds, | |
d808ad9a | 592 | struct sctp_association *asoc) |
1da177e4 LT |
593 | { |
594 | struct sctp_transport *t; | |
1da177e4 | 595 | |
9dbc15f0 RD |
596 | list_for_each_entry(t, &asoc->peer.transport_addr_list, |
597 | transports) { | |
1da177e4 LT |
598 | if (timer_pending(&t->T3_rtx_timer) && |
599 | del_timer(&t->T3_rtx_timer)) { | |
600 | sctp_transport_put(t); | |
601 | } | |
602 | } | |
603 | } | |
604 | ||
605 | ||
606 | /* Helper function to update the heartbeat timer. */ | |
607 | static void sctp_cmd_hb_timer_update(sctp_cmd_seq_t *cmds, | |
1da177e4 LT |
608 | struct sctp_transport *t) |
609 | { | |
610 | /* Update the heartbeat timer. */ | |
611 | if (!mod_timer(&t->hb_timer, sctp_transport_timeout(t))) | |
612 | sctp_transport_hold(t); | |
613 | } | |
614 | ||
615 | /* Helper function to handle the reception of a HEARTBEAT ACK. */ | |
616 | static void sctp_cmd_transport_on(sctp_cmd_seq_t *cmds, | |
617 | struct sctp_association *asoc, | |
618 | struct sctp_transport *t, | |
619 | struct sctp_chunk *chunk) | |
620 | { | |
621 | sctp_sender_hb_info_t *hbinfo; | |
622 | ||
623 | /* 8.3 Upon the receipt of the HEARTBEAT ACK, the sender of the | |
624 | * HEARTBEAT should clear the error counter of the destination | |
625 | * transport address to which the HEARTBEAT was sent. | |
626 | * The association's overall error count is also cleared. | |
627 | */ | |
628 | t->error_count = 0; | |
629 | t->asoc->overall_error_count = 0; | |
630 | ||
faee47cd VY |
631 | /* Clear the hb_sent flag to signal that we had a good |
632 | * acknowledgement. | |
633 | */ | |
634 | t->hb_sent = 0; | |
635 | ||
1da177e4 LT |
636 | /* Mark the destination transport address as active if it is not so |
637 | * marked. | |
638 | */ | |
ad8fec17 | 639 | if ((t->state == SCTP_INACTIVE) || (t->state == SCTP_UNCONFIRMED)) |
1da177e4 LT |
640 | sctp_assoc_control_transport(asoc, t, SCTP_TRANSPORT_UP, |
641 | SCTP_HEARTBEAT_SUCCESS); | |
642 | ||
643 | /* The receiver of the HEARTBEAT ACK should also perform an | |
644 | * RTT measurement for that destination transport address | |
645 | * using the time value carried in the HEARTBEAT ACK chunk. | |
e533ca16 VY |
646 | * If the transport's rto_pending variable has been cleared, |
647 | * it was most likely due to a retransmit. However, we want | |
648 | * to re-enable it to properly update the rto. | |
1da177e4 | 649 | */ |
e533ca16 VY |
650 | if (t->rto_pending == 0) |
651 | t->rto_pending = 1; | |
652 | ||
1da177e4 LT |
653 | hbinfo = (sctp_sender_hb_info_t *) chunk->skb->data; |
654 | sctp_transport_update_rto(t, (jiffies - hbinfo->sent_at)); | |
ad8fec17 SS |
655 | |
656 | /* Update the heartbeat timer. */ | |
657 | if (!mod_timer(&t->hb_timer, sctp_transport_timeout(t))) | |
658 | sctp_transport_hold(t); | |
1da177e4 LT |
659 | } |
660 | ||
1da177e4 LT |
661 | |
662 | /* Helper function to process the process SACK command. */ | |
663 | static int sctp_cmd_process_sack(sctp_cmd_seq_t *cmds, | |
664 | struct sctp_association *asoc, | |
665 | struct sctp_sackhdr *sackh) | |
666 | { | |
2e3216cd | 667 | int err = 0; |
1da177e4 LT |
668 | |
669 | if (sctp_outq_sack(&asoc->outqueue, sackh)) { | |
670 | /* There are no more TSNs awaiting SACK. */ | |
671 | err = sctp_do_sm(SCTP_EVENT_T_OTHER, | |
672 | SCTP_ST_OTHER(SCTP_EVENT_NO_PENDING_TSN), | |
673 | asoc->state, asoc->ep, asoc, NULL, | |
674 | GFP_ATOMIC); | |
1da177e4 LT |
675 | } |
676 | ||
677 | return err; | |
678 | } | |
679 | ||
680 | /* Helper function to set the timeout value for T2-SHUTDOWN timer and to set | |
681 | * the transport for a shutdown chunk. | |
682 | */ | |
d808ad9a | 683 | static void sctp_cmd_setup_t2(sctp_cmd_seq_t *cmds, |
1da177e4 LT |
684 | struct sctp_association *asoc, |
685 | struct sctp_chunk *chunk) | |
686 | { | |
687 | struct sctp_transport *t; | |
688 | ||
9919b455 WY |
689 | t = sctp_assoc_choose_alter_transport(asoc, |
690 | asoc->shutdown_last_sent_to); | |
1da177e4 LT |
691 | asoc->shutdown_last_sent_to = t; |
692 | asoc->timeouts[SCTP_EVENT_TIMEOUT_T2_SHUTDOWN] = t->rto; | |
693 | chunk->transport = t; | |
694 | } | |
695 | ||
696 | /* Helper function to change the state of an association. */ | |
d808ad9a | 697 | static void sctp_cmd_new_state(sctp_cmd_seq_t *cmds, |
1da177e4 LT |
698 | struct sctp_association *asoc, |
699 | sctp_state_t state) | |
700 | { | |
701 | struct sock *sk = asoc->base.sk; | |
702 | ||
703 | asoc->state = state; | |
704 | ||
3f7a87d2 FF |
705 | SCTP_DEBUG_PRINTK("sctp_cmd_new_state: asoc %p[%s]\n", |
706 | asoc, sctp_state_tbl[state]); | |
707 | ||
1da177e4 | 708 | if (sctp_style(sk, TCP)) { |
3f7a87d2 | 709 | /* Change the sk->sk_state of a TCP-style socket that has |
1da177e4 LT |
710 | * successfully completed a connect() call. | |
711 | */ | |
712 | if (sctp_state(asoc, ESTABLISHED) && sctp_sstate(sk, CLOSED)) | |
713 | sk->sk_state = SCTP_SS_ESTABLISHED; | |
714 | ||
715 | /* Set the RCV_SHUTDOWN flag when a SHUTDOWN is received. */ | |
716 | if (sctp_state(asoc, SHUTDOWN_RECEIVED) && | |
717 | sctp_sstate(sk, ESTABLISHED)) | |
718 | sk->sk_shutdown |= RCV_SHUTDOWN; | |
719 | } | |
720 | ||
3f7a87d2 FF |
721 | if (sctp_state(asoc, COOKIE_WAIT)) { |
722 | /* Reset init timeouts since they may have been | |
723 | * increased due to timer expirations. | |
724 | */ | |
725 | asoc->timeouts[SCTP_EVENT_TIMEOUT_T1_INIT] = | |
1e7d3d90 | 726 | asoc->rto_initial; |
3f7a87d2 | 727 | asoc->timeouts[SCTP_EVENT_TIMEOUT_T1_COOKIE] = |
1e7d3d90 | 728 | asoc->rto_initial; |
3f7a87d2 FF |
729 | } |
730 | ||
1da177e4 LT |
731 | if (sctp_state(asoc, ESTABLISHED) || |
732 | sctp_state(asoc, CLOSED) || | |
733 | sctp_state(asoc, SHUTDOWN_RECEIVED)) { | |
734 | /* Wake up any processes waiting in the asoc's wait queue in | |
735 | * sctp_wait_for_connect() or sctp_wait_for_sndbuf(). | |
d808ad9a | 736 | */ |
1da177e4 LT |
737 | if (waitqueue_active(&asoc->wait)) |
738 | wake_up_interruptible(&asoc->wait); | |
739 | ||
740 | /* Wake up any processes waiting in the sk's sleep queue of | |
741 | * a TCP-style or UDP-style peeled-off socket in | |
742 | * sctp_wait_for_accept() or sctp_wait_for_packet(). | |
743 | * For a UDP-style socket, the waiters are woken up by the | |
744 | * notifications. | |
745 | */ | |
746 | if (!sctp_style(sk, UDP)) | |
747 | sk->sk_state_change(sk); | |
748 | } | |
749 | } | |
750 | ||
751 | /* Helper function to delete an association. */ | |
752 | static void sctp_cmd_delete_tcb(sctp_cmd_seq_t *cmds, | |
753 | struct sctp_association *asoc) | |
754 | { | |
755 | struct sock *sk = asoc->base.sk; | |
756 | ||
757 | /* If it is a non-temporary association belonging to a TCP-style | |
d808ad9a | 758 | * listening socket that is not closed, do not free it so that accept() |
1da177e4 | 759 | * can pick it up later. |
d808ad9a | 760 | */ |
1da177e4 LT |
761 | if (sctp_style(sk, TCP) && sctp_sstate(sk, LISTENING) && |
762 | (!asoc->temp) && (sk->sk_shutdown != SHUTDOWN_MASK)) | |
763 | return; | |
764 | ||
765 | sctp_unhash_established(asoc); | |
766 | sctp_association_free(asoc); | |
767 | } | |
768 | ||
769 | /* | |
770 | * ADDIP Section 4.1 ASCONF Chunk Procedures | |
771 | * A4) Start a T-4 RTO timer, using the RTO value of the selected | |
772 | * destination address (we use active path instead of primary path just | |
d808ad9a | 773 | * because primary path may be inactive).
1da177e4 LT |
774 | */ |
775 | static void sctp_cmd_setup_t4(sctp_cmd_seq_t *cmds, | |
776 | struct sctp_association *asoc, | |
777 | struct sctp_chunk *chunk) | |
778 | { | |
779 | struct sctp_transport *t; | |
780 | ||
9919b455 | 781 | t = sctp_assoc_choose_alter_transport(asoc, chunk->transport); |
1da177e4 LT |
782 | asoc->timeouts[SCTP_EVENT_TIMEOUT_T4_RTO] = t->rto; |
783 | chunk->transport = t; | |
784 | } | |
785 | ||
d808ad9a | 786 | /* Process an incoming Operation Error Chunk. */ |
1da177e4 LT |
787 | static void sctp_cmd_process_operr(sctp_cmd_seq_t *cmds, |
788 | struct sctp_association *asoc, | |
789 | struct sctp_chunk *chunk) | |
790 | { | |
1da177e4 | 791 | struct sctp_errhdr *err_hdr; |
3df26787 WY |
792 | struct sctp_ulpevent *ev; |
793 | ||
794 | while (chunk->chunk_end > chunk->skb->data) { | |
795 | err_hdr = (struct sctp_errhdr *)(chunk->skb->data); | |
796 | ||
797 | ev = sctp_ulpevent_make_remote_error(asoc, chunk, 0, | |
798 | GFP_ATOMIC); | |
799 | if (!ev) | |
800 | return; | |
801 | ||
802 | sctp_ulpq_tail_event(&asoc->ulpq, ev); | |
803 | ||
804 | switch (err_hdr->cause) { | |
805 | case SCTP_ERROR_UNKNOWN_CHUNK: | |
806 | { | |
807 | sctp_chunkhdr_t *unk_chunk_hdr; | |
808 | ||
809 | unk_chunk_hdr = (sctp_chunkhdr_t *)err_hdr->variable; | |
810 | switch (unk_chunk_hdr->type) { | |
811 | /* ADDIP 4.1 A9) If the peer responds to an ASCONF with | |
812 | * an ERROR chunk reporting that it did not recognize | |
813 | * the ASCONF chunk type, the sender of the ASCONF MUST | |
814 | * NOT send any further ASCONF chunks and MUST stop its | |
815 | * T-4 timer. | |
816 | */ | |
817 | case SCTP_CID_ASCONF: | |
818 | if (asoc->peer.asconf_capable == 0) | |
819 | break; | |
820 | ||
821 | asoc->peer.asconf_capable = 0; | |
822 | sctp_add_cmd_sf(cmds, SCTP_CMD_TIMER_STOP, | |
1da177e4 | 823 | SCTP_TO(SCTP_EVENT_TIMEOUT_T4_RTO)); |
3df26787 WY |
824 | break; |
825 | default: | |
826 | break; | |
827 | } | |
1da177e4 | 828 | break; |
3df26787 | 829 | } |
1da177e4 LT |
830 | default: |
831 | break; | |
832 | } | |
1da177e4 LT |
833 | } |
834 | } | |
835 | ||
836 | /* Process variable FWDTSN chunk information. */ | |
d808ad9a | 837 | static void sctp_cmd_process_fwdtsn(struct sctp_ulpq *ulpq, |
1da177e4 LT |
838 | struct sctp_chunk *chunk) |
839 | { | |
840 | struct sctp_fwdtsn_skip *skip; | |
841 | /* Walk through all the skipped SSNs */ | |
842 | sctp_walk_fwdtsn(skip, chunk) { | |
843 | sctp_ulpq_skip(ulpq, ntohs(skip->stream), ntohs(skip->ssn)); | |
844 | } | |
845 | ||
846 | return; | |
847 | } | |
848 | ||
d808ad9a | 849 | /* Helper function to remove the association non-primary peer |
1da177e4 | 850 | * transports. |
d808ad9a | 851 | */ |
1da177e4 LT |
852 | static void sctp_cmd_del_non_primary(struct sctp_association *asoc) |
853 | { | |
854 | struct sctp_transport *t; | |
855 | struct list_head *pos; | |
856 | struct list_head *temp; | |
857 | ||
858 | list_for_each_safe(pos, temp, &asoc->peer.transport_addr_list) { | |
859 | t = list_entry(pos, struct sctp_transport, transports); | |
5f242a13 | 860 | if (!sctp_cmp_addr_exact(&t->ipaddr, |
d808ad9a | 861 | &asoc->peer.primary_addr)) { |
38a03145 | 862 | sctp_assoc_del_peer(asoc, &t->ipaddr); |
1da177e4 LT |
863 | } |
864 | } | |
865 | ||
866 | return; | |
867 | } | |
868 | ||
8de8c873 SS |
869 | /* Helper function to set sk_err on a 1-1 style socket. */ |
870 | static void sctp_cmd_set_sk_err(struct sctp_association *asoc, int error) | |
871 | { | |
872 | struct sock *sk = asoc->base.sk; | |
873 | ||
874 | if (!sctp_style(sk, UDP)) | |
875 | sk->sk_err = error; | |
876 | } | |
877 | ||
07d93967 VY |
878 | /* Helper function to generate an association change event */ |
879 | static void sctp_cmd_assoc_change(sctp_cmd_seq_t *commands, | |
880 | struct sctp_association *asoc, | |
881 | u8 state) | |
882 | { | |
883 | struct sctp_ulpevent *ev; | |
884 | ||
885 | ev = sctp_ulpevent_make_assoc_change(asoc, 0, state, 0, | |
886 | asoc->c.sinit_num_ostreams, | |
887 | asoc->c.sinit_max_instreams, | |
888 | NULL, GFP_ATOMIC); | |
889 | if (ev) | |
890 | sctp_ulpq_tail_event(&asoc->ulpq, ev); | |
891 | } | |
892 | ||
893 | /* Helper function to generate an adaptation indication event */ | |
894 | static void sctp_cmd_adaptation_ind(sctp_cmd_seq_t *commands, | |
895 | struct sctp_association *asoc) | |
896 | { | |
897 | struct sctp_ulpevent *ev; | |
898 | ||
899 | ev = sctp_ulpevent_make_adaptation_indication(asoc, GFP_ATOMIC); | |
900 | ||
901 | if (ev) | |
902 | sctp_ulpq_tail_event(&asoc->ulpq, ev); | |
903 | } | |
904 | ||
96cd0d3d VY |
905 | |
906 | static void sctp_cmd_t1_timer_update(struct sctp_association *asoc, | |
907 | sctp_event_timeout_t timer, | |
908 | char *name) | |
909 | { | |
910 | struct sctp_transport *t; | |
911 | ||
912 | t = asoc->init_last_sent_to; | |
913 | asoc->init_err_counter++; | |
914 | ||
915 | if (t->init_sent_count > (asoc->init_cycle + 1)) { | |
916 | asoc->timeouts[timer] *= 2; | |
917 | if (asoc->timeouts[timer] > asoc->max_init_timeo) { | |
918 | asoc->timeouts[timer] = asoc->max_init_timeo; | |
919 | } | |
920 | asoc->init_cycle++; | |
921 | SCTP_DEBUG_PRINTK( | |
922 | "T1 %s Timeout adjustment" | |
923 | " init_err_counter: %d" | |
924 | " cycle: %d" | |
925 | " timeout: %ld\n", | |
926 | name, | |
927 | asoc->init_err_counter, | |
928 | asoc->init_cycle, | |
929 | asoc->timeouts[timer]); | |
930 | } | |
931 | ||
932 | } | |
933 | ||
1da177e4 LT |
934 | /* These three macros allow us to pull the debugging code out of the |
935 | * main flow of sctp_do_sm() to keep attention focused on the real | |
936 | * functionality there. | |
937 | */ | |
938 | #define DEBUG_PRE \ | |
939 | SCTP_DEBUG_PRINTK("sctp_do_sm prefn: " \ | |
940 | "ep %p, %s, %s, asoc %p[%s], %s\n", \ | |
941 | ep, sctp_evttype_tbl[event_type], \ | |
942 | (*debug_fn)(subtype), asoc, \ | |
943 | sctp_state_tbl[state], state_fn->name) | |
944 | ||
945 | #define DEBUG_POST \ | |
946 | SCTP_DEBUG_PRINTK("sctp_do_sm postfn: " \ | |
947 | "asoc %p, status: %s\n", \ | |
948 | asoc, sctp_status_tbl[status]) | |
949 | ||
950 | #define DEBUG_POST_SFX \ | |
951 | SCTP_DEBUG_PRINTK("sctp_do_sm post sfx: error %d, asoc %p[%s]\n", \ | |
952 | error, asoc, \ | |
953 | sctp_state_tbl[(asoc && sctp_id2assoc(ep->base.sk, \ | |
954 | sctp_assoc2id(asoc)))?asoc->state:SCTP_STATE_CLOSED]) | |
955 | ||
956 | /* | |
957 | * This is the master state machine processing function. | |
958 | * | |
959 | * If you want to understand all of lksctp, this is a | |
960 | * good place to start. | |
961 | */ | |
962 | int sctp_do_sm(sctp_event_t event_type, sctp_subtype_t subtype, | |
963 | sctp_state_t state, | |
964 | struct sctp_endpoint *ep, | |
965 | struct sctp_association *asoc, | |
966 | void *event_arg, | |
dd0fc66f | 967 | gfp_t gfp) |
1da177e4 LT |
968 | { |
969 | sctp_cmd_seq_t commands; | |
970 | const sctp_sm_table_entry_t *state_fn; | |
971 | sctp_disposition_t status; | |
972 | int error = 0; | |
973 | typedef const char *(printfn_t)(sctp_subtype_t); | |
974 | ||
975 | static printfn_t *table[] = { | |
976 | NULL, sctp_cname, sctp_tname, sctp_oname, sctp_pname, | |
977 | }; | |
978 | printfn_t *debug_fn __attribute__ ((unused)) = table[event_type]; | |
979 | ||
980 | /* Look up the state function, run it, and then process the | |
981 | * side effects. These three steps are the heart of lksctp. | |
982 | */ | |
983 | state_fn = sctp_sm_lookup_event(event_type, state, subtype); | |
984 | ||
985 | sctp_init_cmd_seq(&commands); | |
986 | ||
987 | DEBUG_PRE; | |
988 | status = (*state_fn->fn)(ep, asoc, subtype, event_arg, &commands); | |
989 | DEBUG_POST; | |
990 | ||
991 | error = sctp_side_effects(event_type, subtype, state, | |
d808ad9a | 992 | ep, asoc, event_arg, status, |
1da177e4 LT |
993 | &commands, gfp); |
994 | DEBUG_POST_SFX; | |
995 | ||
996 | return error; | |
997 | } | |
998 | ||
999 | #undef DEBUG_PRE | |
1000 | #undef DEBUG_POST | |
1001 | ||
1002 | /***************************************************************** | |
1003 | * This is the master state function side effect processing function. | |
1004 | *****************************************************************/ | |
1005 | static int sctp_side_effects(sctp_event_t event_type, sctp_subtype_t subtype, | |
1006 | sctp_state_t state, | |
1007 | struct sctp_endpoint *ep, | |
1008 | struct sctp_association *asoc, | |
1009 | void *event_arg, | |
1010 | sctp_disposition_t status, | |
1011 | sctp_cmd_seq_t *commands, | |
dd0fc66f | 1012 | gfp_t gfp) |
1da177e4 LT |
1013 | { |
1014 | int error; | |
1015 | ||
1016 | /* FIXME - Most of the dispositions left today would be categorized | |
1017 | * as "exceptional" dispositions. For those dispositions, it | |
1018 | * may not be proper to run through any of the commands at all. | |
1019 | * For example, the command interpreter might be run only with | |
1020 | * disposition SCTP_DISPOSITION_CONSUME. | |
1021 | */ | |
1022 | if (0 != (error = sctp_cmd_interpreter(event_type, subtype, state, | |
1023 | ep, asoc, | |
1024 | event_arg, status, | |
1025 | commands, gfp))) | |
1026 | goto bail; | |
1027 | ||
1028 | switch (status) { | |
1029 | case SCTP_DISPOSITION_DISCARD: | |
1030 | SCTP_DEBUG_PRINTK("Ignored sctp protocol event - state %d, " | |
1031 | "event_type %d, event_id %d\n", | |
1032 | state, event_type, subtype.chunk); | |
1033 | break; | |
1034 | ||
1035 | case SCTP_DISPOSITION_NOMEM: | |
1036 | /* We ran out of memory, so we need to discard this | |
1037 | * packet. | |
1038 | */ | |
1039 | /* BUG--we should now recover some memory, probably by | |
1040 | * reneging... | |
1041 | */ | |
1042 | error = -ENOMEM; | |
1043 | break; | |
1044 | ||
d808ad9a | 1045 | case SCTP_DISPOSITION_DELETE_TCB: |
1da177e4 LT |
1046 | /* This should now be a command. */ |
1047 | break; | |
1048 | ||
1049 | case SCTP_DISPOSITION_CONSUME: | |
1050 | case SCTP_DISPOSITION_ABORT: | |
1051 | /* | |
1052 | * We should no longer have much work to do here as the | |
1053 | * real work has been done as explicit commands above. | |
1054 | */ | |
1055 | break; | |
1056 | ||
1057 | case SCTP_DISPOSITION_VIOLATION: | |
d99fa429 WY |
1058 | if (net_ratelimit()) |
1059 | printk(KERN_ERR "sctp protocol violation state %d " | |
1060 | "chunkid %d\n", state, subtype.chunk); | |
1da177e4 LT |
1061 | break; |
1062 | ||
1063 | case SCTP_DISPOSITION_NOT_IMPL: | |
1064 | printk(KERN_WARNING "sctp unimplemented feature in state %d, " | |
1065 | "event_type %d, event_id %d\n", | |
1066 | state, event_type, subtype.chunk); | |
1067 | break; | |
1068 | ||
1069 | case SCTP_DISPOSITION_BUG: | |
1070 | printk(KERN_ERR "sctp bug in state %d, " | |
1071 | "event_type %d, event_id %d\n", | |
1072 | state, event_type, subtype.chunk); | |
1073 | BUG(); | |
1074 | break; | |
1075 | ||
1076 | default: | |
1077 | printk(KERN_ERR "sctp impossible disposition %d " | |
1078 | "in state %d, event_type %d, event_id %d\n", | |
1079 | status, state, event_type, subtype.chunk); | |
1080 | BUG(); | |
1081 | break; | |
3ff50b79 | 1082 | } |
1da177e4 LT |
1083 | |
1084 | bail: | |
1085 | return error; | |
1086 | } | |
1087 | ||
1088 | /******************************************************************** | |
1089 | * 2nd Level Abstractions | |
1090 | ********************************************************************/ | |
1091 | ||
1092 | /* This is the side-effect interpreter. */ | |
1093 | static int sctp_cmd_interpreter(sctp_event_t event_type, | |
1094 | sctp_subtype_t subtype, | |
1095 | sctp_state_t state, | |
1096 | struct sctp_endpoint *ep, | |
1097 | struct sctp_association *asoc, | |
1098 | void *event_arg, | |
d808ad9a | 1099 | sctp_disposition_t status, |
1da177e4 | 1100 | sctp_cmd_seq_t *commands, |
dd0fc66f | 1101 | gfp_t gfp) |
1da177e4 LT |
1102 | { |
1103 | int error = 0; | |
1104 | int force; | |
1105 | sctp_cmd_t *cmd; | |
1106 | struct sctp_chunk *new_obj; | |
1107 | struct sctp_chunk *chunk = NULL; | |
1108 | struct sctp_packet *packet; | |
1da177e4 LT |
1109 | struct timer_list *timer; |
1110 | unsigned long timeout; | |
1111 | struct sctp_transport *t; | |
1112 | struct sctp_sackhdr sackh; | |
1113 | int local_cork = 0; | |
1114 | ||
1115 | if (SCTP_EVENT_T_TIMEOUT != event_type) | |
1116 | chunk = (struct sctp_chunk *) event_arg; | |
1117 | ||
1118 | /* Note: This whole file is a huge candidate for rework. | |
1119 | * For example, each command could either have its own handler, so | |
1120 | * the loop would look like: | |
1121 | * while (cmds) | |
1122 | * cmd->handle(x, y, z) | |
1123 | * --jgrimm | |
1124 | */ | |
1125 | while (NULL != (cmd = sctp_next_cmd(commands))) { | |
1126 | switch (cmd->verb) { | |
1127 | case SCTP_CMD_NOP: | |
1128 | /* Do nothing. */ | |
1129 | break; | |
1130 | ||
1131 | case SCTP_CMD_NEW_ASOC: | |
1132 | /* Register a new association. */ | |
1133 | if (local_cork) { | |
d808ad9a | 1134 | sctp_outq_uncork(&asoc->outqueue); |
1da177e4 LT |
1135 | local_cork = 0; |
1136 | } | |
1137 | asoc = cmd->obj.ptr; | |
1138 | /* Register with the endpoint. */ | |
1139 | sctp_endpoint_add_asoc(ep, asoc); | |
1140 | sctp_hash_established(asoc); | |
1141 | break; | |
1142 | ||
1143 | case SCTP_CMD_UPDATE_ASSOC: | |
1144 | sctp_assoc_update(asoc, cmd->obj.ptr); | |
1145 | break; | |
1146 | ||
1147 | case SCTP_CMD_PURGE_OUTQUEUE: | |
1148 | sctp_outq_teardown(&asoc->outqueue); | |
1149 | break; | |
1150 | ||
d808ad9a | 1151 | case SCTP_CMD_DELETE_TCB: |
1da177e4 LT |
1152 | if (local_cork) { |
1153 | sctp_outq_uncork(&asoc->outqueue); | |
1154 | local_cork = 0; | |
1155 | } | |
1156 | /* Delete the current association. */ | |
1157 | sctp_cmd_delete_tcb(commands, asoc); | |
1158 | asoc = NULL; | |
1159 | break; | |
1160 | ||
1161 | case SCTP_CMD_NEW_STATE: | |
1162 | /* Enter a new state. */ | |
1163 | sctp_cmd_new_state(commands, asoc, cmd->obj.state); | |
1164 | break; | |
1165 | ||
1166 | case SCTP_CMD_REPORT_TSN: | |
1167 | /* Record the arrival of a TSN. */ | |
8e1ee18c VY |
1168 | error = sctp_tsnmap_mark(&asoc->peer.tsn_map, |
1169 | cmd->obj.u32); | |
1da177e4 LT |
1170 | break; |
1171 | ||
1172 | case SCTP_CMD_REPORT_FWDTSN: | |
1173 | /* Move the Cumulative TSN Ack ahead. */ | |
1174 | sctp_tsnmap_skip(&asoc->peer.tsn_map, cmd->obj.u32); | |
1175 | ||
ea2dfb37 VY |
1176 | /* purge the fragmentation queue */ |
1177 | sctp_ulpq_reasm_flushtsn(&asoc->ulpq, cmd->obj.u32); | |
1178 | ||
1da177e4 LT |
1179 | /* Abort any in progress partial delivery. */ |
1180 | sctp_ulpq_abort_pd(&asoc->ulpq, GFP_ATOMIC); | |
1181 | break; | |
1182 | ||
1183 | case SCTP_CMD_PROCESS_FWDTSN: | |
1184 | sctp_cmd_process_fwdtsn(&asoc->ulpq, cmd->obj.ptr); | |
d808ad9a | 1185 | break; |
1da177e4 LT |
1186 | |
1187 | case SCTP_CMD_GEN_SACK: | |
1188 | /* Generate a Selective ACK. | |
1189 | * The argument tells us whether to just count | |
1190 | * the packet and MAYBE generate a SACK, or | |
1191 | * force a SACK out. | |
1192 | */ | |
1193 | force = cmd->obj.i32; | |
1194 | error = sctp_gen_sack(asoc, force, commands); | |
1195 | break; | |
1196 | ||
1197 | case SCTP_CMD_PROCESS_SACK: | |
1198 | /* Process an inbound SACK. */ | |
1199 | error = sctp_cmd_process_sack(commands, asoc, | |
1200 | cmd->obj.ptr); | |
1201 | break; | |
1202 | ||
1203 | case SCTP_CMD_GEN_INIT_ACK: | |
1204 | /* Generate an INIT ACK chunk. */ | |
1205 | new_obj = sctp_make_init_ack(asoc, chunk, GFP_ATOMIC, | |
1206 | 0); | |
1207 | if (!new_obj) | |
1208 | goto nomem; | |
1209 | ||
1210 | sctp_add_cmd_sf(commands, SCTP_CMD_REPLY, | |
1211 | SCTP_CHUNK(new_obj)); | |
1212 | break; | |
1213 | ||
1214 | case SCTP_CMD_PEER_INIT: | |
1215 | /* Process a unified INIT from the peer. | |
1216 | * Note: Only used during INIT-ACK processing. If | |
1217 | * there is an error, just return to the outer | |
1218 | * layer which will bail. | |
1219 | */ | |
1220 | error = sctp_cmd_process_init(commands, asoc, chunk, | |
1221 | cmd->obj.ptr, gfp); | |
1222 | break; | |
1223 | ||
1224 | case SCTP_CMD_GEN_COOKIE_ECHO: | |
1225 | /* Generate a COOKIE ECHO chunk. */ | |
1226 | new_obj = sctp_make_cookie_echo(asoc, chunk); | |
1227 | if (!new_obj) { | |
1228 | if (cmd->obj.ptr) | |
1229 | sctp_chunk_free(cmd->obj.ptr); | |
1230 | goto nomem; | |
1231 | } | |
1232 | sctp_add_cmd_sf(commands, SCTP_CMD_REPLY, | |
1233 | SCTP_CHUNK(new_obj)); | |
1234 | ||
1235 | /* If there is an ERROR chunk to be sent along with | |
1236 | * the COOKIE_ECHO, send it, too. | |
1237 | */ | |
1238 | if (cmd->obj.ptr) | |
1239 | sctp_add_cmd_sf(commands, SCTP_CMD_REPLY, | |
1240 | SCTP_CHUNK(cmd->obj.ptr)); | |
1241 | ||
96cd0d3d VY |
1242 | if (new_obj->transport) { |
1243 | new_obj->transport->init_sent_count++; | |
1244 | asoc->init_last_sent_to = new_obj->transport; | |
1245 | } | |
1246 | ||
1da177e4 | 1247 | /* FIXME - Eventually come up with a cleaner way to |
d808ad9a YH |
1248 | * enable COOKIE-ECHO + DATA bundling during
1249 | * multihoming stale cookie scenarios, the following | |
1250 | * command plays with asoc->peer.retran_path to | |
1251 | * avoid the problem of sending the COOKIE-ECHO and | |
1252 | * DATA in different paths, which could result | |
1253 | * in the association being ABORTed if the DATA chunk | |
1da177e4 LT |
1254 | * is processed first by the server. Checking the |
1255 | * init error counter simply causes this command | |
1256 | * to be executed only during failed attempts of | |
1257 | * association establishment. | |
1258 | */ | |
3f7a87d2 FF |
1259 | if ((asoc->peer.retran_path != |
1260 | asoc->peer.primary_path) && | |
1261 | (asoc->init_err_counter > 0)) { | |
1262 | sctp_add_cmd_sf(commands, | |
d808ad9a | 1263 | SCTP_CMD_FORCE_PRIM_RETRAN, |
1da177e4 LT |
1264 | SCTP_NULL()); |
1265 | } | |
1266 | ||
1267 | break; | |
1268 | ||
1269 | case SCTP_CMD_GEN_SHUTDOWN: | |
1270 | /* Generate SHUTDOWN when in SHUTDOWN_SENT state. | |
1271 | * Reset error counts. | |
1272 | */ | |
1273 | asoc->overall_error_count = 0; | |
1274 | ||
1275 | /* Generate a SHUTDOWN chunk. */ | |
1276 | new_obj = sctp_make_shutdown(asoc, chunk); | |
1277 | if (!new_obj) | |
1278 | goto nomem; | |
1279 | sctp_add_cmd_sf(commands, SCTP_CMD_REPLY, | |
1280 | SCTP_CHUNK(new_obj)); | |
1281 | break; | |
1282 | ||
1283 | case SCTP_CMD_CHUNK_ULP: | |
1284 | /* Send a chunk to the sockets layer. */ | |
1285 | SCTP_DEBUG_PRINTK("sm_sideff: %s %p, %s %p.\n", | |
1286 | "chunk_up:", cmd->obj.ptr, | |
1287 | "ulpq:", &asoc->ulpq); | |
1288 | sctp_ulpq_tail_data(&asoc->ulpq, cmd->obj.ptr, | |
1289 | GFP_ATOMIC); | |
1290 | break; | |
1291 | ||
1292 | case SCTP_CMD_EVENT_ULP: | |
1293 | /* Send a notification to the sockets layer. */ | |
1294 | SCTP_DEBUG_PRINTK("sm_sideff: %s %p, %s %p.\n", | |
1295 | "event_up:",cmd->obj.ptr, | |
1296 | "ulpq:",&asoc->ulpq); | |
1297 | sctp_ulpq_tail_event(&asoc->ulpq, cmd->obj.ptr); | |
1298 | break; | |
1299 | ||
1300 | case SCTP_CMD_REPLY: | |
1301 | /* If a caller has not already corked, do cork. */ | |
1302 | if (!asoc->outqueue.cork) { | |
1303 | sctp_outq_cork(&asoc->outqueue); | |
1304 | local_cork = 1; | |
1305 | } | |
1306 | /* Send a chunk to our peer. */ | |
1307 | error = sctp_outq_tail(&asoc->outqueue, cmd->obj.ptr); | |
1308 | break; | |
1309 | ||
1310 | case SCTP_CMD_SEND_PKT: | |
1311 | /* Send a full packet to our peer. */ | |
1312 | packet = cmd->obj.ptr; | |
1313 | sctp_packet_transmit(packet); | |
1314 | sctp_ootb_pkt_free(packet); | |
1315 | break; | |
1316 | ||
b6157d8e VY |
1317 | case SCTP_CMD_T1_RETRAN: |
1318 | /* Mark a transport for retransmission. */ | |
1319 | sctp_retransmit(&asoc->outqueue, cmd->obj.transport, | |
1320 | SCTP_RTXR_T1_RTX); | |
1321 | break; | |
1322 | ||
1da177e4 LT |
1323 | case SCTP_CMD_RETRAN: |
1324 | /* Mark a transport for retransmission. */ | |
1325 | sctp_retransmit(&asoc->outqueue, cmd->obj.transport, | |
1326 | SCTP_RTXR_T3_RTX); | |
1327 | break; | |
1328 | ||
1329 | case SCTP_CMD_TRANSMIT: | |
1330 | /* Kick start transmission. */ | |
1331 | error = sctp_outq_uncork(&asoc->outqueue); | |
1332 | local_cork = 0; | |
1333 | break; | |
1334 | ||
1335 | case SCTP_CMD_ECN_CE: | |
1336 | /* Do delayed CE processing. */ | |
1337 | sctp_do_ecn_ce_work(asoc, cmd->obj.u32); | |
1338 | break; | |
1339 | ||
1340 | case SCTP_CMD_ECN_ECNE: | |
1341 | /* Do delayed ECNE processing. */ | |
1342 | new_obj = sctp_do_ecn_ecne_work(asoc, cmd->obj.u32, | |
1343 | chunk); | |
1344 | if (new_obj) | |
1345 | sctp_add_cmd_sf(commands, SCTP_CMD_REPLY, | |
1346 | SCTP_CHUNK(new_obj)); | |
1347 | break; | |
1348 | ||
1349 | case SCTP_CMD_ECN_CWR: | |
1350 | /* Do delayed CWR processing. */ | |
1351 | sctp_do_ecn_cwr_work(asoc, cmd->obj.u32); | |
1352 | break; | |
1353 | ||
1354 | case SCTP_CMD_SETUP_T2: | |
1355 | sctp_cmd_setup_t2(commands, asoc, cmd->obj.ptr); | |
1356 | break; | |
1357 | ||
1358 | case SCTP_CMD_TIMER_START: | |
1359 | timer = &asoc->timers[cmd->obj.to]; | |
1360 | timeout = asoc->timeouts[cmd->obj.to]; | |
09a62660 | 1361 | BUG_ON(!timeout); |
1da177e4 LT |
1362 | |
1363 | timer->expires = jiffies + timeout; | |
1364 | sctp_association_hold(asoc); | |
1365 | add_timer(timer); | |
1366 | break; | |
1367 | ||
1368 | case SCTP_CMD_TIMER_RESTART: | |
1369 | timer = &asoc->timers[cmd->obj.to]; | |
1370 | timeout = asoc->timeouts[cmd->obj.to]; | |
1371 | if (!mod_timer(timer, jiffies + timeout)) | |
1372 | sctp_association_hold(asoc); | |
1373 | break; | |
1374 | ||
1375 | case SCTP_CMD_TIMER_STOP: | |
1376 | timer = &asoc->timers[cmd->obj.to]; | |
1377 | if (timer_pending(timer) && del_timer(timer)) | |
1378 | sctp_association_put(asoc); | |
1379 | break; | |
1380 | ||
3f7a87d2 FF |
1381 | case SCTP_CMD_INIT_CHOOSE_TRANSPORT: |
1382 | chunk = cmd->obj.ptr; | |
9919b455 WY |
1383 | t = sctp_assoc_choose_alter_transport(asoc, |
1384 | asoc->init_last_sent_to); | |
3f7a87d2 FF |
1385 | asoc->init_last_sent_to = t; |
1386 | chunk->transport = t; | |
1387 | t->init_sent_count++; | |
1388 | break; | |
1389 | ||
1da177e4 LT |
1390 | case SCTP_CMD_INIT_RESTART: |
1391 | /* Do the needed accounting and updates | |
1392 | * associated with restarting an initialization | |
3f7a87d2 FF |
1393 | * timer. Only multiply the timeout by two if |
1394 | * all transports have been tried at the current | |
1395 | * timeout. | |
1396 | */ | |
96cd0d3d VY |
1397 | sctp_cmd_t1_timer_update(asoc, |
1398 | SCTP_EVENT_TIMEOUT_T1_INIT, | |
1399 | "INIT"); | |
3f7a87d2 FF |
1400 | |
1401 | sctp_add_cmd_sf(commands, SCTP_CMD_TIMER_RESTART, | |
1402 | SCTP_TO(SCTP_EVENT_TIMEOUT_T1_INIT)); | |
1403 | break; | |
1404 | ||
1405 | case SCTP_CMD_COOKIEECHO_RESTART: | |
1406 | /* Do the needed accounting and updates | |
1407 | * associated with restarting an initialization | |
1408 | * timer. Only multiply the timeout by two if | |
1409 | * all transports have been tried at the current | |
1410 | * timeout. | |
1da177e4 | 1411 | */ |
96cd0d3d VY |
1412 | sctp_cmd_t1_timer_update(asoc, |
1413 | SCTP_EVENT_TIMEOUT_T1_COOKIE, | |
1414 | "COOKIE"); | |
1da177e4 LT |
1415 | |
1416 | /* If we've sent any data bundled with | |
1417 | * COOKIE-ECHO we need to resend. | |
1418 | */ | |
9dbc15f0 RD |
1419 | list_for_each_entry(t, &asoc->peer.transport_addr_list, |
1420 | transports) { | |
b6157d8e VY |
1421 | sctp_retransmit_mark(&asoc->outqueue, t, |
1422 | SCTP_RTXR_T1_RTX); | |
1da177e4 LT |
1423 | } |
1424 | ||
1425 | sctp_add_cmd_sf(commands, | |
1426 | SCTP_CMD_TIMER_RESTART, | |
3f7a87d2 | 1427 | SCTP_TO(SCTP_EVENT_TIMEOUT_T1_COOKIE)); |
1da177e4 LT |
1428 | break; |
1429 | ||
1430 | case SCTP_CMD_INIT_FAILED: | |
dc251b2b | 1431 | sctp_cmd_init_failed(commands, asoc, cmd->obj.err); |
1da177e4 LT |
1432 | break; |
1433 | ||
1434 | case SCTP_CMD_ASSOC_FAILED: | |
1435 | sctp_cmd_assoc_failed(commands, asoc, event_type, | |
5be291fe | 1436 | subtype, chunk, cmd->obj.err); |
1da177e4 LT |
1437 | break; |
1438 | ||
3f7a87d2 FF |
1439 | case SCTP_CMD_INIT_COUNTER_INC: |
1440 | asoc->init_err_counter++; | |
1da177e4 LT |
1441 | break; |
1442 | ||
3f7a87d2 FF |
1443 | case SCTP_CMD_INIT_COUNTER_RESET: |
1444 | asoc->init_err_counter = 0; | |
1445 | asoc->init_cycle = 0; | |
96cd0d3d VY |
1446 | list_for_each_entry(t, &asoc->peer.transport_addr_list, |
1447 | transports) { | |
1448 | t->init_sent_count = 0; | |
1449 | } | |
1da177e4 LT |
1450 | break; |
1451 | ||
1452 | case SCTP_CMD_REPORT_DUP: | |
1453 | sctp_tsnmap_mark_dup(&asoc->peer.tsn_map, | |
1454 | cmd->obj.u32); | |
1455 | break; | |
1456 | ||
1457 | case SCTP_CMD_REPORT_BAD_TAG: | |
1458 | SCTP_DEBUG_PRINTK("vtag mismatch!\n"); | |
1459 | break; | |
1460 | ||
1461 | case SCTP_CMD_STRIKE: | |
1462 | /* Mark one strike against a transport. */ | |
7e99013a VY |
1463 | sctp_do_8_2_transport_strike(asoc, cmd->obj.transport, |
1464 | 0); | |
1465 | break; | |
1466 | ||
1467 | case SCTP_CMD_TRANSPORT_IDLE: | |
1468 | t = cmd->obj.transport; | |
1469 | sctp_transport_lower_cwnd(t, SCTP_LOWER_CWND_INACTIVE); | |
1da177e4 LT |
1470 | break; |
1471 | ||
7e99013a | 1472 | case SCTP_CMD_TRANSPORT_HB_SENT: |
1da177e4 | 1473 | t = cmd->obj.transport; |
7e99013a VY |
1474 | sctp_do_8_2_transport_strike(asoc, t, 1); |
1475 | t->hb_sent = 1; | |
1da177e4 LT |
1476 | break; |
1477 | ||
1478 | case SCTP_CMD_TRANSPORT_ON: | |
1479 | t = cmd->obj.transport; | |
1480 | sctp_cmd_transport_on(commands, asoc, t, chunk); | |
1481 | break; | |
1482 | ||
1483 | case SCTP_CMD_HB_TIMERS_START: | |
1484 | sctp_cmd_hb_timers_start(commands, asoc); | |
1485 | break; | |
1486 | ||
1487 | case SCTP_CMD_HB_TIMER_UPDATE: | |
1488 | t = cmd->obj.transport; | |
765ff02e | 1489 | sctp_cmd_hb_timer_update(commands, t); |
1da177e4 LT |
1490 | break; |
1491 | ||
1492 | case SCTP_CMD_HB_TIMERS_STOP: | |
1493 | sctp_cmd_hb_timers_stop(commands, asoc); | |
1494 | break; | |
1495 | ||
1496 | case SCTP_CMD_REPORT_ERROR: | |
1497 | error = cmd->obj.error; | |
1498 | break; | |
1499 | ||
1500 | case SCTP_CMD_PROCESS_CTSN: | |
1501 | /* Dummy up a SACK for processing. */ | |
2178eda8 | 1502 | sackh.cum_tsn_ack = cmd->obj.be32; |
1da177e4 LT |
1503 | sackh.a_rwnd = 0; |
1504 | sackh.num_gap_ack_blocks = 0; | |
1505 | sackh.num_dup_tsns = 0; | |
1506 | sctp_add_cmd_sf(commands, SCTP_CMD_PROCESS_SACK, | |
1507 | SCTP_SACKH(&sackh)); | |
1508 | break; | |
1509 | ||
1510 | case SCTP_CMD_DISCARD_PACKET: | |
2e3216cd VY |
1511 | /* We need to discard the whole packet. |
1512 | * Uncork the queue since there might be | |
1513 | * responses pending | |
1514 | */ | |
1da177e4 | 1515 | chunk->pdiscard = 1; |
2e3216cd VY |
1516 | if (asoc) { |
1517 | sctp_outq_uncork(&asoc->outqueue); | |
1518 | local_cork = 0; | |
1519 | } | |
1da177e4 LT |
1520 | break; |
1521 | ||
1522 | case SCTP_CMD_RTO_PENDING: | |
1523 | t = cmd->obj.transport; | |
1524 | t->rto_pending = 1; | |
1525 | break; | |
1526 | ||
1527 | case SCTP_CMD_PART_DELIVER: | |
1528 | sctp_ulpq_partial_delivery(&asoc->ulpq, cmd->obj.ptr, | |
1529 | GFP_ATOMIC); | |
1530 | break; | |
1531 | ||
1532 | case SCTP_CMD_RENEGE: | |
1533 | sctp_ulpq_renege(&asoc->ulpq, cmd->obj.ptr, | |
1534 | GFP_ATOMIC); | |
1535 | break; | |
1536 | ||
1537 | case SCTP_CMD_SETUP_T4: | |
1538 | sctp_cmd_setup_t4(commands, asoc, cmd->obj.ptr); | |
1539 | break; | |
1540 | ||
1541 | case SCTP_CMD_PROCESS_OPERR: | |
1542 | sctp_cmd_process_operr(commands, asoc, chunk); | |
1543 | break; | |
1544 | case SCTP_CMD_CLEAR_INIT_TAG: | |
1545 | asoc->peer.i.init_tag = 0; | |
1546 | break; | |
1547 | case SCTP_CMD_DEL_NON_PRIMARY: | |
1548 | sctp_cmd_del_non_primary(asoc); | |
1549 | break; | |
1550 | case SCTP_CMD_T3_RTX_TIMERS_STOP: | |
1551 | sctp_cmd_t3_rtx_timers_stop(commands, asoc); | |
1552 | break; | |
1553 | case SCTP_CMD_FORCE_PRIM_RETRAN: | |
1554 | t = asoc->peer.retran_path; | |
1555 | asoc->peer.retran_path = asoc->peer.primary_path; | |
1556 | error = sctp_outq_uncork(&asoc->outqueue); | |
1557 | local_cork = 0; | |
1558 | asoc->peer.retran_path = t; | |
1559 | break; | |
8de8c873 SS |
1560 | case SCTP_CMD_SET_SK_ERR: |
1561 | sctp_cmd_set_sk_err(asoc, cmd->obj.error); | |
1562 | break; | |
07d93967 VY |
1563 | case SCTP_CMD_ASSOC_CHANGE: |
1564 | sctp_cmd_assoc_change(commands, asoc, | |
1565 | cmd->obj.u8); | |
1566 | break; | |
1567 | case SCTP_CMD_ADAPTATION_IND: | |
1568 | sctp_cmd_adaptation_ind(commands, asoc); | |
1569 | break; | |
1570 | ||
730fc3d0 VY |
1571 | case SCTP_CMD_ASSOC_SHKEY: |
1572 | error = sctp_auth_asoc_init_active_key(asoc, | |
1573 | GFP_ATOMIC); | |
1574 | break; | |
f4ad85ca GJ |
1575 | case SCTP_CMD_UPDATE_INITTAG: |
1576 | asoc->peer.i.init_tag = cmd->obj.u32; | |
1577 | break; | |
730fc3d0 | 1578 | |
1da177e4 LT |
1579 | default: |
1580 | printk(KERN_WARNING "Impossible command: %u, %p\n", | |
1581 | cmd->verb, cmd->obj.ptr); | |
1582 | break; | |
3ff50b79 SH |
1583 | } |
1584 | ||
1da177e4 LT |
1585 | if (error) |
1586 | break; | |
1587 | } | |
1588 | ||
1589 | out: | |
2e3216cd VY |
1590 | /* If this is in response to a received chunk, wait until |
1591 | * we are done with the packet to open the queue so that we don't | |
1592 | * send multiple packets in response to a single request. | |
1593 | */ | |
1594 | if (asoc && SCTP_EVENT_T_CHUNK == event_type && chunk) { | |
1595 | if (chunk->end_of_packet || chunk->singleton) | |
1596 | sctp_outq_uncork(&asoc->outqueue); | |
1597 | } else if (local_cork) | |
1598 | sctp_outq_uncork(&asoc->outqueue); | |
1da177e4 LT |
1599 | return error; |
1600 | nomem: | |
1601 | error = -ENOMEM; | |
1602 | goto out; | |
1603 | } | |
1604 |