Commit | Line | Data |
---|---|---|
1da177e4 LT |
1 | /* SCTP kernel reference Implementation |
2 | * (C) Copyright IBM Corp. 2001, 2004 | |
3 | * Copyright (c) 1999 Cisco, Inc. | |
4 | * Copyright (c) 1999-2001 Motorola, Inc. | |
5 | * | |
6 | * This file is part of the SCTP kernel reference Implementation | |
7 | * | |
8 | * These functions work with the state functions in sctp_sm_statefuns.c | |
9 | * to implement the state operations. These functions implement the | |
10 | * steps which require modifying existing data structures. | |
11 | * | |
12 | * The SCTP reference implementation is free software; | |
13 | * you can redistribute it and/or modify it under the terms of | |
14 | * the GNU General Public License as published by | |
15 | * the Free Software Foundation; either version 2, or (at your option) | |
16 | * any later version. | |
17 | * | |
18 | * The SCTP reference implementation is distributed in the hope that it | |
19 | * will be useful, but WITHOUT ANY WARRANTY; without even the implied | |
20 | * ************************ | |
21 | * warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. | |
22 | * See the GNU General Public License for more details. | |
23 | * | |
24 | * You should have received a copy of the GNU General Public License | |
25 | * along with GNU CC; see the file COPYING. If not, write to | |
26 | * the Free Software Foundation, 59 Temple Place - Suite 330, | |
27 | * Boston, MA 02111-1307, USA. | |
28 | * | |
29 | * Please send any bug reports or fixes you make to the | |
30 | * email address(es): | |
31 | * lksctp developers <[email protected]> | |
32 | * | |
33 | * Or submit a bug report through the following website: | |
34 | * http://www.sf.net/projects/lksctp | |
35 | * | |
36 | * Written or modified by: | |
37 | * La Monte H.P. Yarroll <[email protected]> | |
38 | * Karl Knutson <[email protected]> | |
39 | * Jon Grimm <[email protected]> | |
40 | * Hui Huang <[email protected]> | |
41 | * Dajiang Zhang <[email protected]> | |
42 | * Daisy Chang <[email protected]> | |
43 | * Sridhar Samudrala <[email protected]> | |
44 | * Ardelle Fan <[email protected]> | |
45 | * | |
46 | * Any bugs reported to us we will try to fix... any fixes shared will | |
47 | * be incorporated into the next SCTP release. | |
48 | */ | |
49 | ||
50 | #include <linux/skbuff.h> | |
51 | #include <linux/types.h> | |
52 | #include <linux/socket.h> | |
53 | #include <linux/ip.h> | |
54 | #include <net/sock.h> | |
55 | #include <net/sctp/sctp.h> | |
56 | #include <net/sctp/sm.h> | |
57 | ||
58 | static int sctp_cmd_interpreter(sctp_event_t event_type, | |
59 | sctp_subtype_t subtype, | |
60 | sctp_state_t state, | |
61 | struct sctp_endpoint *ep, | |
62 | struct sctp_association *asoc, | |
63 | void *event_arg, | |
64 | sctp_disposition_t status, | |
65 | sctp_cmd_seq_t *commands, | |
dd0fc66f | 66 | gfp_t gfp); |
1da177e4 LT |
67 | static int sctp_side_effects(sctp_event_t event_type, sctp_subtype_t subtype, |
68 | sctp_state_t state, | |
69 | struct sctp_endpoint *ep, | |
70 | struct sctp_association *asoc, | |
71 | void *event_arg, | |
72 | sctp_disposition_t status, | |
73 | sctp_cmd_seq_t *commands, | |
dd0fc66f | 74 | gfp_t gfp); |
1da177e4 LT |
75 | |
76 | /******************************************************************** | |
77 | * Helper functions | |
78 | ********************************************************************/ | |
79 | ||
80 | /* A helper function for delayed processing of INET ECN CE bit. */ | |
81 | static void sctp_do_ecn_ce_work(struct sctp_association *asoc, | |
82 | __u32 lowest_tsn) | |
83 | { | |
84 | /* Save the TSN away for comparison when we receive CWR */ | |
85 | ||
86 | asoc->last_ecne_tsn = lowest_tsn; | |
87 | asoc->need_ecne = 1; | |
88 | } | |
89 | ||
90 | /* Helper function for delayed processing of SCTP ECNE chunk. */ | |
91 | /* RFC 2960 Appendix A | |
92 | * | |
93 | * RFC 2481 details a specific bit for a sender to send in | |
94 | * the header of its next outbound TCP segment to indicate to | |
95 | * its peer that it has reduced its congestion window. This | |
96 | * is termed the CWR bit. For SCTP the same indication is made | |
97 | * by including the CWR chunk. This chunk contains one data | |
98 | * element, i.e. the TSN number that was sent in the ECNE chunk. | |
99 | * This element represents the lowest TSN number in the datagram | |
100 | * that was originally marked with the CE bit. | |
101 | */ | |
102 | static struct sctp_chunk *sctp_do_ecn_ecne_work(struct sctp_association *asoc, | |
103 | __u32 lowest_tsn, | |
104 | struct sctp_chunk *chunk) | |
105 | { | |
106 | struct sctp_chunk *repl; | |
107 | ||
108 | /* Our previously transmitted packet ran into some congestion | |
109 | * so we should take action by reducing cwnd and ssthresh | |
110 | * and then ACK our peer that we've done so by | |
111 | * sending a CWR. | |
112 | */ | |
113 | ||
114 | /* First, try to determine if we want to actually lower | |
115 | * our cwnd variables. Only lower them if the ECNE looks more | |
116 | * recent than the last response. | |
117 | */ | |
118 | if (TSN_lt(asoc->last_cwr_tsn, lowest_tsn)) { | |
119 | struct sctp_transport *transport; | |
120 | ||
121 | /* Find which transport's congestion variables | |
122 | * need to be adjusted. | |
123 | */ | |
124 | transport = sctp_assoc_lookup_tsn(asoc, lowest_tsn); | |
125 | ||
126 | /* Update the congestion variables. */ | |
127 | if (transport) | |
128 | sctp_transport_lower_cwnd(transport, | |
129 | SCTP_LOWER_CWND_ECNE); | |
130 | asoc->last_cwr_tsn = lowest_tsn; | |
131 | } | |
132 | ||
133 | /* Always try to quiet the other end. In case of lost CWR, | |
134 | * resend last_cwr_tsn. | |
135 | */ | |
136 | repl = sctp_make_cwr(asoc, asoc->last_cwr_tsn, chunk); | |
137 | ||
138 | /* If we run out of memory, it will look like a lost CWR. We'll | |
139 | * get back in sync eventually. | |
140 | */ | |
141 | return repl; | |
142 | } | |
143 | ||
144 | /* Helper function to do delayed processing of ECN CWR chunk. */ | |
145 | static void sctp_do_ecn_cwr_work(struct sctp_association *asoc, | |
146 | __u32 lowest_tsn) | |
147 | { | |
148 | /* Turn off ECNE getting auto-prepended to every outgoing | |
149 | * packet | |
150 | */ | |
151 | asoc->need_ecne = 0; | |
152 | } | |
153 | ||
154 | /* Generate SACK if necessary. We call this at the end of a packet. */ | |
155 | static int sctp_gen_sack(struct sctp_association *asoc, int force, | |
156 | sctp_cmd_seq_t *commands) | |
157 | { | |
158 | __u32 ctsn, max_tsn_seen; | |
159 | struct sctp_chunk *sack; | |
52ccb8e9 | 160 | struct sctp_transport *trans = asoc->peer.last_data_from; |
1da177e4 LT |
161 | int error = 0; |
162 | ||
52ccb8e9 FF |
163 | if (force || |
164 | (!trans && (asoc->param_flags & SPP_SACKDELAY_DISABLE)) || | |
165 | (trans && (trans->param_flags & SPP_SACKDELAY_DISABLE))) | |
1da177e4 LT |
166 | asoc->peer.sack_needed = 1; |
167 | ||
168 | ctsn = sctp_tsnmap_get_ctsn(&asoc->peer.tsn_map); | |
169 | max_tsn_seen = sctp_tsnmap_get_max_tsn_seen(&asoc->peer.tsn_map); | |
170 | ||
171 | /* From 12.2 Parameters necessary per association (i.e. the TCB): | |
172 | * | |
173 | * Ack State : This flag indicates if the next received packet | |
174 | * : is to be responded to with a SACK. ... | |
175 | * : When DATA chunks are out of order, SACK's | |
176 | * : are not delayed (see Section 6). | |
177 | * | |
178 | * [This is actually not mentioned in Section 6, but we | |
179 | * implement it here anyway. --piggy] | |
180 | */ | |
181 | if (max_tsn_seen != ctsn) | |
182 | asoc->peer.sack_needed = 1; | |
183 | ||
184 | /* From 6.2 Acknowledgement on Reception of DATA Chunks: | |
185 | * | |
186 | * Section 4.2 of [RFC2581] SHOULD be followed. Specifically, | |
187 | * an acknowledgement SHOULD be generated for at least every | |
188 | * second packet (not every second DATA chunk) received, and | |
189 | * SHOULD be generated within 200 ms of the arrival of any | |
190 | * unacknowledged DATA chunk. ... | |
191 | */ | |
192 | if (!asoc->peer.sack_needed) { | |
193 | /* We will need a SACK for the next packet. */ | |
194 | asoc->peer.sack_needed = 1; | |
52ccb8e9 FF |
195 | |
196 | /* Set the SACK delay timeout based on the | |
197 | * SACK delay for the last transport | |
198 | * data was received from, or the default | |
199 | * for the association. | |
200 | */ | |
201 | if (trans) | |
202 | asoc->timeouts[SCTP_EVENT_TIMEOUT_SACK] = | |
203 | trans->sackdelay; | |
204 | else | |
205 | asoc->timeouts[SCTP_EVENT_TIMEOUT_SACK] = | |
206 | asoc->sackdelay; | |
207 | ||
208 | /* Restart the SACK timer. */ | |
209 | sctp_add_cmd_sf(commands, SCTP_CMD_TIMER_RESTART, | |
210 | SCTP_TO(SCTP_EVENT_TIMEOUT_SACK)); | |
1da177e4 LT |
211 | } else { |
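| /* Clamp the advertised window (a_rwnd) so the SACK never | |
| * advertises more than the current receive window (rwnd). | |
| */ | |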
212 | if (asoc->a_rwnd > asoc->rwnd) | |
213 | asoc->a_rwnd = asoc->rwnd; | |
214 | sack = sctp_make_sack(asoc); | |
215 | if (!sack) | |
216 | goto nomem; | |
217 | ||
218 | asoc->peer.sack_needed = 0; | |
219 | ||
220 | error = sctp_outq_tail(&asoc->outqueue, sack); | |
221 | ||
222 | /* Stop the SACK timer. */ | |
223 | sctp_add_cmd_sf(commands, SCTP_CMD_TIMER_STOP, | |
224 | SCTP_TO(SCTP_EVENT_TIMEOUT_SACK)); | |
225 | } | |
52ccb8e9 | 226 | |
1da177e4 LT |
227 | return error; |
228 | nomem: | |
229 | error = -ENOMEM; | |
230 | return error; | |
231 | } | |
232 | ||
233 | /* When the T3-RTX timer expires, it calls this function to create the | |
234 | * relevant state machine event. | |
235 | */ | |
236 | void sctp_generate_t3_rtx_event(unsigned long peer) | |
237 | { | |
238 | int error; | |
239 | struct sctp_transport *transport = (struct sctp_transport *) peer; | |
240 | struct sctp_association *asoc = transport->asoc; | |
241 | ||
242 | /* Check whether a task is in the sock. */ | |
243 | ||
244 | sctp_bh_lock_sock(asoc->base.sk); | |
245 | if (sock_owned_by_user(asoc->base.sk)) { | |
246 | SCTP_DEBUG_PRINTK("%s:Sock is busy.\n", __FUNCTION__); | |
247 | ||
248 | /* Try again later. */ | |
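| /* mod_timer() returns 0 when the timer was not already pending; | |
| * in that case take an extra reference on the transport, which the | |
| * expiry path releases again via sctp_transport_put(). | |
| */ | |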
249 | if (!mod_timer(&transport->T3_rtx_timer, jiffies + (HZ/20))) | |
250 | sctp_transport_hold(transport); | |
251 | goto out_unlock; | |
252 | } | |
253 | ||
254 | /* Is this transport really dead and just waiting around for | |
255 | * the timer to let go of the reference? | |
256 | */ | |
257 | if (transport->dead) | |
258 | goto out_unlock; | |
259 | ||
260 | /* Run through the state machine. */ | |
261 | error = sctp_do_sm(SCTP_EVENT_T_TIMEOUT, | |
262 | SCTP_ST_TIMEOUT(SCTP_EVENT_TIMEOUT_T3_RTX), | |
263 | asoc->state, | |
264 | asoc->ep, asoc, | |
265 | transport, GFP_ATOMIC); | |
266 | ||
267 | if (error) | |
268 | asoc->base.sk->sk_err = -error; | |
269 | ||
270 | out_unlock: | |
271 | sctp_bh_unlock_sock(asoc->base.sk); | |
272 | sctp_transport_put(transport); | |
273 | } | |
274 | ||
275 | /* This is a common interface for producing timeout events. It works | |
276 | * for timeouts which use the association as their parameter. | |
277 | */ | |
278 | static void sctp_generate_timeout_event(struct sctp_association *asoc, | |
279 | sctp_event_timeout_t timeout_type) | |
280 | { | |
281 | int error = 0; | |
282 | ||
283 | sctp_bh_lock_sock(asoc->base.sk); | |
284 | if (sock_owned_by_user(asoc->base.sk)) { | |
285 | SCTP_DEBUG_PRINTK("%s:Sock is busy: timer %d\n", | |
286 | __FUNCTION__, | |
287 | timeout_type); | |
288 | ||
289 | /* Try again later. */ | |
290 | if (!mod_timer(&asoc->timers[timeout_type], jiffies + (HZ/20))) | |
291 | sctp_association_hold(asoc); | |
292 | goto out_unlock; | |
293 | } | |
294 | ||
295 | /* Is this association really dead and just waiting around for | |
296 | * the timer to let go of the reference? | |
297 | */ | |
298 | if (asoc->base.dead) | |
299 | goto out_unlock; | |
300 | ||
301 | /* Run through the state machine. */ | |
302 | error = sctp_do_sm(SCTP_EVENT_T_TIMEOUT, | |
303 | SCTP_ST_TIMEOUT(timeout_type), | |
304 | asoc->state, asoc->ep, asoc, | |
305 | (void *)timeout_type, GFP_ATOMIC); | |
306 | ||
307 | if (error) | |
308 | asoc->base.sk->sk_err = -error; | |
309 | ||
310 | out_unlock: | |
311 | sctp_bh_unlock_sock(asoc->base.sk); | |
312 | sctp_association_put(asoc); | |
313 | } | |
314 | ||
315 | static void sctp_generate_t1_cookie_event(unsigned long data) | |
316 | { | |
317 | struct sctp_association *asoc = (struct sctp_association *) data; | |
318 | sctp_generate_timeout_event(asoc, SCTP_EVENT_TIMEOUT_T1_COOKIE); | |
319 | } | |
320 | ||
321 | static void sctp_generate_t1_init_event(unsigned long data) | |
322 | { | |
323 | struct sctp_association *asoc = (struct sctp_association *) data; | |
324 | sctp_generate_timeout_event(asoc, SCTP_EVENT_TIMEOUT_T1_INIT); | |
325 | } | |
326 | ||
327 | static void sctp_generate_t2_shutdown_event(unsigned long data) | |
328 | { | |
329 | struct sctp_association *asoc = (struct sctp_association *) data; | |
330 | sctp_generate_timeout_event(asoc, SCTP_EVENT_TIMEOUT_T2_SHUTDOWN); | |
331 | } | |
332 | ||
333 | static void sctp_generate_t4_rto_event(unsigned long data) | |
334 | { | |
335 | struct sctp_association *asoc = (struct sctp_association *) data; | |
336 | sctp_generate_timeout_event(asoc, SCTP_EVENT_TIMEOUT_T4_RTO); | |
337 | } | |
338 | ||
339 | static void sctp_generate_t5_shutdown_guard_event(unsigned long data) | |
340 | { | |
341 | struct sctp_association *asoc = (struct sctp_association *)data; | |
342 | sctp_generate_timeout_event(asoc, | |
343 | SCTP_EVENT_TIMEOUT_T5_SHUTDOWN_GUARD); | |
344 | ||
345 | } /* sctp_generate_t5_shutdown_guard_event() */ | |
346 | ||
347 | static void sctp_generate_autoclose_event(unsigned long data) | |
348 | { | |
349 | struct sctp_association *asoc = (struct sctp_association *) data; | |
350 | sctp_generate_timeout_event(asoc, SCTP_EVENT_TIMEOUT_AUTOCLOSE); | |
351 | } | |
352 | ||
353 | /* Generate a heartbeat event. If the sock is busy, reschedule. Make | |
354 | * sure that the transport is still valid. | |
355 | */ | |
356 | void sctp_generate_heartbeat_event(unsigned long data) | |
357 | { | |
358 | int error = 0; | |
359 | struct sctp_transport *transport = (struct sctp_transport *) data; | |
360 | struct sctp_association *asoc = transport->asoc; | |
361 | ||
362 | sctp_bh_lock_sock(asoc->base.sk); | |
363 | if (sock_owned_by_user(asoc->base.sk)) { | |
364 | SCTP_DEBUG_PRINTK("%s:Sock is busy.\n", __FUNCTION__); | |
365 | ||
366 | /* Try again later. */ | |
367 | if (!mod_timer(&transport->hb_timer, jiffies + (HZ/20))) | |
368 | sctp_transport_hold(transport); | |
369 | goto out_unlock; | |
370 | } | |
371 | ||
372 | /* Is this structure just waiting around for us to actually | |
373 | * get destroyed? | |
374 | */ | |
375 | if (transport->dead) | |
376 | goto out_unlock; | |
377 | ||
378 | error = sctp_do_sm(SCTP_EVENT_T_TIMEOUT, | |
379 | SCTP_ST_TIMEOUT(SCTP_EVENT_TIMEOUT_HEARTBEAT), | |
380 | asoc->state, asoc->ep, asoc, | |
381 | transport, GFP_ATOMIC); | |
382 | ||
383 | if (error) | |
384 | asoc->base.sk->sk_err = -error; | |
385 | ||
386 | out_unlock: | |
387 | sctp_bh_unlock_sock(asoc->base.sk); | |
388 | sctp_transport_put(transport); | |
389 | } | |
390 | ||
391 | /* Inject a SACK Timeout event into the state machine. */ | |
392 | static void sctp_generate_sack_event(unsigned long data) | |
393 | { | |
394 | struct sctp_association *asoc = (struct sctp_association *) data; | |
395 | sctp_generate_timeout_event(asoc, SCTP_EVENT_TIMEOUT_SACK); | |
396 | } | |
397 | ||
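| /* Dispatch table of timeout handlers, indexed by sctp_event_timeout_t. | |
| * The NULL slots are timeouts that are not driven by an association | |
| * timer here (e.g. T3-RTX and HEARTBEAT use the per-transport handlers | |
| * defined above). Roughly, the table is wired up when an association | |
| * is initialized, along these lines (sketch only, see | |
| * sctp_association_init() for the real code): | |
| * | |
| * for (i = SCTP_EVENT_TIMEOUT_NONE; i < SCTP_NUM_TIMEOUT_TYPES; ++i) { | |
| * init_timer(&asoc->timers[i]); | |
| * asoc->timers[i].function = sctp_timer_events[i]; | |
| * asoc->timers[i].data = (unsigned long) asoc; | |
| * } | |
| */ | |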
398 | sctp_timer_event_t *sctp_timer_events[SCTP_NUM_TIMEOUT_TYPES] = { | |
399 | NULL, | |
400 | sctp_generate_t1_cookie_event, | |
401 | sctp_generate_t1_init_event, | |
402 | sctp_generate_t2_shutdown_event, | |
403 | NULL, | |
404 | sctp_generate_t4_rto_event, | |
405 | sctp_generate_t5_shutdown_guard_event, | |
1e7d3d90 | 406 | NULL, |
1da177e4 LT |
407 | sctp_generate_sack_event, |
408 | sctp_generate_autoclose_event, | |
409 | }; | |
410 | ||
411 | ||
412 | /* RFC 2960 8.2 Path Failure Detection | |
413 | * | |
414 | * When its peer endpoint is multi-homed, an endpoint should keep an | |
415 | * error counter for each of the destination transport addresses of the | |
416 | * peer endpoint. | |
417 | * | |
418 | * Each time the T3-rtx timer expires on any address, or when a | |
419 | * HEARTBEAT sent to an idle address is not acknowledged within a RTO, | |
420 | * the error counter of that destination address will be incremented. | |
421 | * When the value in the error counter exceeds the protocol parameter | |
422 | * 'Path.Max.Retrans' of that destination address, the endpoint should | |
423 | * mark the destination transport address as inactive, and a | |
424 | * notification SHOULD be sent to the upper layer. | |
425 | * | |
426 | */ | |
427 | static void sctp_do_8_2_transport_strike(struct sctp_association *asoc, | |
428 | struct sctp_transport *transport) | |
429 | { | |
430 | /* The check for the association's overall error counter exceeding the | |
431 | * threshold is done in the state function. | |
432 | */ | |
433 | asoc->overall_error_count++; | |
434 | ||
3f7a87d2 | 435 | if (transport->state != SCTP_INACTIVE && |
52ccb8e9 | 436 | (transport->error_count++ >= transport->pathmaxrxt)) { |
3f7a87d2 FF |
437 | SCTP_DEBUG_PRINTK_IPADDR("transport_strike:association %p", |
438 | " transport IP: port:%d failed.\n", | |
439 | asoc, | |
440 | (&transport->ipaddr), | |
441 | transport->ipaddr.v4.sin_port); | |
1da177e4 LT |
442 | sctp_assoc_control_transport(asoc, transport, |
443 | SCTP_TRANSPORT_DOWN, | |
444 | SCTP_FAILED_THRESHOLD); | |
445 | } | |
446 | ||
447 | /* E2) For the destination address for which the timer | |
448 | * expires, set RTO <- RTO * 2 ("back off the timer"). The | |
449 | * maximum value discussed in rule C7 above (RTO.max) may be | |
450 | * used to provide an upper bound to this doubling operation. | |
451 | */ | |
452 | transport->rto = min((transport->rto * 2), transport->asoc->rto_max); | |
453 | } | |
454 | ||
455 | /* Worker routine to handle INIT command failure. */ | |
456 | static void sctp_cmd_init_failed(sctp_cmd_seq_t *commands, | |
457 | struct sctp_association *asoc, | |
458 | unsigned error) | |
459 | { | |
460 | struct sctp_ulpevent *event; | |
461 | ||
462 | event = sctp_ulpevent_make_assoc_change(asoc, 0, SCTP_CANT_STR_ASSOC, | |
463 | (__u16)error, 0, 0, | |
464 | GFP_ATOMIC); | |
465 | ||
466 | if (event) | |
467 | sctp_add_cmd_sf(commands, SCTP_CMD_EVENT_ULP, | |
468 | SCTP_ULPEVENT(event)); | |
469 | ||
470 | sctp_add_cmd_sf(commands, SCTP_CMD_NEW_STATE, | |
471 | SCTP_STATE(SCTP_STATE_CLOSED)); | |
472 | ||
473 | /* SEND_FAILED sent later when cleaning up the association. */ | |
474 | asoc->outqueue.error = error; | |
475 | sctp_add_cmd_sf(commands, SCTP_CMD_DELETE_TCB, SCTP_NULL()); | |
476 | } | |
477 | ||
478 | /* Worker routine to handle SCTP_CMD_ASSOC_FAILED. */ | |
479 | static void sctp_cmd_assoc_failed(sctp_cmd_seq_t *commands, | |
480 | struct sctp_association *asoc, | |
481 | sctp_event_t event_type, | |
482 | sctp_subtype_t subtype, | |
483 | struct sctp_chunk *chunk, | |
484 | unsigned error) | |
485 | { | |
486 | struct sctp_ulpevent *event; | |
487 | ||
488 | /* Cancel any partial delivery in progress. */ | |
489 | sctp_ulpq_abort_pd(&asoc->ulpq, GFP_ATOMIC); | |
490 | ||
491 | event = sctp_ulpevent_make_assoc_change(asoc, 0, SCTP_COMM_LOST, | |
492 | (__u16)error, 0, 0, | |
493 | GFP_ATOMIC); | |
494 | if (event) | |
495 | sctp_add_cmd_sf(commands, SCTP_CMD_EVENT_ULP, | |
496 | SCTP_ULPEVENT(event)); | |
497 | ||
498 | sctp_add_cmd_sf(commands, SCTP_CMD_NEW_STATE, | |
499 | SCTP_STATE(SCTP_STATE_CLOSED)); | |
500 | ||
501 | /* Set sk_err to ECONNRESET on a 1-1 style socket. */ | |
502 | if (!sctp_style(asoc->base.sk, UDP)) | |
503 | asoc->base.sk->sk_err = ECONNRESET; | |
504 | ||
505 | /* SEND_FAILED sent later when cleaning up the association. */ | |
506 | asoc->outqueue.error = error; | |
507 | sctp_add_cmd_sf(commands, SCTP_CMD_DELETE_TCB, SCTP_NULL()); | |
508 | } | |
509 | ||
510 | /* Process an init chunk (may be real INIT/INIT-ACK or an embedded INIT | |
511 | * inside the cookie). In reality, this is only used for INIT-ACK processing | |
512 | * since all other cases use "temporary" associations and can do all | |
513 | * their work in statefuns directly. | |
514 | */ | |
515 | static int sctp_cmd_process_init(sctp_cmd_seq_t *commands, | |
516 | struct sctp_association *asoc, | |
517 | struct sctp_chunk *chunk, | |
3182cd84 | 518 | sctp_init_chunk_t *peer_init, |
dd0fc66f | 519 | gfp_t gfp) |
1da177e4 LT |
520 | { |
521 | int error; | |
522 | ||
523 | /* We only process the init as a side effect in a single | |
524 | * case. This is when we process the INIT-ACK. If we | |
525 | * fail during INIT processing (due to malloc problems), | |
526 | * just return the error and stop processing the stack. | |
527 | */ | |
528 | if (!sctp_process_init(asoc, chunk->chunk_hdr->type, | |
529 | sctp_source(chunk), peer_init, gfp)) | |
530 | error = -ENOMEM; | |
531 | else | |
532 | error = 0; | |
533 | ||
534 | return error; | |
535 | } | |
536 | ||
537 | /* Helper function to break out starting up of heartbeat timers. */ | |
538 | static void sctp_cmd_hb_timers_start(sctp_cmd_seq_t *cmds, | |
539 | struct sctp_association *asoc) | |
540 | { | |
541 | struct sctp_transport *t; | |
542 | struct list_head *pos; | |
543 | ||
544 | /* Start a heartbeat timer for each transport on the association. | |
545 | * Hold a reference on the transport to make sure none of | |
546 | * the needed data structures go away. | |
547 | */ | |
548 | list_for_each(pos, &asoc->peer.transport_addr_list) { | |
549 | t = list_entry(pos, struct sctp_transport, transports); | |
550 | ||
551 | if (!mod_timer(&t->hb_timer, sctp_transport_timeout(t))) | |
552 | sctp_transport_hold(t); | |
553 | } | |
554 | } | |
555 | ||
556 | static void sctp_cmd_hb_timers_stop(sctp_cmd_seq_t *cmds, | |
557 | struct sctp_association *asoc) | |
558 | { | |
559 | struct sctp_transport *t; | |
560 | struct list_head *pos; | |
561 | ||
562 | /* Stop all heartbeat timers. */ | |
563 | ||
564 | list_for_each(pos, &asoc->peer.transport_addr_list) { | |
565 | t = list_entry(pos, struct sctp_transport, transports); | |
566 | if (del_timer(&t->hb_timer)) | |
567 | sctp_transport_put(t); | |
568 | } | |
569 | } | |
570 | ||
571 | /* Helper function to stop any pending T3-RTX timers */ | |
572 | static void sctp_cmd_t3_rtx_timers_stop(sctp_cmd_seq_t *cmds, | |
573 | struct sctp_association *asoc) | |
574 | { | |
575 | struct sctp_transport *t; | |
576 | struct list_head *pos; | |
577 | ||
578 | list_for_each(pos, &asoc->peer.transport_addr_list) { | |
579 | t = list_entry(pos, struct sctp_transport, transports); | |
580 | if (timer_pending(&t->T3_rtx_timer) && | |
581 | del_timer(&t->T3_rtx_timer)) { | |
582 | sctp_transport_put(t); | |
583 | } | |
584 | } | |
585 | } | |
586 | ||
587 | ||
588 | /* Helper function to update the heartbeat timer. */ | |
589 | static void sctp_cmd_hb_timer_update(sctp_cmd_seq_t *cmds, | |
590 | struct sctp_association *asoc, | |
591 | struct sctp_transport *t) | |
592 | { | |
593 | /* Update the heartbeat timer. */ | |
594 | if (!mod_timer(&t->hb_timer, sctp_transport_timeout(t))) | |
595 | sctp_transport_hold(t); | |
596 | } | |
597 | ||
598 | /* Helper function to handle the reception of a HEARTBEAT ACK. */ | |
599 | static void sctp_cmd_transport_on(sctp_cmd_seq_t *cmds, | |
600 | struct sctp_association *asoc, | |
601 | struct sctp_transport *t, | |
602 | struct sctp_chunk *chunk) | |
603 | { | |
604 | sctp_sender_hb_info_t *hbinfo; | |
605 | ||
606 | /* 8.3 Upon the receipt of the HEARTBEAT ACK, the sender of the | |
607 | * HEARTBEAT should clear the error counter of the destination | |
608 | * transport address to which the HEARTBEAT was sent. | |
609 | * The association's overall error count is also cleared. | |
610 | */ | |
611 | t->error_count = 0; | |
612 | t->asoc->overall_error_count = 0; | |
613 | ||
614 | /* Mark the destination transport address as active if it is not so | |
615 | * marked. | |
616 | */ | |
3f7a87d2 | 617 | if (t->state == SCTP_INACTIVE) |
1da177e4 LT |
618 | sctp_assoc_control_transport(asoc, t, SCTP_TRANSPORT_UP, |
619 | SCTP_HEARTBEAT_SUCCESS); | |
620 | ||
621 | /* The receiver of the HEARTBEAT ACK should also perform an | |
622 | * RTT measurement for that destination transport address | |
623 | * using the time value carried in the HEARTBEAT ACK chunk. | |
624 | */ | |
625 | hbinfo = (sctp_sender_hb_info_t *) chunk->skb->data; | |
626 | sctp_transport_update_rto(t, (jiffies - hbinfo->sent_at)); | |
627 | } | |
628 | ||
629 | /* Helper function to do a transport reset at the expiry of the heartbeat | |
630 | * timer. | |
631 | */ | |
632 | static void sctp_cmd_transport_reset(sctp_cmd_seq_t *cmds, | |
633 | struct sctp_association *asoc, | |
634 | struct sctp_transport *t) | |
635 | { | |
636 | sctp_transport_lower_cwnd(t, SCTP_LOWER_CWND_INACTIVE); | |
637 | ||
638 | /* Mark one strike against a transport. */ | |
639 | sctp_do_8_2_transport_strike(asoc, t); | |
640 | } | |
641 | ||
642 | /* Helper function to process the SACK command. */ | |
643 | static int sctp_cmd_process_sack(sctp_cmd_seq_t *cmds, | |
644 | struct sctp_association *asoc, | |
645 | struct sctp_sackhdr *sackh) | |
646 | { | |
647 | int err; | |
648 | ||
649 | if (sctp_outq_sack(&asoc->outqueue, sackh)) { | |
650 | /* There are no more TSNs awaiting SACK. */ | |
651 | err = sctp_do_sm(SCTP_EVENT_T_OTHER, | |
652 | SCTP_ST_OTHER(SCTP_EVENT_NO_PENDING_TSN), | |
653 | asoc->state, asoc->ep, asoc, NULL, | |
654 | GFP_ATOMIC); | |
655 | } else { | |
656 | /* The window may have opened, so we need | |
657 | * to check if we have DATA to transmit. | |
658 | */ | |
659 | err = sctp_outq_flush(&asoc->outqueue, 0); | |
660 | } | |
661 | ||
662 | return err; | |
663 | } | |
664 | ||
665 | /* Helper function to set the timeout value for the T2-SHUTDOWN timer and to set | |
666 | * the transport for a shutdown chunk. | |
667 | */ | |
668 | static void sctp_cmd_setup_t2(sctp_cmd_seq_t *cmds, | |
669 | struct sctp_association *asoc, | |
670 | struct sctp_chunk *chunk) | |
671 | { | |
672 | struct sctp_transport *t; | |
673 | ||
674 | t = sctp_assoc_choose_shutdown_transport(asoc); | |
675 | asoc->shutdown_last_sent_to = t; | |
676 | asoc->timeouts[SCTP_EVENT_TIMEOUT_T2_SHUTDOWN] = t->rto; | |
677 | chunk->transport = t; | |
678 | } | |
679 | ||
680 | /* Helper function to change the state of an association. */ | |
681 | static void sctp_cmd_new_state(sctp_cmd_seq_t *cmds, | |
682 | struct sctp_association *asoc, | |
683 | sctp_state_t state) | |
684 | { | |
685 | struct sock *sk = asoc->base.sk; | |
686 | ||
687 | asoc->state = state; | |
688 | ||
3f7a87d2 FF |
689 | SCTP_DEBUG_PRINTK("sctp_cmd_new_state: asoc %p[%s]\n", |
690 | asoc, sctp_state_tbl[state]); | |
691 | ||
1da177e4 | 692 | if (sctp_style(sk, TCP)) { |
3f7a87d2 | 693 | /* Change the sk->sk_state of a TCP-style socket that has |
1da177e4 LT |
694 | * successfully completed a connect() call. | |
695 | */ | |
696 | if (sctp_state(asoc, ESTABLISHED) && sctp_sstate(sk, CLOSED)) | |
697 | sk->sk_state = SCTP_SS_ESTABLISHED; | |
698 | ||
699 | /* Set the RCV_SHUTDOWN flag when a SHUTDOWN is received. */ | |
700 | if (sctp_state(asoc, SHUTDOWN_RECEIVED) && | |
701 | sctp_sstate(sk, ESTABLISHED)) | |
702 | sk->sk_shutdown |= RCV_SHUTDOWN; | |
703 | } | |
704 | ||
3f7a87d2 FF |
705 | if (sctp_state(asoc, COOKIE_WAIT)) { |
706 | /* Reset init timeouts since they may have been | |
707 | * increased due to timer expirations. | |
708 | */ | |
709 | asoc->timeouts[SCTP_EVENT_TIMEOUT_T1_INIT] = | |
1e7d3d90 | 710 | asoc->rto_initial; |
3f7a87d2 | 711 | asoc->timeouts[SCTP_EVENT_TIMEOUT_T1_COOKIE] = |
1e7d3d90 | 712 | asoc->rto_initial; |
3f7a87d2 FF |
713 | } |
714 | ||
1da177e4 LT |
715 | if (sctp_state(asoc, ESTABLISHED) || |
716 | sctp_state(asoc, CLOSED) || | |
717 | sctp_state(asoc, SHUTDOWN_RECEIVED)) { | |
718 | /* Wake up any processes waiting in the asoc's wait queue in | |
719 | * sctp_wait_for_connect() or sctp_wait_for_sndbuf(). | |
720 | */ | |
721 | if (waitqueue_active(&asoc->wait)) | |
722 | wake_up_interruptible(&asoc->wait); | |
723 | ||
724 | /* Wake up any processes waiting in the sk's sleep queue of | |
725 | * a TCP-style or UDP-style peeled-off socket in | |
726 | * sctp_wait_for_accept() or sctp_wait_for_packet(). | |
727 | * For a UDP-style socket, the waiters are woken up by the | |
728 | * notifications. | |
729 | */ | |
730 | if (!sctp_style(sk, UDP)) | |
731 | sk->sk_state_change(sk); | |
732 | } | |
733 | } | |
734 | ||
735 | /* Helper function to delete an association. */ | |
736 | static void sctp_cmd_delete_tcb(sctp_cmd_seq_t *cmds, | |
737 | struct sctp_association *asoc) | |
738 | { | |
739 | struct sock *sk = asoc->base.sk; | |
740 | ||
741 | /* If it is a non-temporary association belonging to a TCP-style | |
742 | * listening socket that is not closed, do not free it so that accept() | |
743 | * can pick it up later. | |
744 | */ | |
745 | if (sctp_style(sk, TCP) && sctp_sstate(sk, LISTENING) && | |
746 | (!asoc->temp) && (sk->sk_shutdown != SHUTDOWN_MASK)) | |
747 | return; | |
748 | ||
749 | sctp_unhash_established(asoc); | |
750 | sctp_association_free(asoc); | |
751 | } | |
752 | ||
753 | /* | |
754 | * ADDIP Section 4.1 ASCONF Chunk Procedures | |
755 | * A4) Start a T-4 RTO timer, using the RTO value of the selected | |
756 | * destination address (we use the active path instead of the primary | |
757 | * path just because the primary path may be inactive). | |
758 | */ | |
759 | static void sctp_cmd_setup_t4(sctp_cmd_seq_t *cmds, | |
760 | struct sctp_association *asoc, | |
761 | struct sctp_chunk *chunk) | |
762 | { | |
763 | struct sctp_transport *t; | |
764 | ||
765 | t = asoc->peer.active_path; | |
766 | asoc->timeouts[SCTP_EVENT_TIMEOUT_T4_RTO] = t->rto; | |
767 | chunk->transport = t; | |
768 | } | |
769 | ||
770 | /* Process an incoming Operation Error Chunk. */ | |
771 | static void sctp_cmd_process_operr(sctp_cmd_seq_t *cmds, | |
772 | struct sctp_association *asoc, | |
773 | struct sctp_chunk *chunk) | |
774 | { | |
775 | struct sctp_operr_chunk *operr_chunk; | |
776 | struct sctp_errhdr *err_hdr; | |
777 | ||
778 | operr_chunk = (struct sctp_operr_chunk *)chunk->chunk_hdr; | |
779 | err_hdr = &operr_chunk->err_hdr; | |
780 | ||
781 | switch (err_hdr->cause) { | |
782 | case SCTP_ERROR_UNKNOWN_CHUNK: | |
783 | { | |
784 | struct sctp_chunkhdr *unk_chunk_hdr; | |
785 | ||
786 | unk_chunk_hdr = (struct sctp_chunkhdr *)err_hdr->variable; | |
787 | switch (unk_chunk_hdr->type) { | |
788 | /* ADDIP 4.1 A9) If the peer responds to an ASCONF with an | |
789 | * ERROR chunk reporting that it did not recognize the ASCONF | |
790 | * chunk type, the sender of the ASCONF MUST NOT send any | |
791 | * further ASCONF chunks and MUST stop its T-4 timer. | |
792 | */ | |
793 | case SCTP_CID_ASCONF: | |
794 | asoc->peer.asconf_capable = 0; | |
795 | sctp_add_cmd_sf(cmds, SCTP_CMD_TIMER_STOP, | |
796 | SCTP_TO(SCTP_EVENT_TIMEOUT_T4_RTO)); | |
797 | break; | |
798 | default: | |
799 | break; | |
800 | } | |
801 | break; | |
802 | } | |
803 | default: | |
804 | break; | |
805 | } | |
806 | } | |
807 | ||
808 | /* Process variable FWDTSN chunk information. */ | |
809 | static void sctp_cmd_process_fwdtsn(struct sctp_ulpq *ulpq, | |
810 | struct sctp_chunk *chunk) | |
811 | { | |
812 | struct sctp_fwdtsn_skip *skip; | |
813 | /* Walk through all the skipped SSNs */ | |
814 | sctp_walk_fwdtsn(skip, chunk) { | |
815 | sctp_ulpq_skip(ulpq, ntohs(skip->stream), ntohs(skip->ssn)); | |
816 | } | |
817 | ||
818 | return; | |
819 | } | |
820 | ||
821 | /* Helper function to remove the association's non-primary peer | |
822 | * transports. | |
823 | */ | |
824 | static void sctp_cmd_del_non_primary(struct sctp_association *asoc) | |
825 | { | |
826 | struct sctp_transport *t; | |
827 | struct list_head *pos; | |
828 | struct list_head *temp; | |
829 | ||
830 | list_for_each_safe(pos, temp, &asoc->peer.transport_addr_list) { | |
831 | t = list_entry(pos, struct sctp_transport, transports); | |
832 | if (!sctp_cmp_addr_exact(&t->ipaddr, | |
833 | &asoc->peer.primary_addr)) { | |
834 | sctp_assoc_del_peer(asoc, &t->ipaddr); | |
835 | } | |
836 | } | |
837 | ||
838 | return; | |
839 | } | |
840 | ||
841 | /* These three macros allow us to pull the debugging code out of the | |
842 | * main flow of sctp_do_sm() to keep attention focused on the real | |
843 | * functionality there. | |
844 | */ | |
845 | #define DEBUG_PRE \ | |
846 | SCTP_DEBUG_PRINTK("sctp_do_sm prefn: " \ | |
847 | "ep %p, %s, %s, asoc %p[%s], %s\n", \ | |
848 | ep, sctp_evttype_tbl[event_type], \ | |
849 | (*debug_fn)(subtype), asoc, \ | |
850 | sctp_state_tbl[state], state_fn->name) | |
851 | ||
852 | #define DEBUG_POST \ | |
853 | SCTP_DEBUG_PRINTK("sctp_do_sm postfn: " \ | |
854 | "asoc %p, status: %s\n", \ | |
855 | asoc, sctp_status_tbl[status]) | |
856 | ||
857 | #define DEBUG_POST_SFX \ | |
858 | SCTP_DEBUG_PRINTK("sctp_do_sm post sfx: error %d, asoc %p[%s]\n", \ | |
859 | error, asoc, \ | |
860 | sctp_state_tbl[(asoc && sctp_id2assoc(ep->base.sk, \ | |
861 | sctp_assoc2id(asoc)))?asoc->state:SCTP_STATE_CLOSED]) | |
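| /* Note: DEBUG_POST_SFX re-validates the association through | |
| * sctp_id2assoc() because the side effects may have freed it | |
| * (e.g. via SCTP_CMD_DELETE_TCB). | |
| */ | |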
862 | ||
863 | /* | |
864 | * This is the master state machine processing function. | |
865 | * | |
866 | * If you want to understand all of lksctp, this is a | |
867 | * good place to start. | |
868 | */ | |
869 | int sctp_do_sm(sctp_event_t event_type, sctp_subtype_t subtype, | |
870 | sctp_state_t state, | |
871 | struct sctp_endpoint *ep, | |
872 | struct sctp_association *asoc, | |
873 | void *event_arg, | |
dd0fc66f | 874 | gfp_t gfp) |
1da177e4 LT |
875 | { |
876 | sctp_cmd_seq_t commands; | |
877 | const sctp_sm_table_entry_t *state_fn; | |
878 | sctp_disposition_t status; | |
879 | int error = 0; | |
880 | typedef const char *(printfn_t)(sctp_subtype_t); | |
881 | ||
882 | static printfn_t *table[] = { | |
883 | NULL, sctp_cname, sctp_tname, sctp_oname, sctp_pname, | |
884 | }; | |
885 | printfn_t *debug_fn __attribute__ ((unused)) = table[event_type]; | |
886 | ||
887 | /* Look up the state function, run it, and then process the | |
888 | * side effects. These three steps are the heart of lksctp. | |
889 | */ | |
890 | state_fn = sctp_sm_lookup_event(event_type, state, subtype); | |
891 | ||
892 | sctp_init_cmd_seq(&commands); | |
893 | ||
894 | DEBUG_PRE; | |
895 | status = (*state_fn->fn)(ep, asoc, subtype, event_arg, &commands); | |
896 | DEBUG_POST; | |
897 | ||
898 | error = sctp_side_effects(event_type, subtype, state, | |
899 | ep, asoc, event_arg, status, | |
900 | &commands, gfp); | |
901 | DEBUG_POST_SFX; | |
902 | ||
903 | return error; | |
904 | } | |
905 | ||
906 | #undef DEBUG_PRE | |
907 | #undef DEBUG_POST | |
908 | ||
909 | /***************************************************************** | |
910 | * This is the master state machine side-effect processing function. | |
911 | *****************************************************************/ | |
912 | static int sctp_side_effects(sctp_event_t event_type, sctp_subtype_t subtype, | |
913 | sctp_state_t state, | |
914 | struct sctp_endpoint *ep, | |
915 | struct sctp_association *asoc, | |
916 | void *event_arg, | |
917 | sctp_disposition_t status, | |
918 | sctp_cmd_seq_t *commands, | |
dd0fc66f | 919 | gfp_t gfp) |
1da177e4 LT |
920 | { |
921 | int error; | |
922 | ||
923 | /* FIXME - Most of the dispositions left today would be categorized | |
924 | * as "exceptional" dispositions. For those dispositions, it | |
925 | * may not be proper to run through any of the commands at all. | |
926 | * For example, the command interpreter might be run only with | |
927 | * disposition SCTP_DISPOSITION_CONSUME. | |
928 | */ | |
929 | if (0 != (error = sctp_cmd_interpreter(event_type, subtype, state, | |
930 | ep, asoc, | |
931 | event_arg, status, | |
932 | commands, gfp))) | |
933 | goto bail; | |
934 | ||
935 | switch (status) { | |
936 | case SCTP_DISPOSITION_DISCARD: | |
937 | SCTP_DEBUG_PRINTK("Ignored sctp protocol event - state %d, " | |
938 | "event_type %d, event_id %d\n", | |
939 | state, event_type, subtype.chunk); | |
940 | break; | |
941 | ||
942 | case SCTP_DISPOSITION_NOMEM: | |
943 | /* We ran out of memory, so we need to discard this | |
944 | * packet. | |
945 | */ | |
946 | /* BUG--we should now recover some memory, probably by | |
947 | * reneging... | |
948 | */ | |
949 | error = -ENOMEM; | |
950 | break; | |
951 | ||
952 | case SCTP_DISPOSITION_DELETE_TCB: | |
953 | /* This should now be a command. */ | |
954 | break; | |
955 | ||
956 | case SCTP_DISPOSITION_CONSUME: | |
957 | case SCTP_DISPOSITION_ABORT: | |
958 | /* | |
959 | * We should no longer have much work to do here as the | |
960 | * real work has been done as explicit commands above. | |
961 | */ | |
962 | break; | |
963 | ||
964 | case SCTP_DISPOSITION_VIOLATION: | |
965 | printk(KERN_ERR "sctp protocol violation state %d " | |
966 | "chunkid %d\n", state, subtype.chunk); | |
967 | break; | |
968 | ||
969 | case SCTP_DISPOSITION_NOT_IMPL: | |
970 | printk(KERN_WARNING "sctp unimplemented feature in state %d, " | |
971 | "event_type %d, event_id %d\n", | |
972 | state, event_type, subtype.chunk); | |
973 | break; | |
974 | ||
975 | case SCTP_DISPOSITION_BUG: | |
976 | printk(KERN_ERR "sctp bug in state %d, " | |
977 | "event_type %d, event_id %d\n", | |
978 | state, event_type, subtype.chunk); | |
979 | BUG(); | |
980 | break; | |
981 | ||
982 | default: | |
983 | printk(KERN_ERR "sctp impossible disposition %d " | |
984 | "in state %d, event_type %d, event_id %d\n", | |
985 | status, state, event_type, subtype.chunk); | |
986 | BUG(); | |
987 | break; | |
988 | } | |
989 | ||
990 | bail: | |
991 | return error; | |
992 | } | |
993 | ||
994 | /******************************************************************** | |
995 | * 2nd Level Abstractions | |
996 | ********************************************************************/ | |
997 | ||
998 | /* This is the side-effect interpreter. */ | |
999 | static int sctp_cmd_interpreter(sctp_event_t event_type, | |
1000 | sctp_subtype_t subtype, | |
1001 | sctp_state_t state, | |
1002 | struct sctp_endpoint *ep, | |
1003 | struct sctp_association *asoc, | |
1004 | void *event_arg, | |
1005 | sctp_disposition_t status, | |
1006 | sctp_cmd_seq_t *commands, | |
dd0fc66f | 1007 | gfp_t gfp) |
1da177e4 LT |
1008 | { |
1009 | int error = 0; | |
1010 | int force; | |
1011 | sctp_cmd_t *cmd; | |
1012 | struct sctp_chunk *new_obj; | |
1013 | struct sctp_chunk *chunk = NULL; | |
1014 | struct sctp_packet *packet; | |
1015 | struct list_head *pos; | |
1016 | struct timer_list *timer; | |
1017 | unsigned long timeout; | |
1018 | struct sctp_transport *t; | |
1019 | struct sctp_sackhdr sackh; | |
1020 | int local_cork = 0; | |
1021 | ||
1022 | if (SCTP_EVENT_T_TIMEOUT != event_type) | |
1023 | chunk = (struct sctp_chunk *) event_arg; | |
1024 | ||
1025 | /* Note: This whole file is a huge candidate for rework. | |
1026 | * For example, each command could either have its own handler, so | |
1027 | * the loop would look like: | |
1028 | * while (cmds) | |
1029 | * cmd->handle(x, y, z) | |
1030 | * --jgrimm | |
1031 | */ | |
1032 | while (NULL != (cmd = sctp_next_cmd(commands))) { | |
1033 | switch (cmd->verb) { | |
1034 | case SCTP_CMD_NOP: | |
1035 | /* Do nothing. */ | |
1036 | break; | |
1037 | ||
1038 | case SCTP_CMD_NEW_ASOC: | |
1039 | /* Register a new association. */ | |
1040 | if (local_cork) { | |
1041 | sctp_outq_uncork(&asoc->outqueue); | |
1042 | local_cork = 0; | |
1043 | } | |
1044 | asoc = cmd->obj.ptr; | |
1045 | /* Register with the endpoint. */ | |
1046 | sctp_endpoint_add_asoc(ep, asoc); | |
1047 | sctp_hash_established(asoc); | |
1048 | break; | |
1049 | ||
1050 | case SCTP_CMD_UPDATE_ASSOC: | |
1051 | sctp_assoc_update(asoc, cmd->obj.ptr); | |
1052 | break; | |
1053 | ||
1054 | case SCTP_CMD_PURGE_OUTQUEUE: | |
1055 | sctp_outq_teardown(&asoc->outqueue); | |
1056 | break; | |
1057 | ||
1058 | case SCTP_CMD_DELETE_TCB: | |
1059 | if (local_cork) { | |
1060 | sctp_outq_uncork(&asoc->outqueue); | |
1061 | local_cork = 0; | |
1062 | } | |
1063 | /* Delete the current association. */ | |
1064 | sctp_cmd_delete_tcb(commands, asoc); | |
1065 | asoc = NULL; | |
1066 | break; | |
1067 | ||
1068 | case SCTP_CMD_NEW_STATE: | |
1069 | /* Enter a new state. */ | |
1070 | sctp_cmd_new_state(commands, asoc, cmd->obj.state); | |
1071 | break; | |
1072 | ||
1073 | case SCTP_CMD_REPORT_TSN: | |
1074 | /* Record the arrival of a TSN. */ | |
1075 | sctp_tsnmap_mark(&asoc->peer.tsn_map, cmd->obj.u32); | |
1076 | break; | |
1077 | ||
1078 | case SCTP_CMD_REPORT_FWDTSN: | |
1079 | /* Move the Cumulative TSN Ack ahead. */ | |
1080 | sctp_tsnmap_skip(&asoc->peer.tsn_map, cmd->obj.u32); | |
1081 | ||
1082 | /* Abort any in progress partial delivery. */ | |
1083 | sctp_ulpq_abort_pd(&asoc->ulpq, GFP_ATOMIC); | |
1084 | break; | |
1085 | ||
1086 | case SCTP_CMD_PROCESS_FWDTSN: | |
1087 | sctp_cmd_process_fwdtsn(&asoc->ulpq, cmd->obj.ptr); | |
1088 | break; | |
1089 | ||
1090 | case SCTP_CMD_GEN_SACK: | |
1091 | /* Generate a Selective ACK. | |
1092 | * The argument tells us whether to just count | |
1093 | * the packet and MAYBE generate a SACK, or | |
1094 | * force a SACK out. | |
1095 | */ | |
1096 | force = cmd->obj.i32; | |
1097 | error = sctp_gen_sack(asoc, force, commands); | |
1098 | break; | |
1099 | ||
1100 | case SCTP_CMD_PROCESS_SACK: | |
1101 | /* Process an inbound SACK. */ | |
1102 | error = sctp_cmd_process_sack(commands, asoc, | |
1103 | cmd->obj.ptr); | |
1104 | break; | |
1105 | ||
1106 | case SCTP_CMD_GEN_INIT_ACK: | |
1107 | /* Generate an INIT ACK chunk. */ | |
1108 | new_obj = sctp_make_init_ack(asoc, chunk, GFP_ATOMIC, | |
1109 | 0); | |
1110 | if (!new_obj) | |
1111 | goto nomem; | |
1112 | ||
1113 | sctp_add_cmd_sf(commands, SCTP_CMD_REPLY, | |
1114 | SCTP_CHUNK(new_obj)); | |
1115 | break; | |
1116 | ||
1117 | case SCTP_CMD_PEER_INIT: | |
1118 | /* Process a unified INIT from the peer. | |
1119 | * Note: Only used during INIT-ACK processing. If | |
1120 | * there is an error just return to the outer | |
1121 | * layer which will bail. | |
1122 | */ | |
1123 | error = sctp_cmd_process_init(commands, asoc, chunk, | |
1124 | cmd->obj.ptr, gfp); | |
1125 | break; | |
1126 | ||
1127 | case SCTP_CMD_GEN_COOKIE_ECHO: | |
1128 | /* Generate a COOKIE ECHO chunk. */ | |
1129 | new_obj = sctp_make_cookie_echo(asoc, chunk); | |
1130 | if (!new_obj) { | |
1131 | if (cmd->obj.ptr) | |
1132 | sctp_chunk_free(cmd->obj.ptr); | |
1133 | goto nomem; | |
1134 | } | |
1135 | sctp_add_cmd_sf(commands, SCTP_CMD_REPLY, | |
1136 | SCTP_CHUNK(new_obj)); | |
1137 | ||
1138 | /* If there is an ERROR chunk to be sent along with | |
1139 | * the COOKIE_ECHO, send it, too. | |
1140 | */ | |
1141 | if (cmd->obj.ptr) | |
1142 | sctp_add_cmd_sf(commands, SCTP_CMD_REPLY, | |
1143 | SCTP_CHUNK(cmd->obj.ptr)); | |
1144 | ||
1145 | /* FIXME - Eventually come up with a cleaner way to | |
1146 | * enable COOKIE-ECHO + DATA bundling during | |
1147 | * multihoming stale cookie scenarios, the following | |
1148 | * command plays with asoc->peer.retran_path to | |
1149 | * avoid the problem of sending the COOKIE-ECHO and | |
1150 | * DATA in different paths, which could result | |
1151 | * in the association being ABORTed if the DATA chunk | |
1152 | * is processed first by the server. Checking the | |
1153 | * init error counter simply causes this command | |
1154 | * to be executed only during failed attempts of | |
1155 | * association establishment. | |
1156 | */ | |
3f7a87d2 FF |
1157 | if ((asoc->peer.retran_path != |
1158 | asoc->peer.primary_path) && | |
1159 | (asoc->init_err_counter > 0)) { | |
1160 | sctp_add_cmd_sf(commands, | |
1da177e4 LT |
1161 | SCTP_CMD_FORCE_PRIM_RETRAN, |
1162 | SCTP_NULL()); | |
1163 | } | |
1164 | ||
1165 | break; | |
1166 | ||
1167 | case SCTP_CMD_GEN_SHUTDOWN: | |
1168 | /* Generate SHUTDOWN when in SHUTDOWN_SENT state. | |
1169 | * Reset error counts. | |
1170 | */ | |
1171 | asoc->overall_error_count = 0; | |
1172 | ||
1173 | /* Generate a SHUTDOWN chunk. */ | |
1174 | new_obj = sctp_make_shutdown(asoc, chunk); | |
1175 | if (!new_obj) | |
1176 | goto nomem; | |
1177 | sctp_add_cmd_sf(commands, SCTP_CMD_REPLY, | |
1178 | SCTP_CHUNK(new_obj)); | |
1179 | break; | |
1180 | ||
1181 | case SCTP_CMD_CHUNK_ULP: | |
1182 | /* Send a chunk to the sockets layer. */ | |
1183 | SCTP_DEBUG_PRINTK("sm_sideff: %s %p, %s %p.\n", | |
1184 | "chunk_up:", cmd->obj.ptr, | |
1185 | "ulpq:", &asoc->ulpq); | |
1186 | sctp_ulpq_tail_data(&asoc->ulpq, cmd->obj.ptr, | |
1187 | GFP_ATOMIC); | |
1188 | break; | |
1189 | ||
1190 | case SCTP_CMD_EVENT_ULP: | |
1191 | /* Send a notification to the sockets layer. */ | |
1192 | SCTP_DEBUG_PRINTK("sm_sideff: %s %p, %s %p.\n", | |
1193 | "event_up:",cmd->obj.ptr, | |
1194 | "ulpq:",&asoc->ulpq); | |
1195 | sctp_ulpq_tail_event(&asoc->ulpq, cmd->obj.ptr); | |
1196 | break; | |
1197 | ||
1198 | case SCTP_CMD_REPLY: | |
1199 | /* If the caller has not already corked, do cork. */ | |
1200 | if (!asoc->outqueue.cork) { | |
1201 | sctp_outq_cork(&asoc->outqueue); | |
1202 | local_cork = 1; | |
1203 | } | |
1204 | /* Send a chunk to our peer. */ | |
1205 | error = sctp_outq_tail(&asoc->outqueue, cmd->obj.ptr); | |
1206 | break; | |
1207 | ||
1208 | case SCTP_CMD_SEND_PKT: | |
1209 | /* Send a full packet to our peer. */ | |
1210 | packet = cmd->obj.ptr; | |
1211 | sctp_packet_transmit(packet); | |
1212 | sctp_ootb_pkt_free(packet); | |
1213 | break; | |
1214 | ||
1215 | case SCTP_CMD_RETRAN: | |
1216 | /* Mark a transport for retransmission. */ | |
1217 | sctp_retransmit(&asoc->outqueue, cmd->obj.transport, | |
1218 | SCTP_RTXR_T3_RTX); | |
1219 | break; | |
1220 | ||
1221 | case SCTP_CMD_TRANSMIT: | |
1222 | /* Kick start transmission. */ | |
1223 | error = sctp_outq_uncork(&asoc->outqueue); | |
1224 | local_cork = 0; | |
1225 | break; | |
1226 | ||
1227 | case SCTP_CMD_ECN_CE: | |
1228 | /* Do delayed CE processing. */ | |
1229 | sctp_do_ecn_ce_work(asoc, cmd->obj.u32); | |
1230 | break; | |
1231 | ||
1232 | case SCTP_CMD_ECN_ECNE: | |
1233 | /* Do delayed ECNE processing. */ | |
1234 | new_obj = sctp_do_ecn_ecne_work(asoc, cmd->obj.u32, | |
1235 | chunk); | |
1236 | if (new_obj) | |
1237 | sctp_add_cmd_sf(commands, SCTP_CMD_REPLY, | |
1238 | SCTP_CHUNK(new_obj)); | |
1239 | break; | |
1240 | ||
1241 | case SCTP_CMD_ECN_CWR: | |
1242 | /* Do delayed CWR processing. */ | |
1243 | sctp_do_ecn_cwr_work(asoc, cmd->obj.u32); | |
1244 | break; | |
1245 | ||
1246 | case SCTP_CMD_SETUP_T2: | |
1247 | sctp_cmd_setup_t2(commands, asoc, cmd->obj.ptr); | |
1248 | break; | |
1249 | ||
1250 | case SCTP_CMD_TIMER_START: | |
1251 | timer = &asoc->timers[cmd->obj.to]; | |
1252 | timeout = asoc->timeouts[cmd->obj.to]; | |
09a62660 | 1253 | BUG_ON(!timeout); |
1da177e4 LT |
1254 | |
1255 | timer->expires = jiffies + timeout; | |
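| /* Hold a reference for the pending timer; the timeout handler | |
| * drops it again with sctp_association_put(). | |
| */ | |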
1256 | sctp_association_hold(asoc); | |
1257 | add_timer(timer); | |
1258 | break; | |
1259 | ||
1260 | case SCTP_CMD_TIMER_RESTART: | |
1261 | timer = &asoc->timers[cmd->obj.to]; | |
1262 | timeout = asoc->timeouts[cmd->obj.to]; | |
1263 | if (!mod_timer(timer, jiffies + timeout)) | |
1264 | sctp_association_hold(asoc); | |
1265 | break; | |
1266 | ||
1267 | case SCTP_CMD_TIMER_STOP: | |
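| /* Release the timer's reference on the association only if a | |
| * pending timer is actually cancelled here. | |
| */ | |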
1268 | timer = &asoc->timers[cmd->obj.to]; | |
1269 | if (timer_pending(timer) && del_timer(timer)) | |
1270 | sctp_association_put(asoc); | |
1271 | break; | |
1272 | ||
3f7a87d2 FF |
1273 | case SCTP_CMD_INIT_CHOOSE_TRANSPORT: |
1274 | chunk = cmd->obj.ptr; | |
1275 | t = sctp_assoc_choose_init_transport(asoc); | |
1276 | asoc->init_last_sent_to = t; | |
1277 | chunk->transport = t; | |
1278 | t->init_sent_count++; | |
1279 | break; | |
1280 | ||
1da177e4 LT |
1281 | case SCTP_CMD_INIT_RESTART: |
1282 | /* Do the needed accounting and updates | |
1283 | * associated with restarting an initialization | |
3f7a87d2 FF |
1284 | * timer. Only multiply the timeout by two if |
1285 | * all transports have been tried at the current | |
1286 | * timeout. | |
1287 | */ | |
1288 | t = asoc->init_last_sent_to; | |
1289 | asoc->init_err_counter++; | |
1290 | ||
1291 | if (t->init_sent_count > (asoc->init_cycle + 1)) { | |
1292 | asoc->timeouts[SCTP_EVENT_TIMEOUT_T1_INIT] *= 2; | |
1293 | if (asoc->timeouts[SCTP_EVENT_TIMEOUT_T1_INIT] > | |
1294 | asoc->max_init_timeo) { | |
1295 | asoc->timeouts[SCTP_EVENT_TIMEOUT_T1_INIT] = | |
1296 | asoc->max_init_timeo; | |
1297 | } | |
1298 | asoc->init_cycle++; | |
1299 | SCTP_DEBUG_PRINTK( | |
1300 | "T1 INIT Timeout adjustment" | |
1301 | " init_err_counter: %d" | |
1302 | " cycle: %d" | |
8116ffad | 1303 | " timeout: %ld\n", |
3f7a87d2 FF |
1304 | asoc->init_err_counter, |
1305 | asoc->init_cycle, | |
1306 | asoc->timeouts[SCTP_EVENT_TIMEOUT_T1_INIT]); | |
1307 | } | |
1308 | ||
1309 | sctp_add_cmd_sf(commands, SCTP_CMD_TIMER_RESTART, | |
1310 | SCTP_TO(SCTP_EVENT_TIMEOUT_T1_INIT)); | |
1311 | break; | |
1312 | ||
1313 | case SCTP_CMD_COOKIEECHO_RESTART: | |
1314 | /* Do the needed accounting and updates | |
1315 | * associated with restarting an initialization | |
1316 | * timer. Only multiply the timeout by two if | |
1317 | * all transports have been tried at the current | |
1318 | * timeout. | |
1da177e4 | 1319 | */ |
3f7a87d2 FF |
1320 | asoc->init_err_counter++; |
1321 | ||
1322 | asoc->timeouts[SCTP_EVENT_TIMEOUT_T1_COOKIE] *= 2; | |
1323 | if (asoc->timeouts[SCTP_EVENT_TIMEOUT_T1_COOKIE] > | |
1da177e4 | 1324 | asoc->max_init_timeo) { |
3f7a87d2 | 1325 | asoc->timeouts[SCTP_EVENT_TIMEOUT_T1_COOKIE] = |
1da177e4 LT |
1326 | asoc->max_init_timeo; |
1327 | } | |
3f7a87d2 FF |
1328 | SCTP_DEBUG_PRINTK( |
1329 | "T1 COOKIE Timeout adjustment" | |
1330 | " init_err_counter: %d" | |
8116ffad | 1331 | " timeout: %ld\n", |
3f7a87d2 FF |
1332 | asoc->init_err_counter, |
1333 | asoc->timeouts[SCTP_EVENT_TIMEOUT_T1_COOKIE]); | |
1da177e4 LT |
1334 | |
1335 | /* If we've sent any data bundled with | |
1336 | * COOKIE-ECHO we need to resend. | |
1337 | */ | |
1338 | list_for_each(pos, &asoc->peer.transport_addr_list) { | |
1339 | t = list_entry(pos, struct sctp_transport, | |
1340 | transports); | |
1341 | sctp_retransmit_mark(&asoc->outqueue, t, 0); | |
1342 | } | |
1343 | ||
1344 | sctp_add_cmd_sf(commands, | |
1345 | SCTP_CMD_TIMER_RESTART, | |
3f7a87d2 | 1346 | SCTP_TO(SCTP_EVENT_TIMEOUT_T1_COOKIE)); |
1da177e4 LT |
1347 | break; |
1348 | ||
1349 | case SCTP_CMD_INIT_FAILED: | |
1350 | sctp_cmd_init_failed(commands, asoc, cmd->obj.u32); | |
1351 | break; | |
1352 | ||
1353 | case SCTP_CMD_ASSOC_FAILED: | |
1354 | sctp_cmd_assoc_failed(commands, asoc, event_type, | |
1355 | subtype, chunk, cmd->obj.u32); | |
1356 | break; | |
1357 | ||
3f7a87d2 FF |
1358 | case SCTP_CMD_INIT_COUNTER_INC: |
1359 | asoc->init_err_counter++; | |
1da177e4 LT |
1360 | break; |
1361 | ||
3f7a87d2 FF |
1362 | case SCTP_CMD_INIT_COUNTER_RESET: |
1363 | asoc->init_err_counter = 0; | |
1364 | asoc->init_cycle = 0; | |
1da177e4 LT |
1365 | break; |
1366 | ||
1367 | case SCTP_CMD_REPORT_DUP: | |
1368 | sctp_tsnmap_mark_dup(&asoc->peer.tsn_map, | |
1369 | cmd->obj.u32); | |
1370 | break; | |
1371 | ||
1372 | case SCTP_CMD_REPORT_BAD_TAG: | |
1373 | SCTP_DEBUG_PRINTK("vtag mismatch!\n"); | |
1374 | break; | |
1375 | ||
1376 | case SCTP_CMD_STRIKE: | |
1377 | /* Mark one strike against a transport. */ | |
1378 | sctp_do_8_2_transport_strike(asoc, cmd->obj.transport); | |
1379 | break; | |
1380 | ||
1381 | case SCTP_CMD_TRANSPORT_RESET: | |
1382 | t = cmd->obj.transport; | |
1383 | sctp_cmd_transport_reset(commands, asoc, t); | |
1384 | break; | |
1385 | ||
1386 | case SCTP_CMD_TRANSPORT_ON: | |
1387 | t = cmd->obj.transport; | |
1388 | sctp_cmd_transport_on(commands, asoc, t, chunk); | |
1389 | break; | |
1390 | ||
1391 | case SCTP_CMD_HB_TIMERS_START: | |
1392 | sctp_cmd_hb_timers_start(commands, asoc); | |
1393 | break; | |
1394 | ||
1395 | case SCTP_CMD_HB_TIMER_UPDATE: | |
1396 | t = cmd->obj.transport; | |
1397 | sctp_cmd_hb_timer_update(commands, asoc, t); | |
1398 | break; | |
1399 | ||
1400 | case SCTP_CMD_HB_TIMERS_STOP: | |
1401 | sctp_cmd_hb_timers_stop(commands, asoc); | |
1402 | break; | |
1403 | ||
1404 | case SCTP_CMD_REPORT_ERROR: | |
1405 | error = cmd->obj.error; | |
1406 | break; | |
1407 | ||
1408 | case SCTP_CMD_PROCESS_CTSN: | |
1409 | /* Dummy up a SACK for processing. */ | |
1410 | sackh.cum_tsn_ack = cmd->obj.u32; | |
1411 | sackh.a_rwnd = 0; | |
1412 | sackh.num_gap_ack_blocks = 0; | |
1413 | sackh.num_dup_tsns = 0; | |
1414 | sctp_add_cmd_sf(commands, SCTP_CMD_PROCESS_SACK, | |
1415 | SCTP_SACKH(&sackh)); | |
1416 | break; | |
1417 | ||
1418 | case SCTP_CMD_DISCARD_PACKET: | |
1419 | /* We need to discard the whole packet. */ | |
1420 | chunk->pdiscard = 1; | |
1421 | break; | |
1422 | ||
1423 | case SCTP_CMD_RTO_PENDING: | |
1424 | t = cmd->obj.transport; | |
1425 | t->rto_pending = 1; | |
1426 | break; | |
1427 | ||
1428 | case SCTP_CMD_PART_DELIVER: | |
1429 | sctp_ulpq_partial_delivery(&asoc->ulpq, cmd->obj.ptr, | |
1430 | GFP_ATOMIC); | |
1431 | break; | |
1432 | ||
1433 | case SCTP_CMD_RENEGE: | |
1434 | sctp_ulpq_renege(&asoc->ulpq, cmd->obj.ptr, | |
1435 | GFP_ATOMIC); | |
1436 | break; | |
1437 | ||
1438 | case SCTP_CMD_SETUP_T4: | |
1439 | sctp_cmd_setup_t4(commands, asoc, cmd->obj.ptr); | |
1440 | break; | |
1441 | ||
1442 | case SCTP_CMD_PROCESS_OPERR: | |
1443 | sctp_cmd_process_operr(commands, asoc, chunk); | |
1444 | break; | |
1445 | case SCTP_CMD_CLEAR_INIT_TAG: | |
1446 | asoc->peer.i.init_tag = 0; | |
1447 | break; | |
1448 | case SCTP_CMD_DEL_NON_PRIMARY: | |
1449 | sctp_cmd_del_non_primary(asoc); | |
1450 | break; | |
1451 | case SCTP_CMD_T3_RTX_TIMERS_STOP: | |
1452 | sctp_cmd_t3_rtx_timers_stop(commands, asoc); | |
1453 | break; | |
1454 | case SCTP_CMD_FORCE_PRIM_RETRAN: | |
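| /* Temporarily force retransmissions onto the primary path, flush | |
| * the outqueue, then restore the saved retran_path. | |
| */ | |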
1455 | t = asoc->peer.retran_path; | |
1456 | asoc->peer.retran_path = asoc->peer.primary_path; | |
1457 | error = sctp_outq_uncork(&asoc->outqueue); | |
1458 | local_cork = 0; | |
1459 | asoc->peer.retran_path = t; | |
1460 | break; | |
1461 | default: | |
1462 | printk(KERN_WARNING "Impossible command: %u, %p\n", | |
1463 | cmd->verb, cmd->obj.ptr); | |
1464 | break; | |
1465 | } |
1466 | if (error) | |
1467 | break; | |
1468 | } | |
1469 | ||
1470 | out: | |
1471 | if (local_cork) | |
1472 | sctp_outq_uncork(&asoc->outqueue); | |
1473 | return error; | |
1474 | nomem: | |
1475 | error = -ENOMEM; | |
1476 | goto out; | |
1477 | } | |
1478 |