1 | /* |
2 | * Copyright (c) 1982, 1986, 1988, 1990, 1993, 1994 | |
3 | * The Regents of the University of California. All rights reserved. | |
4 | * | |
5 | * Redistribution and use in source and binary forms, with or without | |
6 | * modification, are permitted provided that the following conditions | |
7 | * are met: | |
8 | * 1. Redistributions of source code must retain the above copyright | |
9 | * notice, this list of conditions and the following disclaimer. | |
10 | * 2. Redistributions in binary form must reproduce the above copyright | |
11 | * notice, this list of conditions and the following disclaimer in the | |
12 | * documentation and/or other materials provided with the distribution. | |
13 | * 3. All advertising materials mentioning features or use of this software | |
14 | * must display the following acknowledgement: | |
15 | * This product includes software developed by the University of | |
16 | * California, Berkeley and its contributors. | |
17 | * 4. Neither the name of the University nor the names of its contributors | |
18 | * may be used to endorse or promote products derived from this software | |
19 | * without specific prior written permission. | |
20 | * | |
21 | * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND | |
22 | * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE | |
23 | * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE | |
24 | * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE | |
25 | * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL | |
26 | * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS | |
27 | * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) | |
28 | * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT | |
29 | * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY | |
30 | * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF | |
31 | * SUCH DAMAGE. | |
32 | * | |
33 | * @(#)tcp_input.c 8.5 (Berkeley) 4/10/94 | |
34 | * tcp_input.c,v 1.10 1994/10/13 18:36:32 wollman Exp | |
35 | */ | |
36 | ||
37 | /* | |
38 | * Changes and additions relating to SLiRP | |
39 | * Copyright (c) 1995 Danny Gasparovski. | |
40 | * | |
41 | * Please read the file COPYRIGHT for the | |
42 | * terms and conditions of the copyright. | |
43 | */ | |
44 | ||
45 | #include <slirp.h> | |
46 | #include "ip_icmp.h" | |
47 | ||
48 | struct socket tcb; | |
49 | ||
50 | #define min(x,y) ((x) < (y) ? (x) : (y)) | |
51 | #define max(x,y) ((x) > (y) ? (x) : (y)) | |
52 | ||
53 | int tcprexmtthresh = 3; | |
54 | struct socket *tcp_last_so = &tcb; | |
55 | ||
56 | tcp_seq tcp_iss; /* tcp initial send seq # */ | |
57 | ||
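/* 24 days, expressed in PR_SLOWHZ slow-timer ticks; used by the (currently
 * commented-out) RFC 1323 PAWS check further down in tcp_input(). */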
58 | #define TCP_PAWS_IDLE (24 * 24 * 60 * 60 * PR_SLOWHZ) | |
59 | ||
60 | /* for modulo comparisons of timestamps */ | |
61 | #define TSTMP_LT(a,b) ((int)((a)-(b)) < 0) | |
62 | #define TSTMP_GEQ(a,b) ((int)((a)-(b)) >= 0) | |
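/*
 * Illustrative sketch (not part of the original sources): the signed cast in
 * the macros above makes the comparison wrap-safe, so a timestamp taken just
 * before the 32-bit counter wraps still compares as "earlier" than one taken
 * just after the wrap.
 */
#if 0
static void tstmp_wrap_example(void)
{
	u_int32_t before_wrap = 0xfffffff0;
	u_int32_t after_wrap  = 0x00000010;

	/* (int)(before_wrap - after_wrap) == -32, hence "less than" */
	int older_is_less = TSTMP_LT(before_wrap, after_wrap);	/* 1 */
	int newer_is_geq  = TSTMP_GEQ(after_wrap, before_wrap);	/* 1 */

	(void)older_is_less;
	(void)newer_is_geq;
}
#endif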
63 | ||
64 | /* | |
65 | * Insert segment ti into reassembly queue of tcp with | |
66 | * control block tp. Return TH_FIN if reassembly now includes | |
67 | * a segment with FIN. The macro form does the common case inline | |
68 | * (segment is the next to be received on an established connection, | |
69 | * and the queue is empty), avoiding linkage into and removal | |
70 | * from the queue and repetition of various conversions. | |
71 | * Set DELACK for segments received in order, but ack immediately | |
72 | * when segments are out of order (so fast retransmit can work). | |
73 | */ | |
74 | #ifdef TCP_ACK_HACK | |
75 | #define TCP_REASS(tp, ti, m, so, flags) {\ | |
76 | if ((ti)->ti_seq == (tp)->rcv_nxt && \ | |
77 | (tp)->seg_next == (tcpiphdrp_32)(tp) && \ | |
78 | (tp)->t_state == TCPS_ESTABLISHED) {\ | |
79 | if (ti->ti_flags & TH_PUSH) \ | |
80 | tp->t_flags |= TF_ACKNOW; \ | |
81 | else \ | |
82 | tp->t_flags |= TF_DELACK; \ | |
83 | (tp)->rcv_nxt += (ti)->ti_len; \ | |
84 | flags = (ti)->ti_flags & TH_FIN; \ | |
85 | tcpstat.tcps_rcvpack++;\ | |
86 | tcpstat.tcps_rcvbyte += (ti)->ti_len;\ | |
87 | if (so->so_emu) { \ | |
88 | if (tcp_emu((so),(m))) sbappend((so), (m)); \ | |
89 | } else \ | |
90 | sbappend((so), (m)); \ | |
91 | /* sorwakeup(so); */ \ | |
92 | } else {\ | |
93 | (flags) = tcp_reass((tp), (ti), (m)); \ | |
94 | tp->t_flags |= TF_ACKNOW; \ | |
95 | } \ | |
96 | } | |
97 | #else | |
98 | #define TCP_REASS(tp, ti, m, so, flags) { \ | |
99 | if ((ti)->ti_seq == (tp)->rcv_nxt && \ | |
100 | (tp)->seg_next == (tcpiphdrp_32)(tp) && \ | |
101 | (tp)->t_state == TCPS_ESTABLISHED) { \ | |
102 | tp->t_flags |= TF_DELACK; \ | |
103 | (tp)->rcv_nxt += (ti)->ti_len; \ | |
104 | flags = (ti)->ti_flags & TH_FIN; \ | |
105 | tcpstat.tcps_rcvpack++;\ | |
106 | tcpstat.tcps_rcvbyte += (ti)->ti_len;\ | |
107 | if (so->so_emu) { \ | |
108 | if (tcp_emu((so),(m))) sbappend(so, (m)); \ | |
109 | } else \ | |
110 | sbappend((so), (m)); \ | |
111 | /* sorwakeup(so); */ \ | |
112 | } else { \ | |
113 | (flags) = tcp_reass((tp), (ti), (m)); \ | |
114 | tp->t_flags |= TF_ACKNOW; \ | |
115 | } \ | |
116 | } | |
117 | #endif | |
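/*
 * Usage sketch (illustration only; the real call site is in tcp_input()
 * below): "flags" must be an int lvalue and ends up holding TH_FIN once
 * reassembly includes the peer's FIN, exactly as tcp_reass() would return.
 */
#if 0
	int reass_flags = 0;

	TCP_REASS(tp, ti, m, so, reass_flags);
	if (reass_flags & TH_FIN) {
		/* the FIN is now in sequence; state transitions follow */
	}
#endif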
118 | ||
119 | int | |
120 | tcp_reass(tp, ti, m) | |
121 | register struct tcpcb *tp; | |
122 | register struct tcpiphdr *ti; | |
123 | struct mbuf *m; | |
124 | { | |
125 | register struct tcpiphdr *q; | |
126 | struct socket *so = tp->t_socket; | |
127 | int flags; | |
128 | ||
129 | /* | |
130 | * Call with ti==0 after becoming established to | |
131 | * force pre-ESTABLISHED data up to user socket. | |
132 | */ | |
133 | if (ti == 0) | |
134 | goto present; | |
135 | ||
136 | /* | |
137 | * Find a segment which begins after this one does. | |
138 | */ | |
139 | for (q = (struct tcpiphdr *)tp->seg_next; q != (struct tcpiphdr *)tp; | |
140 | q = (struct tcpiphdr *)q->ti_next) | |
141 | if (SEQ_GT(q->ti_seq, ti->ti_seq)) | |
142 | break; | |
143 | ||
144 | /* | |
145 | * If there is a preceding segment, it may provide some of | |
146 | * our data already. If so, drop the data from the incoming | |
147 | * segment. If it provides all of our data, drop us. | |
148 | */ | |
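/*
 * Worked example (illustrative figures): if the preceding segment q covers
 * sequence numbers 1000-1499 (ti_seq 1000, ti_len 500) and the new segment
 * starts at 1300, then i = 1000 + 500 - 1300 = 200, so the 200 duplicate
 * bytes are trimmed off the front of the new segment before it is queued.
 */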
149 | if ((struct tcpiphdr *)q->ti_prev != (struct tcpiphdr *)tp) { | |
150 | register int i; | |
151 | q = (struct tcpiphdr *)q->ti_prev; | |
152 | /* conversion to int (in i) handles seq wraparound */ | |
153 | i = q->ti_seq + q->ti_len - ti->ti_seq; | |
154 | if (i > 0) { | |
155 | if (i >= ti->ti_len) { | |
156 | tcpstat.tcps_rcvduppack++; | |
157 | tcpstat.tcps_rcvdupbyte += ti->ti_len; | |
158 | m_freem(m); | |
159 | /* | |
160 | * Try to present any queued data | |
161 | * at the left window edge to the user. | |
162 | * This is needed after the 3-WHS | |
163 | * completes. | |
164 | */ | |
165 | goto present; /* ??? */ | |
166 | } | |
167 | m_adj(m, i); | |
168 | ti->ti_len -= i; | |
169 | ti->ti_seq += i; | |
170 | } | |
171 | q = (struct tcpiphdr *)(q->ti_next); | |
172 | } | |
173 | tcpstat.tcps_rcvoopack++; | |
174 | tcpstat.tcps_rcvoobyte += ti->ti_len; | |
175 | REASS_MBUF(ti) = (mbufp_32) m; /* XXX */ | |
176 | ||
177 | /* | |
178 | * While we overlap succeeding segments trim them or, | |
179 | * if they are completely covered, dequeue them. | |
180 | */ | |
181 | while (q != (struct tcpiphdr *)tp) { | |
182 | register int i = (ti->ti_seq + ti->ti_len) - q->ti_seq; | |
183 | if (i <= 0) | |
184 | break; | |
185 | if (i < q->ti_len) { | |
186 | q->ti_seq += i; | |
187 | q->ti_len -= i; | |
188 | m_adj((struct mbuf *) REASS_MBUF(q), i); | |
189 | break; | |
190 | } | |
191 | q = (struct tcpiphdr *)q->ti_next; | |
192 | m = (struct mbuf *) REASS_MBUF((struct tcpiphdr *)q->ti_prev); | |
193 | remque_32((void *)(q->ti_prev)); | |
194 | m_freem(m); | |
195 | } | |
196 | ||
197 | /* | |
198 | * Stick new segment in its place. | |
199 | */ | |
200 | insque_32(ti, (void *)(q->ti_prev)); | |
201 | ||
202 | present: | |
203 | /* | |
204 | * Present data to user, advancing rcv_nxt through | |
205 | * completed sequence space. | |
206 | */ | |
207 | if (!TCPS_HAVEESTABLISHED(tp->t_state)) | |
208 | return (0); | |
209 | ti = (struct tcpiphdr *) tp->seg_next; | |
210 | if (ti == (struct tcpiphdr *)tp || ti->ti_seq != tp->rcv_nxt) | |
211 | return (0); | |
212 | if (tp->t_state == TCPS_SYN_RECEIVED && ti->ti_len) | |
213 | return (0); | |
214 | do { | |
215 | tp->rcv_nxt += ti->ti_len; | |
216 | flags = ti->ti_flags & TH_FIN; | |
217 | remque_32(ti); | |
218 | m = (struct mbuf *) REASS_MBUF(ti); /* XXX */ | |
219 | ti = (struct tcpiphdr *)ti->ti_next; | |
220 | /* if (so->so_state & SS_FCANTRCVMORE) */ | |
221 | if (so->so_state & SS_FCANTSENDMORE) | |
222 | m_freem(m); | |
223 | else { | |
224 | if (so->so_emu) { | |
225 | if (tcp_emu(so,m)) sbappend(so, m); | |
226 | } else | |
227 | sbappend(so, m); | |
228 | } | |
229 | } while (ti != (struct tcpiphdr *)tp && ti->ti_seq == tp->rcv_nxt); | |
230 | /* sorwakeup(so); */ | |
231 | return (flags); | |
232 | } | |
233 | ||
234 | /* | |
235 | * TCP input routine, follows pages 65-76 of the | |
236 | * protocol specification dated September, 1981 very closely. | |
237 | */ | |
238 | void | |
239 | tcp_input(m, iphlen, inso) | |
240 | register struct mbuf *m; | |
241 | int iphlen; | |
242 | struct socket *inso; | |
243 | { | |
244 | struct ip save_ip, *ip; | |
245 | register struct tcpiphdr *ti; | |
246 | caddr_t optp = NULL; | |
247 | int optlen = 0; | |
248 | int len, tlen, off; | |
249 | register struct tcpcb *tp = 0; | |
250 | register int tiflags; | |
251 | struct socket *so = 0; | |
252 | int todrop, acked, ourfinisacked, needoutput = 0; | |
253 | /* int dropsocket = 0; */ | |
254 | int iss = 0; | |
255 | u_long tiwin; | |
256 | int ret; | |
257 | /* int ts_present = 0; */ | |
258 | ||
259 | DEBUG_CALL("tcp_input"); | |
260 | DEBUG_ARGS((dfd," m = %8lx iphlen = %2d inso = %lx\n", | |
261 | (long )m, iphlen, (long )inso )); | |
262 | ||
263 | /* | |
264 | * If called with m == 0, then we're continuing the connect | |
265 | */ | |
266 | if (m == NULL) { | |
267 | so = inso; | |
268 | ||
269 | /* Re-set a few variables */ | |
270 | tp = sototcpcb(so); | |
271 | m = so->so_m; | |
272 | so->so_m = 0; | |
273 | ti = so->so_ti; | |
274 | tiwin = ti->ti_win; | |
275 | tiflags = ti->ti_flags; | |
276 | ||
277 | goto cont_conn; | |
278 | } | |
279 | ||
280 | ||
281 | tcpstat.tcps_rcvtotal++; | |
282 | /* | |
283 | * Get IP and TCP header together in first mbuf. | |
284 | * Note: IP leaves IP header in first mbuf. | |
285 | */ | |
286 | ti = mtod(m, struct tcpiphdr *); | |
287 | if (iphlen > sizeof(struct ip )) { | |
288 | ip_stripoptions(m, (struct mbuf *)0); | |
289 | iphlen=sizeof(struct ip ); | |
290 | } | |
291 | /* XXX Check if too short */ | |
292 | ||
293 | ||
294 | /* | |
295 | * Save a copy of the IP header in case we want to restore it | |
296 | * for sending an ICMP error message in response. | |
297 | */ | |
298 | ip=mtod(m, struct ip *); | |
299 | save_ip = *ip; | |
300 | save_ip.ip_len+= iphlen; | |
301 | ||
302 | /* | |
303 | * Checksum extended TCP header and data. | |
304 | */ | |
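/*
 * In the BSD-derived struct tcpiphdr the ti_next/ti_prev/ti_x1 fields
 * cleared just below overlay the leading bytes of the IP header, so zeroing
 * them and storing the TCP segment length in ti_len rewrites the header in
 * place into the TCP pseudo-header (source, destination, zero, protocol,
 * length) that cksum() then folds in together with the TCP header and data.
 */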
305 | tlen = ((struct ip *)ti)->ip_len; | |
306 | ti->ti_next = ti->ti_prev = 0; | |
307 | ti->ti_x1 = 0; | |
308 | ti->ti_len = htons((u_int16_t)tlen); | |
309 | len = sizeof(struct ip ) + tlen; | |
310 | /* keep checksum for ICMP reply | |
311 | * ti->ti_sum = cksum(m, len); | |
312 | * if (ti->ti_sum) { */ | |
313 | if(cksum(m, len)) { | |
314 | tcpstat.tcps_rcvbadsum++; | |
315 | goto drop; | |
316 | } | |
317 | ||
318 | /* | |
319 | * Check that TCP offset makes sense, | |
320 | * pull out TCP options and adjust length. XXX | |
321 | */ | |
322 | off = ti->ti_off << 2; | |
323 | if (off < sizeof (struct tcphdr) || off > tlen) { | |
324 | tcpstat.tcps_rcvbadoff++; | |
325 | goto drop; | |
326 | } | |
327 | tlen -= off; | |
328 | ti->ti_len = tlen; | |
329 | if (off > sizeof (struct tcphdr)) { | |
330 | optlen = off - sizeof (struct tcphdr); | |
331 | optp = mtod(m, caddr_t) + sizeof (struct tcpiphdr); | |
332 | ||
333 | /* | |
334 | * Do quick retrieval of timestamp options ("options | |
335 | * prediction?"). If timestamp is the only option and it's | |
336 | * formatted as recommended in RFC 1323 appendix A, we | |
337 | * quickly get the values now and not bother calling | |
338 | * tcp_dooptions(), etc. | |
339 | */ | |
340 | /* if ((optlen == TCPOLEN_TSTAMP_APPA || | |
341 | * (optlen > TCPOLEN_TSTAMP_APPA && | |
342 | * optp[TCPOLEN_TSTAMP_APPA] == TCPOPT_EOL)) && | |
343 | * *(u_int32_t *)optp == htonl(TCPOPT_TSTAMP_HDR) && | |
344 | * (ti->ti_flags & TH_SYN) == 0) { | |
345 | * ts_present = 1; | |
346 | * ts_val = ntohl(*(u_int32_t *)(optp + 4)); | |
347 | * ts_ecr = ntohl(*(u_int32_t *)(optp + 8)); | |
348 | * optp = NULL; / * we've parsed the options * / | |
349 | * } | |
350 | */ | |
351 | } | |
352 | tiflags = ti->ti_flags; | |
353 | ||
354 | /* | |
355 | * Convert TCP protocol specific fields to host format. | |
356 | */ | |
357 | NTOHL(ti->ti_seq); | |
358 | NTOHL(ti->ti_ack); | |
359 | NTOHS(ti->ti_win); | |
360 | NTOHS(ti->ti_urp); | |
361 | ||
362 | /* | |
363 | * Drop TCP, IP headers and TCP options. | |
364 | */ | |
365 | m->m_data += sizeof(struct tcpiphdr)+off-sizeof(struct tcphdr); | |
366 | m->m_len -= sizeof(struct tcpiphdr)+off-sizeof(struct tcphdr); | |
367 | ||
368 | /* | |
369 | * Locate pcb for segment. | |
370 | */ | |
371 | findso: | |
372 | so = tcp_last_so; | |
373 | if (so->so_fport != ti->ti_dport || | |
374 | so->so_lport != ti->ti_sport || | |
375 | so->so_laddr.s_addr != ti->ti_src.s_addr || | |
376 | so->so_faddr.s_addr != ti->ti_dst.s_addr) { | |
377 | so = solookup(&tcb, ti->ti_src, ti->ti_sport, | |
378 | ti->ti_dst, ti->ti_dport); | |
379 | if (so) | |
380 | tcp_last_so = so; | |
381 | ++tcpstat.tcps_socachemiss; | |
382 | } | |
383 | ||
384 | /* | |
385 | * If the state is CLOSED (i.e., TCB does not exist) then | |
386 | * all data in the incoming segment is discarded. | |
387 | * If the TCB exists but is in CLOSED state, it is embryonic, | |
388 | * but should either do a listen or a connect soon. | |
389 | * | |
390 | * state == CLOSED means we've done socreate() but haven't | |
391 | * attached it to a protocol yet... | |
392 | * | |
393 | * XXX If a TCB does not exist, and the TH_SYN flag is | |
394 | * the only flag set, then create a session, mark it | |
395 | * as if it was LISTENING, and continue... | |
396 | */ | |
397 | if (so == 0) { | |
398 | if ((tiflags & (TH_SYN|TH_FIN|TH_RST|TH_URG|TH_ACK)) != TH_SYN) | |
399 | goto dropwithreset; | |
400 | ||
401 | if ((so = socreate()) == NULL) | |
402 | goto dropwithreset; | |
403 | if (tcp_attach(so) < 0) { | |
404 | free(so); /* Not sofree (if it failed, it's not insqued) */ | |
405 | goto dropwithreset; | |
406 | } | |
407 | ||
408 | sbreserve(&so->so_snd, tcp_sndspace); | |
409 | sbreserve(&so->so_rcv, tcp_rcvspace); | |
410 | ||
411 | /* tcp_last_so = so; */ /* XXX ? */ | |
412 | /* tp = sototcpcb(so); */ | |
413 | ||
414 | so->so_laddr = ti->ti_src; | |
415 | so->so_lport = ti->ti_sport; | |
416 | so->so_faddr = ti->ti_dst; | |
417 | so->so_fport = ti->ti_dport; | |
418 | ||
419 | if ((so->so_iptos = tcp_tos(so)) == 0) | |
420 | so->so_iptos = ((struct ip *)ti)->ip_tos; | |
421 | ||
422 | tp = sototcpcb(so); | |
423 | tp->t_state = TCPS_LISTEN; | |
424 | } | |
425 | ||
426 | /* | |
427 | * If this is a still-connecting socket, this is probably | |
428 | * a retransmit of the SYN. Whether it's a retransmitted SYN | |
429 | * or something else, we nuke it. | |
430 | */ | |
431 | if (so->so_state & SS_ISFCONNECTING) | |
432 | goto drop; | |
433 | ||
434 | tp = sototcpcb(so); | |
435 | ||
436 | /* XXX Should never fail */ | |
437 | if (tp == 0) | |
438 | goto dropwithreset; | |
439 | if (tp->t_state == TCPS_CLOSED) | |
440 | goto drop; | |
441 | ||
442 | /* Unscale the window into a 32-bit value. */ | |
443 | /* if ((tiflags & TH_SYN) == 0) | |
444 | * tiwin = ti->ti_win << tp->snd_scale; | |
445 | * else | |
446 | */ | |
447 | tiwin = ti->ti_win; | |
448 | ||
449 | /* | |
450 | * Segment received on connection. | |
451 | * Reset idle time and keep-alive timer. | |
452 | */ | |
453 | tp->t_idle = 0; | |
454 | if (so_options) | |
455 | tp->t_timer[TCPT_KEEP] = tcp_keepintvl; | |
456 | else | |
457 | tp->t_timer[TCPT_KEEP] = tcp_keepidle; | |
458 | ||
459 | /* | |
460 | * Process options if not in LISTEN state, | |
461 | * else do it below (after getting remote address). | |
462 | */ | |
463 | if (optp && tp->t_state != TCPS_LISTEN) | |
464 | tcp_dooptions(tp, (u_char *)optp, optlen, ti); | |
465 | /* , */ | |
466 | /* &ts_present, &ts_val, &ts_ecr); */ | |
467 | ||
468 | /* | |
469 | * Header prediction: check for the two common cases | |
470 | * of a uni-directional data xfer. If the packet has | |
471 | * no control flags, is in-sequence, the window didn't | |
472 | * change and we're not retransmitting, it's a | |
473 | * candidate. If the length is zero and the ack moved | |
474 | * forward, we're the sender side of the xfer. Just | |
475 | * free the data acked & wake any higher level process | |
476 | * that was blocked waiting for space. If the length | |
477 | * is non-zero and the ack didn't move, we're the | |
478 | * receiver side. If we're getting packets in-order | |
479 | * (the reassembly queue is empty), add the data to | |
480 | * the socket buffer and note that we need a delayed ack. | |
481 | * | |
482 | * XXX Some of these tests are not needed | |
483 | * eg: the tiwin == tp->snd_wnd prevents many more | |
484 | * predictions.. with no *real* advantage.. | |
485 | */ | |
486 | if (tp->t_state == TCPS_ESTABLISHED && | |
487 | (tiflags & (TH_SYN|TH_FIN|TH_RST|TH_URG|TH_ACK)) == TH_ACK && | |
488 | /* (!ts_present || TSTMP_GEQ(ts_val, tp->ts_recent)) && */ | |
489 | ti->ti_seq == tp->rcv_nxt && | |
490 | tiwin && tiwin == tp->snd_wnd && | |
491 | tp->snd_nxt == tp->snd_max) { | |
492 | /* | |
493 | * If last ACK falls within this segment's sequence numbers, | |
494 | * record the timestamp. | |
495 | */ | |
496 | /* if (ts_present && SEQ_LEQ(ti->ti_seq, tp->last_ack_sent) && | |
497 | * SEQ_LT(tp->last_ack_sent, ti->ti_seq + ti->ti_len)) { | |
498 | * tp->ts_recent_age = tcp_now; | |
499 | * tp->ts_recent = ts_val; | |
500 | * } | |
501 | */ | |
502 | if (ti->ti_len == 0) { | |
503 | if (SEQ_GT(ti->ti_ack, tp->snd_una) && | |
504 | SEQ_LEQ(ti->ti_ack, tp->snd_max) && | |
505 | tp->snd_cwnd >= tp->snd_wnd) { | |
506 | /* | |
507 | * this is a pure ack for outstanding data. | |
508 | */ | |
509 | ++tcpstat.tcps_predack; | |
510 | /* if (ts_present) | |
511 | * tcp_xmit_timer(tp, tcp_now-ts_ecr+1); | |
512 | * else | |
513 | */ if (tp->t_rtt && | |
514 | SEQ_GT(ti->ti_ack, tp->t_rtseq)) | |
515 | tcp_xmit_timer(tp, tp->t_rtt); | |
516 | acked = ti->ti_ack - tp->snd_una; | |
517 | tcpstat.tcps_rcvackpack++; | |
518 | tcpstat.tcps_rcvackbyte += acked; | |
519 | sbdrop(&so->so_snd, acked); | |
520 | tp->snd_una = ti->ti_ack; | |
521 | m_freem(m); | |
522 | ||
523 | /* | |
524 | * If all outstanding data are acked, stop | |
525 | * retransmit timer, otherwise restart timer | |
526 | * using current (possibly backed-off) value. | |
527 | * If process is waiting for space, | |
528 | * wakeup/selwakeup/signal. If data | |
529 | * are ready to send, let tcp_output | |
530 | * decide between more output or persist. | |
531 | */ | |
532 | if (tp->snd_una == tp->snd_max) | |
533 | tp->t_timer[TCPT_REXMT] = 0; | |
534 | else if (tp->t_timer[TCPT_PERSIST] == 0) | |
535 | tp->t_timer[TCPT_REXMT] = tp->t_rxtcur; | |
536 | ||
537 | /* | |
538 | * There's room in so_snd, sowwakeup will read() | |
539 | * from the socket if we can | |
540 | */ | |
541 | /* if (so->so_snd.sb_flags & SB_NOTIFY) | |
542 | * sowwakeup(so); | |
543 | */ | |
544 | /* | |
545 | * This is called because sowwakeup might have | |
546 | * put data into so_snd. Since we don't do sowwakeup, | |
547 | * we don't need this.. XXX??? | |
548 | */ | |
549 | if (so->so_snd.sb_cc) | |
550 | (void) tcp_output(tp); | |
551 | ||
552 | return; | |
553 | } | |
554 | } else if (ti->ti_ack == tp->snd_una && | |
555 | tp->seg_next == (tcpiphdrp_32)tp && | |
556 | ti->ti_len <= sbspace(&so->so_rcv)) { | |
557 | /* | |
558 | * this is a pure, in-sequence data packet | |
559 | * with nothing on the reassembly queue and | |
560 | * we have enough buffer space to take it. | |
561 | */ | |
562 | ++tcpstat.tcps_preddat; | |
563 | tp->rcv_nxt += ti->ti_len; | |
564 | tcpstat.tcps_rcvpack++; | |
565 | tcpstat.tcps_rcvbyte += ti->ti_len; | |
566 | /* | |
567 | * Add data to socket buffer. | |
568 | */ | |
569 | if (so->so_emu) { | |
570 | if (tcp_emu(so,m)) sbappend(so, m); | |
571 | } else | |
572 | sbappend(so, m); | |
573 | ||
574 | /* | |
575 | * XXX This is called when data arrives. Later, check | |
576 | * if we can actually write() to the socket | |
577 | * XXX Need to check? It's NON_BLOCKING | |
578 | */ | |
579 | /* sorwakeup(so); */ | |
580 | ||
581 | /* | |
582 | * If this is a short packet, then ACK now - with Nagle | |
583 | * congestion avoidance the sender won't send more until | |
584 | * it gets an ACK. | |
585 | * | |
586 | * Here are 3 interpretations of what should happen. | |
587 | * The best (for me) is to delay-ack everything except | |
588 | * if it's a one-byte packet containing an ESC | |
589 | * (this means it's an arrow key (or similar) sent using | |
590 | * Nagle, hence there will be no echo) | |
591 | * The first of these is the original, the second is the | |
592 | * middle ground between the other 2 | |
593 | */ | |
594 | /* if (((unsigned)ti->ti_len < tp->t_maxseg)) { | |
595 | */ | |
596 | /* if (((unsigned)ti->ti_len < tp->t_maxseg && | |
597 | * (so->so_iptos & IPTOS_LOWDELAY) == 0) || | |
598 | * ((so->so_iptos & IPTOS_LOWDELAY) && | |
599 | * ((struct tcpiphdr_2 *)ti)->first_char == (char)27)) { | |
600 | */ | |
601 | if ((unsigned)ti->ti_len == 1 && | |
602 | ((struct tcpiphdr_2 *)ti)->first_char == (char)27) { | |
603 | tp->t_flags |= TF_ACKNOW; | |
604 | tcp_output(tp); | |
605 | } else { | |
606 | tp->t_flags |= TF_DELACK; | |
607 | } | |
608 | return; | |
609 | } | |
610 | } /* header prediction */ | |
611 | /* | |
612 | * Calculate amount of space in receive window, | |
613 | * and then do TCP input processing. | |
614 | * Receive window is amount of space in rcv queue, | |
615 | * but not less than advertised window. | |
616 | */ | |
617 | { int win; | |
618 | win = sbspace(&so->so_rcv); | |
619 | if (win < 0) | |
620 | win = 0; | |
621 | tp->rcv_wnd = max(win, (int)(tp->rcv_adv - tp->rcv_nxt)); | |
622 | } | |
623 | ||
624 | switch (tp->t_state) { | |
625 | ||
626 | /* | |
627 | * If the state is LISTEN then ignore segment if it contains an RST. | |
628 | * If the segment contains an ACK then it is bad and send a RST. | |
629 | * If it does not contain a SYN then it is not interesting; drop it. | |
630 | * Don't bother responding if the destination was a broadcast. | |
631 | * Otherwise initialize tp->rcv_nxt, and tp->irs, select an initial | |
632 | * tp->iss, and send a segment: | |
633 | * <SEQ=ISS><ACK=RCV_NXT><CTL=SYN,ACK> | |
634 | * Also initialize tp->snd_nxt to tp->iss+1 and tp->snd_una to tp->iss. | |
635 | * Fill in remote peer address fields if not previously specified. | |
636 | * Enter SYN_RECEIVED state, and process any other fields of this | |
637 | * segment in this state. | |
638 | */ | |
639 | case TCPS_LISTEN: { | |
640 | ||
641 | if (tiflags & TH_RST) | |
642 | goto drop; | |
643 | if (tiflags & TH_ACK) | |
644 | goto dropwithreset; | |
645 | if ((tiflags & TH_SYN) == 0) | |
646 | goto drop; | |
647 | ||
648 | /* | |
649 | * This has way too many gotos... | |
650 | * But a bit of spaghetti code never hurt anybody :) | |
651 | */ | |
652 | ||
653 | /* | |
654 | * If this is destined for the control address, then flag to | |
655 | * tcp_ctl once connected, otherwise connect | |
656 | */ | |
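/*
 * The htonl(0xffffff00) mask keeps only the top 24 bits, so this test
 * matches any address inside the special /24 network; the low byte
 * (lastbyte below) then selects the emulated service, e.g. CTL_ALIAS
 * or CTL_DNS.
 */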
657 | if ((so->so_faddr.s_addr&htonl(0xffffff00)) == special_addr.s_addr) { | |
658 | int lastbyte=ntohl(so->so_faddr.s_addr) & 0xff; | |
659 | if (lastbyte!=CTL_ALIAS && lastbyte!=CTL_DNS) { | |
660 | #if 0 | |
661 | if(lastbyte==CTL_CMD || lastbyte==CTL_EXEC) { | |
662 | /* Command or exec address */ | |
663 | so->so_state |= SS_CTL; | |
664 | } else { | |
665 | /* May be an added exec */ | |
666 | struct ex_list *ex_ptr; | |
667 | ||
668 | for(ex_ptr = exec_list; ex_ptr; ex_ptr = ex_ptr->ex_next) { | |
669 | if(ex_ptr->ex_fport == so->so_fport && | |
670 | lastbyte == ex_ptr->ex_addr) { | |
671 | so->so_state |= SS_CTL; | |
672 | break; | |
673 | } | |
674 | } | |
675 | } | |
676 | if(so->so_state & SS_CTL) goto cont_input; | |
677 | #endif | |
678 | } | |
679 | /* CTL_ALIAS: Do nothing, tcp_fconnect will be called on it */ | |
680 | } | |
681 | ||
682 | if (so->so_emu & EMU_NOCONNECT) { | |
683 | so->so_emu &= ~EMU_NOCONNECT; | |
684 | goto cont_input; | |
685 | } | |
686 | ||
687 | if(tcp_fconnect(so) == -1 && errno != EINPROGRESS) { | |
688 | u_char code=ICMP_UNREACH_NET; | |
689 | DEBUG_MISC((dfd," tcp fconnect errno = %d-%s\n", | |
690 | errno,strerror(errno))); | |
691 | if(errno == ECONNREFUSED) { | |
692 | /* ACK the SYN, send RST to refuse the connection */ | |
693 | tcp_respond(tp, ti, m, ti->ti_seq+1, (tcp_seq)0, | |
694 | TH_RST|TH_ACK); | |
695 | } else { | |
696 | if(errno == EHOSTUNREACH) code=ICMP_UNREACH_HOST; | |
697 | HTONL(ti->ti_seq); /* restore tcp header */ | |
698 | HTONL(ti->ti_ack); | |
699 | HTONS(ti->ti_win); | |
700 | HTONS(ti->ti_urp); | |
701 | m->m_data -= sizeof(struct tcpiphdr)+off-sizeof(struct tcphdr); | |
702 | m->m_len += sizeof(struct tcpiphdr)+off-sizeof(struct tcphdr); | |
703 | *ip=save_ip; | |
704 | icmp_error(m, ICMP_UNREACH,code, 0,strerror(errno)); | |
705 | } | |
706 | tp = tcp_close(tp); | |
707 | m_free(m); | |
708 | } else { | |
709 | /* | |
710 | * Haven't connected yet, save the current mbuf | |
711 | * and ti, and return | |
712 | * XXX Some OS's don't tell us whether the connect() | |
713 | * succeeded or not. So we must time it out. | |
714 | */ | |
715 | so->so_m = m; | |
716 | so->so_ti = ti; | |
717 | tp->t_timer[TCPT_KEEP] = TCPTV_KEEP_INIT; | |
718 | tp->t_state = TCPS_SYN_RECEIVED; | |
719 | } | |
720 | return; | |
721 | ||
722 | cont_conn: | |
723 | /* m==NULL | |
724 | * Check if the connect succeeded | |
725 | */ | |
726 | if (so->so_state & SS_NOFDREF) { | |
727 | tp = tcp_close(tp); | |
728 | goto dropwithreset; | |
729 | } | |
730 | cont_input: | |
731 | tcp_template(tp); | |
732 | ||
733 | if (optp) | |
734 | tcp_dooptions(tp, (u_char *)optp, optlen, ti); | |
735 | /* , */ | |
736 | /* &ts_present, &ts_val, &ts_ecr); */ | |
737 | ||
738 | if (iss) | |
739 | tp->iss = iss; | |
740 | else | |
741 | tp->iss = tcp_iss; | |
742 | tcp_iss += TCP_ISSINCR/2; | |
743 | tp->irs = ti->ti_seq; | |
744 | tcp_sendseqinit(tp); | |
745 | tcp_rcvseqinit(tp); | |
746 | tp->t_flags |= TF_ACKNOW; | |
747 | tp->t_state = TCPS_SYN_RECEIVED; | |
748 | tp->t_timer[TCPT_KEEP] = TCPTV_KEEP_INIT; | |
749 | tcpstat.tcps_accepts++; | |
750 | goto trimthenstep6; | |
751 | } /* case TCPS_LISTEN */ | |
752 | ||
753 | /* | |
754 | * If the state is SYN_SENT: | |
755 | * if seg contains an ACK, but not for our SYN, drop the input. | |
756 | * if seg contains a RST, then drop the connection. | |
757 | * if seg does not contain SYN, then drop it. | |
758 | * Otherwise this is an acceptable SYN segment | |
759 | * initialize tp->rcv_nxt and tp->irs | |
760 | * if seg contains ack then advance tp->snd_una | |
761 | * if SYN has been acked change to ESTABLISHED else SYN_RCVD state | |
762 | * arrange for segment to be acked (eventually) | |
763 | * continue processing rest of data/controls, beginning with URG | |
764 | */ | |
765 | case TCPS_SYN_SENT: | |
766 | if ((tiflags & TH_ACK) && | |
767 | (SEQ_LEQ(ti->ti_ack, tp->iss) || | |
768 | SEQ_GT(ti->ti_ack, tp->snd_max))) | |
769 | goto dropwithreset; | |
770 | ||
771 | if (tiflags & TH_RST) { | |
772 | if (tiflags & TH_ACK) | |
773 | tp = tcp_drop(tp,0); /* XXX Check t_softerror! */ | |
774 | goto drop; | |
775 | } | |
776 | ||
777 | if ((tiflags & TH_SYN) == 0) | |
778 | goto drop; | |
779 | if (tiflags & TH_ACK) { | |
780 | tp->snd_una = ti->ti_ack; | |
781 | if (SEQ_LT(tp->snd_nxt, tp->snd_una)) | |
782 | tp->snd_nxt = tp->snd_una; | |
783 | } | |
784 | ||
785 | tp->t_timer[TCPT_REXMT] = 0; | |
786 | tp->irs = ti->ti_seq; | |
787 | tcp_rcvseqinit(tp); | |
788 | tp->t_flags |= TF_ACKNOW; | |
789 | if (tiflags & TH_ACK && SEQ_GT(tp->snd_una, tp->iss)) { | |
790 | tcpstat.tcps_connects++; | |
791 | soisfconnected(so); | |
792 | tp->t_state = TCPS_ESTABLISHED; | |
793 | ||
794 | /* Do window scaling on this connection? */ | |
795 | /* if ((tp->t_flags & (TF_RCVD_SCALE|TF_REQ_SCALE)) == | |
796 | * (TF_RCVD_SCALE|TF_REQ_SCALE)) { | |
797 | * tp->snd_scale = tp->requested_s_scale; | |
798 | * tp->rcv_scale = tp->request_r_scale; | |
799 | * } | |
800 | */ | |
801 | (void) tcp_reass(tp, (struct tcpiphdr *)0, | |
802 | (struct mbuf *)0); | |
803 | /* | |
804 | * if we didn't have to retransmit the SYN, | |
805 | * use its rtt as our initial srtt & rtt var. | |
806 | */ | |
807 | if (tp->t_rtt) | |
808 | tcp_xmit_timer(tp, tp->t_rtt); | |
809 | } else | |
810 | tp->t_state = TCPS_SYN_RECEIVED; | |
811 | ||
812 | trimthenstep6: | |
813 | /* | |
814 | * Advance ti->ti_seq to correspond to first data byte. | |
815 | * If data, trim to stay within window, | |
816 | * dropping FIN if necessary. | |
817 | */ | |
818 | ti->ti_seq++; | |
819 | if (ti->ti_len > tp->rcv_wnd) { | |
820 | todrop = ti->ti_len - tp->rcv_wnd; | |
821 | m_adj(m, -todrop); | |
822 | ti->ti_len = tp->rcv_wnd; | |
823 | tiflags &= ~TH_FIN; | |
824 | tcpstat.tcps_rcvpackafterwin++; | |
825 | tcpstat.tcps_rcvbyteafterwin += todrop; | |
826 | } | |
827 | tp->snd_wl1 = ti->ti_seq - 1; | |
828 | tp->rcv_up = ti->ti_seq; | |
829 | goto step6; | |
830 | } /* switch tp->t_state */ | |
831 | /* | |
832 | * States other than LISTEN or SYN_SENT. | |
833 | * First check timestamp, if present. | |
834 | * Then check that at least some bytes of segment are within | |
835 | * receive window. If segment begins before rcv_nxt, | |
836 | * drop leading data (and SYN); if nothing left, just ack. | |
837 | * | |
838 | * RFC 1323 PAWS: If we have a timestamp reply on this segment | |
839 | * and it's less than ts_recent, drop it. | |
840 | */ | |
841 | /* if (ts_present && (tiflags & TH_RST) == 0 && tp->ts_recent && | |
842 | * TSTMP_LT(ts_val, tp->ts_recent)) { | |
843 | * | |
844 | */ /* Check to see if ts_recent is over 24 days old. */ | |
845 | /* if ((int)(tcp_now - tp->ts_recent_age) > TCP_PAWS_IDLE) { | |
846 | */ /* | |
847 | * * Invalidate ts_recent. If this segment updates | |
848 | * * ts_recent, the age will be reset later and ts_recent | |
849 | * * will get a valid value. If it does not, setting | |
850 | * * ts_recent to zero will at least satisfy the | |
851 | * * requirement that zero be placed in the timestamp | |
852 | * * echo reply when ts_recent isn't valid. The | |
853 | * * age isn't reset until we get a valid ts_recent | |
854 | * * because we don't want out-of-order segments to be | |
855 | * * dropped when ts_recent is old. | |
856 | * */ | |
857 | /* tp->ts_recent = 0; | |
858 | * } else { | |
859 | * tcpstat.tcps_rcvduppack++; | |
860 | * tcpstat.tcps_rcvdupbyte += ti->ti_len; | |
861 | * tcpstat.tcps_pawsdrop++; | |
862 | * goto dropafterack; | |
863 | * } | |
864 | * } | |
865 | */ | |
866 | ||
867 | todrop = tp->rcv_nxt - ti->ti_seq; | |
868 | if (todrop > 0) { | |
869 | if (tiflags & TH_SYN) { | |
870 | tiflags &= ~TH_SYN; | |
871 | ti->ti_seq++; | |
872 | if (ti->ti_urp > 1) | |
873 | ti->ti_urp--; | |
874 | else | |
875 | tiflags &= ~TH_URG; | |
876 | todrop--; | |
877 | } | |
878 | /* | |
879 | * Following if statement from Stevens, vol. 2, p. 960. | |
880 | */ | |
881 | if (todrop > ti->ti_len | |
882 | || (todrop == ti->ti_len && (tiflags & TH_FIN) == 0)) { | |
883 | /* | |
884 | * Any valid FIN must be to the left of the window. | |
885 | * At this point the FIN must be a duplicate or out | |
886 | * of sequence; drop it. | |
887 | */ | |
888 | tiflags &= ~TH_FIN; | |
889 | ||
890 | /* | |
891 | * Send an ACK to resynchronize and drop any data. | |
892 | * But keep on processing for RST or ACK. | |
893 | */ | |
894 | tp->t_flags |= TF_ACKNOW; | |
895 | todrop = ti->ti_len; | |
896 | tcpstat.tcps_rcvduppack++; | |
897 | tcpstat.tcps_rcvdupbyte += todrop; | |
898 | } else { | |
899 | tcpstat.tcps_rcvpartduppack++; | |
900 | tcpstat.tcps_rcvpartdupbyte += todrop; | |
901 | } | |
902 | m_adj(m, todrop); | |
903 | ti->ti_seq += todrop; | |
904 | ti->ti_len -= todrop; | |
905 | if (ti->ti_urp > todrop) | |
906 | ti->ti_urp -= todrop; | |
907 | else { | |
908 | tiflags &= ~TH_URG; | |
909 | ti->ti_urp = 0; | |
910 | } | |
911 | } | |
912 | /* | |
913 | * If new data are received on a connection after the | |
914 | * user processes are gone, then RST the other end. | |
915 | */ | |
916 | if ((so->so_state & SS_NOFDREF) && | |
917 | tp->t_state > TCPS_CLOSE_WAIT && ti->ti_len) { | |
918 | tp = tcp_close(tp); | |
919 | tcpstat.tcps_rcvafterclose++; | |
920 | goto dropwithreset; | |
921 | } | |
922 | ||
923 | /* | |
924 | * If segment ends after window, drop trailing data | |
925 | * (and PUSH and FIN); if nothing left, just ACK. | |
926 | */ | |
927 | todrop = (ti->ti_seq+ti->ti_len) - (tp->rcv_nxt+tp->rcv_wnd); | |
928 | if (todrop > 0) { | |
929 | tcpstat.tcps_rcvpackafterwin++; | |
930 | if (todrop >= ti->ti_len) { | |
931 | tcpstat.tcps_rcvbyteafterwin += ti->ti_len; | |
932 | /* | |
933 | * If a new connection request is received | |
934 | * while in TIME_WAIT, drop the old connection | |
935 | * and start over if the sequence numbers | |
936 | * are above the previous ones. | |
937 | */ | |
938 | if (tiflags & TH_SYN && | |
939 | tp->t_state == TCPS_TIME_WAIT && | |
940 | SEQ_GT(ti->ti_seq, tp->rcv_nxt)) { | |
941 | iss = tp->rcv_nxt + TCP_ISSINCR; | |
942 | tp = tcp_close(tp); | |
943 | goto findso; | |
944 | } | |
945 | /* | |
946 | * If window is closed can only take segments at | |
947 | * window edge, and have to drop data and PUSH from | |
948 | * incoming segments. Continue processing, but | |
949 | * remember to ack. Otherwise, drop segment | |
950 | * and ack. | |
951 | */ | |
952 | if (tp->rcv_wnd == 0 && ti->ti_seq == tp->rcv_nxt) { | |
953 | tp->t_flags |= TF_ACKNOW; | |
954 | tcpstat.tcps_rcvwinprobe++; | |
955 | } else | |
956 | goto dropafterack; | |
957 | } else | |
958 | tcpstat.tcps_rcvbyteafterwin += todrop; | |
959 | m_adj(m, -todrop); | |
960 | ti->ti_len -= todrop; | |
961 | tiflags &= ~(TH_PUSH|TH_FIN); | |
962 | } | |
963 | ||
964 | /* | |
965 | * If last ACK falls within this segment's sequence numbers, | |
966 | * record its timestamp. | |
967 | */ | |
968 | /* if (ts_present && SEQ_LEQ(ti->ti_seq, tp->last_ack_sent) && | |
969 | * SEQ_LT(tp->last_ack_sent, ti->ti_seq + ti->ti_len + | |
970 | * ((tiflags & (TH_SYN|TH_FIN)) != 0))) { | |
971 | * tp->ts_recent_age = tcp_now; | |
972 | * tp->ts_recent = ts_val; | |
973 | * } | |
974 | */ | |
975 | ||
976 | /* | |
977 | * If the RST bit is set examine the state: | |
978 | * SYN_RECEIVED STATE: | |
979 | * If passive open, return to LISTEN state. | |
980 | * If active open, inform user that connection was refused. | |
981 | * ESTABLISHED, FIN_WAIT_1, FIN_WAIT2, CLOSE_WAIT STATES: | |
982 | * Inform user that connection was reset, and close tcb. | |
983 | * CLOSING, LAST_ACK, TIME_WAIT STATES | |
984 | * Close the tcb. | |
985 | */ | |
986 | if (tiflags&TH_RST) switch (tp->t_state) { | |
987 | ||
988 | case TCPS_SYN_RECEIVED: | |
989 | /* so->so_error = ECONNREFUSED; */ | |
990 | goto close; | |
991 | ||
992 | case TCPS_ESTABLISHED: | |
993 | case TCPS_FIN_WAIT_1: | |
994 | case TCPS_FIN_WAIT_2: | |
995 | case TCPS_CLOSE_WAIT: | |
996 | /* so->so_error = ECONNRESET; */ | |
997 | close: | |
998 | tp->t_state = TCPS_CLOSED; | |
999 | tcpstat.tcps_drops++; | |
1000 | tp = tcp_close(tp); | |
1001 | goto drop; | |
1002 | ||
1003 | case TCPS_CLOSING: | |
1004 | case TCPS_LAST_ACK: | |
1005 | case TCPS_TIME_WAIT: | |
1006 | tp = tcp_close(tp); | |
1007 | goto drop; | |
1008 | } | |
1009 | ||
1010 | /* | |
1011 | * If a SYN is in the window, then this is an | |
1012 | * error and we send an RST and drop the connection. | |
1013 | */ | |
1014 | if (tiflags & TH_SYN) { | |
1015 | tp = tcp_drop(tp,0); | |
1016 | goto dropwithreset; | |
1017 | } | |
1018 | ||
1019 | /* | |
1020 | * If the ACK bit is off we drop the segment and return. | |
1021 | */ | |
1022 | if ((tiflags & TH_ACK) == 0) goto drop; | |
1023 | ||
1024 | /* | |
1025 | * Ack processing. | |
1026 | */ | |
1027 | switch (tp->t_state) { | |
1028 | /* | |
1029 | * In SYN_RECEIVED state if the ack ACKs our SYN then enter | |
1030 | * ESTABLISHED state and continue processing, otherwise | |
1031 | * send an RST. una<=ack<=max | |
1032 | */ | |
1033 | case TCPS_SYN_RECEIVED: | |
1034 | ||
1035 | if (SEQ_GT(tp->snd_una, ti->ti_ack) || | |
1036 | SEQ_GT(ti->ti_ack, tp->snd_max)) | |
1037 | goto dropwithreset; | |
1038 | tcpstat.tcps_connects++; | |
1039 | tp->t_state = TCPS_ESTABLISHED; | |
1040 | /* | |
1041 | * The sent SYN is ack'ed with our sequence number +1 | |
1042 | * The first data byte already in the buffer will get | |
1043 | * lost if no correction is made. This is only needed for | |
1044 | * SS_CTL since the buffer is empty otherwise. | |
1045 | * tp->snd_una++; or: | |
1046 | */ | |
1047 | tp->snd_una=ti->ti_ack; | |
1048 | if (so->so_state & SS_CTL) { | |
1049 | /* So tcp_ctl reports the right state */ | |
1050 | ret = tcp_ctl(so); | |
1051 | if (ret == 1) { | |
1052 | soisfconnected(so); | |
1053 | so->so_state &= ~SS_CTL; /* success XXX */ | |
1054 | } else if (ret == 2) { | |
1055 | so->so_state = SS_NOFDREF; /* CTL_CMD */ | |
1056 | } else { | |
1057 | needoutput = 1; | |
1058 | tp->t_state = TCPS_FIN_WAIT_1; | |
1059 | } | |
1060 | } else { | |
1061 | soisfconnected(so); | |
1062 | } | |
1063 | ||
1064 | /* Do window scaling? */ | |
1065 | /* if ((tp->t_flags & (TF_RCVD_SCALE|TF_REQ_SCALE)) == | |
1066 | * (TF_RCVD_SCALE|TF_REQ_SCALE)) { | |
1067 | * tp->snd_scale = tp->requested_s_scale; | |
1068 | * tp->rcv_scale = tp->request_r_scale; | |
1069 | * } | |
1070 | */ | |
1071 | (void) tcp_reass(tp, (struct tcpiphdr *)0, (struct mbuf *)0); | |
1072 | tp->snd_wl1 = ti->ti_seq - 1; | |
1073 | /* Avoid ack processing; snd_una==ti_ack => dup ack */ | |
1074 | goto synrx_to_est; | |
1075 | /* fall into ... */ | |
1076 | ||
1077 | /* | |
1078 | * In ESTABLISHED state: drop duplicate ACKs; ACK out of range | |
1079 | * ACKs. If the ack is in the range | |
1080 | * tp->snd_una < ti->ti_ack <= tp->snd_max | |
1081 | * then advance tp->snd_una to ti->ti_ack and drop | |
1082 | * data from the retransmission queue. If this ACK reflects | |
1083 | * more up to date window information we update our window information. | |
1084 | */ | |
1085 | case TCPS_ESTABLISHED: | |
1086 | case TCPS_FIN_WAIT_1: | |
1087 | case TCPS_FIN_WAIT_2: | |
1088 | case TCPS_CLOSE_WAIT: | |
1089 | case TCPS_CLOSING: | |
1090 | case TCPS_LAST_ACK: | |
1091 | case TCPS_TIME_WAIT: | |
1092 | ||
1093 | if (SEQ_LEQ(ti->ti_ack, tp->snd_una)) { | |
1094 | if (ti->ti_len == 0 && tiwin == tp->snd_wnd) { | |
1095 | tcpstat.tcps_rcvdupack++; | |
1096 | DEBUG_MISC((dfd," dup ack m = %lx so = %lx \n", | |
1097 | (long )m, (long )so)); | |
1098 | /* | |
1099 | * If we have outstanding data (other than | |
1100 | * a window probe), this is a completely | |
1101 | * duplicate ack (ie, window info didn't | |
1102 | * change), the ack is the biggest we've | |
1103 | * seen and we've seen exactly our rexmt | |
1104 | * threshold of them, assume a packet | |
1105 | * has been dropped and retransmit it. | |
1106 | * Kludge snd_nxt & the congestion | |
1107 | * window so we send only this one | |
1108 | * packet. | |
1109 | * | |
1110 | * We know we're losing at the current | |
1111 | * window size so do congestion avoidance | |
1112 | * (set ssthresh to half the current window | |
1113 | * and pull our congestion window back to | |
1114 | * the new ssthresh). | |
1115 | * | |
1116 | * Dup acks mean that packets have left the | |
1117 | * network (they're now cached at the receiver) | |
1118 | * so bump cwnd by the amount in the receiver | |
1119 | * to keep a constant cwnd packets in the | |
1120 | * network. | |
1121 | */ | |
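/*
 * Worked example (illustrative figures): with t_maxseg 512, snd_wnd 8192
 * and snd_cwnd 8192, the third duplicate ACK sets ssthresh to
 * 8192/2/512 = 8 segments (4096 bytes), shrinks cwnd to a single segment
 * for the fast retransmit, then re-inflates it to ssthresh + 3 * 512 = 5632
 * to account for the segments the peer has already cached.
 */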
1122 | if (tp->t_timer[TCPT_REXMT] == 0 || | |
1123 | ti->ti_ack != tp->snd_una) | |
1124 | tp->t_dupacks = 0; | |
1125 | else if (++tp->t_dupacks == tcprexmtthresh) { | |
1126 | tcp_seq onxt = tp->snd_nxt; | |
1127 | u_int win = | |
1128 | min(tp->snd_wnd, tp->snd_cwnd) / 2 / | |
1129 | tp->t_maxseg; | |
1130 | ||
1131 | if (win < 2) | |
1132 | win = 2; | |
1133 | tp->snd_ssthresh = win * tp->t_maxseg; | |
1134 | tp->t_timer[TCPT_REXMT] = 0; | |
1135 | tp->t_rtt = 0; | |
1136 | tp->snd_nxt = ti->ti_ack; | |
1137 | tp->snd_cwnd = tp->t_maxseg; | |
1138 | (void) tcp_output(tp); | |
1139 | tp->snd_cwnd = tp->snd_ssthresh + | |
1140 | tp->t_maxseg * tp->t_dupacks; | |
1141 | if (SEQ_GT(onxt, tp->snd_nxt)) | |
1142 | tp->snd_nxt = onxt; | |
1143 | goto drop; | |
1144 | } else if (tp->t_dupacks > tcprexmtthresh) { | |
1145 | tp->snd_cwnd += tp->t_maxseg; | |
1146 | (void) tcp_output(tp); | |
1147 | goto drop; | |
1148 | } | |
1149 | } else | |
1150 | tp->t_dupacks = 0; | |
1151 | break; | |
1152 | } | |
1153 | synrx_to_est: | |
1154 | /* | |
1155 | * If the congestion window was inflated to account | |
1156 | * for the other side's cached packets, retract it. | |
1157 | */ | |
1158 | if (tp->t_dupacks > tcprexmtthresh && | |
1159 | tp->snd_cwnd > tp->snd_ssthresh) | |
1160 | tp->snd_cwnd = tp->snd_ssthresh; | |
1161 | tp->t_dupacks = 0; | |
1162 | if (SEQ_GT(ti->ti_ack, tp->snd_max)) { | |
1163 | tcpstat.tcps_rcvacktoomuch++; | |
1164 | goto dropafterack; | |
1165 | } | |
1166 | acked = ti->ti_ack - tp->snd_una; | |
1167 | tcpstat.tcps_rcvackpack++; | |
1168 | tcpstat.tcps_rcvackbyte += acked; | |
1169 | ||
1170 | /* | |
1171 | * If we have a timestamp reply, update smoothed | |
1172 | * round trip time. If no timestamp is present but | |
1173 | * transmit timer is running and timed sequence | |
1174 | * number was acked, update smoothed round trip time. | |
1175 | * Since we now have an rtt measurement, cancel the | |
1176 | * timer backoff (cf., Phil Karn's retransmit alg.). | |
1177 | * Recompute the initial retransmit timer. | |
1178 | */ | |
1179 | /* if (ts_present) | |
1180 | * tcp_xmit_timer(tp, tcp_now-ts_ecr+1); | |
1181 | * else | |
1182 | */ | |
1183 | if (tp->t_rtt && SEQ_GT(ti->ti_ack, tp->t_rtseq)) | |
1184 | tcp_xmit_timer(tp,tp->t_rtt); | |
1185 | ||
1186 | /* | |
1187 | * If all outstanding data is acked, stop retransmit | |
1188 | * timer and remember to restart (more output or persist). | |
1189 | * If there is more data to be acked, restart retransmit | |
1190 | * timer, using current (possibly backed-off) value. | |
1191 | */ | |
1192 | if (ti->ti_ack == tp->snd_max) { | |
1193 | tp->t_timer[TCPT_REXMT] = 0; | |
1194 | needoutput = 1; | |
1195 | } else if (tp->t_timer[TCPT_PERSIST] == 0) | |
1196 | tp->t_timer[TCPT_REXMT] = tp->t_rxtcur; | |
1197 | /* | |
1198 | * When new data is acked, open the congestion window. | |
1199 | * If the window gives us less than ssthresh packets | |
1200 | * in flight, open exponentially (maxseg per packet). | |
1201 | * Otherwise open linearly: maxseg per window | |
1202 | * (maxseg^2 / cwnd per packet). | |
1203 | */ | |
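/*
 * Example (illustrative figures): with t_maxseg 512 and snd_cwnd 4096
 * (already above ssthresh), each new ACK adds 512*512/4096 = 64 bytes, so
 * the ~8 ACKs that cover one window grow cwnd by roughly one segment per
 * round trip, i.e. linear growth in congestion avoidance.
 */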
1204 | { | |
1205 | register u_int cw = tp->snd_cwnd; | |
1206 | register u_int incr = tp->t_maxseg; | |
1207 | ||
1208 | if (cw > tp->snd_ssthresh) | |
1209 | incr = incr * incr / cw; | |
1210 | tp->snd_cwnd = min(cw + incr, TCP_MAXWIN<<tp->snd_scale); | |
1211 | } | |
1212 | if (acked > so->so_snd.sb_cc) { | |
1213 | tp->snd_wnd -= so->so_snd.sb_cc; | |
1214 | sbdrop(&so->so_snd, (int )so->so_snd.sb_cc); | |
1215 | ourfinisacked = 1; | |
1216 | } else { | |
1217 | sbdrop(&so->so_snd, acked); | |
1218 | tp->snd_wnd -= acked; | |
1219 | ourfinisacked = 0; | |
1220 | } | |
1221 | /* | |
1222 | * XXX sowwakeup is called when data is acked and there's room | |
1223 | * for more data... it should read() the socket | |
1224 | */ | |
1225 | /* if (so->so_snd.sb_flags & SB_NOTIFY) | |
1226 | * sowwakeup(so); | |
1227 | */ | |
1228 | tp->snd_una = ti->ti_ack; | |
1229 | if (SEQ_LT(tp->snd_nxt, tp->snd_una)) | |
1230 | tp->snd_nxt = tp->snd_una; | |
1231 | ||
1232 | switch (tp->t_state) { | |
1233 | ||
1234 | /* | |
1235 | * In FIN_WAIT_1 STATE in addition to the processing | |
1236 | * for the ESTABLISHED state if our FIN is now acknowledged | |
1237 | * then enter FIN_WAIT_2. | |
1238 | */ | |
1239 | case TCPS_FIN_WAIT_1: | |
1240 | if (ourfinisacked) { | |
1241 | /* | |
1242 | * If we can't receive any more | |
1243 | * data, then closing user can proceed. | |
1244 | * Starting the timer is contrary to the | |
1245 | * specification, but if we don't get a FIN | |
1246 | * we'll hang forever. | |
1247 | */ | |
1248 | if (so->so_state & SS_FCANTRCVMORE) { | |
1249 | soisfdisconnected(so); | |
1250 | tp->t_timer[TCPT_2MSL] = tcp_maxidle; | |
1251 | } | |
1252 | tp->t_state = TCPS_FIN_WAIT_2; | |
1253 | } | |
1254 | break; | |
1255 | ||
1256 | /* | |
1257 | * In CLOSING STATE in addition to the processing for | |
1258 | * the ESTABLISHED state if the ACK acknowledges our FIN | |
1259 | * then enter the TIME-WAIT state, otherwise ignore | |
1260 | * the segment. | |
1261 | */ | |
1262 | case TCPS_CLOSING: | |
1263 | if (ourfinisacked) { | |
1264 | tp->t_state = TCPS_TIME_WAIT; | |
1265 | tcp_canceltimers(tp); | |
1266 | tp->t_timer[TCPT_2MSL] = 2 * TCPTV_MSL; | |
1267 | soisfdisconnected(so); | |
1268 | } | |
1269 | break; | |
1270 | ||
1271 | /* | |
1272 | * In LAST_ACK, we may still be waiting for data to drain | |
1273 | * and/or to be acked, as well as for the ack of our FIN. | |
1274 | * If our FIN is now acknowledged, delete the TCB, | |
1275 | * enter the closed state and return. | |
1276 | */ | |
1277 | case TCPS_LAST_ACK: | |
1278 | if (ourfinisacked) { | |
1279 | tp = tcp_close(tp); | |
1280 | goto drop; | |
1281 | } | |
1282 | break; | |
1283 | ||
1284 | /* | |
1285 | * In TIME_WAIT state the only thing that should arrive | |
1286 | * is a retransmission of the remote FIN. Acknowledge | |
1287 | * it and restart the finack timer. | |
1288 | */ | |
1289 | case TCPS_TIME_WAIT: | |
1290 | tp->t_timer[TCPT_2MSL] = 2 * TCPTV_MSL; | |
1291 | goto dropafterack; | |
1292 | } | |
1293 | } /* switch(tp->t_state) */ | |
1294 | ||
1295 | step6: | |
1296 | /* | |
1297 | * Update window information. | |
1298 | * Don't look at window if no ACK: TAC's send garbage on first SYN. | |
1299 | */ | |
1300 | if ((tiflags & TH_ACK) && | |
1301 | (SEQ_LT(tp->snd_wl1, ti->ti_seq) || | |
1302 | (tp->snd_wl1 == ti->ti_seq && (SEQ_LT(tp->snd_wl2, ti->ti_ack) || | |
1303 | (tp->snd_wl2 == ti->ti_ack && tiwin > tp->snd_wnd))))) { | |
1304 | /* keep track of pure window updates */ | |
1305 | if (ti->ti_len == 0 && | |
1306 | tp->snd_wl2 == ti->ti_ack && tiwin > tp->snd_wnd) | |
1307 | tcpstat.tcps_rcvwinupd++; | |
1308 | tp->snd_wnd = tiwin; | |
1309 | tp->snd_wl1 = ti->ti_seq; | |
1310 | tp->snd_wl2 = ti->ti_ack; | |
1311 | if (tp->snd_wnd > tp->max_sndwnd) | |
1312 | tp->max_sndwnd = tp->snd_wnd; | |
1313 | needoutput = 1; | |
1314 | } | |
1315 | ||
1316 | /* | |
1317 | * Process segments with URG. | |
1318 | */ | |
1319 | if ((tiflags & TH_URG) && ti->ti_urp && | |
1320 | TCPS_HAVERCVDFIN(tp->t_state) == 0) { | |
1321 | /* | |
1322 | * This is a kludge, but if we receive and accept | |
1323 | * random urgent pointers, we'll crash in | |
1324 | * soreceive. It's hard to imagine someone | |
1325 | * actually wanting to send this much urgent data. | |
1326 | */ | |
1327 | if (ti->ti_urp + so->so_rcv.sb_cc > so->so_rcv.sb_datalen) { | |
1328 | ti->ti_urp = 0; | |
1329 | tiflags &= ~TH_URG; | |
1330 | goto dodata; | |
1331 | } | |
1332 | /* | |
1333 | * If this segment advances the known urgent pointer, | |
1334 | * then mark the data stream. This should not happen | |
1335 | * in CLOSE_WAIT, CLOSING, LAST_ACK or TIME_WAIT STATES since | |
1336 | * a FIN has been received from the remote side. | |
1337 | * In these states we ignore the URG. | |
1338 | * | |
1339 | * According to RFC961 (Assigned Protocols), | |
1340 | * the urgent pointer points to the last octet | |
1341 | * of urgent data. We continue, however, | |
1342 | * to consider it to indicate the first octet | |
1343 | * of data past the urgent section as the original | |
1344 | * spec states (in one of two places). | |
1345 | */ | |
1346 | if (SEQ_GT(ti->ti_seq+ti->ti_urp, tp->rcv_up)) { | |
1347 | tp->rcv_up = ti->ti_seq + ti->ti_urp; | |
1348 | so->so_urgc = so->so_rcv.sb_cc + | |
1349 | (tp->rcv_up - tp->rcv_nxt); /* -1; */ | |
1350 | tp->rcv_up = ti->ti_seq + ti->ti_urp; | |
1351 | ||
1352 | } | |
1353 | } else | |
1354 | /* | |
1355 | * If no out of band data is expected, | |
1356 | * pull receive urgent pointer along | |
1357 | * with the receive window. | |
1358 | */ | |
1359 | if (SEQ_GT(tp->rcv_nxt, tp->rcv_up)) | |
1360 | tp->rcv_up = tp->rcv_nxt; | |
1361 | dodata: | |
1362 | ||
1363 | /* | |
1364 | * Process the segment text, merging it into the TCP sequencing queue, | |
1365 | * and arranging for acknowledgment of receipt if necessary. | |
1366 | * This process logically involves adjusting tp->rcv_wnd as data | |
1367 | * is presented to the user (this happens in tcp_usrreq.c, | |
1368 | * case PRU_RCVD). If a FIN has already been received on this | |
1369 | * connection then we just ignore the text. | |
1370 | */ | |
1371 | if ((ti->ti_len || (tiflags&TH_FIN)) && | |
1372 | TCPS_HAVERCVDFIN(tp->t_state) == 0) { | |
1373 | TCP_REASS(tp, ti, m, so, tiflags); | |
1374 | /* | |
1375 | * Note the amount of data that peer has sent into | |
1376 | * our window, in order to estimate the sender's | |
1377 | * buffer size. | |
1378 | */ | |
1379 | len = so->so_rcv.sb_datalen - (tp->rcv_adv - tp->rcv_nxt); | |
1380 | } else { | |
1381 | m_free(m); | |
1382 | tiflags &= ~TH_FIN; | |
1383 | } | |
1384 | ||
1385 | /* | |
1386 | * If FIN is received ACK the FIN and let the user know | |
1387 | * that the connection is closing. | |
1388 | */ | |
1389 | if (tiflags & TH_FIN) { | |
1390 | if (TCPS_HAVERCVDFIN(tp->t_state) == 0) { | |
1391 | /* | |
1392 | * If we receive a FIN we can't send more data, | |
1393 | * so mark the socket SS_FDRAIN. | |
1394 | * Shutdown the socket if there is no rx data in the | |
1395 | * buffer. | |
1396 | * soread() is called on completion of shutdown() and | |
1397 | * will go to TCPS_LAST_ACK, and use tcp_output() | |
1398 | * to send the FIN. | |
1399 | */ | |
1400 | /* sofcantrcvmore(so); */ | |
1401 | sofwdrain(so); | |
1402 | ||
1403 | tp->t_flags |= TF_ACKNOW; | |
1404 | tp->rcv_nxt++; | |
1405 | } | |
1406 | switch (tp->t_state) { | |
1407 | ||
1408 | /* | |
1409 | * In SYN_RECEIVED and ESTABLISHED STATES | |
1410 | * enter the CLOSE_WAIT state. | |
1411 | */ | |
1412 | case TCPS_SYN_RECEIVED: | |
1413 | case TCPS_ESTABLISHED: | |
1414 | if(so->so_emu == EMU_CTL) /* no shutdown on socket */ | |
1415 | tp->t_state = TCPS_LAST_ACK; | |
1416 | else | |
1417 | tp->t_state = TCPS_CLOSE_WAIT; | |
1418 | break; | |
1419 | ||
1420 | /* | |
1421 | * If still in FIN_WAIT_1 STATE FIN has not been acked so | |
1422 | * enter the CLOSING state. | |
1423 | */ | |
1424 | case TCPS_FIN_WAIT_1: | |
1425 | tp->t_state = TCPS_CLOSING; | |
1426 | break; | |
1427 | ||
1428 | /* | |
1429 | * In FIN_WAIT_2 state enter the TIME_WAIT state, | |
1430 | * starting the time-wait timer, turning off the other | |
1431 | * standard timers. | |
1432 | */ | |
1433 | case TCPS_FIN_WAIT_2: | |
1434 | tp->t_state = TCPS_TIME_WAIT; | |
1435 | tcp_canceltimers(tp); | |
1436 | tp->t_timer[TCPT_2MSL] = 2 * TCPTV_MSL; | |
1437 | soisfdisconnected(so); | |
1438 | break; | |
1439 | ||
1440 | /* | |
1441 | * In TIME_WAIT state restart the 2 MSL time_wait timer. | |
1442 | */ | |
1443 | case TCPS_TIME_WAIT: | |
1444 | tp->t_timer[TCPT_2MSL] = 2 * TCPTV_MSL; | |
1445 | break; | |
1446 | } | |
1447 | } | |
1448 | ||
1449 | /* | |
1450 | * If this is a small packet, then ACK now - with Nagle | |
1451 | * congestion avoidance the sender won't send more until | |
1452 | * it gets an ACK. | |
1453 | * | |
1454 | * See above. | |
1455 | */ | |
1456 | /* if (ti->ti_len && (unsigned)ti->ti_len < tp->t_maxseg) { | |
1457 | */ | |
1458 | /* if ((ti->ti_len && (unsigned)ti->ti_len < tp->t_maxseg && | |
1459 | * (so->so_iptos & IPTOS_LOWDELAY) == 0) || | |
1460 | * ((so->so_iptos & IPTOS_LOWDELAY) && | |
1461 | * ((struct tcpiphdr_2 *)ti)->first_char == (char)27)) { | |
1462 | */ | |
1463 | if (ti->ti_len && (unsigned)ti->ti_len <= 5 && | |
1464 | ((struct tcpiphdr_2 *)ti)->first_char == (char)27) { | |
1465 | tp->t_flags |= TF_ACKNOW; | |
1466 | } | |
1467 | ||
1468 | /* | |
1469 | * Return any desired output. | |
1470 | */ | |
1471 | if (needoutput || (tp->t_flags & TF_ACKNOW)) { | |
1472 | (void) tcp_output(tp); | |
1473 | } | |
1474 | return; | |
1475 | ||
1476 | dropafterack: | |
1477 | /* | |
1478 | * Generate an ACK dropping incoming segment if it occupies | |
1479 | * sequence space, where the ACK reflects our state. | |
1480 | */ | |
1481 | if (tiflags & TH_RST) | |
1482 | goto drop; | |
1483 | m_freem(m); | |
1484 | tp->t_flags |= TF_ACKNOW; | |
1485 | (void) tcp_output(tp); | |
1486 | return; | |
1487 | ||
1488 | dropwithreset: | |
1489 | /* reuses m if m!=NULL, m_free() unnecessary */ | |
1490 | if (tiflags & TH_ACK) | |
1491 | tcp_respond(tp, ti, m, (tcp_seq)0, ti->ti_ack, TH_RST); | |
1492 | else { | |
1493 | if (tiflags & TH_SYN) ti->ti_len++; | |
1494 | tcp_respond(tp, ti, m, ti->ti_seq+ti->ti_len, (tcp_seq)0, | |
1495 | TH_RST|TH_ACK); | |
1496 | } | |
1497 | ||
1498 | return; | |
1499 | ||
1500 | drop: | |
1501 | /* | |
1502 | * Drop space held by incoming segment and return. | |
1503 | */ | |
1504 | m_free(m); | |
1505 | ||
1506 | return; | |
1507 | } | |
1508 | ||
1509 | /* , ts_present, ts_val, ts_ecr) */ | |
1510 | /* int *ts_present; | |
1511 | * u_int32_t *ts_val, *ts_ecr; | |
1512 | */ | |
1513 | void | |
1514 | tcp_dooptions(tp, cp, cnt, ti) | |
1515 | struct tcpcb *tp; | |
1516 | u_char *cp; | |
1517 | int cnt; | |
1518 | struct tcpiphdr *ti; | |
1519 | { | |
1520 | u_int16_t mss; | |
1521 | int opt, optlen; | |
1522 | ||
1523 | DEBUG_CALL("tcp_dooptions"); | |
1524 | DEBUG_ARGS((dfd," tp = %lx cnt=%i \n", (long )tp, cnt)); | |
1525 | ||
1526 | for (; cnt > 0; cnt -= optlen, cp += optlen) { | |
1527 | opt = cp[0]; | |
1528 | if (opt == TCPOPT_EOL) | |
1529 | break; | |
1530 | if (opt == TCPOPT_NOP) | |
1531 | optlen = 1; | |
1532 | else { | |
1533 | optlen = cp[1]; | |
1534 | if (optlen <= 0) | |
1535 | break; | |
1536 | } | |
1537 | switch (opt) { | |
1538 | ||
1539 | default: | |
1540 | continue; | |
1541 | ||
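/*
 * TCPOPT_MAXSEG is a 4-byte option: kind, length, then a 16-bit MSS in
 * network byte order, which is why the value is copied from cp + 2 and
 * byte-swapped before tcp_mss() clamps t_maxseg.
 */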
1542 | case TCPOPT_MAXSEG: | |
1543 | if (optlen != TCPOLEN_MAXSEG) | |
1544 | continue; | |
1545 | if (!(ti->ti_flags & TH_SYN)) | |
1546 | continue; | |
1547 | memcpy((char *) &mss, (char *) cp + 2, sizeof(mss)); | |
1548 | NTOHS(mss); | |
1549 | (void) tcp_mss(tp, mss); /* sets t_maxseg */ | |
1550 | break; | |
1551 | ||
1552 | /* case TCPOPT_WINDOW: | |
1553 | * if (optlen != TCPOLEN_WINDOW) | |
1554 | * continue; | |
1555 | * if (!(ti->ti_flags & TH_SYN)) | |
1556 | * continue; | |
1557 | * tp->t_flags |= TF_RCVD_SCALE; | |
1558 | * tp->requested_s_scale = min(cp[2], TCP_MAX_WINSHIFT); | |
1559 | * break; | |
1560 | */ | |
1561 | /* case TCPOPT_TIMESTAMP: | |
1562 | * if (optlen != TCPOLEN_TIMESTAMP) | |
1563 | * continue; | |
1564 | * *ts_present = 1; | |
1565 | * memcpy((char *) ts_val, (char *)cp + 2, sizeof(*ts_val)); | |
1566 | * NTOHL(*ts_val); | |
1567 | * memcpy((char *) ts_ecr, (char *)cp + 6, sizeof(*ts_ecr)); | |
1568 | * NTOHL(*ts_ecr); | |
1569 | * | |
1570 | */ /* | |
1571 | * * A timestamp received in a SYN makes | |
1572 | * * it ok to send timestamp requests and replies. | |
1573 | * */ | |
1574 | /* if (ti->ti_flags & TH_SYN) { | |
1575 | * tp->t_flags |= TF_RCVD_TSTMP; | |
1576 | * tp->ts_recent = *ts_val; | |
1577 | * tp->ts_recent_age = tcp_now; | |
1578 | * } | |
1579 | */ break; | |
1580 | } | |
1581 | } | |
1582 | } | |
1583 | ||
1584 | ||
1585 | /* | |
1586 | * Pull out of band byte out of a segment so | |
1587 | * it doesn't appear in the user's data queue. | |
1588 | * It is still reflected in the segment length for | |
1589 | * sequencing purposes. | |
1590 | */ | |
1591 | ||
1592 | #ifdef notdef | |
1593 | ||
1594 | void | |
1595 | tcp_pulloutofband(so, ti, m) | |
1596 | struct socket *so; | |
1597 | struct tcpiphdr *ti; | |
1598 | register struct mbuf *m; | |
1599 | { | |
1600 | int cnt = ti->ti_urp - 1; | |
1601 | ||
1602 | while (cnt >= 0) { | |
1603 | if (m->m_len > cnt) { | |
1604 | char *cp = mtod(m, caddr_t) + cnt; | |
1605 | struct tcpcb *tp = sototcpcb(so); | |
1606 | ||
1607 | tp->t_iobc = *cp; | |
1608 | tp->t_oobflags |= TCPOOB_HAVEDATA; | |
1609 | memcpy(cp, cp+1, (unsigned)(m->m_len - cnt - 1)); | |
1610 | m->m_len--; | |
1611 | return; | |
1612 | } | |
1613 | cnt -= m->m_len; | |
1614 | m = m->m_next; /* XXX WRONG! Fix it! */ | |
1615 | if (m == 0) | |
1616 | break; | |
1617 | } | |
1618 | panic("tcp_pulloutofband"); | |
1619 | } | |
1620 | ||
1621 | #endif /* notdef */ | |
1622 | ||
1623 | /* | |
1624 | * Collect new round-trip time estimate | |
1625 | * and update averages and current timeout. | |
1626 | */ | |
1627 | ||
1628 | void | |
1629 | tcp_xmit_timer(tp, rtt) | |
1630 | register struct tcpcb *tp; | |
1631 | int rtt; | |
1632 | { | |
1633 | register short delta; | |
1634 | ||
1635 | DEBUG_CALL("tcp_xmit_timer"); | |
1636 | DEBUG_ARG("tp = %lx", (long)tp); | |
1637 | DEBUG_ARG("rtt = %d", rtt); | |
1638 | ||
1639 | tcpstat.tcps_rttupdated++; | |
1640 | if (tp->t_srtt != 0) { | |
1641 | /* | |
1642 | * srtt is stored as fixed point with 3 bits after the | |
1643 | * binary point (i.e., scaled by 8). The following magic | |
1644 | * is equivalent to the smoothing algorithm in rfc793 with | |
1645 | * an alpha of .875 (srtt = rtt/8 + srtt*7/8 in fixed | |
1646 | * point). Adjust rtt to origin 0. | |
1647 | */ | |
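/*
 * Numeric example (illustrative, assuming TCP_RTT_SHIFT is 3 as in the BSD
 * sources): with t_srtt == 40 (5 ticks scaled by 8) and a measured rtt of
 * 7 ticks, delta = 7 - 1 - 5 = 1, so t_srtt becomes 41 and the estimate
 * moves by 1/8 of the error, matching the alpha of .875 described above.
 */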
1648 | delta = rtt - 1 - (tp->t_srtt >> TCP_RTT_SHIFT); | |
1649 | if ((tp->t_srtt += delta) <= 0) | |
1650 | tp->t_srtt = 1; | |
1651 | /* | |
1652 | * We accumulate a smoothed rtt variance (actually, a | |
1653 | * smoothed mean difference), then set the retransmit | |
1654 | * timer to smoothed rtt + 4 times the smoothed variance. | |
1655 | * rttvar is stored as fixed point with 2 bits after the | |
1656 | * binary point (scaled by 4). The following is | |
1657 | * equivalent to rfc793 smoothing with an alpha of .75 | |
1658 | * (rttvar = rttvar*3/4 + |delta| / 4). This replaces | |
1659 | * rfc793's wired-in beta. | |
1660 | */ | |
1661 | if (delta < 0) | |
1662 | delta = -delta; | |
1663 | delta -= (tp->t_rttvar >> TCP_RTTVAR_SHIFT); | |
1664 | if ((tp->t_rttvar += delta) <= 0) | |
1665 | tp->t_rttvar = 1; | |
1666 | } else { | |
1667 | /* | |
1668 | * No rtt measurement yet - use the unsmoothed rtt. | |
1669 | * Set the variance to half the rtt (so our first | |
1670 | * retransmit happens at 3*rtt). | |
1671 | */ | |
1672 | tp->t_srtt = rtt << TCP_RTT_SHIFT; | |
1673 | tp->t_rttvar = rtt << (TCP_RTTVAR_SHIFT - 1); | |
1674 | } | |
1675 | tp->t_rtt = 0; | |
1676 | tp->t_rxtshift = 0; | |
1677 | ||
1678 | /* | |
1679 | * the retransmit should happen at rtt + 4 * rttvar. | |
1680 | * Because of the way we do the smoothing, srtt and rttvar | |
1681 | * will each average +1/2 tick of bias. When we compute | |
1682 | * the retransmit timer, we want 1/2 tick of rounding and | |
1683 | * 1 extra tick because of +-1/2 tick uncertainty in the | |
1684 | * firing of the timer. The bias will give us exactly the | |
1685 | * 1.5 tick we need. But, because the bias is | |
1686 | * statistical, we have to test that we don't drop below | |
1687 | * the minimum feasible timer (which is 2 ticks). | |
1688 | */ | |
1689 | TCPT_RANGESET(tp->t_rxtcur, TCP_REXMTVAL(tp), | |
1690 | (short)tp->t_rttmin, TCPTV_REXMTMAX); /* XXX */ | |
1691 | ||
1692 | /* | |
1693 | * We received an ack for a packet that wasn't retransmitted; | |
1694 | * it is probably safe to discard any error indications we've | |
1695 | * received recently. This isn't quite right, but close enough | |
1696 | * for now (a route might have failed after we sent a segment, | |
1697 | * and the return path might not be symmetrical). | |
1698 | */ | |
1699 | tp->t_softerror = 0; | |
1700 | } | |
1701 | ||
1702 | /* | |
1703 | * Determine a reasonable value for maxseg size. | |
1704 | * If the route is known, check route for mtu. | |
1705 | * If none, use an mss that can be handled on the outgoing | |
1706 | * interface without forcing IP to fragment; if bigger than | |
1707 | * an mbuf cluster (MCLBYTES), round down to nearest multiple of MCLBYTES | |
1708 | * to utilize large mbufs. If no route is found, route has no mtu, | |
1709 | * or the destination isn't local, use a default, hopefully conservative | |
1710 | * size (usually 512 or the default IP max size, but no more than the mtu | |
1711 | * of the interface), as we can't discover anything about intervening | |
1712 | * gateways or networks. We also initialize the congestion/slow start | |
1713 | * window to be a single segment if the destination isn't local. | |
1714 | * While looking at the routing entry, we also initialize other path-dependent | |
1715 | * parameters from pre-set or cached values in the routing entry. | |
1716 | */ | |
1717 | ||
1718 | int | |
1719 | tcp_mss(tp, offer) | |
1720 | register struct tcpcb *tp; | |
1721 | u_int offer; | |
1722 | { | |
1723 | struct socket *so = tp->t_socket; | |
1724 | int mss; | |
1725 | ||
1726 | DEBUG_CALL("tcp_mss"); | |
1727 | DEBUG_ARG("tp = %lx", (long)tp); | |
1728 | DEBUG_ARG("offer = %d", offer); | |
1729 | ||
1730 | mss = min(if_mtu, if_mru) - sizeof(struct tcpiphdr); | |
1731 | if (offer) | |
1732 | mss = min(mss, offer); | |
1733 | mss = max(mss, 32); | |
1734 | if (mss < tp->t_maxseg || offer != 0) | |
1735 | tp->t_maxseg = mss; | |
1736 | ||
1737 | tp->snd_cwnd = mss; | |
1738 | ||
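/*
 * Round the configured buffer sizes up to a whole number of segments before
 * reserving them; e.g. with tcp_sndspace 8192 and an mss of 1460 the send
 * buffer is reserved as 8760 bytes (six full segments).
 */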
1739 | sbreserve(&so->so_snd, tcp_sndspace+((tcp_sndspace%mss)?(mss-(tcp_sndspace%mss)):0)); | |
1740 | sbreserve(&so->so_rcv, tcp_rcvspace+((tcp_rcvspace%mss)?(mss-(tcp_rcvspace%mss)):0)); | |
1741 | ||
1742 | DEBUG_MISC((dfd, " returning mss = %d\n", mss)); | |
1743 | ||
1744 | return mss; | |
1745 | } |