// SPDX-License-Identifier: GPL-2.0-only
/*
 * INET		An implementation of the TCP/IP protocol suite for the LINUX
 *		operating system.  INET is implemented using the BSD Socket
 *		interface as the means of communication with the user level.
 *
 *		Generic TIME_WAIT sockets functions
 *
 *		From code originally in TCP
 */

#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <net/inet_hashtables.h>
#include <net/inet_timewait_sock.h>
#include <net/ip.h>

/**
 * inet_twsk_bind_unhash - unhash a timewait socket from bind hash
 * @tw: timewait socket
 * @hashinfo: hashinfo pointer
 *
 * unhash a timewait socket from bind hash, if hashed.
 * bind hash lock must be held by caller.
 */
void inet_twsk_bind_unhash(struct inet_timewait_sock *tw,
			   struct inet_hashinfo *hashinfo)
{
	struct inet_bind2_bucket *tb2 = tw->tw_tb2;
	struct inet_bind_bucket *tb = tw->tw_tb;

	if (!tb)
		return;

	__hlist_del(&tw->tw_bind_node);
	tw->tw_tb = NULL;

	__hlist_del(&tw->tw_bind2_node);
	tw->tw_tb2 = NULL;
	inet_bind2_bucket_destroy(hashinfo->bind2_bucket_cachep, tb2);
	inet_bind_bucket_destroy(hashinfo->bind_bucket_cachep, tb);

	__sock_put((struct sock *)tw);
}

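/* Fully unlink a TIME_WAIT socket: take it off the ehash chain and off both
 * bind hash tables, decrement the per-death-row tw_refcount and drop a
 * reference on the socket.
 */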
/* Must be called with locally disabled BHs. */
static void inet_twsk_kill(struct inet_timewait_sock *tw)
{
	struct inet_hashinfo *hashinfo = tw->tw_dr->hashinfo;
	spinlock_t *lock = inet_ehash_lockp(hashinfo, tw->tw_hash);
	struct inet_bind_hashbucket *bhead, *bhead2;

	spin_lock(lock);
	sk_nulls_del_node_init_rcu((struct sock *)tw);
	spin_unlock(lock);

	/* Disassociate with bind bucket. */
	bhead = &hashinfo->bhash[inet_bhashfn(twsk_net(tw), tw->tw_num,
			hashinfo->bhash_size)];
	bhead2 = inet_bhashfn_portaddr(hashinfo, (struct sock *)tw,
				       twsk_net(tw), tw->tw_num);

	spin_lock(&bhead->lock);
	spin_lock(&bhead2->lock);
	inet_twsk_bind_unhash(tw, hashinfo);
	spin_unlock(&bhead2->lock);
	spin_unlock(&bhead->lock);

	refcount_dec(&tw->tw_dr->tw_refcount);
	inet_twsk_put(tw);
}

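/* Final destruction of a timewait socket: run the protocol's twsk destructor,
 * return the object to its slab cache and drop the module reference taken in
 * inet_twsk_alloc().
 */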
void inet_twsk_free(struct inet_timewait_sock *tw)
{
	struct module *owner = tw->tw_prot->owner;
	twsk_destructor((struct sock *)tw);
	kmem_cache_free(tw->tw_prot->twsk_prot->twsk_slab, tw);
	module_put(owner);
}

void inet_twsk_put(struct inet_timewait_sock *tw)
{
	if (refcount_dec_and_test(&tw->tw_refcnt))
		inet_twsk_free(tw);
}
EXPORT_SYMBOL_GPL(inet_twsk_put);

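/* Small helpers to link a timewait socket onto the ehash (nulls, RCU) chain
 * and onto the two bind hash chains; only used by inet_twsk_hashdance() below.
 */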
static void inet_twsk_add_node_rcu(struct inet_timewait_sock *tw,
				   struct hlist_nulls_head *list)
{
	hlist_nulls_add_head_rcu(&tw->tw_node, list);
}

static void inet_twsk_add_bind_node(struct inet_timewait_sock *tw,
				    struct hlist_head *list)
{
	hlist_add_head(&tw->tw_bind_node, list);
}

static void inet_twsk_add_bind2_node(struct inet_timewait_sock *tw,
				     struct hlist_head *list)
{
	hlist_add_head(&tw->tw_bind2_node, list);
}

/*
 * Enter the time wait state. This is called with locally disabled BH.
 * Essentially we whip up a timewait bucket, copy the relevant info into it
 * from the SK, and mess with hash chains and list linkage.
 */
void inet_twsk_hashdance(struct inet_timewait_sock *tw, struct sock *sk,
			 struct inet_hashinfo *hashinfo)
{
	const struct inet_sock *inet = inet_sk(sk);
	const struct inet_connection_sock *icsk = inet_csk(sk);
	struct inet_ehash_bucket *ehead = inet_ehash_bucket(hashinfo, sk->sk_hash);
	spinlock_t *lock = inet_ehash_lockp(hashinfo, sk->sk_hash);
	struct inet_bind_hashbucket *bhead, *bhead2;

	/* Step 1: Put TW into bind hash. Original socket stays there too.
	   Note that any socket with inet->num != 0 MUST be bound in
	   binding cache, even if it is closed.
	 */
	bhead = &hashinfo->bhash[inet_bhashfn(twsk_net(tw), inet->inet_num,
			hashinfo->bhash_size)];
	bhead2 = inet_bhashfn_portaddr(hashinfo, sk, twsk_net(tw), inet->inet_num);

	spin_lock(&bhead->lock);
	spin_lock(&bhead2->lock);

	tw->tw_tb = icsk->icsk_bind_hash;
	WARN_ON(!icsk->icsk_bind_hash);
	inet_twsk_add_bind_node(tw, &tw->tw_tb->owners);

	tw->tw_tb2 = icsk->icsk_bind2_hash;
	WARN_ON(!icsk->icsk_bind2_hash);
	inet_twsk_add_bind2_node(tw, &tw->tw_tb2->deathrow);

	spin_unlock(&bhead2->lock);
	spin_unlock(&bhead->lock);

	spin_lock(lock);

	/* Step 2: Hash TW into tcp ehash chain */
	inet_twsk_add_node_rcu(tw, &ehead->chain);

	/* Step 3: Remove SK from hash chain */
	if (__sk_nulls_del_node_init_rcu(sk))
		sock_prot_inuse_add(sock_net(sk), sk->sk_prot, -1);

	spin_unlock(lock);

	/* tw_refcnt is set to 3 because we have :
	 * - one reference for bhash chain.
	 * - one reference for ehash chain.
	 * - one reference for timer.
	 * We can use atomic_set() because prior spin_lock()/spin_unlock()
	 * committed into memory all tw fields.
	 * Also note that after this point, we lost our implicit reference
	 * so we are not allowed to use tw anymore.
	 */
	refcount_set(&tw->tw_refcnt, 3);
}
EXPORT_SYMBOL_GPL(inet_twsk_hashdance);

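/* TIME_WAIT timer expiry: tear the timewait socket down. Runs from the timer
 * softirq, which satisfies inet_twsk_kill()'s "BHs disabled" requirement.
 */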
static void tw_timer_handler(struct timer_list *t)
{
	struct inet_timewait_sock *tw = from_timer(tw, t, tw_timer);

	inet_twsk_kill(tw);
}

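/* Allocate a timewait socket for a connection entering @state and copy the
 * identifying fields (addresses, ports, family, bound device, ...) from @sk.
 * Returns NULL when the death row already holds sysctl_max_tw_buckets
 * sockets. The timer is set up but not armed, and tw_refcnt starts at zero
 * until inet_twsk_hashdance() publishes the socket.
 */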
struct inet_timewait_sock *inet_twsk_alloc(const struct sock *sk,
					   struct inet_timewait_death_row *dr,
					   const int state)
{
	struct inet_timewait_sock *tw;

	if (refcount_read(&dr->tw_refcount) - 1 >=
	    READ_ONCE(dr->sysctl_max_tw_buckets))
		return NULL;

	tw = kmem_cache_alloc(sk->sk_prot_creator->twsk_prot->twsk_slab,
			      GFP_ATOMIC);
	if (tw) {
		const struct inet_sock *inet = inet_sk(sk);

		tw->tw_dr = dr;
		/* Give us an identity. */
		tw->tw_daddr = inet->inet_daddr;
		tw->tw_rcv_saddr = inet->inet_rcv_saddr;
		tw->tw_bound_dev_if = sk->sk_bound_dev_if;
		tw->tw_tos = inet->tos;
		tw->tw_num = inet->inet_num;
		tw->tw_state = TCP_TIME_WAIT;
		tw->tw_substate = state;
		tw->tw_sport = inet->inet_sport;
		tw->tw_dport = inet->inet_dport;
		tw->tw_family = sk->sk_family;
		tw->tw_reuse = sk->sk_reuse;
		tw->tw_reuseport = sk->sk_reuseport;
		tw->tw_hash = sk->sk_hash;
		tw->tw_ipv6only = 0;
		tw->tw_transparent = inet_test_bit(TRANSPARENT, sk);
		tw->tw_prot = sk->sk_prot_creator;
		atomic64_set(&tw->tw_cookie, atomic64_read(&sk->sk_cookie));
		twsk_net_set(tw, sock_net(sk));
		timer_setup(&tw->tw_timer, tw_timer_handler, TIMER_PINNED);
		/*
		 * Because we use RCU lookups, we should not set tw_refcnt
		 * to a non null value before everything is setup for this
		 * timewait socket.
		 */
		refcount_set(&tw->tw_refcnt, 0);

		__module_get(tw->tw_prot->owner);
	}

	return tw;
}
EXPORT_SYMBOL_GPL(inet_twsk_alloc);

/* These are always called from BH context. See callers in
 * tcp_input.c to verify this.
 */

/* This is for handling early-kills of TIME_WAIT sockets.
 * Warning: this consumes a reference.
 * Caller should not access tw anymore.
 */
void inet_twsk_deschedule_put(struct inet_timewait_sock *tw)
{
	if (del_timer_sync(&tw->tw_timer))
		inet_twsk_kill(tw);
	inet_twsk_put(tw);
}
EXPORT_SYMBOL(inet_twsk_deschedule_put);

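/* Arm (rearm == false) or re-arm (rearm == true) the TIME_WAIT timer on @tw.
 * The first arming also takes the per-death-row tw_refcount that
 * inet_twsk_kill() drops when the socket is finally destroyed.
 */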
void __inet_twsk_schedule(struct inet_timewait_sock *tw, int timeo, bool rearm)
{
	/* timeout := RTO * 3.5
	 *
	 * 3.5 = 1+2+0.5 to wait for two retransmits.
	 *
	 * RATIONALE: if FIN arrived and we entered TIME-WAIT state,
	 * our ACK acking that FIN can be lost. If N subsequent retransmitted
	 * FINs (or previous segments) are lost (probability of such event
	 * is p^(N+1), where p is the probability to lose a single packet and
	 * the time to detect the loss is about RTO*(2^N - 1) with exponential
	 * backoff). Normal timewait length is calculated so that we
	 * wait at least for one retransmitted FIN (maximal RTO is 120sec).
	 * [ BTW Linux, following BSD, violates this requirement waiting
	 *   only for 60sec; we should wait at least for 240 secs.
	 *   Well, 240 consumes too much of resources 8)
	 * ]
	 * This interval is not reduced to catch old duplicates and
	 * responses to our wandering segments living for two MSLs.
	 * However, if we use PAWS to detect
	 * old duplicates, we can reduce the interval to bounds required
	 * by RTO, rather than MSL. So, if the peer understands PAWS, we
	 * kill the tw bucket after 3.5*RTO (it is important that this number
	 * is greater than the TS tick!) and detect old duplicates with the
	 * help of PAWS.
	 */

	if (!rearm) {
		bool kill = timeo <= 4*HZ;

		__NET_INC_STATS(twsk_net(tw), kill ? LINUX_MIB_TIMEWAITKILLED :
						     LINUX_MIB_TIMEWAITED);
		BUG_ON(mod_timer(&tw->tw_timer, jiffies + timeo));
		refcount_inc(&tw->tw_dr->tw_refcount);
	} else {
		mod_timer_pending(&tw->tw_timer, jiffies + timeo);
	}
}
EXPORT_SYMBOL_GPL(__inet_twsk_schedule);

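/* Walk the whole established hash table and deschedule every TIME_WAIT socket
 * of @family whose network namespace has no references left; stray
 * NEW_SYN_RECV entries owned by kernel listeners are dropped as well when the
 * hash table is per netns. Typically used from netns/protocol teardown paths.
 */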
void inet_twsk_purge(struct inet_hashinfo *hashinfo, int family)
{
	struct inet_timewait_sock *tw;
	struct sock *sk;
	struct hlist_nulls_node *node;
	unsigned int slot;

	for (slot = 0; slot <= hashinfo->ehash_mask; slot++) {
		struct inet_ehash_bucket *head = &hashinfo->ehash[slot];
restart_rcu:
		cond_resched();
		rcu_read_lock();
restart:
		sk_nulls_for_each_rcu(sk, node, &head->chain) {
			if (sk->sk_state != TCP_TIME_WAIT) {
				/* A kernel listener socket might not hold refcnt for net,
				 * so reqsk_timer_handler() could be fired after net is
				 * freed.  Userspace listener and reqsk never exist here.
				 */
				if (unlikely(sk->sk_state == TCP_NEW_SYN_RECV &&
					     hashinfo->pernet)) {
					struct request_sock *req = inet_reqsk(sk);

					inet_csk_reqsk_queue_drop_and_put(req->rsk_listener, req);
				}

				continue;
			}

			tw = inet_twsk(sk);
			if ((tw->tw_family != family) ||
			    refcount_read(&twsk_net(tw)->ns.count))
				continue;

			if (unlikely(!refcount_inc_not_zero(&tw->tw_refcnt)))
				continue;

			if (unlikely((tw->tw_family != family) ||
				     refcount_read(&twsk_net(tw)->ns.count))) {
				inet_twsk_put(tw);
				goto restart;
			}

			rcu_read_unlock();
			local_bh_disable();
			inet_twsk_deschedule_put(tw);
			local_bh_enable();
			goto restart_rcu;
		}
		/* If the nulls value we got at the end of this lookup is
		 * not the expected one, we must restart lookup.
		 * We probably met an item that was moved to another chain.
		 */
		if (get_nulls_value(node) != slot)
			goto restart;
		rcu_read_unlock();
	}
}
EXPORT_SYMBOL_GPL(inet_twsk_purge);