/*
 * INET         An implementation of the TCP/IP protocol suite for the LINUX
 *              operating system. INET is implemented using the BSD Socket
 *              interface as the means of communication with the user level.
 *
 *              Generic TIME_WAIT sockets functions
 *
 *              From code originally in TCP
 */

#include <linux/kernel.h>
#include <linux/kmemcheck.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <net/inet_hashtables.h>
#include <net/inet_timewait_sock.h>
#include <net/ip.h>

/**
 * inet_twsk_unhash - unhash a timewait socket from established hash
 * @tw: timewait socket
 *
 * unhash a timewait socket from established hash, if hashed.
 * ehash lock must be held by caller.
 * Returns 1 if caller should call inet_twsk_put() after lock release.
 */
int inet_twsk_unhash(struct inet_timewait_sock *tw)
{
        if (hlist_nulls_unhashed(&tw->tw_node))
                return 0;

        hlist_nulls_del_rcu(&tw->tw_node);
        sk_nulls_node_init(&tw->tw_node);
        /*
         * We cannot call inet_twsk_put() ourself under lock,
         * caller must call it for us.
         */
        return 1;
}
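
/*
 * A minimal sketch of the expected caller pattern (this is exactly what
 * __inet_twsk_kill() below does): collect the deferred-put count while
 * holding the ehash lock, and drop the references only after unlocking.
 *
 *      spin_lock(lock);
 *      refcnt = inet_twsk_unhash(tw);
 *      spin_unlock(lock);
 *      while (refcnt--)
 *              inet_twsk_put(tw);
 */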

/**
 * inet_twsk_bind_unhash - unhash a timewait socket from bind hash
 * @tw: timewait socket
 * @hashinfo: hashinfo pointer
 *
 * unhash a timewait socket from bind hash, if hashed.
 * bind hash lock must be held by caller.
 * Returns 1 if caller should call inet_twsk_put() after lock release.
 */
int inet_twsk_bind_unhash(struct inet_timewait_sock *tw,
                          struct inet_hashinfo *hashinfo)
{
        struct inet_bind_bucket *tb = tw->tw_tb;

        if (!tb)
                return 0;

        __hlist_del(&tw->tw_bind_node);
        tw->tw_tb = NULL;
        inet_bind_bucket_destroy(hashinfo->bind_bucket_cachep, tb);
        /*
         * We cannot call inet_twsk_put() ourself under lock,
         * caller must call it for us.
         */
        return 1;
}

/* Must be called with locally disabled BHs. */
static void __inet_twsk_kill(struct inet_timewait_sock *tw,
                             struct inet_hashinfo *hashinfo)
{
        struct inet_bind_hashbucket *bhead;
        int refcnt;
        /* Unlink from established hashes. */
        spinlock_t *lock = inet_ehash_lockp(hashinfo, tw->tw_hash);

        spin_lock(lock);
        refcnt = inet_twsk_unhash(tw);
        spin_unlock(lock);

        /* Disassociate with bind bucket. */
        bhead = &hashinfo->bhash[inet_bhashfn(twsk_net(tw), tw->tw_num,
                        hashinfo->bhash_size)];

        spin_lock(&bhead->lock);
        refcnt += inet_twsk_bind_unhash(tw, hashinfo);
        spin_unlock(&bhead->lock);

#ifdef SOCK_REFCNT_DEBUG
        if (atomic_read(&tw->tw_refcnt) != 1) {
                pr_debug("%s timewait_sock %p refcnt=%d\n",
                         tw->tw_prot->name, tw, atomic_read(&tw->tw_refcnt));
        }
#endif
        while (refcnt) {
                inet_twsk_put(tw);
                refcnt--;
        }
}
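
/*
 * Note: refcnt can legitimately end up as 0, 1 or 2 here.
 * inet_twsk_deschedule() calls __inet_twsk_kill() even when the socket
 * already left the death row, so either hash link may already be gone;
 * each unhash helper reports whether a deferred inet_twsk_put() is
 * still owed.
 */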

static noinline void inet_twsk_free(struct inet_timewait_sock *tw)
{
        struct module *owner = tw->tw_prot->owner;
        twsk_destructor((struct sock *)tw);
#ifdef SOCK_REFCNT_DEBUG
        pr_debug("%s timewait_sock %p released\n", tw->tw_prot->name, tw);
#endif
        release_net(twsk_net(tw));
        kmem_cache_free(tw->tw_prot->twsk_prot->twsk_slab, tw);
        module_put(owner);
}

void inet_twsk_put(struct inet_timewait_sock *tw)
{
        if (atomic_dec_and_test(&tw->tw_refcnt))
                inet_twsk_free(tw);
}
EXPORT_SYMBOL_GPL(inet_twsk_put);
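
/*
 * Reference-count lifecycle, as implemented in this file: a timewait
 * socket is born with tw_refcnt == 0 in inet_twsk_alloc(), gets its
 * references in one batch in __inet_twsk_hashdance(), and is finally
 * destroyed by inet_twsk_free() when the last inet_twsk_put() drops
 * the count to zero.
 */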

/*
 * Enter the time wait state. This is called with locally disabled BH.
 * Essentially we whip up a timewait bucket, copy the relevant info into it
 * from the SK, and mess with hash chains and list linkage.
 */
void __inet_twsk_hashdance(struct inet_timewait_sock *tw, struct sock *sk,
                           struct inet_hashinfo *hashinfo)
{
        const struct inet_sock *inet = inet_sk(sk);
        const struct inet_connection_sock *icsk = inet_csk(sk);
        struct inet_ehash_bucket *ehead = inet_ehash_bucket(hashinfo, sk->sk_hash);
        spinlock_t *lock = inet_ehash_lockp(hashinfo, sk->sk_hash);
        struct inet_bind_hashbucket *bhead;
        /* Step 1: Put TW into bind hash. Original socket stays there too.
         * Note that any socket with inet->num != 0 MUST be bound in
         * the binding cache, even if it is closed.
         */
        bhead = &hashinfo->bhash[inet_bhashfn(twsk_net(tw), inet->inet_num,
                        hashinfo->bhash_size)];
        spin_lock(&bhead->lock);
        tw->tw_tb = icsk->icsk_bind_hash;
        WARN_ON(!icsk->icsk_bind_hash);
        inet_twsk_add_bind_node(tw, &tw->tw_tb->owners);
        spin_unlock(&bhead->lock);

        spin_lock(lock);

        /*
         * Step 2: Hash TW into TIMEWAIT chain.
         * Should be done before removing sk from the established chain
         * because readers are lockless and search established first.
         */
        inet_twsk_add_node_rcu(tw, &ehead->twchain);

        /* Step 3: Remove SK from established hash. */
        if (__sk_nulls_del_node_init_rcu(sk))
                sock_prot_inuse_add(sock_net(sk), sk->sk_prot, -1);

        /*
         * Notes :
         * - We initially set tw_refcnt to 0 in inet_twsk_alloc()
         * - We add one reference for the bhash link
         * - We add one reference for the ehash link
         * - We want this refcnt update done before allowing other
         *   threads to find this tw in the ehash chain.
         */
        atomic_add(1 + 1 + 1, &tw->tw_refcnt);
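        /*
         * (The third reference is the one handed back to our caller;
         * tcp_time_wait(), for instance, drops it with a final
         * inet_twsk_put() once the socket has been scheduled on the
         * death row.)
         */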

        spin_unlock(lock);
}
EXPORT_SYMBOL_GPL(__inet_twsk_hashdance);

struct inet_timewait_sock *inet_twsk_alloc(const struct sock *sk, const int state)
{
        struct inet_timewait_sock *tw =
                kmem_cache_alloc(sk->sk_prot_creator->twsk_prot->twsk_slab,
                                 GFP_ATOMIC);
        if (tw != NULL) {
                const struct inet_sock *inet = inet_sk(sk);

                kmemcheck_annotate_bitfield(tw, flags);

                /* Give us an identity. */
                tw->tw_daddr        = inet->inet_daddr;
                tw->tw_rcv_saddr    = inet->inet_rcv_saddr;
                tw->tw_bound_dev_if = sk->sk_bound_dev_if;
                tw->tw_tos          = inet->tos;
                tw->tw_num          = inet->inet_num;
                tw->tw_state        = TCP_TIME_WAIT;
                tw->tw_substate     = state;
                tw->tw_sport        = inet->inet_sport;
                tw->tw_dport        = inet->inet_dport;
                tw->tw_family       = sk->sk_family;
                tw->tw_reuse        = sk->sk_reuse;
                tw->tw_hash         = sk->sk_hash;
                tw->tw_ipv6only     = 0;
                tw->tw_transparent  = inet->transparent;
                tw->tw_prot         = sk->sk_prot_creator;
                twsk_net_set(tw, hold_net(sock_net(sk)));
                /*
                 * Because we use RCU lookups, we should not set tw_refcnt
                 * to a non null value before everything is setup for this
                 * timewait socket.
                 */
                atomic_set(&tw->tw_refcnt, 0);
                inet_twsk_dead_node_init(tw);
                __module_get(tw->tw_prot->owner);
        }

        return tw;
}
EXPORT_SYMBOL_GPL(inet_twsk_alloc);
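
/*
 * Starting with tw_refcnt == 0 pairs with the atomic_inc_not_zero()
 * used by lockless readers (e.g. inet_twsk_purge() below): an RCU
 * lookup that races with this initialization simply fails to take a
 * reference and skips the half-built entry.
 */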

/* Returns non-zero if quota exceeded. */
static int inet_twdr_do_twkill_work(struct inet_timewait_death_row *twdr,
                                    const int slot)
{
        struct inet_timewait_sock *tw;
        struct hlist_node *node;
        unsigned int killed;
        int ret;

        /* NOTE: compare this to the previous version where the lock
         * was released after detaching the chain. It was racy,
         * because tw buckets are scheduled in a non-serialized context
         * in 2.3 (with netfilter), and with softnet it is common, because
         * soft irqs are not sequenced.
         */
        killed = 0;
        ret = 0;
rescan:
        inet_twsk_for_each_inmate(tw, node, &twdr->cells[slot]) {
                __inet_twsk_del_dead_node(tw);
                spin_unlock(&twdr->death_lock);
                __inet_twsk_kill(tw, twdr->hashinfo);
#ifdef CONFIG_NET_NS
                NET_INC_STATS_BH(twsk_net(tw), LINUX_MIB_TIMEWAITED);
#endif
                inet_twsk_put(tw);
                killed++;
                spin_lock(&twdr->death_lock);
                if (killed > INET_TWDR_TWKILL_QUOTA) {
                        ret = 1;
                        break;
                }

                /* While we dropped twdr->death_lock, another cpu may have
                 * killed off the next TW bucket in the list, therefore
                 * do a fresh re-read of the hlist head node with the
                 * lock reacquired.  We still use the hlist traversal
                 * macro in order to get the prefetches.
                 */
                goto rescan;
        }

        twdr->tw_count -= killed;
#ifndef CONFIG_NET_NS
        NET_ADD_STATS_BH(&init_net, LINUX_MIB_TIMEWAITED, killed);
#endif
        return ret;
}
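
/*
 * The INET_TWDR_TWKILL_QUOTA check above bounds how long death_lock is
 * monopolized from timer context: when the quota is exceeded, the
 * function returns 1 and inet_twdr_hangman() defers the remainder of
 * the slot to the twkill workqueue, where inet_twdr_twkill_work() can
 * reschedule between batches.
 */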

void inet_twdr_hangman(unsigned long data)
{
        struct inet_timewait_death_row *twdr;
        unsigned int need_timer;

        twdr = (struct inet_timewait_death_row *)data;
        spin_lock(&twdr->death_lock);

        if (twdr->tw_count == 0)
                goto out;

        need_timer = 0;
        if (inet_twdr_do_twkill_work(twdr, twdr->slot)) {
                twdr->thread_slots |= (1 << twdr->slot);
                schedule_work(&twdr->twkill_work);
                need_timer = 1;
        } else {
                /* We purged the entire slot, anything left? */
                if (twdr->tw_count)
                        need_timer = 1;
                twdr->slot = ((twdr->slot + 1) & (INET_TWDR_TWKILL_SLOTS - 1));
        }
        if (need_timer)
                mod_timer(&twdr->tw_timer, jiffies + twdr->period);
out:
        spin_unlock(&twdr->death_lock);
}
EXPORT_SYMBOL_GPL(inet_twdr_hangman);

void inet_twdr_twkill_work(struct work_struct *work)
{
        struct inet_timewait_death_row *twdr =
                container_of(work, struct inet_timewait_death_row, twkill_work);
        int i;

        BUILD_BUG_ON((INET_TWDR_TWKILL_SLOTS - 1) >
                        (sizeof(twdr->thread_slots) * 8));

        while (twdr->thread_slots) {
                spin_lock_bh(&twdr->death_lock);
                for (i = 0; i < INET_TWDR_TWKILL_SLOTS; i++) {
                        if (!(twdr->thread_slots & (1 << i)))
                                continue;

                        while (inet_twdr_do_twkill_work(twdr, i) != 0) {
                                if (need_resched()) {
                                        spin_unlock_bh(&twdr->death_lock);
                                        schedule();
                                        spin_lock_bh(&twdr->death_lock);
                                }
                        }

                        twdr->thread_slots &= ~(1 << i);
                }
                spin_unlock_bh(&twdr->death_lock);
        }
}
EXPORT_SYMBOL_GPL(inet_twdr_twkill_work);

/* These are always called from BH context. See callers in
 * tcp_input.c to verify this.
 */

/* This is for handling early-kills of TIME_WAIT sockets. */
void inet_twsk_deschedule(struct inet_timewait_sock *tw,
                          struct inet_timewait_death_row *twdr)
{
        spin_lock(&twdr->death_lock);
        if (inet_twsk_del_dead_node(tw)) {
                inet_twsk_put(tw);
                if (--twdr->tw_count == 0)
                        del_timer(&twdr->tw_timer);
        }
        spin_unlock(&twdr->death_lock);
        __inet_twsk_kill(tw, twdr->hashinfo);
}
EXPORT_SYMBOL(inet_twsk_deschedule);

void inet_twsk_schedule(struct inet_timewait_sock *tw,
                        struct inet_timewait_death_row *twdr,
                        const int timeo, const int timewait_len)
{
        struct hlist_head *list;
        int slot;

        /* timeout := RTO * 3.5
         *
         * 3.5 = 1+2+0.5 to wait for two retransmits.
         *
         * RATIONALE: if FIN arrived and we entered TIME-WAIT state,
         * our ACK acking that FIN can be lost. If N subsequent retransmitted
         * FINs (or previous segments) are lost (probability of such event
         * is p^(N+1), where p is the probability to lose a single packet and
         * the time to detect the loss is about RTO*(2^N - 1) with exponential
         * backoff). Normal timewait length is calculated so that we
         * wait at least for one retransmitted FIN (maximal RTO is 120sec).
         * [ BTW Linux, following BSD, violates this requirement by waiting
         *   only for 60sec; we should wait at least 240 secs.
         *   Well, 240 consumes too much of resources 8)
         * ]
         * This interval is not reduced to catch old duplicates and
         * responses to our wandering segments living for two MSLs.
         * However, if we use PAWS to detect
         * old duplicates, we can reduce the interval to bounds required
         * by RTO, rather than MSL. So, if the peer understands PAWS, we
         * kill the tw bucket after 3.5*RTO (it is important that this number
         * is greater than the TS tick!) and detect old duplicates with the
         * help of PAWS.
         */
        slot = (timeo + (1 << INET_TWDR_RECYCLE_TICK) - 1) >> INET_TWDR_RECYCLE_TICK;
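        /*
         * Worked example (assuming HZ=1000, for which inet_timewait_sock.h
         * makes INET_TWDR_RECYCLE_TICK 7, i.e. one recycle slot covers
         * 128 jiffies): a recycling peer with an RTO of 200ms gives
         * timeo = 3.5 * 200 = 700 jiffies, so slot = (700 + 127) >> 7 = 6.
         * That is below INET_TWDR_RECYCLE_SLOTS (32), so the socket lands
         * on the fine-grained twcal wheel below instead of the slow 2MSL
         * cells[].
         */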

        spin_lock(&twdr->death_lock);

        /* Unlink it, if it was scheduled */
        if (inet_twsk_del_dead_node(tw))
                twdr->tw_count--;
        else
                atomic_inc(&tw->tw_refcnt);

        if (slot >= INET_TWDR_RECYCLE_SLOTS) {
                /* Schedule to slow timer */
                if (timeo >= timewait_len) {
                        slot = INET_TWDR_TWKILL_SLOTS - 1;
                } else {
                        slot = DIV_ROUND_UP(timeo, twdr->period);
                        if (slot >= INET_TWDR_TWKILL_SLOTS)
                                slot = INET_TWDR_TWKILL_SLOTS - 1;
                }
                tw->tw_ttd = jiffies + timeo;
                slot = (twdr->slot + slot) & (INET_TWDR_TWKILL_SLOTS - 1);
                list = &twdr->cells[slot];
        } else {
                tw->tw_ttd = jiffies + (slot << INET_TWDR_RECYCLE_TICK);

                if (twdr->twcal_hand < 0) {
                        twdr->twcal_hand = 0;
                        twdr->twcal_jiffie = jiffies;
                        twdr->twcal_timer.expires = twdr->twcal_jiffie +
                                        (slot << INET_TWDR_RECYCLE_TICK);
                        add_timer(&twdr->twcal_timer);
                } else {
                        if (time_after(twdr->twcal_timer.expires,
                                       jiffies + (slot << INET_TWDR_RECYCLE_TICK)))
                                mod_timer(&twdr->twcal_timer,
                                          jiffies + (slot << INET_TWDR_RECYCLE_TICK));
                        slot = (twdr->twcal_hand + slot) & (INET_TWDR_RECYCLE_SLOTS - 1);
                }
                list = &twdr->twcal_row[slot];
        }

        hlist_add_head(&tw->tw_death_node, list);

        if (twdr->tw_count++ == 0)
                mod_timer(&twdr->tw_timer, jiffies + twdr->period);
        spin_unlock(&twdr->death_lock);
}
EXPORT_SYMBOL_GPL(inet_twsk_schedule);

void inet_twdr_twcal_tick(unsigned long data)
{
        struct inet_timewait_death_row *twdr;
        int n, slot;
        unsigned long j;
        unsigned long now = jiffies;
        int killed = 0;
        int adv = 0;

        twdr = (struct inet_timewait_death_row *)data;

        spin_lock(&twdr->death_lock);
        if (twdr->twcal_hand < 0)
                goto out;

        slot = twdr->twcal_hand;
        j = twdr->twcal_jiffie;

        for (n = 0; n < INET_TWDR_RECYCLE_SLOTS; n++) {
                if (time_before_eq(j, now)) {
                        struct hlist_node *node, *safe;
                        struct inet_timewait_sock *tw;

                        inet_twsk_for_each_inmate_safe(tw, node, safe,
                                                       &twdr->twcal_row[slot]) {
                                __inet_twsk_del_dead_node(tw);
                                __inet_twsk_kill(tw, twdr->hashinfo);
#ifdef CONFIG_NET_NS
                                NET_INC_STATS_BH(twsk_net(tw), LINUX_MIB_TIMEWAITKILLED);
#endif
                                inet_twsk_put(tw);
                                killed++;
                        }
                } else {
                        if (!adv) {
                                adv = 1;
                                twdr->twcal_jiffie = j;
                                twdr->twcal_hand = slot;
                        }

                        if (!hlist_empty(&twdr->twcal_row[slot])) {
                                mod_timer(&twdr->twcal_timer, j);
                                goto out;
                        }
                }
                j += 1 << INET_TWDR_RECYCLE_TICK;
                slot = (slot + 1) & (INET_TWDR_RECYCLE_SLOTS - 1);
        }
        twdr->twcal_hand = -1;

out:
        if ((twdr->tw_count -= killed) == 0)
                del_timer(&twdr->tw_timer);
#ifndef CONFIG_NET_NS
        NET_ADD_STATS_BH(&init_net, LINUX_MIB_TIMEWAITKILLED, killed);
#endif
        spin_unlock(&twdr->death_lock);
}
EXPORT_SYMBOL_GPL(inet_twdr_twcal_tick);
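
/*
 * Lockless walk over the ehash timewait chains: each candidate is
 * pinned with atomic_inc_not_zero() (a zero refcnt means the socket is
 * being, or has been, freed and must be skipped), and the matching
 * conditions are re-checked after the reference is taken, since the
 * entry could have been recycled between the test and the increment.
 */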
void inet_twsk_purge(struct inet_hashinfo *hashinfo,
                     struct inet_timewait_death_row *twdr, int family)
{
        struct inet_timewait_sock *tw;
        struct sock *sk;
        struct hlist_nulls_node *node;
        unsigned int slot;

        for (slot = 0; slot <= hashinfo->ehash_mask; slot++) {
                struct inet_ehash_bucket *head = &hashinfo->ehash[slot];
restart_rcu:
                rcu_read_lock();
restart:
                sk_nulls_for_each_rcu(sk, node, &head->twchain) {
                        tw = inet_twsk(sk);
                        if ((tw->tw_family != family) ||
                            atomic_read(&twsk_net(tw)->count))
                                continue;

                        if (unlikely(!atomic_inc_not_zero(&tw->tw_refcnt)))
                                continue;

                        if (unlikely((tw->tw_family != family) ||
                                     atomic_read(&twsk_net(tw)->count))) {
                                inet_twsk_put(tw);
                                goto restart;
                        }

                        rcu_read_unlock();
                        local_bh_disable();
                        inet_twsk_deschedule(tw, twdr);
                        local_bh_enable();
                        inet_twsk_put(tw);
                        goto restart_rcu;
                }
                /* If the nulls value we got at the end of this lookup is
                 * not the expected one, we must restart lookup.
                 * We probably met an item that was moved to another chain.
                 */
                if (get_nulls_value(node) != slot)
                        goto restart;
                rcu_read_unlock();
        }
}
EXPORT_SYMBOL_GPL(inet_twsk_purge);