#include <linux/rcupdate.h>
#include <linux/spinlock.h>
#include <linux/jiffies.h>
#include <linux/module.h>
#include <linux/cache.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/tcp.h>
#include <linux/hash.h>
#include <linux/tcp_metrics.h>
#include <linux/vmalloc.h>

#include <net/inet_connection_sock.h>
#include <net/net_namespace.h>
#include <net/request_sock.h>
#include <net/inetpeer.h>
#include <net/sock.h>
#include <net/ipv6.h>
#include <net/dst.h>
#include <net/tcp.h>
#include <net/genetlink.h>

int sysctl_tcp_nometrics_save __read_mostly;

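/* Per-destination TCP Fast Open state: the last MSS the peer
 * advertised, a counter of recurring Fast Open SYN losses, and the
 * cached cookie.
 */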
struct tcp_fastopen_metrics {
	u16	mss;
	u16	syn_loss:10;		/* Recurring Fast Open SYN losses */
	unsigned long	last_syn_loss;	/* Last Fast Open SYN loss */
	struct	tcp_fastopen_cookie	cookie;
};

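/* One cache entry per remote peer, chained per hash bucket under RCU.
 * tcpm_lock is a bitmask of metrics the route has locked (and which
 * therefore must not be overwritten); tcpm_vals[] holds the values,
 * indexed by enum tcp_metric_index.
 */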
struct tcp_metrics_block {
	struct tcp_metrics_block __rcu	*tcpm_next;
	struct inetpeer_addr		tcpm_addr;
	unsigned long			tcpm_stamp;
	u32				tcpm_ts;
	u32				tcpm_ts_stamp;
	u32				tcpm_lock;
	u32				tcpm_vals[TCP_METRIC_MAX + 1];
	struct tcp_fastopen_metrics	tcpm_fastopen;

	struct rcu_head			rcu_head;
};

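/* Metric accessors.  RTT and RTTVAR are kept in tcpm_vals[] in
 * milliseconds, so the *_jiffies()/*_msecs() helpers convert at the
 * boundary; callers are expected to test tcp_metric_locked() before
 * writing.
 */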
static bool tcp_metric_locked(struct tcp_metrics_block *tm,
			      enum tcp_metric_index idx)
{
	return tm->tcpm_lock & (1 << idx);
}

static u32 tcp_metric_get(struct tcp_metrics_block *tm,
			  enum tcp_metric_index idx)
{
	return tm->tcpm_vals[idx];
}

static u32 tcp_metric_get_jiffies(struct tcp_metrics_block *tm,
				  enum tcp_metric_index idx)
{
	return msecs_to_jiffies(tm->tcpm_vals[idx]);
}

static void tcp_metric_set(struct tcp_metrics_block *tm,
			   enum tcp_metric_index idx,
			   u32 val)
{
	tm->tcpm_vals[idx] = val;
}

static void tcp_metric_set_msecs(struct tcp_metrics_block *tm,
				 enum tcp_metric_index idx,
				 u32 val)
{
	tm->tcpm_vals[idx] = jiffies_to_msecs(val);
}

static bool addr_same(const struct inetpeer_addr *a,
		      const struct inetpeer_addr *b)
{
	const struct in6_addr *a6, *b6;

	if (a->family != b->family)
		return false;
	if (a->family == AF_INET)
		return a->addr.a4 == b->addr.a4;

	a6 = (const struct in6_addr *) &a->addr.a6[0];
	b6 = (const struct in6_addr *) &b->addr.a6[0];

	return ipv6_addr_equal(a6, b6);
}

struct tcpm_hash_bucket {
	struct tcp_metrics_block __rcu	*chain;
};

static DEFINE_SPINLOCK(tcp_metrics_lock);

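/* (Re)initialize a cache entry from the route: copy the raw RTAX_*
 * metrics and record which of them the route has locked.
 * fastopen_clear is false only for the periodic refresh done by
 * tcpm_check_stamp(), so Fast Open state survives that refresh.
 */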
static void tcpm_suck_dst(struct tcp_metrics_block *tm, struct dst_entry *dst,
			  bool fastopen_clear)
{
	u32 val;

	tm->tcpm_stamp = jiffies;

	val = 0;
	if (dst_metric_locked(dst, RTAX_RTT))
		val |= 1 << TCP_METRIC_RTT;
	if (dst_metric_locked(dst, RTAX_RTTVAR))
		val |= 1 << TCP_METRIC_RTTVAR;
	if (dst_metric_locked(dst, RTAX_SSTHRESH))
		val |= 1 << TCP_METRIC_SSTHRESH;
	if (dst_metric_locked(dst, RTAX_CWND))
		val |= 1 << TCP_METRIC_CWND;
	if (dst_metric_locked(dst, RTAX_REORDERING))
		val |= 1 << TCP_METRIC_REORDERING;
	tm->tcpm_lock = val;

	tm->tcpm_vals[TCP_METRIC_RTT] = dst_metric_raw(dst, RTAX_RTT);
	tm->tcpm_vals[TCP_METRIC_RTTVAR] = dst_metric_raw(dst, RTAX_RTTVAR);
	tm->tcpm_vals[TCP_METRIC_SSTHRESH] = dst_metric_raw(dst, RTAX_SSTHRESH);
	tm->tcpm_vals[TCP_METRIC_CWND] = dst_metric_raw(dst, RTAX_CWND);
	tm->tcpm_vals[TCP_METRIC_REORDERING] = dst_metric_raw(dst, RTAX_REORDERING);
	tm->tcpm_ts = 0;
	tm->tcpm_ts_stamp = 0;
	if (fastopen_clear) {
		tm->tcpm_fastopen.mss = 0;
		tm->tcpm_fastopen.syn_loss = 0;
		tm->tcpm_fastopen.cookie.len = 0;
	}
}

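/* Create a metrics block for addr.  When reclaim is true the bucket
 * chain already hit TCP_METRICS_RECLAIM_DEPTH during lookup, so the
 * least recently stamped entry of the chain is recycled in place
 * instead of allocating a new block.
 */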
static struct tcp_metrics_block *tcpm_new(struct dst_entry *dst,
					  struct inetpeer_addr *addr,
					  unsigned int hash,
					  bool reclaim)
{
	struct tcp_metrics_block *tm;
	struct net *net;

	spin_lock_bh(&tcp_metrics_lock);
	net = dev_net(dst->dev);
	if (unlikely(reclaim)) {
		struct tcp_metrics_block *oldest;

		oldest = rcu_dereference(net->ipv4.tcp_metrics_hash[hash].chain);
		for (tm = rcu_dereference(oldest->tcpm_next); tm;
		     tm = rcu_dereference(tm->tcpm_next)) {
			if (time_before(tm->tcpm_stamp, oldest->tcpm_stamp))
				oldest = tm;
		}
		tm = oldest;
	} else {
		tm = kmalloc(sizeof(*tm), GFP_ATOMIC);
		if (!tm)
			goto out_unlock;
	}
	tm->tcpm_addr = *addr;

	tcpm_suck_dst(tm, dst, true);

	if (likely(!reclaim)) {
		tm->tcpm_next = net->ipv4.tcp_metrics_hash[hash].chain;
		rcu_assign_pointer(net->ipv4.tcp_metrics_hash[hash].chain, tm);
	}

out_unlock:
	spin_unlock_bh(&tcp_metrics_lock);
	return tm;
}

#define TCP_METRICS_TIMEOUT		(60 * 60 * HZ)

static void tcpm_check_stamp(struct tcp_metrics_block *tm, struct dst_entry *dst)
{
	if (tm && unlikely(time_after(jiffies, tm->tcpm_stamp + TCP_METRICS_TIMEOUT)))
		tcpm_suck_dst(tm, dst, false);
}

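/* Lookups count chain depth as they walk.  A miss deeper than
 * TCP_METRICS_RECLAIM_DEPTH is encoded as the TCP_METRICS_RECLAIM_PTR
 * sentinel, telling tcp_get_metrics() to recycle an existing entry
 * rather than let the chain keep growing.
 */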
#define TCP_METRICS_RECLAIM_DEPTH	5
#define TCP_METRICS_RECLAIM_PTR		(struct tcp_metrics_block *) 0x1UL

static struct tcp_metrics_block *tcp_get_encode(struct tcp_metrics_block *tm, int depth)
{
	if (tm)
		return tm;
	if (depth > TCP_METRICS_RECLAIM_DEPTH)
		return TCP_METRICS_RECLAIM_PTR;
	return NULL;
}

static struct tcp_metrics_block *__tcp_get_metrics(const struct inetpeer_addr *addr,
						   struct net *net, unsigned int hash)
{
	struct tcp_metrics_block *tm;
	int depth = 0;

	for (tm = rcu_dereference(net->ipv4.tcp_metrics_hash[hash].chain); tm;
	     tm = rcu_dereference(tm->tcpm_next)) {
		if (addr_same(&tm->tcpm_addr, addr))
			break;
		depth++;
	}
	return tcp_get_encode(tm, depth);
}

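/* The lookup variants below share one pattern: build an inetpeer_addr
 * from the peer's destination address, derive a provisional hash from
 * it (the raw IPv4 address, or ipv6_addr_hash() for IPv6), then fold
 * it into the per-namespace table with hash_32().
 */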
static struct tcp_metrics_block *__tcp_get_metrics_req(struct request_sock *req,
						       struct dst_entry *dst)
{
	struct tcp_metrics_block *tm;
	struct inetpeer_addr addr;
	unsigned int hash;
	struct net *net;

	addr.family = req->rsk_ops->family;
	switch (addr.family) {
	case AF_INET:
		addr.addr.a4 = inet_rsk(req)->rmt_addr;
		hash = (__force unsigned int) addr.addr.a4;
		break;
	case AF_INET6:
		*(struct in6_addr *)addr.addr.a6 = inet6_rsk(req)->rmt_addr;
		hash = ipv6_addr_hash(&inet6_rsk(req)->rmt_addr);
		break;
	default:
		return NULL;
	}

	net = dev_net(dst->dev);
	hash = hash_32(hash, net->ipv4.tcp_metrics_hash_log);

	for (tm = rcu_dereference(net->ipv4.tcp_metrics_hash[hash].chain); tm;
	     tm = rcu_dereference(tm->tcpm_next)) {
		if (addr_same(&tm->tcpm_addr, &addr))
			break;
	}
	tcpm_check_stamp(tm, dst);
	return tm;
}

static struct tcp_metrics_block *__tcp_get_metrics_tw(struct inet_timewait_sock *tw)
{
	struct inet6_timewait_sock *tw6;
	struct tcp_metrics_block *tm;
	struct inetpeer_addr addr;
	unsigned int hash;
	struct net *net;

	addr.family = tw->tw_family;
	switch (addr.family) {
	case AF_INET:
		addr.addr.a4 = tw->tw_daddr;
		hash = (__force unsigned int) addr.addr.a4;
		break;
	case AF_INET6:
		tw6 = inet6_twsk((struct sock *)tw);
		*(struct in6_addr *)addr.addr.a6 = tw6->tw_v6_daddr;
		hash = ipv6_addr_hash(&tw6->tw_v6_daddr);
		break;
	default:
		return NULL;
	}

	net = twsk_net(tw);
	hash = hash_32(hash, net->ipv4.tcp_metrics_hash_log);

	for (tm = rcu_dereference(net->ipv4.tcp_metrics_hash[hash].chain); tm;
	     tm = rcu_dereference(tm->tcpm_next)) {
		if (addr_same(&tm->tcpm_addr, &addr))
			break;
	}
	return tm;
}

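/* Main lookup used on full sockets: find the peer's metrics block and,
 * if create is set, allocate or reclaim one on a miss.  Callers hold
 * rcu_read_lock().
 */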
static struct tcp_metrics_block *tcp_get_metrics(struct sock *sk,
						 struct dst_entry *dst,
						 bool create)
{
	struct tcp_metrics_block *tm;
	struct inetpeer_addr addr;
	unsigned int hash;
	struct net *net;
	bool reclaim;

	addr.family = sk->sk_family;
	switch (addr.family) {
	case AF_INET:
		addr.addr.a4 = inet_sk(sk)->inet_daddr;
		hash = (__force unsigned int) addr.addr.a4;
		break;
	case AF_INET6:
		*(struct in6_addr *)addr.addr.a6 = inet6_sk(sk)->daddr;
		hash = ipv6_addr_hash(&inet6_sk(sk)->daddr);
		break;
	default:
		return NULL;
	}

	net = dev_net(dst->dev);
	hash = hash_32(hash, net->ipv4.tcp_metrics_hash_log);

	tm = __tcp_get_metrics(&addr, net, hash);
	reclaim = false;
	if (tm == TCP_METRICS_RECLAIM_PTR) {
		reclaim = true;
		tm = NULL;
	}
	if (!tm && create)
		tm = tcpm_new(dst, &addr, hash, reclaim);
	else
		tcpm_check_stamp(tm, dst);

	return tm;
}

/* Save metrics learned by this TCP session.  This function is called
 * only when TCP finishes successfully, i.e. when it enters TIME-WAIT
 * or goes from LAST-ACK to CLOSE.
 */
void tcp_update_metrics(struct sock *sk)
{
	const struct inet_connection_sock *icsk = inet_csk(sk);
	struct dst_entry *dst = __sk_dst_get(sk);
	struct tcp_sock *tp = tcp_sk(sk);
	struct tcp_metrics_block *tm;
	unsigned long rtt;
	u32 val;
	int m;

	if (sysctl_tcp_nometrics_save || !dst)
		return;

	if (dst->flags & DST_HOST)
		dst_confirm(dst);

	rcu_read_lock();
	if (icsk->icsk_backoff || !tp->srtt) {
		/* This session failed to estimate rtt.  Why?
		 * Probably, no packets returned in time.  Reset our
		 * results.
		 */
		tm = tcp_get_metrics(sk, dst, false);
		if (tm && !tcp_metric_locked(tm, TCP_METRIC_RTT))
			tcp_metric_set(tm, TCP_METRIC_RTT, 0);
		goto out_unlock;
	} else
		tm = tcp_get_metrics(sk, dst, true);

	if (!tm)
		goto out_unlock;

	rtt = tcp_metric_get_jiffies(tm, TCP_METRIC_RTT);
	m = rtt - tp->srtt;

	/* If the newly calculated rtt is larger than the stored one,
	 * store the new one.  Otherwise, use EWMA.  Remember, rtt
	 * overestimation is always better than underestimation.
	 */
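	/* (The EWMA below pulls the cached RTT toward this session's
	 * srtt with gain 1/8, matching RFC 6298's alpha.)
	 */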
	if (!tcp_metric_locked(tm, TCP_METRIC_RTT)) {
		if (m <= 0)
			rtt = tp->srtt;
		else
			rtt -= (m >> 3);
		tcp_metric_set_msecs(tm, TCP_METRIC_RTT, rtt);
	}

	if (!tcp_metric_locked(tm, TCP_METRIC_RTTVAR)) {
		unsigned long var;

		if (m < 0)
			m = -m;

		/* Scale deviation to rttvar fixed point */
		m >>= 1;
		if (m < tp->mdev)
			m = tp->mdev;

		var = tcp_metric_get_jiffies(tm, TCP_METRIC_RTTVAR);
		if (m >= var)
			var = m;
		else
			var -= (var - m) >> 2;

		tcp_metric_set_msecs(tm, TCP_METRIC_RTTVAR, var);
	}

	if (tcp_in_initial_slowstart(tp)) {
		/* Slow start still did not finish. */
		if (!tcp_metric_locked(tm, TCP_METRIC_SSTHRESH)) {
			val = tcp_metric_get(tm, TCP_METRIC_SSTHRESH);
			if (val && (tp->snd_cwnd >> 1) > val)
				tcp_metric_set(tm, TCP_METRIC_SSTHRESH,
					       tp->snd_cwnd >> 1);
		}
		if (!tcp_metric_locked(tm, TCP_METRIC_CWND)) {
			val = tcp_metric_get(tm, TCP_METRIC_CWND);
			if (tp->snd_cwnd > val)
				tcp_metric_set(tm, TCP_METRIC_CWND,
					       tp->snd_cwnd);
		}
	} else if (tp->snd_cwnd > tp->snd_ssthresh &&
		   icsk->icsk_ca_state == TCP_CA_Open) {
		/* Cong. avoidance phase, cwnd is reliable. */
		if (!tcp_metric_locked(tm, TCP_METRIC_SSTHRESH))
			tcp_metric_set(tm, TCP_METRIC_SSTHRESH,
				       max(tp->snd_cwnd >> 1, tp->snd_ssthresh));
		if (!tcp_metric_locked(tm, TCP_METRIC_CWND)) {
			val = tcp_metric_get(tm, TCP_METRIC_CWND);
			tcp_metric_set(tm, TCP_METRIC_CWND, (val + tp->snd_cwnd) >> 1);
		}
	} else {
		/* Else slow start did not finish, cwnd is nonsense and
		 * ssthresh may also be invalid.
		 */
		if (!tcp_metric_locked(tm, TCP_METRIC_CWND)) {
			val = tcp_metric_get(tm, TCP_METRIC_CWND);
			tcp_metric_set(tm, TCP_METRIC_CWND,
				       (val + tp->snd_ssthresh) >> 1);
		}
		if (!tcp_metric_locked(tm, TCP_METRIC_SSTHRESH)) {
			val = tcp_metric_get(tm, TCP_METRIC_SSTHRESH);
			if (val && tp->snd_ssthresh > val)
				tcp_metric_set(tm, TCP_METRIC_SSTHRESH,
					       tp->snd_ssthresh);
		}
		if (!tcp_metric_locked(tm, TCP_METRIC_REORDERING)) {
			val = tcp_metric_get(tm, TCP_METRIC_REORDERING);
			if (val < tp->reordering &&
			    tp->reordering != sysctl_tcp_reordering)
				tcp_metric_set(tm, TCP_METRIC_REORDERING,
					       tp->reordering);
		}
	}
	tm->tcpm_stamp = jiffies;
out_unlock:
	rcu_read_unlock();
}

/* Initialize metrics on socket. */

void tcp_init_metrics(struct sock *sk)
{
	struct dst_entry *dst = __sk_dst_get(sk);
	struct tcp_sock *tp = tcp_sk(sk);
	struct tcp_metrics_block *tm;
	u32 val;

	if (dst == NULL)
		goto reset;

	dst_confirm(dst);

	rcu_read_lock();
	tm = tcp_get_metrics(sk, dst, true);
	if (!tm) {
		rcu_read_unlock();
		goto reset;
	}

	if (tcp_metric_locked(tm, TCP_METRIC_CWND))
		tp->snd_cwnd_clamp = tcp_metric_get(tm, TCP_METRIC_CWND);

	val = tcp_metric_get(tm, TCP_METRIC_SSTHRESH);
	if (val) {
		tp->snd_ssthresh = val;
		if (tp->snd_ssthresh > tp->snd_cwnd_clamp)
			tp->snd_ssthresh = tp->snd_cwnd_clamp;
	} else {
		/* ssthresh may have been reduced unnecessarily during
		 * 3WHS.  Restore it back to its initial default.
		 */
		tp->snd_ssthresh = TCP_INFINITE_SSTHRESH;
	}
	val = tcp_metric_get(tm, TCP_METRIC_REORDERING);
	if (val && tp->reordering != val) {
		tcp_disable_fack(tp);
		tcp_disable_early_retrans(tp);
		tp->reordering = val;
	}

	val = tcp_metric_get(tm, TCP_METRIC_RTT);
	if (val == 0 || tp->srtt == 0) {
		rcu_read_unlock();
		goto reset;
	}
	/* Initial rtt is determined from SYN,SYN-ACK.
	 * The segment is small and rtt may appear much
	 * less than the real one.  Use per-dst memory
	 * to make it more realistic.
	 *
	 * A bit of theory.  RTT is the time passed after a "normal" sized
	 * packet is sent until it is ACKed.  In normal circumstances
	 * sending small packets forces the peer to delay ACKs and the
	 * calculation is correct too.  The algorithm is adaptive and,
	 * provided we follow specs, it NEVER underestimates RTT.  BUT!
	 * If the peer tries clever tricks, sort of "quick acks" for long
	 * enough to decrease RTT to a low value, and then abruptly stops
	 * doing so and starts to delay ACKs, wait for troubles.
	 */
	val = msecs_to_jiffies(val);
	if (val > tp->srtt) {
		tp->srtt = val;
		tp->rtt_seq = tp->snd_nxt;
	}
	val = tcp_metric_get_jiffies(tm, TCP_METRIC_RTTVAR);
	if (val > tp->mdev) {
		tp->mdev = val;
		tp->mdev_max = tp->rttvar = max(tp->mdev, tcp_rto_min(sk));
	}
	rcu_read_unlock();

	tcp_set_rto(sk);
reset:
	if (tp->srtt == 0) {
		/* RFC6298 5.7: we've failed to get a valid RTT sample from
		 * the 3WHS.  This is most likely due to retransmission,
		 * including a spurious one.  Reset the RTO back to 3 secs
		 * from the more aggressive 1 sec to avoid more spurious
		 * retransmission.
		 */
		tp->mdev = tp->mdev_max = tp->rttvar = TCP_TIMEOUT_FALLBACK;
		inet_csk(sk)->icsk_rto = TCP_TIMEOUT_FALLBACK;
	}
	/* Cut cwnd down to 1 per RFC5681 if SYN or SYN-ACK has been
	 * retransmitted.  In light of RFC6298's more aggressive 1 sec
	 * initRTO, we only reset cwnd when more than 1 SYN/SYN-ACK
	 * retransmission has occurred.
	 */
	if (tp->total_retrans > 1)
		tp->snd_cwnd = 1;
	else
		tp->snd_cwnd = tcp_init_cwnd(tp, dst);
	tp->snd_cwnd_stamp = tcp_time_stamp;
}

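/* With paws_check, the peer fails the test when we saw a timestamp
 * from it within the last TCP_PAWS_MSL seconds and the cached value is
 * ahead of the one offered now by more than TCP_PAWS_WINDOW.  Without
 * paws_check, a peer is "proven" once both an RTT sample and a
 * timestamp are cached for it.
 */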
bool tcp_peer_is_proven(struct request_sock *req, struct dst_entry *dst, bool paws_check)
{
	struct tcp_metrics_block *tm;
	bool ret;

	if (!dst)
		return false;

	rcu_read_lock();
	tm = __tcp_get_metrics_req(req, dst);
	if (paws_check) {
		if (tm &&
		    (u32)get_seconds() - tm->tcpm_ts_stamp < TCP_PAWS_MSL &&
		    (s32)(tm->tcpm_ts - req->ts_recent) > TCP_PAWS_WINDOW)
			ret = false;
		else
			ret = true;
	} else {
		if (tm && tcp_metric_get(tm, TCP_METRIC_RTT) && tm->tcpm_ts_stamp)
			ret = true;
		else
			ret = false;
	}
	rcu_read_unlock();

	return ret;
}
EXPORT_SYMBOL_GPL(tcp_peer_is_proven);

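/* Seed a new connection's PAWS state from the cache: if the last
 * timestamp seen from this peer is younger than TCP_PAWS_MSL, adopt it
 * as ts_recent so old duplicate segments can still be rejected.
 */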
void tcp_fetch_timewait_stamp(struct sock *sk, struct dst_entry *dst)
{
	struct tcp_metrics_block *tm;

	rcu_read_lock();
	tm = tcp_get_metrics(sk, dst, true);
	if (tm) {
		struct tcp_sock *tp = tcp_sk(sk);

		if ((u32)get_seconds() - tm->tcpm_ts_stamp <= TCP_PAWS_MSL) {
			tp->rx_opt.ts_recent_stamp = tm->tcpm_ts_stamp;
			tp->rx_opt.ts_recent = tm->tcpm_ts;
		}
	}
	rcu_read_unlock();
}
EXPORT_SYMBOL_GPL(tcp_fetch_timewait_stamp);

/* VJ's idea.  Save last timestamp seen from this destination and hold
 * it at least for normal timewait interval to use for duplicate
 * segment detection in subsequent connections, before they enter
 * synchronized state.
 */
bool tcp_remember_stamp(struct sock *sk)
{
	struct dst_entry *dst = __sk_dst_get(sk);
	bool ret = false;

	if (dst) {
		struct tcp_metrics_block *tm;

		rcu_read_lock();
		tm = tcp_get_metrics(sk, dst, true);
		if (tm) {
			struct tcp_sock *tp = tcp_sk(sk);

			if ((s32)(tm->tcpm_ts - tp->rx_opt.ts_recent) <= 0 ||
			    ((u32)get_seconds() - tm->tcpm_ts_stamp > TCP_PAWS_MSL &&
			     tm->tcpm_ts_stamp <= (u32)tp->rx_opt.ts_recent_stamp)) {
				tm->tcpm_ts_stamp = (u32)tp->rx_opt.ts_recent_stamp;
				tm->tcpm_ts = tp->rx_opt.ts_recent;
			}
			ret = true;
		}
		rcu_read_unlock();
	}
	return ret;
}

bool tcp_tw_remember_stamp(struct inet_timewait_sock *tw)
{
	struct tcp_metrics_block *tm;
	bool ret = false;

	rcu_read_lock();
	tm = __tcp_get_metrics_tw(tw);
	if (tm) {
		const struct tcp_timewait_sock *tcptw;
		struct sock *sk = (struct sock *) tw;

		tcptw = tcp_twsk(sk);
		if ((s32)(tm->tcpm_ts - tcptw->tw_ts_recent) <= 0 ||
		    ((u32)get_seconds() - tm->tcpm_ts_stamp > TCP_PAWS_MSL &&
		     tm->tcpm_ts_stamp <= (u32)tcptw->tw_ts_recent_stamp)) {
			tm->tcpm_ts_stamp = (u32)tcptw->tw_ts_recent_stamp;
			tm->tcpm_ts = tcptw->tw_ts_recent;
		}
		ret = true;
	}
	rcu_read_unlock();

	return ret;
}

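/* The Fast Open fields are too wide to be read atomically as a group,
 * so readers take a seqlock snapshot and simply retry if they race
 * with a writer updating the cookie, MSS or SYN-loss state.
 */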
static DEFINE_SEQLOCK(fastopen_seqlock);

void tcp_fastopen_cache_get(struct sock *sk, u16 *mss,
			    struct tcp_fastopen_cookie *cookie,
			    int *syn_loss, unsigned long *last_syn_loss)
{
	struct tcp_metrics_block *tm;

	rcu_read_lock();
	tm = tcp_get_metrics(sk, __sk_dst_get(sk), false);
	if (tm) {
		struct tcp_fastopen_metrics *tfom = &tm->tcpm_fastopen;
		unsigned int seq;

		do {
			seq = read_seqbegin(&fastopen_seqlock);
			if (tfom->mss)
				*mss = tfom->mss;
			*cookie = tfom->cookie;
			*syn_loss = tfom->syn_loss;
			*last_syn_loss = *syn_loss ? tfom->last_syn_loss : 0;
		} while (read_seqretry(&fastopen_seqlock, seq));
	}
	rcu_read_unlock();
}

void tcp_fastopen_cache_set(struct sock *sk, u16 mss,
			    struct tcp_fastopen_cookie *cookie, bool syn_lost)
{
	struct tcp_metrics_block *tm;

	rcu_read_lock();
	tm = tcp_get_metrics(sk, __sk_dst_get(sk), true);
	if (tm) {
		struct tcp_fastopen_metrics *tfom = &tm->tcpm_fastopen;

		write_seqlock_bh(&fastopen_seqlock);
		tfom->mss = mss;
		if (cookie->len > 0)
			tfom->cookie = *cookie;
		if (syn_lost) {
			++tfom->syn_loss;
			tfom->last_syn_loss = jiffies;
		} else
			tfom->syn_loss = 0;
		write_sequnlock_bh(&fastopen_seqlock);
	}
	rcu_read_unlock();
}

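/* Generic netlink interface ("tcp_metrics" family): TCP_METRICS_CMD_GET
 * looks up or dumps entries, TCP_METRICS_CMD_DEL removes one entry or
 * flushes the whole cache.  Userspace tools such as iproute2's
 * "ip tcp_metrics" talk to this family.
 */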
static struct genl_family tcp_metrics_nl_family = {
	.id		= GENL_ID_GENERATE,
	.hdrsize	= 0,
	.name		= TCP_METRICS_GENL_NAME,
	.version	= TCP_METRICS_GENL_VERSION,
	.maxattr	= TCP_METRICS_ATTR_MAX,
	.netnsok	= true,
};

static struct nla_policy tcp_metrics_nl_policy[TCP_METRICS_ATTR_MAX + 1] = {
	[TCP_METRICS_ATTR_ADDR_IPV4]	= { .type = NLA_U32, },
	[TCP_METRICS_ATTR_ADDR_IPV6]	= { .type = NLA_BINARY,
					    .len = sizeof(struct in6_addr), },
	/* The following attributes are not received for GET/DEL;
	 * we keep them for reference.
	 */
#if 0
	[TCP_METRICS_ATTR_AGE]			= { .type = NLA_MSECS, },
	[TCP_METRICS_ATTR_TW_TSVAL]		= { .type = NLA_U32, },
	[TCP_METRICS_ATTR_TW_TS_STAMP]		= { .type = NLA_S32, },
	[TCP_METRICS_ATTR_VALS]			= { .type = NLA_NESTED, },
	[TCP_METRICS_ATTR_FOPEN_MSS]		= { .type = NLA_U16, },
	[TCP_METRICS_ATTR_FOPEN_SYN_DROPS]	= { .type = NLA_U16, },
	[TCP_METRICS_ATTR_FOPEN_SYN_DROP_TS]	= { .type = NLA_MSECS, },
	[TCP_METRICS_ATTR_FOPEN_COOKIE]		= { .type = NLA_BINARY,
						    .len = TCP_FASTOPEN_COOKIE_MAX, },
#endif
};

/* Add attributes, caller cancels its header on failure */
static int tcp_metrics_fill_info(struct sk_buff *msg,
				 struct tcp_metrics_block *tm)
{
	struct nlattr *nest;
	int i;

	switch (tm->tcpm_addr.family) {
	case AF_INET:
		if (nla_put_be32(msg, TCP_METRICS_ATTR_ADDR_IPV4,
				 tm->tcpm_addr.addr.a4) < 0)
			goto nla_put_failure;
		break;
	case AF_INET6:
		if (nla_put(msg, TCP_METRICS_ATTR_ADDR_IPV6, 16,
			    tm->tcpm_addr.addr.a6) < 0)
			goto nla_put_failure;
		break;
	default:
		return -EAFNOSUPPORT;
	}

	if (nla_put_msecs(msg, TCP_METRICS_ATTR_AGE,
			  jiffies - tm->tcpm_stamp) < 0)
		goto nla_put_failure;
	if (tm->tcpm_ts_stamp) {
		if (nla_put_s32(msg, TCP_METRICS_ATTR_TW_TS_STAMP,
				(s32) (get_seconds() - tm->tcpm_ts_stamp)) < 0)
			goto nla_put_failure;
		if (nla_put_u32(msg, TCP_METRICS_ATTR_TW_TSVAL,
				tm->tcpm_ts) < 0)
			goto nla_put_failure;
	}

	{
		int n = 0;

		nest = nla_nest_start(msg, TCP_METRICS_ATTR_VALS);
		if (!nest)
			goto nla_put_failure;
		for (i = 0; i < TCP_METRIC_MAX + 1; i++) {
			if (!tm->tcpm_vals[i])
				continue;
			if (nla_put_u32(msg, i + 1, tm->tcpm_vals[i]) < 0)
				goto nla_put_failure;
			n++;
		}
		if (n)
			nla_nest_end(msg, nest);
		else
			nla_nest_cancel(msg, nest);
	}

	{
		struct tcp_fastopen_metrics tfom_copy[1], *tfom;
		unsigned int seq;

		do {
			seq = read_seqbegin(&fastopen_seqlock);
			tfom_copy[0] = tm->tcpm_fastopen;
		} while (read_seqretry(&fastopen_seqlock, seq));

		tfom = tfom_copy;
		if (tfom->mss &&
		    nla_put_u16(msg, TCP_METRICS_ATTR_FOPEN_MSS,
				tfom->mss) < 0)
			goto nla_put_failure;
		if (tfom->syn_loss &&
		    (nla_put_u16(msg, TCP_METRICS_ATTR_FOPEN_SYN_DROPS,
				 tfom->syn_loss) < 0 ||
		     nla_put_msecs(msg, TCP_METRICS_ATTR_FOPEN_SYN_DROP_TS,
				   jiffies - tfom->last_syn_loss) < 0))
			goto nla_put_failure;
		if (tfom->cookie.len > 0 &&
		    nla_put(msg, TCP_METRICS_ATTR_FOPEN_COOKIE,
			    tfom->cookie.len, tfom->cookie.val) < 0)
			goto nla_put_failure;
	}

	return 0;

nla_put_failure:
	return -EMSGSIZE;
}

static int tcp_metrics_dump_info(struct sk_buff *skb,
				 struct netlink_callback *cb,
				 struct tcp_metrics_block *tm)
{
	void *hdr;

	hdr = genlmsg_put(skb, NETLINK_CB(cb->skb).portid, cb->nlh->nlmsg_seq,
			  &tcp_metrics_nl_family, NLM_F_MULTI,
			  TCP_METRICS_CMD_GET);
	if (!hdr)
		return -EMSGSIZE;

	if (tcp_metrics_fill_info(skb, tm) < 0)
		goto nla_put_failure;

	return genlmsg_end(skb, hdr);

nla_put_failure:
	genlmsg_cancel(skb, hdr);
	return -EMSGSIZE;
}

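/* The dump walks the hash table bucket by bucket; cb->args[0] and
 * cb->args[1] persist the (row, column) cursor between netlink dump
 * callbacks so large tables can be emitted in several messages.
 */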
static int tcp_metrics_nl_dump(struct sk_buff *skb,
			       struct netlink_callback *cb)
{
	struct net *net = sock_net(skb->sk);
	unsigned int max_rows = 1U << net->ipv4.tcp_metrics_hash_log;
	unsigned int row, s_row = cb->args[0];
	int s_col = cb->args[1], col = s_col;

	for (row = s_row; row < max_rows; row++, s_col = 0) {
		struct tcp_metrics_block *tm;
		struct tcpm_hash_bucket *hb = net->ipv4.tcp_metrics_hash + row;

		rcu_read_lock();
		for (col = 0, tm = rcu_dereference(hb->chain); tm;
		     tm = rcu_dereference(tm->tcpm_next), col++) {
			if (col < s_col)
				continue;
			if (tcp_metrics_dump_info(skb, cb, tm) < 0) {
				rcu_read_unlock();
				goto done;
			}
		}
		rcu_read_unlock();
	}

done:
	cb->args[0] = row;
	cb->args[1] = col;
	return skb->len;
}

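/* Parse the peer address attribute of a GET/DEL request and derive the
 * same provisional hash the kernel lookup paths use.  With optional
 * set, a missing address returns 1, which the DEL handler treats as
 * "flush everything".
 */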
static int parse_nl_addr(struct genl_info *info, struct inetpeer_addr *addr,
			 unsigned int *hash, int optional)
{
	struct nlattr *a;

	a = info->attrs[TCP_METRICS_ATTR_ADDR_IPV4];
	if (a) {
		addr->family = AF_INET;
		addr->addr.a4 = nla_get_be32(a);
		*hash = (__force unsigned int) addr->addr.a4;
		return 0;
	}
	a = info->attrs[TCP_METRICS_ATTR_ADDR_IPV6];
	if (a) {
		if (nla_len(a) != sizeof(struct in6_addr))
			return -EINVAL;
		addr->family = AF_INET6;
		memcpy(addr->addr.a6, nla_data(a), sizeof(addr->addr.a6));
		*hash = ipv6_addr_hash((struct in6_addr *) addr->addr.a6);
		return 0;
	}
	return optional ? 1 : -EAFNOSUPPORT;
}

static int tcp_metrics_nl_cmd_get(struct sk_buff *skb, struct genl_info *info)
{
	struct tcp_metrics_block *tm;
	struct inetpeer_addr addr;
	unsigned int hash;
	struct sk_buff *msg;
	struct net *net = genl_info_net(info);
	void *reply;
	int ret;

	ret = parse_nl_addr(info, &addr, &hash, 0);
	if (ret < 0)
		return ret;

	msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
	if (!msg)
		return -ENOMEM;

	reply = genlmsg_put_reply(msg, info, &tcp_metrics_nl_family, 0,
				  info->genlhdr->cmd);
	if (!reply)
		goto nla_put_failure;

	hash = hash_32(hash, net->ipv4.tcp_metrics_hash_log);
	ret = -ESRCH;
	rcu_read_lock();
	for (tm = rcu_dereference(net->ipv4.tcp_metrics_hash[hash].chain); tm;
	     tm = rcu_dereference(tm->tcpm_next)) {
		if (addr_same(&tm->tcpm_addr, &addr)) {
			ret = tcp_metrics_fill_info(msg, tm);
			break;
		}
	}
	rcu_read_unlock();
	if (ret < 0)
		goto out_free;

	genlmsg_end(msg, reply);
	return genlmsg_reply(msg, info);

nla_put_failure:
	ret = -EMSGSIZE;

out_free:
	nlmsg_free(msg);
	return ret;
}

#define deref_locked_genl(p)	\
	rcu_dereference_protected(p, lockdep_genl_is_held() && \
				     lockdep_is_held(&tcp_metrics_lock))

#define deref_genl(p)	rcu_dereference_protected(p, lockdep_genl_is_held())

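/* Flush: unhook each bucket chain under the spinlock, then free the
 * unlinked blocks with kfree_rcu() so concurrent RCU readers can
 * finish walking them safely.
 */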
static int tcp_metrics_flush_all(struct net *net)
{
	unsigned int max_rows = 1U << net->ipv4.tcp_metrics_hash_log;
	struct tcpm_hash_bucket *hb = net->ipv4.tcp_metrics_hash;
	struct tcp_metrics_block *tm;
	unsigned int row;

	for (row = 0; row < max_rows; row++, hb++) {
		spin_lock_bh(&tcp_metrics_lock);
		tm = deref_locked_genl(hb->chain);
		if (tm)
			hb->chain = NULL;
		spin_unlock_bh(&tcp_metrics_lock);
		while (tm) {
			struct tcp_metrics_block *next;

			next = deref_genl(tm->tcpm_next);
			kfree_rcu(tm, rcu_head);
			tm = next;
		}
	}
	return 0;
}

static int tcp_metrics_nl_cmd_del(struct sk_buff *skb, struct genl_info *info)
{
	struct tcpm_hash_bucket *hb;
	struct tcp_metrics_block *tm;
	struct tcp_metrics_block __rcu **pp;
	struct inetpeer_addr addr;
	unsigned int hash;
	struct net *net = genl_info_net(info);
	int ret;

	ret = parse_nl_addr(info, &addr, &hash, 1);
	if (ret < 0)
		return ret;
	if (ret > 0)
		return tcp_metrics_flush_all(net);

	hash = hash_32(hash, net->ipv4.tcp_metrics_hash_log);
	hb = net->ipv4.tcp_metrics_hash + hash;
	pp = &hb->chain;
	spin_lock_bh(&tcp_metrics_lock);
	for (tm = deref_locked_genl(*pp); tm;
	     pp = &tm->tcpm_next, tm = deref_locked_genl(*pp)) {
		if (addr_same(&tm->tcpm_addr, &addr)) {
			*pp = tm->tcpm_next;
			break;
		}
	}
	spin_unlock_bh(&tcp_metrics_lock);
	if (!tm)
		return -ESRCH;
	kfree_rcu(tm, rcu_head);
	return 0;
}

static struct genl_ops tcp_metrics_nl_ops[] = {
	{
		.cmd = TCP_METRICS_CMD_GET,
		.doit = tcp_metrics_nl_cmd_get,
		.dumpit = tcp_metrics_nl_dump,
		.policy = tcp_metrics_nl_policy,
		.flags = GENL_ADMIN_PERM,
	},
	{
		.cmd = TCP_METRICS_CMD_DEL,
		.doit = tcp_metrics_nl_cmd_del,
		.policy = tcp_metrics_nl_policy,
		.flags = GENL_ADMIN_PERM,
	},
};

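/* The hash table size in slots (rounded up to a power of two by
 * order_base_2() below) can be overridden at boot, e.g. with
 * "tcpmhash_entries=4096" on the kernel command line; otherwise it is
 * sized from available memory.  The per-namespace init tries kzalloc()
 * first and quietly falls back to vzalloc() for large tables.
 */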
static unsigned int tcpmhash_entries;
static int __init set_tcpmhash_entries(char *str)
{
	ssize_t ret;

	if (!str)
		return 0;

	ret = kstrtouint(str, 0, &tcpmhash_entries);
	if (ret)
		return 0;

	return 1;
}
__setup("tcpmhash_entries=", set_tcpmhash_entries);

static int __net_init tcp_net_metrics_init(struct net *net)
{
	size_t size;
	unsigned int slots;

	slots = tcpmhash_entries;
	if (!slots) {
		if (totalram_pages >= 128 * 1024)
			slots = 16 * 1024;
		else
			slots = 8 * 1024;
	}

	net->ipv4.tcp_metrics_hash_log = order_base_2(slots);
	size = sizeof(struct tcpm_hash_bucket) << net->ipv4.tcp_metrics_hash_log;

	net->ipv4.tcp_metrics_hash = kzalloc(size, GFP_KERNEL | __GFP_NOWARN);
	if (!net->ipv4.tcp_metrics_hash)
		net->ipv4.tcp_metrics_hash = vzalloc(size);

	if (!net->ipv4.tcp_metrics_hash)
		return -ENOMEM;

	return 0;
}

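/* On namespace teardown no RCU readers can still be walking the table
 * (hence rcu_dereference_protected(..., 1)), so the chains are freed
 * with plain kfree() rather than kfree_rcu().
 */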
static void __net_exit tcp_net_metrics_exit(struct net *net)
{
	unsigned int i;

	for (i = 0; i < (1U << net->ipv4.tcp_metrics_hash_log) ; i++) {
		struct tcp_metrics_block *tm, *next;

		tm = rcu_dereference_protected(net->ipv4.tcp_metrics_hash[i].chain, 1);
		while (tm) {
			next = rcu_dereference_protected(tm->tcpm_next, 1);
			kfree(tm);
			tm = next;
		}
	}
	if (is_vmalloc_addr(net->ipv4.tcp_metrics_hash))
		vfree(net->ipv4.tcp_metrics_hash);
	else
		kfree(net->ipv4.tcp_metrics_hash);
}

static __net_initdata struct pernet_operations tcp_net_metrics_ops = {
	.init = tcp_net_metrics_init,
	.exit = tcp_net_metrics_exit,
};

void __init tcp_metrics_init(void)
{
	int ret;

	ret = register_pernet_subsys(&tcp_net_metrics_ops);
	if (ret < 0)
		goto cleanup;
	ret = genl_register_family_with_ops(&tcp_metrics_nl_family,
					    tcp_metrics_nl_ops,
					    ARRAY_SIZE(tcp_metrics_nl_ops));
	if (ret < 0)
		goto cleanup_subsys;
	return;

cleanup_subsys:
	unregister_pernet_subsys(&tcp_net_metrics_ops);

cleanup:
	return;
}