/*
 * INETPEER - A storage for permanent information about peers
 *
 * This source is covered by the GNU GPL, the same as all kernel sources.
 *
 * Authors:	Andrey V. Savochkin <[email protected]>
 */

#include <linux/cache.h>
#include <linux/module.h>
#include <linux/types.h>
#include <linux/slab.h>
#include <linux/interrupt.h>
#include <linux/spinlock.h>
#include <linux/random.h>
#include <linux/timer.h>
#include <linux/time.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/net.h>
#include <linux/workqueue.h>
#include <net/ip.h>
#include <net/inetpeer.h>
#include <net/secure_seq.h>

/*
 * Theory of operations.
 * We keep one entry for each peer IP address.  Each node contains long-living
 * information about the peer which doesn't depend on routes.
 *
 * Nodes are removed only when the reference counter goes to 0.
 * Once that happens, the node may be removed after a sufficient amount of
 * time has passed since its last use.  Less-recently-used entries can
 * also be removed if the pool is overloaded, i.e. if the total number of
 * entries is greater than or equal to the threshold.
 *
 * The node pool is organised as an RB tree.
 * Such an implementation has been chosen not just for fun.  It's a way to
 * prevent easy and efficient DoS attacks by creating hash collisions.  A huge
 * number of long-living nodes in a single hash slot would significantly delay
 * lookups performed with disabled BHs.
 *
 * Serialisation issues.
 * 1.  Nodes may appear in the tree only with the pool lock held.
 * 2.  Nodes may disappear from the tree only with the pool lock held
 *     AND the reference count being 0.
 * 3.  Global variable peer_total is modified under the pool lock.
 * 4.  struct inet_peer fields modification:
 *	rb_node: pool lock
 *	refcnt: atomically against modifications on other CPUs;
 *		usually under some other lock to prevent node disappearing
 *	daddr: unchangeable
 */

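/* A condensed sketch of rule 2 above (illustrative only; it mirrors the
 * removal path in inet_peer_gc() further down): unlinking is attempted
 * with base->lock write-held, and succeeds only when we are the last
 * reference holder.
 *
 *	write_seqlock_bh(&base->lock);
 *	if (refcount_dec_if_one(&p->refcnt)) {
 *		rb_erase(&p->rb_node, &base->rb_root);
 *		base->total--;
 *		kfree_rcu(p, rcu);
 *	}
 *	write_sequnlock_bh(&base->lock);
 */
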
static struct kmem_cache *peer_cachep __ro_after_init;

void inet_peer_base_init(struct inet_peer_base *bp)
{
	bp->rb_root = RB_ROOT;
	seqlock_init(&bp->lock);
	bp->total = 0;
}
EXPORT_SYMBOL_GPL(inet_peer_base_init);

#define PEER_MAX_GC 32

/* Exported for sysctl_net_ipv4. */
int inet_peer_threshold __read_mostly;	/* start to throw entries more
					 * aggressively at this stage */
int inet_peer_minttl __read_mostly = 120 * HZ;	/* TTL under high load: 120 sec */
int inet_peer_maxttl __read_mostly = 10 * 60 * HZ;	/* usual time to live: 10 min */

/* Called from ip_output.c:ip_init */
void __init inet_initpeers(void)
{
	u64 nr_entries;

	/* 1% of physical memory */
	nr_entries = div64_ul((u64)totalram_pages() << PAGE_SHIFT,
			      100 * L1_CACHE_ALIGN(sizeof(struct inet_peer)));

	inet_peer_threshold = clamp_val(nr_entries, 4096, 65536 + 128);
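
	/* A worked example of the sizing rule above (illustrative numbers;
	 * assuming L1_CACHE_ALIGN(sizeof(struct inet_peer)) comes to 192
	 * bytes): a 4 GiB machine yields (4 << 30) / (100 * 192) ~= 223696
	 * entries, which clamp_val() caps at 65536 + 128 = 65664.  A 256 MiB
	 * machine yields ~13981, inside [4096, 65664], so it is kept as-is.
	 */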

	peer_cachep = KMEM_CACHE(inet_peer, SLAB_HWCACHE_ALIGN | SLAB_PANIC);
}

/* Called with rcu_read_lock() or base->lock held */
static struct inet_peer *lookup(const struct inetpeer_addr *daddr,
				struct inet_peer_base *base,
				unsigned int seq,
				struct inet_peer *gc_stack[],
				unsigned int *gc_cnt,
				struct rb_node **parent_p,
				struct rb_node ***pp_p)
{
	struct rb_node **pp, *parent, *next;
	struct inet_peer *p;
	u32 now;

	pp = &base->rb_root.rb_node;
	parent = NULL;
	while (1) {
		int cmp;

		next = rcu_dereference_raw(*pp);
		if (!next)
			break;
		parent = next;
		p = rb_entry(parent, struct inet_peer, rb_node);
		cmp = inetpeer_addr_cmp(daddr, &p->daddr);
		if (cmp == 0) {
			now = jiffies;
			if (READ_ONCE(p->dtime) != now)
				WRITE_ONCE(p->dtime, now);
			return p;
		}
		if (gc_stack) {
			if (*gc_cnt < PEER_MAX_GC)
				gc_stack[(*gc_cnt)++] = p;
		} else if (unlikely(read_seqretry(&base->lock, seq))) {
			break;
		}
		if (cmp == -1)
			pp = &next->rb_left;
		else
			pp = &next->rb_right;
	}
	*parent_p = parent;
	*pp_p = pp;
	return NULL;
}

/* perform garbage collect on all items stacked during a lookup */
static void inet_peer_gc(struct inet_peer_base *base,
			 struct inet_peer *gc_stack[],
			 unsigned int gc_cnt)
{
	int peer_threshold, peer_maxttl, peer_minttl;
	struct inet_peer *p;
	__u32 delta, ttl;
	int i;

	peer_threshold = READ_ONCE(inet_peer_threshold);
	peer_maxttl = READ_ONCE(inet_peer_maxttl);
	peer_minttl = READ_ONCE(inet_peer_minttl);

	if (base->total >= peer_threshold)
		ttl = 0; /* be aggressive */
	else
		ttl = peer_maxttl - (peer_maxttl - peer_minttl) / HZ *
			base->total / peer_threshold * HZ;
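
	/* A worked example of the linear interpolation above, using this
	 * file's defaults (maxttl = 10 * 60 * HZ, minttl = 120 * HZ, so a
	 * 480-second spread): at half the threshold,
	 * ttl = 600*HZ - (480 * total / threshold) * HZ = 600*HZ - 240*HZ
	 *     = 360*HZ, i.e. six minutes.  An empty pool keeps entries the
	 * full ten minutes; at or above the threshold the branch above
	 * already forced ttl = 0.
	 */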
	for (i = 0; i < gc_cnt; i++) {
		p = gc_stack[i];

		delta = (__u32)jiffies - READ_ONCE(p->dtime);

		if (delta < ttl || !refcount_dec_if_one(&p->refcnt))
			gc_stack[i] = NULL;
	}
	for (i = 0; i < gc_cnt; i++) {
		p = gc_stack[i];
		if (p) {
			rb_erase(&p->rb_node, &base->rb_root);
			base->total--;
			kfree_rcu(p, rcu);
		}
	}
}

/* Must be called under RCU: no refcount change is done here. */
struct inet_peer *inet_getpeer(struct inet_peer_base *base,
			       const struct inetpeer_addr *daddr)
{
	struct inet_peer *p, *gc_stack[PEER_MAX_GC];
	struct rb_node **pp, *parent;
	unsigned int gc_cnt, seq;

	/* Attempt a lockless lookup first.
	 * Because of a concurrent writer, we might not find an existing entry.
	 */
	seq = read_seqbegin(&base->lock);
	p = lookup(daddr, base, seq, NULL, &gc_cnt, &parent, &pp);

	if (p)
		return p;

	/* retry an exact lookup, taking the lock before.
	 * At least, nodes should be hot in our cache.
	 */
	parent = NULL;
	write_seqlock_bh(&base->lock);

	gc_cnt = 0;
	p = lookup(daddr, base, seq, gc_stack, &gc_cnt, &parent, &pp);
	if (!p) {
		p = kmem_cache_alloc(peer_cachep, GFP_ATOMIC);
		if (p) {
			p->daddr = *daddr;
			p->dtime = (__u32)jiffies;
			refcount_set(&p->refcnt, 1);
			atomic_set(&p->rid, 0);
			p->metrics[RTAX_LOCK-1] = INETPEER_METRICS_NEW;
			p->rate_tokens = 0;
			p->n_redirects = 0;
			/* 60*HZ is arbitrary, but chosen high enough so that
			 * the first calculation of tokens is at its maximum.
			 */
			p->rate_last = jiffies - 60*HZ;

			rb_link_node(&p->rb_node, parent, pp);
			rb_insert_color(&p->rb_node, &base->rb_root);
			base->total++;
		}
	}
	if (gc_cnt)
		inet_peer_gc(base, gc_stack, gc_cnt);
	write_sequnlock_bh(&base->lock);

	return p;
}
EXPORT_SYMBOL_GPL(inet_getpeer);

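/* A minimal usage sketch (illustrative; the helper name and its use of the
 * rid counter are assumptions, not code from this file): since inet_getpeer()
 * takes no reference, a caller resolves the peer inside an RCU read-side
 * critical section and must not keep the pointer past rcu_read_unlock()
 * without taking a reference of its own.
 */
static void example_touch_peer(struct inet_peer_base *base,
			       const struct inetpeer_addr *daddr)
{
	struct inet_peer *peer;

	rcu_read_lock();
	peer = inet_getpeer(base, daddr);	/* no refcount taken */
	if (peer)
		atomic_inc(&peer->rid);	/* touch some per-peer state */
	rcu_read_unlock();
}
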
void inet_putpeer(struct inet_peer *p)
{
	if (refcount_dec_and_test(&p->refcnt))
		kfree_rcu(p, rcu);
}

/*
 * Check transmit rate limitation for a given message.
 * The rate information is held in the inet_peer entries now.
 * This function is generic and could be used for other purposes
 * too.  It uses a token bucket filter as suggested by Alexey Kuznetsov.
 *
 * Note that the same inet_peer fields are modified by functions in
 * route.c too, but these work for packet destinations while xrlim_allow
 * works for icmp destinations.  This means the rate limiting information
 * for one "ip object" is shared - and these ICMPs are twice limited:
 * by source and by destination.
 *
 * RFC 1812: 4.3.2.8 SHOULD be able to limit error message rate
 *	     SHOULD allow setting of rate limits
 *
 * Shared between ICMPv4 and ICMPv6.
 */
#define XRLIM_BURST_FACTOR 6
bool inet_peer_xrlim_allow(struct inet_peer *peer, int timeout)
{
	unsigned long now, token, otoken, delta;
	bool rc = false;

	if (!peer)
		return true;

	token = otoken = READ_ONCE(peer->rate_tokens);
	now = jiffies;
	delta = now - READ_ONCE(peer->rate_last);
	if (delta) {
		WRITE_ONCE(peer->rate_last, now);
		token += delta;
		if (token > XRLIM_BURST_FACTOR * timeout)
			token = XRLIM_BURST_FACTOR * timeout;
	}
	if (token >= timeout) {
		token -= timeout;
		rc = true;
	}
	if (token != otoken)
		WRITE_ONCE(peer->rate_tokens, token);
	return rc;
}
EXPORT_SYMBOL(inet_peer_xrlim_allow);

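/* An illustrative caller (the helper name and the one-second timeout are
 * assumptions; the real users live in the ICMP code): tokens accrue at one
 * per jiffy of idle time, each transmission spends "timeout" tokens, and
 * the bucket is capped at XRLIM_BURST_FACTOR * timeout, so an idle peer may
 * burst up to six messages before settling to one per timeout interval.
 */
static bool example_icmp_may_send(struct inet_peer *peer)
{
	return inet_peer_xrlim_allow(peer, HZ);	/* ~1 message/sec average */
}
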
void inetpeer_invalidate_tree(struct inet_peer_base *base)
{
	struct rb_node *p = rb_first(&base->rb_root);

	while (p) {
		struct inet_peer *peer = rb_entry(p, struct inet_peer, rb_node);

		p = rb_next(p);
		rb_erase(&peer->rb_node, &base->rb_root);
		inet_putpeer(peer);
		cond_resched();
	}

	base->total = 0;
}
EXPORT_SYMBOL(inetpeer_invalidate_tree);