/* flow.c: Generic flow cache. */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/list.h>
#include <linux/jhash.h>
#include <linux/interrupt.h>
#include <linux/mm.h>
#include <linux/random.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/smp.h>
#include <linux/completion.h>
#include <linux/percpu.h>
#include <linux/bitops.h>
#include <linux/notifier.h>
#include <linux/cpu.h>
#include <linux/cpumask.h>
#include <linux/mutex.h>
#include <net/flow.h>
#include <linux/atomic.h>
#include <linux/security.h>
#include <net/net_namespace.h>

struct flow_cache_entry {
	union {
		struct hlist_node	hlist;
		struct list_head	gc_list;
	} u;
	struct net			*net;
	u16				family;
	u8				dir;
	u32				genid;
	struct flowi			key;
	struct flow_cache_object	*object;
};

struct flow_flush_info {
	struct flow_cache	*cache;
	atomic_t		cpuleft;
	struct completion	completion;
};

static struct kmem_cache *flow_cachep __read_mostly;

#define flow_cache_hash_size(cache)	(1 << (cache)->hash_shift)
#define FLOW_HASH_RND_PERIOD		(10 * 60 * HZ)
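
/*
 * Timer callback: tell every CPU to pick a fresh hash seed on its next
 * lookup, so the per-cpu hash chains are periodically rebuilt with a new
 * random key. Re-arms itself every FLOW_HASH_RND_PERIOD.
 */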
static void flow_cache_new_hashrnd(unsigned long arg)
{
	struct flow_cache *fc = (void *) arg;
	int i;

	for_each_possible_cpu(i)
		per_cpu_ptr(fc->percpu, i)->hash_rnd_recalc = 1;

	fc->rnd_timer.expires = jiffies + FLOW_HASH_RND_PERIOD;
	add_timer(&fc->rnd_timer);
}
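
/*
 * An entry is stale once the global flow_cache_genid has moved past the
 * genid it was cached under, or once the cached object's own ->check()
 * hook reports it unusable.
 */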
static int flow_entry_valid(struct flow_cache_entry *fle,
			    struct netns_xfrm *xfrm)
{
	if (atomic_read(&xfrm->flow_cache_genid) != fle->genid)
		return 0;
	if (fle->object && !fle->object->ops->check(fle->object))
		return 0;
	return 1;
}

static void flow_entry_kill(struct flow_cache_entry *fle,
			    struct netns_xfrm *xfrm)
{
	if (fle->object)
		fle->object->ops->delete(fle->object);
	kmem_cache_free(flow_cachep, fle);
}
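
/*
 * Work item for deferred garbage collection: entries unlinked from the
 * per-cpu hash tables are batched on xfrm->flow_cache_gc_list and killed
 * here, outside the lookup fast path.
 */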
static void flow_cache_gc_task(struct work_struct *work)
{
	struct list_head gc_list;
	struct flow_cache_entry *fce, *n;
	struct netns_xfrm *xfrm = container_of(work, struct netns_xfrm,
						flow_cache_gc_work);

	INIT_LIST_HEAD(&gc_list);
	spin_lock_bh(&xfrm->flow_cache_gc_lock);
	list_splice_tail_init(&xfrm->flow_cache_gc_list, &gc_list);
	spin_unlock_bh(&xfrm->flow_cache_gc_lock);

	list_for_each_entry_safe(fce, n, &gc_list, u.gc_list) {
		flow_entry_kill(fce, xfrm);
		atomic_dec(&xfrm->flow_cache_gc_count);
	}
}
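
/*
 * Hand a batch of unlinked entries over to the GC work item and update
 * the per-cpu and global entry accounting.
 */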
static void flow_cache_queue_garbage(struct flow_cache_percpu *fcp,
				     int deleted, struct list_head *gc_list,
				     struct netns_xfrm *xfrm)
{
	if (deleted) {
		atomic_add(deleted, &xfrm->flow_cache_gc_count);
		fcp->hash_count -= deleted;
		spin_lock_bh(&xfrm->flow_cache_gc_lock);
		list_splice_tail(gc_list, &xfrm->flow_cache_gc_list);
		spin_unlock_bh(&xfrm->flow_cache_gc_lock);
		schedule_work(&xfrm->flow_cache_gc_work);
	}
}
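
/*
 * Walk one CPU's hash table, keeping at most shrink_to valid entries per
 * chain and unlinking the rest (stale entries are always dropped), so
 * shrink_to == 0 empties the table. Runs on the owning CPU, or while
 * that CPU is offline, since the table is accessed without locks.
 */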
static void __flow_cache_shrink(struct flow_cache *fc,
				struct flow_cache_percpu *fcp,
				int shrink_to)
{
	struct flow_cache_entry *fle;
	struct hlist_node *tmp;
	LIST_HEAD(gc_list);
	int i, deleted = 0;
	struct netns_xfrm *xfrm = container_of(fc, struct netns_xfrm,
						flow_cache_global);

	for (i = 0; i < flow_cache_hash_size(fc); i++) {
		int saved = 0;

		hlist_for_each_entry_safe(fle, tmp,
					  &fcp->hash_table[i], u.hlist) {
			if (saved < shrink_to &&
			    flow_entry_valid(fle, xfrm)) {
				saved++;
			} else {
				deleted++;
				hlist_del(&fle->u.hlist);
				list_add_tail(&fle->u.gc_list, &gc_list);
			}
		}
	}

	flow_cache_queue_garbage(fcp, deleted, &gc_list, xfrm);
}

static void flow_cache_shrink(struct flow_cache *fc,
			      struct flow_cache_percpu *fcp)
{
	int shrink_to = fc->low_watermark / flow_cache_hash_size(fc);

	__flow_cache_shrink(fc, fcp, shrink_to);
}

static void flow_new_hash_rnd(struct flow_cache *fc,
			      struct flow_cache_percpu *fcp)
{
	get_random_bytes(&fcp->hash_rnd, sizeof(u32));
	fcp->hash_rnd_recalc = 0;
	__flow_cache_shrink(fc, fcp, 0);
}

static u32 flow_hash_code(struct flow_cache *fc,
			  struct flow_cache_percpu *fcp,
			  const struct flowi *key,
			  size_t keysize)
{
	const u32 *k = (const u32 *) key;
	const u32 length = keysize * sizeof(flow_compare_t) / sizeof(u32);

	return jhash2(k, length, fcp->hash_rnd)
		& (flow_cache_hash_size(fc) - 1);
}

/* I hear what you're saying, use memcmp. But memcmp cannot make
 * important assumptions that we can here, such as alignment.
 */
static int flow_key_compare(const struct flowi *key1, const struct flowi *key2,
			    size_t keysize)
{
	const flow_compare_t *k1, *k1_lim, *k2;

	k1 = (const flow_compare_t *) key1;
	k1_lim = k1 + keysize;

	k2 = (const flow_compare_t *) key2;

	do {
		if (*k1++ != *k2++)
			return 1;
	} while (k1 < k1_lim);

	return 0;
}
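
/*
 * Look up @key in this CPU's flow cache and return the cached
 * flow_cache_object, taking a reference through its ->get() hook. On a
 * miss, or when the cached entry is stale, @resolver is invoked to build
 * a fresh object, which is then stored under the current genid. Runs
 * with BHs disabled and may return ERR_PTR(-ENOBUFS) when the GC backlog
 * is too large. Callers are typically the xfrm (IPsec) policy and bundle
 * resolvers.
 */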
struct flow_cache_object *
flow_cache_lookup(struct net *net, const struct flowi *key, u16 family, u8 dir,
		  flow_resolve_t resolver, void *ctx)
{
	struct flow_cache *fc = &net->xfrm.flow_cache_global;
	struct flow_cache_percpu *fcp;
	struct flow_cache_entry *fle, *tfle;
	struct flow_cache_object *flo;
	size_t keysize;
	unsigned int hash;

	local_bh_disable();
	fcp = this_cpu_ptr(fc->percpu);

	fle = NULL;
	flo = NULL;

	keysize = flow_key_size(family);
	if (!keysize)
		goto nocache;

	/* Packet really early in init? Making flow_cache_init a
	 * pre-smp initcall would solve this. --RR */
	if (!fcp->hash_table)
		goto nocache;

	if (fcp->hash_rnd_recalc)
		flow_new_hash_rnd(fc, fcp);

	hash = flow_hash_code(fc, fcp, key, keysize);
	hlist_for_each_entry(tfle, &fcp->hash_table[hash], u.hlist) {
		if (tfle->net == net &&
		    tfle->family == family &&
		    tfle->dir == dir &&
		    flow_key_compare(key, &tfle->key, keysize) == 0) {
			fle = tfle;
			break;
		}
	}

	if (unlikely(!fle)) {
		if (fcp->hash_count > fc->high_watermark)
			flow_cache_shrink(fc, fcp);

		if (atomic_read(&net->xfrm.flow_cache_gc_count) >
		    2 * num_online_cpus() * fc->high_watermark) {
			flo = ERR_PTR(-ENOBUFS);
			goto ret_object;
		}

		fle = kmem_cache_alloc(flow_cachep, GFP_ATOMIC);
		if (fle) {
			fle->net = net;
			fle->family = family;
			fle->dir = dir;
			memcpy(&fle->key, key, keysize * sizeof(flow_compare_t));
			fle->object = NULL;
			hlist_add_head(&fle->u.hlist, &fcp->hash_table[hash]);
			fcp->hash_count++;
		}
	} else if (likely(fle->genid == atomic_read(&net->xfrm.flow_cache_genid))) {
		flo = fle->object;
		if (!flo)
			goto ret_object;
		flo = flo->ops->get(flo);
		if (flo)
			goto ret_object;
	} else if (fle->object) {
		flo = fle->object;
		flo->ops->delete(flo);
		fle->object = NULL;
	}

nocache:
	flo = NULL;
	if (fle) {
		flo = fle->object;
		fle->object = NULL;
	}
	flo = resolver(net, key, family, dir, flo, ctx);
	if (fle) {
		fle->genid = atomic_read(&net->xfrm.flow_cache_genid);
		if (!IS_ERR(flo))
			fle->object = flo;
		else
			fle->genid--;
	} else {
		if (!IS_ERR_OR_NULL(flo))
			flo->ops->delete(flo);
	}
ret_object:
	local_bh_enable();
	return flo;
}
EXPORT_SYMBOL(flow_cache_lookup);
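
/*
 * Tasklet run on each CPU taking part in a flush: drop every entry in
 * this CPU's table that is no longer valid under the current genid, then
 * signal completion once the last participating CPU has finished.
 */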
static void flow_cache_flush_tasklet(unsigned long data)
{
	struct flow_flush_info *info = (void *)data;
	struct flow_cache *fc = info->cache;
	struct flow_cache_percpu *fcp;
	struct flow_cache_entry *fle;
	struct hlist_node *tmp;
	LIST_HEAD(gc_list);
	int i, deleted = 0;
	struct netns_xfrm *xfrm = container_of(fc, struct netns_xfrm,
						flow_cache_global);

	fcp = this_cpu_ptr(fc->percpu);
	for (i = 0; i < flow_cache_hash_size(fc); i++) {
		hlist_for_each_entry_safe(fle, tmp,
					  &fcp->hash_table[i], u.hlist) {
			if (flow_entry_valid(fle, xfrm))
				continue;

			deleted++;
			hlist_del(&fle->u.hlist);
			list_add_tail(&fle->u.gc_list, &gc_list);
		}
	}

	flow_cache_queue_garbage(fcp, deleted, &gc_list, xfrm);

	if (atomic_dec_and_test(&info->cpuleft))
		complete(&info->completion);
}

/*
 * Return whether a cpu's flow cache is empty, i.e. whether it can be
 * skipped during a flush. Conservatively, the presence of any entries
 * means the core may require flushing, since the flow_cache_ops.check()
 * function may assume it's running on the same core as the per-cpu
 * cache component.
 */
static int flow_cache_percpu_empty(struct flow_cache *fc, int cpu)
{
	struct flow_cache_percpu *fcp;
	int i;

	fcp = per_cpu_ptr(fc->percpu, cpu);
	for (i = 0; i < flow_cache_hash_size(fc); i++)
		if (!hlist_empty(&fcp->hash_table[i]))
			return 0;
	return 1;
}

static void flow_cache_flush_per_cpu(void *data)
{
	struct flow_flush_info *info = data;
	struct tasklet_struct *tasklet;

	tasklet = &this_cpu_ptr(info->cache->percpu)->flush_tasklet;
	tasklet->data = (unsigned long)info;
	tasklet_schedule(tasklet);
}
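
/*
 * Synchronously flush the flow cache on every CPU that currently holds
 * entries. The work is pushed to each such CPU's flush tasklet via IPI
 * and the caller sleeps until all of them report completion, so this
 * must be called from process context.
 */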
void flow_cache_flush(struct net *net)
{
	struct flow_flush_info info;
	cpumask_var_t mask;
	int i, self;

	/* Track which cpus need flushing to avoid disturbing all cores. */
	if (!alloc_cpumask_var(&mask, GFP_KERNEL))
		return;
	cpumask_clear(mask);

	/* Don't want cpus going down or up during this. */
	get_online_cpus();
	mutex_lock(&net->xfrm.flow_flush_sem);
	info.cache = &net->xfrm.flow_cache_global;
	for_each_online_cpu(i)
		if (!flow_cache_percpu_empty(info.cache, i))
			cpumask_set_cpu(i, mask);
	atomic_set(&info.cpuleft, cpumask_weight(mask));
	if (atomic_read(&info.cpuleft) == 0)
		goto done;

	init_completion(&info.completion);

	local_bh_disable();
	self = cpumask_test_and_clear_cpu(smp_processor_id(), mask);
	on_each_cpu_mask(mask, flow_cache_flush_per_cpu, &info, 0);
	if (self)
		flow_cache_flush_tasklet((unsigned long)&info);
	local_bh_enable();

	wait_for_completion(&info.completion);

done:
	mutex_unlock(&net->xfrm.flow_flush_sem);
	put_online_cpus();
	free_cpumask_var(mask);
}

static void flow_cache_flush_task(struct work_struct *work)
{
	struct netns_xfrm *xfrm = container_of(work, struct netns_xfrm,
						flow_cache_flush_work);
	struct net *net = container_of(xfrm, struct net, xfrm);

	flow_cache_flush(net);
}

void flow_cache_flush_deferred(struct net *net)
{
	schedule_work(&net->xfrm.flow_cache_flush_work);
}
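
/*
 * CPU hotplug "prepare" callback: allocate the per-cpu hash table on the
 * incoming CPU's memory node and set up its flush tasklet. Harmless to
 * call again for a CPU that already has a table.
 */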
static int flow_cache_cpu_prepare(struct flow_cache *fc, int cpu)
{
	struct flow_cache_percpu *fcp = per_cpu_ptr(fc->percpu, cpu);
	size_t sz = sizeof(struct hlist_head) * flow_cache_hash_size(fc);

	if (!fcp->hash_table) {
		fcp->hash_table = kzalloc_node(sz, GFP_KERNEL, cpu_to_node(cpu));
		if (!fcp->hash_table) {
			pr_err("NET: failed to allocate flow cache sz %zu\n", sz);
			return -ENOMEM;
		}
		fcp->hash_rnd_recalc = 1;
		fcp->hash_count = 0;
		tasklet_init(&fcp->flush_tasklet, flow_cache_flush_tasklet, 0);
	}
	return 0;
}

static int flow_cache_cpu_up_prep(unsigned int cpu, struct hlist_node *node)
{
	struct flow_cache *fc = hlist_entry_safe(node, struct flow_cache, node);

	return flow_cache_cpu_prepare(fc, cpu);
}

static int flow_cache_cpu_dead(unsigned int cpu, struct hlist_node *node)
{
	struct flow_cache *fc = hlist_entry_safe(node, struct flow_cache, node);
	struct flow_cache_percpu *fcp = per_cpu_ptr(fc->percpu, cpu);

	__flow_cache_shrink(fc, fcp, 0);
	return 0;
}
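
/*
 * Per-namespace initialisation: set up the GC and flush infrastructure,
 * derive the watermarks from the hash table size, allocate the per-cpu
 * state and arm the hash re-keying timer. Called from the xfrm init path
 * for each new network namespace.
 */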
int flow_cache_init(struct net *net)
{
	int i;
	struct flow_cache *fc = &net->xfrm.flow_cache_global;

	if (!flow_cachep)
		flow_cachep = kmem_cache_create("flow_cache",
						sizeof(struct flow_cache_entry),
						0, SLAB_PANIC, NULL);
	spin_lock_init(&net->xfrm.flow_cache_gc_lock);
	INIT_LIST_HEAD(&net->xfrm.flow_cache_gc_list);
	INIT_WORK(&net->xfrm.flow_cache_gc_work, flow_cache_gc_task);
	INIT_WORK(&net->xfrm.flow_cache_flush_work, flow_cache_flush_task);
	mutex_init(&net->xfrm.flow_flush_sem);
	atomic_set(&net->xfrm.flow_cache_gc_count, 0);

	fc->hash_shift = 10;
	fc->low_watermark = 2 * flow_cache_hash_size(fc);
	fc->high_watermark = 4 * flow_cache_hash_size(fc);

	fc->percpu = alloc_percpu(struct flow_cache_percpu);
	if (!fc->percpu)
		return -ENOMEM;

	if (cpuhp_state_add_instance(CPUHP_NET_FLOW_PREPARE, &fc->node))
		goto err;

	setup_timer(&fc->rnd_timer, flow_cache_new_hashrnd,
		    (unsigned long) fc);
	fc->rnd_timer.expires = jiffies + FLOW_HASH_RND_PERIOD;
	add_timer(&fc->rnd_timer);

	return 0;

err:
	for_each_possible_cpu(i) {
		struct flow_cache_percpu *fcp = per_cpu_ptr(fc->percpu, i);
		kfree(fcp->hash_table);
		fcp->hash_table = NULL;
	}

	free_percpu(fc->percpu);
	fc->percpu = NULL;

	return -ENOMEM;
}
EXPORT_SYMBOL(flow_cache_init);
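
/*
 * Per-namespace teardown: stop the re-keying timer, detach from the CPU
 * hotplug state and free every per-cpu hash table. Remaining entries are
 * expected to have been flushed before this runs.
 */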
void flow_cache_fini(struct net *net)
{
	int i;
	struct flow_cache *fc = &net->xfrm.flow_cache_global;

	del_timer_sync(&fc->rnd_timer);

	cpuhp_state_remove_instance_nocalls(CPUHP_NET_FLOW_PREPARE, &fc->node);

	for_each_possible_cpu(i) {
		struct flow_cache_percpu *fcp = per_cpu_ptr(fc->percpu, i);
		kfree(fcp->hash_table);
		fcp->hash_table = NULL;
	}

	free_percpu(fc->percpu);
	fc->percpu = NULL;
}
EXPORT_SYMBOL(flow_cache_fini);

void __init flow_cache_hp_init(void)
{
	int ret;

	ret = cpuhp_setup_state_multi(CPUHP_NET_FLOW_PREPARE,
				      "net/flow:prepare",
				      flow_cache_cpu_up_prep,
				      flow_cache_cpu_dead);
	WARN_ON(ret < 0);
}