X-Git-Url: https://repo.jachan.dev/linux.git/blobdiff_plain/e952f31bce6e9f64db01f607abc46529ba57ac9e..565d9724b8ce49b530287de34aa17f45f21624d5:/net/core/flow.c

diff --git a/net/core/flow.c b/net/core/flow.c
index 55789f832eda..b16d31ae5e54 100644
--- a/net/core/flow.c
+++ b/net/core/flow.c
@@ -32,7 +32,6 @@ struct flow_cache_entry {
 	u8			dir;
 	struct flowi		key;
 	u32			genid;
-	u32			sk_sid;
 	void			*object;
 	atomic_t		*object_ref;
 };
@@ -79,13 +78,21 @@ static void flow_cache_new_hashrnd(unsigned long arg)
 {
 	int i;
 
-	for_each_cpu(i)
+	for_each_possible_cpu(i)
 		flow_hash_rnd_recalc(i) = 1;
 
 	flow_hash_rnd_timer.expires = jiffies + FLOW_HASH_RND_PERIOD;
 	add_timer(&flow_hash_rnd_timer);
 }
 
+static void flow_entry_kill(int cpu, struct flow_cache_entry *fle)
+{
+	if (fle->object)
+		atomic_dec(fle->object_ref);
+	kmem_cache_free(flow_cachep, fle);
+	flow_count(cpu)--;
+}
+
 static void __flow_cache_shrink(int cpu, int shrink_to)
 {
 	struct flow_cache_entry *fle, **flp;
@@ -101,10 +108,7 @@ static void __flow_cache_shrink(int cpu, int shrink_to)
 		}
 		while ((fle = *flp) != NULL) {
 			*flp = fle->next;
-			if (fle->object)
-				atomic_dec(fle->object_ref);
-			kmem_cache_free(flow_cachep, fle);
-			flow_count(cpu)--;
+			flow_entry_kill(cpu, fle);
 		}
 	}
 }
@@ -165,7 +169,7 @@ static int flow_key_compare(struct flowi *key1, struct flowi *key2)
 	return 0;
 }
 
-void *flow_cache_lookup(struct flowi *key, u32 sk_sid, u16 family, u8 dir,
+void *flow_cache_lookup(struct flowi *key, u16 family, u8 dir,
 			flow_resolve_t resolver)
 {
 	struct flow_cache_entry *fle, **head;
@@ -189,7 +193,6 @@ void *flow_cache_lookup(struct flowi *key, u32 sk_sid, u16 family, u8 dir,
 	for (fle = *head; fle; fle = fle->next) {
 		if (fle->family == family &&
 		    fle->dir == dir &&
-		    fle->sk_sid == sk_sid &&
 		    flow_key_compare(key, &fle->key) == 0) {
 			if (fle->genid == atomic_read(&flow_cache_genid)) {
 				void *ret = fle->object;
@@ -214,7 +217,6 @@ void *flow_cache_lookup(struct flowi *key, u32 sk_sid, u16 family, u8 dir,
 		*head = fle;
 		fle->family = family;
 		fle->dir = dir;
-		fle->sk_sid = sk_sid;
 		memcpy(&fle->key, key, sizeof(*key));
 		fle->object = NULL;
 		flow_count(cpu)++;
@@ -223,24 +225,33 @@ void *flow_cache_lookup(struct flowi *key, u32 sk_sid, u16 family, u8 dir,
 
 nocache:
 	{
+		int err;
 		void *obj;
 		atomic_t *obj_ref;
 
-		resolver(key, sk_sid, family, dir, &obj, &obj_ref);
+		err = resolver(key, family, dir, &obj, &obj_ref);
 
 		if (fle) {
-			fle->genid = atomic_read(&flow_cache_genid);
-
-			if (fle->object)
-				atomic_dec(fle->object_ref);
-
-			fle->object = obj;
-			fle->object_ref = obj_ref;
-			if (obj)
-				atomic_inc(fle->object_ref);
+			if (err) {
+				/* Force security policy check on next lookup */
+				*head = fle->next;
+				flow_entry_kill(cpu, fle);
+			} else {
+				fle->genid = atomic_read(&flow_cache_genid);
+
+				if (fle->object)
+					atomic_dec(fle->object_ref);
+
+				fle->object = obj;
+				fle->object_ref = obj_ref;
+				if (obj)
+					atomic_inc(fle->object_ref);
+			}
 		}
 		local_bh_enable();
 
+		if (err)
+			obj = ERR_PTR(err);
 		return obj;
 	}
 }
@@ -318,12 +329,10 @@ static void __devinit flow_cache_cpu_prepare(int cpu)
 		/* NOTHING */;
 
 	flow_table(cpu) = (struct flow_cache_entry **)
-		__get_free_pages(GFP_KERNEL, order);
+		__get_free_pages(GFP_KERNEL|__GFP_ZERO, order);
 	if (!flow_table(cpu))
 		panic("NET: failed to allocate flow cache order %lu\n", order);
 
-	memset(flow_table(cpu), 0, PAGE_SIZE << order);
-
 	flow_hash_rnd_recalc(cpu) = 1;
 	flow_count(cpu) = 0;
 
@@ -348,12 +357,8 @@ static int __init flow_cache_init(void)
 
 	flow_cachep = kmem_cache_create("flow_cache",
 					sizeof(struct flow_cache_entry),
-					0, SLAB_HWCACHE_ALIGN,
+					0, SLAB_HWCACHE_ALIGN|SLAB_PANIC,
 					NULL, NULL);
-
-	if (!flow_cachep)
-		panic("NET: failed to allocate flow cache slab\n");
-
 	flow_hash_shift = 10;
 	flow_lwm = 2 * flow_hash_size;
 	flow_hwm = 4 * flow_hash_size;
@@ -363,7 +368,7 @@ static int __init flow_cache_init(void)
 	flow_hash_rnd_timer.expires = jiffies + FLOW_HASH_RND_PERIOD;
 	add_timer(&flow_hash_rnd_timer);
 
-	for_each_cpu(i)
+	for_each_possible_cpu(i)
 		flow_cache_cpu_prepare(i);
 
 	hotcpu_notifier(flow_cache_cpu, 0);
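
For illustration: with this change, flow_cache_lookup() reports resolver failures
through ERR_PTR() instead of silently caching the result, so a caller must test
the returned pointer with IS_ERR() rather than assuming any non-NULL value is a
valid object. A minimal caller sketch follows; my_resolver() and my_use_object()
are hypothetical placeholders, not part of this patch, with my_resolver()
matching the updated flow_resolve_t that now returns an error code:

	/* Hypothetical consumer of the new flow_cache_lookup() convention.
	 * On resolver failure the cache entry has already been killed, and
	 * the error is propagated as an ERR_PTR()-encoded pointer.
	 */
	static int my_flow_user(struct flowi *fl, u16 family, u8 dir)
	{
		void *obj = flow_cache_lookup(fl, family, dir, my_resolver);

		if (IS_ERR(obj))
			return PTR_ERR(obj);	/* forces a fresh lookup next time */
		if (obj)
			my_use_object(obj);	/* NULL still means "no object cached" */
		return 0;
	}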