From: Pekka Enberg
Date: Mon, 19 Sep 2011 14:46:07 +0000 (+0300)
Subject: Merge branch 'slab/urgent' into slab/next
X-Git-Tag: v3.2-rc1~152^2^2~2
X-Git-Url: https://repo.jachan.dev/linux.git/commitdiff_plain/d20bbfab01802e195a50435940f7e4aa747c217c?hp=-c

Merge branch 'slab/urgent' into slab/next
---

d20bbfab01802e195a50435940f7e4aa747c217c
diff --combined mm/slab.c
index 41fc5781c7cc,6d90a091fdca..5bfc2047afe1
--- a/mm/slab.c
+++ b/mm/slab.c
@@@ -622,6 -622,51 +622,51 @@@ int slab_is_available(void
  static struct lock_class_key on_slab_l3_key;
  static struct lock_class_key on_slab_alc_key;
  
+ static struct lock_class_key debugobj_l3_key;
+ static struct lock_class_key debugobj_alc_key;
+ 
+ static void slab_set_lock_classes(struct kmem_cache *cachep,
+ 		struct lock_class_key *l3_key, struct lock_class_key *alc_key,
+ 		int q)
+ {
+ 	struct array_cache **alc;
+ 	struct kmem_list3 *l3;
+ 	int r;
+ 
+ 	l3 = cachep->nodelists[q];
+ 	if (!l3)
+ 		return;
+ 
+ 	lockdep_set_class(&l3->list_lock, l3_key);
+ 	alc = l3->alien;
+ 	/*
+ 	 * FIXME: This check for BAD_ALIEN_MAGIC
+ 	 * should go away when common slab code is taught to
+ 	 * work even without alien caches.
+ 	 * Currently, non NUMA code returns BAD_ALIEN_MAGIC
+ 	 * for alloc_alien_cache,
+ 	 */
+ 	if (!alc || (unsigned long)alc == BAD_ALIEN_MAGIC)
+ 		return;
+ 	for_each_node(r) {
+ 		if (alc[r])
+ 			lockdep_set_class(&alc[r]->lock, alc_key);
+ 	}
+ }
+ 
+ static void slab_set_debugobj_lock_classes_node(struct kmem_cache *cachep, int node)
+ {
+ 	slab_set_lock_classes(cachep, &debugobj_l3_key, &debugobj_alc_key, node);
+ }
+ 
+ static void slab_set_debugobj_lock_classes(struct kmem_cache *cachep)
+ {
+ 	int node;
+ 
+ 	for_each_online_node(node)
+ 		slab_set_debugobj_lock_classes_node(cachep, node);
+ }
+ 
  static void init_node_lock_keys(int q)
  {
  	struct cache_sizes *s = malloc_sizes;
@@@ -630,29 -675,14 +675,14 @@@
  		return;
  
  	for (s = malloc_sizes; s->cs_size != ULONG_MAX; s++) {
- 		struct array_cache **alc;
  		struct kmem_list3 *l3;
- 		int r;
  
  		l3 = s->cs_cachep->nodelists[q];
  		if (!l3 || OFF_SLAB(s->cs_cachep))
  			continue;
- 		lockdep_set_class(&l3->list_lock, &on_slab_l3_key);
- 		alc = l3->alien;
- 		/*
- 		 * FIXME: This check for BAD_ALIEN_MAGIC
- 		 * should go away when common slab code is taught to
- 		 * work even without alien caches.
- 		 * Currently, non NUMA code returns BAD_ALIEN_MAGIC
- 		 * for alloc_alien_cache,
- 		 */
- 		if (!alc || (unsigned long)alc == BAD_ALIEN_MAGIC)
- 			continue;
- 		for_each_node(r) {
- 			if (alc[r])
- 				lockdep_set_class(&alc[r]->lock,
- 						&on_slab_alc_key);
- 		}
+ 
+ 		slab_set_lock_classes(s->cs_cachep, &on_slab_l3_key,
+ 				&on_slab_alc_key, q);
  	}
  }
@@@ -671,6 -701,14 +701,14 @@@ static void init_node_lock_keys(int q
  static inline void init_lock_keys(void)
  {
  }
+ 
+ static void slab_set_debugobj_lock_classes_node(struct kmem_cache *cachep, int node)
+ {
+ }
+ 
+ static void slab_set_debugobj_lock_classes(struct kmem_cache *cachep)
+ {
+ }
  #endif
  
  /*
@@@ -1264,6 -1302,8 +1302,8 @@@ static int __cpuinit cpuup_prepare(lon
  		spin_unlock_irq(&l3->list_lock);
  		kfree(shared);
  		free_alien_cache(alien);
+ 		if (cachep->flags & SLAB_DEBUG_OBJECTS)
+ 			slab_set_debugobj_lock_classes_node(cachep, node);
  	}
  	init_node_lock_keys(node);
@@@ -1626,6 -1666,9 +1666,9 @@@ void __init kmem_cache_init_late(void
  {
  	struct kmem_cache *cachep;
  
+ 	/* Annotate slab for lockdep -- annotate the malloc caches */
+ 	init_lock_keys();
+ 
  	/* 6) resize the head arrays to their final sizes */
  	mutex_lock(&cache_chain_mutex);
  	list_for_each_entry(cachep, &cache_chain, next)
@@@ -1636,9 -1679,6 +1679,6 @@@
  	/* Done! */
  	g_cpucache_up = FULL;
  
- 	/* Annotate slab for lockdep -- annotate the malloc caches */
- 	init_lock_keys();
- 
  	/*
  	 * Register a cpu startup notifier callback that initializes
  	 * cpu_cache_get for all new cpus
@@@ -1811,15 -1851,15 +1851,15 @@@ static void dump_line(char *data, int o
  	unsigned char error = 0;
  	int bad_count = 0;
  
- 	printk(KERN_ERR "%03x:", offset);
+ 	printk(KERN_ERR "%03x: ", offset);
  	for (i = 0; i < limit; i++) {
  		if (data[offset + i] != POISON_FREE) {
  			error = data[offset + i];
  			bad_count++;
  		}
- 		printk(" %02x", (unsigned char)data[offset + i]);
  	}
- 	printk("\n");
+ 	print_hex_dump(KERN_CONT, "", 0, 16, 1,
+ 			&data[offset], limit, 1);
  
  	if (bad_count == 1) {
  		error ^= POISON_FREE;
@@@ -2426,6 -2466,16 +2466,16 @@@ kmem_cache_create (const char *name, si
  		goto oops;
  	}
  
+ 	if (flags & SLAB_DEBUG_OBJECTS) {
+ 		/*
+ 		 * Would deadlock through slab_destroy()->call_rcu()->
+ 		 * debug_object_activate()->kmem_cache_alloc().
+ 		 */
+ 		WARN_ON_ONCE(flags & SLAB_DESTROY_BY_RCU);
+ 
+ 		slab_set_debugobj_lock_classes(cachep);
+ 	}
+ 
  	/* cache setup completed, link it into the list */
  	list_add(&cachep->next, &cache_chain);
  oops:
@@@ -2989,9 -3039,14 +3039,9 @@@ bad
  		printk(KERN_ERR "slab: Internal list corruption detected in "
  				"cache '%s'(%d), slabp %p(%d). Hexdump:\n",
  				cachep->name, cachep->num, slabp, slabp->inuse);
- 		for (i = 0;
- 		     i < sizeof(*slabp) + cachep->num * sizeof(kmem_bufctl_t);
- 		     i++) {
- 			if (i % 16 == 0)
- 				printk("\n%03x:", i);
- 			printk(" %02x", ((unsigned char *)slabp)[i]);
- 		}
- 		printk("\n");
+ 		print_hex_dump(KERN_ERR, "", DUMP_PREFIX_OFFSET, 16, 1, slabp,
+ 			sizeof(*slabp) + cachep->num * sizeof(kmem_bufctl_t),
+ 			1);
  		BUG();
  	}
  }
@@@ -3398,7 -3453,7 +3448,7 @@@ __cache_alloc_node(struct kmem_cache *c
  	cache_alloc_debugcheck_before(cachep, flags);
  	local_irq_save(save_flags);
  
- 	if (nodeid == -1)
+ 	if (nodeid == NUMA_NO_NODE)
  		nodeid = slab_node;
  
  	if (unlikely(!cachep->nodelists[nodeid])) {
@@@ -3929,7 -3984,7 +3979,7 @@@ fail
  
  struct ccupdate_struct {
  	struct kmem_cache *cachep;
- 	struct array_cache *new[NR_CPUS];
+ 	struct array_cache *new[0];
  };
  
  static void do_ccupdate_local(void *info)
@@@ -3951,7 -4006,8 +4001,8 @@@ static int do_tune_cpucache(struct kmem
  	struct ccupdate_struct *new;
  	int i;
  
- 	new = kzalloc(sizeof(*new), gfp);
+ 	new = kzalloc(sizeof(*new) + nr_cpu_ids * sizeof(struct array_cache *),
+ 			gfp);
  	if (!new)
  		return -ENOMEM;
diff --combined mm/slub.c
index 2dc22160aff1,91a120f185d1..3b3f17bc0d17
--- a/mm/slub.c
+++ b/mm/slub.c
@@@ -467,8 -467,34 +467,8 @@@ static int disable_higher_order_debug
   */
  static void print_section(char *text, u8 *addr, unsigned int length)
  {
- 	int i, offset;
- 	int newline = 1;
- 	char ascii[17];
- 
- 	ascii[16] = 0;
- 
- 	for (i = 0; i < length; i++) {
- 		if (newline) {
- 			printk(KERN_ERR "%8s 0x%p: ", text, addr + i);
- 			newline = 0;
- 		}
- 		printk(KERN_CONT " %02x", addr[i]);
- 		offset = i % 16;
- 		ascii[offset] = isgraph(addr[i]) ? addr[i] : '.';
- 		if (offset == 15) {
- 			printk(KERN_CONT " %s\n", ascii);
- 			newline = 1;
- 		}
- 	}
- 	if (!newline) {
- 		i %= 16;
- 		while (i < 16) {
- 			printk(KERN_CONT " ");
- 			ascii[i] = ' ';
- 			i++;
- 		}
- 		printk(KERN_CONT " %s\n", ascii);
- 	}
+ 	print_hex_dump(KERN_ERR, text, DUMP_PREFIX_ADDRESS, 16, 1, addr,
+ 			length, 1);
  }
  
  static struct track *get_track(struct kmem_cache *s, void *object,
@@@ -599,12 -625,12 +599,12 @@@ static void print_trailer(struct kmem_c
  			p, p - addr, get_freepointer(s, p));
  
  	if (p > addr + 16)
- 		print_section("Bytes b4", p - 16, 16);
- 
- 	print_section("Object", p, min_t(unsigned long, s->objsize, PAGE_SIZE));
+ 		print_section("Bytes b4 ", p - 16, 16);
+ 
+ 	print_section("Object ", p, min_t(unsigned long, s->objsize,
+ 				PAGE_SIZE));
  	if (s->flags & SLAB_RED_ZONE)
- 		print_section("Redzone", p + s->objsize,
+ 		print_section("Redzone ", p + s->objsize,
  			s->inuse - s->objsize);
  
  	if (s->offset)
@@@ -617,7 -643,7 +617,7 @@@
  
  	if (off != s->size)
  		/* Beginning of the filler is the free pointer */
- 		print_section("Padding", p + off, s->size - off);
+ 		print_section("Padding ", p + off, s->size - off);
  
  	dump_stack();
  }
@@@ -675,7 -701,7 +675,7 @@@ static u8 *check_bytes(u8 *start, u8 va
  		return check_bytes8(start, value, bytes);
  
  	value64 = value | value << 8 | value << 16 | value << 24;
- 	value64 = value64 | value64 << 32;
+ 	value64 = (value64 & 0xffffffff) | value64 << 32;
  	prefix = 8 - ((unsigned long)start) % 8;
  
  	if (prefix) {
@@@ -812,7 -838,7 +812,7 @@@ static int slab_pad_check(struct kmem_c
  		end--;
  
  	slab_err(s, page, "Padding overwritten. 0x%p-0x%p", fault, end - 1);
- 	print_section("Padding", end - remainder, remainder);
+ 	print_section("Padding ", end - remainder, remainder);
  
  	restore_bytes(s, "slab padding", POISON_INUSE, end - remainder, end);
  	return 0;
@@@ -961,7 -987,7 +961,7 @@@ static void trace(struct kmem_cache *s
  			page->freelist);
  
  		if (!alloc)
- 			print_section("Object", (void *)object, s->objsize);
+ 			print_section("Object ", (void *)object, s->objsize);
  
  		dump_stack();
  	}
@@@ -1508,7 -1534,7 +1508,7 @@@ static inline void add_partial(struct k
  				struct page *page, int tail)
  {
  	n->nr_partial++;
- 	if (tail)
+ 	if (tail == DEACTIVATE_TO_TAIL)
  		list_add_tail(&page->lru, &n->partial);
  	else
  		list_add(&page->lru, &n->partial);
@@@ -1755,13 -1781,13 +1755,13 @@@ static void deactivate_slab(struct kmem
  	enum slab_modes l = M_NONE, m = M_NONE;
  	void *freelist;
  	void *nextfree;
- 	int tail = 0;
+ 	int tail = DEACTIVATE_TO_HEAD;
  	struct page new;
  	struct page old;
  
  	if (page->freelist) {
  		stat(s, DEACTIVATE_REMOTE_FREES);
- 		tail = 1;
+ 		tail = DEACTIVATE_TO_TAIL;
  	}
  
  	c->tid = next_tid(c->tid);
@@@ -1828,7 -1854,7 +1828,7 @@@ redo
  
  	new.frozen = 0;
  
- 	if (!new.inuse && n->nr_partial < s->min_partial)
+ 	if (!new.inuse && n->nr_partial > s->min_partial)
  		m = M_FREE;
  	else if (new.freelist) {
  		m = M_PARTIAL;
@@@ -1867,7 -1893,7 +1867,7 @@@
  
  		if (m == M_PARTIAL) {
  
  			add_partial(n, page, tail);
- 			stat(s, tail ? DEACTIVATE_TO_TAIL : DEACTIVATE_TO_HEAD);
+ 			stat(s, tail);
  
  		} else if (m == M_FULL) {
@@@ -2351,7 -2377,7 +2351,7 @@@ static void __slab_free(struct kmem_cac
  		 */
  		if (unlikely(!prior)) {
  			remove_full(s, page);
- 			add_partial(n, page, 0);
+ 			add_partial(n, page, DEACTIVATE_TO_TAIL);
  			stat(s, FREE_ADD_PARTIAL);
  		}
  	}
@@@ -2361,11 -2387,13 +2361,13 @@@ slab_empty
  slab_empty:
  	if (prior) {
  		/*
- 		 * Slab still on the partial list.
+ 		 * Slab on the partial list.
  		 */
  		remove_partial(n, page);
  		stat(s, FREE_REMOVE_PARTIAL);
- 	}
+ 	} else
+ 		/* Slab must be on the full list */
+ 		remove_full(s, page);
  
  	spin_unlock_irqrestore(&n->list_lock, flags);
  	stat(s, FREE_SLAB);
@@@ -2667,7 -2695,7 +2669,7 @@@ static void early_kmem_cache_node_alloc
  	init_kmem_cache_node(n, kmem_cache_node);
  	inc_slabs_node(kmem_cache_node, node, page->objects);
  
- 	add_partial(n, page, 0);
+ 	add_partial(n, page, DEACTIVATE_TO_HEAD);
  }
  
  static void free_kmem_cache_nodes(struct kmem_cache *s)
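
A few of the hunks above are worth unpacking. First, the hex-dump
conversions: both files replace open-coded printk() loops with the
kernel's print_hex_dump() helper (lib/hexdump.c), declared as:

	void print_hex_dump(const char *level, const char *prefix_str,
			    int prefix_type, int rowsize, int groupsize,
			    const void *buf, size_t len, bool ascii);

The call sites above ask for 16-byte rows, 1-byte groups and a trailing
ASCII column. prefix_str is concatenated directly onto every output row,
which is why the slub hunks add a trailing space to the section names
("Object " rather than "Object"). A minimal sketch of the call pattern;
dump_buf() is a hypothetical wrapper, not part of the patch:

	#include <linux/printk.h>

	/* Dump a buffer the way the converted call sites do. DUMP_PREFIX_OFFSET
	 * prints each row's offset into buf; slub's print_section() passes
	 * DUMP_PREFIX_ADDRESS to print the kernel virtual address instead. */
	static void dump_buf(const char *prefix, const void *buf, size_t len)
	{
		print_hex_dump(KERN_ERR, prefix, DUMP_PREFIX_OFFSET, 16, 1,
			       buf, len, 1);
	}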
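
Second, the one-line check_bytes() change in mm/slub.c fixes a
sign-extension bug. value is a u8, so the expression
"value | value << 8 | value << 16 | value << 24" is evaluated in plain
(signed) int arithmetic; for a byte with the top bit set (POISON_END is
0xa5, the red-zone markers are 0xbb and 0xcc) the 32-bit intermediate is
negative, and widening it to u64 fills the upper 32 bits with ones. The
old "value64 | value64 << 32" kept that garbage; masking with 0xffffffff
first yields the intended repeated byte pattern. A standalone userspace
reproduction (ordinary C, not kernel code):

	#include <stdio.h>
	#include <stdint.h>

	int main(void)
	{
		uint8_t value = 0xa5;	/* POISON_END: top bit set */

		/* Evaluated in int: 0xa5 << 24 sets bit 31, so the result is
		 * negative and sign-extends when widened to uint64_t. */
		uint64_t value64 = value | value << 8 | value << 16 | value << 24;

		uint64_t broken = value64 | value64 << 32;		  /* old */
		uint64_t fixed = (value64 & 0xffffffff) | value64 << 32;  /* new */

		printf("broken: %016llx\n", (unsigned long long)broken);
		printf("fixed:  %016llx\n", (unsigned long long)fixed);
		return 0;	/* prints ffffffffa5a5a5a5, then a5a5a5a5a5a5a5a5 */
	}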
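
Finally, the ccupdate_struct hunks in mm/slab.c replace the fixed
"struct array_cache *new[NR_CPUS]" member with a zero-length trailing
array and size the single allocation by nr_cpu_ids, the number of CPUs
actually possible on the running system, so a kernel built with a large
NR_CPUS (e.g. 4096) no longer allocates thousands of unused pointers on
a small machine. The same pattern in plain C99; the names here are
illustrative, not the kernel's:

	#include <stdlib.h>

	struct ccupdate {
		void *cachep;
		void *new[];	/* flexible array member; the patch spells it new[0] */
	};

	/* One allocation sized for exactly ncpus trailing pointers, mirroring
	 * kzalloc(sizeof(*new) + nr_cpu_ids * sizeof(struct array_cache *), gfp). */
	static struct ccupdate *ccupdate_alloc(size_t ncpus)
	{
		return calloc(1, sizeof(struct ccupdate) + ncpus * sizeof(void *));
	}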