#include <linux/export.h>
#include <linux/jump_label.h>
+#include <asm/intel-family.h>
#include <asm/processor.h>
#include <asm/traps.h>
#include <asm/tlbflush.h>
m->socketid = cpu_data(m->extcpu).phys_proc_id;
m->apicid = cpu_data(m->extcpu).initial_apicid;
rdmsrl(MSR_IA32_MCG_CAP, m->mcgcap);
+
+ if (this_cpu_has(X86_FEATURE_INTEL_PPIN))
+ rdmsrl(MSR_PPIN, m->ppin);
}
DEFINE_PER_CPU(struct mce, injectm);
static struct notifier_block mce_srao_nb;
+static atomic_t num_notifiers;
+
void mce_register_decode_chain(struct notifier_block *nb)
{
+ atomic_inc(&num_notifiers);
+
/* Ensure SRAO notifier has the highest priority in the decode chain. */
if (nb != &mce_srao_nb && nb->priority == INT_MAX)
nb->priority -= 1;
void mce_unregister_decode_chain(struct notifier_block *nb)
{
+ atomic_dec(&num_notifiers);
+
atomic_notifier_chain_unregister(&x86_mce_decoder_chain, nb);
}
EXPORT_SYMBOL_GPL(mce_unregister_decode_chain);
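For context, a consumer of the decode chain (an EDAC driver, for instance) only passes an ordinary notifier_block through these two helpers, which is what the num_notifiers count above tracks. A minimal sketch of such a consumer, with a hypothetical callback name (not part of this patch):

/* Hypothetical decode-chain consumer, for illustration only. */
static int example_mce_decoder(struct notifier_block *nb, unsigned long val,
			       void *data)
{
	struct mce *m = data;

	if (!m)
		return NOTIFY_DONE;

	/* Decode m->status, m->addr, m->misc here. */
	return NOTIFY_OK;
}

static struct notifier_block example_mce_dec_nb = {
	.notifier_call = example_mce_decoder,
};

/*
 * mce_register_decode_chain(&example_mce_dec_nb) at init time,
 * mce_unregister_decode_chain(&example_mce_dec_nb) on exit.
 */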
.misc = misc_reg
};
-static void print_mce(struct mce *m)
+static void __print_mce(struct mce *m)
{
- int ret = 0;
-
- pr_emerg(HW_ERR "CPU %d: Machine Check Exception: %Lx Bank %d: %016Lx\n",
- m->extcpu, m->mcgstatus, m->bank, m->status);
+ pr_emerg(HW_ERR "CPU %d: Machine Check%s: %Lx Bank %d: %016Lx\n",
+ m->extcpu,
+ (m->mcgstatus & MCG_STATUS_MCIP ? " Exception" : ""),
+ m->mcgstatus, m->bank, m->status);
if (m->ip) {
pr_emerg(HW_ERR "RIP%s %02x:<%016Lx> ",
!(m->mcgstatus & MCG_STATUS_EIPV) ? " !INEXACT!" : "",
- m->cs, m->ip);
+ m->cs, m->ip);
if (m->cs == __KERNEL_CS)
print_symbol("{%s}", m->ip);
pr_emerg(HW_ERR "PROCESSOR %u:%x TIME %llu SOCKET %u APIC %x microcode %x\n",
m->cpuvendor, m->cpuid, m->time, m->socketid, m->apicid,
cpu_data(m->extcpu).microcode);
+}
+
+static void print_mce(struct mce *m)
+{
+ int ret = 0;
+
+ __print_mce(m);
/*
* Print out human-readable details about the MCE error,
static void mce_schedule_work(void)
{
- if (!mce_gen_pool_empty() && keventd_up())
+ if (!mce_gen_pool_empty())
schedule_work(&mce_work);
}
.priority = INT_MAX,
};
+static int mce_default_notifier(struct notifier_block *nb, unsigned long val,
+ void *data)
+{
+ struct mce *m = (struct mce *)data;
+
+ if (!m)
+ return NOTIFY_DONE;
+
+ /*
+ * Run the default notifier only if the SRAO notifier and this
+ * one are the only notifiers registered.
+ */
+ if (atomic_read(&num_notifiers) > 2)
+ return NOTIFY_DONE;
+
+ __print_mce(m);
+
+ return NOTIFY_DONE;
+}
+
+static struct notifier_block mce_default_nb = {
+ .notifier_call = mce_default_notifier,
+ /* lowest prio, we want it to run last. */
+ .priority = 0,
+};
+
/*
* Read ADDR and MISC registers.
*/
mce_gather_info(&m, NULL);
+ /*
+ * m.tsc was set in mce_setup(). Clear it if not requested.
+ *
+ * FIXME: Propagate @flags to mce_gather_info/mce_setup() to avoid
+ * that dance.
+ */
+ if (!(flags & MCP_TIMESTAMP))
+ m.tsc = 0;
+
for (i = 0; i < mca_cfg.banks; i++) {
if (!mce_banks[i].ctl || !test_bit(i, *b))
continue;
m.misc = 0;
m.addr = 0;
m.bank = i;
- m.tsc = 0;
barrier();
m.status = mce_rdmsrl(msr_ops.status(i));
if (!(m.status & MCI_STATUS_VAL))
continue;
-
/*
* Uncorrected or signalled events are handled by the exception
* handler when it is enabled, so don't process those here.
mce_read_aux(&m, i);
- if (!(flags & MCP_TIMESTAMP))
- m.tsc = 0;
-
severity = mce_severity(&m, mca_cfg.tolerant, NULL, false);
if (severity == MCE_DEFERRED_SEVERITY && memory_error(&m))
iv = __this_cpu_read(mce_next_interval);
if (mce_available(this_cpu_ptr(&cpu_info))) {
- machine_check_poll(MCP_TIMESTAMP, this_cpu_ptr(&mce_poll_banks));
+ machine_check_poll(0, this_cpu_ptr(&mce_poll_banks));
if (mce_intel_cmci_poll()) {
iv = mce_adjust_timer(iv);
add_timer_on(t, cpu);
}
+static void __mcheck_cpu_setup_timer(void)
+{
+ struct timer_list *t = this_cpu_ptr(&mce_timer);
+ unsigned int cpu = smp_processor_id();
+
+ setup_pinned_timer(t, mce_timer_fn, cpu);
+}
+
static void __mcheck_cpu_init_timer(void)
{
struct timer_list *t = this_cpu_ptr(&mce_timer);
__mcheck_cpu_init_generic();
__mcheck_cpu_init_vendor(c);
__mcheck_cpu_init_clear_banks();
- __mcheck_cpu_init_timer();
+ __mcheck_cpu_setup_timer();
}
/*
{
mcheck_intel_therm_init();
mce_register_decode_chain(&mce_srao_nb);
+ mce_register_decode_chain(&mce_default_nb);
mcheck_vendor_init_severity();
INIT_WORK(&mce_work, mce_process_work);
DEFINE_PER_CPU(struct device *, mce_device);
-void (*threshold_cpu_callback)(unsigned long action, unsigned int cpu);
-
static inline struct mce_bank *attr_to_bank(struct device_attribute *attr)
{
return container_of(attr, struct mce_bank, attr);
if (!mce_available(&boot_cpu_data))
return -EIO;
+ dev = per_cpu(mce_device, cpu);
+ if (dev)
+ return 0;
+
dev = kzalloc(sizeof *dev, GFP_KERNEL);
if (!dev)
return -ENOMEM;
}
/* Make sure there are no machine checks on offlined CPUs. */
-static void mce_disable_cpu(void *h)
+static void mce_disable_cpu(void)
{
- unsigned long action = *(unsigned long *)h;
-
if (!mce_available(raw_cpu_ptr(&cpu_info)))
return;
- if (!(action & CPU_TASKS_FROZEN))
+ if (!cpuhp_tasks_frozen)
cmci_clear();
vendor_disable_error_reporting();
}
-static void mce_reenable_cpu(void *h)
+static void mce_reenable_cpu(void)
{
- unsigned long action = *(unsigned long *)h;
int i;
if (!mce_available(raw_cpu_ptr(&cpu_info)))
return;
- if (!(action & CPU_TASKS_FROZEN))
+ if (!cpuhp_tasks_frozen)
cmci_reenable();
for (i = 0; i < mca_cfg.banks; i++) {
struct mce_bank *b = &mce_banks[i];
}
}
-/* Get notified when a cpu comes on/off. Be hotplug friendly. */
-static int
-mce_cpu_callback(struct notifier_block *nfb, unsigned long action, void *hcpu)
+static int mce_cpu_dead(unsigned int cpu)
+{
+ mce_intel_hcpu_update(cpu);
+
+ /* intentionally ignoring frozen here */
+ if (!cpuhp_tasks_frozen)
+ cmci_rediscover();
+ return 0;
+}
+
+static int mce_cpu_online(unsigned int cpu)
{
- unsigned int cpu = (unsigned long)hcpu;
struct timer_list *t = &per_cpu(mce_timer, cpu);
+ int ret;
- switch (action & ~CPU_TASKS_FROZEN) {
- case CPU_ONLINE:
- mce_device_create(cpu);
- if (threshold_cpu_callback)
- threshold_cpu_callback(action, cpu);
- break;
- case CPU_DEAD:
- if (threshold_cpu_callback)
- threshold_cpu_callback(action, cpu);
- mce_device_remove(cpu);
- mce_intel_hcpu_update(cpu);
+ mce_device_create(cpu);
- /* intentionally ignoring frozen here */
- if (!(action & CPU_TASKS_FROZEN))
- cmci_rediscover();
- break;
- case CPU_DOWN_PREPARE:
- smp_call_function_single(cpu, mce_disable_cpu, &action, 1);
- del_timer_sync(t);
- break;
- case CPU_DOWN_FAILED:
- smp_call_function_single(cpu, mce_reenable_cpu, &action, 1);
- mce_start_timer(cpu, t);
- break;
+ ret = mce_threshold_create_device(cpu);
+ if (ret) {
+ mce_device_remove(cpu);
+ return ret;
}
-
- return NOTIFY_OK;
+ mce_reenable_cpu();
+ mce_start_timer(cpu, t);
+ return 0;
}
-static struct notifier_block mce_cpu_notifier = {
- .notifier_call = mce_cpu_callback,
-};
+static int mce_cpu_pre_down(unsigned int cpu)
+{
+ struct timer_list *t = &per_cpu(mce_timer, cpu);
+
+ mce_disable_cpu();
+ del_timer_sync(t);
+ mce_threshold_remove_device(cpu);
+ mce_device_remove(cpu);
+ return 0;
+}
static __init void mce_init_banks(void)
{
static __init int mcheck_init_device(void)
{
+ enum cpuhp_state hp_online;
int err;
- int i = 0;
if (!mce_available(&boot_cpu_data)) {
err = -EIO;
if (err)
goto err_out_mem;
- cpu_notifier_register_begin();
- for_each_online_cpu(i) {
- err = mce_device_create(i);
- if (err) {
- /*
- * Register notifier anyway (and do not unreg it) so
- * that we don't leave undeleted timers, see notifier
- * callback above.
- */
- __register_hotcpu_notifier(&mce_cpu_notifier);
- cpu_notifier_register_done();
- goto err_device_create;
- }
- }
+ err = cpuhp_setup_state(CPUHP_X86_MCE_DEAD, "x86/mce:dead", NULL,
+ mce_cpu_dead);
+ if (err)
+ goto err_out_mem;
- __register_hotcpu_notifier(&mce_cpu_notifier);
- cpu_notifier_register_done();
+ err = cpuhp_setup_state(CPUHP_AP_ONLINE_DYN, "x86/mce:online",
+ mce_cpu_online, mce_cpu_pre_down);
+ if (err < 0)
+ goto err_out_online;
+ hp_online = err;
register_syscore_ops(&mce_syscore_ops);
err_register:
unregister_syscore_ops(&mce_syscore_ops);
+ cpuhp_remove_state(hp_online);
-err_device_create:
- /*
- * We didn't keep track of which devices were created above, but
- * even if we had, the set of online cpus might have changed.
- * Play safe and remove for every possible cpu, since
- * mce_device_remove() will do the right thing.
- */
- for_each_possible_cpu(i)
- mce_device_remove(i);
+err_out_online:
+ cpuhp_remove_state(CPUHP_X86_MCE_DEAD);
err_out_mem:
free_cpumask_var(mce_device_initialized);
schedule_work(&console_work);
}
-static void scrup(struct vc_data *vc, unsigned int t, unsigned int b, int nr)
+static void con_scroll(struct vc_data *vc, unsigned int t, unsigned int b,
+ enum con_scroll dir, unsigned int nr)
{
- unsigned short *d, *s;
+ u16 *clear, *d, *s;
- if (t+nr >= b)
+ if (t + nr >= b)
nr = b - t - 1;
if (b > vc->vc_rows || t >= b || nr < 1)
return;
- if (con_is_visible(vc) && vc->vc_sw->con_scroll(vc, t, b, SM_UP, nr))
+ if (con_is_visible(vc) && vc->vc_sw->con_scroll(vc, t, b, dir, nr))
return;
- d = (unsigned short *)(vc->vc_origin + vc->vc_size_row * t);
- s = (unsigned short *)(vc->vc_origin + vc->vc_size_row * (t + nr));
- scr_memmovew(d, s, (b - t - nr) * vc->vc_size_row);
- scr_memsetw(d + (b - t - nr) * vc->vc_cols, vc->vc_video_erase_char,
- vc->vc_size_row * nr);
-}
-static void scrdown(struct vc_data *vc, unsigned int t, unsigned int b, int nr)
-{
- unsigned short *s;
- unsigned int step;
+ s = clear = (u16 *)(vc->vc_origin + vc->vc_size_row * t);
+ d = (u16 *)(vc->vc_origin + vc->vc_size_row * (t + nr));
- if (t+nr >= b)
- nr = b - t - 1;
- if (b > vc->vc_rows || t >= b || nr < 1)
- return;
- if (con_is_visible(vc) && vc->vc_sw->con_scroll(vc, t, b, SM_DOWN, nr))
- return;
- s = (unsigned short *)(vc->vc_origin + vc->vc_size_row * t);
- step = vc->vc_cols * nr;
- scr_memmovew(s + step, s, (b - t - nr) * vc->vc_size_row);
- scr_memsetw(s, vc->vc_video_erase_char, 2 * step);
+ if (dir == SM_UP) {
+ clear = s + (b - t - nr) * vc->vc_cols;
+ swap(s, d);
+ }
+ scr_memmovew(d, s, (b - t - nr) * vc->vc_size_row);
+ scr_memsetw(clear, vc->vc_video_erase_char, vc->vc_size_row * nr);
}
static void do_update_region(struct vc_data *vc, unsigned long start, int count)
if (new_cols == vc->vc_cols && new_rows == vc->vc_rows)
return 0;
+ if (new_screen_size > (4 << 20))
+ return -EINVAL;
newscreen = kmalloc(new_screen_size, GFP_USER);
if (!newscreen)
return -ENOMEM;
+ if (vc == sel_cons)
+ clear_selection();
+
old_rows = vc->vc_rows;
old_row_size = vc->vc_size_row;
* if below scrolling region
*/
if (vc->vc_y + 1 == vc->vc_bottom)
- scrup(vc, vc->vc_top, vc->vc_bottom, 1);
+ con_scroll(vc, vc->vc_top, vc->vc_bottom, SM_UP, 1);
else if (vc->vc_y < vc->vc_rows - 1) {
vc->vc_y++;
vc->vc_pos += vc->vc_size_row;
* if above scrolling region
*/
if (vc->vc_y == vc->vc_top)
- scrdown(vc, vc->vc_top, vc->vc_bottom, 1);
+ con_scroll(vc, vc->vc_top, vc->vc_bottom, SM_DOWN, 1);
else if (vc->vc_y > 0) {
vc->vc_y--;
vc->vc_pos -= vc->vc_size_row;
break;
case 3: /* erase scroll-back buffer (and whole display) */
scr_memsetw(vc->vc_screenbuf, vc->vc_video_erase_char,
- vc->vc_screenbuf_size >> 1);
+ vc->vc_screenbuf_size);
set_origin(vc);
if (con_is_visible(vc))
update_screen(vc);
nr = vc->vc_rows - vc->vc_y;
else if (!nr)
nr = 1;
- scrdown(vc, vc->vc_y, vc->vc_bottom, nr);
+ con_scroll(vc, vc->vc_y, vc->vc_bottom, SM_DOWN, nr);
vc->vc_need_wrap = 0;
}
nr = vc->vc_rows - vc->vc_y;
else if (!nr)
nr=1;
- scrup(vc, vc->vc_y, vc->vc_bottom, nr);
+ con_scroll(vc, vc->vc_y, vc->vc_bottom, SM_UP, nr);
vc->vc_need_wrap = 0;
}
*/
static void blank_screen_t(unsigned long dummy)
{
- if (unlikely(!keventd_up())) {
- mod_timer(&console_timer, jiffies + (blankinterval * HZ));
- return;
- }
blank_timer_expired = 1;
schedule_work(&console_work);
}
notify_update(vc);
}
+void vc_scrolldelta_helper(struct vc_data *c, int lines,
+ unsigned int rolled_over, void *base, unsigned int size)
+{
+ unsigned long ubase = (unsigned long)base;
+ ptrdiff_t scr_end = (void *)c->vc_scr_end - base;
+ ptrdiff_t vorigin = (void *)c->vc_visible_origin - base;
+ ptrdiff_t origin = (void *)c->vc_origin - base;
+ int margin = c->vc_size_row * 4;
+ int from, wrap, from_off, avail;
+
+ /* Turn scrollback off */
+ if (!lines) {
+ c->vc_visible_origin = c->vc_origin;
+ return;
+ }
+
+ /* Do we already have enough to allow jumping from 0 to the end? */
+ if (rolled_over > scr_end + margin) {
+ from = scr_end;
+ wrap = rolled_over + c->vc_size_row;
+ } else {
+ from = 0;
+ wrap = size;
+ }
+
+ from_off = (vorigin - from + wrap) % wrap + lines * c->vc_size_row;
+ avail = (origin - from + wrap) % wrap;
+
+ /* Would only a little piece be left? Then show it all, including that piece. */
+ if (avail < 2 * margin)
+ margin = 0;
+ if (from_off < margin)
+ from_off = 0;
+ if (from_off > avail - margin)
+ from_off = avail;
+
+ c->vc_visible_origin = ubase + (from + from_off) % wrap;
+}
+EXPORT_SYMBOL_GPL(vc_scrolldelta_helper);
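A console driver is expected to call this helper from its scrolldelta hook; the sketch below only illustrates the intended calling convention, and every example_* symbol is hypothetical driver state, not code from this patch:

/* Hypothetical con_scrolldelta implementation built on the helper. */
static int example_con_scrolldelta(struct vc_data *c, int lines)
{
	vc_scrolldelta_helper(c, lines, example_rolled_over,
			      (void *)example_vram_base, example_vram_size);
	/* Reprogram the hardware display start here if the driver needs to. */
	return 1;
}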
+
/*
* Visible symbols for modules
*/
int cpu;
};
-/*
- * A struct for workqueue attributes. This can be used to change
- * attributes of an unbound workqueue.
+/**
+ * struct workqueue_attrs - A struct for workqueue attributes.
*
- * Unlike other fields, ->no_numa isn't a property of a worker_pool. It
- * only modifies how apply_workqueue_attrs() select pools and thus doesn't
- * participate in pool hash calculations or equality comparisons.
+ * This can be used to change attributes of an unbound workqueue.
*/
struct workqueue_attrs {
- int nice; /* nice level */
- cpumask_var_t cpumask; /* allowed CPUs */
- bool no_numa; /* disable NUMA affinity */
+ /**
+ * @nice: nice level
+ */
+ int nice;
+
+ /**
+ * @cpumask: allowed CPUs
+ */
+ cpumask_var_t cpumask;
+
+ /**
+ * @no_numa: disable NUMA affinity
+ *
+ * Unlike other fields, ``no_numa`` isn't a property of a worker_pool. It
+ * only modifies how :c:func:`apply_workqueue_attrs` selects pools and thus
+ * doesn't participate in pool hash calculations or equality comparisons.
+ */
+ bool no_numa;
};
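The attributes themselves are still handled through the existing helpers; a minimal usage sketch for tuning a hypothetical unbound workqueue (example_wq), with error handling elided:

	struct workqueue_attrs *attrs;

	attrs = alloc_workqueue_attrs(GFP_KERNEL);
	if (attrs) {
		attrs->nice = -5;				/* mildly boost the workers */
		cpumask_copy(attrs->cpumask, cpumask_of(0));	/* pin work to CPU 0 */
		apply_workqueue_attrs(example_wq, attrs);	/* example_wq must be WQ_UNBOUND */
		free_workqueue_attrs(attrs);
	}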
static inline struct delayed_work *to_delayed_work(struct work_struct *work)
/*
* Workqueue flags and constants. For details, please refer to
- * Documentation/workqueue.txt.
+ * Documentation/core-api/workqueue.rst.
*/
enum {
WQ_UNBOUND = 1 << 1, /* not bound to any cpu */
* @args...: args for @fmt
*
* Allocate a workqueue with the specified parameters. For detailed
- * information on WQ_* flags, please refer to Documentation/workqueue.txt.
+ * information on WQ_* flags, please refer to
+ * Documentation/core-api/workqueue.rst.
*
* The __lock_name macro dance is to guarantee that a single lock_class_key
* doesn't end up with different names, which isn't allowed by lockdep.
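For reference, a typical call to the macro this comment documents looks like the sketch below; the queue name and flags are arbitrary examples:

	struct workqueue_struct *example_wq;

	example_wq = alloc_workqueue("example_events",
				     WQ_UNBOUND | WQ_MEM_RECLAIM, 0);
	if (!example_wq)
		return -ENOMEM;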
return queue_delayed_work(system_wq, dwork, delay);
}
- /**
- * keventd_up - is workqueue initialized yet?
- */
- static inline bool keventd_up(void)
- {
- return system_wq != NULL;
- }
-
#ifndef CONFIG_SMP
static inline long work_on_cpu(int cpu, long (*fn)(void *), void *arg)
{
int workqueue_offline_cpu(unsigned int cpu);
#endif
+ int __init workqueue_init_early(void);
+ int __init workqueue_init(void);
+
#endif
done = 1;
}
+void __init __weak arch_post_acpi_subsys_init(void) { }
+
void __init __weak smp_setup_processor_id(void)
{
}
"Interrupts were enabled *very* early, fixing it\n"))
local_irq_disable();
idr_init_cache();
+
+ /*
+ * Allow workqueue creation and work item queueing/cancelling
+ * early. Work item execution depends on kthreads and starts after
+ * workqueue_init().
+ */
+ workqueue_init_early();
+
rcu_init();
/* trace_printk() and trace points may be used after this */
check_bugs();
acpi_subsystem_init();
+ arch_post_acpi_subsys_init();
sfi_init_late();
if (efi_enabled(EFI_RUNTIME_SERVICES)) {
return 0;
panic("No working init found. Try passing init= option to kernel. "
- "See Linux Documentation/init.txt for guidance.");
+ "See Linux Documentation/admin-guide/init.rst for guidance.");
}
static noinline void __init kernel_init_freeable(void)
smp_prepare_cpus(setup_max_cpus);
+ workqueue_init();
+
do_pre_smp_initcalls();
lockup_detector_init();
* initialized:
*/
if (obj_pool_free > ODEBUG_POOL_SIZE && obj_cache)
- sched = keventd_up();
+ sched = 1;
hlist_add_head(&obj->node, &obj_pool);
obj_pool_free++;
obj_pool_used--;
__debug_object_init(addr, descr, 0);
}
+EXPORT_SYMBOL_GPL(debug_object_init);
/**
* debug_object_init_on_stack - debug checks when an object on stack is
__debug_object_init(addr, descr, 1);
}
+EXPORT_SYMBOL_GPL(debug_object_init_on_stack);
/**
* debug_object_activate - debug checks when an object is activated
}
return 0;
}
+EXPORT_SYMBOL_GPL(debug_object_activate);
/**
* debug_object_deactivate - debug checks when an object is deactivated
raw_spin_unlock_irqrestore(&db->lock, flags);
}
+EXPORT_SYMBOL_GPL(debug_object_deactivate);
/**
* debug_object_destroy - debug checks when an object is destroyed
out_unlock:
raw_spin_unlock_irqrestore(&db->lock, flags);
}
+EXPORT_SYMBOL_GPL(debug_object_destroy);
/**
* debug_object_free - debug checks when an object is freed
out_unlock:
raw_spin_unlock_irqrestore(&db->lock, flags);
}
+EXPORT_SYMBOL_GPL(debug_object_free);
/**
* debug_object_assert_init - debug checks when object should be init-ed
raw_spin_unlock_irqrestore(&db->lock, flags);
}
+EXPORT_SYMBOL_GPL(debug_object_assert_init);
/**
* debug_object_active_state - debug checks object usage state machine
raw_spin_unlock_irqrestore(&db->lock, flags);
}
+EXPORT_SYMBOL_GPL(debug_object_active_state);
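With these symbols exported, modular code can annotate its own objects; a hedged sketch with a hypothetical object type and descriptor:

/* Hypothetical module-side usage of the newly exported hooks. */
struct example_obj {
	int dummy;			/* placeholder payload */
};

static struct debug_obj_descr example_obj_debug_descr = {
	.name = "example_obj",
};

static void example_obj_setup(struct example_obj *obj)
{
	debug_object_init(obj, &example_obj_debug_descr);
	debug_object_activate(obj, &example_obj_debug_descr);
}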
#ifdef CONFIG_DEBUG_OBJECTS_FREE
static void __debug_check_no_obj_freed(const void *address, unsigned long size)
INIT_LIST_HEAD(&parent->slabs_full);
INIT_LIST_HEAD(&parent->slabs_partial);
INIT_LIST_HEAD(&parent->slabs_free);
+ parent->total_slabs = 0;
+ parent->free_slabs = 0;
parent->shared = NULL;
parent->alien = NULL;
parent->colour_next = 0;
{
struct delayed_work *reap_work = &per_cpu(slab_reap_work, cpu);
- /*
- * When this gets called from do_initcalls via cpucache_init(),
- * init_workqueues() has already run, so keventd will be setup
- * at that time.
- */
- if (keventd_up() && reap_work->work.func == NULL) {
+ if (reap_work->work.func == NULL) {
init_reap_node(cpu);
INIT_DEFERRABLE_WORK(reap_work, cache_reap);
schedule_delayed_work_on(cpu, reap_work,
* guaranteed to be valid until irq is re-enabled, because it will be
* freed after synchronize_sched().
*/
- if (force_change)
+ if (old_shared && force_change)
synchronize_sched();
fail:
{
#if DEBUG
struct kmem_cache_node *n;
- struct page *page;
unsigned long flags;
int node;
static DEFINE_RATELIMIT_STATE(slab_oom_rs, DEFAULT_RATELIMIT_INTERVAL,
cachep->name, cachep->size, cachep->gfporder);
for_each_kmem_cache_node(cachep, node, n) {
- unsigned long active_objs = 0, num_objs = 0, free_objects = 0;
- unsigned long active_slabs = 0, num_slabs = 0;
+ unsigned long total_slabs, free_slabs, free_objs;
spin_lock_irqsave(&n->list_lock, flags);
- list_for_each_entry(page, &n->slabs_full, lru) {
- active_objs += cachep->num;
- active_slabs++;
- }
- list_for_each_entry(page, &n->slabs_partial, lru) {
- active_objs += page->active;
- active_slabs++;
- }
- list_for_each_entry(page, &n->slabs_free, lru)
- num_slabs++;
-
- free_objects += n->free_objects;
+ total_slabs = n->total_slabs;
+ free_slabs = n->free_slabs;
+ free_objs = n->free_objects;
spin_unlock_irqrestore(&n->list_lock, flags);
- num_slabs += active_slabs;
- num_objs = num_slabs * cachep->num;
- pr_warn(" node %d: slabs: %ld/%ld, objs: %ld/%ld, free: %ld\n",
- node, active_slabs, num_slabs, active_objs, num_objs,
- free_objects);
+ pr_warn(" node %d: slabs: %ld/%ld, objs: %ld/%ld\n",
+ node, total_slabs - free_slabs, total_slabs,
+ (total_slabs * cachep->num) - free_objs,
+ total_slabs * cachep->num);
}
#endif
}
page = list_entry(p, struct page, lru);
list_del(&page->lru);
+ n->free_slabs--;
+ n->total_slabs--;
/*
* Safe to drop the lock. The slab is no longer linked
* to the cache.
return nr_freed;
}
-int __kmem_cache_shrink(struct kmem_cache *cachep, bool deactivate)
+int __kmem_cache_shrink(struct kmem_cache *cachep)
{
int ret = 0;
int node;
int __kmem_cache_shutdown(struct kmem_cache *cachep)
{
- return __kmem_cache_shrink(cachep, false);
+ return __kmem_cache_shrink(cachep);
}
void __kmem_cache_release(struct kmem_cache *cachep)
n = get_node(cachep, page_to_nid(page));
spin_lock(&n->list_lock);
- if (!page->active)
+ n->total_slabs++;
+ if (!page->active) {
list_add_tail(&page->lru, &(n->slabs_free));
- else
+ n->free_slabs++;
+ } else
fixup_slab_list(cachep, n, page, &list);
+
STATS_INC_GROWN(cachep);
n->free_objects += cachep->num - page->active;
spin_unlock(&n->list_lock);
/* Move pfmemalloc slab to the end of list to speed up next search */
list_del(&page->lru);
- if (!page->active)
+ if (!page->active) {
list_add_tail(&page->lru, &n->slabs_free);
- else
+ n->free_slabs++;
+ } else
list_add_tail(&page->lru, &n->slabs_partial);
list_for_each_entry(page, &n->slabs_partial, lru) {
return page;
}
+ n->free_touched = 1;
list_for_each_entry(page, &n->slabs_free, lru) {
- if (!PageSlabPfmemalloc(page))
+ if (!PageSlabPfmemalloc(page)) {
+ n->free_slabs--;
return page;
+ }
}
return NULL;
{
struct page *page;
- page = list_first_entry_or_null(&n->slabs_partial,
- struct page, lru);
+ assert_spin_locked(&n->list_lock);
+ page = list_first_entry_or_null(&n->slabs_partial, struct page, lru);
if (!page) {
n->free_touched = 1;
- page = list_first_entry_or_null(&n->slabs_free,
- struct page, lru);
+ page = list_first_entry_or_null(&n->slabs_free, struct page,
+ lru);
+ if (page)
+ n->free_slabs--;
}
if (sk_memalloc_socks())
- return get_valid_first_slab(n, page, pfmemalloc);
+ page = get_valid_first_slab(n, page, pfmemalloc);
return page;
}
STATS_DEC_ACTIVE(cachep);
/* fixup slab chains */
- if (page->active == 0)
+ if (page->active == 0) {
list_add(&page->lru, &n->slabs_free);
- else {
+ n->free_slabs++;
+ } else {
/* Unconditionally move a slab to the end of the
* partial list on free - maximum time for the
* other objects to be freed, too.
page = list_last_entry(&n->slabs_free, struct page, lru);
list_move(&page->lru, list);
+ n->free_slabs--;
+ n->total_slabs--;
}
}
#ifdef CONFIG_SLABINFO
void get_slabinfo(struct kmem_cache *cachep, struct slabinfo *sinfo)
{
- struct page *page;
- unsigned long active_objs;
- unsigned long num_objs;
- unsigned long active_slabs = 0;
- unsigned long num_slabs, free_objects = 0, shared_avail = 0;
- const char *name;
- char *error = NULL;
+ unsigned long active_objs, num_objs, active_slabs;
+ unsigned long total_slabs = 0, free_objs = 0, shared_avail = 0;
+ unsigned long free_slabs = 0;
int node;
struct kmem_cache_node *n;
- active_objs = 0;
- num_slabs = 0;
for_each_kmem_cache_node(cachep, node, n) {
-
check_irq_on();
spin_lock_irq(&n->list_lock);
- list_for_each_entry(page, &n->slabs_full, lru) {
- if (page->active != cachep->num && !error)
- error = "slabs_full accounting error";
- active_objs += cachep->num;
- active_slabs++;
- }
- list_for_each_entry(page, &n->slabs_partial, lru) {
- if (page->active == cachep->num && !error)
- error = "slabs_partial accounting error";
- if (!page->active && !error)
- error = "slabs_partial accounting error";
- active_objs += page->active;
- active_slabs++;
- }
- list_for_each_entry(page, &n->slabs_free, lru) {
- if (page->active && !error)
- error = "slabs_free accounting error";
- num_slabs++;
- }
- free_objects += n->free_objects;
+ total_slabs += n->total_slabs;
+ free_slabs += n->free_slabs;
+ free_objs += n->free_objects;
+
if (n->shared)
shared_avail += n->shared->avail;
spin_unlock_irq(&n->list_lock);
}
- num_slabs += active_slabs;
- num_objs = num_slabs * cachep->num;
- if (num_objs - active_objs != free_objects && !error)
- error = "free_objects accounting error";
-
- name = cachep->name;
- if (error)
- pr_err("slab: cache %s error: %s\n", name, error);
+ num_objs = total_slabs * cachep->num;
+ active_slabs = total_slabs - free_slabs;
+ active_objs = num_objs - free_objs;
sinfo->active_objs = active_objs;
sinfo->num_objs = num_objs;
sinfo->active_slabs = active_slabs;
- sinfo->num_slabs = num_slabs;
+ sinfo->num_slabs = total_slabs;
sinfo->shared_avail = shared_avail;
sinfo->limit = cachep->limit;
sinfo->batchcount = cachep->batchcount;