/*
* Ease the printing of nsec fields:
*/
-static long long nsec_high(long long nsec)
+static long long nsec_high(unsigned long long nsec)
{
-	if (nsec < 0) {
+	if ((long long)nsec < 0) {
		nsec = -nsec;
		do_div(nsec, 1000000);
		return -nsec;
	}
	do_div(nsec, 1000000);

	return nsec;
}
-static unsigned long nsec_low(long long nsec)
+static unsigned long nsec_low(unsigned long long nsec)
{
- if (nsec < 0)
+ if ((long long)nsec < 0)
nsec = -nsec;
	return do_div(nsec, 1000000);
}
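For reference, the two helpers are consumed in pairs by the print macros in this file; a minimal usage sketch, matching the SPLIT_NS helper defined alongside them in sched_debug.c:

/*
 * Sketch: nsec_high()/nsec_low() split a nanosecond count into a
 * millisecond part and a 6-digit sub-millisecond remainder.
 */
#define SPLIT_NS(x) nsec_high(x), nsec_low(x)

/* e.g. 1234567890 ns prints as "1234.567890" */
SEQ_printf(m, "%lld.%06ld\n", SPLIT_NS(1234567890LL));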
PN(prev_clock_raw);
P(clock_warps);
P(clock_overflows);
+ P(clock_underflows);
P(clock_deep_idle_events);
PN(clock_max_delta);
P(cpu_load[0]);
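The P()/PN() lines above expand via local stringizing macros in print_cpu(); a sketch of their approximate shape (hedged, the exact format strings may differ):

/*
 * Sketch of the print_cpu() helpers: P() prints an integer rq field,
 * PN() a nanosecond field via SPLIT_NS. The #x stringizing is what
 * makes names like "clock_underflows" appear verbatim in the output.
 */
#define P(x) \
	SEQ_printf(m, "  .%-30s: %Ld\n", #x, (long long)(rq->x))
#define PN(x) \
	SEQ_printf(m, "  .%-30s: %Ld.%06ld\n", #x, SPLIT_NS(rq->x))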
u64 now = ktime_to_ns(ktime_get());
int cpu;
- SEQ_printf(m, "Sched Debug Version: v0.06-v22, %s %.*s\n",
+ SEQ_printf(m, "Sched Debug Version: v0.07, %s %.*s\n",
init_utsname()->release,
(int)strcspn(init_utsname()->version, " "),
init_utsname()->version);
PN(se.exec_max);
PN(se.slice_max);
PN(se.wait_max);
+ PN(se.wait_sum);
+ P(se.wait_count);
P(sched_info.bkl_count);
P(se.nr_migrations);
P(se.nr_migrations_cold);
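wait_sum and wait_count together yield a derived average wait time; a sketch of where they would be accumulated, assuming the same spot in sched_fair.c that already maintains wait_max:

/*
 * Sketch (assumed accounting site): when a task finishes waiting for
 * the CPU, add the wait to the running sum and bump the count, so
 * wait_sum / wait_count gives the task's average wait time.
 */
static void
update_stats_wait_end(struct cfs_rq *cfs_rq, struct sched_entity *se)
{
	schedstat_set(se->wait_max, max(se->wait_max,
			rq_of(cfs_rq)->clock - se->wait_start));
	schedstat_set(se->wait_count, se->wait_count + 1);
	schedstat_set(se->wait_sum, se->wait_sum +
			rq_of(cfs_rq)->clock - se->wait_start);
	schedstat_set(se->wait_start, 0);
}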
avg_atom = -1LL;
avg_per_cpu = p->se.sum_exec_runtime;
- if (p->se.nr_migrations)
- avg_per_cpu = div64_64(avg_per_cpu, p->se.nr_migrations);
- else
+ if (p->se.nr_migrations) {
+ avg_per_cpu = div64_64(avg_per_cpu,
+ p->se.nr_migrations);
+ } else {
avg_per_cpu = -1LL;
+ }
__PN(avg_atom);
__PN(avg_per_cpu);
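One design note on the division in this hunk:

/*
 * Design note (sketch): do_div() only accepts a 32-bit divisor, which
 * suffices for the avg_atom path, but se.nr_migrations is a u64, so
 * the full 64-by-64 division helper div64_64() is needed here to stay
 * correct on 32-bit architectures. avg_atom is runtime per scheduling
 * atom (per context switch); avg_per_cpu is runtime per stay on one
 * CPU (per migration).
 */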
{
#ifdef CONFIG_SCHEDSTATS
p->se.wait_max = 0;
+ p->se.wait_sum = 0;
+ p->se.wait_count = 0;
p->se.sleep_max = 0;
p->se.sum_sleep_runtime = 0;
p->se.block_max = 0;
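The new counters are zeroed together with the existing ones because proc_sched_set_task() is the stats-reset hook; a sketch of the trigger path, assuming the usual /proc/<pid>/sched write handler in fs/proc/base.c:

/*
 * Sketch (simplified): writing anything to /proc/<pid>/sched lands
 * here, which calls proc_sched_set_task() and thereby clears wait_max,
 * wait_sum, wait_count, etc. Leaving wait_sum/wait_count out of the
 * reset would skew the derived average after a reset.
 */
static ssize_t
sched_write(struct file *file, const char __user *buf,
	    size_t count, loff_t *offset)
{
	struct inode *inode = file->f_path.dentry->d_inode;
	struct task_struct *p;

	p = get_proc_task(inode);
	if (!p)
		return -ESRCH;
	proc_sched_set_task(p);
	put_task_struct(p);

	return count;
}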