Merge branch 'core-locking-for-linus' of git://git.kernel.org/pub/scm/linux/kernel...
author    Linus Torvalds <[email protected]>
Fri, 22 Jul 2011 23:43:21 +0000 (16:43 -0700)
committer Linus Torvalds <[email protected]>
Fri, 22 Jul 2011 23:43:21 +0000 (16:43 -0700)
* 'core-locking-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/linux-2.6-tip:
  lockdep: Fix lockdep_no_validate against IRQ states
  mutex: Make mutex_destroy() an inline function
  plist: Remove the need to supply locks to plist heads
  lockup detector: Fix reference to the non-existent CONFIG_DETECT_SOFTLOCKUP option

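The mutex_destroy() commit in the list above only touches include/linux/mutex.h, so it does not appear in the combined diff below.  As a rough sketch of the idea for the !CONFIG_DEBUG_MUTEXES case (the before/after definitions are paraphrased, not quoted from the commit): turning the no-op macro into a static inline keeps the empty body but lets the compiler type-check the argument even in non-debug builds.

struct mutex;

/* before (roughly): a no-op macro, the argument is never even looked at */
/* #define mutex_destroy(lock) do { } while (0) */

/* after (roughly): still a no-op, but 'lock' must really be a struct mutex * */
static inline void mutex_destroy(struct mutex *lock)
{
}
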
kernel/fork.c
kernel/sched.c

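The plist_head_init_raw() -> plist_head_init() conversions in both files below come from the "plist: Remove the need to supply locks to plist heads" commit.  A minimal kernel-style sketch of the API change (the pi_waiters/pi_lock names mirror task_struct purely for illustration; this is not code from the commit):

#include <linux/plist.h>
#include <linux/spinlock.h>

static DEFINE_RAW_SPINLOCK(pi_lock);
static struct plist_head pi_waiters;

static void plist_init_example(void)
{
	/* Before: the head was initialized with the lock that protects it,
	 * so the plist debug checks could assert that lock was held. */
	/* plist_head_init_raw(&pi_waiters, &pi_lock); */

	/* After: the head carries no lock pointer; callers are simply
	 * expected to hold the appropriate lock around plist operations. */
	plist_head_init(&pi_waiters);
}
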
diff --combined kernel/fork.c
index 4d4117e01504ec072c94f2b4fad8008bf41321f1,7517a53d50e3df5e61fe7f08f7ddf53df4caf0d9..ca339c5c5819db323d9e3734b0ab9c622cda2875
@@@ -37,6 -37,7 +37,6 @@@
  #include <linux/swap.h>
  #include <linux/syscalls.h>
  #include <linux/jiffies.h>
 -#include <linux/tracehook.h>
  #include <linux/futex.h>
  #include <linux/compat.h>
  #include <linux/kthread.h>
@@@ -1012,7 -1013,7 +1012,7 @@@ static void rt_mutex_init_task(struct t
  {
        raw_spin_lock_init(&p->pi_lock);
  #ifdef CONFIG_RT_MUTEXES
-       plist_head_init_raw(&p->pi_waiters, &p->pi_lock);
+       plist_head_init(&p->pi_waiters);
        p->pi_blocked_on = NULL;
  #endif
  }
@@@ -1339,7 -1340,7 +1339,7 @@@ static struct task_struct *copy_process
        }
  
        if (likely(p->pid)) {
 -              tracehook_finish_clone(p, clone_flags, trace);
 +              ptrace_init_task(p, (clone_flags & CLONE_PTRACE) || trace);
  
                if (thread_group_leader(p)) {
                        if (is_child_reaper(pid))
@@@ -1480,22 -1481,10 +1480,22 @@@ long do_fork(unsigned long clone_flags
        }
  
        /*
 -       * When called from kernel_thread, don't do user tracing stuff.
 +       * Determine whether and which event to report to ptracer.  When
 +       * called from kernel_thread or CLONE_UNTRACED is explicitly
 +       * requested, no event is reported; otherwise, report if the event
 +       * for the type of forking is enabled.
         */
 -      if (likely(user_mode(regs)))
 -              trace = tracehook_prepare_clone(clone_flags);
 +      if (likely(user_mode(regs)) && !(clone_flags & CLONE_UNTRACED)) {
 +              if (clone_flags & CLONE_VFORK)
 +                      trace = PTRACE_EVENT_VFORK;
 +              else if ((clone_flags & CSIGNAL) != SIGCHLD)
 +                      trace = PTRACE_EVENT_CLONE;
 +              else
 +                      trace = PTRACE_EVENT_FORK;
 +
 +              if (likely(!ptrace_event_enabled(current, trace)))
 +                      trace = 0;
 +      }
  
        p = copy_process(clone_flags, stack_start, regs, stack_size,
                         child_tidptr, NULL, trace);
                }
  
                audit_finish_fork(p);
 -              tracehook_report_clone(regs, clone_flags, nr, p);
  
                /*
                 * We set PF_STARTING at creation in case tracing wants to
                 * use this to distinguish a fully live task from one that
 -               * hasn't gotten to tracehook_report_clone() yet.  Now we
 -               * clear it and set the child going.
 +               * hasn't finished SIGSTOP raising yet.  Now we clear it
 +               * and set the child going.
                 */
                p->flags &= ~PF_STARTING;
  
                wake_up_new_task(p);
  
 -              tracehook_report_clone_complete(trace, regs,
 -                                              clone_flags, nr, p);
 +              /* forking complete and child started to run, tell ptracer */
 +              if (unlikely(trace))
 +                      ptrace_event(trace, nr);
  
                if (clone_flags & CLONE_VFORK) {
                        freezer_do_not_count();
                        wait_for_completion(&vfork);
                        freezer_count();
 -                      tracehook_report_vfork_done(p, nr);
 +                      ptrace_event(PTRACE_EVENT_VFORK_DONE, nr);
                }
        } else {
                nr = PTR_ERR(p);
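
The kernel/fork.c side of this merge replaces the tracehook_*() clone hooks with direct PTRACE_EVENT_FORK/VFORK/CLONE reporting via ptrace_event().  A minimal user-space sketch of the consumer side, i.e. a tracer that asks for and receives those events (error handling omitted; this is illustrative and not part of the commit):

#include <stdio.h>
#include <signal.h>
#include <sys/ptrace.h>
#include <sys/types.h>
#include <sys/wait.h>

static void watch_forks(pid_t child)
{
	int status;
	unsigned long grandchild;

	waitpid(child, &status, 0);	/* child stopped itself after PTRACE_TRACEME */
	ptrace(PTRACE_SETOPTIONS, child, 0,
	       PTRACE_O_TRACEFORK | PTRACE_O_TRACEVFORK | PTRACE_O_TRACECLONE);
	ptrace(PTRACE_CONT, child, 0, 0);

	for (;;) {
		pid_t p = waitpid(-1, &status, 0);

		if (p < 0 || WIFEXITED(status))
			break;

		/* the event is encoded into the wait status as SIGTRAP | (event << 8) */
		if (WIFSTOPPED(status) &&
		    status >> 8 == (SIGTRAP | (PTRACE_EVENT_FORK << 8))) {
			ptrace(PTRACE_GETEVENTMSG, p, 0, &grandchild);
			printf("pid %d forked child %lu\n", p, grandchild);
		}

		ptrace(PTRACE_CONT, p, 0, 0);
	}
}
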
diff --combined kernel/sched.c
index fde6ff90352583d65ff890a407200f2fb0c3073e,71bc127e96ba696716f310611b38dc6bc2d243e7..c518b05fd062d07238f77562a3416d15022ca535
@@@ -292,8 -292,8 +292,8 @@@ static DEFINE_SPINLOCK(task_group_lock)
   * (The default weight is 1024 - so there's no practical
   *  limitation from this.)
   */
 -#define MIN_SHARES    2
 -#define MAX_SHARES    (1UL << (18 + SCHED_LOAD_RESOLUTION))
 +#define MIN_SHARES    (1UL <<  1)
 +#define MAX_SHARES    (1UL << 18)
  
  static int root_task_group_load = ROOT_TASK_GROUP_LOAD;
  #endif
@@@ -2544,9 -2544,13 +2544,9 @@@ static int ttwu_remote(struct task_stru
  }
  
  #ifdef CONFIG_SMP
 -static void sched_ttwu_pending(void)
 +static void sched_ttwu_do_pending(struct task_struct *list)
  {
        struct rq *rq = this_rq();
 -      struct task_struct *list = xchg(&rq->wake_list, NULL);
 -
 -      if (!list)
 -              return;
  
        raw_spin_lock(&rq->lock);
  
        raw_spin_unlock(&rq->lock);
  }
  
 +#ifdef CONFIG_HOTPLUG_CPU
 +
 +static void sched_ttwu_pending(void)
 +{
 +      struct rq *rq = this_rq();
 +      struct task_struct *list = xchg(&rq->wake_list, NULL);
 +
 +      if (!list)
 +              return;
 +
 +      sched_ttwu_do_pending(list);
 +}
 +
 +#endif /* CONFIG_HOTPLUG_CPU */
 +
  void scheduler_ipi(void)
  {
 -      sched_ttwu_pending();
 +      struct rq *rq = this_rq();
 +      struct task_struct *list = xchg(&rq->wake_list, NULL);
 +
 +      if (!list)
 +              return;
 +
 +      /*
 +       * Not all reschedule IPI handlers call irq_enter/irq_exit, since
 +       * traditionally all their work was done from the interrupt return
 +       * path. Now that we actually do some work, we need to make sure
 +       * we do call them.
 +       *
 +       * Some archs already do call them, luckily irq_enter/exit nest
 +       * properly.
 +       *
 +       * Arguably we should visit all archs and update all handlers,
 +       * however a fair share of IPIs are still resched only so this would
 +       * somewhat pessimize the simple resched case.
 +       */
 +      irq_enter();
 +      sched_ttwu_do_pending(list);
 +      irq_exit();
  }
  
  static void ttwu_queue_remote(struct task_struct *p, int cpu)
@@@ -6589,7 -6557,7 +6589,7 @@@ static int sched_domain_debug_one(struc
                        break;
                }
  
 -              if (!group->cpu_power) {
 +              if (!group->sgp->power) {
                        printk(KERN_CONT "\n");
                        printk(KERN_ERR "ERROR: domain->cpu_power not "
                                        "set\n");
                cpulist_scnprintf(str, sizeof(str), sched_group_cpus(group));
  
                printk(KERN_CONT " %s", str);
 -              if (group->cpu_power != SCHED_POWER_SCALE) {
 +              if (group->sgp->power != SCHED_POWER_SCALE) {
                        printk(KERN_CONT " (cpu_power = %d)",
 -                              group->cpu_power);
 +                              group->sgp->power);
                }
  
                group = group->next;
@@@ -6806,39 -6774,11 +6806,39 @@@ static struct root_domain *alloc_rootdo
        return rd;
  }
  
 +static void free_sched_groups(struct sched_group *sg, int free_sgp)
 +{
 +      struct sched_group *tmp, *first;
 +
 +      if (!sg)
 +              return;
 +
 +      first = sg;
 +      do {
 +              tmp = sg->next;
 +
 +              if (free_sgp && atomic_dec_and_test(&sg->sgp->ref))
 +                      kfree(sg->sgp);
 +
 +              kfree(sg);
 +              sg = tmp;
 +      } while (sg != first);
 +}
 +
  static void free_sched_domain(struct rcu_head *rcu)
  {
        struct sched_domain *sd = container_of(rcu, struct sched_domain, rcu);
 -      if (atomic_dec_and_test(&sd->groups->ref))
 +
 +      /*
 +       * If its an overlapping domain it has private groups, iterate and
 +       * nuke them all.
 +       */
 +      if (sd->flags & SD_OVERLAP) {
 +              free_sched_groups(sd->groups, 1);
 +      } else if (atomic_dec_and_test(&sd->groups->ref)) {
 +              kfree(sd->groups->sgp);
                kfree(sd->groups);
 +      }
        kfree(sd);
  }
  
@@@ -7005,7 -6945,6 +7005,7 @@@ int sched_smt_power_savings = 0, sched_
  struct sd_data {
        struct sched_domain **__percpu sd;
        struct sched_group **__percpu sg;
 +      struct sched_group_power **__percpu sgp;
  };
  
  struct s_data {
@@@ -7025,73 -6964,15 +7025,73 @@@ struct sched_domain_topology_level
  typedef struct sched_domain *(*sched_domain_init_f)(struct sched_domain_topology_level *tl, int cpu);
  typedef const struct cpumask *(*sched_domain_mask_f)(int cpu);
  
 +#define SDTL_OVERLAP  0x01
 +
  struct sched_domain_topology_level {
        sched_domain_init_f init;
        sched_domain_mask_f mask;
 +      int                 flags;
        struct sd_data      data;
  };
  
 -/*
 - * Assumes the sched_domain tree is fully constructed
 - */
 +static int
 +build_overlap_sched_groups(struct sched_domain *sd, int cpu)
 +{
 +      struct sched_group *first = NULL, *last = NULL, *groups = NULL, *sg;
 +      const struct cpumask *span = sched_domain_span(sd);
 +      struct cpumask *covered = sched_domains_tmpmask;
 +      struct sd_data *sdd = sd->private;
 +      struct sched_domain *child;
 +      int i;
 +
 +      cpumask_clear(covered);
 +
 +      for_each_cpu(i, span) {
 +              struct cpumask *sg_span;
 +
 +              if (cpumask_test_cpu(i, covered))
 +                      continue;
 +
 +              sg = kzalloc_node(sizeof(struct sched_group) + cpumask_size(),
 +                              GFP_KERNEL, cpu_to_node(i));
 +
 +              if (!sg)
 +                      goto fail;
 +
 +              sg_span = sched_group_cpus(sg);
 +
 +              child = *per_cpu_ptr(sdd->sd, i);
 +              if (child->child) {
 +                      child = child->child;
 +                      cpumask_copy(sg_span, sched_domain_span(child));
 +              } else
 +                      cpumask_set_cpu(i, sg_span);
 +
 +              cpumask_or(covered, covered, sg_span);
 +
 +              sg->sgp = *per_cpu_ptr(sdd->sgp, cpumask_first(sg_span));
 +              atomic_inc(&sg->sgp->ref);
 +
 +              if (cpumask_test_cpu(cpu, sg_span))
 +                      groups = sg;
 +
 +              if (!first)
 +                      first = sg;
 +              if (last)
 +                      last->next = sg;
 +              last = sg;
 +              last->next = first;
 +      }
 +      sd->groups = groups;
 +
 +      return 0;
 +
 +fail:
 +      free_sched_groups(first, 0);
 +
 +      return -ENOMEM;
 +}
 +
  static int get_group(int cpu, struct sd_data *sdd, struct sched_group **sg)
  {
        struct sched_domain *sd = *per_cpu_ptr(sdd->sd, cpu);
        if (child)
                cpu = cpumask_first(sched_domain_span(child));
  
 -      if (sg)
 +      if (sg) {
                *sg = *per_cpu_ptr(sdd->sg, cpu);
 +              (*sg)->sgp = *per_cpu_ptr(sdd->sgp, cpu);
 +              atomic_set(&(*sg)->sgp->ref, 1); /* for claim_allocations */
 +      }
  
        return cpu;
  }
  
  /*
 - * build_sched_groups takes the cpumask we wish to span, and a pointer
 - * to a function which identifies what group(along with sched group) a CPU
 - * belongs to. The return value of group_fn must be a >= 0 and < nr_cpu_ids
 - * (due to the fact that we keep track of groups covered with a struct cpumask).
 - *
   * build_sched_groups will build a circular linked list of the groups
   * covered by the given span, and will set each group's ->cpumask correctly,
   * and ->cpu_power to 0.
 + *
 + * Assumes the sched_domain tree is fully constructed
   */
 -static void
 -build_sched_groups(struct sched_domain *sd)
 +static int
 +build_sched_groups(struct sched_domain *sd, int cpu)
  {
        struct sched_group *first = NULL, *last = NULL;
        struct sd_data *sdd = sd->private;
        struct cpumask *covered;
        int i;
  
 +      get_group(cpu, sdd, &sd->groups);
 +      atomic_inc(&sd->groups->ref);
 +
 +      if (cpu != cpumask_first(sched_domain_span(sd)))
 +              return 0;
 +
        lockdep_assert_held(&sched_domains_mutex);
        covered = sched_domains_tmpmask;
  
                        continue;
  
                cpumask_clear(sched_group_cpus(sg));
 -              sg->cpu_power = 0;
 +              sg->sgp->power = 0;
  
                for_each_cpu(j, span) {
                        if (get_group(j, sdd, NULL) != group)
                last = sg;
        }
        last->next = first;
 +
 +      return 0;
  }
  
  /*
   */
  static void init_sched_groups_power(int cpu, struct sched_domain *sd)
  {
 -      WARN_ON(!sd || !sd->groups);
 +      struct sched_group *sg = sd->groups;
  
 -      if (cpu != group_first_cpu(sd->groups))
 -              return;
 +      WARN_ON(!sd || !sg);
 +
 +      do {
 +              sg->group_weight = cpumask_weight(sched_group_cpus(sg));
 +              sg = sg->next;
 +      } while (sg != sd->groups);
  
 -      sd->groups->group_weight = cpumask_weight(sched_group_cpus(sd->groups));
 +      if (cpu != group_first_cpu(sg))
 +              return;
  
        update_group_power(sd, cpu);
  }
@@@ -7309,15 -7177,15 +7309,15 @@@ static enum s_alloc __visit_domain_allo
  static void claim_allocations(int cpu, struct sched_domain *sd)
  {
        struct sd_data *sdd = sd->private;
 -      struct sched_group *sg = sd->groups;
  
        WARN_ON_ONCE(*per_cpu_ptr(sdd->sd, cpu) != sd);
        *per_cpu_ptr(sdd->sd, cpu) = NULL;
  
 -      if (cpu == cpumask_first(sched_group_cpus(sg))) {
 -              WARN_ON_ONCE(*per_cpu_ptr(sdd->sg, cpu) != sg);
 +      if (atomic_read(&(*per_cpu_ptr(sdd->sg, cpu))->ref))
                *per_cpu_ptr(sdd->sg, cpu) = NULL;
 -      }
 +
 +      if (atomic_read(&(*per_cpu_ptr(sdd->sgp, cpu))->ref))
 +              *per_cpu_ptr(sdd->sgp, cpu) = NULL;
  }
  
  #ifdef CONFIG_SCHED_SMT
@@@ -7342,7 -7210,7 +7342,7 @@@ static struct sched_domain_topology_lev
  #endif
        { sd_init_CPU, cpu_cpu_mask, },
  #ifdef CONFIG_NUMA
 -      { sd_init_NODE, cpu_node_mask, },
 +      { sd_init_NODE, cpu_node_mask, SDTL_OVERLAP, },
        { sd_init_ALLNODES, cpu_allnodes_mask, },
  #endif
        { NULL, },
@@@ -7366,14 -7234,9 +7366,14 @@@ static int __sdt_alloc(const struct cpu
                if (!sdd->sg)
                        return -ENOMEM;
  
 +              sdd->sgp = alloc_percpu(struct sched_group_power *);
 +              if (!sdd->sgp)
 +                      return -ENOMEM;
 +
                for_each_cpu(j, cpu_map) {
                        struct sched_domain *sd;
                        struct sched_group *sg;
 +                      struct sched_group_power *sgp;
  
                        sd = kzalloc_node(sizeof(struct sched_domain) + cpumask_size(),
                                        GFP_KERNEL, cpu_to_node(j));
                                return -ENOMEM;
  
                        *per_cpu_ptr(sdd->sg, j) = sg;
 +
 +                      sgp = kzalloc_node(sizeof(struct sched_group_power),
 +                                      GFP_KERNEL, cpu_to_node(j));
 +                      if (!sgp)
 +                              return -ENOMEM;
 +
 +                      *per_cpu_ptr(sdd->sgp, j) = sgp;
                }
        }
  
@@@ -7410,15 -7266,11 +7410,15 @@@ static void __sdt_free(const struct cpu
                struct sd_data *sdd = &tl->data;
  
                for_each_cpu(j, cpu_map) {
 -                      kfree(*per_cpu_ptr(sdd->sd, j));
 +                      struct sched_domain *sd = *per_cpu_ptr(sdd->sd, j);
 +                      if (sd && (sd->flags & SD_OVERLAP))
 +                              free_sched_groups(sd->groups, 0);
                        kfree(*per_cpu_ptr(sdd->sg, j));
 +                      kfree(*per_cpu_ptr(sdd->sgp, j));
                }
                free_percpu(sdd->sd);
                free_percpu(sdd->sg);
 +              free_percpu(sdd->sgp);
        }
  }
  
@@@ -7464,13 -7316,8 +7464,13 @@@ static int build_sched_domains(const st
                struct sched_domain_topology_level *tl;
  
                sd = NULL;
 -              for (tl = sched_domain_topology; tl->init; tl++)
 +              for (tl = sched_domain_topology; tl->init; tl++) {
                        sd = build_sched_domain(tl, &d, cpu_map, attr, sd, i);
 +                      if (tl->flags & SDTL_OVERLAP || sched_feat(FORCE_SD_OVERLAP))
 +                              sd->flags |= SD_OVERLAP;
 +                      if (cpumask_equal(cpu_map, sched_domain_span(sd)))
 +                              break;
 +              }
  
                while (sd->child)
                        sd = sd->child;
        for_each_cpu(i, cpu_map) {
                for (sd = *per_cpu_ptr(d.sd, i); sd; sd = sd->parent) {
                        sd->span_weight = cpumask_weight(sched_domain_span(sd));
 -                      get_group(i, sd->private, &sd->groups);
 -                      atomic_inc(&sd->groups->ref);
 -
 -                      if (i != cpumask_first(sched_domain_span(sd)))
 -                              continue;
 -
 -                      build_sched_groups(sd);
 +                      if (sd->flags & SD_OVERLAP) {
 +                              if (build_overlap_sched_groups(sd, i))
 +                                      goto error;
 +                      } else {
 +                              if (build_sched_groups(sd, i))
 +                                      goto error;
 +                      }
                }
        }
  
@@@ -7910,9 -7757,6 +7910,9 @@@ static void init_cfs_rq(struct cfs_rq *
  #endif
  #endif
        cfs_rq->min_vruntime = (u64)(-(1LL << 20));
 +#ifndef CONFIG_64BIT
 +      cfs_rq->min_vruntime_copy = cfs_rq->min_vruntime;
 +#endif
  }
  
  static void init_rt_rq(struct rt_rq *rt_rq, struct rq *rq)
  #ifdef CONFIG_SMP
        rt_rq->rt_nr_migratory = 0;
        rt_rq->overloaded = 0;
-       plist_head_init_raw(&rt_rq->pushable_tasks, &rq->lock);
+       plist_head_init(&rt_rq->pushable_tasks);
  #endif
  
        rt_rq->rt_time = 0;
@@@ -8142,7 -7986,7 +8142,7 @@@ void __init sched_init(void
  #endif
  
  #ifdef CONFIG_RT_MUTEXES
-       plist_head_init_raw(&init_task.pi_waiters, &init_task.pi_lock);
+       plist_head_init(&init_task.pi_waiters);
  #endif
  
        /*
@@@ -8606,7 -8450,10 +8606,7 @@@ int sched_group_set_shares(struct task_
        if (!tg->se[0])
                return -EINVAL;
  
 -      if (shares < MIN_SHARES)
 -              shares = MIN_SHARES;
 -      else if (shares > MAX_SHARES)
 -              shares = MAX_SHARES;
 +      shares = clamp(shares, scale_load(MIN_SHARES), scale_load(MAX_SHARES));
  
        mutex_lock(&shares_mutex);
        if (tg->shares == shares)
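
On the scheduler side, scheduler_ipi() above now drains rq->wake_list itself (bracketed by irq_enter()/irq_exit()) instead of unconditionally calling sched_ttwu_pending(); the pending list is taken over atomically with xchg().  A stand-alone user-space sketch of that "grab the whole pending list in one atomic exchange" pattern, written with C11 atomics (the kernel uses its own xchg()/cmpxchg() primitives, so this is only an analogy):

#include <stdatomic.h>
#include <stdio.h>

struct task {
	struct task *next;
	int id;
};

static _Atomic(struct task *) wake_list;

/* producer: push a task onto the lock-free pending list */
static void queue_wakeup(struct task *t)
{
	struct task *old = atomic_load(&wake_list);

	do {
		t->next = old;
	} while (!atomic_compare_exchange_weak(&wake_list, &old, t));
}

/* consumer (the IPI handler's role): take the whole list in one go */
static void drain_wakeups(void)
{
	struct task *list = atomic_exchange(&wake_list, NULL);

	if (!list)
		return;			/* nothing pending: cheap early exit */

	while (list) {
		struct task *next = list->next;

		printf("waking task %d\n", list->id);
		list = next;
	}
}

int main(void)
{
	struct task a = { .id = 1 }, b = { .id = 2 };

	queue_wakeup(&a);
	queue_wakeup(&b);
	drain_wakeups();	/* prints task 2, then task 1 (LIFO order) */
	return 0;
}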