Merge tag 'rcu-fixes-v6.7' of git://git.kernel.org/pub/scm/linux/kernel/git/frederic/linux-dynticks
author Linus Torvalds <[email protected]>
Wed, 8 Nov 2023 17:47:52 +0000 (09:47 -0800)
committer Linus Torvalds <[email protected]>
Wed, 8 Nov 2023 17:47:52 +0000 (09:47 -0800)
Pull RCU fixes from Frederic Weisbecker:

 - Fix a lock inversion between the scheduler and RCU introduced in
   v6.2-rc4. The scenario can trigger on any user of RCU_NOCB
   (mostly Android but also nohz_full); see the sketch after these
   notes.

 - Fix PF_IDLE semantic changes introduced in v6.6-rc3 that broke
   some RCU-Tasks and RCU-Tasks-Trace expectations about what
   exactly counts as an idle task. This resulted in potential
   spurious stalls and warnings.
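
As background for the lock-inversion fix (this sketch is mine, not part of
the merge; lock_a/lock_b and the userspace setting are stand-ins for
rcu_node's ->lock and the runqueue's ->__lock): an inversion arises when two
code paths take the same pair of locks in opposite orders.

    /* Illustration only: an ABBA lock inversion in userspace terms. */
    #include <pthread.h>

    static pthread_mutex_t lock_a = PTHREAD_MUTEX_INITIALIZER;
    static pthread_mutex_t lock_b = PTHREAD_MUTEX_INITIALIZER;

    static void path_one(void)      /* like RCU calling resched_cpu() */
    {
            pthread_mutex_lock(&lock_a);
            pthread_mutex_lock(&lock_b);    /* order: A -> B */
            pthread_mutex_unlock(&lock_b);
            pthread_mutex_unlock(&lock_a);
    }

    static void path_two(void)      /* like the scheduler reporting a QS */
    {
            pthread_mutex_lock(&lock_b);
            pthread_mutex_lock(&lock_a);    /* order: B -> A: inversion */
            pthread_mutex_unlock(&lock_a);
            pthread_mutex_unlock(&lock_b);
    }

    int main(void)
    {
            path_one();     /* safe alone... */
            path_two();     /* ...and safe sequentially; only concurrent
                             * execution risks the deadlock */
            return 0;
    }

Run the two paths concurrently and each thread can end up holding the lock
the other one needs. The fix in the diff below removes the A -> B
acquisition instead: RCU records which CPUs need a resched while holding its
lock and calls resched_cpu() only after dropping it.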

* tag 'rcu-fixes-v6.7' of git://git.kernel.org/pub/scm/linux/kernel/git/frederic/linux-dynticks:
  rcu/tasks-trace: Handle new PF_IDLE semantics
  rcu/tasks: Handle new PF_IDLE semantics
  rcu: Introduce rcu_cpu_online()
  rcu: Break rcu_node_0 --> &rq->__lock order

kernel/rcu/tree.c

diff --combined kernel/rcu/tree.c
index d3a97e1290203f8a00c4dc97fa0087676926b08e,c3359f4c88307b2b434c9a6591e2dfbd9e746823..3ac3c846105fb4c059a001ae3cf52a3c7747aac5
@@@ -755,14 -755,19 +755,19 @@@ static int dyntick_save_progress_counter(struct rcu_data *rdp)
  }
  
  /*
-  * Return true if the specified CPU has passed through a quiescent
-  * state by virtue of being in or having passed through an dynticks
-  * idle state since the last call to dyntick_save_progress_counter()
-  * for this same CPU, or by virtue of having been offline.
+  * Returns positive if the specified CPU has passed through a quiescent state
+  * by virtue of being in or having passed through a dynticks idle state since
+  * the last call to dyntick_save_progress_counter() for this same CPU, or by
+  * virtue of having been offline.
+  *
+  * Returns negative if the specified CPU needs a force resched.
+  *
+  * Returns zero otherwise.
   */
  static int rcu_implicit_dynticks_qs(struct rcu_data *rdp)
  {
        unsigned long jtsq;
+       int ret = 0;
        struct rcu_node *rnp = rdp->mynode;
  
        /*
            (time_after(jiffies, READ_ONCE(rdp->last_fqs_resched) + jtsq * 3) ||
             rcu_state.cbovld)) {
                WRITE_ONCE(rdp->rcu_urgent_qs, true);
-               resched_cpu(rdp->cpu);
                WRITE_ONCE(rdp->last_fqs_resched, jiffies);
+               ret = -1;
        }
  
        /*
        if (time_after(jiffies, rcu_state.jiffies_resched)) {
                if (time_after(jiffies,
                               READ_ONCE(rdp->last_fqs_resched) + jtsq)) {
-                       resched_cpu(rdp->cpu);
                        WRITE_ONCE(rdp->last_fqs_resched, jiffies);
+                       ret = -1;
                }
                if (IS_ENABLED(CONFIG_IRQ_WORK) &&
                    !rdp->rcu_iw_pending && rdp->rcu_iw_gp_seq != rnp->gp_seq &&
                }
        }
  
-       return 0;
+       return ret;
  }
  
  /* Trace-event wrapper function for trace_rcu_future_grace_period.  */
@@@ -2271,15 -2276,15 +2276,15 @@@ static void force_qs_rnp(int (*f)(struct rcu_data *rdp))
  {
        int cpu;
        unsigned long flags;
-       unsigned long mask;
-       struct rcu_data *rdp;
        struct rcu_node *rnp;
  
        rcu_state.cbovld = rcu_state.cbovldnext;
        rcu_state.cbovldnext = false;
        rcu_for_each_leaf_node(rnp) {
+               unsigned long mask = 0;
+               unsigned long rsmask = 0;
                cond_resched_tasks_rcu_qs();
-               mask = 0;
                raw_spin_lock_irqsave_rcu_node(rnp, flags);
                rcu_state.cbovldnext |= !!rnp->cbovldmask;
                if (rnp->qsmask == 0) {
                        continue;
                }
                for_each_leaf_node_cpu_mask(rnp, cpu, rnp->qsmask) {
+                       struct rcu_data *rdp;
+                       int ret;
                        rdp = per_cpu_ptr(&rcu_data, cpu);
-                       if (f(rdp)) {
+                       ret = f(rdp);
+                       if (ret > 0) {
                                mask |= rdp->grpmask;
                                rcu_disable_urgency_upon_qs(rdp);
                        }
+                       if (ret < 0)
+                               rsmask |= rdp->grpmask;
                }
                if (mask != 0) {
                        /* Idle/offline CPUs, report (releases rnp->lock). */
                        /* Nothing to do here, so just drop the lock. */
                        raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
                }
+               for_each_leaf_node_cpu_mask(rnp, cpu, rsmask)
+                       resched_cpu(cpu);
        }
  }
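
Taken together, the two hunks above implement a "record under the lock, kick
after the unlock" discipline: rcu_implicit_dynticks_qs() now returns negative
instead of calling resched_cpu() itself, and force_qs_rnp() issues the
rescheds only after rnp->lock is dropped. A hedged userspace analogue (all
names, NCPU, and inspect_cpu()'s demo values are mine):

    /* Sketch: never call into the "scheduler" (which may take our lock)
     * while still holding node_lock. */
    #include <pthread.h>
    #include <stdio.h>

    #define NCPU 8

    static pthread_mutex_t node_lock = PTHREAD_MUTEX_INITIALIZER;

    /* Stand-in for f()/rcu_implicit_dynticks_qs():
     * > 0 quiescent state seen, < 0 needs a resched, 0 neither. */
    static int inspect_cpu(int cpu)
    {
            return (cpu % 3) - 1;   /* arbitrary demo values: -1, 0, 1 */
    }

    static void scan_node(void)
    {
            unsigned long qsmask = 0, rsmask = 0;

            pthread_mutex_lock(&node_lock);
            for (int cpu = 0; cpu < NCPU; cpu++) {
                    int ret = inspect_cpu(cpu);

                    if (ret > 0)
                            qsmask |= 1UL << cpu;   /* report under the lock */
                    if (ret < 0)
                            rsmask |= 1UL << cpu;   /* remember; do NOT resched here */
            }
            pthread_mutex_unlock(&node_lock);

            /* Only now, with node_lock dropped, poke the "scheduler". */
            for (int cpu = 0; cpu < NCPU; cpu++)
                    if (rsmask & (1UL << cpu))
                            printf("resched_cpu(%d)\n", cpu);
            (void)qsmask;
    }

    int main(void)
    {
            scan_node();
            return 0;
    }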
  
@@@ -3471,6 -3485,13 +3485,6 @@@ kfree_rcu_shrink_scan(struct shrinker *shrink, struct shrink_control *sc)
        return freed == 0 ? SHRINK_STOP : freed;
  }
  
 -static struct shrinker kfree_rcu_shrinker = {
 -      .count_objects = kfree_rcu_shrink_count,
 -      .scan_objects = kfree_rcu_shrink_scan,
 -      .batch = 0,
 -      .seeks = DEFAULT_SEEKS,
 -};
 -
  void __init kfree_rcu_scheduler_running(void)
  {
        int cpu;
@@@ -4195,6 -4216,13 +4209,13 @@@ static bool rcu_rdp_cpu_online(struct rcu_data *rdp)
        return !!(rdp->grpmask & rcu_rnp_online_cpus(rdp->mynode));
  }
  
+ bool rcu_cpu_online(int cpu)
+ {
+       struct rcu_data *rdp = per_cpu_ptr(&rcu_data, cpu);
+       return rcu_rdp_cpu_online(rdp);
+ }
  #if defined(CONFIG_PROVE_RCU) && defined(CONFIG_HOTPLUG_CPU)
  
  /*
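
The new rcu_cpu_online() helper above simply exposes rcu_rdp_cpu_online() by
CPU number, so code outside tree.c (the RCU-Tasks fixes in this merge) can
ask whether RCU itself, rather than the CPU-hotplug core, considers a CPU
online. A hedged usage sketch (example_cpu_settled is my invention; the
helper's declaration lives in RCU's internal headers):

    /* Hypothetical kernel fragment, illustration only. */
    static bool example_cpu_settled(int cpu)
    {
            return rcu_cpu_online(cpu);
    }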
@@@ -5022,7 -5050,6 +5043,7 @@@ static void __init kfree_rcu_batch_init(void)
  {
        int cpu;
        int i, j;
 +      struct shrinker *kfree_rcu_shrinker;
  
        /* Clamp it to [0:100] seconds interval. */
        if (rcu_delay_page_cache_fill_msec < 0 ||
                INIT_DELAYED_WORK(&krcp->page_cache_work, fill_page_cache_func);
                krcp->initialized = true;
        }
 -      if (register_shrinker(&kfree_rcu_shrinker, "rcu-kfree"))
 -              pr_err("Failed to register kfree_rcu() shrinker!\n");
 +
 +      kfree_rcu_shrinker = shrinker_alloc(0, "rcu-kfree");
 +      if (!kfree_rcu_shrinker) {
 +              pr_err("Failed to allocate kfree_rcu() shrinker!\n");
 +              return;
 +      }
 +
 +      kfree_rcu_shrinker->count_objects = kfree_rcu_shrink_count;
 +      kfree_rcu_shrinker->scan_objects = kfree_rcu_shrink_scan;
 +
 +      shrinker_register(kfree_rcu_shrinker);
  }
  
  void __init rcu_init(void)
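
The last hunk converts kfree_rcu's shrinker from the old static struct
shrinker + register_shrinker() pair to the dynamically allocated API new in
this cycle. A minimal kernel-module sketch of that API ("my-cache", my_count
and my_scan are made up for illustration):

    #include <linux/module.h>
    #include <linux/shrinker.h>

    static struct shrinker *my_shrinker;

    static unsigned long my_count(struct shrinker *s, struct shrink_control *sc)
    {
            return 0;       /* nothing reclaimable in this sketch */
    }

    static unsigned long my_scan(struct shrinker *s, struct shrink_control *sc)
    {
            return SHRINK_STOP;
    }

    static int __init my_cache_init(void)
    {
            /* Allocate, fill in callbacks, register: the same three steps
             * kfree_rcu_batch_init() performs above. */
            my_shrinker = shrinker_alloc(0, "my-cache");
            if (!my_shrinker)
                    return -ENOMEM;

            my_shrinker->count_objects = my_count;
            my_shrinker->scan_objects = my_scan;
            shrinker_register(my_shrinker);
            return 0;
    }

    static void __exit my_cache_exit(void)
    {
            shrinker_free(my_shrinker);     /* unregisters and frees */
    }

    module_init(my_cache_init);
    module_exit(my_cache_exit);
    MODULE_LICENSE("GPL");

Compared with the old register_shrinker(&static_struct, "name") form,
allocation can now fail, which is why kfree_rcu_batch_init() gains the
explicit error path above.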