Merge tag 'sysctl-6.1-rc1' of git://git.kernel.org/pub/scm/linux/kernel/git/mcgrof...
diff --git a/kernel/sched/rt.c b/kernel/sched/rt.c
index 55f39c8f42032817f8be1c1304c0585903dfbd5b..d869bcf898ccb768eaec98f36fed5990fbd984e9 100644
--- a/kernel/sched/rt.c
+++ b/kernel/sched/rt.c
@@ -509,7 +509,7 @@ static inline bool rt_task_fits_capacity(struct task_struct *p, int cpu)
        unsigned int cpu_cap;
 
        /* Only heterogeneous systems can benefit from this check */
-       if (!static_branch_unlikely(&sched_asym_cpucapacity))
+       if (!sched_asym_cpucap_active())
                return true;
 
        min_cap = uclamp_eff_value(p, UCLAMP_MIN);
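This hunk (and the matching one in find_lowest_rq() below) replaces the open-coded static-branch test with a named helper. A minimal sketch of such a wrapper, assuming it lands in kernel/sched/sched.h alongside the other scheduler helpers:

    /* Sketch only: the helper is assumed to wrap the same static
     * branch it replaces at the call sites in this diff. */
    static inline bool sched_asym_cpucap_active(void)
    {
            return static_branch_unlikely(&sched_asym_cpucapacity);
    }

The named predicate documents intent at each call site and leaves a single place to change if the underlying mechanism ever stops being a static branch.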
@@ -843,7 +843,7 @@ static void __disable_runtime(struct rq *rq)
                 * We cannot be left wanting - that would mean some runtime
                 * leaked out of the system.
                 */
-               BUG_ON(want);
+               WARN_ON_ONCE(want);
 balanced:
                /*
                 * Disable all the borrow logic by pretending we have inf
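Downgrading BUG_ON() to WARN_ON_ONCE() means a runtime-accounting imbalance now prints one backtrace and continues instead of halting the machine. A user-space model of the difference, for illustration only (the real macros live in include/asm-generic/bug.h and do considerably more):

    #include <stdbool.h>
    #include <stdio.h>
    #include <stdlib.h>

    /* Toy stand-ins for the kernel macros (GNU C statement expression);
     * like the kernel versions, the "once" is per call site. */
    #define BUG_ON(cond) \
            do { if (cond) { fprintf(stderr, "BUG\n"); abort(); } } while (0)

    #define WARN_ON_ONCE(cond) ({                                \
            static bool warned;                                  \
            bool c = !!(cond);                                   \
            if (c && !warned) {                                  \
                    warned = true;                               \
                    fprintf(stderr, "WARNING (once)\n");         \
            }                                                    \
            c;                                                   \
    })

    int main(void)
    {
            int want = 1;

            for (int i = 0; i < 3; i++)
                    WARN_ON_ONCE(want);  /* warns on the first pass only */
            puts("still running");       /* BUG_ON(want) would never get here */
            return 0;
    }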
@@ -1062,11 +1062,7 @@ static void update_curr_rt(struct rq *rq)
 
        trace_sched_stat_runtime(curr, delta_exec, 0);
 
-       curr->se.sum_exec_runtime += delta_exec;
-       account_group_exec_runtime(curr, delta_exec);
-
-       curr->se.exec_start = now;
-       cgroup_account_cputime(curr, delta_exec);
+       update_current_exec_runtime(curr, now, delta_exec);
 
        if (!rt_bandwidth_enabled())
                return;
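update_current_exec_runtime() factors out the four statements deleted above so the same bookkeeping can be shared by sibling scheduling classes. A sketch reconstructed directly from the removed lines (placement and exact parameter types are assumptions):

    /* Reconstructed from the lines this hunk removes. */
    static inline void update_current_exec_runtime(struct task_struct *curr,
                                                   u64 now, u64 delta_exec)
    {
            curr->se.sum_exec_runtime += delta_exec;
            account_group_exec_runtime(curr, delta_exec);

            curr->se.exec_start = now;
            cgroup_account_cputime(curr, delta_exec);
    }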
@@ -1849,7 +1845,7 @@ static void put_prev_task_rt(struct rq *rq, struct task_struct *p)
 
 static int pick_rt_task(struct rq *rq, struct task_struct *p, int cpu)
 {
-       if (!task_running(rq, p) &&
+       if (!task_on_cpu(rq, p) &&
            cpumask_test_cpu(cpu, &p->cpus_mask))
                return 1;
 
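task_on_cpu() is a rename of task_running(): the predicate asks "is p executing on a CPU right now", which the old name made easy to confuse with "is p runnable". A sketch of what it boils down to, assuming the usual SMP/UP split:

    /* Sketch; the real definition is assumed in kernel/sched/sched.h. */
    static inline int task_on_cpu(struct rq *rq, struct task_struct *p)
    {
    #ifdef CONFIG_SMP
            return p->on_cpu;
    #else
            return task_current(rq, p);
    #endif
    }

pick_rt_task() wants tasks that are queued but not currently executing, since a task actively running on a CPU cannot be pulled to another one.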
@@ -1897,7 +1893,7 @@ static int find_lowest_rq(struct task_struct *task)
         * If we're on asym system ensure we consider the different capacities
         * of the CPUs when searching for the lowest_mask.
         */
-       if (static_branch_unlikely(&sched_asym_cpucapacity)) {
+       if (sched_asym_cpucap_active()) {
 
                ret = cpupri_find_fitness(&task_rq(task)->rd->cpupri,
                                          task, lowest_mask,
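The call above is cut short by the diff context; its final argument is a fitness callback, and the natural candidate is rt_task_fits_capacity(), the function patched in the first hunk. A hedged reconstruction of the call shape (the callback argument is an assumption, not shown in this diff):

    /* Assumed completion of the truncated call, for illustration. */
    ret = cpupri_find_fitness(&task_rq(task)->rd->cpupri,
                              task, lowest_mask,
                              rt_task_fits_capacity);

cpupri_find_fitness() only reports CPUs for which the callback returns true; the symmetric-capacity path falls back to plain cpupri_find(), which applies no filter.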
@@ -2004,7 +2000,7 @@ static struct rq *find_lock_lowest_rq(struct task_struct *task, struct rq *rq)
                         */
                        if (unlikely(task_rq(task) != rq ||
                                     !cpumask_test_cpu(lowest_rq->cpu, &task->cpus_mask) ||
-                                    task_running(rq, task) ||
+                                    task_on_cpu(rq, task) ||
                                     !rt_task(task) ||
                                     !task_on_rq_queued(task))) {
 
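The unlikely() re-check exists because taking the second runqueue lock can force the first to be dropped, so everything observed beforehand must be re-validated. A user-space analogue of that lock-ordering dance, for illustration only:

    #include <pthread.h>
    #include <stdint.h>

    /* Toy analogue of double_lock_balance(): if the second lock cannot
     * be taken immediately, drop the held one and re-acquire both in a
     * fixed (address) order to avoid ABBA deadlock.  Returns nonzero
     * when the held lock was dropped, i.e. prior checks are stale. */
    static int double_lock(pthread_mutex_t *held, pthread_mutex_t *other)
    {
            if (pthread_mutex_trylock(other) == 0)
                    return 0;       /* got both without dropping */

            pthread_mutex_unlock(held);
            if ((uintptr_t)held < (uintptr_t)other) {
                    pthread_mutex_lock(held);
                    pthread_mutex_lock(other);
            } else {
                    pthread_mutex_lock(other);
                    pthread_mutex_lock(held);
            }
            return 1;               /* caller must re-validate */
    }

In the kernel path that staleness window is exactly what the patched condition guards against: while the lock was dropped the task may have migrated, lost lowest_rq->cpu from its affinity mask, started running, stopped being an RT task, or been dequeued.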
@@ -2462,7 +2458,7 @@ skip:
  */
 static void task_woken_rt(struct rq *rq, struct task_struct *p)
 {
-       bool need_to_push = !task_running(rq, p) &&
+       bool need_to_push = !task_on_cpu(rq, p) &&
                            !test_tsk_need_resched(rq->curr) &&
                            p->nr_cpus_allowed > 1 &&
                            (dl_task(rq->curr) || rt_task(rq->curr)) &&
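task_woken_rt() pushes a freshly woken RT task elsewhere only when it is not already executing, the current task is not about to reschedule anyway, the task can run on more than one CPU, and the waker's CPU is busy with deadline/RT work (the condition continues past the context shown above). The resched test is a thin flag check; a sketch of its usual shape:

    /* Sketch: reads the per-task "reschedule pending" flag. */
    static inline int test_tsk_need_resched(struct task_struct *tsk)
    {
            return unlikely(test_tsk_thread_flag(tsk, TIF_NEED_RESCHED));
    }

If TIF_NEED_RESCHED is already set on rq->curr, a schedule is imminent and the wakeup will be re-evaluated then, so an explicit push would be redundant.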