@@ -2680,7 +2680,7 @@ need_resched:
pre_schedule(rq, prev);
if (unlikely(!rq->nr_running))
- idle_balance(cpu, rq);
+ idle_balance(rq);
put_prev_task(rq, prev);
next = pick_next_task(rq);
@@ -6359,12 +6359,13 @@ out:
* idle_balance is called by schedule() if this_cpu is about to become
* idle. Attempts to pull tasks from other CPUs.
*/
-void idle_balance(int this_cpu, struct rq *this_rq)
+void idle_balance(struct rq *this_rq)
{
struct sched_domain *sd;
int pulled_task = 0;
unsigned long next_balance = jiffies + HZ;
u64 curr_cost = 0;
+ int this_cpu = this_rq->cpu;
this_rq->idle_stamp = rq_clock(this_rq);
@@ -1176,7 +1176,7 @@ extern const struct sched_class idle_sched_class;
extern void update_group_power(struct sched_domain *sd, int cpu);
extern void trigger_load_balance(struct rq *rq);
-extern void idle_balance(int this_cpu, struct rq *this_rq);
+extern void idle_balance(struct rq *this_rq);
extern void idle_enter_fair(struct rq *this_rq);
extern void idle_exit_fair(struct rq *this_rq);
The cpu parameter passed to idle_balance() is not needed, since it can be retrieved from the struct rq. Signed-off-by: Daniel Lezcano <daniel.lezcano@linaro.org> --- kernel/sched/core.c | 2 +- kernel/sched/fair.c | 3 ++- kernel/sched/sched.h | 2 +- 3 files changed, 4 insertions(+), 3 deletions(-)