@@ -4773,6 +4773,9 @@ void do_set_cpus_allowed(struct task_struct *p, const struct cpumask *new_mask)
 
 	cpumask_copy(&p->cpus_allowed, new_mask);
 	p->nr_cpus_allowed = cpumask_weight(new_mask);
+
+	if (p->sched_class->post_set_cpus_allowed)
+		p->sched_class->post_set_cpus_allowed(p);
 }
 
 /*
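The hook fires at the tail of do_set_cpus_allowed(), so it runs with the
task's rq lock still held by the caller; post_set_cpus_allowed_rt() below
depends on that to read task_rq(p) and call push_rt_tasks() safely. A
condensed view of the usual caller (a sketch loosely following
set_cpus_allowed_ptr() in kernel/sched/core.c of this era, with error
handling and the migrate-away path elided):

	rq = task_rq_lock(p, &flags);		/* pins task_rq(p), takes rq->lock */

	do_set_cpus_allowed(p, new_mask);	/* now also fires the new hook */

	/* ... migrate p if it is running on a now-disallowed CPU ... */
	task_rq_unlock(rq, p, &flags);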
@@ -2280,6 +2280,26 @@ static void set_cpus_allowed_rt(struct task_struct *p,
 	update_rt_migration(&rq->rt);
 }
 
+static void post_set_cpus_allowed_rt(struct task_struct *p)
+{
+	struct rq *rq;
+
+	if (!task_on_rq_queued(p))
+		return;
+
+	rq = task_rq(p);
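+
+	/*
+	 * If p is not running, can run on more than one CPU, and the
+	 * current task is not already due to reschedule, try to push
+	 * the queued RT tasks (p included) somewhere they can run now.
+	 */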
+	if (!task_running(rq, p) &&
+	    p->nr_cpus_allowed > 1 &&
+	    !test_tsk_need_resched(rq->curr))
+		push_rt_tasks(rq);
+}
+
 /* Assumes rq->lock is held */
 static void rq_online_rt(struct rq *rq)
 {
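For reference, push_rt_tasks() keeps calling push_rt_task() to move the
highest-priority queued-but-not-running RT task on this rq to a CPU whose
running task has lower priority. Roughly (a simplified sketch of the
existing loop in kernel/sched/rt.c, locking retries elided):

	while ((next_task = pick_next_pushable_task(rq))) {
		lowest_rq = find_lock_lowest_rq(next_task, rq);
		if (!lowest_rq)
			break;			/* no CPU would be an improvement */
		deactivate_task(rq, next_task, 0);
		set_task_cpu(next_task, lowest_rq->cpu);
		activate_task(lowest_rq, next_task, 0);
		resched_curr(lowest_rq);	/* preempt the lower-priority curr */
		double_unlock_balance(rq, lowest_rq);
	}

Only queued, non-running tasks with more than one allowed CPU sit on the
pushable list, which is why the new hook checks !task_running() and
nr_cpus_allowed > 1 before bothering with a push.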
@@ -2494,6 +2508,7 @@ const struct sched_class rt_sched_class = {
 	.select_task_rq		= select_task_rq_rt,
 
 	.set_cpus_allowed	= set_cpus_allowed_rt,
+	.post_set_cpus_allowed	= post_set_cpus_allowed_rt,
 	.rq_online		= rq_online_rt,
 	.rq_offline		= rq_offline_rt,
 	.post_schedule		= post_schedule_rt,
@@ -1191,6 +1191,7 @@ struct sched_class {
 
 	void (*set_cpus_allowed)(struct task_struct *p,
 				 const struct cpumask *newmask);
+	void (*post_set_cpus_allowed)(struct task_struct *p);
 
 	void (*rq_online)(struct rq *rq);
 	void (*rq_offline)(struct rq *rq);
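Taken together: the core now routes every affinity change through the
optional hook, and the RT class uses it to push a freshly widened task
right away instead of leaving it queued until the next scheduling event.
Any caller of sched_setaffinity(2) on a queued SCHED_FIFO/SCHED_RR task
exercises the new path; a minimal trigger (hypothetical test code, not
part of the patch):

	#define _GNU_SOURCE
	#include <sched.h>
	#include <sys/types.h>

	/*
	 * Widen an RT task's affinity from CPU 0 to CPUs 0-1.  If the task
	 * sits queued behind a higher-priority RT task on CPU 0, the new
	 * post_set_cpus_allowed_rt() hook lets the scheduler push it to
	 * CPU 1 immediately.
	 */
	static int widen_affinity(pid_t pid)
	{
		cpu_set_t set;

		CPU_ZERO(&set);
		CPU_SET(0, &set);
		CPU_SET(1, &set);
		return sched_setaffinity(pid, sizeof(set), &set);
	}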