--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -4769,6 +4769,9 @@ void do_set_cpus_allowed(struct task_struct *p, const struct cpumask *new_mask)

cpumask_copy(&p->cpus_allowed, new_mask);
p->nr_cpus_allowed = cpumask_weight(new_mask);
+
+ if (p->sched_class->post_set_cpus_allowed)
+ p->sched_class->post_set_cpus_allowed(p);
}

/*
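For illustration (an aside, not part of the patch): the hunk above adds an optional, NULL-checked post hook that runs only after p->cpus_allowed and p->nr_cpus_allowed have been updated, so an implementation already sees the new weight. Below is a minimal userspace sketch of that dispatch order; every name in it is an invented stand-in, not kernel code.

#include <stdio.h>

/* Invented stand-ins for the kernel types; illustrative only. */
struct task;

struct class_model {
	void (*set_cpus_allowed)(struct task *p, unsigned int mask);
	void (*post_set_cpus_allowed)(struct task *p);	/* optional, may be NULL */
};

struct task {
	const struct class_model *sched_class;
	unsigned int cpus_allowed;	/* toy bitmask */
	int nr_cpus_allowed;		/* popcount of the mask */
};

static void post_rt(struct task *p)
{
	/* Runs after the update below, so it sees the new weight. */
	printf("post hook: nr_cpus_allowed is now %d\n", p->nr_cpus_allowed);
}

static const struct class_model rt_model = {
	.post_set_cpus_allowed = post_rt,	/* .set_cpus_allowed left NULL */
};

/* Mirrors the patched do_set_cpus_allowed(): update first, post hook last. */
static void do_set(struct task *p, unsigned int new_mask)
{
	if (p->sched_class->set_cpus_allowed)
		p->sched_class->set_cpus_allowed(p, new_mask);

	p->cpus_allowed = new_mask;
	p->nr_cpus_allowed = __builtin_popcount(new_mask);

	if (p->sched_class->post_set_cpus_allowed)
		p->sched_class->post_set_cpus_allowed(p);
}

int main(void)
{
	struct task t = { .sched_class = &rt_model,
			  .cpus_allowed = 0x1, .nr_cpus_allowed = 1 };

	do_set(&t, 0x3);	/* widen affinity: the post hook reports 2 */
	return 0;
}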
--- a/kernel/sched/rt.c
+++ b/kernel/sched/rt.c
@@ -2136,6 +2136,29 @@ static void set_cpus_allowed_rt(struct task_struct *p,
update_rt_migration(&rq->rt);
}

+static void post_set_cpus_allowed_rt(struct task_struct *p)
+{
+ struct rq *rq;
+
+ if (!task_on_rq_queued(p))
+ return;
+
+ rq = task_rq(p);
+ if (p->nr_cpus_allowed > 1 &&
+ rq->rt.rt_nr_running > 1 &&
+ rt_task(rq->curr) && !test_tsk_need_resched(rq->curr)) {
+ if (!task_running(rq, p)) {
+ push_rt_task(rq);
+ } else if (cpumask_test_cpu(task_cpu(p), &p->cpus_allowed)) {
+ /*
+ * p (the current task) may have become migratable due
+ * to its affinity change, so try to push it away.
+ */
+ check_preempt_equal_prio_common(rq);
+ }
+ }
+}
+
/* Assumes rq->lock is held */
static void rq_online_rt(struct rq *rq)
{
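An aside to make the branch structure in post_set_cpus_allowed_rt() easier to follow: the self-contained model below restates the same decision as a flat chain of early returns. The enum and every parameter are invented stand-ins for runqueue and task state that the kernel reads under rq->lock; this is a sketch, not kernel code.

#include <stdbool.h>
#include <stdio.h>

/* Invented labels for the three possible outcomes. */
enum rt_post_action { RT_POST_NOTHING, RT_POST_PUSH_TASK, RT_POST_PUSH_CURR };

static enum rt_post_action rt_post_decide(bool queued, int nr_cpus_allowed,
					  int rt_nr_running, bool curr_is_rt,
					  bool need_resched, bool p_running,
					  bool cpu_still_allowed)
{
	if (!queued)
		return RT_POST_NOTHING;		/* p is not on a runqueue */
	if (nr_cpus_allowed <= 1)
		return RT_POST_NOTHING;		/* still pinned, cannot migrate */
	if (rt_nr_running <= 1)
		return RT_POST_NOTHING;		/* no queued RT task to relieve */
	if (!curr_is_rt || need_resched)
		return RT_POST_NOTHING;		/* a reschedule resolves it anyway */
	if (!p_running)
		return RT_POST_PUSH_TASK;	/* push_rt_task(): p may move away */
	if (cpu_still_allowed)
		return RT_POST_PUSH_CURR;	/* running p just became migratable */
	return RT_POST_NOTHING;			/* forced migration handled elsewhere */
}

int main(void)
{
	/* A queued-but-waiting RT task whose affinity was widened on a busy rq. */
	printf("%d\n", rt_post_decide(true, 2, 2, true, false, false, true));
	return 0;
}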
@@ -2350,6 +2373,7 @@ const struct sched_class rt_sched_class = {
.select_task_rq = select_task_rq_rt,

.set_cpus_allowed = set_cpus_allowed_rt,
+ .post_set_cpus_allowed = post_set_cpus_allowed_rt,
.rq_online = rq_online_rt,
.rq_offline = rq_offline_rt,
.post_schedule = post_schedule_rt,
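Worth noting: rt_sched_class is the only class this patch wires up to the new hook; the other classes simply leave the member unset. With C designated initializers, members that are not named are zero-initialized, which is why the single NULL check added to do_set_cpus_allowed() suffices. A small sketch with invented names, not the kernel's tables:

#include <stdio.h>

/* Invented hook table, not the kernel's struct sched_class. */
struct hooks {
	void (*set_cpus_allowed)(void);
	void (*post_set_cpus_allowed)(void);
};

static void set_rt(void)   { puts("rt: set_cpus_allowed"); }
static void post_rt(void)  { puts("rt: post_set_cpus_allowed"); }
static void set_fair(void) { puts("fair: set_cpus_allowed"); }

static const struct hooks rt_hooks = {
	.set_cpus_allowed	= set_rt,
	.post_set_cpus_allowed	= post_rt,
};

/* .post_set_cpus_allowed is not named, so it is zero-initialized (NULL). */
static const struct hooks fair_hooks = {
	.set_cpus_allowed	= set_fair,
};

int main(void)
{
	const struct hooks *classes[] = { &rt_hooks, &fair_hooks };

	for (int i = 0; i < 2; i++) {
		classes[i]->set_cpus_allowed();
		if (classes[i]->post_set_cpus_allowed)	/* NULL for "fair" */
			classes[i]->post_set_cpus_allowed();
	}
	return 0;
}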
--- a/kernel/sched/sched.h
+++ b/kernel/sched/sched.h
@@ -1191,6 +1191,7 @@ struct sched_class {

void (*set_cpus_allowed)(struct task_struct *p,
const struct cpumask *newmask);
+ void (*post_set_cpus_allowed)(struct task_struct *p);

void (*rq_online)(struct rq *rq);
void (*rq_offline)(struct rq *rq);
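Finally, one common userspace trigger for the whole path patched above is sched_setaffinity(2), which inside the kernel ends up in do_set_cpus_allowed() and thus in the new post hook. A minimal example; the CPU numbers are chosen arbitrarily:

#define _GNU_SOURCE
#include <sched.h>
#include <stdio.h>

int main(void)
{
	cpu_set_t set;

	CPU_ZERO(&set);
	CPU_SET(0, &set);
	CPU_SET(1, &set);	/* widen affinity to CPUs 0 and 1 */

	/* pid 0 means the calling thread. */
	if (sched_setaffinity(0, sizeof(set), &set) != 0) {
		perror("sched_setaffinity");
		return 1;
	}
	printf("affinity updated\n");
	return 0;
}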