@@ -1433,10 +1433,9 @@ static struct sched_rt_entity *pick_next_rt_entity(struct rq *rq,
return next;
}
-static struct task_struct *_pick_next_task_rt(struct rq *rq)
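+/*
+ * Return the highest-priority queued RT task without touching
+ * its accounting state.
+ */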
+static struct task_struct *peek_next_task_rt(struct rq *rq)
{
struct sched_rt_entity *rt_se;
- struct task_struct *p;
struct rt_rq *rt_rq = &rq->rt;
do {
@@ -1445,7 +1444,14 @@ static struct task_struct *_pick_next_task_rt(struct rq *rq)
rt_rq = group_rt_rq(rt_se);
} while (rt_rq);
- p = rt_task_of(rt_se);
+ return rt_task_of(rt_se);
+}
+
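+/* Like peek_next_task_rt(), but also start the task's exec-time accounting. */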
+static inline struct task_struct *_pick_next_task_rt(struct rq *rq)
+{
+ struct task_struct *p;
+
+ p = peek_next_task_rt(rq);
p->se.exec_start = rq_clock_task(rq);
return p;
@@ -1895,28 +1901,74 @@ static void set_cpus_allowed_rt(struct task_struct *p,
const struct cpumask *new_mask)
{
struct rq *rq;
- int weight;
+ int old_weight, new_weight;
+ int preempt_push = 0, direct_push = 0;
BUG_ON(!rt_task(p));
if (!task_on_rq_queued(p))
return;
- weight = cpumask_weight(new_mask);
+ old_weight = p->nr_cpus_allowed;
+ new_weight = cpumask_weight(new_mask);
+
+ rq = task_rq(p);
+
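+	/*
+	 * If p's new mask spans more than one CPU while an RT task
+	 * runs here with no reschedule pending, the wider affinity
+	 * may allow a push; check for one right away.
+	 */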
+ if (new_weight > 1 &&
+ rt_task(rq->curr) &&
+ !test_tsk_need_resched(rq->curr)) {
+	/*
+	 * We hold p->pi_lock and rq->lock. rq->lock may be
+	 * released while doing a direct push, but p->pi_lock
+	 * is held throughout, so it is safe to assign the
+	 * new_mask and new_weight to p below.
+	 */
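+		/* p is queued but not running: it can be pushed directly. */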
+ if (!task_running(rq, p)) {
+ cpumask_copy(&p->cpus_allowed, new_mask);
+ p->nr_cpus_allowed = new_weight;
+ direct_push = 1;
+ } else if (cpumask_test_cpu(task_cpu(p), new_mask)) {
+ cpumask_copy(&p->cpus_allowed, new_mask);
+ p->nr_cpus_allowed = new_weight;
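+			/*
+			 * No CPU in the root domain can take p at a lower
+			 * priority, so there is nothing to push; just update
+			 * the migration bookkeeping.
+			 */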
+ if (!cpupri_find(&rq->rd->cpupri, p, NULL))
+ goto update;
+
+			/*
+			 * At this point the current task has most likely
+			 * become migratable due to its affinity change;
+			 * figure out whether we can migrate it.
+			 *
+			 * Is there another task with the same priority as
+			 * the current task? If so, we should reschedule.
+			 * NOTE: the target may be unpushable.
+			 */
+ if (p->prio == rq->rt.highest_prio.next) {
+				/* A same-priority target is already in the pushable_tasks list. */
+ requeue_task_rt(rq, p, 0);
+ preempt_push = 1;
+ } else if (rq->rt.rt_nr_total > 1) {
+ struct task_struct *next;
+
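+				/*
+				 * Requeue p to the tail of its priority list;
+				 * if a same-priority task now heads the queue,
+				 * p can give way to it and be pushed afterwards.
+				 */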
+ requeue_task_rt(rq, p, 0);
+ next = peek_next_task_rt(rq);
+ if (next != p && next->prio == p->prio)
+ preempt_push = 1;
+ }
+ }
+ }
+update:
/*
* Only update if the process changes its state from whether it
* can migrate or not.
*/
- if ((p->nr_cpus_allowed > 1) == (weight > 1))
- return;
-
- rq = task_rq(p);
+ if ((old_weight > 1) == (new_weight > 1))
+ goto out;
/*
* The process used to be able to migrate OR it can now migrate
*/
- if (weight <= 1) {
+ if (new_weight <= 1) {
if (!task_current(rq, p))
dequeue_pushable_task(rq, p);
BUG_ON(!rq->rt.rt_nr_migratory);
@@ -1928,6 +1980,12 @@ static void set_cpus_allowed_rt(struct task_struct *p,
}
update_rt_migration(&rq->rt);
+
+out:
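+	/*
+	 * direct_push: p is not running here, push it away ourselves.
+	 * preempt_push: p is running; reschedule so a same-priority
+	 * task can take over and p can then be pushed.
+	 */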
+ if (direct_push)
+ push_rt_tasks(rq);
+ else if (preempt_push)
+ resched_curr(rq);
}
/* Assumes rq->lock is held */