@@ -1045,6 +1045,46 @@ void check_preempt_curr(struct rq *rq, struct task_struct *p, int flags)
}
#ifdef CONFIG_SMP
+
+/* Called with rq->lock held */
+static void __balance_callback(struct rq *rq)
+{
+ struct callback_head *head, *next;
+ void (*func)(struct rq *rq);
+
+ head = rq->balance_callback;
+ rq->balance_callback = NULL;
+ while (head) {
+ func = (void (*)(struct rq *))head->func;
+ next = head->next;
+ head->next = NULL;
+ head = next;
+
+ func(rq);
+ }
+}
+
+/* Called with preemption disabled */
+static inline void balance_callback(struct rq *rq)
+{
+ if (unlikely(rq->balance_callback)) {
+ unsigned long flags;
+
+ raw_spin_lock_irqsave(&rq->lock, flags);
+ __balance_callback(rq);
+ raw_spin_unlock_irqrestore(&rq->lock, flags);
+ }
+}
+
+#else
+
+static inline void balance_callback(struct rq *rq)
+{
+}
+
+#endif
+
+#ifdef CONFIG_SMP
/*
* This is how migration works:
*
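
For context: the list drained by __balance_callback() above is filled through the queue_balance_callback() helper in kernel/sched/sched.h. The sketch below is only an approximation of that helper (not part of this patch), shown to explain the func cast and the singly linked rq->balance_callback list:

static inline void
queue_balance_callback(struct rq *rq,
		       struct callback_head *head,
		       void (*func)(struct rq *rq))
{
	lockdep_assert_held(&rq->lock);

	/* Bail if this callback_head still appears to be on a list. */
	if (unlikely(head->next))
		return;

	/*
	 * callback_head::func takes a callback_head pointer, so the rq
	 * function is stored with a cast here and cast back in
	 * __balance_callback() before being invoked.
	 */
	head->func = (void (*)(struct callback_head *))func;
	head->next = rq->balance_callback;
	rq->balance_callback = head;
}
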
@@ -1187,6 +1227,16 @@ int set_cpus_allowed_ptr(struct task_struct *p, const struct cpumask *new_mask)
}
do_set_cpus_allowed(p, new_mask);
+ /*
+ * rq->lock might get released during __balance_callback(),
+ * but if @p is migrated successfully in the meantime, task_cpu(p)
+ * must be in new_mask because p->pi_lock is never released;
+ * the subsequent cpumask_test_cpu() check will then succeed
+ * and we return safely in that case.
+ */
+ lockdep_unpin_lock(&rq->lock);
+ __balance_callback(rq);
+ lockdep_pin_lock(&rq->lock);
/* Can the task run on the task's current CPU? If so, we're done */
if (cpumask_test_cpu(task_cpu(p), new_mask))
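
The lockdep unpin/pin pair is needed because set_cpus_allowed_ptr() acquired rq->lock via task_rq_lock(), which pins it, while a queued push callback may legitimately drop and re-take that lock when it has to acquire a second runqueue's lock in address order. A hedged sketch of that lock dance (illustrative only; the function name is made up for the example, not verbatim kernel code):

/* Returns 1 if this_rq->lock was dropped and re-acquired. */
static int example_double_lock_balance(struct rq *this_rq, struct rq *busiest)
{
	int dropped = 0;

	if (!raw_spin_trylock(&busiest->lock)) {
		if (busiest < this_rq) {
			/*
			 * Wrong locking order: release our lock, then take
			 * both locks in address order to avoid deadlock.
			 */
			raw_spin_unlock(&this_rq->lock);
			raw_spin_lock(&busiest->lock);
			raw_spin_lock_nested(&this_rq->lock,
					     SINGLE_DEPTH_NESTING);
			dropped = 1;
		} else {
			raw_spin_lock_nested(&busiest->lock,
					     SINGLE_DEPTH_NESTING);
		}
	}
	return dropped;
}
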
@@ -2480,43 +2530,6 @@ static struct rq *finish_task_switch(struct task_struct *prev)
return rq;
}
-#ifdef CONFIG_SMP
-
-/* rq->lock is NOT held, but preemption is disabled */
-static void __balance_callback(struct rq *rq)
-{
- struct callback_head *head, *next;
- void (*func)(struct rq *rq);
- unsigned long flags;
-
- raw_spin_lock_irqsave(&rq->lock, flags);
- head = rq->balance_callback;
- rq->balance_callback = NULL;
- while (head) {
- func = (void (*)(struct rq *))head->func;
- next = head->next;
- head->next = NULL;
- head = next;
-
- func(rq);
- }
- raw_spin_unlock_irqrestore(&rq->lock, flags);
-}
-
-static inline void balance_callback(struct rq *rq)
-{
- if (unlikely(rq->balance_callback))
- __balance_callback(rq);
-}
-
-#else
-
-static inline void balance_callback(struct rq *rq)
-{
-}
-
-#endif
-
/**
* schedule_tail - first thing a freshly forked thread must call.
* @prev: the thread we just switched away from.
@@ -2089,14 +2089,15 @@ static void set_cpus_allowed_rt(struct task_struct *p,
weight = cpumask_weight(new_mask);
+ rq = task_rq(p);
+
/*
- * Only update if the process changes its state from whether it
- * can migrate or not.
+ * Skip updating the migration state if the task's ability to
+ * migrate has not changed, but we still need to check whether it
+ * should be pushed away due to its new affinity.
*/
if ((p->nr_cpus_allowed > 1) == (weight > 1))
- return;
-
- rq = task_rq(p);
+ goto queue_push;
/*
* The process used to be able to migrate OR it can now migrate
@@ -2113,6 +2114,13 @@ static void set_cpus_allowed_rt(struct task_struct *p,
}
update_rt_migration(&rq->rt);
+
+queue_push:
+ if (weight > 1 &&
+ !task_running(rq, p) &&
+ !test_tsk_need_resched(rq->curr) &&
+ !cpumask_subset(new_mask, &p->cpus_allowed))
+ queue_push_tasks(rq);
}
/* Assumes rq->lock is held */
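
For completeness, queue_push_tasks() used in the hunk above does not push anything directly; it only arms a per-CPU balance callback that __balance_callback() later runs with rq->lock held. Roughly (a sketch of the kernel/sched/rt.c helper of this generation, not part of the patch):

static DEFINE_PER_CPU(struct callback_head, rt_push_head);

static void queue_push_tasks(struct rq *rq)
{
	/* Nothing to do if no queued RT task is eligible for pushing. */
	if (!has_pushable_tasks(rq))
		return;

	/* push_rt_tasks() will be invoked from __balance_callback(). */
	queue_balance_callback(rq, &per_cpu(rt_push_head, rq->cpu),
			       push_rt_tasks);
}

Reusing the balance-callback list here means the affinity path goes through the same push machinery used after scheduling, instead of open-coding a push inside set_cpus_allowed_rt().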