diff mbox

[v4,2/3] sched/rt: Fix wrong SMP scheduler behavior for equal prio cases

Message ID 1424079144-5194-2-git-send-email-xlpang@126.com
State New
Headers show

Commit Message

Xunlei Pang Feb. 16, 2015, 9:32 a.m. UTC
From: Xunlei Pang <pang.xunlei@linaro.org>

Currently, the SMP RT scheduler has trouble dealing with
equal-priority cases.

For example, in check_preempt_equal_prio():
When RT1 (the current task) gets preempted by RT2, if there is a
migratable RT3 with the same priority, RT3 will be pushed away instead
of RT1 afterwards, because RT1 will be enqueued at the tail of
the pushable list by the succeeding put_prev_task_rt() call
triggered by the resched. This breaks FIFO ordering.

Furthermore, this is also problematic for normal preemption cases:
if there are RT tasks queued with the same priority as the current
task, the current task will be put behind those tasks in the pushable
queue.

So, if a task is running and gets preempted by a higher priority
task (or even with same priority for migrating), this patch ensures
that it is put before any existing task with the same priority in
the pushable queue.

Signed-off-by: Xunlei Pang <pang.xunlei@linaro.org>
---
 kernel/sched/rt.c | 23 ++++++++++++++++-------
 1 file changed, 16 insertions(+), 7 deletions(-)
diff mbox

Patch

diff --git a/kernel/sched/rt.c b/kernel/sched/rt.c
index f4d4b07..65de40e 100644
--- a/kernel/sched/rt.c
+++ b/kernel/sched/rt.c
@@ -347,11 +347,15 @@  static inline void set_post_schedule(struct rq *rq)
 	rq->post_schedule = has_pushable_tasks(rq);
 }
 
-static void enqueue_pushable_task(struct rq *rq, struct task_struct *p)
+static void enqueue_pushable_task(struct rq *rq,
+				struct task_struct *p, bool head)
 {
 	plist_del(&p->pushable_tasks, &rq->rt.pushable_tasks);
 	plist_node_init(&p->pushable_tasks, p->prio);
-	plist_add(&p->pushable_tasks, &rq->rt.pushable_tasks);
+	if (head)
+		plist_add_head(&p->pushable_tasks, &rq->rt.pushable_tasks);
+	else
+		plist_add_tail(&p->pushable_tasks, &rq->rt.pushable_tasks);
 
 	/* Update the highest prio pushable task */
 	if (p->prio < rq->rt.highest_prio.next)
@@ -373,7 +377,8 @@  static void dequeue_pushable_task(struct rq *rq, struct task_struct *p)
 
 #else
 
-static inline void enqueue_pushable_task(struct rq *rq, struct task_struct *p)
+static inline void enqueue_pushable_task(struct rq *rq,
+					struct task_struct *p, bool head)
 {
 }
 
@@ -1248,7 +1253,7 @@  enqueue_task_rt(struct rq *rq, struct task_struct *p, int flags)
 	enqueue_rt_entity(rt_se, flags & ENQUEUE_HEAD);
 
 	if (!task_current(rq, p) && p->nr_cpus_allowed > 1)
-		enqueue_pushable_task(rq, p);
+		enqueue_pushable_task(rq, p, 0);
 }
 
 static void dequeue_task_rt(struct rq *rq, struct task_struct *p, int flags)
@@ -1494,8 +1499,12 @@  static void put_prev_task_rt(struct rq *rq, struct task_struct *p)
 	 * The previous task needs to be made eligible for pushing
 	 * if it is still active
 	 */
-	if (on_rt_rq(&p->rt) && p->nr_cpus_allowed > 1)
-		enqueue_pushable_task(rq, p);
+	if (on_rt_rq(&p->rt) && p->nr_cpus_allowed > 1) {
+		if (task_running(rq, p) && (preempt_count() & PREEMPT_ACTIVE))
+			enqueue_pushable_task(rq, p, 1);
+		else
+			enqueue_pushable_task(rq, p, 0);
+	}
 }
 
 #ifdef CONFIG_SMP
@@ -1914,7 +1923,7 @@  static void set_cpus_allowed_rt(struct task_struct *p,
 		rq->rt.rt_nr_migratory--;
 	} else {
 		if (!task_current(rq, p))
-			enqueue_pushable_task(rq, p);
+			enqueue_pushable_task(rq, p, 0);
 		rq->rt.rt_nr_migratory++;
 	}