[RFC,RESEND,3/4] sched/rt: Fix wrong SMP scheduler behavior for equal prio cases

Message ID 1430117318-2080-4-git-send-email-xlpang@126.com

Commit Message

Xunlei Pang April 27, 2015, 6:48 a.m. UTC
From: Xunlei Pang <pang.xunlei@linaro.org>

There are two main queues per cpu in the RT scheduler:
let's call them the "run queue" and the "pushable queue" respectively.

For RT tasks, the scheduler uses a "plist" to manage the pushable queue,
so when multiple tasks are queued at the same priority, they are kept
in strict FIFO order.

Currently, when an RT task gets queued, it is put at the head or the
tail of its "run queue" at the same priority, depending on the
scenario. Then, if it is migratable, it is always put at the tail of
its "pushable queue" at that priority.

Consider one cpu that initially has some migratable tasks queued at
the same priority as current (an RT task), in the same order in both
the "run queue" and the "pushable queue". At some point current gets
preempted: it is put behind these tasks in the "pushable queue", while
it still stays ahead of them in the "run queue". Afterwards, if a pull
from another cpu or a push from the local cpu comes along, a task
behind current in the "run queue" will be removed from the "pushable
queue" and get to run, because the global RT scheduler fetches tasks
from the head of the "pushable queue" when pulling or pushing. This
breaks FIFO: a task that arrived after current gets to run before it.

Obviously, to maintain the right order between the two queues, when
current is preempted (not involving a re-queue within the "run queue"),
we want to put it ahead of all the tasks queued at the same priority
in the "pushable queue".

So, if a task is running and gets preempted by a higher-priority task,
or even by a task at the same priority for the sake of migration, this
patch ensures that it is put ahead of any existing task with the same
priority in the "pushable queue".

The handling logic used here:
 - Add a new field named "rt_preempt" (with a new flag named
   RT_PREEMPT_QUEUEAHEAD defined for it) to task_struct, used by RT.
 - When resched_curr() is issued because current (an RT task) is being
   preempted, set the flag. A new resched_curr_preempted_rt() is
   created for this purpose, and all resched_curr() calls used for RT
   preemption are replaced with resched_curr_preempted_rt().
 - In put_prev_task_rt(), if RT_PREEMPT_QUEUEAHEAD is set, enqueue
   the task at the head of the "pushable queue" and clear the flag.
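
As a minimal sketch of the underlying mechanism, here is a userspace
model of head-vs-tail insertion among equal-priority nodes (this is
illustration only, not the kernel's plist API; the list type and
helpers below are made up):

/*
 * Toy priority-ordered list supporting head or tail insertion
 * among nodes of equal priority.
 */
#include <stdio.h>
#include <stdlib.h>

struct tnode {
	int prio;		/* lower value = higher priority */
	const char *name;
	struct tnode *next;
};

/* Insert n before (head) or after (tail) nodes of equal priority. */
static void insert(struct tnode **list, struct tnode *n, int head)
{
	struct tnode **pp = list;

	while (*pp && ((*pp)->prio < n->prio ||
		       ((*pp)->prio == n->prio && !head)))
		pp = &(*pp)->next;
	n->next = *pp;
	*pp = n;
}

static struct tnode *mk(int prio, const char *name)
{
	struct tnode *n = malloc(sizeof(*n));

	n->prio = prio;
	n->name = name;
	n->next = NULL;
	return n;
}

int main(void)
{
	struct tnode *pushable = NULL;

	insert(&pushable, mk(90, "A"), 0);	/* tail: FIFO order */
	insert(&pushable, mk(90, "B"), 0);
	insert(&pushable, mk(90, "C"), 1);	/* head: preempted current */

	for (struct tnode *n = pushable; n; n = n->next)
		printf("%s ", n->name);		/* prints: C A B */
	printf("\n");
	return 0;
}

With tail insertion only, the output would be "A B C"; inserting at
the head is what lets a preempted current keep its place among tasks
of the same priority.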

Signed-off-by: Xunlei Pang <pang.xunlei@linaro.org>
---
 include/linux/sched.h    |  5 +++
 include/linux/sched/rt.h | 16 ++++++++
 kernel/sched/core.c      |  6 ++-
 kernel/sched/rt.c        | 96 ++++++++++++++++++++++++++++++++++++++++++------
 4 files changed, 110 insertions(+), 13 deletions(-)

Patch

diff --git a/include/linux/sched.h b/include/linux/sched.h
index f74d4cc..24e0f72 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -1321,6 +1321,11 @@  struct task_struct {
 	const struct sched_class *sched_class;
 	struct sched_entity se;
 	struct sched_rt_entity rt;
+
+#ifdef CONFIG_SMP
+	unsigned long rt_preempt; /* Used by rt */
+#endif
+
 #ifdef CONFIG_CGROUP_SCHED
 	struct task_group *sched_task_group;
 #endif
diff --git a/include/linux/sched/rt.h b/include/linux/sched/rt.h
index 6341f5b..69e3c82 100644
--- a/include/linux/sched/rt.h
+++ b/include/linux/sched/rt.h
@@ -15,6 +15,22 @@  static inline int rt_task(struct task_struct *p)
 	return rt_prio(p->prio);
 }
 
+struct rq;
+
+#ifdef CONFIG_SMP
+extern void resched_curr_preempted_rt(struct rq *rq);
+
+static inline void resched_curr_preempted(struct rq *rq)
+{
+	resched_curr_preempted_rt(rq);
+}
+#else
+static inline void resched_curr_preempted(struct rq *rq)
+{
	resched_curr(rq);
+}
+#endif
+
 #ifdef CONFIG_RT_MUTEXES
 extern int rt_mutex_getprio(struct task_struct *p);
 extern void rt_mutex_setprio(struct task_struct *p, int prio);
diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index f9123a8..d13fc13 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -1002,7 +1002,7 @@  void check_preempt_curr(struct rq *rq, struct task_struct *p, int flags)
 			if (class == rq->curr->sched_class)
 				break;
 			if (class == p->sched_class) {
-				resched_curr(rq);
+				resched_curr_preempted(rq);
 				break;
 			}
 		}
@@ -1833,6 +1833,10 @@  static void __sched_fork(unsigned long clone_flags, struct task_struct *p)
 
 	INIT_LIST_HEAD(&p->rt.run_list);
 
+#ifdef CONFIG_SMP
+	p->rt_preempt = 0;
+#endif
+
 #ifdef CONFIG_PREEMPT_NOTIFIERS
 	INIT_HLIST_HEAD(&p->preempt_notifiers);
 #endif
diff --git a/kernel/sched/rt.c b/kernel/sched/rt.c
index 0c0f4df..7439121 100644
--- a/kernel/sched/rt.c
+++ b/kernel/sched/rt.c
@@ -254,8 +254,33 @@  int alloc_rt_sched_group(struct task_group *tg, struct task_group *parent)
 }
 #endif /* CONFIG_RT_GROUP_SCHED */
 
+
 #ifdef CONFIG_SMP
 
+#define RT_PREEMPT_QUEUEAHEAD    1UL
+
+/*
+ * p (current) was preempted, and is to be put ahead of
+ * any task with the same priority in the pushable queue.
+ */
+static inline bool rt_preempted(struct task_struct *p)
+{
+	return !!(p->rt_preempt & RT_PREEMPT_QUEUEAHEAD);
+}
+
+static inline void clear_rt_preempted(struct task_struct *p)
+{
+	p->rt_preempt = 0;
+}
+
+void resched_curr_preempted_rt(struct rq *rq)
+{
+	if (rt_task(rq->curr))
+		rq->curr->rt_preempt |= RT_PREEMPT_QUEUEAHEAD;
+
+	resched_curr(rq);
+}
+
 static int pull_rt_task(struct rq *this_rq);
 
 static inline bool need_pull_rt_task(struct rq *rq, struct task_struct *prev)
@@ -359,17 +384,32 @@  static inline void set_post_schedule(struct rq *rq)
 	rq->post_schedule = has_pushable_tasks(rq);
 }
 
-static void enqueue_pushable_task(struct rq *rq, struct task_struct *p)
+static void
+__enqueue_pushable_task(struct rq *rq, struct task_struct *p, bool head)
 {
 	plist_del(&p->pushable_tasks, &rq->rt.pushable_tasks);
 	plist_node_init(&p->pushable_tasks, p->prio);
-	plist_add(&p->pushable_tasks, &rq->rt.pushable_tasks);
+	if (head)
+		plist_add_head(&p->pushable_tasks, &rq->rt.pushable_tasks);
+	else
+		plist_add_tail(&p->pushable_tasks, &rq->rt.pushable_tasks);
 
 	/* Update the highest prio pushable task */
 	if (p->prio < rq->rt.highest_prio.next)
 		rq->rt.highest_prio.next = p->prio;
 }
 
+static inline
+void enqueue_pushable_task_preempted(struct rq *rq, struct task_struct *curr)
+{
+	__enqueue_pushable_task(rq, curr, true);
+}
+
+static inline void enqueue_pushable_task(struct rq *rq, struct task_struct *p)
+{
+	__enqueue_pushable_task(rq, p, false);
+}
+
 static void dequeue_pushable_task(struct rq *rq, struct task_struct *p)
 {
 	plist_del(&p->pushable_tasks, &rq->rt.pushable_tasks);
@@ -385,6 +425,25 @@  static void dequeue_pushable_task(struct rq *rq, struct task_struct *p)
 
 #else
 
+static inline bool rt_preempted(struct task_struct *p)
+{
+	return false;
+}
+
+static inline void clear_rt_preempted(struct task_struct *p)
+{
+}
+
+static inline void resched_curr_preempted_rt(struct rq *rq)
+{
+	resched_curr(rq);
+}
+
+static inline
+void enqueue_pushable_task_preempted(struct rq *rq, struct task_struct *p)
+{
+}
+
 static inline void enqueue_pushable_task(struct rq *rq, struct task_struct *p)
 {
 }
@@ -489,7 +548,7 @@  static void sched_rt_rq_enqueue(struct rt_rq *rt_rq)
 			enqueue_rt_entity(rt_se, false);
 
 		if (rt_rq->highest_prio.curr < curr->prio)
-			resched_curr(rq);
+			resched_curr_preempted_rt(rq);
 	}
 }
 
@@ -967,7 +1026,7 @@  static void update_curr_rt(struct rq *rq)
 			raw_spin_lock(&rt_rq->rt_runtime_lock);
 			rt_rq->rt_time += delta_exec;
 			if (sched_rt_runtime_exceeded(rt_rq))
-				resched_curr(rq);
+				resched_curr_preempted_rt(rq);
 			raw_spin_unlock(&rt_rq->rt_runtime_lock);
 		}
 	}
@@ -1409,7 +1468,7 @@  static void check_preempt_equal_prio_common(struct rq *rq)
 	 * to try and push current away.
 	 */
 	requeue_task_rt(rq, next, 1);
-	resched_curr(rq);
+	resched_curr_preempted_rt(rq);
 }
 
 static inline
@@ -1434,7 +1493,7 @@  void check_preempt_equal_prio(struct rq *rq, struct task_struct *p)
 static void check_preempt_curr_rt(struct rq *rq, struct task_struct *p, int flags)
 {
 	if (p->prio < rq->curr->prio) {
-		resched_curr(rq);
+		resched_curr_preempted_rt(rq);
 		return;
 	}
 
@@ -1544,8 +1603,21 @@  static void put_prev_task_rt(struct rq *rq, struct task_struct *p)
 	 * The previous task needs to be made eligible for pushing
 	 * if it is still active
 	 */
-	if (on_rt_rq(&p->rt) && p->nr_cpus_allowed > 1)
-		enqueue_pushable_task(rq, p);
+	if (on_rt_rq(&p->rt) && p->nr_cpus_allowed > 1) {
+		/*
+		 * When put_prev_task_rt() is called by
+		 * pick_next_task_rt(), if the current rt task
+		 * is being preempted, to maintain FIFO, it must
+		 * stay ahead of any other task that is queued
+		 * at the same priority.
+		 */
+		if (rt_preempted(p))
+			enqueue_pushable_task_preempted(rq, p);
+		else
+			enqueue_pushable_task(rq, p);
+	}
+
+	clear_rt_preempted(p);
 }
 
 #ifdef CONFIG_SMP
@@ -1764,7 +1836,7 @@  retry:
 	 * just reschedule current.
 	 */
 	if (unlikely(next_task->prio < rq->curr->prio)) {
-		resched_curr(rq);
+		resched_curr_preempted_rt(rq);
 		return 0;
 	}
 
@@ -1811,7 +1883,7 @@  retry:
 	activate_task(lowest_rq, next_task, 0);
 	ret = 1;
 
-	resched_curr(lowest_rq);
+	resched_curr_preempted_rt(lowest_rq);
 
 	double_unlock_balance(rq, lowest_rq);
 
@@ -2213,7 +2285,7 @@  static void switched_to_rt(struct rq *rq, struct task_struct *p)
 			check_resched = 0;
 #endif /* CONFIG_SMP */
 		if (check_resched && p->prio < rq->curr->prio)
-			resched_curr(rq);
+			resched_curr_preempted_rt(rq);
 	}
 }
 
@@ -2255,7 +2327,7 @@  prio_changed_rt(struct rq *rq, struct task_struct *p, int oldprio)
 		 * then reschedule.
 		 */
 		if (p->prio < rq->curr->prio)
-			resched_curr(rq);
+			resched_curr_preempted_rt(rq);
 	}
 }