@@ -1321,6 +1321,11 @@ struct task_struct {
const struct sched_class *sched_class;
struct sched_entity se;
struct sched_rt_entity rt;
+
+#ifdef CONFIG_SMP
+ unsigned long rt_preempt; /* rt preemption flags, see RT_PREEMPT_QUEUEAHEAD */
+#endif
+
#ifdef CONFIG_CGROUP_SCHED
struct task_group *sched_task_group;
#endif
@@ -15,6 +15,22 @@ static inline int rt_task(struct task_struct *p)
return rt_prio(p->prio);
}
+struct rq;
+
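+/*
+ * resched_curr_preempted() reschedules curr; on SMP it also flags
+ * an rt curr as preempted so the rt scheduler can requeue it at
+ * the head of the pushable queue. On UP there is no pushable
+ * queue, so it falls back to plain resched_curr().
+ */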
+#ifdef CONFIG_SMP
+extern void resched_curr_preempted_rt(struct rq *rq);
+
+static inline void resched_curr_preempted(struct rq *rq)
+{
+ resched_curr_preempted_rt(rq);
+}
+#else
+static inline void resched_curr_preempted(struct rq *rq)
+{
+ resched_curr(rq);
+}
+#endif
+
#ifdef CONFIG_RT_MUTEXES
extern int rt_mutex_getprio(struct task_struct *p);
extern void rt_mutex_setprio(struct task_struct *p, int prio);
@@ -1002,7 +1002,7 @@ void check_preempt_curr(struct rq *rq, struct task_struct *p, int flags)
if (class == rq->curr->sched_class)
break;
if (class == p->sched_class) {
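+ /*
+ * curr is about to be preempted by p; if curr is an rt
+ * task, flag it so that it keeps its place ahead of
+ * equal-priority tasks in the pushable queue.
+ */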
- resched_curr(rq);
+ resched_curr_preempted(rq);
break;
}
}
@@ -1833,6 +1833,10 @@ static void __sched_fork(unsigned long clone_flags, struct task_struct *p)
INIT_LIST_HEAD(&p->rt.run_list);
+#ifdef CONFIG_SMP
+ p->rt_preempt = 0;
+#endif
+
#ifdef CONFIG_PREEMPT_NOTIFIERS
INIT_HLIST_HEAD(&p->preempt_notifiers);
#endif
@@ -254,8 +254,33 @@ int alloc_rt_sched_group(struct task_group *tg, struct task_group *parent)
}
#endif /* CONFIG_RT_GROUP_SCHED */
+
#ifdef CONFIG_SMP
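+/*
+ * Flag bits for task_struct::rt_preempt.
+ */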
+#define RT_PREEMPT_QUEUEAHEAD 1UL
+
+/*
+ * Return true if p (i.e. current) was preempted and must be put
+ * ahead of any task with the same priority in the pushable queue.
+ */
+static inline bool rt_preempted(struct task_struct *p)
+{
+ return !!(p->rt_preempt & RT_PREEMPT_QUEUEAHEAD);
+}
+
+static inline void clear_rt_preempted(struct task_struct *p)
+{
+ p->rt_preempt = 0;
+}
+
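+/*
+ * Reschedule curr and, if it is an rt task, mark it preempted so
+ * that put_prev_task_rt() will requeue it at the head of the
+ * pushable queue for its priority.
+ */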
+void resched_curr_preempted_rt(struct rq *rq)
+{
+ if (rt_task(rq->curr))
+ rq->curr->rt_preempt |= RT_PREEMPT_QUEUEAHEAD;
+
+ resched_curr(rq);
+}
+
static int pull_rt_task(struct rq *this_rq);
static inline bool need_pull_rt_task(struct rq *rq, struct task_struct *prev)
@@ -359,17 +384,32 @@ static inline void set_post_schedule(struct rq *rq)
rq->post_schedule = has_pushable_tasks(rq);
}
-static void enqueue_pushable_task(struct rq *rq, struct task_struct *p)
+static void
+__enqueue_pushable_task(struct rq *rq, struct task_struct *p, bool head)
{
plist_del(&p->pushable_tasks, &rq->rt.pushable_tasks);
plist_node_init(&p->pushable_tasks, p->prio);
- plist_add(&p->pushable_tasks, &rq->rt.pushable_tasks);
+ if (head)
+ plist_add_head(&p->pushable_tasks, &rq->rt.pushable_tasks);
+ else
+ plist_add_tail(&p->pushable_tasks, &rq->rt.pushable_tasks);
/* Update the highest prio pushable task */
if (p->prio < rq->rt.highest_prio.next)
rq->rt.highest_prio.next = p->prio;
}
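+/*
+ * A preempted task is queued at the head of its priority slot in
+ * the pushable list so that FIFO order among equal-priority tasks
+ * is preserved; an ordinary enqueue goes to the tail.
+ */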
+static inline
+void enqueue_pushable_task_preempted(struct rq *rq, struct task_struct *curr)
+{
+ __enqueue_pushable_task(rq, curr, true);
+}
+
+static inline void enqueue_pushable_task(struct rq *rq, struct task_struct *p)
+{
+ __enqueue_pushable_task(rq, p, false);
+}
+
static void dequeue_pushable_task(struct rq *rq, struct task_struct *p)
{
plist_del(&p->pushable_tasks, &rq->rt.pushable_tasks);
@@ -385,6 +425,25 @@ static void dequeue_pushable_task(struct rq *rq, struct task_struct *p)
#else
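+/*
+ * !CONFIG_SMP: there is no pushable queue, so these helpers reduce
+ * to plain resched_curr() and empty stubs.
+ */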
+static inline bool rt_preempted(struct task_struct *p)
+{
+ return false;
+}
+
+static inline void clear_rt_preempted(struct task_struct *p)
+{
+}
+
+static inline void resched_curr_preempted_rt(struct rq *rq)
+{
+ resched_curr(rq);
+}
+
+static inline
+void enqueue_pushable_task_preempted(struct rq *rq, struct task_struct *p)
+{
+}
+
static inline void enqueue_pushable_task(struct rq *rq, struct task_struct *p)
{
}
@@ -489,7 +548,7 @@ static void sched_rt_rq_enqueue(struct rt_rq *rt_rq)
enqueue_rt_entity(rt_se, false);
if (rt_rq->highest_prio.curr < curr->prio)
- resched_curr(rq);
+ resched_curr_preempted_rt(rq);
}
}
@@ -967,7 +1026,7 @@ static void update_curr_rt(struct rq *rq)
raw_spin_lock(&rt_rq->rt_runtime_lock);
rt_rq->rt_time += delta_exec;
if (sched_rt_runtime_exceeded(rt_rq))
- resched_curr(rq);
+ resched_curr_preempted_rt(rq);
raw_spin_unlock(&rt_rq->rt_runtime_lock);
}
}
@@ -1409,7 +1468,7 @@ static void check_preempt_equal_prio_common(struct rq *rq)
* to try and push current away.
*/
requeue_task_rt(rq, next, 1);
- resched_curr(rq);
+ resched_curr_preempted_rt(rq);
}
static inline
@@ -1434,7 +1493,7 @@ void check_preempt_equal_prio(struct rq *rq, struct task_struct *p)
static void check_preempt_curr_rt(struct rq *rq, struct task_struct *p, int flags)
{
if (p->prio < rq->curr->prio) {
- resched_curr(rq);
+ resched_curr_preempted_rt(rq);
return;
}
@@ -1544,8 +1603,21 @@ static void put_prev_task_rt(struct rq *rq, struct task_struct *p)
* The previous task needs to be made eligible for pushing
* if it is still active
*/
- if (on_rt_rq(&p->rt) && p->nr_cpus_allowed > 1)
- enqueue_pushable_task(rq, p);
+ if (on_rt_rq(&p->rt) && p->nr_cpus_allowed > 1) {
+ /*
+ * When put_prev_task_rt() is called from
+ * pick_next_task_rt(), the current rt task may be
+ * getting preempted. To maintain FIFO ordering, a
+ * preempted task must be requeued ahead of any
+ * other task queued at the same priority.
+ */
+ if (rt_preempted(p))
+ enqueue_pushable_task_preempted(rq, p);
+ else
+ enqueue_pushable_task(rq, p);
+ }
+
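+ /*
+ * The preempted flag is only meaningful for a single
+ * put_prev_task_rt(); clear it even when the task was
+ * not requeued above.
+ */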
+ clear_rt_preempted(p);
}
#ifdef CONFIG_SMP
@@ -1764,7 +1836,7 @@ retry:
* just reschedule current.
*/
if (unlikely(next_task->prio < rq->curr->prio)) {
- resched_curr(rq);
+ resched_curr_preempted_rt(rq);
return 0;
}
@@ -1811,7 +1883,7 @@ retry:
activate_task(lowest_rq, next_task, 0);
ret = 1;
- resched_curr(lowest_rq);
+ resched_curr_preempted_rt(lowest_rq);
double_unlock_balance(rq, lowest_rq);
@@ -2213,7 +2285,7 @@ static void switched_to_rt(struct rq *rq, struct task_struct *p)
check_resched = 0;
#endif /* CONFIG_SMP */
if (check_resched && p->prio < rq->curr->prio)
- resched_curr(rq);
+ resched_curr_preempted_rt(rq);
}
}
@@ -2255,7 +2327,7 @@ prio_changed_rt(struct rq *rq, struct task_struct *p, int oldprio)
* then reschedule.
*/
if (p->prio < rq->curr->prio)
- resched_curr(rq);
+ resched_curr_preempted_rt(rq);
}
}