@@ -347,11 +347,15 @@ static inline void set_post_schedule(struct rq *rq)
rq->post_schedule = has_pushable_tasks(rq);
}
-static void enqueue_pushable_task(struct rq *rq, struct task_struct *p)
+static void enqueue_pushable_task(struct rq *rq,
+ struct task_struct *p, bool head)
{
plist_del(&p->pushable_tasks, &rq->rt.pushable_tasks);
plist_node_init(&p->pushable_tasks, p->prio);
- plist_add(&p->pushable_tasks, &rq->rt.pushable_tasks);
+ if (head)
+ plist_add_head(&p->pushable_tasks, &rq->rt.pushable_tasks);
+ else
+ plist_add_tail(&p->pushable_tasks, &rq->rt.pushable_tasks);
/* Update the highest prio pushable task */
if (p->prio < rq->rt.highest_prio.next)
@@ -373,7 +377,8 @@ static void dequeue_pushable_task(struct rq *rq, struct task_struct *p)
#else
-static inline void enqueue_pushable_task(struct rq *rq, struct task_struct *p)
+static inline void enqueue_pushable_task(struct rq *rq,
+ struct task_struct *p, bool head)
{
}
@@ -1248,7 +1253,7 @@ enqueue_task_rt(struct rq *rq, struct task_struct *p, int flags)
enqueue_rt_entity(rt_se, flags & ENQUEUE_HEAD);
if (!task_current(rq, p) && p->nr_cpus_allowed > 1)
- enqueue_pushable_task(rq, p);
+		enqueue_pushable_task(rq, p, false);
}
static void dequeue_task_rt(struct rq *rq, struct task_struct *p, int flags)
@@ -1494,8 +1499,17 @@ static void put_prev_task_rt(struct rq *rq, struct task_struct *p)
* The previous task needs to be made eligible for pushing
* if it is still active
*/
- if (on_rt_rq(&p->rt) && p->nr_cpus_allowed > 1)
- enqueue_pushable_task(rq, p);
+	if (on_rt_rq(&p->rt) && p->nr_cpus_allowed > 1) {
+		/*
+		 * A task that was preempted while still running is queued
+		 * at the head so that it is considered first when pushing
+		 * tasks to other CPUs.
+		 */
+		if (task_running(rq, p) && (preempt_count() & PREEMPT_ACTIVE))
+			enqueue_pushable_task(rq, p, true);
+		else
+			enqueue_pushable_task(rq, p, false);
+	}
}
#ifdef CONFIG_SMP
@@ -1914,7 +1923,7 @@ static void set_cpus_allowed_rt(struct task_struct *p,
rq->rt.rt_nr_migratory--;
} else {
if (!task_current(rq, p))
- enqueue_pushable_task(rq, p);
+			enqueue_pushable_task(rq, p, false);
rq->rt.rt_nr_migratory++;
}