--- a/include/linux/bh.h
+++ b/include/linux/bh.h
@@ -35,10 +35,8 @@ static inline void local_bh_enable(void)
#ifdef CONFIG_PREEMPT_RT
extern bool local_bh_blocked(void);
-extern void softirq_preempt(void);
#else
static inline bool local_bh_blocked(void) { return false; }
-static inline void softirq_preempt(void) { }
#endif
#endif /* _LINUX_BH_H */
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -1838,7 +1838,6 @@ static inline int dl_task_check_affinity(struct task_struct *p, const struct cpu
}
#endif
-extern bool task_is_pi_boosted(const struct task_struct *p);
extern int yield_to(struct task_struct *p, bool preempt);
extern void set_user_nice(struct task_struct *p, long nice);
extern int task_prio(const struct task_struct *p);
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -7533,21 +7533,6 @@ static inline void preempt_dynamic_init(void) { }
#endif /* CONFIG_PREEMPT_DYNAMIC */
-/**
- * task_is_pi_boosted - Check if task has been PI boosted.
- * @p: Task to check.
- *
- * Return true if task is subject to priority inheritance.
- */
-bool task_is_pi_boosted(const struct task_struct *p)
-{
- int prio = p->prio;
-
- if (!rt_prio(prio))
- return false;
- return prio != p->normal_prio;
-}
-
int io_schedule_prepare(void)
{
int old_iowait = current->in_iowait;
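
For reference, the helper removed above leaned entirely on the scheduler's existing PI bookkeeping: kernel priority values are inverted (a lower p->prio means a higher priority, and the RT range is 0..MAX_RT_PRIO-1), while p->normal_prio is the priority the task owns without any inheritance. A standalone restatement of the check, with illustrative values that are not taken from the patch:

	/* Sketch of the removed check, outside the kernel tree.
	 * Kernel convention: lower value == higher priority;
	 * MAX_RT_PRIO is 100 in mainline. */
	static bool pi_boosted(int prio, int normal_prio)
	{
		if (prio >= 100)		/* not running in the RT range */
			return false;
		return prio != normal_prio;	/* running above its own base */
	}

	/* pi_boosted(120, 120) -> false  SCHED_OTHER, nice 0, unboosted
	 * pi_boosted( 98, 120) -> true   same task lent FIFO-1 via rtmutex PI
	 * pi_boosted( 98,  98) -> false  plain SCHED_FIFO-1 task */
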
--- a/kernel/sched/rt.c
+++ b/kernel/sched/rt.c
@@ -2176,11 +2176,8 @@ static int rto_next_cpu(struct root_domain *rd)
rd->rto_cpu = cpu;
- if (cpu < nr_cpu_ids) {
- if (!has_pushable_tasks(cpu_rq(cpu)))
- continue;
+ if (cpu < nr_cpu_ids)
return cpu;
- }
rd->rto_cpu = -1;
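
This hunk restores the upstream round-robin in rto_next_cpu(): every CPU still set in rd->rto_mask is handed back as the next IPI target, instead of being skipped when its runqueue holds no pushable tasks. Condensed, and assuming the loop around it is unchanged from mainline, the resulting logic is:

	for (;;) {
		/* when rto_cpu is -1 this acts like cpumask_first() */
		cpu = cpumask_next(rd->rto_cpu, rd->rto_mask);
		rd->rto_cpu = cpu;

		if (cpu < nr_cpu_ids)
			return cpu;	/* next IPI target, unconditionally */

		rd->rto_cpu = -1;

		/* wrapped around: only loop again if rto_loop_next
		 * moved while we were scanning */
		next = atomic_read_acquire(&rd->rto_loop_next);
		if (rd->rto_loop == next)
			break;
		rd->rto_loop = next;
	}
	return -1;
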
--- a/kernel/softirq.c
+++ b/kernel/softirq.c
@@ -248,19 +248,6 @@ void __local_bh_enable_ip(unsigned long ip, unsigned int cnt)
}
EXPORT_SYMBOL(__local_bh_enable_ip);
-void softirq_preempt(void)
-{
- if (WARN_ON_ONCE(!preemptible()))
- return;
-
- if (WARN_ON_ONCE(__this_cpu_read(softirq_ctrl.cnt) != SOFTIRQ_OFFSET))
- return;
-
- __local_bh_enable(SOFTIRQ_OFFSET, true);
- /* preemption point */
- __local_bh_disable_ip(_RET_IP_, SOFTIRQ_OFFSET);
-}
-
/*
* Invoked from ksoftirqd_run() outside of the interrupt disabled section
* to acquire the per CPU local lock for reentrancy protection.
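
The helper removed here had one job: open a preemption window while running in softirq context on PREEMPT_RT. It dropped the per-CPU softirq_ctrl bottom-half count and lock (which is what actually holds off preemption at that point), let the scheduler run a boosted waiter, then re-entered the bh-disabled section. A hypothetical caller, mirroring the timer-code usage reverted below:

	/* sketch of the now-removed pattern, not part of this diff */
	if (task_is_pi_boosted(current))
		softirq_preempt();	/* bh enable, preemption point, bh disable */
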
--- a/kernel/time/timer.c
+++ b/kernel/time/timer.c
@@ -1564,16 +1564,9 @@ static void timer_sync_wait_running(struct timer_base *base)
__releases(&base->lock) __releases(&base->expiry_lock)
__acquires(&base->expiry_lock) __acquires(&base->lock)
{
- bool need_preempt;
-
- need_preempt = task_is_pi_boosted(current);
- if (need_preempt || atomic_read(&base->timer_waiters)) {
+ if (atomic_read(&base->timer_waiters)) {
raw_spin_unlock_irq(&base->lock);
spin_unlock(&base->expiry_lock);
-
- if (need_preempt)
- softirq_preempt();
-
spin_lock(&base->expiry_lock);
raw_spin_lock_irq(&base->lock);
}
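
With that, timer_sync_wait_running() is back to its upstream shape: the locks are cycled only when a task is actually blocked in del_timer_wait_running() on the expiry_lock, and the brief expiry_lock release is the handover that lets that waiter make progress. Reassembled from the hunk above:

	static void timer_sync_wait_running(struct timer_base *base)
	{
		if (atomic_read(&base->timer_waiters)) {
			raw_spin_unlock_irq(&base->lock);
			spin_unlock(&base->expiry_lock);
			spin_lock(&base->expiry_lock);
			raw_spin_lock_irq(&base->lock);
		}
	}
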
--- a/localversion-rt
+++ b/localversion-rt
@@ -1 +1 @@
--rt1
+-rt2