@@ -71,8 +71,6 @@ static int snooze_loop(struct cpuidle_device *dev,
{
u64 snooze_exit_time;
- set_thread_flag(TIF_POLLING_NRFLAG);
-
local_irq_enable();
snooze_exit_time = get_tb() + get_snooze_timeout(dev, drv, index);
@@ -81,21 +79,13 @@ static int snooze_loop(struct cpuidle_device *dev,
HMT_very_low();
while (!need_resched()) {
if (likely(snooze_timeout_en) && get_tb() > snooze_exit_time) {
- /*
- * Task has not woken up but we are exiting the polling
- * loop anyway. Require a barrier after polling is
- * cleared to order subsequent test of need_resched().
- */
- clear_thread_flag(TIF_POLLING_NRFLAG);
dev->poll_time_limit = true;
- smp_mb();
break;
}
}
HMT_medium();
ppc64_runlatch_on();
- clear_thread_flag(TIF_POLLING_NRFLAG);
local_irq_disable();
@@ -40,8 +40,6 @@ int snooze_loop(struct cpuidle_device *dev, struct cpuidle_driver *drv,
{
u64 snooze_exit_time;
- set_thread_flag(TIF_POLLING_NRFLAG);
-
pseries_idle_prolog();
raw_local_irq_enable();
snooze_exit_time = get_tb() + snooze_timeout;
@@ -51,21 +49,12 @@ int snooze_loop(struct cpuidle_device *dev, struct cpuidle_driver *drv,
HMT_low();
HMT_very_low();
if (likely(snooze_timeout_en) && get_tb() > snooze_exit_time) {
- /*
- * Task has not woken up but we are exiting the polling
- * loop anyway. Require a barrier after polling is
- * cleared to order subsequent test of need_resched().
- */
dev->poll_time_limit = true;
- clear_thread_flag(TIF_POLLING_NRFLAG);
- smp_mb();
break;
}
}
HMT_medium();
- clear_thread_flag(TIF_POLLING_NRFLAG);
-
raw_local_irq_disable();
pseries_idle_epilog();
@@ -237,7 +237,7 @@ noinstr int cpuidle_enter_state(struct cpuidle_device *dev,
broadcast = false;
}
- polling = target_state->flags & CPUIDLE_FLAG_MWAIT;
+ polling = target_state->flags & (CPUIDLE_FLAG_MWAIT | CPUIDLE_FLAG_POLLING);
/*
* If the target state doesn't poll on need_resched(), this is
@@ -13,35 +13,29 @@
static int __cpuidle poll_idle(struct cpuidle_device *dev,
struct cpuidle_driver *drv, int index)
{
- u64 time_start;
-
- time_start = local_clock_noinstr();
+ u64 time_start = local_clock_noinstr();
+ unsigned int loop_count = 0;
+ u64 limit;
dev->poll_time_limit = false;
raw_local_irq_enable();
- if (!current_set_polling_and_test()) {
- unsigned int loop_count = 0;
- u64 limit;
- limit = cpuidle_poll_time(drv, dev);
+ limit = cpuidle_poll_time(drv, dev);
- while (!need_resched()) {
- cpu_relax();
- if (loop_count++ < POLL_IDLE_RELAX_COUNT)
- continue;
+ while (!need_resched()) {
+ cpu_relax();
+ if (loop_count++ < POLL_IDLE_RELAX_COUNT)
+ continue;
- loop_count = 0;
- if (local_clock_noinstr() - time_start > limit) {
- dev->poll_time_limit = true;
- break;
- }
+ loop_count = 0;
+ if (local_clock_noinstr() - time_start > limit) {
+ dev->poll_time_limit = true;
+ break;
}
}
raw_local_irq_disable();
- current_clr_polling();
-
return index;
}
Software polling idle states set TIF_POLLING_NRFLAG again and clear it upon exit. This involves error-prone duplicated code and wastes cycles performing atomic operations, sometimes fully ordered RmW ones. To avoid this, rely instead on the same generic TIF_POLLING_NRFLAG handling that is currently in use for hardware monitoring states. Signed-off-by: Frederic Weisbecker <frederic@kernel.org> --- drivers/cpuidle/cpuidle-powernv.c | 10 ---------- drivers/cpuidle/cpuidle-pseries.c | 11 ----------- drivers/cpuidle/cpuidle.c | 2 +- drivers/cpuidle/poll_state.c | 30 ++++++++++++------------------ 4 files changed, 13 insertions(+), 40 deletions(-)