@@ -225,7 +225,7 @@ static void blk_delay_work(struct work_struct *work)
void blk_delay_queue(struct request_queue *q, unsigned long msecs)
{
if (likely(!blk_queue_dead(q)))
- queue_delayed_work(kblockd_workqueue, &q->delay_work,
+ queue_delayed_work_on_any_cpu(kblockd_workqueue, &q->delay_work,
msecs_to_jiffies(msecs));
}
EXPORT_SYMBOL(blk_delay_queue);
@@ -2852,14 +2852,14 @@ EXPORT_SYMBOL_GPL(blk_rq_prep_clone);
int kblockd_schedule_work(struct request_queue *q, struct work_struct *work)
{
- return queue_work(kblockd_workqueue, work);
+ return queue_work_on_any_cpu(kblockd_workqueue, work);
}
EXPORT_SYMBOL(kblockd_schedule_work);
int kblockd_schedule_delayed_work(struct request_queue *q,
struct delayed_work *dwork, unsigned long delay)
{
- return queue_delayed_work(kblockd_workqueue, dwork, delay);
+ return queue_delayed_work_on_any_cpu(kblockd_workqueue, dwork, delay);
}
EXPORT_SYMBOL(kblockd_schedule_delayed_work);
@@ -144,7 +144,7 @@ void put_io_context(struct io_context *ioc)
if (atomic_long_dec_and_test(&ioc->refcount)) {
spin_lock_irqsave(&ioc->lock, flags);
if (!hlist_empty(&ioc->icq_list))
- schedule_work(&ioc->release_work);
+ queue_work_on_any_cpu(system_wq, &ioc->release_work);
else
free_ioc = true;
spin_unlock_irqrestore(&ioc->lock, flags);
@@ -1488,9 +1488,11 @@ static void __disk_unblock_events(struct gendisk *disk, bool check_now)
intv = disk_events_poll_jiffies(disk);
set_timer_slack(&ev->dwork.timer, intv / 4);
if (check_now)
- queue_delayed_work(system_freezable_wq, &ev->dwork, 0);
+ queue_delayed_work_on_any_cpu(system_freezable_wq, &ev->dwork,
+ 0);
else if (intv)
- queue_delayed_work(system_freezable_wq, &ev->dwork, intv);
+ queue_delayed_work_on_any_cpu(system_freezable_wq, &ev->dwork,
+ intv);
out_unlock:
spin_unlock_irqrestore(&ev->lock, flags);
}
@@ -1626,7 +1628,8 @@ static void disk_check_events(struct disk_events *ev,
intv = disk_events_poll_jiffies(disk);
if (!ev->block && intv)
- queue_delayed_work(system_freezable_wq, &ev->dwork, intv);
+ queue_delayed_work_on_any_cpu(system_freezable_wq, &ev->dwork,
+ intv);
spin_unlock_irq(&ev->lock);
The block layer uses workqueues for multiple purposes. There is no real
dependency on running these work items on the cpu which queued them.

On an idle system, it is observed that an idle cpu wakes up many times just to
service this work. It would be better if we could schedule it on a cpu which
isn't idle, to save power.

By idle cpu (from the scheduler's perspective) we mean:
- Current task is the idle task
- nr_running == 0
- wake_list is empty

This patch replaces schedule_work() and queue_[delayed_]work() with their
queue_[delayed_]work_on_any_cpu() siblings. These routines look for the
closest (via scheduling domains) non-idle cpu (non-idle from the scheduler's
perspective). If the current cpu is not idle, or if all cpus are idle, the
work is queued on the local cpu. A minimal sketch of this selection logic
follows the diffstat below.

Cc: Jens Axboe <axboe@kernel.dk>
Signed-off-by: Viresh Kumar <viresh.kumar@linaro.org>
---
 block/blk-core.c | 6 +++---
 block/blk-ioc.c  | 2 +-
 block/genhd.c    | 9 ++++++---
 3 files changed, 10 insertions(+), 7 deletions(-)
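For reference, a minimal sketch of the "on any cpu" selection described above,
under stated assumptions: the real queue_[delayed_]work_on_any_cpu() helpers
are provided by the companion workqueue/scheduler patches, not by this patch,
and sched_select_non_idle_cpu() below is an assumed helper standing in for
whatever interface the scheduler exposes to walk the scheduling domains for
the closest non-idle cpu.

```c
#include <linux/smp.h>
#include <linux/sched.h>
#include <linux/workqueue.h>

/*
 * Sketch only -- not the actual implementation.
 * sched_select_non_idle_cpu() is an assumed helper.
 */
static bool queue_work_on_any_cpu_sketch(struct workqueue_struct *wq,
					 struct work_struct *work)
{
	int cpu = smp_processor_id();

	/*
	 * idle_cpu() matches the three criteria above (idle task running,
	 * nr_running == 0, empty wake_list).  Keep the work local when this
	 * cpu is busy; only when it is idle look for a nearby non-idle cpu,
	 * falling back to the local cpu if every cpu is idle.
	 */
	if (idle_cpu(cpu))
		cpu = sched_select_non_idle_cpu(cpu);	/* assumed helper */

	return queue_work_on(cpu, wq, work);
}
```

The hunks above then simply switch the callers over to the new helpers; since
the selection falls back to the local cpu whenever it is busy (or everything
is idle), behaviour on a loaded system is unchanged.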