--- a/net/core/dst.c
+++ b/net/core/dst.c
@@ -135,7 +135,8 @@ loop:
*/
if (expires > 4*HZ)
expires = round_jiffies_relative(expires);
- schedule_delayed_work(&dst_gc_work, expires);
+ queue_delayed_work(system_power_efficient_wq,
+ &dst_gc_work, expires);
}
spin_unlock_bh(&dst_garbage.lock);
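
schedule_delayed_work(dwork, delay) is just queue_delayed_work(system_wq, dwork, delay), so this hunk is a pure workqueue swap. system_power_efficient_wq is created with WQ_POWER_EFFICIENT: when the workqueue.power_efficient boot parameter (or CONFIG_WQ_POWER_EFFICIENT_DEFAULT) is set, it behaves as an unbound workqueue, so the scheduler may run the gc on a CPU that is already awake instead of waking the one the timer fires on; with power efficiency off it is identical to system_wq. Below is a minimal sketch of the same rearm-from-the-handler pattern; all pe_demo_* names are invented for illustration and are not part of the patch:

/*
 * Minimal sketch (not from the patch): a periodic cleanup job that
 * mirrors dst_gc_task's rearming pattern on the power-efficient
 * workqueue. All pe_demo_* names are made up for illustration.
 */
#include <linux/module.h>
#include <linux/workqueue.h>
#include <linux/timer.h>        /* round_jiffies_relative() */
#include <linux/jiffies.h>

static void pe_demo_gc(struct work_struct *work);
static DECLARE_DELAYED_WORK(pe_demo_work, pe_demo_gc);

static void pe_demo_gc(struct work_struct *work)
{
        unsigned long expires = 10 * HZ;        /* pretend gc interval */

        /* Long delays need not fire exactly on time: round to a whole
         * jiffy boundary so the timer can coalesce with other wakeups. */
        if (expires > 4*HZ)
                expires = round_jiffies_relative(expires);

        /* On a power_efficient=y kernel this queues on an unbound pool,
         * letting the scheduler pick a non-idle CPU; otherwise it is
         * equivalent to schedule_delayed_work(). */
        queue_delayed_work(system_power_efficient_wq, &pe_demo_work, expires);
}

static int __init pe_demo_init(void)
{
        queue_delayed_work(system_power_efficient_wq, &pe_demo_work, HZ);
        return 0;
}

static void __exit pe_demo_exit(void)
{
        cancel_delayed_work_sync(&pe_demo_work);
}

module_init(pe_demo_init);
module_exit(pe_demo_exit);
MODULE_LICENSE("GPL");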
@@ -223,7 +224,7 @@ void __dst_free(struct dst_entry *dst)
if (dst_garbage.timer_inc > DST_GC_INC) {
dst_garbage.timer_inc = DST_GC_INC;
dst_garbage.timer_expires = DST_GC_MIN;
- mod_delayed_work(system_wq, &dst_gc_work,
+ mod_delayed_work(system_power_efficient_wq, &dst_gc_work,
dst_garbage.timer_expires);
}
spin_unlock_bh(&dst_garbage.lock);
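
__dst_free() is the producer side: when entries accumulate it shortens the gc interval, and mod_delayed_work(), unlike queue_delayed_work(), also re-arms the timer of already-pending work, so the shorter expiry takes effect immediately. Because the target workqueue is named at every call site, both users of dst_gc_work have to move to system_power_efficient_wq together. Continuing the hypothetical pe_demo sketch above (the threshold and lock are illustrative):

/* Continuing the hypothetical pe_demo module above: a producer-side
 * hook that, like __dst_free(), pulls an already-queued gc forward
 * when pressure builds. */
#include <linux/spinlock.h>

static DEFINE_SPINLOCK(pe_demo_lock);
static unsigned long pe_demo_timer_expires = 10 * HZ;

static void pe_demo_note_pressure(void)
{
        spin_lock_bh(&pe_demo_lock);
        if (pe_demo_timer_expires > HZ) {
                pe_demo_timer_expires = HZ;
                /* Unlike queue_delayed_work(), mod_delayed_work() also
                 * shortens the timer of work that is already pending,
                 * so the gc really does run sooner. The wq argument
                 * must match the one used everywhere else for this
                 * delayed_work item. */
                mod_delayed_work(system_power_efficient_wq, &pe_demo_work,
                                 pe_demo_timer_expires);
        }
        spin_unlock_bh(&pe_demo_lock);
}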
--- a/net/core/link_watch.c
+++ b/net/core/link_watch.c
@@ -135,9 +135,10 @@ static void linkwatch_schedule_work(int urgent)
* override the existing timer.
*/
if (test_bit(LW_URGENT, &linkwatch_flags))
- mod_delayed_work(system_wq, &linkwatch_work, 0);
+ mod_delayed_work(system_power_efficient_wq, &linkwatch_work, 0);
else
- schedule_delayed_work(&linkwatch_work, delay);
+ queue_delayed_work(system_power_efficient_wq,
+ &linkwatch_work, delay);
}
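
linkwatch keeps its two-speed logic on the new workqueue: urgent events use mod_delayed_work(..., 0) to override a pending longer delay, while the lazy path uses queue_delayed_work(), which returns false and leaves the existing timer alone if the work is already pending. A sketch of that pattern under the same assumptions, with hypothetical pe_event_* names:

/* Sketch of the two-speed scheduling pattern linkwatch uses, with
 * hypothetical names. Urgent callers override a pending timer; lazy
 * callers only queue if nothing is pending. */
#include <linux/types.h>
#include <linux/workqueue.h>

static void pe_event_fn(struct work_struct *work);
static DECLARE_DELAYED_WORK(pe_event_work, pe_event_fn);

static void pe_event_fn(struct work_struct *work)
{
        /* process the coalesced events here */
}

static void pe_schedule_event(bool urgent)
{
        if (urgent)
                /* run ASAP, even if a longer delay is already armed */
                mod_delayed_work(system_power_efficient_wq,
                                 &pe_event_work, 0);
        else
                /* coalesce: keep whatever timer is already pending */
                queue_delayed_work(system_power_efficient_wq,
                                   &pe_event_work, HZ);
}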
--- a/net/core/netpoll.c
+++ b/net/core/netpoll.c
@@ -101,7 +101,8 @@ static void queue_process(struct work_struct *work)
__netif_tx_unlock(txq);
local_irq_restore(flags);
- schedule_delayed_work(&npinfo->tx_work, HZ/10);
+ queue_delayed_work(system_power_efficient_wq,
+ &npinfo->tx_work, HZ/10);
return;
}
__netif_tx_unlock(txq);
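
queue_process() is netpoll's deferred-transmit path: when the tx queue is frozen or the driver refuses the packet, the skb goes back to the head of the queue and the whole flush is retried from process context after HZ/10, roughly 100 ms. A hedged sketch of that retry idiom, not the kernel's code; all pe_tx_* names are invented:

/*
 * Hedged sketch of netpoll's retry idiom: packets that can't be sent
 * now are parked on a queue and flushed from process context later.
 */
#include <linux/skbuff.h>
#include <linux/workqueue.h>

static struct sk_buff_head pe_txq;

static bool pe_try_xmit(struct sk_buff *skb)
{
        /* stand-in for the real driver transmit; always "succeeds".
         * A real hook would return false *without* consuming the skb
         * when the device is busy. */
        kfree_skb(skb);
        return true;
}

static void pe_tx_flush(struct work_struct *work);
static DECLARE_DELAYED_WORK(pe_tx_work, pe_tx_flush);

static void pe_tx_flush(struct work_struct *work)
{
        struct sk_buff *skb;

        while ((skb = skb_dequeue(&pe_txq)) != NULL) {
                if (!pe_try_xmit(skb)) {
                        /* device busy: put the skb back in order and
                         * retry the whole queue in ~100ms (HZ/10) */
                        skb_queue_head(&pe_txq, skb);
                        queue_delayed_work(system_power_efficient_wq,
                                           &pe_tx_work, HZ / 10);
                        return;
                }
        }
}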
@@ -423,7 +424,8 @@ void netpoll_send_skb_on_dev(struct netpoll *np, struct sk_buff *skb,
if (status != NETDEV_TX_OK) {
skb_queue_tail(&npinfo->txq, skb);
- schedule_delayed_work(&npinfo->tx_work,0);
+ queue_delayed_work(system_power_efficient_wq,
+ &npinfo->tx_work, 0);
}
}
EXPORT_SYMBOL(netpoll_send_skb_on_dev);
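
This second netpoll hunk is the enqueue side: a failed immediate transmit parks the skb and kicks the same work with a delay of 0, which means "as soon as a worker thread picks it up" rather than "synchronously in this context". Continuing the pe_tx sketch above:

/* Enqueue side of the same hypothetical sketch: delay 0 queues the
 * flush to run as soon as a worker is available, without blocking
 * the (possibly atomic) caller. */
static void pe_tx_enqueue(struct sk_buff *skb)
{
        skb_queue_tail(&pe_txq, skb);
        queue_delayed_work(system_power_efficient_wq, &pe_tx_work, 0);
}

static void pe_tx_init(void)
{
        skb_queue_head_init(&pe_txq);
}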