@@ -2725,24 +2725,18 @@ void scsi_start_queue(struct scsi_device *sdev)
blk_mq_unquiesce_queue(sdev->request_queue);
}
-static void scsi_stop_queue(struct scsi_device *sdev, bool nowait)
+static void scsi_stop_queue(struct scsi_device *sdev)
{
/*
* The atomic variable of ->queue_stopped covers that
* blk_mq_quiesce_queue* is balanced with blk_mq_unquiesce_queue.
*
* However, we still need to wait until quiesce is done
- * in case that queue has been stopped.
+ * in case the queue has been stopped. This is now done in
+ * scsi_target_block() for all devices of the target.
*/
- if (!cmpxchg(&sdev->queue_stopped, 0, 1)) {
- if (nowait)
- blk_mq_quiesce_queue_nowait(sdev->request_queue);
- else
- blk_mq_quiesce_queue(sdev->request_queue);
- } else {
- if (!nowait)
- blk_mq_wait_quiesce_done(sdev->request_queue->tag_set);
- }
+ if (!cmpxchg(&sdev->queue_stopped, 0, 1))
+ blk_mq_quiesce_queue_nowait(sdev->request_queue);
}
/**
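
As a stand-alone illustration of the balancing that the ->queue_stopped comment above describes, the gate reduces to the following user-space sketch. fake_sdev, quiesce() and unquiesce() are invented stand-ins for struct scsi_device, blk_mq_quiesce_queue_nowait() and blk_mq_unquiesce_queue(); only the cmpxchg() logic mirrors the real code.

#include <stdatomic.h>
#include <stdio.h>

/* Invented stand-in for struct scsi_device; only the gate matters here. */
struct fake_sdev {
	atomic_int queue_stopped;	/* 0 = running, 1 = quiesced */
};

/* Stand-ins for blk_mq_quiesce_queue_nowait() / blk_mq_unquiesce_queue(). */
static void quiesce(struct fake_sdev *sdev)   { (void)sdev; puts("quiesce_nowait"); }
static void unquiesce(struct fake_sdev *sdev) { (void)sdev; puts("unquiesce"); }

static void stop_queue(struct fake_sdev *sdev)
{
	int expected = 0;

	/* Only the 0 -> 1 transition issues the quiesce. */
	if (atomic_compare_exchange_strong(&sdev->queue_stopped, &expected, 1))
		quiesce(sdev);
}

static void start_queue(struct fake_sdev *sdev)
{
	int expected = 1;

	/* Only the 1 -> 0 transition issues the unquiesce. */
	if (atomic_compare_exchange_strong(&sdev->queue_stopped, &expected, 0))
		unquiesce(sdev);
}

int main(void)
{
	struct fake_sdev sdev = { .queue_stopped = 0 };

	stop_queue(&sdev);	/* prints "quiesce_nowait" */
	stop_queue(&sdev);	/* no-op: gate already taken */
	start_queue(&sdev);	/* prints "unquiesce" */
	start_queue(&sdev);	/* no-op: already running */
	return 0;
}

However many times stop_queue() is called in a row, the quiesce is issued exactly once, which is why scsi_stop_queue() no longer needs a nowait flag: it never waits, and the wait is deferred to scsi_target_block().
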
@@ -2769,7 +2763,7 @@ int scsi_internal_device_block_nowait(struct scsi_device *sdev)
* request queue.
*/
if (!ret)
- scsi_stop_queue(sdev, true);
+ scsi_stop_queue(sdev);
return ret;
}
EXPORT_SYMBOL_GPL(scsi_internal_device_block_nowait);
@@ -2795,9 +2789,9 @@ static int scsi_internal_device_block(struct scsi_device *sdev)
mutex_lock(&sdev->state_mutex);
err = __scsi_internal_device_block_nowait(sdev);
- if (err == 0)
- scsi_stop_queue(sdev, false);
mutex_unlock(&sdev->state_mutex);
+ if (err == 0)
+ scsi_stop_queue(sdev);
return err;
}
@@ -2910,6 +2904,13 @@ scsi_target_block(struct device *dev)
device_block);
else
device_for_each_child(dev, NULL, target_block);
+
+ /*
+ * SCSI never enables blk-mq's BLK_MQ_F_BLOCKING flag, so
+ * blk_mq_wait_quiesce_done() reduces to synchronize_rcu(),
+ * and a single call covers every device quiesced above.
+ */
+ synchronize_rcu();
}
EXPORT_SYMBOL_GPL(scsi_target_block);
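
The reason a single grace period suffices can be shown in isolation. The helper below is hypothetical (not added by this patch) and assumes none of the queues sets BLK_MQ_F_BLOCKING, in which case dispatch runs under rcu_read_lock() and blk_mq_wait_quiesce_done() would only wait for an RCU grace period anyway:

#include <linux/blk-mq.h>
#include <linux/rcupdate.h>

/*
 * Hypothetical helper: quiesce a set of non-BLK_MQ_F_BLOCKING queues
 * and wait once for all of them.  Marking the queues quiesced never
 * sleeps; the single synchronize_rcu() then guarantees that no
 * dispatch which started before the marking is still running on any
 * of the queues.
 */
static void quiesce_queues_batched(struct request_queue **queues, int nr)
{
	int i;

	for (i = 0; i < nr; i++)
		blk_mq_quiesce_queue_nowait(queues[i]);

	/* One RCU grace period covers every queue marked above. */
	synchronize_rcu();
}

Calling blk_mq_quiesce_queue() per device would instead wait for one grace period per queue; batching that wait is the point of moving it out of scsi_stop_queue() and into scsi_target_block().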