
[08/10] scsi/ibmvfc: Replace tasklet with work

Message ID 20220530231512.9729-9-dave@stgolabs.net
State New
Series scsi: Replace tasklets as BH

Commit Message

Davidlohr Bueso May 30, 2022, 11:15 p.m. UTC
Tasklets have long been deprecated as too heavy on the system because
they run in atomic (softirq) context - and this is not a performance
critical path. If a higher priority process wants to run, it must wait
for the tasklet to finish before doing so. Use a workqueue instead and
run in task context. This does allow more concurrency (tasklets are
serialized against themselves, work items are not), but the handler
runs under both the vhost's host_lock and crq.q_lock, so it should be
safe.
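
For reference, this is the standard tasklet -> work_struct conversion
pattern. A minimal, generic sketch (my_host and process_queues() are
placeholders, not the driver code below):

	#include <linux/workqueue.h>

	struct my_host {
		struct work_struct work; /* was: struct tasklet_struct tasklet */
		/* ... driver state ... */
	};

	static void process_queues(struct my_host *host); /* placeholder */

	static void my_workfn(struct work_struct *work)
	{
		/* tasklets got the host via a data pointer; work items
		 * recover it with container_of() on the embedded member */
		struct my_host *host = container_of(work, struct my_host, work);

		process_queues(host);	/* task context, may sleep */
	}

	/*
	 * init:     INIT_WORK(&host->work, my_workfn);
	 * kick:     schedule_work(&host->work);  (hardirq or any context)
	 * teardown: cancel_work_sync(&host->work);
	 */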

Cc: Tyrel Datwyler <tyreld@linux.ibm.com>
Cc: Michael Ellerman <mpe@ellerman.id.au>
Cc: linuxppc-dev@lists.ozlabs.org
Signed-off-by: Davidlohr Bueso <dave@stgolabs.net>
---
 drivers/scsi/ibmvscsi/ibmvfc.c | 23 +++++++++++++----------
 drivers/scsi/ibmvscsi/ibmvfc.h |  3 ++-
 2 files changed, 15 insertions(+), 11 deletions(-)

Comments

Sebastian Andrzej Siewior June 9, 2022, 12:30 p.m. UTC | #1
On 2022-05-30 16:15:10 [-0700], Davidlohr Bueso wrote:
> diff --git a/drivers/scsi/ibmvscsi/ibmvfc.c b/drivers/scsi/ibmvscsi/ibmvfc.c
> index d0eab5700dc5..31b1900489e7 100644
> --- a/drivers/scsi/ibmvscsi/ibmvfc.c
> +++ b/drivers/scsi/ibmvscsi/ibmvfc.c
> @@ -891,7 +891,7 @@ static void ibmvfc_release_crq_queue(struct ibmvfc_host *vhost)
>  
>  	ibmvfc_dbg(vhost, "Releasing CRQ\n");
>  	free_irq(vdev->irq, vhost);
> -	tasklet_kill(&vhost->tasklet);
> +        cancel_work_sync(&vhost->work);
s/ {8}/\t/

Is there a reason not to use threaded interrupts? The workqueue _might_
migrate to another CPU. The locking ensures that nothing bad happens,
but ibmvfc_tasklet() has this piece:

|         spin_lock_irqsave(vhost->host->host_lock, flags);
…
|                 while ((async = ibmvfc_next_async_crq(vhost)) != NULL) {
|                         ibmvfc_handle_async(async, vhost);
|                         async->valid = 0;
|                         wmb();
|                 }
…
|                 vio_enable_interrupts(vdev);
potentially enables interrupts, which may fire right away.

|                 if ((async = ibmvfc_next_async_crq(vhost)) != NULL) {
|                         vio_disable_interrupts(vdev);

disables it again.

|         }
| 
|         spin_unlock(vhost->crq.q_lock);
|         spin_unlock_irqrestore(vhost->host->host_lock, flags);

If the worker runs on another CPU, the CPU servicing the interrupt
will be blocked on the lock, which is not nice.

My guess is that you could enable interrupts right before unlocking,
but that is a different story.
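
Roughly, the tail of the handler would then look something like this
(an untested sketch; whether the re-check/disable dance above is still
needed would have to be audited):

|	}	/* drain loops done, interrupts still disabled */
|
|	spin_unlock(vhost->crq.q_lock);
|	/* enable as late as possible: an interrupt firing right here
|	 * only briefly contends on host_lock before the final unlock */
|	vio_enable_interrupts(vdev);
|	spin_unlock_irqrestore(vhost->host->host_lock, flags);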

>  	do {
>  		if (rc)
>  			msleep(100);

Sebastian
Davidlohr Bueso June 28, 2022, 3:18 p.m. UTC | #2
On Thu, 09 Jun 2022, Sebastian Andrzej Siewior wrote:

>On 2022-05-30 16:15:10 [-0700], Davidlohr Bueso wrote:
>> diff --git a/drivers/scsi/ibmvscsi/ibmvfc.c b/drivers/scsi/ibmvscsi/ibmvfc.c
>> index d0eab5700dc5..31b1900489e7 100644
>> --- a/drivers/scsi/ibmvscsi/ibmvfc.c
>> +++ b/drivers/scsi/ibmvscsi/ibmvfc.c
>> @@ -891,7 +891,7 @@ static void ibmvfc_release_crq_queue(struct ibmvfc_host *vhost)
>>
>>	ibmvfc_dbg(vhost, "Releasing CRQ\n");
>>	free_irq(vdev->irq, vhost);
>> -	tasklet_kill(&vhost->tasklet);
>> +        cancel_work_sync(&vhost->work);
>s/ {8}/\t/
>
>Is there a reason not to use threaded interrupts?

I went with a workqueue here because the resume-from-suspend path also
schedules async processing, so threaded irqs didn't seem like a good fit.
This is also similar to patch 2 (though in that case I overlooked the
resume caller, which you pointed out).
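
For contrast, the threaded-irq shape would be roughly the following (a
sketch only; ibmvfc_drain_crq() is a hypothetical stand-in for the CRQ
processing that ibmvfc_workfn() does):

	static irqreturn_t ibmvfc_irq_thread(int irq, void *dev_instance)
	{
		struct ibmvfc_host *vhost = dev_instance;

		ibmvfc_drain_crq(vhost);	/* hypothetical helper */
		return IRQ_HANDLED;
	}

	/* a NULL hardirq handler requires IRQF_ONESHOT */
	rc = request_threaded_irq(vdev->irq, NULL, ibmvfc_irq_thread,
				  IRQF_ONESHOT, IBMVFC_NAME, vhost);

The catch is that ibmvfc_resume() has no clean way to kick that thread
short of the device raising an interrupt, whereas schedule_work() can be
called from the resume callback just as it is from the hardirq handler.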

Thanks,
Davidlohr

Patch

diff --git a/drivers/scsi/ibmvscsi/ibmvfc.c b/drivers/scsi/ibmvscsi/ibmvfc.c
index d0eab5700dc5..31b1900489e7 100644
--- a/drivers/scsi/ibmvscsi/ibmvfc.c
+++ b/drivers/scsi/ibmvscsi/ibmvfc.c
@@ -891,7 +891,7 @@  static void ibmvfc_release_crq_queue(struct ibmvfc_host *vhost)
 
 	ibmvfc_dbg(vhost, "Releasing CRQ\n");
 	free_irq(vdev->irq, vhost);
-	tasklet_kill(&vhost->tasklet);
+        cancel_work_sync(&vhost->work);
 	do {
 		if (rc)
 			msleep(100);
@@ -3689,22 +3689,22 @@  static irqreturn_t ibmvfc_interrupt(int irq, void *dev_instance)
 
 	spin_lock_irqsave(vhost->host->host_lock, flags);
 	vio_disable_interrupts(to_vio_dev(vhost->dev));
-	tasklet_schedule(&vhost->tasklet);
+	schedule_work(&vhost->work);
 	spin_unlock_irqrestore(vhost->host->host_lock, flags);
 	return IRQ_HANDLED;
 }
 
 /**
- * ibmvfc_tasklet - Interrupt handler tasklet
- * @data:		ibmvfc host struct
+ * ibmvfc_workfn - work handler
+ * @work:		ibmvfc host work struct
  *
  * Returns:
  *	Nothing
  **/
-static void ibmvfc_tasklet(void *data)
+static void ibmvfc_workfn(struct work_struct *work)
 {
-	struct ibmvfc_host *vhost = data;
-	struct vio_dev *vdev = to_vio_dev(vhost->dev);
+	struct ibmvfc_host *vhost;
+	struct vio_dev *vdev;
 	struct ibmvfc_crq *crq;
 	struct ibmvfc_async_crq *async;
 	struct ibmvfc_event *evt, *temp;
@@ -3712,6 +3712,9 @@  static void ibmvfc_tasklet(void *data)
 	int done = 0;
 	LIST_HEAD(evt_doneq);
 
+	vhost = container_of(work, struct ibmvfc_host, work);
+	vdev = to_vio_dev(vhost->dev);
+
 	spin_lock_irqsave(vhost->host->host_lock, flags);
 	spin_lock(vhost->crq.q_lock);
 	while (!done) {
@@ -5722,7 +5725,7 @@  static int ibmvfc_init_crq(struct ibmvfc_host *vhost)
 
 	retrc = 0;
 
-	tasklet_init(&vhost->tasklet, (void *)ibmvfc_tasklet, (unsigned long)vhost);
+	INIT_WORK(&vhost->work, ibmvfc_workfn);
 
 	if ((rc = request_irq(vdev->irq, ibmvfc_interrupt, 0, IBMVFC_NAME, vhost))) {
 		dev_err(dev, "Couldn't register irq 0x%x. rc=%d\n", vdev->irq, rc);
@@ -5738,7 +5741,7 @@  static int ibmvfc_init_crq(struct ibmvfc_host *vhost)
 	return retrc;
 
 req_irq_failed:
-	tasklet_kill(&vhost->tasklet);
+	cancel_work_sync(&vhost->work);
 	do {
 		rc = plpar_hcall_norets(H_FREE_CRQ, vdev->unit_address);
 	} while (rc == H_BUSY || H_IS_LONG_BUSY(rc));
@@ -6213,7 +6216,7 @@  static int ibmvfc_resume(struct device *dev)
 
 	spin_lock_irqsave(vhost->host->host_lock, flags);
 	vio_disable_interrupts(vdev);
-	tasklet_schedule(&vhost->tasklet);
+	schedule_work(&vhost->work);
 	spin_unlock_irqrestore(vhost->host->host_lock, flags);
 	return 0;
 }
diff --git a/drivers/scsi/ibmvscsi/ibmvfc.h b/drivers/scsi/ibmvscsi/ibmvfc.h
index 3718406e0988..7eca3622a2fa 100644
--- a/drivers/scsi/ibmvscsi/ibmvfc.h
+++ b/drivers/scsi/ibmvscsi/ibmvfc.h
@@ -12,6 +12,7 @@ 
 
 #include <linux/list.h>
 #include <linux/types.h>
+#include <linux/workqueue.h>
 #include <scsi/viosrp.h>
 
 #define IBMVFC_NAME	"ibmvfc"
@@ -892,7 +893,7 @@  struct ibmvfc_host {
 	char partition_name[97];
 	void (*job_step) (struct ibmvfc_host *);
 	struct task_struct *work_thread;
-	struct tasklet_struct tasklet;
+	struct work_struct work;
 	struct work_struct rport_add_work_q;
 	wait_queue_head_t init_wait_q;
 	wait_queue_head_t work_wait_q;