
[2/3] qedf: Wait for stag work during unload.

Message ID 20240315100600.19568-3-skashyap@marvell.com
State New
Series qedf misc bug fixes

Commit Message

Saurav Kashyap March 15, 2024, 10:05 a.m. UTC
If stag work is already scheduled and unload is called, it can
lead to issues because unload cleans up the work element. Wait for
the stag work to complete before cleaning up during unload.

Signed-off-by: Saurav Kashyap <skashyap@marvell.com>
Signed-off-by: Nilesh Javali <njavali@marvell.com>
---
 drivers/scsi/qedf/qedf.h      |  1 +
 drivers/scsi/qedf/qedf_main.c | 26 +++++++++++++++++++++++++-
 2 files changed, 26 insertions(+), 1 deletion(-)
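
For readers focused on the mechanism rather than the driver internals, the
patch coordinates the stag (soft context reset) work and the unload path with
a flag bit: qedf_stag_change_work() sets QEDF_STAG_IN_PROGRESS when it starts,
qedf_ctx_soft_reset() clears it on every exit path, and __qedf_remove() polls
the bit a bounded number of times before tearing the work element down. The
standalone C sketch below illustrates that handshake with plain atomics and a
five-attempt, 500 ms poll; the userspace threading and the names stag_work()
and do_unload() are illustrative stand-ins, not driver code.

/* Standalone sketch of the "wait for in-progress work before teardown"
 * handshake added by this patch. Names and the userspace threading are
 * illustrative only; they are not taken from the driver. */
#include <pthread.h>
#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>
#include <unistd.h>

static atomic_bool stag_in_progress = false;

/* Stands in for qedf_stag_change_work(): mark the reset as in progress,
 * do the work, then clear the flag on the way out. */
static void *stag_work(void *arg)
{
	atomic_store(&stag_in_progress, true);
	usleep(800 * 1000);		/* simulated soft context reset */
	atomic_store(&stag_in_progress, false);
	return NULL;
}

/* Stands in for __qedf_remove(): poll the flag up to five times,
 * sleeping 500 ms between attempts, before proceeding with cleanup. */
static void do_unload(void)
{
	int cnt = 0;

	while (atomic_load(&stag_in_progress) && cnt < 5) {
		printf("stag in progress, cnt=%d\n", cnt);
		cnt++;
		usleep(500 * 1000);
	}
	printf("proceeding with cleanup\n");
}

int main(void)
{
	pthread_t t;

	pthread_create(&t, NULL, stag_work, NULL);
	usleep(100 * 1000);	/* let the work start first */
	do_unload();
	pthread_join(&t, NULL);
	return 0;
}

Built with 'cc -pthread', the unload side gives up after roughly 2.5 seconds
if the flag never clears, matching the five msleep(500) attempts in
__qedf_remove() below.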

Comments

kernel test robot March 16, 2024, 1:34 a.m. UTC | #1
Hi Saurav,

kernel test robot noticed the following build warnings:

[auto build test WARNING on mkp-scsi/for-next]
[also build test WARNING on jejb-scsi/for-next linus/master v6.8 next-20240315]
[If your patch is applied to the wrong git tree, kindly drop us a note.
And when submitting patch, we suggest to use '--base' as documented in
https://git-scm.com/docs/git-format-patch#_base_tree_information]

url:    https://github.com/intel-lab-lkp/linux/commits/Saurav-Kashyap/qedf-Don-t-process-stag-work-during-unload-and-recovery/20240315-180830
base:   https://git.kernel.org/pub/scm/linux/kernel/git/mkp/scsi.git for-next
patch link:    https://lore.kernel.org/r/20240315100600.19568-3-skashyap%40marvell.com
patch subject: [PATCH 2/3] qedf: Wait for stag work during unload.
config: x86_64-allmodconfig (https://download.01.org/0day-ci/archive/20240316/202403160959.uNobO4UE-lkp@intel.com/config)
compiler: clang version 17.0.6 (https://github.com/llvm/llvm-project 6009708b4367171ccdbf4b5905cb6a803753fe18)
reproduce (this is a W=1 build): (https://download.01.org/0day-ci/archive/20240316/202403160959.uNobO4UE-lkp@intel.com/reproduce)

If you fix the issue in a separate patch/commit (i.e. not just a new version of
the same patch/commit), kindly add following tags
| Reported-by: kernel test robot <lkp@intel.com>
| Closes: https://lore.kernel.org/oe-kbuild-all/202403160959.uNobO4UE-lkp@intel.com/

All warnings (new ones prefixed by >>):

>> drivers/scsi/qedf/qedf_main.c:923:37: warning: variable 'qedf' is uninitialized when used here [-Wuninitialized]
     923 |                 clear_bit(QEDF_STAG_IN_PROGRESS, &qedf->flags);
         |                                                   ^~~~
   drivers/scsi/qedf/qedf_main.c:919:23: note: initialize the variable 'qedf' to silence this warning
     919 |         struct qedf_ctx *qedf;
         |                              ^
         |                               = NULL
   1 warning generated.


vim +/qedf +923 drivers/scsi/qedf/qedf_main.c

   915	
   916	/* Performs soft reset of qedf_ctx by simulating a link down/up */
   917	void qedf_ctx_soft_reset(struct fc_lport *lport)
   918	{
   919		struct qedf_ctx *qedf;
   920		struct qed_link_output if_link;
   921	
   922		if (lport->vport) {
 > 923			clear_bit(QEDF_STAG_IN_PROGRESS, &qedf->flags);
   924			printk_ratelimited("Cannot issue host reset on NPIV port.\n");
   925			return;
   926		}
   927	
   928		qedf = lport_priv(lport);
   929	
   930		qedf->flogi_pending = 0;
   931		/* For host reset, essentially do a soft link up/down */
   932		atomic_set(&qedf->link_state, QEDF_LINK_DOWN);
   933		QEDF_INFO(&qedf->dbg_ctx, QEDF_LOG_DISC,
   934			  "Queuing link down work.\n");
   935		queue_delayed_work(qedf->link_update_wq, &qedf->link_update,
   936		    0);
   937	
   938		if (qedf_wait_for_upload(qedf) == false) {
   939			QEDF_ERR(&qedf->dbg_ctx, "Could not upload all sessions.\n");
   940			WARN_ON(atomic_read(&qedf->num_offloads));
   941		}
   942	
   943		/* Before setting link up query physical link state */
   944		qed_ops->common->get_link(qedf->cdev, &if_link);
   945		/* Bail if the physical link is not up */
   946		if (!if_link.link_up) {
   947			QEDF_INFO(&qedf->dbg_ctx, QEDF_LOG_DISC,
   948				  "Physical link is not up.\n");
   949			clear_bit(QEDF_STAG_IN_PROGRESS, &qedf->flags);
   950			return;
   951		}
   952		/* Flush and wait to make sure link down is processed */
   953		flush_delayed_work(&qedf->link_update);
   954		msleep(500);
   955	
   956		atomic_set(&qedf->link_state, QEDF_LINK_UP);
   957		qedf->vlan_id  = 0;
   958		QEDF_INFO(&qedf->dbg_ctx, QEDF_LOG_DISC,
   959			  "Queue link up work.\n");
   960		queue_delayed_work(qedf->link_update_wq, &qedf->link_update,
   961		    0);
   962		clear_bit(QEDF_STAG_IN_PROGRESS, &qedf->flags);
   963	}
   964
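
The robot's warning is valid: in the NPIV branch added at line 923, qedf is
dereferenced before it is ever assigned (the assignment only happens at line
928). One possible way to resolve it would be to derive qedf before the vport
check, as sketched below. This is an illustration only, not necessarily the
fix ultimately applied, and it assumes lport_priv() yields a qedf_ctx whose
flags are the ones being polled; if the flag actually lives on the base port's
context, that context would have to be resolved from the parent vport instead.

	/* Sketch only: assign qedf before the NPIV check so the flag can
	 * be cleared safely instead of through an uninitialized pointer. */
	struct qedf_ctx *qedf = lport_priv(lport);
	struct qed_link_output if_link;

	if (lport->vport) {
		clear_bit(QEDF_STAG_IN_PROGRESS, &qedf->flags);
		printk_ratelimited("Cannot issue host reset on NPIV port.\n");
		return;
	}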

Patch

diff --git a/drivers/scsi/qedf/qedf.h b/drivers/scsi/qedf/qedf.h
index 5058e01b65a2..98afdfe63600 100644
--- a/drivers/scsi/qedf/qedf.h
+++ b/drivers/scsi/qedf/qedf.h
@@ -363,6 +363,7 @@  struct qedf_ctx {
 #define QEDF_IN_RECOVERY		5
 #define QEDF_DBG_STOP_IO		6
 #define QEDF_PROBING			8
+#define QEDF_STAG_IN_PROGRESS		9
 	unsigned long flags; /* Miscellaneous state flags */
 	int fipvlan_retries;
 	u8 num_queues;
diff --git a/drivers/scsi/qedf/qedf_main.c b/drivers/scsi/qedf/qedf_main.c
index e882aec86765..d441054dadb7 100644
--- a/drivers/scsi/qedf/qedf_main.c
+++ b/drivers/scsi/qedf/qedf_main.c
@@ -318,11 +318,18 @@  static struct fc_seq *qedf_elsct_send(struct fc_lport *lport, u32 did,
 	 */
 	if (resp == fc_lport_flogi_resp) {
 		qedf->flogi_cnt++;
+		qedf->flogi_pending++;
+
+		if (test_bit(QEDF_UNLOADING, &qedf->flags)) {
+			QEDF_ERR(&qedf->dbg_ctx, "Driver unloading\n");
+			qedf->flogi_pending = 0;
+		}
+
 		if (qedf->flogi_pending >= QEDF_FLOGI_RETRY_CNT) {
 			schedule_delayed_work(&qedf->stag_work, 2);
 			return NULL;
 		}
-		qedf->flogi_pending++;
+
 		return fc_elsct_send(lport, did, fp, op, qedf_flogi_resp,
 		    arg, timeout);
 	}
@@ -913,6 +920,7 @@  void qedf_ctx_soft_reset(struct fc_lport *lport)
 	struct qed_link_output if_link;
 
 	if (lport->vport) {
+		clear_bit(QEDF_STAG_IN_PROGRESS, &qedf->flags);
 		printk_ratelimited("Cannot issue host reset on NPIV port.\n");
 		return;
 	}
@@ -938,6 +946,7 @@  void qedf_ctx_soft_reset(struct fc_lport *lport)
 	if (!if_link.link_up) {
 		QEDF_INFO(&qedf->dbg_ctx, QEDF_LOG_DISC,
 			  "Physical link is not up.\n");
+		clear_bit(QEDF_STAG_IN_PROGRESS, &qedf->flags);
 		return;
 	}
 	/* Flush and wait to make sure link down is processed */
@@ -950,6 +959,7 @@  void qedf_ctx_soft_reset(struct fc_lport *lport)
 		  "Queue link up work.\n");
 	queue_delayed_work(qedf->link_update_wq, &qedf->link_update,
 	    0);
+	clear_bit(QEDF_STAG_IN_PROGRESS, &qedf->flags);
 }
 
 /* Reset the host by gracefully logging out and then logging back in */
@@ -3721,6 +3731,7 @@  static void __qedf_remove(struct pci_dev *pdev, int mode)
 {
 	struct qedf_ctx *qedf;
 	int rc;
+	int cnt = 0;
 
 	if (!pdev) {
 		QEDF_ERR(NULL, "pdev is NULL.\n");
@@ -3738,6 +3749,17 @@  static void __qedf_remove(struct pci_dev *pdev, int mode)
 		return;
 	}
 
+stag_in_prog:
+	if (test_bit(QEDF_STAG_IN_PROGRESS, &qedf->flags)) {
+		QEDF_ERR(&qedf->dbg_ctx, "Stag in progress, cnt=%d.\n", cnt);
+		cnt++;
+
+		if (cnt < 5) {
+			msleep(500);
+			goto stag_in_prog;
+		}
+	}
+
 	if (mode != QEDF_MODE_RECOVERY)
 		set_bit(QEDF_UNLOADING, &qedf->flags);
 
@@ -4013,6 +4035,8 @@  void qedf_stag_change_work(struct work_struct *work)
 		return;
 	}
 
+	set_bit(QEDF_STAG_IN_PROGRESS, &qedf->flags);
+
 	printk_ratelimited("[%s]:[%s:%d]:%d: Performing software context reset.",
 			dev_name(&qedf->pdev->dev), __func__, __LINE__,
 			qedf->dbg_ctx.host_no);