@@ -170,17 +170,14 @@ __fnic_set_state_flags(struct fnic *fnic, unsigned long st_flags,
unsigned long clearbits)
{
unsigned long flags = 0;
- unsigned long host_lock_flags = 0;
spin_lock_irqsave(&fnic->fnic_lock, flags);
- spin_lock_irqsave(fnic->lport->host->host_lock, host_lock_flags);
if (clearbits)
fnic->state_flags &= ~st_flags;
else
fnic->state_flags |= st_flags;
- spin_unlock_irqrestore(fnic->lport->host->host_lock, host_lock_flags);
spin_unlock_irqrestore(&fnic->fnic_lock, flags);
return;
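For reference, a sketch of the helper once this hunk is applied: fnic_lock alone now serializes updates to fnic->state_flags, and the nested host_lock acquire/release pair is gone. (The void return type and the blank lines are assumed from context; they are not part of the excerpt.)

    void
    __fnic_set_state_flags(struct fnic *fnic, unsigned long st_flags,
                           unsigned long clearbits)
    {
            unsigned long flags = 0;

            /* fnic_lock is now the only lock guarding fnic->state_flags */
            spin_lock_irqsave(&fnic->fnic_lock, flags);
            if (clearbits)
                    fnic->state_flags &= ~st_flags;
            else
                    fnic->state_flags |= st_flags;
            spin_unlock_irqrestore(&fnic->fnic_lock, flags);
            return;
    }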
@@ -427,14 +424,27 @@ static int fnic_queuecommand_lck(struct scsi_cmnd *sc)
int io_lock_acquired = 0;
struct fc_rport_libfc_priv *rp;
- if (unlikely(fnic_chk_state_flags_locked(fnic, FNIC_FLAGS_IO_BLOCKED)))
+ spin_lock_irqsave(&fnic->fnic_lock, flags);
+
+ if (unlikely(fnic_chk_state_flags_locked(fnic, FNIC_FLAGS_IO_BLOCKED))) {
+ spin_unlock_irqrestore(&fnic->fnic_lock, flags);
+ FNIC_SCSI_DBG(KERN_ERR, fnic->lport->host,
+ "fnic<%d>: %s: %d: fnic IO blocked flags: 0x%lx. Returning SCSI_MLQUEUE_HOST_BUSY\n",
+ fnic->fnic_num, __func__, __LINE__, fnic->state_flags);
return SCSI_MLQUEUE_HOST_BUSY;
+ }
- if (unlikely(fnic_chk_state_flags_locked(fnic, FNIC_FLAGS_FWRESET)))
+ if (unlikely(fnic_chk_state_flags_locked(fnic, FNIC_FLAGS_FWRESET))) {
+ spin_unlock_irqrestore(&fnic->fnic_lock, flags);
+ FNIC_SCSI_DBG(KERN_ERR, fnic->lport->host,
+ "fnic<%d>: %s: %d: fnic flags: 0x%lx. Returning SCSI_MLQUEUE_HOST_BUSY\n",
+ fnic->fnic_num, __func__, __LINE__, fnic->state_flags);
return SCSI_MLQUEUE_HOST_BUSY;
+ }
rport = starget_to_rport(scsi_target(sc->device));
if (!rport) {
+ spin_unlock_irqrestore(&fnic->fnic_lock, flags);
FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host,
"returning DID_NO_CONNECT for IO as rport is NULL\n");
sc->result = DID_NO_CONNECT << 16;
@@ -444,6 +454,7 @@ static int fnic_queuecommand_lck(struct scsi_cmnd *sc)
ret = fc_remote_port_chkready(rport);
if (ret) {
+ spin_unlock_irqrestore(&fnic->fnic_lock, flags);
FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host,
"rport is not ready\n");
atomic64_inc(&fnic_stats->misc_stats.rport_not_ready);
@@ -454,6 +465,7 @@ static int fnic_queuecommand_lck(struct scsi_cmnd *sc)
rp = rport->dd_data;
if (!rp || rp->rp_state == RPORT_ST_DELETE) {
+ spin_unlock_irqrestore(&fnic->fnic_lock, flags);
FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host,
"rport 0x%x removed, returning DID_NO_CONNECT\n",
rport->port_id);
@@ -465,6 +477,7 @@ static int fnic_queuecommand_lck(struct scsi_cmnd *sc)
}
if (rp->rp_state != RPORT_ST_READY) {
+ spin_unlock_irqrestore(&fnic->fnic_lock, flags);
FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host,
"rport 0x%x in state 0x%x, returning DID_IMM_RETRY\n",
rport->port_id, rp->rp_state);
@@ -474,17 +487,17 @@ static int fnic_queuecommand_lck(struct scsi_cmnd *sc)
return 0;
}
- if (lp->state != LPORT_ST_READY || !(lp->link_up))
+ if (lp->state != LPORT_ST_READY || !(lp->link_up)) {
+ spin_unlock_irqrestore(&fnic->fnic_lock, flags);
+ FNIC_SCSI_DBG(KERN_ERR, fnic->lport->host,
+ "fnic<%d>: %s: %d: state not ready: %d/link not up: %d Returning HOST_BUSY\n",
+ fnic->fnic_num, __func__, __LINE__, lp->state, lp->link_up);
return SCSI_MLQUEUE_HOST_BUSY;
+ }
atomic_inc(&fnic->in_flight);
- /*
- * Release host lock, use driver resource specific locks from here.
- * Don't re-enable interrupts in case they were disabled prior to the
- * caller disabling them.
- */
- spin_unlock(lp->host->host_lock);
+ spin_unlock_irqrestore(&fnic->fnic_lock, flags);
fnic_priv(sc)->state = FNIC_IOREQ_NOT_INITED;
fnic_priv(sc)->flags = FNIC_NO_FLAGS;
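Taken together, the hunks above restructure the entry path of fnic_queuecommand_lck(): fnic_lock is taken once, every early-return branch (I/O blocked, firmware reset, missing/deleted/not-ready rport, lport or link not ready) drops it before bailing out, and the success path drops it only after in_flight has been incremented. A condensed sketch of the resulting shape, with the rport checks collapsed into a single comment for brevity (an illustration of the pattern, not the literal driver code):

    spin_lock_irqsave(&fnic->fnic_lock, flags);

    if (fnic_chk_state_flags_locked(fnic, FNIC_FLAGS_IO_BLOCKED) ||
        fnic_chk_state_flags_locked(fnic, FNIC_FLAGS_FWRESET)) {
            spin_unlock_irqrestore(&fnic->fnic_lock, flags);
            return SCSI_MLQUEUE_HOST_BUSY;
    }

    /* rport / lport validation: every failure branch also unlocks */

    if (lp->state != LPORT_ST_READY || !lp->link_up) {
            spin_unlock_irqrestore(&fnic->fnic_lock, flags);
            return SCSI_MLQUEUE_HOST_BUSY;
    }

    /* count the I/O while still under the lock, then switch to the
     * per-I/O and copy-WQ locks for the rest of the submission */
    atomic_inc(&fnic->in_flight);
    spin_unlock_irqrestore(&fnic->fnic_lock, flags);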
@@ -569,8 +582,6 @@ static int fnic_queuecommand_lck(struct scsi_cmnd *sc)
mempool_free(io_req, fnic->io_req_pool);
}
atomic_dec(&fnic->in_flight);
- /* acquire host lock before returning to SCSI */
- spin_lock(lp->host->host_lock);
return ret;
} else {
atomic64_inc(&fnic_stats->io_stats.active_ios);
@@ -598,8 +609,6 @@ static int fnic_queuecommand_lck(struct scsi_cmnd *sc)
spin_unlock_irqrestore(io_lock, flags);
atomic_dec(&fnic->in_flight);
- /* acquire host lock before returning to SCSI */
- spin_lock(lp->host->host_lock);
return ret;
}
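The two error-path hunks above are the counterpart of that entry change: since the function no longer drops the SCSI host lock partway through, it also must not re-acquire it before returning to the midlayer. Cleanup after a failed submission is now just the in_flight accounting (sketch of the branch shape):

    /* firmware copy-WQ post failed: io_req has already been unrolled/freed */
    atomic_dec(&fnic->in_flight);
    /* previously: spin_lock(lp->host->host_lock) before returning */
    return ret;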
@@ -1477,18 +1486,17 @@ static inline int fnic_queue_abort_io_req(struct fnic *fnic, int tag,
struct fnic_io_req *io_req)
{
struct vnic_wq_copy *wq = &fnic->hw_copy_wq[0];
- struct Scsi_Host *host = fnic->lport->host;
struct misc_stats *misc_stats = &fnic->fnic_stats.misc_stats;
unsigned long flags;
- spin_lock_irqsave(host->host_lock, flags);
+ spin_lock_irqsave(&fnic->fnic_lock, flags);
if (unlikely(fnic_chk_state_flags_locked(fnic,
FNIC_FLAGS_IO_BLOCKED))) {
- spin_unlock_irqrestore(host->host_lock, flags);
+ spin_unlock_irqrestore(&fnic->fnic_lock, flags);
return 1;
} else
atomic_inc(&fnic->in_flight);
- spin_unlock_irqrestore(host->host_lock, flags);
+ spin_unlock_irqrestore(&fnic->fnic_lock, flags);
spin_lock_irqsave(&fnic->wq_copy_lock[0], flags);
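In fnic_queue_abort_io_req() the state-flag check only decides whether in_flight may be bumped before the abort is posted to the firmware; the hunk above moves that check-and-count step from host_lock to fnic_lock, so it is now serialized against __fnic_set_state_flags() by the same lock (see the first hunk). In isolation the step is (sketch, same calls as in the hunk):

    spin_lock_irqsave(&fnic->fnic_lock, flags);
    if (unlikely(fnic_chk_state_flags_locked(fnic, FNIC_FLAGS_IO_BLOCKED))) {
            /* I/O is blocked: bail out without touching in_flight */
            spin_unlock_irqrestore(&fnic->fnic_lock, flags);
            return 1;
    }
    atomic_inc(&fnic->in_flight);
    spin_unlock_irqrestore(&fnic->fnic_lock, flags);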
@@ -1923,20 +1931,19 @@ static inline int fnic_queue_dr_io_req(struct fnic *fnic,
struct fnic_io_req *io_req)
{
struct vnic_wq_copy *wq = &fnic->hw_copy_wq[0];
- struct Scsi_Host *host = fnic->lport->host;
struct misc_stats *misc_stats = &fnic->fnic_stats.misc_stats;
struct scsi_lun fc_lun;
int ret = 0;
unsigned long intr_flags;
- spin_lock_irqsave(host->host_lock, intr_flags);
+ spin_lock_irqsave(&fnic->fnic_lock, intr_flags);
if (unlikely(fnic_chk_state_flags_locked(fnic,
FNIC_FLAGS_IO_BLOCKED))) {
- spin_unlock_irqrestore(host->host_lock, intr_flags);
+ spin_unlock_irqrestore(&fnic->fnic_lock, intr_flags);
return FAILED;
} else
atomic_inc(&fnic->in_flight);
- spin_unlock_irqrestore(host->host_lock, intr_flags);
+ spin_unlock_irqrestore(&fnic->fnic_lock, intr_flags);
spin_lock_irqsave(&fnic->wq_copy_lock[0], intr_flags);
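fnic_queue_dr_io_req() gets the identical conversion, differing only in that it returns FAILED instead of 1 when I/O is blocked. Since both functions now open-code the same check-and-count step under fnic_lock, it could be factored out roughly along these lines (hypothetical helper, not part of this patch; the name fnic_count_internal_io is made up for illustration):

    /* Hypothetical helper: bumps in_flight and returns true if internal I/O
     * may proceed, false if FNIC_FLAGS_IO_BLOCKED is set. Not in the driver. */
    static bool fnic_count_internal_io(struct fnic *fnic)
    {
            unsigned long flags;
            bool allowed;

            spin_lock_irqsave(&fnic->fnic_lock, flags);
            allowed = !fnic_chk_state_flags_locked(fnic, FNIC_FLAGS_IO_BLOCKED);
            if (allowed)
                    atomic_inc(&fnic->in_flight);
            spin_unlock_irqrestore(&fnic->fnic_lock, flags);

            return allowed;
    }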