@@ -308,23 +308,24 @@ void fdls_schedule_oxid_free_retry_work(struct work_struct *work)
 	struct fnic *fnic = iport->fnic;
 	struct reclaim_entry_s *reclaim_entry;
 	unsigned long delay_j = msecs_to_jiffies(OXID_RECLAIM_TOV(iport));
+	unsigned long flags;
 	int idx;
 
-	spin_lock_irqsave(&fnic->fnic_lock, fnic->lock_flags);
+	spin_lock_irqsave(&fnic->fnic_lock, flags);
 	for_each_set_bit(idx, oxid_pool->pending_schedule_free, FNIC_OXID_POOL_SZ) {
 		FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
 			     "Schedule oxid free. oxid idx: %d\n", idx);
-		spin_unlock_irqrestore(&fnic->fnic_lock, fnic->lock_flags);
+		spin_unlock_irqrestore(&fnic->fnic_lock, flags);
 		reclaim_entry = kzalloc(sizeof(*reclaim_entry), GFP_KERNEL);
-		spin_lock_irqsave(&fnic->fnic_lock, fnic->lock_flags);
+		spin_lock_irqsave(&fnic->fnic_lock, flags);
 		if (!reclaim_entry) {
 			schedule_delayed_work(&oxid_pool->schedule_oxid_free_retry,
 				msecs_to_jiffies(SCHEDULE_OXID_FREE_RETRY_TIME));
-			spin_unlock_irqrestore(&fnic->fnic_lock, fnic->lock_flags);
+			spin_unlock_irqrestore(&fnic->fnic_lock, flags);
 			return;
 		}
@@ -339,7 +340,7 @@ void fdls_schedule_oxid_free_retry_work(struct work_struct *work)
 		}
 	}
-	spin_unlock_irqrestore(&fnic->fnic_lock, fnic->lock_flags);
+	spin_unlock_irqrestore(&fnic->fnic_lock, flags);
 }
 
 static bool fdls_is_oxid_fabric_req(uint16_t oxid)
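
Background for the change above (not part of the patch itself): spin_lock_irqsave() stores the caller's interrupt state in its flags argument, so that storage must belong to the caller, normally an on-stack variable. Keeping it in a shared per-adapter field such as fnic->lock_flags lets two contexts that take the lock overwrite each other's saved state and restore the wrong IRQ flags on unlock. Below is a minimal sketch of the on-stack pattern the patch switches to; my_dev and my_dev_touch are made-up names for illustration, not anything from the fnic driver.

/* Sketch only: each caller keeps the saved IRQ state on its own stack. */
#include <linux/spinlock.h>
#include <linux/types.h>

struct my_dev {
	spinlock_t lock;
	u32 counter;
};

static void my_dev_touch(struct my_dev *dev)
{
	unsigned long flags;	/* saved IRQ state, private to this call */

	spin_lock_irqsave(&dev->lock, flags);
	dev->counter++;
	spin_unlock_irqrestore(&dev->lock, flags);
}

Because the variable is local to each invocation, the retry worker above can also drop and retake fnic_lock around the GFP_KERNEL allocation without depending on a field that another path may have rewritten in the meantime.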