@@ -3084,6 +3084,7 @@ _base_check_enable_msix(struct MPT3SAS_ADAPTER *ioc)
void
mpt3sas_base_free_irq(struct MPT3SAS_ADAPTER *ioc)
{
+ unsigned int irq;
struct adapter_reply_queue *reply_q, *next;

if (list_empty(&ioc->reply_queue_list))
@@ -3096,9 +3097,10 @@ mpt3sas_base_free_irq(struct MPT3SAS_ADAPTER *ioc)
continue;
}

- if (ioc->smp_affinity_enable)
- irq_set_affinity_hint(pci_irq_vector(ioc->pdev,
- reply_q->msix_index), NULL);
+ if (ioc->smp_affinity_enable) {
+ irq = pci_irq_vector(ioc->pdev, reply_q->msix_index);
+ irq_update_affinity_hint(irq, NULL);
+ }
free_irq(pci_irq_vector(ioc->pdev, reply_q->msix_index),
reply_q);
kfree(reply_q);
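
For reference, the teardown conversion above swaps irq_set_affinity_hint(..., NULL) for irq_update_affinity_hint(irq, NULL): the newer helper only updates the hint published under /proc/irq/<n>/affinity_hint and never applies a mask as the interrupt's affinity, which is what a free path wants. A minimal sketch of that pattern, using hypothetical names (my_adapter, my_queue) rather than mpt3sas code:

/* Illustrative sketch only -- the names and types here are hypothetical. */
#include <linux/interrupt.h>
#include <linux/pci.h>

struct my_queue {
	int msix_index;
};

struct my_adapter {
	struct pci_dev *pdev;
	unsigned int nr_queues;
	struct my_queue *queues;	/* also used as dev_id for the handlers */
};

static void my_adapter_free_irqs(struct my_adapter *ad)
{
	unsigned int i, irq;

	for (i = 0; i < ad->nr_queues; i++) {
		irq = pci_irq_vector(ad->pdev, ad->queues[i].msix_index);

		/* Clear the published hint; the effective affinity is untouched. */
		irq_update_affinity_hint(irq, NULL);
		free_irq(irq, &ad->queues[i]);
	}
}
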
@@ -3165,18 +3167,15 @@ _base_request_irq(struct MPT3SAS_ADAPTER *ioc, u8 index)
* @ioc: per adapter object
*
* The enduser would need to set the affinity via /proc/irq/#/smp_affinity
- *
- * It would nice if we could call irq_set_affinity, however it is not
- * an exported symbol
*/
static void
_base_assign_reply_queues(struct MPT3SAS_ADAPTER *ioc)
{
- unsigned int cpu, nr_cpus, nr_msix, index = 0;
+ unsigned int cpu, nr_cpus, nr_msix, index = 0, irq;
struct adapter_reply_queue *reply_q;
- int local_numa_node;
int iopoll_q_count = ioc->reply_queue_count -
ioc->iopoll_q_start_index;
+ const struct cpumask *mask;

if (!_base_is_controller_msix_enabled(ioc))
return;
@@ -3199,11 +3198,11 @@ _base_assign_reply_queues(struct MPT3SAS_ADAPTER *ioc)
* corresponding to high iops queues.
*/
if (ioc->high_iops_queues) {
- local_numa_node = dev_to_node(&ioc->pdev->dev);
+ mask = cpumask_of_node(dev_to_node(&ioc->pdev->dev));
for (index = 0; index < ioc->high_iops_queues;
index++) {
- irq_set_affinity_hint(pci_irq_vector(ioc->pdev,
- index), cpumask_of_node(local_numa_node));
+ irq = pci_irq_vector(ioc->pdev, index);
+ irq_set_affinity_and_hint(irq, mask);
}
}
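
On the setup side, the high-iops loop above now uses irq_set_affinity_and_hint(), which both applies the NUMA-local mask as the vector's affinity and publishes it as the affinity hint; that exported helper is presumably why the stale "It would nice if we could call irq_set_affinity" comment could be dropped earlier in this patch. A minimal sketch of the same pattern, again with hypothetical names rather than mpt3sas code:

/* Illustrative sketch only -- the names here are hypothetical. */
#include <linux/interrupt.h>
#include <linux/pci.h>
#include <linux/topology.h>

/*
 * Pin the first nr_hot MSI-X vectors to the CPUs local to the device's
 * NUMA node and advertise the same mask as the affinity hint.
 */
static void my_adapter_pin_hot_vectors(struct pci_dev *pdev,
				       unsigned int nr_hot)
{
	const struct cpumask *mask = cpumask_of_node(dev_to_node(&pdev->dev));
	unsigned int index, irq;

	for (index = 0; index < nr_hot; index++) {
		irq = pci_irq_vector(pdev, index);

		/* Sets the affinity and the /proc/irq/<n>/affinity_hint entry. */
		irq_set_affinity_and_hint(irq, mask);
	}
}
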