@@ -1527,7 +1527,7 @@ void qi_flush_iotlb(struct intel_iommu *iommu, u16 did, u64 addr,
}

void qi_flush_dev_iotlb(struct intel_iommu *iommu, u16 sid, u16 pfsid,
- u16 qdep, u64 addr, unsigned mask)
+ u16 qdep, u64 addr, unsigned mask, u32 *fault)
{
struct qi_desc desc;
@@ -1554,12 +1554,12 @@ void qi_flush_dev_iotlb(struct intel_iommu *iommu, u16 sid, u16 pfsid,
desc.qw2 = 0;
desc.qw3 = 0;

- qi_submit_sync(iommu, &desc, 1, 0, NULL);
+ qi_submit_sync(iommu, &desc, 1, 0, fault);
}
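
The new out-parameter lets a caller observe invalidation faults instead of only having them logged; every call site converted below passes NULL, so behavior is unchanged for now. A minimal sketch of a hypothetical caller, assuming the fault word carries the DMA_FSTS_* bits that qi_submit_sync() records:

	u32 fault = 0;

	qi_flush_dev_iotlb(iommu, sid, pfsid, qdep, addr, mask, &fault);

	/* IQE/ICE/ITE: queue error, completion error, or device time-out. */
	if (fault & (DMA_FSTS_IQE | DMA_FSTS_ICE | DMA_FSTS_ITE))
		pr_warn("QI fault 0x%x flushing devTLB of rid %04x\n",
			fault, sid);
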
/* PASID-based IOTLB invalidation */
void qi_flush_piotlb(struct intel_iommu *iommu, u16 did, u32 pasid, u64 addr,
- unsigned long npages, bool ih)
+ unsigned long npages, bool ih, u32 *fault)
{
struct qi_desc desc = {.qw2 = 0, .qw3 = 0};
@@ -1595,12 +1595,13 @@ void qi_flush_piotlb(struct intel_iommu *iommu, u16 did, u32 pasid, u64 addr,
QI_EIOTLB_AM(mask);
}

- qi_submit_sync(iommu, &desc, 1, 0, NULL);
+ qi_submit_sync(iommu, &desc, 1, 0, fault);
}
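
For the address-selective case above, the mask fed into QI_EIOTLB_AM() is derived earlier in the same function (outside this hunk) by rounding npages up to a power of two; npages == -1 instead selects a non-address-selective flush of the whole PASID. A worked sketch of the sizing arithmetic, assuming 4KiB pages:

	unsigned long npages = 5;
	int mask = ilog2(__roundup_pow_of_two(npages));	/* AM = 3 */
	u64 span = 1ULL << (VTD_PAGE_SHIFT + mask);	/* 8 pages = 32KiB */
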
/* PASID-based device IOTLB Invalidate */
void qi_flush_dev_iotlb_pasid(struct intel_iommu *iommu, u16 sid, u16 pfsid,
- u32 pasid, u16 qdep, u64 addr, unsigned int size_order)
+ u32 pasid, u16 qdep, u64 addr,
+ unsigned int size_order, u32 *fault)
{
unsigned long mask = 1UL << (VTD_PAGE_SHIFT + size_order - 1);
struct qi_desc desc = {.qw1 = 0, .qw2 = 0, .qw3 = 0};
@@ -1648,7 +1649,7 @@ void qi_flush_dev_iotlb_pasid(struct intel_iommu *iommu, u16 sid, u16 pfsid,
desc.qw1 |= QI_DEV_EIOTLB_SIZE;
}

- qi_submit_sync(iommu, &desc, 1, 0, NULL);
+ qi_submit_sync(iommu, &desc, 1, 0, fault);
}
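
The size_order handling in this function implements the PCIe ATS range encoding: the low address bits up to the size marker are set, the bit at VTD_PAGE_SHIFT + size_order - 1 (the mask computed at the top) is cleared, and the S bit (QI_DEV_EIOTLB_SIZE) is set so hardware reads the range from the lowest zero address bit. A worked sketch for size_order = 2:

	unsigned int size_order = 2;	/* four 4KiB pages */
	u64 qw1 = GENMASK_ULL(size_order + VTD_PAGE_SHIFT - 1,
			      VTD_PAGE_SHIFT);	/* bits 13:12 set */
	qw1 &= ~(1UL << (VTD_PAGE_SHIFT + size_order - 1)); /* clear bit 13 */
	qw1 |= QI_DEV_EIOTLB_SIZE;	/* S bit */
	/* Lowest zero bit is 13, so the flushed range is 2^(13+1) = 16KiB. */
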
void qi_flush_pasid_cache(struct intel_iommu *iommu, u16 did,
@@ -1462,7 +1462,7 @@ static void __iommu_flush_dev_iotlb(struct device_domain_info *info,
sid = info->bus << 8 | info->devfn;
qdep = info->ats_qdep;
qi_flush_dev_iotlb(info->iommu, sid, info->pfsid,
- qdep, addr, mask);
+ qdep, addr, mask, NULL);
quirk_extra_dev_tlb_flush(info, addr, mask, IOMMU_NO_PASID, qdep);
}
@@ -1490,7 +1490,7 @@ static void iommu_flush_dev_iotlb(struct dmar_domain *domain,
PCI_DEVID(info->bus, info->devfn),
info->pfsid, dev_pasid->pasid,
info->ats_qdep, addr,
- mask);
+ mask, NULL);
}
spin_unlock_irqrestore(&domain->lock, flags);
}
@@ -1505,10 +1505,10 @@ static void domain_flush_pasid_iotlb(struct intel_iommu *iommu,
spin_lock_irqsave(&domain->lock, flags);
list_for_each_entry(dev_pasid, &domain->dev_pasids, link_domain)
- qi_flush_piotlb(iommu, did, dev_pasid->pasid, addr, npages, ih);
+ qi_flush_piotlb(iommu, did, dev_pasid->pasid, addr, npages, ih, NULL);

if (!list_empty(&domain->devices))
- qi_flush_piotlb(iommu, did, IOMMU_NO_PASID, addr, npages, ih);
+ qi_flush_piotlb(iommu, did, IOMMU_NO_PASID, addr, npages, ih, NULL);
spin_unlock_irqrestore(&domain->lock, flags);
}
@@ -5195,10 +5195,10 @@ void quirk_extra_dev_tlb_flush(struct device_domain_info *info,
sid = PCI_DEVID(info->bus, info->devfn);
if (pasid == IOMMU_NO_PASID) {
qi_flush_dev_iotlb(info->iommu, sid, info->pfsid,
- qdep, address, mask);
+ qdep, address, mask, NULL);
} else {
qi_flush_dev_iotlb_pasid(info->iommu, sid, info->pfsid,
- pasid, qdep, address, mask);
+ pasid, qdep, address, mask, NULL);
}
}
@@ -866,14 +866,14 @@ void qi_flush_context(struct intel_iommu *iommu, u16 did,
void qi_flush_iotlb(struct intel_iommu *iommu, u16 did, u64 addr,
unsigned int size_order, u64 type);
void qi_flush_dev_iotlb(struct intel_iommu *iommu, u16 sid, u16 pfsid,
- u16 qdep, u64 addr, unsigned mask);
+ u16 qdep, u64 addr, unsigned mask, u32 *fault);
void qi_flush_piotlb(struct intel_iommu *iommu, u16 did, u32 pasid, u64 addr,
- unsigned long npages, bool ih);
+ unsigned long npages, bool ih, u32 *fault);
void qi_flush_dev_iotlb_pasid(struct intel_iommu *iommu, u16 sid, u16 pfsid,
u32 pasid, u16 qdep, u64 addr,
- unsigned int size_order);
+ unsigned int size_order, u32 *fault);
void quirk_extra_dev_tlb_flush(struct device_domain_info *info,
unsigned long address, unsigned long pages,
u32 pasid, u16 qdep);
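
All callers in this patch pass NULL for the new argument. A hypothetical checked wrapper (illustrative only, not part of this patch) shows how a future user, e.g. a nested-invalidation path, might turn a reported fault into an error code:

static int qi_flush_piotlb_checked(struct intel_iommu *iommu, u16 did,
				   u32 pasid, u64 addr,
				   unsigned long npages, bool ih)
{
	u32 fault = 0;

	qi_flush_piotlb(iommu, did, pasid, addr, npages, ih, &fault);

	/* Any DMA_FSTS_* bit set means the invalidation did not complete. */
	return fault ? -EIO : 0;
}
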
@@ -492,9 +492,11 @@ devtlb_invalidation_with_pasid(struct intel_iommu *iommu,
* efficient to flush devTLB specific to the PASID.
*/
if (pasid == IOMMU_NO_PASID)
- qi_flush_dev_iotlb(iommu, sid, pfsid, qdep, 0, 64 - VTD_PAGE_SHIFT);
+ qi_flush_dev_iotlb(iommu, sid, pfsid, qdep, 0,
+ 64 - VTD_PAGE_SHIFT, NULL);
else
- qi_flush_dev_iotlb_pasid(iommu, sid, pfsid, pasid, qdep, 0, 64 - VTD_PAGE_SHIFT);
+ qi_flush_dev_iotlb_pasid(iommu, sid, pfsid, pasid, qdep, 0,
+ 64 - VTD_PAGE_SHIFT, NULL);
}
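
Both branches flush the device's whole address space: addr = 0 combined with a mask of 64 - VTD_PAGE_SHIFT wildcards every address bit above the page offset. A quick sanity check of the encoding:

	unsigned int mask = 64 - VTD_PAGE_SHIFT;	/* 52 with 4KiB pages */

	/* Covered range is 2^(VTD_PAGE_SHIFT + mask) = 2^64 bytes, i.e. the
	 * entire IOVA space; the size is carried as a mask precisely because
	 * a byte count of 1ULL << 64 would not fit in 64 bits. */
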
void intel_pasid_tear_down_entry(struct intel_iommu *iommu, struct device *dev,
@@ -521,7 +523,7 @@ void intel_pasid_tear_down_entry(struct intel_iommu *iommu, struct device *dev,
pasid_cache_invalidation_with_pasid(iommu, did, pasid);
if (pgtt == PASID_ENTRY_PGTT_PT || pgtt == PASID_ENTRY_PGTT_FL_ONLY)
- qi_flush_piotlb(iommu, did, pasid, 0, -1, 0);
+ qi_flush_piotlb(iommu, did, pasid, 0, -1, 0, NULL);
else
iommu->flush.flush_iotlb(iommu, did, 0, 0, DMA_TLB_DSI_FLUSH);
@@ -543,7 +545,7 @@ static void pasid_flush_caches(struct intel_iommu *iommu,
if (cap_caching_mode(iommu->cap)) {
pasid_cache_invalidation_with_pasid(iommu, did, pasid);
- qi_flush_piotlb(iommu, did, pasid, 0, -1, 0);
+ qi_flush_piotlb(iommu, did, pasid, 0, -1, 0, NULL);
} else {
iommu_flush_write_buffer(iommu);
}
@@ -834,7 +836,7 @@ void intel_pasid_setup_page_snoop_control(struct intel_iommu *iommu,
* Addr[63:12]=0x7FFFFFFF_FFFFF) to affected functions
*/
pasid_cache_invalidation_with_pasid(iommu, did, pasid);
- qi_flush_piotlb(iommu, did, pasid, 0, -1, 0);
+ qi_flush_piotlb(iommu, did, pasid, 0, -1, 0, NULL);
/* Device IOTLB doesn't need to be flushed in caching mode. */
if (!cap_caching_mode(iommu->cap))
@@ -179,11 +179,11 @@ static void __flush_svm_range_dev(struct intel_svm *svm,
if (WARN_ON(!pages))
return;

- qi_flush_piotlb(sdev->iommu, sdev->did, svm->pasid, address, pages, ih);
+ qi_flush_piotlb(sdev->iommu, sdev->did, svm->pasid, address, pages, ih, NULL);
if (info->ats_enabled) {
qi_flush_dev_iotlb_pasid(sdev->iommu, sdev->sid, info->pfsid,
svm->pasid, sdev->qdep, address,
- order_base_2(pages));
+ order_base_2(pages), NULL);
quirk_extra_dev_tlb_flush(info, address, order_base_2(pages),
svm->pasid, sdev->qdep);
}
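
Note that the two flushes above size the region consistently: qi_flush_piotlb() rounds pages up to a power of two internally, and order_base_2(pages) passed to the devTLB flush yields the same order, so IOTLB and devTLB drop the same aligned window. For instance:

	unsigned long pages = 5;
	unsigned int order = order_base_2(pages);	/* 3: 8-page window */
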
@@ -225,11 +225,11 @@ static void intel_flush_svm_all(struct intel_svm *svm)
list_for_each_entry_rcu(sdev, &svm->devs, list) {
info = dev_iommu_priv_get(sdev->dev);
- qi_flush_piotlb(sdev->iommu, sdev->did, svm->pasid, 0, -1UL, 0);
+ qi_flush_piotlb(sdev->iommu, sdev->did, svm->pasid, 0, -1UL, 0, NULL);
if (info->ats_enabled) {
qi_flush_dev_iotlb_pasid(sdev->iommu, sdev->sid, info->pfsid,
svm->pasid, sdev->qdep,
- 0, 64 - VTD_PAGE_SHIFT);
+ 0, 64 - VTD_PAGE_SHIFT, NULL);
quirk_extra_dev_tlb_flush(info, 0, 64 - VTD_PAGE_SHIFT,
svm->pasid, sdev->qdep);
}