@@ -4,6 +4,7 @@
#include <linux/dma-map-ops.h>
#include <linux/pci.h>
+#include <linux/hyperv.h>
#include <xen/swiotlb-xen.h>
#include <asm/xen/hypervisor.h>
@@ -91,6 +92,6 @@ int pci_xen_swiotlb_init_late(void)
EXPORT_SYMBOL_GPL(pci_xen_swiotlb_init_late);
IOMMU_INIT_FINISH(pci_xen_swiotlb_detect,
- NULL,
+ hyperv_swiotlb_detect,
pci_xen_swiotlb_init,
NULL);
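For context, IOMMU_INIT_FINISH() registers an entry in the x86 .iommu_table section, and its second argument names a detection routine that must be sorted ahead of this one. So the change above does not replace Xen's detection; it only orders hyperv_swiotlb_detect() before pci_xen_swiotlb_detect(). The entry layout, roughly as defined in arch/x86/include/asm/iommu_table.h in the kernels this patch targets:

struct iommu_table_entry {
	initcall_t	detect;		/* returns non-zero if an IOMMU was found */
	initcall_t	depend;		/* detect routine that must run before ours */
	void		(*early_init)(void);	/* runs before memory allocation works */
	void		(*late_init)(void);	/* runs once allocation is available */
	int		flags;		/* IOMMU_FINISH_IF_DETECTED, IOMMU_DETECTED */
};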
@@ -23,6 +23,7 @@
#include <linux/cpu.h>
#include <linux/sched/task_stack.h>
+#include <linux/dma-map-ops.h>
#include <linux/delay.h>
#include <linux/notifier.h>
#include <linux/ptrace.h>
@@ -2080,6 +2081,7 @@ struct hv_device *vmbus_device_create(const guid_t *type,
return child_device_obj;
}
+static u64 vmbus_dma_mask = DMA_BIT_MASK(64);
/*
* vmbus_device_register - Register the child device
*/
@@ -2120,6 +2122,7 @@ int vmbus_device_register(struct hv_device *child_device_obj)
}
hv_debug_add_dev_dir(child_device_obj);
+ child_device_obj->device.dma_mask = &vmbus_dma_mask;
return 0;
err_kset_unregister:
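Setting device.dma_mask here matters because dma_map_single() and friends bail out with DMA_MAPPING_ERROR when a device has a NULL dma_mask; vmbus children never went through the DMA API before, but with swiotlb bounce buffering they must. A minimal illustration of what a child driver can now do; example_hv_probe and its buffer handling are hypothetical, not part of this patch:

#include <linux/dma-mapping.h>
#include <linux/hyperv.h>
#include <linux/slab.h>

/* Hypothetical vmbus child probe: with vmbus_dma_mask wired up above,
 * dma_map_single() on &hdev->device can succeed; with a NULL dma_mask
 * every mapping attempt would fail.
 */
static int example_hv_probe(struct hv_device *hdev,
			    const struct hv_vmbus_device_id *id)
{
	void *buf = kmalloc(PAGE_SIZE, GFP_KERNEL);
	dma_addr_t dma;

	if (!buf)
		return -ENOMEM;

	dma = dma_map_single(&hdev->device, buf, PAGE_SIZE, DMA_TO_DEVICE);
	if (dma_mapping_error(&hdev->device, dma)) {
		kfree(buf);
		return -EIO;
	}

	/* ... hand dma to the host; later dma_unmap_single() and kfree() ... */
	return 0;
}

In an Isolation VM, with swiotlb_force set by the detection code below, that dma_map_single() call lands in the bounce buffer allocated by the next hunks.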
@@ -13,14 +13,23 @@
 #include <linux/irq.h>
 #include <linux/iommu.h>
 #include <linux/module.h>
+#include <linux/hyperv.h>
+#include <linux/io.h>
+#include <linux/dma-map-ops.h>
+#include <linux/dma-direct.h>
+#include <linux/memblock.h>
+#include <linux/set_memory.h>
 
 #include <asm/apic.h>
 #include <asm/cpu.h>
 #include <asm/hw_irq.h>
 #include <asm/io_apic.h>
+#include <asm/iommu.h>
+#include <asm/iommu_table.h>
 #include <asm/irq_remapping.h>
 #include <asm/hypervisor.h>
 #include <asm/mshyperv.h>
+#include <asm/swiotlb.h>
 
 #include "irq_remapping.h"
@@ -36,6 +45,9 @@
 static cpumask_t ioapic_max_cpumask = { CPU_BITS_NONE };
 static struct irq_domain *ioapic_ir_domain;
+static void *hyperv_io_tlb_start;
+static unsigned long hyperv_io_tlb_size;
+
static int hyperv_ir_set_affinity(struct irq_data *data,
const struct cpumask *mask, bool force)
{
@@ -337,4 +349,55 @@ static const struct irq_domain_ops hyperv_root_ir_domain_ops = {
.free = hyperv_root_irq_remapping_free,
};
+void __init hyperv_iommu_swiotlb_init(void)
+{
+	/*
+	 * Allocate the Hyper-V swiotlb bounce buffer early in boot so
+	 * that a large, physically contiguous region can still be
+	 * reserved.
+	 */
+	hyperv_io_tlb_size = 200 * 1024 * 1024;
+	hyperv_io_tlb_start = memblock_alloc_low(PAGE_ALIGN(hyperv_io_tlb_size),
+						 HV_HYP_PAGE_SIZE);
+
+	if (!hyperv_io_tlb_start) {
+		pr_warn("Failed to allocate Hyper-V swiotlb buffer.\n");
+		return;
+	}
+}
+
+int __init hyperv_swiotlb_detect(void)
+{
+	if (hypervisor_is_type(X86_HYPER_MS_HYPERV) &&
+	    hv_is_isolation_supported()) {
+		/*
+		 * Force swiotlb in an Isolation VM so that all DMA goes
+		 * through the bounce buffer.
+		 */
+		swiotlb_force = SWIOTLB_FORCE;
+		return 1;
+	}
+
+	return 0;
+}
+
+void __init hyperv_iommu_swiotlb_later_init(void)
+{
+	/* Nothing to map if the early allocation failed. */
+	if (!hyperv_io_tlb_start)
+		return;
+
+	/*
+	 * The swiotlb bounce buffer needs to be mapped in the extra
+	 * address space, but mapping functions don't work this early,
+	 * so call swiotlb_late_init_with_tbl() here instead.
+	 */
+	swiotlb_late_init_with_tbl(hyperv_io_tlb_start,
+				   hyperv_io_tlb_size >> IO_TLB_SHIFT);
+}
+
+IOMMU_INIT_FINISH(hyperv_swiotlb_detect,
+ NULL, hyperv_iommu_swiotlb_init,
+ hyperv_iommu_swiotlb_later_init);
+
#endif
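Putting the three hooks together: the x86 boot code walks the iommu table twice, once before the page allocator is up (so only memblock is usable) and once from a rootfs_initcall. Roughly what arch/x86/kernel/pci-dma.c does, as a simplified paraphrase rather than verbatim kernel code:

/* Early, from pci_iommu_alloc(), after the table has been sorted: */
for (p = __iommu_table; p < __iommu_table_end; p++) {
	if (p->detect && p->detect() > 0) {	/* hyperv_swiotlb_detect() */
		p->flags |= IOMMU_DETECTED;
		if (p->early_init)
			p->early_init();	/* hyperv_iommu_swiotlb_init() */
		if (p->flags & IOMMU_FINISH_IF_DETECTED)
			break;		/* set by IOMMU_INIT_FINISH() */
	}
}

/* Much later, from pci_iommu_init(), a rootfs_initcall: */
for (p = __iommu_table; p < __iommu_table_end; p++) {
	if ((p->flags & IOMMU_DETECTED) && p->late_init)
		p->late_init();	/* hyperv_iommu_swiotlb_later_init() */
}

This split is exactly why the memblock allocation lives in the early hook while the extra-address-space mapping waits for the late one.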
@@ -1759,6 +1759,7 @@ int hyperv_write_cfg_blk(struct pci_dev *dev, void *buf, unsigned int len,
int hyperv_reg_block_invalidate(struct pci_dev *dev, void *context,
void (*block_invalidate)(void *context,
u64 block_mask));
+int __init hyperv_swiotlb_detect(void);
struct hyperv_pci_block_ops {
int (*read_block)(struct pci_dev *dev, void *buf, unsigned int buf_len,
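One build caveat: pci-swiotlb.c now calls hyperv_swiotlb_detect() unconditionally, so configurations that build that file without the Hyper-V code need a fallback definition. A sketch of the guard this header would want; the exact Kconfig condition is an assumption here, not shown in the patch:

#if IS_ENABLED(CONFIG_HYPERV)
int __init hyperv_swiotlb_detect(void);
#else
static inline int hyperv_swiotlb_detect(void)
{
	/* No Hyper-V support built in, so never force swiotlb. */
	return 0;
}
#endif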