@@ -101,6 +101,7 @@ struct iommu_dev_data {
bool pri_tlp; /* PASID TLB required for
PPR completions */
u32 errata; /* Bitmap for errata to apply */
+ u32 guest_mode; /* Device is attached to a guest-owned (vAPIC) domain */
};
/*
@@ -3145,6 +3146,10 @@ static void amd_iommu_detach_device(struct iommu_domain *dom,
if (!iommu)
return;
+ if ((amd_iommu_guest_ir >= AMD_IOMMU_GUEST_IR_GA) &&
+ (dom->type == IOMMU_DOMAIN_UNMANAGED))
+ dev_data->guest_mode = 0;
+
iommu_completion_wait(iommu);
}
@@ -3170,6 +3175,13 @@ static int amd_iommu_attach_device(struct iommu_domain *dom,
ret = attach_device(dev, domain);
+ if (amd_iommu_guest_ir >= AMD_IOMMU_GUEST_IR_GA) {
+ if (dom->type == IOMMU_DOMAIN_UNMANAGED)
+ dev_data->guest_mode = 1;
+ else
+ dev_data->guest_mode = 0;
+ }
+
iommu_completion_wait(iommu);
return ret;
@@ -3698,20 +3710,6 @@ EXPORT_SYMBOL(amd_iommu_device_info);
*
*****************************************************************************/
-struct irq_2_irte {
- u16 devid; /* Device ID for IRTE table */
- u16 index; /* Index into IRTE table*/
-};
-
-struct amd_ir_data {
- struct irq_2_irte irq_2_irte;
- union irte irte_entry;
- struct irte_ga irte_ga_entry;
- union {
- struct msi_msg msi_entry;
- };
-};
-
static struct irq_chip amd_ir_chip;
#define DTE_IRQ_PHYS_ADDR_MASK (((1ULL << 45)-1) << 6)
@@ -4067,6 +4065,7 @@ static void irq_remapping_prepare_irte(struct amd_ir_data *data,
struct irq_2_irte *irte_info = &data->irq_2_irte;
struct msi_msg *msg = &data->msi_entry;
struct IO_APIC_route_entry *entry;
+ struct iommu_dev_data *dev_data = search_dev_data(devid);
data->irq_2_irte.devid = devid;
data->irq_2_irte.index = index + sub_handle;
@@ -4086,7 +4085,8 @@ static void irq_remapping_prepare_irte(struct amd_ir_data *data,
irte->lo.val = 0;
irte->hi.val = 0;
- irte->lo.fields_remap.guest_mode = 0;
+ irte->lo.fields_remap.guest_mode = dev_data ?
+ dev_data->guest_mode : 0;
irte->lo.fields_remap.int_type = apic->irq_delivery_mode;
irte->lo.fields_remap.dm = apic->irq_dest_mode;
irte->hi.fields.vector = irq_cfg->vector;
@@ -4259,6 +4259,70 @@ static struct irq_domain_ops amd_ir_domain_ops = {
.deactivate = irq_remapping_deactivate,
};
+static int amd_ir_set_vcpu_affinity(struct irq_data *data, void *vcpu_info)
+{
+ unsigned long flags;
+ struct amd_iommu *iommu;
+ struct amd_iommu_pi_data *pi_data = vcpu_info;
+ struct vcpu_data *vcpu_pi_info = pi_data->vcpu_data;
+ struct amd_ir_data *ir_data = data->chip_data;
+ struct irte_ga *irte = &ir_data->irte_ga_entry;
+ struct irq_2_irte *irte_info = &ir_data->irq_2_irte;
+ struct iommu_dev_data *dev_data = search_dev_data(irte_info->devid);
+
+ /* Note:
+ * This device has never been set up for guest mode.
+ * We should not modify the IRTE.
+ */
+ if (!dev_data || !dev_data->guest_mode)
+ return 0;
+
+ /* Note:
+ * SVM is trying to set up GA (vAPIC) mode, but the IOMMU is
+ * running in legacy interrupt remapping mode, so fall back to
+ * legacy mode.
+ */
+ if (amd_iommu_guest_ir < AMD_IOMMU_GUEST_IR_GA) {
+ pr_debug("AMD-Vi: %s: Fall back to using intr legacy remap\n",
+ __func__);
+ vcpu_pi_info = NULL;
+ }
+
+ iommu = amd_iommu_rlookup_table[irte_info->devid];
+ if (iommu == NULL)
+ return -EINVAL;
+
+ spin_lock_irqsave(&iommu->ga_hash_lock, flags);
+
+ if (vcpu_pi_info) {
+ /* Setting: switch the IRTE to guest (vAPIC) mode */
+ irte->hi.fields.vector = vcpu_pi_info->vector;
+ irte->lo.fields_vapic.guest_mode = 1;
+ irte->lo.fields_vapic.ga_tag =
+ AMD_IOMMU_GATAG(pi_data->avic_tag, pi_data->vcpu_id);
+
+ if (!hash_hashed(&ir_data->hnode))
+ hash_add(iommu->ga_hash, &ir_data->hnode,
+ (u16)(irte->lo.fields_vapic.ga_tag));
+ } else {
+ /* Un-setting: restore the legacy remapped IRTE */
+ struct irq_cfg *cfg = irqd_cfg(data);
+
+ irte->hi.val = 0;
+ irte->lo.val = 0;
+ irte->hi.fields.vector = cfg->vector;
+ irte->lo.fields_remap.guest_mode = 0;
+ irte->lo.fields_remap.destination = cfg->dest_apicid;
+ irte->lo.fields_remap.int_type = apic->irq_delivery_mode;
+ irte->lo.fields_remap.dm = apic->irq_dest_mode;
+
+ hash_del(&ir_data->hnode);
+ }
+
+ spin_unlock_irqrestore(&iommu->ga_hash_lock, flags);
+
+ return modify_irte_ga(irte_info->devid, irte_info->index, irte);
+}
+
static int amd_ir_set_affinity(struct irq_data *data,
const struct cpumask *mask, bool force)
{
@@ -4266,6 +4330,7 @@ static int amd_ir_set_affinity(struct irq_data *data,
struct irq_2_irte *irte_info = &ir_data->irq_2_irte;
struct irq_cfg *cfg = irqd_cfg(data);
struct irq_data *parent = data->parent_data;
+ struct iommu_dev_data *dev_data = search_dev_data(irte_info->devid);
int ret;
ret = parent->chip->irq_set_affinity(parent, mask, force);
@@ -4281,7 +4346,7 @@ static int amd_ir_set_affinity(struct irq_data *data,
ir_data->irte_entry.fields.destination = cfg->dest_apicid;
modify_irte(irte_info->devid, irte_info->index,
ir_data->irte_entry);
- } else {
+ } else if (!dev_data || !dev_data->guest_mode) {
struct irte_ga *entry = &ir_data->irte_ga_entry;
entry->hi.fields.vector = cfg->vector;
@@ -4311,6 +4376,7 @@ static struct irq_chip amd_ir_chip = {
.name = "AMD-IR-IRQ-CHIP",
.irq_ack = ir_ack_apic_edge,
.irq_set_affinity = amd_ir_set_affinity,
+ .irq_set_vcpu_affinity = amd_ir_set_vcpu_affinity,
.irq_compose_msi_msg = ir_compose_msi_msg,
};
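
For context, the new .irq_set_vcpu_affinity hook wired up above is reached through the generic irq_set_vcpu_affinity() API. The sketch below is illustrative only and not part of this series; the function name and values are assumptions showing how a hypervisor-side caller (e.g. KVM's AVIC code) might fill struct amd_iommu_pi_data and hand it down, or tear the mapping down again by passing a NULL vcpu_data, which takes the un-setting path in amd_ir_set_vcpu_affinity().

#include <linux/amd-iommu.h>	/* struct amd_iommu_pi_data (added at the end of this diff) */
#include <linux/interrupt.h>	/* irq_set_vcpu_affinity() */
#include <asm/irq_remapping.h>	/* struct vcpu_data */

/* Illustrative caller sketch, not code from this patch. */
static int example_set_pi(int host_irq, u32 avic_tag, u32 vcpu_id,
			  struct vcpu_data *vcpu_pi)
{
	struct amd_iommu_pi_data pi_data = {
		.avic_tag  = avic_tag,
		.vcpu_id   = vcpu_id,
		.vcpu_data = vcpu_pi,	/* NULL reverts the IRTE to legacy remapping */
	};

	return irq_set_vcpu_affinity(host_irq, &pi_data);
}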
@@ -1370,6 +1370,10 @@ static int iommu_init_pci(struct amd_iommu *iommu)
!iommu_feature(iommu, FEATURE_GAM_VAPIC))
amd_iommu_guest_ir = AMD_IOMMU_GUEST_IR_LEGACY_GA;
+ if (amd_iommu_guest_ir >= AMD_IOMMU_GUEST_IR_GA) {
+ hash_init(iommu->ga_hash);
+ spin_lock_init(&iommu->ga_hash_lock);
+ }
ret = iommu_init_ga_log(iommu);
if (ret)
@@ -22,10 +22,12 @@
#include <linux/types.h>
#include <linux/mutex.h>
+#include <linux/msi.h>
#include <linux/list.h>
#include <linux/spinlock.h>
#include <linux/pci.h>
#include <linux/irqreturn.h>
+#include <linux/hashtable.h>
/*
* Maximum number of IOMMUs supported
@@ -119,6 +121,14 @@
#define MMIO_STATUS_GALOG_OVERFLOW_MASK (1 << 9)
#define MMIO_STATUS_GALOG_INT_MASK (1 << 10)
+#define AMD_IOMMU_GA_HASH_BITS 16
+#define AMD_IOMMU_GA_HASH_MASK ((1U << AMD_IOMMU_GA_HASH_BITS) - 1)
+#define AMD_IOMMU_GATAG(x, y) \
+ (((((x) & 0xFF) << 8) | ((y) & 0xFF)) & AMD_IOMMU_GA_HASH_MASK)
+
+#define GATAG_TO_AVICTAG(x) (((x) >> 8) & 0xFF)
+#define GATAG_TO_VCPUID(x) ((x) & 0xFF)
+
/* event logging constants */
#define EVENT_ENTRY_SIZE 0x10
#define EVENT_TYPE_SHIFT 28
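
A worked example of the GATAG helpers added above, using an AVIC tag of 0x12 and a vCPU ID of 0x34 (values chosen purely for illustration): the vCPU ID lands in bits 7:0 and the AVIC tag in bits 15:8, so the round trip recovers both fields.

	u32 tag  = AMD_IOMMU_GATAG(0x12, 0x34);	/* tag  == 0x1234 */
	u32 atag = GATAG_TO_AVICTAG(tag);	/* atag == 0x12 */
	u32 vcpu = GATAG_TO_VCPUID(tag);	/* vcpu == 0x34 */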
@@ -556,6 +566,8 @@ struct amd_iommu {
struct irq_domain *ir_domain;
struct irq_domain *msi_domain;
#endif
+ DECLARE_HASHTABLE(ga_hash, AMD_IOMMU_GA_HASH_BITS);
+ spinlock_t ga_hash_lock;
};
struct devid_map {
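
The per-IOMMU ga_hash added here keys struct amd_ir_data entries by their GA tag. The sketch below is a hypothetical consumer, assuming a GA-log handler elsewhere in the series (not shown in this diff) that needs to find the IRTEs registered for a tag pulled from a GA log entry; only the hash walk itself mirrors the data structures introduced above.

/* Hypothetical GA-log-side lookup, not code from this patch. */
static void example_lookup_ga_tag(struct amd_iommu *iommu, u32 ga_tag)
{
	struct amd_ir_data *ir_data;
	unsigned long flags;

	spin_lock_irqsave(&iommu->ga_hash_lock, flags);
	hash_for_each_possible(iommu->ga_hash, ir_data, hnode, ga_tag) {
		/* Buckets may collide; confirm the tag stored in the IRTE. */
		if (ir_data->irte_ga_entry.lo.fields_vapic.ga_tag != ga_tag)
			continue;
		/* e.g. signal vCPU GATAG_TO_VCPUID(ga_tag) here */
	}
	spin_unlock_irqrestore(&iommu->ga_hash_lock, flags);
}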
@@ -801,4 +813,19 @@ struct irte_ga {
union irte_ga_hi hi;
};
+struct irq_2_irte {
+ u16 devid; /* Device ID for IRTE table */
+ u16 index; /* Index into IRTE table*/
+};
+
+struct amd_ir_data {
+ struct hlist_node hnode;
+ struct irq_2_irte irq_2_irte;
+ union irte irte_entry;
+ struct irte_ga irte_ga_entry;
+ union {
+ struct msi_msg msi_entry;
+ };
+};
+
#endif /* _ASM_X86_AMD_IOMMU_TYPES_H */
@@ -22,6 +22,12 @@
#include <linux/types.h>
+struct amd_iommu_pi_data {
+ u32 vcpu_id;
+ u32 avic_tag;
+ struct vcpu_data *vcpu_data;
+};
+
#ifdef CONFIG_AMD_IOMMU
struct task_struct;