@@ -229,6 +229,7 @@ static int qcom_adreno_smmu_init_context(struct arm_smmu_domain *smmu_domain,
priv->get_fault_info = qcom_adreno_smmu_get_fault_info;
priv->set_stall = qcom_adreno_smmu_set_stall;
priv->resume_translation = qcom_adreno_smmu_resume_translation;
+ priv->tlb_inv_by_id = arm_smmu_tlb_inv_by_id;
return 0;
}
@@ -252,7 +252,7 @@ static void arm_smmu_tlb_sync_context(struct arm_smmu_domain *smmu_domain)
spin_unlock_irqrestore(&smmu_domain->cb_lock, flags);
}
-static void arm_smmu_tlb_inv_context_s1(void *cookie)
+static void arm_smmu_tlb_inv_context_s1_asid(void *cookie, u16 asid)
{
struct arm_smmu_domain *smmu_domain = cookie;
/*
@@ -261,21 +261,56 @@ static void arm_smmu_tlb_inv_context_s1(void *cookie)
*/
wmb();
arm_smmu_cb_write(smmu_domain->smmu, smmu_domain->cfg.cbndx,
- ARM_SMMU_CB_S1_TLBIASID, smmu_domain->cfg.asid);
+ ARM_SMMU_CB_S1_TLBIASID, asid);
arm_smmu_tlb_sync_context(smmu_domain);
}
-static void arm_smmu_tlb_inv_context_s2(void *cookie)
+static void arm_smmu_tlb_inv_context_s1(void *cookie)
+{
+ struct arm_smmu_domain *smmu_domain = cookie;
+
+ arm_smmu_tlb_inv_context_s1_asid(cookie, smmu_domain->cfg.asid);
+}
+
+static void arm_smmu_tlb_inv_context_s2_vmid(void *cookie, u16 vmid)
{
struct arm_smmu_domain *smmu_domain = cookie;
struct arm_smmu_device *smmu = smmu_domain->smmu;
/* See above */
wmb();
- arm_smmu_gr0_write(smmu, ARM_SMMU_GR0_TLBIVMID, smmu_domain->cfg.vmid);
+ arm_smmu_gr0_write(smmu, ARM_SMMU_GR0_TLBIVMID, vmid);
arm_smmu_tlb_sync_global(smmu);
}
+static void arm_smmu_tlb_inv_context_s2(void *cookie)
+{
+ struct arm_smmu_domain *smmu_domain = cookie;
+
+ arm_smmu_tlb_inv_context_s2_vmid(cookie, smmu_domain->cfg.vmid);
+}
+
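+/*
+ * Invalidate TLB entries for the given ASID (stage-1) or VMID (stage-2),
+ * taking a runtime PM reference on the SMMU around the invalidation.
+ * Exposed to the GPU driver through the adreno-smmu private interface,
+ * where the cookie is the arm_smmu_domain.
+ */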
+void arm_smmu_tlb_inv_by_id(const void *cookie, u16 id)
+{
+ struct arm_smmu_domain *smmu_domain = (void *)cookie;
+ struct arm_smmu_device *smmu = smmu_domain->smmu;
+
+ arm_smmu_rpm_get(smmu);
+ switch (smmu_domain->stage) {
+ case ARM_SMMU_DOMAIN_S1:
+ arm_smmu_tlb_inv_context_s1_asid(smmu_domain, id);
+ break;
+ case ARM_SMMU_DOMAIN_S2:
+ case ARM_SMMU_DOMAIN_NESTED:
+ arm_smmu_tlb_inv_context_s2_vmid(smmu_domain, id);
+ break;
+ case ARM_SMMU_DOMAIN_BYPASS:
+ break;
+ }
+
+ arm_smmu_rpm_put(smmu);
+}
+
static void arm_smmu_tlb_inv_range_s1(unsigned long iova, size_t size,
size_t granule, void *cookie, int reg)
{
@@ -527,6 +527,7 @@ struct arm_smmu_device *arm_smmu_impl_init(struct arm_smmu_device *smmu);
struct arm_smmu_device *nvidia_smmu_impl_init(struct arm_smmu_device *smmu);
struct arm_smmu_device *qcom_smmu_impl_init(struct arm_smmu_device *smmu);
+void arm_smmu_tlb_inv_by_id(const void *cookie, u16 id);
void arm_smmu_write_context_bank(struct arm_smmu_device *smmu, int idx);
int arm_mmu500_reset(struct arm_smmu_device *smmu);
@@ -50,6 +50,7 @@ struct adreno_smmu_fault_info {
* before set_ttbr0_cfg(). If stalling on fault is enabled,
* the GPU driver must call resume_translation()
* @resume_translation: Resume translation after a fault
+ * @tlb_inv_by_id: Flush the TLB by ASID (stage-1) or VMID (stage-2)
*
*
* The GPU driver (drm/msm) and adreno-smmu work together for controlling
@@ -69,6 +70,7 @@ struct adreno_smmu_priv {
void (*get_fault_info)(const void *cookie, struct adreno_smmu_fault_info *info);
void (*set_stall)(const void *cookie, bool enabled);
void (*resume_translation)(const void *cookie, bool terminate);
+ void (*tlb_inv_by_id)(const void *cookie, u16 id);
};
#endif /* __ADRENO_SMMU_PRIV_H */
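
Not part of the patch: an illustrative sketch of how a GPU driver could consume the new callback. The helper name gpu_flush_context_tlb and the way the ASID is tracked are hypothetical; the dev_get_drvdata() lookup mirrors how drm/msm already retrieves struct adreno_smmu_priv from the GPU device, and the NULL check covers kernels whose arm-smmu does not populate the hook.

#include <linux/adreno-smmu-priv.h>
#include <linux/device.h>

/* Hypothetical caller: flush a retired per-context ASID before it is reused. */
static void gpu_flush_context_tlb(struct device *gpu_dev, u16 asid)
{
	struct adreno_smmu_priv *adreno_smmu = dev_get_drvdata(gpu_dev);

	/* Older arm-smmu builds do not provide the callback. */
	if (!adreno_smmu || !adreno_smmu->tlb_inv_by_id)
		return;

	/* Drop any stale TLB entries for this ASID. */
	adreno_smmu->tlb_inv_by_id(adreno_smmu->cookie, asid);
}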