@@ -258,6 +258,41 @@ static void nvidia_smmu_probe_finalize(struct arm_smmu_device *smmu, struct devi
 			dev_name(dev), err);
 }
 
+static int nvidia_smmu_init_context(struct arm_smmu_domain *smmu_domain,
+				    struct io_pgtable_cfg *pgtbl_cfg,
+				    struct device *dev)
+{
+	struct arm_smmu_device *smmu = smmu_domain->smmu;
+	const struct device_node *np = smmu->dev->of_node;
+
+	/*
+	 * Tegra194 and Tegra234 SoCs have an erratum that causes walk cache
+	 * entries not to be invalidated correctly: the walk cache index
+	 * generated for an IOVA is not the same across translation and
+	 * invalidation requests. This leads to page faults when a PMD entry
+	 * is released during unmap and then repopulated with a new PTE
+	 * table on a subsequent map request. Disabling large page mappings
+	 * avoids releasing PMD entries, so translations never see a stale
+	 * PMD entry in the walk cache.
+	 * Work around this by limiting page mappings to PAGE_SIZE on
+	 * Tegra194 and Tegra234.
+	 */
+	if (of_device_is_compatible(np, "nvidia,tegra234-smmu") ||
+	    of_device_is_compatible(np, "nvidia,tegra194-smmu")) {
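+		/*
+		 * PAGE_SIZE is a power of two, so assigning it directly
+		 * yields a single-bit page size bitmap. Clamping both the
+		 * device-wide and the per-domain bitmaps restricts
+		 * io-pgtable to last-level (PTE) mappings, so unmap never
+		 * releases a PMD entry.
+		 */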
+		smmu->pgsize_bitmap = PAGE_SIZE;
+		pgtbl_cfg->pgsize_bitmap = smmu->pgsize_bitmap;
+	}
+
+	return 0;
+}
+
 static const struct arm_smmu_impl nvidia_smmu_impl = {
 	.read_reg = nvidia_smmu_read_reg,
 	.write_reg = nvidia_smmu_write_reg,
@@ -268,10 +303,17 @@ static const struct arm_smmu_impl nvidia_smmu_impl = {
 	.global_fault = nvidia_smmu_global_fault,
 	.context_fault = nvidia_smmu_context_fault,
 	.probe_finalize = nvidia_smmu_probe_finalize,
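+	/*
+	 * init_context runs while a domain's io_pgtable_cfg is being set
+	 * up, before its page tables are allocated, so the clamped page
+	 * size bitmap takes effect for every new domain.
+	 */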
+	.init_context = nvidia_smmu_init_context,
 };
 
 static const struct arm_smmu_impl nvidia_smmu_single_impl = {
 	.probe_finalize = nvidia_smmu_probe_finalize,
+	.init_context = nvidia_smmu_init_context,
 };
 
 struct arm_smmu_device *nvidia_smmu_impl_init(struct arm_smmu_device *smmu)