
[v4,06/13] iommu/arm-smmu-v3: Add context descriptor tables allocators

Message ID 20191219163033.2608177-7-jean-philippe@linaro.org
State New
Series [v4,01/13] iommu/arm-smmu-v3: Drop __GFP_ZERO flag from DMA allocation

Commit Message

Jean-Philippe Brucker Dec. 19, 2019, 4:30 p.m. UTC
Support for SSID will require allocating context descriptor tables. Move
the context descriptor allocation to separate functions.

Reviewed-by: Eric Auger <eric.auger@redhat.com>
Reviewed-by: Jonathan Cameron <Jonathan.Cameron@huawei.com>
Signed-off-by: Jean-Philippe Brucker <jean-philippe@linaro.org>

---
 drivers/iommu/arm-smmu-v3.c | 57 ++++++++++++++++++++++++++++++-------
 1 file changed, 46 insertions(+), 11 deletions(-)

-- 
2.24.1

Comments

Will Deacon Jan. 14, 2020, 11:06 a.m. UTC | #1
On Thu, Dec 19, 2019 at 05:30:26PM +0100, Jean-Philippe Brucker wrote:
> Support for SSID will require allocating context descriptor tables. Move
> the context descriptor allocation to separate functions.
> 
> Reviewed-by: Eric Auger <eric.auger@redhat.com>
> Reviewed-by: Jonathan Cameron <Jonathan.Cameron@huawei.com>
> Signed-off-by: Jean-Philippe Brucker <jean-philippe@linaro.org>
> ---
>  drivers/iommu/arm-smmu-v3.c | 57 ++++++++++++++++++++++++++++++-------
>  1 file changed, 46 insertions(+), 11 deletions(-)
> 
> diff --git a/drivers/iommu/arm-smmu-v3.c b/drivers/iommu/arm-smmu-v3.c
> index b287e303b1d7..43d6a7ded6e4 100644
> --- a/drivers/iommu/arm-smmu-v3.c
> +++ b/drivers/iommu/arm-smmu-v3.c
> @@ -568,6 +568,7 @@ struct arm_smmu_cd_table {
>  struct arm_smmu_s1_cfg {
>  	struct arm_smmu_cd_table	table;
>  	struct arm_smmu_ctx_desc	cd;
> +	u8				s1cdmax;
>  };
>  
>  struct arm_smmu_s2_cfg {
> @@ -1455,6 +1456,31 @@ static int arm_smmu_cmdq_issue_sync(struct arm_smmu_device *smmu)
>  }
>  
>  /* Context descriptor manipulation functions */
> +static int arm_smmu_alloc_cd_leaf_table(struct arm_smmu_device *smmu,
> +					struct arm_smmu_cd_table *table,
> +					size_t num_entries)
> +{
> +	size_t size = num_entries * (CTXDESC_CD_DWORDS << 3);
> +
> +	table->ptr = dmam_alloc_coherent(smmu->dev, size, &table->ptr_dma,
> +					 GFP_KERNEL);
> +	if (!table->ptr) {
> +		dev_warn(smmu->dev,
> +			 "failed to allocate context descriptor table\n");
> +		return -ENOMEM;
> +	}
> +	return 0;
> +}
> +
> +static void arm_smmu_free_cd_leaf_table(struct arm_smmu_device *smmu,
> +					struct arm_smmu_cd_table *table,
> +					size_t num_entries)
> +{
> +	size_t size = num_entries * (CTXDESC_CD_DWORDS << 3);
> +
> +	dmam_free_coherent(smmu->dev, size, table->ptr, table->ptr_dma);
> +}

I think we'd be better off taking the 'arm_smmu_s1_cfg' as a parameter here
instead of the table pointer and a num_entries value, since the code above
implies that we support partial freeing of the context descriptors.

I can do that as a follow-up patch if you agree. Thoughts?

Will
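
(For illustration: one way the follow-up Will describes could look, with the
leaf free helper taking the whole 'arm_smmu_s1_cfg' rather than a table
pointer and entry count. This is only a sketch built from the structures in
this patch, not code from the series; each CD occupies CTXDESC_CD_DWORDS
64-bit words, i.e. CTXDESC_CD_DWORDS << 3 bytes, and the table holds one CD
per SSID, so 1 << s1cdmax entries.)

static void arm_smmu_free_cd_leaf_table(struct arm_smmu_device *smmu,
					struct arm_smmu_s1_cfg *cfg)
{
	/* Whole-table free: (1 << s1cdmax) CDs of CTXDESC_CD_DWORDS << 3 bytes each */
	size_t size = (1 << cfg->s1cdmax) * (CTXDESC_CD_DWORDS << 3);

	dmam_free_coherent(smmu->dev, size, cfg->table.ptr,
			   cfg->table.ptr_dma);
}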
Will Deacon Jan. 14, 2020, 11:56 a.m. UTC | #2
On Tue, Jan 14, 2020 at 12:52:30PM +0100, Jean-Philippe Brucker wrote:
> On Tue, Jan 14, 2020 at 11:06:52AM +0000, Will Deacon wrote:
> > >  /* Context descriptor manipulation functions */
> > > +static int arm_smmu_alloc_cd_leaf_table(struct arm_smmu_device *smmu,
> > > +					struct arm_smmu_cd_table *table,
> > > +					size_t num_entries)
> > > +{
> > > +	size_t size = num_entries * (CTXDESC_CD_DWORDS << 3);
> > > +
> > > +	table->ptr = dmam_alloc_coherent(smmu->dev, size, &table->ptr_dma,
> > > +					 GFP_KERNEL);
> > > +	if (!table->ptr) {
> > > +		dev_warn(smmu->dev,
> > > +			 "failed to allocate context descriptor table\n");
> > > +		return -ENOMEM;
> > > +	}
> > > +	return 0;
> > > +}
> > > +
> > > +static void arm_smmu_free_cd_leaf_table(struct arm_smmu_device *smmu,
> > > +					struct arm_smmu_cd_table *table,
> > > +					size_t num_entries)
> > > +{
> > > +	size_t size = num_entries * (CTXDESC_CD_DWORDS << 3);
> > > +
> > > +	dmam_free_coherent(smmu->dev, size, table->ptr, table->ptr_dma);
> > > +}
> > 
> > I think we'd be better off taking the 'arm_smmu_s1_cfg' as a parameter here
> > instead of the table pointer and a num_entries value, since the code above
> > implies that we support partial freeing of the context descriptors.
> > 
> > I can do that as a follow-up patch if you agree. Thoughts?
> 
> Do you mean only changing the arguments of arm_smmu_free_cd_leaf_table(),
> or arm_smmu_alloc_cd_leaf_table() as well? For free() I agree, for alloc()
> I'm not sure it would look better.

Yeah, just for free(). I'll spin something on top after I've finished
reviewing the series.

> For my tests I have a debug patch that allocates PASIDs randomly, which
> quickly consumes DMA memory for leaf tables. So I do have to free the
> leaves individually when they aren't used, but it will be easy for me to
> update.

Cool.

Will
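
(For illustration of the per-leaf case Jean-Philippe mentions, where the
(table, num_entries) form of the helpers stays useful: a sketch of a
hypothetical per-leaf release routine built on the helpers from this patch.
'arm_smmu_put_cd_leaf' and 'entries_per_leaf' are made-up names for the
example and not part of the series.)

/* Hypothetical: release a single on-demand leaf once it holds no live CDs */
static void arm_smmu_put_cd_leaf(struct arm_smmu_device *smmu,
				 struct arm_smmu_cd_table *leaf,
				 size_t entries_per_leaf)
{
	if (!leaf->ptr)
		return;

	arm_smmu_free_cd_leaf_table(smmu, leaf, entries_per_leaf);
	leaf->ptr = NULL;
}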

Patch

diff --git a/drivers/iommu/arm-smmu-v3.c b/drivers/iommu/arm-smmu-v3.c
index b287e303b1d7..43d6a7ded6e4 100644
--- a/drivers/iommu/arm-smmu-v3.c
+++ b/drivers/iommu/arm-smmu-v3.c
@@ -568,6 +568,7 @@  struct arm_smmu_cd_table {
 struct arm_smmu_s1_cfg {
 	struct arm_smmu_cd_table	table;
 	struct arm_smmu_ctx_desc	cd;
+	u8				s1cdmax;
 };
 
 struct arm_smmu_s2_cfg {
@@ -1455,6 +1456,31 @@  static int arm_smmu_cmdq_issue_sync(struct arm_smmu_device *smmu)
 }
 
 /* Context descriptor manipulation functions */
+static int arm_smmu_alloc_cd_leaf_table(struct arm_smmu_device *smmu,
+					struct arm_smmu_cd_table *table,
+					size_t num_entries)
+{
+	size_t size = num_entries * (CTXDESC_CD_DWORDS << 3);
+
+	table->ptr = dmam_alloc_coherent(smmu->dev, size, &table->ptr_dma,
+					 GFP_KERNEL);
+	if (!table->ptr) {
+		dev_warn(smmu->dev,
+			 "failed to allocate context descriptor table\n");
+		return -ENOMEM;
+	}
+	return 0;
+}
+
+static void arm_smmu_free_cd_leaf_table(struct arm_smmu_device *smmu,
+					struct arm_smmu_cd_table *table,
+					size_t num_entries)
+{
+	size_t size = num_entries * (CTXDESC_CD_DWORDS << 3);
+
+	dmam_free_coherent(smmu->dev, size, table->ptr, table->ptr_dma);
+}
+
 static u64 arm_smmu_cpu_tcr_to_cd(u64 tcr)
 {
 	u64 val = 0;
@@ -1502,6 +1528,23 @@  static void arm_smmu_write_ctx_desc(struct arm_smmu_device *smmu,
 	cdptr[3] = cpu_to_le64(cfg->cd.mair);
 }
 
+static int arm_smmu_alloc_cd_tables(struct arm_smmu_domain *smmu_domain)
+{
+	struct arm_smmu_device *smmu = smmu_domain->smmu;
+	struct arm_smmu_s1_cfg *cfg = &smmu_domain->s1_cfg;
+
+	return arm_smmu_alloc_cd_leaf_table(smmu, &cfg->table,
+					    1 << cfg->s1cdmax);
+}
+
+static void arm_smmu_free_cd_tables(struct arm_smmu_domain *smmu_domain)
+{
+	struct arm_smmu_device *smmu = smmu_domain->smmu;
+	struct arm_smmu_s1_cfg *cfg = &smmu_domain->s1_cfg;
+
+	arm_smmu_free_cd_leaf_table(smmu, &cfg->table, 1 << cfg->s1cdmax);
+}
+
 /* Stream table manipulation functions */
 static void
 arm_smmu_write_strtab_l1_desc(__le64 *dst, struct arm_smmu_strtab_l1_desc *desc)
@@ -2145,11 +2188,7 @@  static void arm_smmu_domain_free(struct iommu_domain *domain)
 		struct arm_smmu_s1_cfg *cfg = &smmu_domain->s1_cfg;
 
 		if (cfg->table.ptr) {
-			dmam_free_coherent(smmu_domain->smmu->dev,
-					   CTXDESC_CD_DWORDS << 3,
-					   cfg->table.ptr,
-					   cfg->table.ptr_dma);
-
+			arm_smmu_free_cd_tables(smmu_domain);
 			arm_smmu_bitmap_free(smmu->asid_map, cfg->cd.asid);
 		}
 	} else {
@@ -2173,13 +2212,9 @@  static int arm_smmu_domain_finalise_s1(struct arm_smmu_domain *smmu_domain,
 	if (asid < 0)
 		return asid;
 
-	cfg->table.ptr = dmam_alloc_coherent(smmu->dev, CTXDESC_CD_DWORDS << 3,
-					     &cfg->table.ptr_dma, GFP_KERNEL);
-	if (!cfg->table.ptr) {
-		dev_warn(smmu->dev, "failed to allocate context descriptor\n");
-		ret = -ENOMEM;
+	ret = arm_smmu_alloc_cd_tables(smmu_domain);
+	if (ret)
 		goto out_free_asid;
-	}
 
 	cfg->cd.asid	= (u16)asid;
 	cfg->cd.ttbr	= pgtbl_cfg->arm_lpae_s1_cfg.ttbr[0];