Message ID | 20210805080724.480-3-shameerali.kolothum.thodi@huawei.com |
---|---|
State | New |
Headers | show |
Series | ACPI/IORT: Support for IORT RMR node | expand |
On Thu, Aug 05, 2021 at 09:07:17AM +0100, Shameer Kolothum wrote: [...] > +static void __init iort_node_get_rmr_info(struct acpi_iort_node *iort_node) > +{ > + struct acpi_iort_node *smmu; > + struct acpi_iort_rmr *rmr; > + struct acpi_iort_rmr_desc *rmr_desc; > + u32 map_count = iort_node->mapping_count; > + u32 sid; > + int i; > + > + if (!iort_node->mapping_offset || map_count != 1) { > + pr_err(FW_BUG "Invalid ID mapping, skipping RMR node %p\n", > + iort_node); > + return; > + } > + > + /* Retrieve associated smmu and stream id */ > + smmu = iort_node_get_id(iort_node, &sid, 0); > + if (!smmu) { > + pr_err(FW_BUG "Invalid SMMU reference, skipping RMR node %p\n", > + iort_node); > + return; > + } > + > + /* Retrieve RMR data */ > + rmr = (struct acpi_iort_rmr *)iort_node->node_data; > + if (!rmr->rmr_offset || !rmr->rmr_count) { > + pr_err(FW_BUG "Invalid RMR descriptor array, skipping RMR node %p\n", > + iort_node); > + return; > + } > + > + rmr_desc = ACPI_ADD_PTR(struct acpi_iort_rmr_desc, iort_node, > + rmr->rmr_offset); > + > + iort_rmr_desc_check_overlap(rmr_desc, rmr->rmr_count); > + > + for (i = 0; i < rmr->rmr_count; i++, rmr_desc++) { > + struct iommu_resv_region *region; > + enum iommu_resv_type type; > + int prot = IOMMU_READ | IOMMU_WRITE; > + u64 addr = rmr_desc->base_address, size = rmr_desc->length; > + > + if (!IS_ALIGNED(addr, SZ_64K) || !IS_ALIGNED(size, SZ_64K)) { > + /* PAGE align base addr and size */ > + addr &= PAGE_MASK; > + size = PAGE_ALIGN(size + offset_in_page(rmr_desc->base_address)); > + > + pr_err(FW_BUG "RMR descriptor[0x%llx - 0x%llx] not aligned to 64K, continue with [0x%llx - 0x%llx]\n", > + rmr_desc->base_address, > + rmr_desc->base_address + rmr_desc->length - 1, > + addr, addr + size - 1); > + } > + if (rmr->flags & IOMMU_RMR_REMAP_PERMITTED) { > + type = IOMMU_RESV_DIRECT_RELAXABLE; > + /* > + * Set IOMMU_CACHE as IOMMU_RESV_DIRECT_RELAXABLE is > + * normally used for allocated system memory that is > + * then used for 
device specific reserved regions. > + */ > + prot |= IOMMU_CACHE; > + } else { > + type = IOMMU_RESV_DIRECT; > + /* > + * Set IOMMU_MMIO as IOMMU_RESV_DIRECT is normally used > + * for device memory like MSI doorbell. > + */ > + prot |= IOMMU_MMIO; > + } On the prot value assignment based on the remapping flag, I'd like to hear Robin/Joerg's opinion, I'd avoid being in a situation where "normally" this would work but then we have to quirk it. Is this a valid assumption _always_ ? Thanks, Lorenzo > + > + region = iommu_alloc_resv_region(addr, size, prot, type); > + if (region) { > + region->fw_data.rmr.flags = rmr->flags; > + region->fw_data.rmr.sid = sid; > + region->fw_data.rmr.smmu = smmu; > + list_add_tail(®ion->list, &iort_rmr_list); > + } > + } > +} > + > +static void __init iort_parse_rmr(void) > +{ > + struct acpi_iort_node *iort_node, *iort_end; > + struct acpi_table_iort *iort; > + int i; > + > + if (iort_table->revision < 3) > + return; > + > + iort = (struct acpi_table_iort *)iort_table; > + > + iort_node = ACPI_ADD_PTR(struct acpi_iort_node, iort, > + iort->node_offset); > + iort_end = ACPI_ADD_PTR(struct acpi_iort_node, iort, > + iort_table->length); > + > + for (i = 0; i < iort->node_count; i++) { > + if (WARN_TAINT(iort_node >= iort_end, TAINT_FIRMWARE_WORKAROUND, > + "IORT node pointer overflows, bad table!\n")) > + return; > + > + if (iort_node->type == ACPI_IORT_NODE_RMR) > + iort_node_get_rmr_info(iort_node); > + > + iort_node = ACPI_ADD_PTR(struct acpi_iort_node, iort_node, > + iort_node->length); > + } > +} > > static void __init iort_init_platform_devices(void) > { > @@ -1636,6 +1767,7 @@ void __init acpi_iort_init(void) > } > > iort_init_platform_devices(); > + iort_parse_rmr(); > } > > #ifdef CONFIG_ZONE_DMA > -- > 2.17.1 >
On Thu, Aug 5, 2021 at 6:03 PM Lorenzo Pieralisi <lorenzo.pieralisi@arm.com> wrote: > > On Thu, Aug 05, 2021 at 09:07:17AM +0100, Shameer Kolothum wrote: > > [...] > > > +static void __init iort_node_get_rmr_info(struct acpi_iort_node *iort_node) > > +{ > > + struct acpi_iort_node *smmu; > > + struct acpi_iort_rmr *rmr; > > + struct acpi_iort_rmr_desc *rmr_desc; > > + u32 map_count = iort_node->mapping_count; > > + u32 sid; > > + int i; > > + > > + if (!iort_node->mapping_offset || map_count != 1) { > > + pr_err(FW_BUG "Invalid ID mapping, skipping RMR node %p\n", > > + iort_node); > > + return; > > + } > > + > > + /* Retrieve associated smmu and stream id */ > > + smmu = iort_node_get_id(iort_node, &sid, 0); > > + if (!smmu) { > > + pr_err(FW_BUG "Invalid SMMU reference, skipping RMR node %p\n", > > + iort_node); > > + return; > > + } > > + > > + /* Retrieve RMR data */ > > + rmr = (struct acpi_iort_rmr *)iort_node->node_data; > > + if (!rmr->rmr_offset || !rmr->rmr_count) { > > + pr_err(FW_BUG "Invalid RMR descriptor array, skipping RMR node %p\n", > > + iort_node); > > + return; > > + } > > + > > + rmr_desc = ACPI_ADD_PTR(struct acpi_iort_rmr_desc, iort_node, > > + rmr->rmr_offset); > > + > > + iort_rmr_desc_check_overlap(rmr_desc, rmr->rmr_count); > > + > > + for (i = 0; i < rmr->rmr_count; i++, rmr_desc++) { > > + struct iommu_resv_region *region; > > + enum iommu_resv_type type; > > + int prot = IOMMU_READ | IOMMU_WRITE; > > + u64 addr = rmr_desc->base_address, size = rmr_desc->length; > > + > > + if (!IS_ALIGNED(addr, SZ_64K) || !IS_ALIGNED(size, SZ_64K)) { > > + /* PAGE align base addr and size */ > > + addr &= PAGE_MASK; > > + size = PAGE_ALIGN(size + offset_in_page(rmr_desc->base_address)); > > + > > + pr_err(FW_BUG "RMR descriptor[0x%llx - 0x%llx] not aligned to 64K, continue with [0x%llx - 0x%llx]\n", > > + rmr_desc->base_address, > > + rmr_desc->base_address + rmr_desc->length - 1, > > + addr, addr + size - 1); > > + } > > + if (rmr->flags & 
IOMMU_RMR_REMAP_PERMITTED) { > > + type = IOMMU_RESV_DIRECT_RELAXABLE; > > + /* > > + * Set IOMMU_CACHE as IOMMU_RESV_DIRECT_RELAXABLE is > > + * normally used for allocated system memory that is > > + * then used for device specific reserved regions. > > + */ > > + prot |= IOMMU_CACHE; > > + } else { > > + type = IOMMU_RESV_DIRECT; > > + /* > > + * Set IOMMU_MMIO as IOMMU_RESV_DIRECT is normally used > > + * for device memory like MSI doorbell. > > + */ > > + prot |= IOMMU_MMIO; > > + } > > On the prot value assignment based on the remapping flag, I'd like to > hear Robin/Joerg's opinion, I'd avoid being in a situation where > "normally" this would work but then we have to quirk it. > > Is this a valid assumption _always_ ? These assumptions were made based on the historic use cases I could find reading the history. There aren't many known examples "in the wild" because so far we haven't had a mechanism other than quirks based around device-tree implementations. Ultimately I believe the proper solution will need to be another flag in the RMR table that specifies the type of memory an RMR Node describes, not just the base and length. 
-Jon > > Thanks, > Lorenzo > > > + > > + region = iommu_alloc_resv_region(addr, size, prot, type); > > + if (region) { > > + region->fw_data.rmr.flags = rmr->flags; > > + region->fw_data.rmr.sid = sid; > > + region->fw_data.rmr.smmu = smmu; > > + list_add_tail(®ion->list, &iort_rmr_list); > > + } > > + } > > +} > > + > > +static void __init iort_parse_rmr(void) > > +{ > > + struct acpi_iort_node *iort_node, *iort_end; > > + struct acpi_table_iort *iort; > > + int i; > > + > > + if (iort_table->revision < 3) > > + return; > > + > > + iort = (struct acpi_table_iort *)iort_table; > > + > > + iort_node = ACPI_ADD_PTR(struct acpi_iort_node, iort, > > + iort->node_offset); > > + iort_end = ACPI_ADD_PTR(struct acpi_iort_node, iort, > > + iort_table->length); > > + > > + for (i = 0; i < iort->node_count; i++) { > > + if (WARN_TAINT(iort_node >= iort_end, TAINT_FIRMWARE_WORKAROUND, > > + "IORT node pointer overflows, bad table!\n")) > > + return; > > + > > + if (iort_node->type == ACPI_IORT_NODE_RMR) > > + iort_node_get_rmr_info(iort_node); > > + > > + iort_node = ACPI_ADD_PTR(struct acpi_iort_node, iort_node, > > + iort_node->length); > > + } > > +} > > > > static void __init iort_init_platform_devices(void) > > { > > @@ -1636,6 +1767,7 @@ void __init acpi_iort_init(void) > > } > > > > iort_init_platform_devices(); > > + iort_parse_rmr(); > > } > > > > #ifdef CONFIG_ZONE_DMA > > -- > > 2.17.1 > >
On 8/5/2021 7:03 PM, Lorenzo Pieralisi wrote: > On Thu, Aug 05, 2021 at 09:07:17AM +0100, Shameer Kolothum wrote: > > [...] > >> +static void __init iort_node_get_rmr_info(struct acpi_iort_node *iort_node) >> +{ >> + struct acpi_iort_node *smmu; >> + struct acpi_iort_rmr *rmr; >> + struct acpi_iort_rmr_desc *rmr_desc; >> + u32 map_count = iort_node->mapping_count; >> + u32 sid; >> + int i; >> + >> + if (!iort_node->mapping_offset || map_count != 1) { >> + pr_err(FW_BUG "Invalid ID mapping, skipping RMR node %p\n", >> + iort_node); >> + return; >> + } >> + >> + /* Retrieve associated smmu and stream id */ >> + smmu = iort_node_get_id(iort_node, &sid, 0); >> + if (!smmu) { >> + pr_err(FW_BUG "Invalid SMMU reference, skipping RMR node %p\n", >> + iort_node); >> + return; >> + } >> + >> + /* Retrieve RMR data */ >> + rmr = (struct acpi_iort_rmr *)iort_node->node_data; >> + if (!rmr->rmr_offset || !rmr->rmr_count) { >> + pr_err(FW_BUG "Invalid RMR descriptor array, skipping RMR node %p\n", >> + iort_node); >> + return; >> + } >> + >> + rmr_desc = ACPI_ADD_PTR(struct acpi_iort_rmr_desc, iort_node, >> + rmr->rmr_offset); >> + >> + iort_rmr_desc_check_overlap(rmr_desc, rmr->rmr_count); >> + >> + for (i = 0; i < rmr->rmr_count; i++, rmr_desc++) { >> + struct iommu_resv_region *region; >> + enum iommu_resv_type type; >> + int prot = IOMMU_READ | IOMMU_WRITE; >> + u64 addr = rmr_desc->base_address, size = rmr_desc->length; >> + >> + if (!IS_ALIGNED(addr, SZ_64K) || !IS_ALIGNED(size, SZ_64K)) { >> + /* PAGE align base addr and size */ >> + addr &= PAGE_MASK; >> + size = PAGE_ALIGN(size + offset_in_page(rmr_desc->base_address)); >> + >> + pr_err(FW_BUG "RMR descriptor[0x%llx - 0x%llx] not aligned to 64K, continue with [0x%llx - 0x%llx]\n", >> + rmr_desc->base_address, >> + rmr_desc->base_address + rmr_desc->length - 1, >> + addr, addr + size - 1); >> + } >> + if (rmr->flags & IOMMU_RMR_REMAP_PERMITTED) { >> + type = IOMMU_RESV_DIRECT_RELAXABLE; >> + /* >> + * Set IOMMU_CACHE as 
IOMMU_RESV_DIRECT_RELAXABLE is >> + * normally used for allocated system memory that is >> + * then used for device specific reserved regions. >> + */ >> + prot |= IOMMU_CACHE; >> + } else { >> + type = IOMMU_RESV_DIRECT; >> + /* >> + * Set IOMMU_MMIO as IOMMU_RESV_DIRECT is normally used >> + * for device memory like MSI doorbell. >> + */ >> + prot |= IOMMU_MMIO; >> + } > > On the prot value assignment based on the remapping flag, I'd like to > hear Robin/Joerg's opinion, I'd avoid being in a situation where > "normally" this would work but then we have to quirk it. > > Is this a valid assumption _always_ ? I think we enable quite a bit of platforms with this assumption, so IMHO it's a fair compromise for now. As per Jon's comment and oob discussions, in the long run the spec should probably be updated to include a way of explicitly specifying memory attributes. --- Thanks & Best Regards, Laurentiu > >> + >> + region = iommu_alloc_resv_region(addr, size, prot, type); >> + if (region) { >> + region->fw_data.rmr.flags = rmr->flags; >> + region->fw_data.rmr.sid = sid; >> + region->fw_data.rmr.smmu = smmu; >> + list_add_tail(®ion->list, &iort_rmr_list); >> + } >> + } >> +} >> + >> +static void __init iort_parse_rmr(void) >> +{ >> + struct acpi_iort_node *iort_node, *iort_end; >> + struct acpi_table_iort *iort; >> + int i; >> + >> + if (iort_table->revision < 3) >> + return; >> + >> + iort = (struct acpi_table_iort *)iort_table; >> + >> + iort_node = ACPI_ADD_PTR(struct acpi_iort_node, iort, >> + iort->node_offset); >> + iort_end = ACPI_ADD_PTR(struct acpi_iort_node, iort, >> + iort_table->length); >> + >> + for (i = 0; i < iort->node_count; i++) { >> + if (WARN_TAINT(iort_node >= iort_end, TAINT_FIRMWARE_WORKAROUND, >> + "IORT node pointer overflows, bad table!\n")) >> + return; >> + >> + if (iort_node->type == ACPI_IORT_NODE_RMR) >> + iort_node_get_rmr_info(iort_node); >> + >> + iort_node = ACPI_ADD_PTR(struct acpi_iort_node, iort_node, >> + iort_node->length); >> + 
} >> +} >> >> static void __init iort_init_platform_devices(void) >> { >> @@ -1636,6 +1767,7 @@ void __init acpi_iort_init(void) >> } >> >> iort_init_platform_devices(); >> + iort_parse_rmr(); >> } >> >> #ifdef CONFIG_ZONE_DMA >> -- >> 2.17.1 >> > _______________________________________________ > iommu mailing list > iommu@lists.linux-foundation.org > https://eur01.safelinks.protection.outlook.com/?url=https%3A%2F%2Flists.linuxfoundation.org%2Fmailman%2Flistinfo%2Fiommu&data=04%7C01%7Claurentiu.tudor%40nxp.com%7Cb020e5093dee4374ee0b08d9582a9238%7C686ea1d3bc2b4c6fa92cd99c5c301635%7C0%7C0%7C637637762131278563%7CUnknown%7CTWFpbGZsb3d8eyJWIjoiMC4wLjAwMDAiLCJQIjoiV2luMzIiLCJBTiI6Ik1haWwiLCJXVCI6Mn0%3D%7C1000&sdata=8Q%2Fu5qawL94YhbKLujOAlJjTVEWZircjviccWnnqPxs%3D&reserved=0 >
On 2021-08-05 17:03, Lorenzo Pieralisi wrote: > On Thu, Aug 05, 2021 at 09:07:17AM +0100, Shameer Kolothum wrote: > > [...] > >> +static void __init iort_node_get_rmr_info(struct acpi_iort_node *iort_node) >> +{ >> + struct acpi_iort_node *smmu; >> + struct acpi_iort_rmr *rmr; >> + struct acpi_iort_rmr_desc *rmr_desc; >> + u32 map_count = iort_node->mapping_count; >> + u32 sid; >> + int i; >> + >> + if (!iort_node->mapping_offset || map_count != 1) { >> + pr_err(FW_BUG "Invalid ID mapping, skipping RMR node %p\n", >> + iort_node); >> + return; >> + } >> + >> + /* Retrieve associated smmu and stream id */ >> + smmu = iort_node_get_id(iort_node, &sid, 0); >> + if (!smmu) { >> + pr_err(FW_BUG "Invalid SMMU reference, skipping RMR node %p\n", >> + iort_node); >> + return; >> + } >> + >> + /* Retrieve RMR data */ >> + rmr = (struct acpi_iort_rmr *)iort_node->node_data; >> + if (!rmr->rmr_offset || !rmr->rmr_count) { >> + pr_err(FW_BUG "Invalid RMR descriptor array, skipping RMR node %p\n", >> + iort_node); >> + return; >> + } >> + >> + rmr_desc = ACPI_ADD_PTR(struct acpi_iort_rmr_desc, iort_node, >> + rmr->rmr_offset); >> + >> + iort_rmr_desc_check_overlap(rmr_desc, rmr->rmr_count); >> + >> + for (i = 0; i < rmr->rmr_count; i++, rmr_desc++) { >> + struct iommu_resv_region *region; >> + enum iommu_resv_type type; >> + int prot = IOMMU_READ | IOMMU_WRITE; >> + u64 addr = rmr_desc->base_address, size = rmr_desc->length; >> + >> + if (!IS_ALIGNED(addr, SZ_64K) || !IS_ALIGNED(size, SZ_64K)) { >> + /* PAGE align base addr and size */ >> + addr &= PAGE_MASK; >> + size = PAGE_ALIGN(size + offset_in_page(rmr_desc->base_address)); >> + >> + pr_err(FW_BUG "RMR descriptor[0x%llx - 0x%llx] not aligned to 64K, continue with [0x%llx - 0x%llx]\n", >> + rmr_desc->base_address, >> + rmr_desc->base_address + rmr_desc->length - 1, >> + addr, addr + size - 1); >> + } >> + if (rmr->flags & IOMMU_RMR_REMAP_PERMITTED) { >> + type = IOMMU_RESV_DIRECT_RELAXABLE; >> + /* >> + * Set IOMMU_CACHE as 
IOMMU_RESV_DIRECT_RELAXABLE is >> + * normally used for allocated system memory that is >> + * then used for device specific reserved regions. >> + */ >> + prot |= IOMMU_CACHE; >> + } else { >> + type = IOMMU_RESV_DIRECT; >> + /* >> + * Set IOMMU_MMIO as IOMMU_RESV_DIRECT is normally used >> + * for device memory like MSI doorbell. >> + */ >> + prot |= IOMMU_MMIO; >> + } > > On the prot value assignment based on the remapping flag, I'd like to > hear Robin/Joerg's opinion, I'd avoid being in a situation where > "normally" this would work but then we have to quirk it. > > Is this a valid assumption _always_ ? No. Certainly applying IOMMU_CACHE without reference to the device's _CCA attribute or how CPUs may be accessing a shared buffer could lead to a loss of coherency. At worst, applying IOMMU_MMIO to a device-private buffer *could* cause the device to lose coherency with itself if the memory underlying the RMR may have allocated into system caches. Note that the expected use for non-remappable RMRs is the device holding some sort of long-lived private data in system RAM - the MSI doorbell trick is far more of a niche hack really. At the very least I think we need to refer to the device's memory access properties here. Jon, Laurentiu - how do RMRs correspond to the EFI memory map on your firmware? I'm starting to think that as long as the underlying memory is described appropriately there then we should be able to infer correct attributes from the EFI memory type and flags. Robin.
On Mon, Sep 6, 2021 at 7:44 PM Robin Murphy <robin.murphy@arm.com> wrote: > > On 2021-08-05 17:03, Lorenzo Pieralisi wrote: > > On Thu, Aug 05, 2021 at 09:07:17AM +0100, Shameer Kolothum wrote: > > > > [...] > > > >> +static void __init iort_node_get_rmr_info(struct acpi_iort_node *iort_node) > >> +{ > >> + struct acpi_iort_node *smmu; > >> + struct acpi_iort_rmr *rmr; > >> + struct acpi_iort_rmr_desc *rmr_desc; > >> + u32 map_count = iort_node->mapping_count; > >> + u32 sid; > >> + int i; > >> + > >> + if (!iort_node->mapping_offset || map_count != 1) { > >> + pr_err(FW_BUG "Invalid ID mapping, skipping RMR node %p\n", > >> + iort_node); > >> + return; > >> + } > >> + > >> + /* Retrieve associated smmu and stream id */ > >> + smmu = iort_node_get_id(iort_node, &sid, 0); > >> + if (!smmu) { > >> + pr_err(FW_BUG "Invalid SMMU reference, skipping RMR node %p\n", > >> + iort_node); > >> + return; > >> + } > >> + > >> + /* Retrieve RMR data */ > >> + rmr = (struct acpi_iort_rmr *)iort_node->node_data; > >> + if (!rmr->rmr_offset || !rmr->rmr_count) { > >> + pr_err(FW_BUG "Invalid RMR descriptor array, skipping RMR node %p\n", > >> + iort_node); > >> + return; > >> + } > >> + > >> + rmr_desc = ACPI_ADD_PTR(struct acpi_iort_rmr_desc, iort_node, > >> + rmr->rmr_offset); > >> + > >> + iort_rmr_desc_check_overlap(rmr_desc, rmr->rmr_count); > >> + > >> + for (i = 0; i < rmr->rmr_count; i++, rmr_desc++) { > >> + struct iommu_resv_region *region; > >> + enum iommu_resv_type type; > >> + int prot = IOMMU_READ | IOMMU_WRITE; > >> + u64 addr = rmr_desc->base_address, size = rmr_desc->length; > >> + > >> + if (!IS_ALIGNED(addr, SZ_64K) || !IS_ALIGNED(size, SZ_64K)) { > >> + /* PAGE align base addr and size */ > >> + addr &= PAGE_MASK; > >> + size = PAGE_ALIGN(size + offset_in_page(rmr_desc->base_address)); > >> + > >> + pr_err(FW_BUG "RMR descriptor[0x%llx - 0x%llx] not aligned to 64K, continue with [0x%llx - 0x%llx]\n", > >> + rmr_desc->base_address, > >> + rmr_desc->base_address 
+ rmr_desc->length - 1, > >> + addr, addr + size - 1); > >> + } > >> + if (rmr->flags & IOMMU_RMR_REMAP_PERMITTED) { > >> + type = IOMMU_RESV_DIRECT_RELAXABLE; > >> + /* > >> + * Set IOMMU_CACHE as IOMMU_RESV_DIRECT_RELAXABLE is > >> + * normally used for allocated system memory that is > >> + * then used for device specific reserved regions. > >> + */ > >> + prot |= IOMMU_CACHE; > >> + } else { > >> + type = IOMMU_RESV_DIRECT; > >> + /* > >> + * Set IOMMU_MMIO as IOMMU_RESV_DIRECT is normally used > >> + * for device memory like MSI doorbell. > >> + */ > >> + prot |= IOMMU_MMIO; > >> + } > > > > On the prot value assignment based on the remapping flag, I'd like to > > hear Robin/Joerg's opinion, I'd avoid being in a situation where > > "normally" this would work but then we have to quirk it. > > > > Is this a valid assumption _always_ ? > > No. Certainly applying IOMMU_CACHE without reference to the device's > _CCA attribute or how CPUs may be accessing a shared buffer could lead > to a loss of coherency. At worst, applying IOMMU_MMIO to a > device-private buffer *could* cause the device to lose coherency with > itself if the memory underlying the RMR may have allocated into system > caches. Note that the expected use for non-remappable RMRs is the device > holding some sort of long-lived private data in system RAM - the MSI > doorbell trick is far more of a niche hack really. > > At the very least I think we need to refer to the device's memory access > properties here. > > Jon, Laurentiu - how do RMRs correspond to the EFI memory map on your > firmware? I'm starting to think that as long as the underlying memory is > described appropriately there then we should be able to infer correct > attributes from the EFI memory type and flags. The devices are all cache coherent and marked as _CCA, 1. The Memory regions are in the virt table as ARM_MEMORY_REGION_ATTRIBUTE_DEVICE. 
The current chicken and egg problem we have is that during the fsl-mc-bus initialization we call error = acpi_dma_configure_id(&pdev->dev, DEV_DMA_COHERENT, &mc_stream_id); which gets deferred because the SMMU has not been initialized yet. Then we initialize the RMR tables but there is no device reference there to be able to query device properties, only the stream id. After the IORT tables are parsed and the SMMU is setup, on the second device probe we associate everything based on the stream id and the fsl-mc-bus device is able to claim its 1-1 DMA mappings. cat /sys/kernel/iommu_groups/0/reserved_regions 0x0000000001000000 0x0000000010ffffff direct-relaxable 0x0000000008000000 0x00000000080fffff msi 0x000000080c000000 0x000000081bffffff direct-relaxable 0x0000001c00000000 0x0000001c001fffff direct-relaxable 0x0000002080000000 0x000000209fffffff direct-relaxable -Jon > > Robin.
> -----Original Message----- > From: Jon Nettleton [mailto:jon@solid-run.com] > Sent: 06 September 2021 20:51 > To: Robin Murphy <robin.murphy@arm.com> > Cc: Lorenzo Pieralisi <lorenzo.pieralisi@arm.com>; Shameerali Kolothum Thodi > <shameerali.kolothum.thodi@huawei.com>; Laurentiu Tudor > <laurentiu.tudor@nxp.com>; linux-arm-kernel > <linux-arm-kernel@lists.infradead.org>; ACPI Devel Maling List > <linux-acpi@vger.kernel.org>; Linux IOMMU > <iommu@lists.linux-foundation.org>; Linuxarm <linuxarm@huawei.com>; > Joerg Roedel <joro@8bytes.org>; Will Deacon <will@kernel.org>; > wanghuiqiang <wanghuiqiang@huawei.com>; Guohanjun (Hanjun Guo) > <guohanjun@huawei.com>; Steven Price <steven.price@arm.com>; Sami > Mujawar <Sami.Mujawar@arm.com>; Eric Auger <eric.auger@redhat.com>; > yangyicong <yangyicong@huawei.com> > Subject: Re: [PATCH v7 2/9] ACPI/IORT: Add support for RMR node parsing > [...] > > > > > > On the prot value assignment based on the remapping flag, I'd like > > > to hear Robin/Joerg's opinion, I'd avoid being in a situation where > > > "normally" this would work but then we have to quirk it. > > > > > > Is this a valid assumption _always_ ? > > > > No. Certainly applying IOMMU_CACHE without reference to the device's > > _CCA attribute or how CPUs may be accessing a shared buffer could lead > > to a loss of coherency. At worst, applying IOMMU_MMIO to a > > device-private buffer *could* cause the device to lose coherency with > > itself if the memory underlying the RMR may have allocated into system > > caches. Note that the expected use for non-remappable RMRs is the > > device holding some sort of long-lived private data in system RAM - > > the MSI doorbell trick is far more of a niche hack really. > > > > At the very least I think we need to refer to the device's memory > > access properties here. > > > > Jon, Laurentiu - how do RMRs correspond to the EFI memory map on your > > firmware? 
I'm starting to think that as long as the underlying memory > > is described appropriately there then we should be able to infer > > correct attributes from the EFI memory type and flags. > > The devices are all cache coherent and marked as _CCA, 1. The Memory > regions are in the virt table as ARM_MEMORY_REGION_ATTRIBUTE_DEVICE. > > The current chicken and egg problem we have is that during the fsl-mc-bus > initialization we call > > error = acpi_dma_configure_id(&pdev->dev, DEV_DMA_COHERENT, > &mc_stream_id); > > which gets deferred because the SMMU has not been initialized yet. Then we > initialize the RMR tables but there is no device reference there to be able to > query device properties, only the stream id. After the IORT tables are parsed > and the SMMU is setup, on the second device probe we associate everything > based on the stream id and the fsl-mc-bus device is able to claim its 1-1 DMA > mappings. Can we solve this order problem by delaying the iommu_alloc_resv_region() to the iommu_dma_get_rmr_resv_regions(dev, list) ? We could invoke device_get_dma_attr() from there which I believe will return the _CCA attribute. Or is that still early to invoke that? Thanks, Shameer > cat /sys/kernel/iommu_groups/0/reserved_regions > 0x0000000001000000 0x0000000010ffffff direct-relaxable > 0x0000000008000000 0x00000000080fffff msi > 0x000000080c000000 0x000000081bffffff direct-relaxable > 0x0000001c00000000 0x0000001c001fffff direct-relaxable > 0x0000002080000000 0x000000209fffffff direct-relaxable > > -Jon > > > > > Robin.
On Thu, Sep 16, 2021 at 9:26 AM Shameerali Kolothum Thodi <shameerali.kolothum.thodi@huawei.com> wrote: > > > > > -----Original Message----- > > From: Jon Nettleton [mailto:jon@solid-run.com] > > Sent: 06 September 2021 20:51 > > To: Robin Murphy <robin.murphy@arm.com> > > Cc: Lorenzo Pieralisi <lorenzo.pieralisi@arm.com>; Shameerali Kolothum Thodi > > <shameerali.kolothum.thodi@huawei.com>; Laurentiu Tudor > > <laurentiu.tudor@nxp.com>; linux-arm-kernel > > <linux-arm-kernel@lists.infradead.org>; ACPI Devel Maling List > > <linux-acpi@vger.kernel.org>; Linux IOMMU > > <iommu@lists.linux-foundation.org>; Linuxarm <linuxarm@huawei.com>; > > Joerg Roedel <joro@8bytes.org>; Will Deacon <will@kernel.org>; > > wanghuiqiang <wanghuiqiang@huawei.com>; Guohanjun (Hanjun Guo) > > <guohanjun@huawei.com>; Steven Price <steven.price@arm.com>; Sami > > Mujawar <Sami.Mujawar@arm.com>; Eric Auger <eric.auger@redhat.com>; > > yangyicong <yangyicong@huawei.com> > > Subject: Re: [PATCH v7 2/9] ACPI/IORT: Add support for RMR node parsing > > > [...] > > > > > > > > > On the prot value assignment based on the remapping flag, I'd like > > > > to hear Robin/Joerg's opinion, I'd avoid being in a situation where > > > > "normally" this would work but then we have to quirk it. > > > > > > > > Is this a valid assumption _always_ ? > > > > > > No. Certainly applying IOMMU_CACHE without reference to the device's > > > _CCA attribute or how CPUs may be accessing a shared buffer could lead > > > to a loss of coherency. At worst, applying IOMMU_MMIO to a > > > device-private buffer *could* cause the device to lose coherency with > > > itself if the memory underlying the RMR may have allocated into system > > > caches. Note that the expected use for non-remappable RMRs is the > > > device holding some sort of long-lived private data in system RAM - > > > the MSI doorbell trick is far more of a niche hack really. 
> > > > > > At the very least I think we need to refer to the device's memory > > > access properties here. > > > > > > Jon, Laurentiu - how do RMRs correspond to the EFI memory map on your > > > firmware? I'm starting to think that as long as the underlying memory > > > is described appropriately there then we should be able to infer > > > correct attributes from the EFI memory type and flags. > > > > The devices are all cache coherent and marked as _CCA, 1. The Memory > > regions are in the virt table as ARM_MEMORY_REGION_ATTRIBUTE_DEVICE. > > > > The current chicken and egg problem we have is that during the fsl-mc-bus > > initialization we call > > > > error = acpi_dma_configure_id(&pdev->dev, DEV_DMA_COHERENT, > > &mc_stream_id); > > > > which gets deferred because the SMMU has not been initialized yet. Then we > > initialize the RMR tables but there is no device reference there to be able to > > query device properties, only the stream id. After the IORT tables are parsed > > and the SMMU is setup, on the second device probe we associate everything > > based on the stream id and the fsl-mc-bus device is able to claim its 1-1 DMA > > mappings. > > Can we solve this order problem by delaying the iommu_alloc_resv_region() > to the iommu_dma_get_rmr_resv_regions(dev, list) ? We could invoke > device_get_dma_attr() from there which I believe will return the _CCA attribute. > > Or is that still early to invoke that? That looks like it should work. Do we then also need to parse through the VirtualMemoryTable matching the start and end addresses to determine the other memory attributes like MMIO? 
-Jon > > Thanks, > Shameer > > > cat /sys/kernel/iommu_groups/0/reserved_regions > > 0x0000000001000000 0x0000000010ffffff direct-relaxable > > 0x0000000008000000 0x00000000080fffff msi > > 0x000000080c000000 0x000000081bffffff direct-relaxable > > 0x0000001c00000000 0x0000001c001fffff direct-relaxable > > 0x0000002080000000 0x000000209fffffff direct-relaxable > > > > -Jon > > > > > > > > Robin.
> -----Original Message----- > From: Jon Nettleton [mailto:jon@solid-run.com] > Sent: 16 September 2021 08:52 > To: Shameerali Kolothum Thodi <shameerali.kolothum.thodi@huawei.com> > Cc: Robin Murphy <robin.murphy@arm.com>; Lorenzo Pieralisi > <lorenzo.pieralisi@arm.com>; Laurentiu Tudor <laurentiu.tudor@nxp.com>; > linux-arm-kernel <linux-arm-kernel@lists.infradead.org>; ACPI Devel Maling > List <linux-acpi@vger.kernel.org>; Linux IOMMU > <iommu@lists.linux-foundation.org>; Joerg Roedel <joro@8bytes.org>; Will > Deacon <will@kernel.org>; wanghuiqiang <wanghuiqiang@huawei.com>; > Guohanjun (Hanjun Guo) <guohanjun@huawei.com>; Steven Price > <steven.price@arm.com>; Sami Mujawar <Sami.Mujawar@arm.com>; Eric > Auger <eric.auger@redhat.com>; yangyicong <yangyicong@huawei.com> > Subject: Re: [PATCH v7 2/9] ACPI/IORT: Add support for RMR node parsing > > On Thu, Sep 16, 2021 at 9:26 AM Shameerali Kolothum Thodi > <shameerali.kolothum.thodi@huawei.com> wrote: > > > > > > > > > -----Original Message----- > > > From: Jon Nettleton [mailto:jon@solid-run.com] > > > Sent: 06 September 2021 20:51 > > > To: Robin Murphy <robin.murphy@arm.com> > > > Cc: Lorenzo Pieralisi <lorenzo.pieralisi@arm.com>; Shameerali > > > Kolothum Thodi <shameerali.kolothum.thodi@huawei.com>; Laurentiu > > > Tudor <laurentiu.tudor@nxp.com>; linux-arm-kernel > > > <linux-arm-kernel@lists.infradead.org>; ACPI Devel Maling List > > > <linux-acpi@vger.kernel.org>; Linux IOMMU > > > <iommu@lists.linux-foundation.org>; Linuxarm <linuxarm@huawei.com>; > > > Joerg Roedel <joro@8bytes.org>; Will Deacon <will@kernel.org>; > > > wanghuiqiang <wanghuiqiang@huawei.com>; Guohanjun (Hanjun Guo) > > > <guohanjun@huawei.com>; Steven Price <steven.price@arm.com>; Sami > > > Mujawar <Sami.Mujawar@arm.com>; Eric Auger > <eric.auger@redhat.com>; > > > yangyicong <yangyicong@huawei.com> > > > Subject: Re: [PATCH v7 2/9] ACPI/IORT: Add support for RMR node > > > parsing > > > > > [...] 
> > > > > > > > > > > > On the prot value assignment based on the remapping flag, I'd > > > > > like to hear Robin/Joerg's opinion, I'd avoid being in a > > > > > situation where "normally" this would work but then we have to quirk > it. > > > > > > > > > > Is this a valid assumption _always_ ? > > > > > > > > No. Certainly applying IOMMU_CACHE without reference to the > > > > device's _CCA attribute or how CPUs may be accessing a shared > > > > buffer could lead to a loss of coherency. At worst, applying > > > > IOMMU_MMIO to a device-private buffer *could* cause the device to > > > > lose coherency with itself if the memory underlying the RMR may > > > > have allocated into system caches. Note that the expected use for > > > > non-remappable RMRs is the device holding some sort of long-lived > > > > private data in system RAM - the MSI doorbell trick is far more of a niche > hack really. > > > > > > > > At the very least I think we need to refer to the device's memory > > > > access properties here. > > > > > > > > Jon, Laurentiu - how do RMRs correspond to the EFI memory map on > > > > your firmware? I'm starting to think that as long as the > > > > underlying memory is described appropriately there then we should > > > > be able to infer correct attributes from the EFI memory type and flags. > > > > > > The devices are all cache coherent and marked as _CCA, 1. The > > > Memory regions are in the virt table as > ARM_MEMORY_REGION_ATTRIBUTE_DEVICE. > > > > > > The current chicken and egg problem we have is that during the > > > fsl-mc-bus initialization we call > > > > > > error = acpi_dma_configure_id(&pdev->dev, DEV_DMA_COHERENT, > > > &mc_stream_id); > > > > > > which gets deferred because the SMMU has not been initialized yet. > > > Then we initialize the RMR tables but there is no device reference > > > there to be able to query device properties, only the stream id. 
> > > After the IORT tables are parsed and the SMMU is setup, on the > > > second device probe we associate everything based on the stream id > > > and the fsl-mc-bus device is able to claim its 1-1 DMA mappings. > > > > Can we solve this order problem by delaying the > > iommu_alloc_resv_region() to the iommu_dma_get_rmr_resv_regions(dev, > > list) ? We could invoke > > device_get_dma_attr() from there which I believe will return the _CCA > attribute. > > > > Or is that still early to invoke that? > > That looks like it should work. Do we then also need to parse through the > VirtualMemoryTable matching the start and end addresses to determine the > other memory attributes like MMIO? Yes. But that looks tricky as I can't find that readily available on Arm, like the efi_mem_attributes(). I will take a look. Please let me know if there is one or any other easy way to retrieve it. Thanks, Shameer > > -Jon > > > > > Thanks, > > Shameer > > > > > cat /sys/kernel/iommu_groups/0/reserved_regions > > > 0x0000000001000000 0x0000000010ffffff direct-relaxable > > > 0x0000000008000000 0x00000000080fffff msi > > > 0x000000080c000000 0x000000081bffffff direct-relaxable > > > 0x0000001c00000000 0x0000001c001fffff direct-relaxable > > > 0x0000002080000000 0x000000209fffffff direct-relaxable > > > > > > -Jon > > > > > > > > > > > Robin.
On Thu, Sep 16, 2021 at 10:26 AM Shameerali Kolothum Thodi <shameerali.kolothum.thodi@huawei.com> wrote: > > > > > -----Original Message----- > > From: Jon Nettleton [mailto:jon@solid-run.com] > > Sent: 16 September 2021 08:52 > > To: Shameerali Kolothum Thodi <shameerali.kolothum.thodi@huawei.com> > > Cc: Robin Murphy <robin.murphy@arm.com>; Lorenzo Pieralisi > > <lorenzo.pieralisi@arm.com>; Laurentiu Tudor <laurentiu.tudor@nxp.com>; > > linux-arm-kernel <linux-arm-kernel@lists.infradead.org>; ACPI Devel Maling > > List <linux-acpi@vger.kernel.org>; Linux IOMMU > > <iommu@lists.linux-foundation.org>; Joerg Roedel <joro@8bytes.org>; Will > > Deacon <will@kernel.org>; wanghuiqiang <wanghuiqiang@huawei.com>; > > Guohanjun (Hanjun Guo) <guohanjun@huawei.com>; Steven Price > > <steven.price@arm.com>; Sami Mujawar <Sami.Mujawar@arm.com>; Eric > > Auger <eric.auger@redhat.com>; yangyicong <yangyicong@huawei.com> > > Subject: Re: [PATCH v7 2/9] ACPI/IORT: Add support for RMR node parsing > > > > On Thu, Sep 16, 2021 at 9:26 AM Shameerali Kolothum Thodi > > <shameerali.kolothum.thodi@huawei.com> wrote: > > > > > > > > > > > > > -----Original Message----- > > > > From: Jon Nettleton [mailto:jon@solid-run.com] > > > > Sent: 06 September 2021 20:51 > > > > To: Robin Murphy <robin.murphy@arm.com> > > > > Cc: Lorenzo Pieralisi <lorenzo.pieralisi@arm.com>; Shameerali > > > > Kolothum Thodi <shameerali.kolothum.thodi@huawei.com>; Laurentiu > > > > Tudor <laurentiu.tudor@nxp.com>; linux-arm-kernel > > > > <linux-arm-kernel@lists.infradead.org>; ACPI Devel Maling List > > > > <linux-acpi@vger.kernel.org>; Linux IOMMU > > > > <iommu@lists.linux-foundation.org>; Linuxarm <linuxarm@huawei.com>; > > > > Joerg Roedel <joro@8bytes.org>; Will Deacon <will@kernel.org>; > > > > wanghuiqiang <wanghuiqiang@huawei.com>; Guohanjun (Hanjun Guo) > > > > <guohanjun@huawei.com>; Steven Price <steven.price@arm.com>; Sami > > > > Mujawar <Sami.Mujawar@arm.com>; Eric Auger > > <eric.auger@redhat.com>; 
> > > > yangyicong <yangyicong@huawei.com> > > > > Subject: Re: [PATCH v7 2/9] ACPI/IORT: Add support for RMR node > > > > parsing > > > > > > > [...] > > > > > > > > > > > > > > > On the prot value assignment based on the remapping flag, I'd > > > > > > like to hear Robin/Joerg's opinion, I'd avoid being in a > > > > > > situation where "normally" this would work but then we have to quirk > > it. > > > > > > > > > > > > Is this a valid assumption _always_ ? > > > > > > > > > > No. Certainly applying IOMMU_CACHE without reference to the > > > > > device's _CCA attribute or how CPUs may be accessing a shared > > > > > buffer could lead to a loss of coherency. At worst, applying > > > > > IOMMU_MMIO to a device-private buffer *could* cause the device to > > > > > lose coherency with itself if the memory underlying the RMR may > > > > > have allocated into system caches. Note that the expected use for > > > > > non-remappable RMRs is the device holding some sort of long-lived > > > > > private data in system RAM - the MSI doorbell trick is far more of a niche > > hack really. > > > > > > > > > > At the very least I think we need to refer to the device's memory > > > > > access properties here. > > > > > > > > > > Jon, Laurentiu - how do RMRs correspond to the EFI memory map on > > > > > your firmware? I'm starting to think that as long as the > > > > > underlying memory is described appropriately there then we should > > > > > be able to infer correct attributes from the EFI memory type and flags. > > > > > > > > The devices are all cache coherent and marked as _CCA, 1. The > > > > Memory regions are in the virt table as > > ARM_MEMORY_REGION_ATTRIBUTE_DEVICE. > > > > > > > > The current chicken and egg problem we have is that during the > > > > fsl-mc-bus initialization we call > > > > > > > > error = acpi_dma_configure_id(&pdev->dev, DEV_DMA_COHERENT, > > > > &mc_stream_id); > > > > > > > > which gets deferred because the SMMU has not been initialized yet. 
> > > > Then we initialize the RMR tables but there is no device reference > > > > there to be able to query device properties, only the stream id. > > > > After the IORT tables are parsed and the SMMU is setup, on the > > > > second device probe we associate everything based on the stream id > > > > and the fsl-mc-bus device is able to claim its 1-1 DMA mappings. > > > > > > Can we solve this order problem by delaying the > > > iommu_alloc_resv_region() to the iommu_dma_get_rmr_resv_regions(dev, > > > list) ? We could invoke > > > device_get_dma_attr() from there which I believe will return the _CCA > > attribute. > > > > > > Or is that still early to invoke that? > > > > That looks like it should work. Do we then also need to parse through the > > VirtualMemoryTable matching the start and end addresses to determine the > > other memory attributes like MMIO? > > Yes. But that looks tricky as I can't find that readily available on Arm, like the > efi_mem_attributes(). I will take a look. > > Please let me know if there is one or any other easy way to retrieve it. maybe we don't need to. Maybe it is enough to just move iommu_alloc_resv_regions and then set the IOMMU_CACHE flag if type = IOMMU_RESV_DIRECT_RELAXABLE and _CCA=1? -Jon > > Thanks, > Shameer > > > > > -Jon > > > > > > > > Thanks, > > > Shameer > > > > > > > cat /sys/kernel/iommu_groups/0/reserved_regions > > > > 0x0000000001000000 0x0000000010ffffff direct-relaxable > > > > 0x0000000008000000 0x00000000080fffff msi > > > > 0x000000080c000000 0x000000081bffffff direct-relaxable > > > > 0x0000001c00000000 0x0000001c001fffff direct-relaxable > > > > 0x0000002080000000 0x000000209fffffff direct-relaxable > > > > > > > > -Jon > > > > > > > > > > > Robin.
> -----Original Message----- > From: Jon Nettleton [mailto:jon@solid-run.com] > Sent: 16 September 2021 12:17 > To: Shameerali Kolothum Thodi <shameerali.kolothum.thodi@huawei.com> > Cc: Robin Murphy <robin.murphy@arm.com>; Lorenzo Pieralisi > <lorenzo.pieralisi@arm.com>; Laurentiu Tudor <laurentiu.tudor@nxp.com>; > linux-arm-kernel <linux-arm-kernel@lists.infradead.org>; ACPI Devel Maling > List <linux-acpi@vger.kernel.org>; Linux IOMMU > <iommu@lists.linux-foundation.org>; Joerg Roedel <joro@8bytes.org>; Will > Deacon <will@kernel.org>; wanghuiqiang <wanghuiqiang@huawei.com>; > Guohanjun (Hanjun Guo) <guohanjun@huawei.com>; Steven Price > <steven.price@arm.com>; Sami Mujawar <Sami.Mujawar@arm.com>; Eric > Auger <eric.auger@redhat.com>; yangyicong <yangyicong@huawei.com> > Subject: Re: [PATCH v7 2/9] ACPI/IORT: Add support for RMR node parsing > > On Thu, Sep 16, 2021 at 10:26 AM Shameerali Kolothum Thodi > <shameerali.kolothum.thodi@huawei.com> wrote: > > > > > > > > > -----Original Message----- > > > From: Jon Nettleton [mailto:jon@solid-run.com] > > > Sent: 16 September 2021 08:52 > > > To: Shameerali Kolothum Thodi > <shameerali.kolothum.thodi@huawei.com> > > > Cc: Robin Murphy <robin.murphy@arm.com>; Lorenzo Pieralisi > > > <lorenzo.pieralisi@arm.com>; Laurentiu Tudor > > > <laurentiu.tudor@nxp.com>; linux-arm-kernel > > > <linux-arm-kernel@lists.infradead.org>; ACPI Devel Maling List > > > <linux-acpi@vger.kernel.org>; Linux IOMMU > > > <iommu@lists.linux-foundation.org>; Joerg Roedel <joro@8bytes.org>; > > > Will Deacon <will@kernel.org>; wanghuiqiang > > > <wanghuiqiang@huawei.com>; Guohanjun (Hanjun Guo) > > > <guohanjun@huawei.com>; Steven Price <steven.price@arm.com>; Sami > > > Mujawar <Sami.Mujawar@arm.com>; Eric Auger > <eric.auger@redhat.com>; > > > yangyicong <yangyicong@huawei.com> > > > Subject: Re: [PATCH v7 2/9] ACPI/IORT: Add support for RMR node > > > parsing > > > > > > On Thu, Sep 16, 2021 at 9:26 AM Shameerali Kolothum Thodi > > > 
<shameerali.kolothum.thodi@huawei.com> wrote: > > > > > > > > > > > > > > > > > -----Original Message----- > > > > > From: Jon Nettleton [mailto:jon@solid-run.com] > > > > > Sent: 06 September 2021 20:51 > > > > > To: Robin Murphy <robin.murphy@arm.com> > > > > > Cc: Lorenzo Pieralisi <lorenzo.pieralisi@arm.com>; Shameerali > > > > > Kolothum Thodi <shameerali.kolothum.thodi@huawei.com>; Laurentiu > > > > > Tudor <laurentiu.tudor@nxp.com>; linux-arm-kernel > > > > > <linux-arm-kernel@lists.infradead.org>; ACPI Devel Maling List > > > > > <linux-acpi@vger.kernel.org>; Linux IOMMU > > > > > <iommu@lists.linux-foundation.org>; Linuxarm > > > > > <linuxarm@huawei.com>; Joerg Roedel <joro@8bytes.org>; Will > > > > > Deacon <will@kernel.org>; wanghuiqiang > > > > > <wanghuiqiang@huawei.com>; Guohanjun (Hanjun Guo) > > > > > <guohanjun@huawei.com>; Steven Price <steven.price@arm.com>; > > > > > Sami Mujawar <Sami.Mujawar@arm.com>; Eric Auger > > > <eric.auger@redhat.com>; > > > > > yangyicong <yangyicong@huawei.com> > > > > > Subject: Re: [PATCH v7 2/9] ACPI/IORT: Add support for RMR node > > > > > parsing > > > > > > > > > [...] > > > > > > > > > > > > > > > > > > On the prot value assignment based on the remapping flag, > > > > > > > I'd like to hear Robin/Joerg's opinion, I'd avoid being in a > > > > > > > situation where "normally" this would work but then we have > > > > > > > to quirk > > > it. > > > > > > > > > > > > > > Is this a valid assumption _always_ ? > > > > > > > > > > > > No. Certainly applying IOMMU_CACHE without reference to the > > > > > > device's _CCA attribute or how CPUs may be accessing a shared > > > > > > buffer could lead to a loss of coherency. At worst, applying > > > > > > IOMMU_MMIO to a device-private buffer *could* cause the device > > > > > > to lose coherency with itself if the memory underlying the RMR > > > > > > may have allocated into system caches. 
Note that the expected > > > > > > use for non-remappable RMRs is the device holding some sort of > > > > > > long-lived private data in system RAM - the MSI doorbell trick > > > > > > is far more of a niche > > > hack really. > > > > > > > > > > > > At the very least I think we need to refer to the device's > > > > > > memory access properties here. > > > > > > > > > > > > Jon, Laurentiu - how do RMRs correspond to the EFI memory map > > > > > > on your firmware? I'm starting to think that as long as the > > > > > > underlying memory is described appropriately there then we > > > > > > should be able to infer correct attributes from the EFI memory type > and flags. > > > > > > > > > > The devices are all cache coherent and marked as _CCA, 1. The > > > > > Memory regions are in the virt table as > > > ARM_MEMORY_REGION_ATTRIBUTE_DEVICE. > > > > > > > > > > The current chicken and egg problem we have is that during the > > > > > fsl-mc-bus initialization we call > > > > > > > > > > error = acpi_dma_configure_id(&pdev->dev, DEV_DMA_COHERENT, > > > > > &mc_stream_id); > > > > > > > > > > which gets deferred because the SMMU has not been initialized yet. > > > > > Then we initialize the RMR tables but there is no device > > > > > reference there to be able to query device properties, only the stream > id. > > > > > After the IORT tables are parsed and the SMMU is setup, on the > > > > > second device probe we associate everything based on the stream > > > > > id and the fsl-mc-bus device is able to claim its 1-1 DMA mappings. > > > > > > > > Can we solve this order problem by delaying the > > > > iommu_alloc_resv_region() to the > > > > iommu_dma_get_rmr_resv_regions(dev, > > > > list) ? We could invoke > > > > device_get_dma_attr() from there which I believe will return the > > > > _CCA > > > attribute. > > > > > > > > Or is that still early to invoke that? > > > > > > That looks like it should work. 
Do we then also need to parse > > > through the VirtualMemoryTable matching the start and end addresses > > > to determine the other memory attributes like MMIO? > > > > Yes. But that looks tricky as I can't find that readily available on > > Arm, like the efi_mem_attributes(). I will take a look. > > > > Please let me know if there is one or any other easy way to retrieve it. > > maybe we don't need to. Maybe it is enough to just move > iommu_alloc_resv_regions and then set the IOMMU_CACHE flag if type = > IOMMU_RESV_DIRECT_RELAXABLE and _CCA=1? It looks like we could simply call efi_mem_type() and check for EFI_MEMORY_MAPPED_IO. I have updated the code to set the RMR prot value based on _CCA and EFI md type. Please see the last commit on this branch here (not tested), https://github.com/hisilicon/kernel-dev/commits/private-v5.14-rc4-rmr-v7-ext Please take a look and let me know if this is good enough to solve this problem. Thanks, Shameer
On 9/17/2021 2:26 PM, Shameerali Kolothum Thodi wrote: > > >> -----Original Message----- >> From: Jon Nettleton [mailto:jon@solid-run.com] >> Sent: 16 September 2021 12:17 >> To: Shameerali Kolothum Thodi <shameerali.kolothum.thodi@huawei.com> >> Cc: Robin Murphy <robin.murphy@arm.com>; Lorenzo Pieralisi >> <lorenzo.pieralisi@arm.com>; Laurentiu Tudor <laurentiu.tudor@nxp.com>; >> linux-arm-kernel <linux-arm-kernel@lists.infradead.org>; ACPI Devel Maling >> List <linux-acpi@vger.kernel.org>; Linux IOMMU >> <iommu@lists.linux-foundation.org>; Joerg Roedel <joro@8bytes.org>; Will >> Deacon <will@kernel.org>; wanghuiqiang <wanghuiqiang@huawei.com>; >> Guohanjun (Hanjun Guo) <guohanjun@huawei.com>; Steven Price >> <steven.price@arm.com>; Sami Mujawar <Sami.Mujawar@arm.com>; Eric >> Auger <eric.auger@redhat.com>; yangyicong <yangyicong@huawei.com> >> Subject: Re: [PATCH v7 2/9] ACPI/IORT: Add support for RMR node parsing >> >> On Thu, Sep 16, 2021 at 10:26 AM Shameerali Kolothum Thodi >> <shameerali.kolothum.thodi@huawei.com> wrote: >>> >>> >>> >>>> -----Original Message----- >>>> From: Jon Nettleton [mailto:jon@solid-run.com] >>>> Sent: 16 September 2021 08:52 >>>> To: Shameerali Kolothum Thodi >> <shameerali.kolothum.thodi@huawei.com> >>>> Cc: Robin Murphy <robin.murphy@arm.com>; Lorenzo Pieralisi >>>> <lorenzo.pieralisi@arm.com>; Laurentiu Tudor >>>> <laurentiu.tudor@nxp.com>; linux-arm-kernel >>>> <linux-arm-kernel@lists.infradead.org>; ACPI Devel Maling List >>>> <linux-acpi@vger.kernel.org>; Linux IOMMU >>>> <iommu@lists.linux-foundation.org>; Joerg Roedel <joro@8bytes.org>; >>>> Will Deacon <will@kernel.org>; wanghuiqiang >>>> <wanghuiqiang@huawei.com>; Guohanjun (Hanjun Guo) >>>> <guohanjun@huawei.com>; Steven Price <steven.price@arm.com>; Sami >>>> Mujawar <Sami.Mujawar@arm.com>; Eric Auger >> <eric.auger@redhat.com>; >>>> yangyicong <yangyicong@huawei.com> >>>> Subject: Re: [PATCH v7 2/9] ACPI/IORT: Add support for RMR node >>>> parsing >>>> >>>> On Thu, Sep 
16, 2021 at 9:26 AM Shameerali Kolothum Thodi >>>> <shameerali.kolothum.thodi@huawei.com> wrote: >>>>> >>>>> >>>>> >>>>>> -----Original Message----- >>>>>> From: Jon Nettleton [mailto:jon@solid-run.com] >>>>>> Sent: 06 September 2021 20:51 >>>>>> To: Robin Murphy <robin.murphy@arm.com> >>>>>> Cc: Lorenzo Pieralisi <lorenzo.pieralisi@arm.com>; Shameerali >>>>>> Kolothum Thodi <shameerali.kolothum.thodi@huawei.com>; Laurentiu >>>>>> Tudor <laurentiu.tudor@nxp.com>; linux-arm-kernel >>>>>> <linux-arm-kernel@lists.infradead.org>; ACPI Devel Maling List >>>>>> <linux-acpi@vger.kernel.org>; Linux IOMMU >>>>>> <iommu@lists.linux-foundation.org>; Linuxarm >>>>>> <linuxarm@huawei.com>; Joerg Roedel <joro@8bytes.org>; Will >>>>>> Deacon <will@kernel.org>; wanghuiqiang >>>>>> <wanghuiqiang@huawei.com>; Guohanjun (Hanjun Guo) >>>>>> <guohanjun@huawei.com>; Steven Price <steven.price@arm.com>; >>>>>> Sami Mujawar <Sami.Mujawar@arm.com>; Eric Auger >>>> <eric.auger@redhat.com>; >>>>>> yangyicong <yangyicong@huawei.com> >>>>>> Subject: Re: [PATCH v7 2/9] ACPI/IORT: Add support for RMR node >>>>>> parsing >>>>>> >>>>> [...] >>>>> >>>>>>>> >>>>>>>> On the prot value assignment based on the remapping flag, >>>>>>>> I'd like to hear Robin/Joerg's opinion, I'd avoid being in a >>>>>>>> situation where "normally" this would work but then we have >>>>>>>> to quirk >>>> it. >>>>>>>> >>>>>>>> Is this a valid assumption _always_ ? >>>>>>> >>>>>>> No. Certainly applying IOMMU_CACHE without reference to the >>>>>>> device's _CCA attribute or how CPUs may be accessing a shared >>>>>>> buffer could lead to a loss of coherency. At worst, applying >>>>>>> IOMMU_MMIO to a device-private buffer *could* cause the device >>>>>>> to lose coherency with itself if the memory underlying the RMR >>>>>>> may have allocated into system caches. 
Note that the expected >>>>>>> use for non-remappable RMRs is the device holding some sort of >>>>>>> long-lived private data in system RAM - the MSI doorbell trick >>>>>>> is far more of a niche >>>> hack really. >>>>>>> >>>>>>> At the very least I think we need to refer to the device's >>>>>>> memory access properties here. >>>>>>> >>>>>>> Jon, Laurentiu - how do RMRs correspond to the EFI memory map >>>>>>> on your firmware? I'm starting to think that as long as the >>>>>>> underlying memory is described appropriately there then we >>>>>>> should be able to infer correct attributes from the EFI memory type >> and flags. >>>>>> >>>>>> The devices are all cache coherent and marked as _CCA, 1. The >>>>>> Memory regions are in the virt table as >>>> ARM_MEMORY_REGION_ATTRIBUTE_DEVICE. >>>>>> >>>>>> The current chicken and egg problem we have is that during the >>>>>> fsl-mc-bus initialization we call >>>>>> >>>>>> error = acpi_dma_configure_id(&pdev->dev, DEV_DMA_COHERENT, >>>>>> &mc_stream_id); >>>>>> >>>>>> which gets deferred because the SMMU has not been initialized yet. >>>>>> Then we initialize the RMR tables but there is no device >>>>>> reference there to be able to query device properties, only the stream >> id. >>>>>> After the IORT tables are parsed and the SMMU is setup, on the >>>>>> second device probe we associate everything based on the stream >>>>>> id and the fsl-mc-bus device is able to claim its 1-1 DMA mappings. >>>>> >>>>> Can we solve this order problem by delaying the >>>>> iommu_alloc_resv_region() to the >>>>> iommu_dma_get_rmr_resv_regions(dev, >>>>> list) ? We could invoke >>>>> device_get_dma_attr() from there which I believe will return the >>>>> _CCA >>>> attribute. >>>>> >>>>> Or is that still early to invoke that? >>>> >>>> That looks like it should work. Do we then also need to parse >>>> through the VirtualMemoryTable matching the start and end addresses >>>> to determine the other memory attributes like MMIO? >>> >>> Yes. 
But that looks tricky as I can't find that readily available on >>> Arm, like the efi_mem_attributes(). I will take a look. >>> >>> Please let me know if there is one or any other easy way to retrieve it. >> >> maybe we don't need to. Maybe it is enough to just move >> iommu_alloc_resv_regions and then set the IOMMU_CACHE flag if type = >> IOMMU_RESV_DIRECT_RELAXABLE and _CCA=1? > > It looks like we could simply call efi_mem_type() and check for > EFI_MEMORY_MAPPED_IO. I have updated the code to set the > RMR prot value based on _CCA and EFI md type. Please see the > last commit on this branch here (not tested), > > https://github.com/hisilicon/kernel-dev/commits/private-v5.14-rc4-rmr-v7-ext > > Please take a look and let me know if this is good enough to solve this problem. > Sorry for the delay, I managed to test on a NXP LX2160A and things look fine, so: Tested-by: Laurentiu Tudor <laurentiu.tudor@nxp.com> --- Best Regards, Laurentiu
On 2021-08-05 09:07, Shameer Kolothum wrote: > Add support for parsing RMR node information from ACPI. > > Find the associated streamid and smmu node info from the > RMR node and populate a linked list with RMR memory > descriptors. > > Signed-off-by: Shameer Kolothum <shameerali.kolothum.thodi@huawei.com> > --- > drivers/acpi/arm64/iort.c | 134 +++++++++++++++++++++++++++++++++++++- > 1 file changed, 133 insertions(+), 1 deletion(-) > > diff --git a/drivers/acpi/arm64/iort.c b/drivers/acpi/arm64/iort.c > index 3b23fb775ac4..d76ba46ebe67 100644 > --- a/drivers/acpi/arm64/iort.c > +++ b/drivers/acpi/arm64/iort.c > @@ -40,6 +40,8 @@ struct iort_fwnode { > static LIST_HEAD(iort_fwnode_list); > static DEFINE_SPINLOCK(iort_fwnode_lock); > > +static LIST_HEAD(iort_rmr_list); /* list of RMR regions from ACPI */ > + > /** > * iort_set_fwnode() - Create iort_fwnode and use it to register > * iommu data in the iort_fwnode_list > @@ -393,7 +395,8 @@ static struct acpi_iort_node *iort_node_get_id(struct acpi_iort_node *node, > if (node->type == ACPI_IORT_NODE_NAMED_COMPONENT || > node->type == ACPI_IORT_NODE_PCI_ROOT_COMPLEX || > node->type == ACPI_IORT_NODE_SMMU_V3 || > - node->type == ACPI_IORT_NODE_PMCG) { > + node->type == ACPI_IORT_NODE_PMCG || > + node->type == ACPI_IORT_NODE_RMR) { > *id_out = map->output_base; > return parent; > } > @@ -1566,6 +1569,134 @@ static void __init iort_enable_acs(struct acpi_iort_node *iort_node) > #else > static inline void iort_enable_acs(struct acpi_iort_node *iort_node) { } > #endif > +static void iort_rmr_desc_check_overlap(struct acpi_iort_rmr_desc *desc, u32 count) > +{ > + int i, j; > + > + for (i = 0; i < count; i++) { > + u64 end, start = desc[i].base_address, length = desc[i].length; > + > + end = start + length - 1; > + > + /* Check for address overlap */ > + for (j = i + 1; j < count; j++) { > + u64 e_start = desc[j].base_address; > + u64 e_end = e_start + desc[j].length - 1; > + > + if (start <= e_end && end >= e_start) > + 
pr_err(FW_BUG "RMR descriptor[0x%llx - 0x%llx] overlaps, continue anyway\n", > + start, end); > + } > + } > +} > + > +static void __init iort_node_get_rmr_info(struct acpi_iort_node *iort_node) > +{ > + struct acpi_iort_node *smmu; > + struct acpi_iort_rmr *rmr; > + struct acpi_iort_rmr_desc *rmr_desc; > + u32 map_count = iort_node->mapping_count; > + u32 sid; > + int i; > + > + if (!iort_node->mapping_offset || map_count != 1) { > + pr_err(FW_BUG "Invalid ID mapping, skipping RMR node %p\n", > + iort_node); > + return; > + } > + > + /* Retrieve associated smmu and stream id */ > + smmu = iort_node_get_id(iort_node, &sid, 0); > + if (!smmu) { > + pr_err(FW_BUG "Invalid SMMU reference, skipping RMR node %p\n", > + iort_node); > + return; > + } > + > + /* Retrieve RMR data */ > + rmr = (struct acpi_iort_rmr *)iort_node->node_data; > + if (!rmr->rmr_offset || !rmr->rmr_count) { > + pr_err(FW_BUG "Invalid RMR descriptor array, skipping RMR node %p\n", > + iort_node); > + return; > + } > + > + rmr_desc = ACPI_ADD_PTR(struct acpi_iort_rmr_desc, iort_node, > + rmr->rmr_offset); > + > + iort_rmr_desc_check_overlap(rmr_desc, rmr->rmr_count); > + > + for (i = 0; i < rmr->rmr_count; i++, rmr_desc++) { > + struct iommu_resv_region *region; > + enum iommu_resv_type type; > + int prot = IOMMU_READ | IOMMU_WRITE; > + u64 addr = rmr_desc->base_address, size = rmr_desc->length; > + > + if (!IS_ALIGNED(addr, SZ_64K) || !IS_ALIGNED(size, SZ_64K)) { > + /* PAGE align base addr and size */ > + addr &= PAGE_MASK; > + size = PAGE_ALIGN(size + offset_in_page(rmr_desc->base_address)); > + > + pr_err(FW_BUG "RMR descriptor[0x%llx - 0x%llx] not aligned to 64K, continue with [0x%llx - 0x%llx]\n", > + rmr_desc->base_address, > + rmr_desc->base_address + rmr_desc->length - 1, > + addr, addr + size - 1); > + } > + if (rmr->flags & IOMMU_RMR_REMAP_PERMITTED) { > + type = IOMMU_RESV_DIRECT_RELAXABLE; > + /* > + * Set IOMMU_CACHE as IOMMU_RESV_DIRECT_RELAXABLE is > + * normally used for allocated 
system memory that is > + * then used for device specific reserved regions. > + */ > + prot |= IOMMU_CACHE; > + } else { > + type = IOMMU_RESV_DIRECT; > + /* > + * Set IOMMU_MMIO as IOMMU_RESV_DIRECT is normally used > + * for device memory like MSI doorbell. > + */ > + prot |= IOMMU_MMIO; > + } I'm not sure we ever got a definitive answer to this - does DPAA2 actually go wrong if we use IOMMU_MMIO here? I'd still much prefer to make the fewest possible assumptions, since at this point it's basically just a stop-gap until we can fix the spec. It's become clear that we can't reliably rely on guessing attributes, so I'm not too fussed about theoretical cases that currently don't work (due to complete lack of RMR support) continuing to not work for the moment, as long as we can make the real-world cases we actually have work at all. Anything which only affects performance I'd rather leave until firmware can tell us what to do. > + region = iommu_alloc_resv_region(addr, size, prot, type); > + if (region) { > + region->fw_data.rmr.flags = rmr->flags; > + region->fw_data.rmr.sid = sid; > + region->fw_data.rmr.smmu = smmu; > + list_add_tail(®ion->list, &iort_rmr_list); > + } > + } > +} > + > +static void __init iort_parse_rmr(void) > +{ > + struct acpi_iort_node *iort_node, *iort_end; > + struct acpi_table_iort *iort; > + int i; > + > + if (iort_table->revision < 3) > + return; > + > + iort = (struct acpi_table_iort *)iort_table; > + > + iort_node = ACPI_ADD_PTR(struct acpi_iort_node, iort, > + iort->node_offset); > + iort_end = ACPI_ADD_PTR(struct acpi_iort_node, iort, > + iort_table->length); > + > + for (i = 0; i < iort->node_count; i++) { > + if (WARN_TAINT(iort_node >= iort_end, TAINT_FIRMWARE_WORKAROUND, > + "IORT node pointer overflows, bad table!\n")) > + return; > + > + if (iort_node->type == ACPI_IORT_NODE_RMR) > + iort_node_get_rmr_info(iort_node); > + > + iort_node = ACPI_ADD_PTR(struct acpi_iort_node, iort_node, > + iort_node->length); > + } > +} > > static 
void __init iort_init_platform_devices(void) > { > @@ -1636,6 +1767,7 @@ void __init acpi_iort_init(void) > } > > iort_init_platform_devices(); > + iort_parse_rmr(); I guess initcall ordering vs. driver registration probably covers it up, but for the sake of cleanliness I'd rather make sure the RMRs are fully discovered *before* we create the SMMU devices that we expect to start consuming them. Robin. > } > > #ifdef CONFIG_ZONE_DMA >
On Fri, Oct 8, 2021 at 2:49 PM Robin Murphy <robin.murphy@arm.com> wrote: > > On 2021-08-05 09:07, Shameer Kolothum wrote: > > Add support for parsing RMR node information from ACPI. > > > > Find the associated streamid and smmu node info from the > > RMR node and populate a linked list with RMR memory > > descriptors. > > > > Signed-off-by: Shameer Kolothum <shameerali.kolothum.thodi@huawei.com> > > --- > > drivers/acpi/arm64/iort.c | 134 +++++++++++++++++++++++++++++++++++++- > > 1 file changed, 133 insertions(+), 1 deletion(-) > > > > diff --git a/drivers/acpi/arm64/iort.c b/drivers/acpi/arm64/iort.c > > index 3b23fb775ac4..d76ba46ebe67 100644 > > --- a/drivers/acpi/arm64/iort.c > > +++ b/drivers/acpi/arm64/iort.c > > @@ -40,6 +40,8 @@ struct iort_fwnode { > > static LIST_HEAD(iort_fwnode_list); > > static DEFINE_SPINLOCK(iort_fwnode_lock); > > > > +static LIST_HEAD(iort_rmr_list); /* list of RMR regions from ACPI */ > > + > > /** > > * iort_set_fwnode() - Create iort_fwnode and use it to register > > * iommu data in the iort_fwnode_list > > @@ -393,7 +395,8 @@ static struct acpi_iort_node *iort_node_get_id(struct acpi_iort_node *node, > > if (node->type == ACPI_IORT_NODE_NAMED_COMPONENT || > > node->type == ACPI_IORT_NODE_PCI_ROOT_COMPLEX || > > node->type == ACPI_IORT_NODE_SMMU_V3 || > > - node->type == ACPI_IORT_NODE_PMCG) { > > + node->type == ACPI_IORT_NODE_PMCG || > > + node->type == ACPI_IORT_NODE_RMR) { > > *id_out = map->output_base; > > return parent; > > } > > @@ -1566,6 +1569,134 @@ static void __init iort_enable_acs(struct acpi_iort_node *iort_node) > > #else > > static inline void iort_enable_acs(struct acpi_iort_node *iort_node) { } > > #endif > > +static void iort_rmr_desc_check_overlap(struct acpi_iort_rmr_desc *desc, u32 count) > > +{ > > + int i, j; > > + > > + for (i = 0; i < count; i++) { > > + u64 end, start = desc[i].base_address, length = desc[i].length; > > + > > + end = start + length - 1; > > + > > + /* Check for address overlap */ > > 
+ for (j = i + 1; j < count; j++) { > > + u64 e_start = desc[j].base_address; > > + u64 e_end = e_start + desc[j].length - 1; > > + > > + if (start <= e_end && end >= e_start) > > + pr_err(FW_BUG "RMR descriptor[0x%llx - 0x%llx] overlaps, continue anyway\n", > > + start, end); > > + } > > + } > > +} > > + > > +static void __init iort_node_get_rmr_info(struct acpi_iort_node *iort_node) > > +{ > > + struct acpi_iort_node *smmu; > > + struct acpi_iort_rmr *rmr; > > + struct acpi_iort_rmr_desc *rmr_desc; > > + u32 map_count = iort_node->mapping_count; > > + u32 sid; > > + int i; > > + > > + if (!iort_node->mapping_offset || map_count != 1) { > > + pr_err(FW_BUG "Invalid ID mapping, skipping RMR node %p\n", > > + iort_node); > > + return; > > + } > > + > > + /* Retrieve associated smmu and stream id */ > > + smmu = iort_node_get_id(iort_node, &sid, 0); > > + if (!smmu) { > > + pr_err(FW_BUG "Invalid SMMU reference, skipping RMR node %p\n", > > + iort_node); > > + return; > > + } > > + > > + /* Retrieve RMR data */ > > + rmr = (struct acpi_iort_rmr *)iort_node->node_data; > > + if (!rmr->rmr_offset || !rmr->rmr_count) { > > + pr_err(FW_BUG "Invalid RMR descriptor array, skipping RMR node %p\n", > > + iort_node); > > + return; > > + } > > + > > + rmr_desc = ACPI_ADD_PTR(struct acpi_iort_rmr_desc, iort_node, > > + rmr->rmr_offset); > > + > > + iort_rmr_desc_check_overlap(rmr_desc, rmr->rmr_count); > > + > > + for (i = 0; i < rmr->rmr_count; i++, rmr_desc++) { > > + struct iommu_resv_region *region; > > + enum iommu_resv_type type; > > + int prot = IOMMU_READ | IOMMU_WRITE; > > + u64 addr = rmr_desc->base_address, size = rmr_desc->length; > > + > > + if (!IS_ALIGNED(addr, SZ_64K) || !IS_ALIGNED(size, SZ_64K)) { > > + /* PAGE align base addr and size */ > > + addr &= PAGE_MASK; > > + size = PAGE_ALIGN(size + offset_in_page(rmr_desc->base_address)); > > + > > + pr_err(FW_BUG "RMR descriptor[0x%llx - 0x%llx] not aligned to 64K, continue with [0x%llx - 0x%llx]\n", > > + 
rmr_desc->base_address, > > + rmr_desc->base_address + rmr_desc->length - 1, > > + addr, addr + size - 1); > > + } > > + if (rmr->flags & IOMMU_RMR_REMAP_PERMITTED) { > > + type = IOMMU_RESV_DIRECT_RELAXABLE; > > + /* > > + * Set IOMMU_CACHE as IOMMU_RESV_DIRECT_RELAXABLE is > > + * normally used for allocated system memory that is > > + * then used for device specific reserved regions. > > + */ > > + prot |= IOMMU_CACHE; > > + } else { > > + type = IOMMU_RESV_DIRECT; > > + /* > > + * Set IOMMU_MMIO as IOMMU_RESV_DIRECT is normally used > > + * for device memory like MSI doorbell. > > + */ > > + prot |= IOMMU_MMIO; > > + } > > I'm not sure we ever got a definitive answer to this - does DPAA2 > actually go wrong if we use IOMMU_MMIO here? I'd still much prefer to > make the fewest possible assumptions, since at this point it's basically > just a stop-gap until we can fix the spec. It's become clear that we > can't reliably rely on guessing attributes, so I'm not too fussed about > theoretical cases that currently don't work (due to complete lack of RMR > support) continuing to not work for the moment, as long as we can make > the real-world cases we actually have work at all. Anything which only > affects performance I'd rather leave until firmware can tell us what to do. Well it isn't DPAA2, it is FSL_MC_BUS that fails with IOMMU_MMIO mappings. DPAA2 is just one connected device. 
-Jon > > > + region = iommu_alloc_resv_region(addr, size, prot, type); > > + if (region) { > > + region->fw_data.rmr.flags = rmr->flags; > > + region->fw_data.rmr.sid = sid; > > + region->fw_data.rmr.smmu = smmu; > > + list_add_tail(®ion->list, &iort_rmr_list); > > + } > > + } > > +} > > + > > +static void __init iort_parse_rmr(void) > > +{ > > + struct acpi_iort_node *iort_node, *iort_end; > > + struct acpi_table_iort *iort; > > + int i; > > + > > + if (iort_table->revision < 3) > > + return; > > + > > + iort = (struct acpi_table_iort *)iort_table; > > + > > + iort_node = ACPI_ADD_PTR(struct acpi_iort_node, iort, > > + iort->node_offset); > > + iort_end = ACPI_ADD_PTR(struct acpi_iort_node, iort, > > + iort_table->length); > > + > > + for (i = 0; i < iort->node_count; i++) { > > + if (WARN_TAINT(iort_node >= iort_end, TAINT_FIRMWARE_WORKAROUND, > > + "IORT node pointer overflows, bad table!\n")) > > + return; > > + > > + if (iort_node->type == ACPI_IORT_NODE_RMR) > > + iort_node_get_rmr_info(iort_node); > > + > > + iort_node = ACPI_ADD_PTR(struct acpi_iort_node, iort_node, > > + iort_node->length); > > + } > > +} > > > > static void __init iort_init_platform_devices(void) > > { > > @@ -1636,6 +1767,7 @@ void __init acpi_iort_init(void) > > } > > > > iort_init_platform_devices(); > > + iort_parse_rmr(); > > I guess initcall ordering vs. driver registration probably covers it up, > but for the sake of cleanliness I'd rather make sure the RMRs are fully > discovered *before* we create the SMMU devices that we expect to start > consuming them. > > Robin. > > > } > > > > #ifdef CONFIG_ZONE_DMA > >
> -----Original Message----- > From: Robin Murphy [mailto:robin.murphy@arm.com] > Sent: 08 October 2021 13:49 > To: Shameerali Kolothum Thodi <shameerali.kolothum.thodi@huawei.com>; > linux-arm-kernel@lists.infradead.org; linux-acpi@vger.kernel.org; > iommu@lists.linux-foundation.org > Cc: jon@solid-run.com; Linuxarm <linuxarm@huawei.com>; > steven.price@arm.com; Guohanjun (Hanjun Guo) <guohanjun@huawei.com>; > yangyicong <yangyicong@huawei.com>; Sami.Mujawar@arm.com; > will@kernel.org; wanghuiqiang <wanghuiqiang@huawei.com> > Subject: Re: [PATCH v7 2/9] ACPI/IORT: Add support for RMR node parsing > > On 2021-08-05 09:07, Shameer Kolothum wrote: > > Add support for parsing RMR node information from ACPI. > > > > Find the associated streamid and smmu node info from the > > RMR node and populate a linked list with RMR memory > > descriptors. > > > > Signed-off-by: Shameer Kolothum > <shameerali.kolothum.thodi@huawei.com> > > --- > > drivers/acpi/arm64/iort.c | 134 > +++++++++++++++++++++++++++++++++++++- > > 1 file changed, 133 insertions(+), 1 deletion(-) > > > > diff --git a/drivers/acpi/arm64/iort.c b/drivers/acpi/arm64/iort.c > > index 3b23fb775ac4..d76ba46ebe67 100644 > > --- a/drivers/acpi/arm64/iort.c > > +++ b/drivers/acpi/arm64/iort.c > > @@ -40,6 +40,8 @@ struct iort_fwnode { > > static LIST_HEAD(iort_fwnode_list); > > static DEFINE_SPINLOCK(iort_fwnode_lock); > > > > +static LIST_HEAD(iort_rmr_list); /* list of RMR regions from ACPI */ > > + > > /** > > * iort_set_fwnode() - Create iort_fwnode and use it to register > > * iommu data in the iort_fwnode_list > > @@ -393,7 +395,8 @@ static struct acpi_iort_node > *iort_node_get_id(struct acpi_iort_node *node, > > if (node->type == ACPI_IORT_NODE_NAMED_COMPONENT || > > node->type == ACPI_IORT_NODE_PCI_ROOT_COMPLEX || > > node->type == ACPI_IORT_NODE_SMMU_V3 || > > - node->type == ACPI_IORT_NODE_PMCG) { > > + node->type == ACPI_IORT_NODE_PMCG || > > + node->type == ACPI_IORT_NODE_RMR) { > > *id_out = 
map->output_base; > > return parent; > > } > > @@ -1566,6 +1569,134 @@ static void __init iort_enable_acs(struct > acpi_iort_node *iort_node) > > #else > > static inline void iort_enable_acs(struct acpi_iort_node *iort_node) { } > > #endif > > +static void iort_rmr_desc_check_overlap(struct acpi_iort_rmr_desc *desc, > u32 count) > > +{ > > + int i, j; > > + > > + for (i = 0; i < count; i++) { > > + u64 end, start = desc[i].base_address, length = desc[i].length; > > + > > + end = start + length - 1; > > + > > + /* Check for address overlap */ > > + for (j = i + 1; j < count; j++) { > > + u64 e_start = desc[j].base_address; > > + u64 e_end = e_start + desc[j].length - 1; > > + > > + if (start <= e_end && end >= e_start) > > + pr_err(FW_BUG "RMR descriptor[0x%llx - 0x%llx] overlaps, > continue anyway\n", > > + start, end); > > + } > > + } > > +} > > + > > +static void __init iort_node_get_rmr_info(struct acpi_iort_node *iort_node) > > +{ > > + struct acpi_iort_node *smmu; > > + struct acpi_iort_rmr *rmr; > > + struct acpi_iort_rmr_desc *rmr_desc; > > + u32 map_count = iort_node->mapping_count; > > + u32 sid; > > + int i; > > + > > + if (!iort_node->mapping_offset || map_count != 1) { > > + pr_err(FW_BUG "Invalid ID mapping, skipping RMR node %p\n", > > + iort_node); > > + return; > > + } > > + > > + /* Retrieve associated smmu and stream id */ > > + smmu = iort_node_get_id(iort_node, &sid, 0); > > + if (!smmu) { > > + pr_err(FW_BUG "Invalid SMMU reference, skipping RMR > node %p\n", > > + iort_node); > > + return; > > + } > > + > > + /* Retrieve RMR data */ > > + rmr = (struct acpi_iort_rmr *)iort_node->node_data; > > + if (!rmr->rmr_offset || !rmr->rmr_count) { > > + pr_err(FW_BUG "Invalid RMR descriptor array, skipping RMR > node %p\n", > > + iort_node); > > + return; > > + } > > + > > + rmr_desc = ACPI_ADD_PTR(struct acpi_iort_rmr_desc, iort_node, > > + rmr->rmr_offset); > > + > > + iort_rmr_desc_check_overlap(rmr_desc, rmr->rmr_count); > > + > > + for (i = 0; i < 
rmr->rmr_count; i++, rmr_desc++) { > > + struct iommu_resv_region *region; > > + enum iommu_resv_type type; > > + int prot = IOMMU_READ | IOMMU_WRITE; > > + u64 addr = rmr_desc->base_address, size = rmr_desc->length; > > + > > + if (!IS_ALIGNED(addr, SZ_64K) || !IS_ALIGNED(size, SZ_64K)) { > > + /* PAGE align base addr and size */ > > + addr &= PAGE_MASK; > > + size = PAGE_ALIGN(size + > offset_in_page(rmr_desc->base_address)); > > + > > + pr_err(FW_BUG "RMR descriptor[0x%llx - 0x%llx] not aligned to > 64K, continue with [0x%llx - 0x%llx]\n", > > + rmr_desc->base_address, > > + rmr_desc->base_address + rmr_desc->length - 1, > > + addr, addr + size - 1); > > + } > > + if (rmr->flags & IOMMU_RMR_REMAP_PERMITTED) { > > + type = IOMMU_RESV_DIRECT_RELAXABLE; > > + /* > > + * Set IOMMU_CACHE as IOMMU_RESV_DIRECT_RELAXABLE is > > + * normally used for allocated system memory that is > > + * then used for device specific reserved regions. > > + */ > > + prot |= IOMMU_CACHE; > > + } else { > > + type = IOMMU_RESV_DIRECT; > > + /* > > + * Set IOMMU_MMIO as IOMMU_RESV_DIRECT is normally > used > > + * for device memory like MSI doorbell. > > + */ > > + prot |= IOMMU_MMIO; > > + } > > I'm not sure we ever got a definitive answer to this - does DPAA2 > actually go wrong if we use IOMMU_MMIO here? I'd still much prefer to > make the fewest possible assumptions, since at this point it's basically > just a stop-gap until we can fix the spec. It's become clear that we > can't reliably rely on guessing attributes, so I'm not too fussed about > theoretical cases that currently don't work (due to complete lack of RMR > support) continuing to not work for the moment, as long as we can make > the real-world cases we actually have work at all. Anything which only > affects performance I'd rather leave until firmware can tell us what to do. Just to report back, we have done some basic sanity tests with IOMMU_MMIO set as default and it works for us. 
But I see that it doesn't for Jon's case. So not sure what the stop-gap can be.. Can we use the _CCA + EFI approach and override it later when the spec gets updated? Thanks, Shameer > > > + region = iommu_alloc_resv_region(addr, size, prot, type); > > + if (region) { > > + region->fw_data.rmr.flags = rmr->flags; > > + region->fw_data.rmr.sid = sid; > > + region->fw_data.rmr.smmu = smmu; > > + list_add_tail(®ion->list, &iort_rmr_list); > > + } > > + } > > +} > > + > > +static void __init iort_parse_rmr(void) > > +{ > > + struct acpi_iort_node *iort_node, *iort_end; > > + struct acpi_table_iort *iort; > > + int i; > > + > > + if (iort_table->revision < 3) > > + return; > > + > > + iort = (struct acpi_table_iort *)iort_table; > > + > > + iort_node = ACPI_ADD_PTR(struct acpi_iort_node, iort, > > + iort->node_offset); > > + iort_end = ACPI_ADD_PTR(struct acpi_iort_node, iort, > > + iort_table->length); > > + > > + for (i = 0; i < iort->node_count; i++) { > > + if (WARN_TAINT(iort_node >= iort_end, > TAINT_FIRMWARE_WORKAROUND, > > + "IORT node pointer overflows, bad table!\n")) > > + return; > > + > > + if (iort_node->type == ACPI_IORT_NODE_RMR) > > + iort_node_get_rmr_info(iort_node); > > + > > + iort_node = ACPI_ADD_PTR(struct acpi_iort_node, iort_node, > > + iort_node->length); > > + } > > +} > > > > static void __init iort_init_platform_devices(void) > > { > > @@ -1636,6 +1767,7 @@ void __init acpi_iort_init(void) > > } > > > > iort_init_platform_devices(); > > + iort_parse_rmr(); > > I guess initcall ordering vs. driver registration probably covers it up, > but for the sake of cleanliness I'd rather make sure the RMRs are fully > discovered *before* we create the SMMU devices that we expect to start > consuming them. > > Robin. > > > } > > > > #ifdef CONFIG_ZONE_DMA > >
On 2021-10-09 08:06, Jon Nettleton wrote: [...] >>> + if (rmr->flags & IOMMU_RMR_REMAP_PERMITTED) { >>> + type = IOMMU_RESV_DIRECT_RELAXABLE; >>> + /* >>> + * Set IOMMU_CACHE as IOMMU_RESV_DIRECT_RELAXABLE is >>> + * normally used for allocated system memory that is >>> + * then used for device specific reserved regions. >>> + */ >>> + prot |= IOMMU_CACHE; >>> + } else { >>> + type = IOMMU_RESV_DIRECT; >>> + /* >>> + * Set IOMMU_MMIO as IOMMU_RESV_DIRECT is normally used >>> + * for device memory like MSI doorbell. >>> + */ >>> + prot |= IOMMU_MMIO; >>> + } >> >> I'm not sure we ever got a definitive answer to this - does DPAA2 >> actually go wrong if we use IOMMU_MMIO here? I'd still much prefer to >> make the fewest possible assumptions, since at this point it's basically >> just a stop-gap until we can fix the spec. It's become clear that we >> can't reliably rely on guessing attributes, so I'm not too fussed about >> theoretical cases that currently don't work (due to complete lack of RMR >> support) continuing to not work for the moment, as long as we can make >> the real-world cases we actually have work at all. Anything which only >> affects performance I'd rather leave until firmware can tell us what to do. > > Well it isn't DPAA2, it is FSL_MC_BUS that fails with IOMMU_MMIO > mappings. DPAA2 is just one connected device. Apologies if I'm being overly loose with terminology there - my point of reference for this hardware is documentation for the old LS2080A, where the "DPAA2 Reference Manual" gives a strong impression that the MC is a component belonging to the overall DPAA2 architecture. Either way it technically stands to reason that the other DPAA2 components would only be usable if the MC itself works (unless I've been holding a major misconception about that for years as well). 
In the context of this discussion, please consider any reference I may make to bits of NXP's hardware to be shorthand for "the thing for which NXP have a vested interest in IORT RMRs". Thanks, Robin.
On Tue, Oct 12, 2021 at 10:00:24AM +0200, Jon Nettleton wrote: > On Mon, Oct 11, 2021 at 4:04 PM Robin Murphy <robin.murphy@arm.com> wrote: > > > > On 2021-10-09 08:06, Jon Nettleton wrote: > > [...] > > >>> + if (rmr->flags & IOMMU_RMR_REMAP_PERMITTED) { > > >>> + type = IOMMU_RESV_DIRECT_RELAXABLE; > > >>> + /* > > >>> + * Set IOMMU_CACHE as IOMMU_RESV_DIRECT_RELAXABLE is > > >>> + * normally used for allocated system memory that is > > >>> + * then used for device specific reserved regions. > > >>> + */ > > >>> + prot |= IOMMU_CACHE; > > >>> + } else { > > >>> + type = IOMMU_RESV_DIRECT; > > >>> + /* > > >>> + * Set IOMMU_MMIO as IOMMU_RESV_DIRECT is normally used > > >>> + * for device memory like MSI doorbell. > > >>> + */ > > >>> + prot |= IOMMU_MMIO; > > >>> + } > > >> > > >> I'm not sure we ever got a definitive answer to this - does DPAA2 > > >> actually go wrong if we use IOMMU_MMIO here? I'd still much prefer to > > >> make the fewest possible assumptions, since at this point it's basically > > >> just a stop-gap until we can fix the spec. It's become clear that we > > >> can't reliably rely on guessing attributes, so I'm not too fussed about > > >> theoretical cases that currently don't work (due to complete lack of RMR > > >> support) continuing to not work for the moment, as long as we can make > > >> the real-world cases we actually have work at all. Anything which only > > >> affects performance I'd rather leave until firmware can tell us what to do. > > > > > > Well it isn't DPAA2, it is FSL_MC_BUS that fails with IOMMU_MMIO > > > mappings. DPAA2 is just one connected device. > > > > Apologies if I'm being overly loose with terminology there - my point of > > reference for this hardware is documentation for the old LS2080A, where > > the "DPAA2 Reference Manual" gives a strong impression that the MC is a > > component belonging to the overall DPAA2 architecture. 
Either way it > > technically stands to reason that the other DPAA2 components would only > > be usable if the MC itself works (unless I've been holding a major > > misconception about that for years as well). > > > > In the context of this discussion, please consider any reference I may > > make to bits of NXP's hardware to be shorthand for "the thing for which > > NXP have a vested interest in IORT RMRs". > > Ultimately the spec doesn't mention what IOMMU properties the regions > should have. It will have to and that's what we are working on. > Even marking them as IOMMU_READ/WRITE is as much of an assumption as > using IOMMU_MMIO or IOMMU_CACHE. It just seems IOMMU_MMIO is the most > popular since all the examples use it for MSI doorbells in the > documentation. We don't merge code based on assumptions that can easily break because the specifications don't contemplate the details that are required. > I am interested why this concern is only being brought up at this point > on a patchset that has been on the mailing list for 8+ months? See above. We don't merge code that we know can break and is based on assumptions, we need to update the IORT specifications to make them cover all the use cases - in a predictable way - and that's what we are working on. > This is based on a spec that has existed from Arm since 2020 with the > most recent revisions published in Feb 2021. The lack of RMR support > in the kernel is affecting real world products, and the ability for > SystemReady ES certified systems from just fully working with recent > distributions. I answered above - if you have any questions please ask them, here, as far as Linux code is concerned. I understand this is taking a long time, it is also helping us understand all the possible use cases and how to cover them in a way that is maintainable in the long run. 
Thanks, Lorenzo > Even worse, is that without this patchset customers are forced to jump > through hoops to purposefully re-enable smmu bypass making their > systems less secure. > > How is this a good experience for customers of SystemReady hardware > when for any mainline distribution to work the first thing they have > to do is make their system less secure? > > -Jon > > > > > Thanks, > > Robin. >
On Wed, Dec 8, 2021 at 1:19 PM Lorenzo Pieralisi <lorenzo.pieralisi@arm.com> wrote: > > On Tue, Oct 12, 2021 at 10:00:24AM +0200, Jon Nettleton wrote: > > On Mon, Oct 11, 2021 at 4:04 PM Robin Murphy <robin.murphy@arm.com> wrote: > > > > > > On 2021-10-09 08:06, Jon Nettleton wrote: > > > [...] > > > >>> + if (rmr->flags & IOMMU_RMR_REMAP_PERMITTED) { > > > >>> + type = IOMMU_RESV_DIRECT_RELAXABLE; > > > >>> + /* > > > >>> + * Set IOMMU_CACHE as IOMMU_RESV_DIRECT_RELAXABLE is > > > >>> + * normally used for allocated system memory that is > > > >>> + * then used for device specific reserved regions. > > > >>> + */ > > > >>> + prot |= IOMMU_CACHE; > > > >>> + } else { > > > >>> + type = IOMMU_RESV_DIRECT; > > > >>> + /* > > > >>> + * Set IOMMU_MMIO as IOMMU_RESV_DIRECT is normally used > > > >>> + * for device memory like MSI doorbell. > > > >>> + */ > > > >>> + prot |= IOMMU_MMIO; > > > >>> + } > > > >> > > > >> I'm not sure we ever got a definitive answer to this - does DPAA2 > > > >> actually go wrong if we use IOMMU_MMIO here? I'd still much prefer to > > > >> make the fewest possible assumptions, since at this point it's basically > > > >> just a stop-gap until we can fix the spec. It's become clear that we > > > >> can't reliably rely on guessing attributes, so I'm not too fussed about > > > >> theoretical cases that currently don't work (due to complete lack of RMR > > > >> support) continuing to not work for the moment, as long as we can make > > > >> the real-world cases we actually have work at all. Anything which only > > > >> affects performance I'd rather leave until firmware can tell us what to do. > > > > > > > > Well it isn't DPAA2, it is FSL_MC_BUS that fails with IOMMU_MMIO > > > > mappings. DPAA2 is just one connected device. 
> > > > > > Apologies if I'm being overly loose with terminology there - my point of > > > reference for this hardware is documentation for the old LS2080A, where > > > the "DPAA2 Reference Manual" gives a strong impression that the MC is a > > > component belonging to the overall DPAA2 architecture. Either way it > > > technically stands to reason that the other DPAA2 components would only > > > be usable if the MC itself works (unless I've been holding a major > > > misconception about that for years as well). > > > > > > In the context of this discussion, please consider any reference I may > > > make to bits of NXP's hardware to be shorthand for "the thing for which > > > NXP have a vested interest in IORT RMRs". > > > > Ultimately the spec doesn't mention what IOMMU properties the regions > > should have. > > It will have to and that's what we are working on. Where is this being worked on? I see no open tickets for this. > > > Even marking them as IOMMU_READ/WRITE is as much of an assumption as > > using IOMMU_MMIO or IOMMU_CACHE. It just seems IOMMU_MMIO is the most > > popular since all the examples use it for MSI doorbells in the > > documentation. > > We don't merge code based on assumptions that can easily break because > the specifications don't contemplate the details that are required. > > > I am interested why this concern is only being brought up at this point > > on a patchset that has been on the mailing list for 8+ months? > > See above. We don't merge code that we know can break and is based on > assumptions, we need to update the IORT specifications to make them > cover all the use cases - in a predictable way - and that's what we are > working on. This is not really an answer to the question. The latest version of the IORT RMR spec was published in Feb 2021. Why was this issue not brought up with Rev 1 of this patchset? Instead you have wasted 10 months of developer and customer time. 
This could have easily been turned into a code first spec change request, which is a valid option for ACPI changes. > > > This is based on a spec that has existed from Arm since 2020 with the > > most recent revisions published in Feb 2021. The lack of RMR support > > in the kernel is affecting real world products, and the ability for > > SystemReady ES certified systems from just fully working with recent > > distributions. > > I answered above - if you have any questions please ask them, here, > as far as Linux code is concerned. > > I understand this is taking a long time, it is also helping us > understand all the possible use cases and how to cover them in > a way that is maintainable in the long run. Every month that this patchset has sat being unattended by the maintainers is another kernel dev cycle missed, it is another distribution release where users need to add hackish kernel command-line options to disable security features that were forced on by default. Not to mention Linux is just one platform. What if other platforms have already adopted the existing spec? These are Arm specs and Arm maintainers and yet nobody seems to agree on anything and absolutely nothing has been achieved except wasting the time of Shameer, myself, our companies, and our customers. -Jon > > Thanks, > Lorenzo > > > Even worse, is that without this patchset customers are forced to jump > > through hoops to purposefully re-enable smmu bypass making their > > systems less secure. > > > > How is this a good experience for customers of SystemReady hardware > > when for any mainline distribution to work the first thing they have > > to do is make their system less secure? > > > > -Jon > > > > > > > > Thanks, > > > Robin. > >
Jon, On 2021-12-08 13:26, Jon Nettleton wrote: [...] >>> Even marking them as IOMMU_READ/WRITE is as much of an assumption as >>> using IOMMU_MMIO or IOMMU_CACHE. It just seems IOMMU_MMIO is the most >>> popular since all the examples use it for MSI doorbells in the >>> documentation. >> >> We don't merge code based on assumptions that can easily break because >> the specifications don't contemplate the details that are required. >> >>> I am interested why this concern is only being brought up at this point >>> on a patchset that has been on the mailing list for 8+ months? >> >> See above. We don't merge code that we know can break and is based on >> assumptions, we need to update the IORT specifications to make them >> cover all the use cases - in a predictable way - and that's what we are >> working on. > > This is not really an answer to the question. The latest version of the > IORT RMR spec was published in Feb 2021. Why was this issue not > brought up with Rev 1 of this patchset? Instead you have wasted > 10 months of developer and customer time. This could have easily been > turned into a code first spec change request, which is a valid option > for ACPI changes. It was only on v5 of the patchset - *six months* after the original RFC posting - that anyone even first started to question the initial assumptions made about attributes[1], and even then somebody familiar countered that it didn't appear to matter[2]. Sorry, but you don't get to U-turn and throw unjust shade at Arm for not being prescient. Yes, when those of us within Arm set out the initial RMR spec, an assumption was made that it seemed reasonable for an OS to simply pick some default strong memory type (Device or Normal-NC) and full permissions if it did need to map RMRs at stage 1. That spec was reviewed and published externally and no interested parties came forth asking "hey, what about attributes?". 
Linux patches were written around that assumption and proceeded through many rounds of review until we eventually received sufficient feedback to demonstrate that the assumption did not in fact hold well enough in general and there seemed to be a genuine need for RMR attributes, and at that point we started work on revising the spec. In the meantime, these patches have sat at v7 for four months - the *other* outstanding review comments have not been addressed; I still don't recall seeing an answer about whether LX2160 or anything else currently deployed actually *needs* cacheable mappings or whether it could muddle through with the IOMMU_MMIO assumption until proper "RMR v2" support arrived later; even if so, an interim workaround specific to LX2160 could have been proposed but hasn't. It is hardly reasonable to pretend that Arm or the upstream maintainers are responsible for a lack of development activity on the part of the submitters, no matter how much blatant misinformation is repeated on Twitter. Regards, Robin. [1] https://lore.kernel.org/linux-iommu/13c2499e-cc0c-d395-0d60-6c3437f206ac@nxp.com/ [2] https://lore.kernel.org/linux-iommu/CABdtJHv2QBHNoWTyp51H-J_apc75imPj0FbrV70Tm8xuNjpiTA@mail.gmail.com/ >> >>> This is based on a spec that has existed from Arm since 2020 with the >>> most recent revisions published in Feb 2021. The lack of RMR support >>> in the kernel is affecting real world products, and the ability for >>> SystemReady ES certified systems from just fully working with recent >>> distributions. >> >> I answered above - if you have any questions please ask them, here, >> as far as Linux code is concerned. >> >> I understand this is taking a long time, it is also helping us >> understand all the possible use cases and how to cover them in >> a way that is maintainable in the long run. 
> > Every month that this patchset has sat being unattended by the > maintainers is another kernel dev cycle missed, it is another > another distribution release where users need to add hackish > kernel command-line options to disable security features that > were forced on by default. Not to mention Linux is just one > platform. What if other platforms have already adopted the > existing spec? These are Arm specs and Arm maintainers and > yet nobody seems to agree on anything and absolutely nothing > has been achieved except wasting the time of Shameer, myself, > our companies, and our customers. > > -Jon > >> >> Thanks, >> Lorenzo >> >>> Even worse, is that without this patchset customers are forced to jump >>> through hoops to purposefully re-enable smmu bypass making their >>> systems less secure. >>> >>> How is this a good experience for customers of SystemReady hardware >>> when for any mainline distribution to work the first thing they have >>> to do is make their system less secure? >>> >>> -Jon >>> >>>> >>>> Thanks, >>>> Robin. >>>
On Wed, Dec 8, 2021 at 3:37 PM Robin Murphy <robin.murphy@arm.com> wrote: > > Jon, > > On 2021-12-08 13:26, Jon Nettleton wrote: > [...] > >>> Even marking them as IOMMU_READ/WRITE is as much of an assumption as > >>> using IOMMU_MMIO or IOMMU_CACHE. It just seems IOMMU_MMIO is the most > >>> popular since all the examples use it for MSI doorbells in the > >>> documentation. > >> > >> We don't merge code based on assumptions that can easily break because > >> the specifications don't contemplate the details that are required. > >> > >>> I am interested why this concern is only being brought up at this point > >>> on a patchset that has been on the mailing list for 8+ months? > >> > >> See above. We don't merge code that we know can break and is based on > >> assumptions, we need to update the IORT specifications to make them > >> cover all the use cases - in a predictable way - and that's what we are > >> working on. > > > > This is not really an answer to the question. The latest version of the > > IORT RMR spec was published in Feb 2021. Why was this issue not > > brought up with Rev 1 of this patchset? Instead you have wasted > > 10 months of developer and customer time. This could have easily been > > turned into a code first spec change request, which is a valid option > > for ACPI changes. > > It was only on v5 of the patchset - *six months* after the original RFC > posting - that anyone even first started to question the initial > assumptions made about attributes[1], and even then somebody familiar > countered that it didn't appear to matter[2]. Sorry, but you don't get > to U-turn and throw unjust shade at Arm for not being prescient. > > Yes, when those of us within Arm set out the initial RMR spec, an > assumption was made that it seemed reasonable for an OS to simply pick > some default strong memory type (Device or Normal-NC) and full > permissions if it did need to map RMRs at stage 1. 
That spec was > reviewed and published externally and no interested parties came forth > asking "hey, what about attributes?". Linux patches were written around > that assumption and proceeded through many rounds of review until we > eventually received sufficient feedback to demonstrate that the > assumption did not in fact hold well enough in general and there seemed > to be a genuine need for RMR attributes, and at that point we started > work on revising the spec. Was it documented anywhere that the RMR spec mandated Device or Normal-NC memory attributes? I have read through the spec pretty thoroughly and not seen this requirement documented anywhere. Also please feel free to point out where we can find information regarding how the spec is being revised. I am on causeway and in all the SC meetings and haven't seen this topic brought up at all. > > In the meantime, these patches have sat at v7 for four months - the > *other* outstanding review comments have not been addressed; I still > don't recall seeing an answer about whether LX2160 or anything else > currently deployed actually *needs* cacheable mappings or whether it > could muddle through with the IOMMU_MMIO assumption until proper "RMR > v2" support arrived later; even if so, an interim workaround specific to > LX2160 could have been proposed but hasn't. It is hardly reasonable to > pretend that Arm or the upstream maintainers are responsible for a lack > of development activity on the part of the submitters, no matter how > much blatant misinformation is repeated on Twitter. Oh the "other" comments where after 7 series of patches you decided that the approach that was agreed upon on the mailing list was no longer to your liking? Not to mention the month the patchset sat idle after initial comments from Ard that were cleared up, when I pinged the thread and it was ignored. 
If there was some sort of prompt response on the threads by the maintainers with accurate information about why the patches were being held up, or that they were working on a new spec maybe developers would have bothered to push the patchset forward. There is no misinformation on Twitter. After 7 series on a patchset after initial discussion that it would be designed so device-tree could leverage the backend work, you just changed your mind, and basically sent everything back to the start. Meanwhile only now in this thread are we finding out that the spec is getting re-worked again, which means that we will need to update our firmware, and wait for someone to write patches for the new spec, because guess what?... Arm didn't write the patches for any of the initial specs. Arm maintainers should be helping to find ways to get Arm "specifications", integrated for SystemReady customers to use. Instead we get delayed responses, about right turns on the path of the patches, or just outright ignored for a month at a time. Maybe if Arm developers had actually written the patches when the spec was released there wouldn't have been almost a year of wasted time by the hardware manufacturers actually trying to build and deploy products. -Jon > > Regards, > Robin. > > [1] > https://lore.kernel.org/linux-iommu/13c2499e-cc0c-d395-0d60-6c3437f206ac@nxp.com/ > [2] > https://lore.kernel.org/linux-iommu/CABdtJHv2QBHNoWTyp51H-J_apc75imPj0FbrV70Tm8xuNjpiTA@mail.gmail.com/ > > >> > >>> This is based on a spec that has existed from Arm since 2020 with the > >>> most recent revisions published in Feb 2021. The lack of RMR support > >>> in the kernel is affecting real world products, and the ability for > >>> SystemReady ES certified systems from just fully working with recent > >>> distributions. > >> > >> I answered above - if you have any questions please ask them, here, > >> as far as Linux code is concerned. 
> >> > >> I understand this is taking a long time, it is also helping us > >> understand all the possible use cases and how to cover them in > >> a way that is maintainable in the long run. > > > > Every month that this patchset has sat being unattended by the > > maintainers is another kernel dev cycle missed, it is another > > another distribution release where users need to add hackish > > kernel command-line options to disable security features that > > were forced on by default. Not to mention Linux is just one > > platform. What if other platforms have already adopted the > > existing spec? These are Arm specs and Arm maintainers and > > yet nobody seems to agree on anything and absolutely nothing > > has been achieved except wasting the time of Shameer, myself, > > our companies, and our customers. > > > > -Jon > > > >> > >> Thanks, > >> Lorenzo > >> > >>> Even worse, is that without this patchset customers are forced to jump > >>> through hoops to purposefully re-enable smmu bypass making their > >>> systems less secure. > >>> > >>> How is this a good experience for customers of SystemReady hardware > >>> when for any mainline distribution to work the first thing they have > >>> to do is make their system less secure? > >>> > >>> -Jon > >>> > >>>> > >>>> Thanks, > >>>> Robin. > >>>
diff --git a/drivers/acpi/arm64/iort.c b/drivers/acpi/arm64/iort.c index 3b23fb775ac4..d76ba46ebe67 100644 --- a/drivers/acpi/arm64/iort.c +++ b/drivers/acpi/arm64/iort.c @@ -40,6 +40,8 @@ struct iort_fwnode { static LIST_HEAD(iort_fwnode_list); static DEFINE_SPINLOCK(iort_fwnode_lock); +static LIST_HEAD(iort_rmr_list); /* list of RMR regions from ACPI */ + /** * iort_set_fwnode() - Create iort_fwnode and use it to register * iommu data in the iort_fwnode_list @@ -393,7 +395,8 @@ static struct acpi_iort_node *iort_node_get_id(struct acpi_iort_node *node, if (node->type == ACPI_IORT_NODE_NAMED_COMPONENT || node->type == ACPI_IORT_NODE_PCI_ROOT_COMPLEX || node->type == ACPI_IORT_NODE_SMMU_V3 || - node->type == ACPI_IORT_NODE_PMCG) { + node->type == ACPI_IORT_NODE_PMCG || + node->type == ACPI_IORT_NODE_RMR) { *id_out = map->output_base; return parent; } @@ -1566,6 +1569,134 @@ static void __init iort_enable_acs(struct acpi_iort_node *iort_node) #else static inline void iort_enable_acs(struct acpi_iort_node *iort_node) { } #endif +static void iort_rmr_desc_check_overlap(struct acpi_iort_rmr_desc *desc, u32 count) +{ + int i, j; + + for (i = 0; i < count; i++) { + u64 end, start = desc[i].base_address, length = desc[i].length; + + end = start + length - 1; + + /* Check for address overlap */ + for (j = i + 1; j < count; j++) { + u64 e_start = desc[j].base_address; + u64 e_end = e_start + desc[j].length - 1; + + if (start <= e_end && end >= e_start) + pr_err(FW_BUG "RMR descriptor[0x%llx - 0x%llx] overlaps, continue anyway\n", + start, end); + } + } +} + +static void __init iort_node_get_rmr_info(struct acpi_iort_node *iort_node) +{ + struct acpi_iort_node *smmu; + struct acpi_iort_rmr *rmr; + struct acpi_iort_rmr_desc *rmr_desc; + u32 map_count = iort_node->mapping_count; + u32 sid; + int i; + + if (!iort_node->mapping_offset || map_count != 1) { + pr_err(FW_BUG "Invalid ID mapping, skipping RMR node %p\n", + iort_node); + return; + } + + /* Retrieve associated smmu and 
stream id */ + smmu = iort_node_get_id(iort_node, &sid, 0); + if (!smmu) { + pr_err(FW_BUG "Invalid SMMU reference, skipping RMR node %p\n", + iort_node); + return; + } + + /* Retrieve RMR data */ + rmr = (struct acpi_iort_rmr *)iort_node->node_data; + if (!rmr->rmr_offset || !rmr->rmr_count) { + pr_err(FW_BUG "Invalid RMR descriptor array, skipping RMR node %p\n", + iort_node); + return; + } + + rmr_desc = ACPI_ADD_PTR(struct acpi_iort_rmr_desc, iort_node, + rmr->rmr_offset); + + iort_rmr_desc_check_overlap(rmr_desc, rmr->rmr_count); + + for (i = 0; i < rmr->rmr_count; i++, rmr_desc++) { + struct iommu_resv_region *region; + enum iommu_resv_type type; + int prot = IOMMU_READ | IOMMU_WRITE; + u64 addr = rmr_desc->base_address, size = rmr_desc->length; + + if (!IS_ALIGNED(addr, SZ_64K) || !IS_ALIGNED(size, SZ_64K)) { + /* PAGE align base addr and size */ + addr &= PAGE_MASK; + size = PAGE_ALIGN(size + offset_in_page(rmr_desc->base_address)); + + pr_err(FW_BUG "RMR descriptor[0x%llx - 0x%llx] not aligned to 64K, continue with [0x%llx - 0x%llx]\n", + rmr_desc->base_address, + rmr_desc->base_address + rmr_desc->length - 1, + addr, addr + size - 1); + } + if (rmr->flags & IOMMU_RMR_REMAP_PERMITTED) { + type = IOMMU_RESV_DIRECT_RELAXABLE; + /* + * Set IOMMU_CACHE as IOMMU_RESV_DIRECT_RELAXABLE is + * normally used for allocated system memory that is + * then used for device specific reserved regions. + */ + prot |= IOMMU_CACHE; + } else { + type = IOMMU_RESV_DIRECT; + /* + * Set IOMMU_MMIO as IOMMU_RESV_DIRECT is normally used + * for device memory like MSI doorbell. 
+ */ + prot |= IOMMU_MMIO; + } + + region = iommu_alloc_resv_region(addr, size, prot, type); + if (region) { + region->fw_data.rmr.flags = rmr->flags; + region->fw_data.rmr.sid = sid; + region->fw_data.rmr.smmu = smmu; + list_add_tail(®ion->list, &iort_rmr_list); + } + } +} + +static void __init iort_parse_rmr(void) +{ + struct acpi_iort_node *iort_node, *iort_end; + struct acpi_table_iort *iort; + int i; + + if (iort_table->revision < 3) + return; + + iort = (struct acpi_table_iort *)iort_table; + + iort_node = ACPI_ADD_PTR(struct acpi_iort_node, iort, + iort->node_offset); + iort_end = ACPI_ADD_PTR(struct acpi_iort_node, iort, + iort_table->length); + + for (i = 0; i < iort->node_count; i++) { + if (WARN_TAINT(iort_node >= iort_end, TAINT_FIRMWARE_WORKAROUND, + "IORT node pointer overflows, bad table!\n")) + return; + + if (iort_node->type == ACPI_IORT_NODE_RMR) + iort_node_get_rmr_info(iort_node); + + iort_node = ACPI_ADD_PTR(struct acpi_iort_node, iort_node, + iort_node->length); + } +} static void __init iort_init_platform_devices(void) { @@ -1636,6 +1767,7 @@ void __init acpi_iort_init(void) } iort_init_platform_devices(); + iort_parse_rmr(); } #ifdef CONFIG_ZONE_DMA
Add support for parsing RMR node information from ACPI. Find the associated stream ID and SMMU node info from the RMR node and populate a linked list with RMR memory descriptors. Signed-off-by: Shameer Kolothum <shameerali.kolothum.thodi@huawei.com> --- drivers/acpi/arm64/iort.c | 134 +++++++++++++++++++++++++++++++++++++- 1 file changed, 133 insertions(+), 1 deletion(-) -- 2.17.1