@@ -201,15 +201,6 @@ static long lmb_add_region_flags(struct alist *lmb_rgn_lst, phys_addr_t base,
phys_addr_t rgnbase = rgn[i].base;
phys_size_t rgnsize = rgn[i].size;
phys_size_t rgnflags = rgn[i].flags;
- phys_addr_t end = base + size - 1;
- phys_addr_t rgnend = rgnbase + rgnsize - 1;
- if (rgnbase <= base && end <= rgnend) {
- if (flags == rgnflags)
- /* Already have this region, so we're done */
- return 0;
- else
- return -1; /* regions with new flags */
- }
ret = lmb_addrs_adjacent(base, size, rgnbase, rgnsize);
if (ret > 0) {
@@ -529,6 +529,26 @@ static int test_alloc_addr(struct unit_test_state *uts, const phys_addr_t ram)
ret = lmb_add(ram, ram_size);
ut_asserteq(ret, 0);
+ /* Try to allocate a page twice */
+ b = lmb_alloc_addr_flags(alloc_addr_a, 0x1000, LMB_NONE);
+ ut_asserteq(b, alloc_addr_a);
+ b = lmb_alloc_addr_flags(alloc_addr_a, 0x1000, LMB_NOOVERWRITE);
+ ut_asserteq(b, 0);
+ b = lmb_alloc_addr_flags(alloc_addr_a, 0x1000, LMB_NONE);
+ ut_asserteq(b, alloc_addr_a);
+ b = lmb_alloc_addr_flags(alloc_addr_a, 0x2000, LMB_NONE);
+ ut_asserteq(b, alloc_addr_a);
+ ret = lmb_free(alloc_addr_a, 0x2000);
+ ut_asserteq(ret, 0);
+ b = lmb_alloc_addr_flags(alloc_addr_a, 0x1000, LMB_NOOVERWRITE);
+ ut_asserteq(b, alloc_addr_a);
+ b = lmb_alloc_addr_flags(alloc_addr_a, 0x1000, LMB_NONE);
+ ut_asserteq(b, 0);
+ b = lmb_alloc_addr_flags(alloc_addr_a, 0x1000, LMB_NOOVERWRITE);
+ ut_asserteq(b, 0);
+ ret = lmb_free(alloc_addr_a, 0x1000);
+ ut_asserteq(ret, 0);
+
/* reserve 3 blocks */
ret = lmb_reserve(alloc_addr_a, 0x10000);
ut_asserteq(ret, 0);
@@ -734,7 +754,7 @@ static int lib_test_lmb_flags(struct unit_test_state *uts)
/* reserve again, same flag */
ret = lmb_reserve_flags(0x40010000, 0x10000, LMB_NOMAP);
- ut_asserteq(ret, 0);
+ ut_asserteq(ret, -1L);
ASSERT_LMB(mem_lst, used_lst, ram, ram_size, 1, 0x40010000, 0x10000,
0, 0, 0, 0);
At the moment the LMB allocator will return 'success' immediately on two consecutive allocations if the second one is smaller and the flags match, without resizing the reserved area. This is problematic for two reasons: first of all, the new updated allocation won't update the size and we end up holding more memory than needed, but most importantly it breaks the EFI SCT tests, since EFI now allocates via LMB. More specifically, when EFI requests a specific address twice with the EFI_ALLOCATE_ADDRESS flag set, the first allocation will succeed and update the EFI memory map. Due to the LMB behavior the second allocation will also succeed, but the address ranges are already in the EFI memory map due to the first allocation. EFI will then fail to update the memory map, returning EFI_OUT_OF_RESOURCES instead of EFI_NOT_FOUND, which breaks EFI conformance. So let's remove the fast check, which is problematic anyway, and let LMB resize and calculate the address properly. LMB will now - try to resize the reservations for LMB_NONE - return -1 if the memory is not LMB_NONE and already reserved The LMB code needs some cleanup in that part, but since we are close to 2025.01, do the easy fix and plan to refactor it later. Also update the dm tests with the new behavior. Fixes: commit 22f2c9ed9f53 ("efi: memory: use the lmb API's for allocating and freeing memory") Signed-off-by: Ilias Apalodimas <ilias.apalodimas@linaro.org> --- Changes since v1: - check for -1 instead of 0xffffffff in lmb_reserve_flags() lib/lmb.c | 9 --------- test/lib/lmb.c | 22 +++++++++++++++++++++- 2 files changed, 21 insertions(+), 10 deletions(-) -- 2.45.2