In the swap free path, certain steps (cgroup uncharging and shadow clearing) will eventually be handled at the virtual swap layer. To facilitate that change, hoist these calls to the top of their respective callers, swap_entry_range_free() and swap_range_free(). There should not be any functional change.

Signed-off-by: Nhat Pham <nphamcs@gmail.com>
---
 mm/swapfile.c | 6 ++++--
 1 file changed, 4 insertions(+), 2 deletions(-)

diff --git a/mm/swapfile.c b/mm/swapfile.c
--- a/mm/swapfile.c
+++ b/mm/swapfile.c
@@ -1129,6 +1129,8 @@ static void swap_range_free(struct swap_info_struct *si, unsigned long offset,
void (*swap_slot_free_notify)(struct block_device *, unsigned long);
unsigned int i;

+ clear_shadow_from_swap_cache(si->type, begin, end);
+
/*
* Use atomic clear_bit operations only on zeromap instead of non-atomic
* bitmap_clear to prevent adjacent bits corruption due to simultaneous writes.
@@ -1149,7 +1151,6 @@ static void swap_range_free(struct swap_info_struct *si, unsigned long offset,
swap_slot_free_notify(si->bdev, offset);
offset++;
}
- clear_shadow_from_swap_cache(si->type, begin, end);

/*
* Make sure that try_to_unuse() observes si->inuse_pages reaching 0
@@ -1502,6 +1503,8 @@ static void swap_entry_range_free(struct swap_info_struct *si,
unsigned char *map = si->swap_map + offset;
unsigned char *map_end = map + nr_pages;

+ mem_cgroup_uncharge_swap(entry, nr_pages);
+
/* It should never free entries across different clusters */
VM_BUG_ON(ci != offset_to_cluster(si, offset + nr_pages - 1));
VM_BUG_ON(cluster_is_empty(ci));
@@ -1513,7 +1516,6 @@ static void swap_entry_range_free(struct swap_info_struct *si,
*map = 0;
} while (++map < map_end);

- mem_cgroup_uncharge_swap(entry, nr_pages);
swap_range_free(si, offset, nr_pages);

if (!ci->count)
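
For reviewers' convenience, a standalone sketch of the resulting call ordering follows. This is not kernel code and not part of the patch: every helper below is a printf stub with simplified signatures (the real functions take struct swap_info_struct * and swp_entry_t arguments), and all bookkeeping the patch does not move is elided. It only models the reordering, i.e. that the steps destined for the virtual swap layer now run first in each caller.

#include <stdio.h>

static void clear_shadow_from_swap_cache(int type, unsigned long begin,
					 unsigned long end)
{
	printf("clear_shadow_from_swap_cache(type=%d, %lu..%lu)\n",
	       type, begin, end);
}

static void mem_cgroup_uncharge_swap(unsigned long entry, unsigned int nr_pages)
{
	printf("mem_cgroup_uncharge_swap(entry=%lu, nr=%u)\n", entry, nr_pages);
}

/* Models swap_range_free() after this patch: shadow clearing comes first. */
static void swap_range_free(int type, unsigned long offset,
			    unsigned int nr_entries)
{
	clear_shadow_from_swap_cache(type, offset, offset + nr_entries - 1);
	/* ...zeromap clearing, swap_slot_free_notify(), etc. (elided)... */
	printf("swap_range_free: slot bookkeeping for %u entries\n", nr_entries);
}

/* Models swap_entry_range_free() after this patch: uncharge comes first. */
static void swap_entry_range_free(int type, unsigned long offset,
				  unsigned int nr_pages)
{
	mem_cgroup_uncharge_swap(offset, nr_pages);
	/* ...swap_map clearing (elided)... */
	swap_range_free(type, offset, nr_pages);
}

int main(void)
{
	swap_entry_range_free(0, 42, 1);
	return 0;
}

Running the sketch prints the post-patch order: mem_cgroup_uncharge_swap(), then clear_shadow_from_swap_cache(), then the remaining per-slot bookkeeping.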