@@ -124,11 +124,6 @@ static struct swap_info_struct *swap_type_to_swap_info(int type)
return READ_ONCE(swap_info[type]); /* rcu_dereference() */
}

-static inline unsigned char swap_count(unsigned char ent)
-{
- return ent & ~SWAP_HAS_CACHE; /* may include COUNT_CONTINUED flag */
-}
-
/*
* Use the second highest bit of inuse_pages counter as the indicator
* if one swap device is on the available plist, so the atomic can
@@ -161,6 +156,11 @@ static long swap_usage_in_pages(struct swap_info_struct *si)
/* Reclaim directly, bypass the slot cache and don't touch device lock */
#define TTRS_DIRECT 0x8

+static inline unsigned char swap_count(unsigned char ent)
+{
+ return ent & ~SWAP_HAS_CACHE; /* may include COUNT_CONTINUED flag */
+}
+
static bool swap_is_has_cache(struct swap_info_struct *si,
unsigned long offset, int nr_pages)
{
@@ -1326,46 +1326,6 @@ static struct swap_info_struct *_swap_info_get(swp_entry_t entry)
return NULL;
}

-static unsigned char __swap_entry_free_locked(struct swap_info_struct *si,
- unsigned long offset,
- unsigned char usage)
-{
- unsigned char count;
- unsigned char has_cache;
-
- count = si->swap_map[offset];
-
- has_cache = count & SWAP_HAS_CACHE;
- count &= ~SWAP_HAS_CACHE;
-
- if (usage == SWAP_HAS_CACHE) {
- VM_BUG_ON(!has_cache);
- has_cache = 0;
- } else if (count == SWAP_MAP_SHMEM) {
- /*
- * Or we could insist on shmem.c using a special
- * swap_shmem_free() and free_shmem_swap_and_cache()...
- */
- count = 0;
- } else if ((count & ~COUNT_CONTINUED) <= SWAP_MAP_MAX) {
- if (count == COUNT_CONTINUED) {
- if (swap_count_continued(si, offset, count))
- count = SWAP_MAP_MAX | COUNT_CONTINUED;
- else
- count = SWAP_MAP_MAX;
- } else
- count--;
- }
-
- usage = count | has_cache;
- if (usage)
- WRITE_ONCE(si->swap_map[offset], usage);
- else
- WRITE_ONCE(si->swap_map[offset], SWAP_HAS_CACHE);
-
- return usage;
-}
-
/*
* When we get a swap entry, if there aren't some other ways to
* prevent swapoff, such as the folio in swap cache is locked, RCU
@@ -1432,6 +1392,46 @@ struct swap_info_struct *get_swap_device(swp_entry_t entry)
return NULL;
}

+static unsigned char __swap_entry_free_locked(struct swap_info_struct *si,
+ unsigned long offset,
+ unsigned char usage)
+{
+ unsigned char count;
+ unsigned char has_cache;
+
+ count = si->swap_map[offset];
+
+ has_cache = count & SWAP_HAS_CACHE;
+ count &= ~SWAP_HAS_CACHE;
+
+ if (usage == SWAP_HAS_CACHE) {
+ VM_BUG_ON(!has_cache);
+ has_cache = 0;
+ } else if (count == SWAP_MAP_SHMEM) {
+ /*
+ * Or we could insist on shmem.c using a special
+ * swap_shmem_free() and free_shmem_swap_and_cache()...
+ */
+ count = 0;
+ } else if ((count & ~COUNT_CONTINUED) <= SWAP_MAP_MAX) {
+ if (count == COUNT_CONTINUED) {
+ if (swap_count_continued(si, offset, count))
+ count = SWAP_MAP_MAX | COUNT_CONTINUED;
+ else
+ count = SWAP_MAP_MAX;
+ } else
+ count--;
+ }
+
+ usage = count | has_cache;
+ if (usage)
+ WRITE_ONCE(si->swap_map[offset], usage);
+ else
+ WRITE_ONCE(si->swap_map[offset], SWAP_HAS_CACHE);
+
+ return usage;
+}
+
static unsigned char __swap_entry_free(struct swap_info_struct *si,
swp_entry_t entry)
{
@@ -1585,25 +1585,6 @@ void put_swap_folio(struct folio *folio, swp_entry_t entry)
unlock_cluster(ci);
}

-void swapcache_free_entries(swp_entry_t *entries, int n)
-{
- int i;
- struct swap_cluster_info *ci;
- struct swap_info_struct *si = NULL;
-
- if (n <= 0)
- return;
-
- for (i = 0; i < n; ++i) {
- si = _swap_info_get(entries[i]);
- if (si) {
- ci = lock_cluster(si, swp_offset(entries[i]));
- swap_entry_range_free(si, ci, entries[i], 1);
- unlock_cluster(ci);
- }
- }
-}
-
int __swap_count(swp_entry_t entry)
{
struct swap_info_struct *si = swp_swap_info(entry);
@@ -1717,57 +1698,6 @@ static bool folio_swapped(struct folio *folio)
return swap_page_trans_huge_swapped(si, entry, folio_order(folio));
}

-static bool folio_swapcache_freeable(struct folio *folio)
-{
- VM_BUG_ON_FOLIO(!folio_test_locked(folio), folio);
-
- if (!folio_test_swapcache(folio))
- return false;
- if (folio_test_writeback(folio))
- return false;
-
- /*
- * Once hibernation has begun to create its image of memory,
- * there's a danger that one of the calls to folio_free_swap()
- * - most probably a call from __try_to_reclaim_swap() while
- * hibernation is allocating its own swap pages for the image,
- * but conceivably even a call from memory reclaim - will free
- * the swap from a folio which has already been recorded in the
- * image as a clean swapcache folio, and then reuse its swap for
- * another page of the image. On waking from hibernation, the
- * original folio might be freed under memory pressure, then
- * later read back in from swap, now with the wrong data.
- *
- * Hibernation suspends storage while it is writing the image
- * to disk so check that here.
- */
- if (pm_suspended_storage())
- return false;
-
- return true;
-}
-
-/**
- * folio_free_swap() - Free the swap space used for this folio.
- * @folio: The folio to remove.
- *
- * If swap is getting full, or if there are no more mappings of this folio,
- * then call folio_free_swap to free its swap space.
- *
- * Return: true if we were able to release the swap space.
- */
-bool folio_free_swap(struct folio *folio)
-{
- if (!folio_swapcache_freeable(folio))
- return false;
- if (folio_swapped(folio))
- return false;
-
- delete_from_swap_cache(folio);
- folio_set_dirty(folio);
- return true;
-}
-
/**
* free_swap_and_cache_nr() - Release reference on range of swap entries and
* reclaim their cache if no more references remain.
@@ -1842,6 +1772,76 @@ void free_swap_and_cache_nr(swp_entry_t entry, int nr)
put_swap_device(si);
}

+void swapcache_free_entries(swp_entry_t *entries, int n)
+{
+ int i;
+ struct swap_cluster_info *ci;
+ struct swap_info_struct *si = NULL;
+
+ if (n <= 0)
+ return;
+
+ for (i = 0; i < n; ++i) {
+ si = _swap_info_get(entries[i]);
+ if (si) {
+ ci = lock_cluster(si, swp_offset(entries[i]));
+ swap_entry_range_free(si, ci, entries[i], 1);
+ unlock_cluster(ci);
+ }
+ }
+}
+
+static bool folio_swapcache_freeable(struct folio *folio)
+{
+ VM_BUG_ON_FOLIO(!folio_test_locked(folio), folio);
+
+ if (!folio_test_swapcache(folio))
+ return false;
+ if (folio_test_writeback(folio))
+ return false;
+
+ /*
+ * Once hibernation has begun to create its image of memory,
+ * there's a danger that one of the calls to folio_free_swap()
+ * - most probably a call from __try_to_reclaim_swap() while
+ * hibernation is allocating its own swap pages for the image,
+ * but conceivably even a call from memory reclaim - will free
+ * the swap from a folio which has already been recorded in the
+ * image as a clean swapcache folio, and then reuse its swap for
+ * another page of the image. On waking from hibernation, the
+ * original folio might be freed under memory pressure, then
+ * later read back in from swap, now with the wrong data.
+ *
+ * Hibernation suspends storage while it is writing the image
+ * to disk so check that here.
+ */
+ if (pm_suspended_storage())
+ return false;
+
+ return true;
+}
+
+/**
+ * folio_free_swap() - Free the swap space used for this folio.
+ * @folio: The folio to remove.
+ *
+ * If swap is getting full, or if there are no more mappings of this folio,
+ * then call folio_free_swap to free its swap space.
+ *
+ * Return: true if we were able to release the swap space.
+ */
+bool folio_free_swap(struct folio *folio)
+{
+ if (!folio_swapcache_freeable(folio))
+ return false;
+ if (folio_swapped(folio))
+ return false;
+
+ delete_from_swap_cache(folio);
+ folio_set_dirty(folio);
+ return true;
+}
+
#ifdef CONFIG_HIBERNATION

swp_entry_t get_swap_page_of_type(int type)
@@ -1957,6 +1957,37 @@ unsigned int count_swap_pages(int type, int free)
}
#endif /* CONFIG_HIBERNATION */

+/*
+ * Scan swap_map from current position to next entry still in use.
+ * Return 0 if there are no inuse entries after prev till end of
+ * the map.
+ */
+static unsigned int find_next_to_unuse(struct swap_info_struct *si,
+ unsigned int prev)
+{
+ unsigned int i;
+ unsigned char count;
+
+ /*
+ * No need for swap_lock here: we're just looking
+ * for whether an entry is in use, not modifying it; false
+ * hits are okay, and sys_swapoff() has already prevented new
+ * allocations from this area (while holding swap_lock).
+ */
+ for (i = prev + 1; i < si->max; i++) {
+ count = READ_ONCE(si->swap_map[i]);
+ if (count && swap_count(count) != SWAP_MAP_BAD)
+ break;
+ if ((i % LATENCY_LIMIT) == 0)
+ cond_resched();
+ }
+
+ if (i == si->max)
+ i = 0;
+
+ return i;
+}
+
static inline int pte_same_as_swp(pte_t pte, pte_t swp_pte)
{
return pte_same(pte_swp_clear_flags(pte), swp_pte);
@@ -2241,37 +2272,6 @@ static int unuse_mm(struct mm_struct *mm, unsigned int type)
return ret;
}

-/*
- * Scan swap_map from current position to next entry still in use.
- * Return 0 if there are no inuse entries after prev till end of
- * the map.
- */
-static unsigned int find_next_to_unuse(struct swap_info_struct *si,
- unsigned int prev)
-{
- unsigned int i;
- unsigned char count;
-
- /*
- * No need for swap_lock here: we're just looking
- * for whether an entry is in use, not modifying it; false
- * hits are okay, and sys_swapoff() has already prevented new
- * allocations from this area (while holding swap_lock).
- */
- for (i = prev + 1; i < si->max; i++) {
- count = READ_ONCE(si->swap_map[i]);
- if (count && swap_count(count) != SWAP_MAP_BAD)
- break;
- if ((i % LATENCY_LIMIT) == 0)
- cond_resched();
- }
-
- if (i == si->max)
- i = 0;
-
- return i;
-}
-
static int try_to_unuse(unsigned int type)
{
struct mm_struct *prev_mm;
@@ -3525,6 +3525,26 @@ void si_swapinfo(struct sysinfo *val)
spin_unlock(&swap_lock);
}

+struct swap_info_struct *swp_swap_info(swp_entry_t entry)
+{
+ return swap_type_to_swap_info(swp_type(entry));
+}
+
+/*
+ * out-of-line methods to avoid include hell.
+ */
+struct address_space *swapcache_mapping(struct folio *folio)
+{
+ return swp_swap_info(folio->swap)->swap_file->f_mapping;
+}
+EXPORT_SYMBOL_GPL(swapcache_mapping);
+
+pgoff_t __folio_swap_cache_index(struct folio *folio)
+{
+ return swap_cache_index(folio->swap);
+}
+EXPORT_SYMBOL_GPL(__folio_swap_cache_index);
+
/*
* Verify that nr swap entries are valid and increment their swap map counts.
*
@@ -3658,26 +3678,6 @@ void swapcache_clear(struct swap_info_struct *si, swp_entry_t entry, int nr)
cluster_swap_free_nr(si, offset, nr, SWAP_HAS_CACHE);
}

-struct swap_info_struct *swp_swap_info(swp_entry_t entry)
-{
- return swap_type_to_swap_info(swp_type(entry));
-}
-
-/*
- * out-of-line methods to avoid include hell.
- */
-struct address_space *swapcache_mapping(struct folio *folio)
-{
- return swp_swap_info(folio->swap)->swap_file->f_mapping;
-}
-EXPORT_SYMBOL_GPL(swapcache_mapping);
-
-pgoff_t __folio_swap_cache_index(struct folio *folio)
-{
- return swap_cache_index(folio->swap);
-}
-EXPORT_SYMBOL_GPL(__folio_swap_cache_index);
-
/*
* add_swap_count_continuation - called when a swap count is duplicated
* beyond SWAP_MAP_MAX, it allocates a new page and links that to the entry's

Rearrange some functions in preparation for the rest of the series. No
functional change intended.

Signed-off-by: Nhat Pham <nphamcs@gmail.com>
---
 mm/swapfile.c | 332 +++++++++++++++++++++++++-------------------------
 1 file changed, 166 insertions(+), 166 deletions(-)