@@ -268,17 +268,12 @@ static inline swp_entry_t swap_nth(swp_entry_t entry, long n)
return (swp_entry_t) { entry.val + n };
}
-/* similar to swap_nth, but check the backing physical slots as well. */
+/* temporarily disallow batched swap operations */
static inline swp_entry_t swap_move(swp_entry_t entry, long delta)
{
- swp_slot_t slot = swp_entry_to_swp_slot(entry), next_slot;
- swp_entry_t next_entry = swap_nth(entry, delta);
-
- next_slot = swp_entry_to_swp_slot(next_entry);
- if (swp_slot_type(slot) != swp_slot_type(next_slot) ||
- swp_slot_offset(slot) + delta != swp_slot_offset(next_slot))
- next_entry.val = 0;
+ swp_entry_t next_entry;
+ next_entry.val = 0;
return next_entry;
}
#else
@@ -349,6 +344,8 @@ static inline pte_t pte_next_swp_offset(pte_t pte)
* max_nr must be at least one and must be limited by the caller so scanning
* cannot exceed a single page table.
*
+ * Note that for virtual swap space, we will not batch anything for now.
+ *
* Return: the number of table entries in the batch.
*/
static inline int swap_pte_batch(pte_t *start_ptep, int max_nr, pte_t pte)
@@ -363,6 +360,9 @@ static inline int swap_pte_batch(pte_t *start_ptep, int max_nr, pte_t pte)
VM_WARN_ON(!is_swap_pte(pte));
VM_WARN_ON(non_swap_entry(entry));
+ if (IS_ENABLED(CONFIG_VIRTUAL_SWAP))
+ return 1;
+
cgroup_id = lookup_swap_cgroup_id(entry);
while (ptep < end_ptep) {
pte = ptep_get(ptep);
@@ -4230,8 +4230,10 @@ static struct folio *alloc_swap_folio(struct vm_fault *vmf)
* A large swapped out folio could be partially or fully in zswap. We
* lack handling for such cases, so fallback to swapping in order-0
* folio.
+ *
+ * We also disable THP swapin on the virtual swap implementation, for now.
*/
- if (!zswap_never_enabled())
+ if (!zswap_never_enabled() || IS_ENABLED(CONFIG_VIRTUAL_SWAP))
goto fallback;
entry = pte_to_swp_entry(vmf->orig_pte);
Disable THP swapin on the virtual swap implementation, for now. Similarly,
only operate on one swap entry at a time when we zap a PTE range.

There is no real reason why we cannot build support for this in the new
design. It is simply done to keep the following patch, which decouples the
swap backends, smaller and more manageable for reviewers - these
capabilities will be restored in a later patch.

Signed-off-by: Nhat Pham <nphamcs@gmail.com>
---
 mm/internal.h | 16 ++++++++--------
 mm/memory.c   |  4 +++-
 2 files changed, 11 insertions(+), 9 deletions(-)
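
Not part of the patch itself: below is a minimal userspace sketch that only
models the effect described above - when swap_pte_batch() reports a batch of
one (as it now does under CONFIG_VIRTUAL_SWAP), a zapping loop ends up
freeing swap entries one at a time instead of in runs. The helper names
model_swap_pte_batch() and zap_model() are hypothetical and exist only for
illustration; they do not correspond to kernel functions.

#include <stdio.h>

#define VIRTUAL_SWAP 1	/* stands in for IS_ENABLED(CONFIG_VIRTUAL_SWAP) */

/*
 * Model each swap entry as a bare offset; consecutive offsets could be
 * batched. With VIRTUAL_SWAP set, always report a batch of one, mirroring
 * the early return added to swap_pte_batch() above.
 */
static int model_swap_pte_batch(const long *entries, int max_nr)
{
	int nr = 1;

	if (VIRTUAL_SWAP)
		return 1;

	while (nr < max_nr && entries[nr] == entries[nr - 1] + 1)
		nr++;
	return nr;
}

/* Simplified stand-in for a PTE-zapping loop consuming batches. */
static void zap_model(const long *entries, int nr_entries)
{
	int i = 0;

	while (i < nr_entries) {
		int nr = model_swap_pte_batch(&entries[i], nr_entries - i);

		printf("freeing %d entr%s starting at offset %ld\n",
		       nr, nr == 1 ? "y" : "ies", entries[i]);
		i += nr;
	}
}

int main(void)
{
	long entries[] = { 100, 101, 102, 200 };

	/* With VIRTUAL_SWAP set, every entry is freed individually. */
	zap_model(entries, 4);
	return 0;
}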