
[4/4] x86/mm/pat: Make num_pages consistent in populate_{pte,pud,pgd}

Message ID 20200303205445.3965393-5-nivedita@alum.mit.edu
State New
Series Bugfix + small cleanup to populate_p[mug]d

Commit Message

Arvind Sankar March 3, 2020, 8:54 p.m. UTC
The number of pages is currently typed variously as int, unsigned int,
long and unsigned long in different places.

Change it to be consistently unsigned long.

Remove the unnecessary min(num_pages, cur_pages): pre_end has already
been min'd with start + (num_pages << PAGE_SHIFT), so cur_pages cannot
exceed num_pages. This gets rid of two conversions to int/unsigned int.
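
For illustration, a minimal standalone sketch (not kernel code; the
constants and the values of start/num_pages below are made up) of why
the dropped min() could never change the result: cur_pages is derived
from a pre_end that is already bounded by
start + (num_pages << PAGE_SHIFT).

#include <stdio.h>

#define PAGE_SHIFT	12
#define PMD_SIZE	(1UL << 21)
#define PMD_MASK	(~(PMD_SIZE - 1))

int main(void)
{
	/* Hypothetical inputs: start not 2M-aligned, num_pages large. */
	unsigned long start     = (1UL << 21) + 0x3000UL;
	unsigned long num_pages = 10000UL;
	unsigned long pre_end   = start + (num_pages << PAGE_SHIFT);
	unsigned long next_page = (start + PMD_SIZE) & PMD_MASK;
	unsigned long cur_pages;

	/* Clamp to the next 2M boundary, as populate_pmd() does. */
	if (next_page < pre_end)
		pre_end = next_page;

	/* pre_end <= start + (num_pages << PAGE_SHIFT), hence: */
	cur_pages = (pre_end - start) >> PAGE_SHIFT;

	/* Prints num_pages=10000 cur_pages=509, i.e. cur_pages <= num_pages. */
	printf("num_pages=%lu cur_pages=%lu\n", num_pages, cur_pages);
	return 0;
}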

Signed-off-by: Arvind Sankar <nivedita@alum.mit.edu>
---
 arch/x86/include/asm/pgtable_types.h |  2 +-
 arch/x86/mm/pat/set_memory.c         | 13 ++++++-------
 2 files changed, 7 insertions(+), 8 deletions(-)

Patch

diff --git a/arch/x86/include/asm/pgtable_types.h b/arch/x86/include/asm/pgtable_types.h
index 0239998d8cdc..894569255a95 100644
--- a/arch/x86/include/asm/pgtable_types.h
+++ b/arch/x86/include/asm/pgtable_types.h
@@ -574,7 +574,7 @@  extern pmd_t *lookup_pmd_address(unsigned long address);
 extern phys_addr_t slow_virt_to_phys(void *__address);
 extern int __init kernel_map_pages_in_pgd(pgd_t *pgd, u64 pfn,
 					  unsigned long address,
-					  unsigned numpages,
+					  unsigned long numpages,
 					  unsigned long page_flags);
 extern int __init kernel_unmap_pages_in_pgd(pgd_t *pgd, unsigned long address,
 					    unsigned long numpages);
diff --git a/arch/x86/mm/pat/set_memory.c b/arch/x86/mm/pat/set_memory.c
index 2f98423ef69a..51b64937cc16 100644
--- a/arch/x86/mm/pat/set_memory.c
+++ b/arch/x86/mm/pat/set_memory.c
@@ -1230,7 +1230,7 @@  static int alloc_pmd_page(pud_t *pud)
 
 static void populate_pte(struct cpa_data *cpa,
 			 unsigned long start, unsigned long end,
-			 unsigned num_pages, pmd_t *pmd, pgprot_t pgprot)
+			 unsigned long num_pages, pmd_t *pmd, pgprot_t pgprot)
 {
 	pte_t *pte;
 
@@ -1249,9 +1249,9 @@  static void populate_pte(struct cpa_data *cpa,
 
 static int populate_pmd(struct cpa_data *cpa,
 			unsigned long start, unsigned long end,
-			unsigned num_pages, pud_t *pud, pgprot_t pgprot)
+			unsigned long num_pages, pud_t *pud, pgprot_t pgprot)
 {
-	long cur_pages = 0;
+	unsigned long cur_pages = 0;
 	pmd_t *pmd;
 	pgprot_t pmd_pgprot;
 
@@ -1264,7 +1264,6 @@  static int populate_pmd(struct cpa_data *cpa,
 
 		pre_end   = min_t(unsigned long, pre_end, next_page);
 		cur_pages = (pre_end - start) >> PAGE_SHIFT;
-		cur_pages = min_t(unsigned int, num_pages, cur_pages);
 
 		/*
 		 * Need a PTE page?
@@ -1326,7 +1325,7 @@  static int populate_pud(struct cpa_data *cpa, unsigned long start, p4d_t *p4d,
 {
 	pud_t *pud;
 	unsigned long end;
-	long cur_pages = 0;
+	unsigned long cur_pages = 0;
 	pgprot_t pud_pgprot;
 	int ret;
 
@@ -1342,7 +1341,6 @@  static int populate_pud(struct cpa_data *cpa, unsigned long start, p4d_t *p4d,
 
 		pre_end   = min_t(unsigned long, end, next_page);
 		cur_pages = (pre_end - start) >> PAGE_SHIFT;
-		cur_pages = min_t(int, (int)cpa->numpages, cur_pages);
 
 		pud = pud_offset(p4d, start);
 
@@ -2231,7 +2229,8 @@  bool kernel_page_present(struct page *page)
 #endif /* CONFIG_HIBERNATION */
 
 int __init kernel_map_pages_in_pgd(pgd_t *pgd, u64 pfn, unsigned long address,
-				   unsigned numpages, unsigned long page_flags)
+				   unsigned long numpages,
+				   unsigned long page_flags)
 {
 	int retval = -EINVAL;