author     Mike Rapoport <rppt@linux.ibm.com>              2019-09-23 18:35:31 -0400
committer  Linus Torvalds <torvalds@linux-foundation.org>  2019-09-24 18:54:09 -0400
commit     782de70c42930baae55234f3df0dc90774924447
tree       3381e20d3c3a51631eb0f8fac3d6bfe87a61d894
parent     1b9a9d8564cbc49e1e5529460ca70bf7353aa4d1
mm: consolidate pgtable_cache_init() and pgd_cache_init()
Both pgtable_cache_init() and pgd_cache_init() are used to initialize kmem cache for page table allocations on several architectures that do not use PAGE_SIZE tables for one or more levels of the page table hierarchy.

Most architectures do not implement these functions and use __weak default NOP implementation of pgd_cache_init(). Since there is no such default for pgtable_cache_init(), its empty stub is duplicated among most architectures.

Rename the definitions of pgd_cache_init() to pgtable_cache_init() and drop empty stubs of pgtable_cache_init().

Link: http://lkml.kernel.org/r/1566457046-22637-1-git-send-email-rppt@linux.ibm.com
Signed-off-by: Mike Rapoport <rppt@linux.ibm.com>
Acked-by: Will Deacon <will@kernel.org>    [arm64]
Acked-by: Thomas Gleixner <tglx@linutronix.de>    [x86]
Cc: Catalin Marinas <catalin.marinas@arm.com>
Cc: Ingo Molnar <mingo@redhat.com>
Cc: Borislav Petkov <bp@alien8.de>
Cc: Matthew Wilcox <willy@infradead.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
-rw-r--r--  arch/alpha/include/asm/pgtable.h        5
-rw-r--r--  arch/arc/include/asm/pgtable.h          5
-rw-r--r--  arch/arm/include/asm/pgtable-nommu.h    5
-rw-r--r--  arch/arm/include/asm/pgtable.h          2
-rw-r--r--  arch/arm64/include/asm/pgtable.h        2
-rw-r--r--  arch/arm64/mm/pgd.c                     2
-rw-r--r--  arch/c6x/include/asm/pgtable.h          5
-rw-r--r--  arch/csky/include/asm/pgtable.h         5
-rw-r--r--  arch/h8300/include/asm/pgtable.h        6
-rw-r--r--  arch/hexagon/include/asm/pgtable.h      3
-rw-r--r--  arch/hexagon/mm/Makefile                2
-rw-r--r--  arch/hexagon/mm/pgalloc.c               10
-rw-r--r--  arch/ia64/include/asm/pgtable.h         5
-rw-r--r--  arch/m68k/include/asm/pgtable_mm.h      5
-rw-r--r--  arch/m68k/include/asm/pgtable_no.h      5
-rw-r--r--  arch/microblaze/include/asm/pgtable.h   7
-rw-r--r--  arch/mips/include/asm/pgtable.h         5
-rw-r--r--  arch/nds32/include/asm/pgtable.h        2
-rw-r--r--  arch/nios2/include/asm/pgtable.h        2
-rw-r--r--  arch/openrisc/include/asm/pgtable.h     5
-rw-r--r--  arch/parisc/include/asm/pgtable.h       2
-rw-r--r--  arch/powerpc/include/asm/pgtable.h      1
-rw-r--r--  arch/riscv/include/asm/pgtable.h        5
-rw-r--r--  arch/s390/include/asm/pgtable.h         5
-rw-r--r--  arch/sh/include/asm/pgtable.h           5
-rw-r--r--  arch/sh/mm/nommu.c                      4
-rw-r--r--  arch/sparc/include/asm/pgtable_32.h     5
-rw-r--r--  arch/sparc/include/asm/pgtable_64.h     1
-rw-r--r--  arch/um/include/asm/pgtable.h           2
-rw-r--r--  arch/unicore32/include/asm/pgtable.h    2
-rw-r--r--  arch/x86/include/asm/pgtable_32.h       1
-rw-r--r--  arch/x86/include/asm/pgtable_64.h       2
-rw-r--r--  arch/x86/mm/pgtable.c                   6
-rw-r--r--  arch/xtensa/include/asm/pgtable.h       1
-rw-r--r--  include/asm-generic/pgtable.h           2
-rw-r--r--  init/main.c                             3
36 files changed, 5 insertions, 130 deletions
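Before the per-file hunks, a minimal user-space sketch (not kernel code; the file names and messages here are purely illustrative) of the __weak override pattern this consolidation relies on: init/main.c keeps a single weak no-op pgtable_cache_init(), and an architecture that provides a strong definition of the same symbol, as arch/arm64/mm/pgd.c and arch/x86/mm/pgtable.c do below, replaces it at link time.

/* stub.c -- plays the role of the generic weak stub in init/main.c */
#include <stdio.h>

void __attribute__((weak)) pgtable_cache_init(void)
{
	/* default: no page table caches to initialise */
	puts("generic no-op stub");
}

int main(void)
{
	pgtable_cache_init();
	return 0;
}

/* arch.c -- plays the role of an architecture override such as
 * arch/arm64/mm/pgd.c.  Building "cc stub.c arch.c" makes this strong
 * definition win; "cc stub.c" alone falls back to the weak no-op above. */
#include <stdio.h>

void pgtable_cache_init(void)
{
	puts("arch-specific kmem cache set up here");
}

With a weak default carrying the pgtable_cache_init() name, the per-architecture empty stubs become redundant, which is what the removal hunks below take care of.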
diff --git a/arch/alpha/include/asm/pgtable.h b/arch/alpha/include/asm/pgtable.h
index 89c2032f9960..065b57f408c3 100644
--- a/arch/alpha/include/asm/pgtable.h
+++ b/arch/alpha/include/asm/pgtable.h
@@ -359,11 +359,6 @@ extern void paging_init(void);
 
 #include <asm-generic/pgtable.h>
 
-/*
- * No page table caches to initialise
- */
-#define pgtable_cache_init() do { } while (0)
-
 /* We have our own get_unmapped_area to cope with ADDR_LIMIT_32BIT. */
 #define HAVE_ARCH_UNMAPPED_AREA
 
diff --git a/arch/arc/include/asm/pgtable.h b/arch/arc/include/asm/pgtable.h
index 1d87c18a2976..7addd0301c51 100644
--- a/arch/arc/include/asm/pgtable.h
+++ b/arch/arc/include/asm/pgtable.h
@@ -395,11 +395,6 @@ void update_mmu_cache(struct vm_area_struct *vma, unsigned long address,
 /* to cope with aliasing VIPT cache */
 #define HAVE_ARCH_UNMAPPED_AREA
 
-/*
- * No page table caches to initialise
- */
-#define pgtable_cache_init() do { } while (0)
-
 #endif /* __ASSEMBLY__ */
 
 #endif
diff --git a/arch/arm/include/asm/pgtable-nommu.h b/arch/arm/include/asm/pgtable-nommu.h
index d0de24f06724..010fa1a35a68 100644
--- a/arch/arm/include/asm/pgtable-nommu.h
+++ b/arch/arm/include/asm/pgtable-nommu.h
@@ -71,11 +71,6 @@ typedef pte_t *pte_addr_t;
 extern unsigned int kobjsize(const void *objp);
 
 /*
- * No page table caches to initialise.
- */
-#define pgtable_cache_init() do { } while (0)
-
-/*
  * All 32bit addresses are effectively valid for vmalloc...
  * Sort of meaningless for non-VM targets.
  */
diff --git a/arch/arm/include/asm/pgtable.h b/arch/arm/include/asm/pgtable.h
index f2e990dc27e7..3ae120cd1715 100644
--- a/arch/arm/include/asm/pgtable.h
+++ b/arch/arm/include/asm/pgtable.h
@@ -368,8 +368,6 @@ static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
 #define HAVE_ARCH_UNMAPPED_AREA
 #define HAVE_ARCH_UNMAPPED_AREA_TOPDOWN
 
-#define pgtable_cache_init() do { } while (0)
-
 #endif /* !__ASSEMBLY__ */
 
 #endif /* CONFIG_MMU */
diff --git a/arch/arm64/include/asm/pgtable.h b/arch/arm64/include/asm/pgtable.h
index 57427d17580e..7576df00eb50 100644
--- a/arch/arm64/include/asm/pgtable.h
+++ b/arch/arm64/include/asm/pgtable.h
@@ -861,8 +861,6 @@ extern int kern_addr_valid(unsigned long addr);
 
 #include <asm-generic/pgtable.h>
 
-static inline void pgtable_cache_init(void) { }
-
 /*
  * On AArch64, the cache coherency is handled via the set_pte_at() function.
  */
diff --git a/arch/arm64/mm/pgd.c b/arch/arm64/mm/pgd.c
index 7548f9ca1f11..4a64089e5771 100644
--- a/arch/arm64/mm/pgd.c
+++ b/arch/arm64/mm/pgd.c
@@ -35,7 +35,7 @@ void pgd_free(struct mm_struct *mm, pgd_t *pgd)
 	kmem_cache_free(pgd_cache, pgd);
 }
 
-void __init pgd_cache_init(void)
+void __init pgtable_cache_init(void)
 {
 	if (PGD_SIZE == PAGE_SIZE)
 		return;
diff --git a/arch/c6x/include/asm/pgtable.h b/arch/c6x/include/asm/pgtable.h
index 0bd805964ea6..0b6919c00413 100644
--- a/arch/c6x/include/asm/pgtable.h
+++ b/arch/c6x/include/asm/pgtable.h
@@ -60,11 +60,6 @@ extern unsigned long empty_zero_page;
 #define swapper_pg_dir ((pgd_t *) 0)
 
 /*
- * No page table caches to initialise
- */
-#define pgtable_cache_init() do { } while (0)
-
-/*
  * c6x is !MMU, so define the simpliest implementation
  */
 #define pgprot_writecombine pgprot_noncached
diff --git a/arch/csky/include/asm/pgtable.h b/arch/csky/include/asm/pgtable.h
index c429a6f347de..0040b3a05b61 100644
--- a/arch/csky/include/asm/pgtable.h
+++ b/arch/csky/include/asm/pgtable.h
@@ -296,11 +296,6 @@ void update_mmu_cache(struct vm_area_struct *vma, unsigned long address,
 /* Needs to be defined here and not in linux/mm.h, as it is arch dependent */
 #define kern_addr_valid(addr) (1)
 
-/*
- * No page table caches to initialise
- */
-#define pgtable_cache_init() do {} while (0)
-
 #define io_remap_pfn_range(vma, vaddr, pfn, size, prot) \
 	remap_pfn_range(vma, vaddr, pfn, size, prot)
 
diff --git a/arch/h8300/include/asm/pgtable.h b/arch/h8300/include/asm/pgtable.h
index a99caa49d265..4d00152fab58 100644
--- a/arch/h8300/include/asm/pgtable.h
+++ b/arch/h8300/include/asm/pgtable.h
@@ -4,7 +4,6 @@
 #define __ARCH_USE_5LEVEL_HACK
 #include <asm-generic/pgtable-nopud.h>
 #include <asm-generic/pgtable.h>
-#define pgtable_cache_init() do { } while (0)
 extern void paging_init(void);
 #define PAGE_NONE	__pgprot(0)    /* these mean nothing to NO_MM */
 #define PAGE_SHARED	__pgprot(0)    /* these mean nothing to NO_MM */
@@ -35,11 +34,6 @@ extern unsigned int kobjsize(const void *objp);
 extern int is_in_rom(unsigned long);
 
 /*
- * No page table caches to initialise
- */
-#define pgtable_cache_init() do { } while (0)
-
-/*
  * All 32bit addresses are effectively valid for vmalloc...
  * Sort of meaningless for non-VM targets.
  */
diff --git a/arch/hexagon/include/asm/pgtable.h b/arch/hexagon/include/asm/pgtable.h
index a3ff6d24c09e..2fec20ad939e 100644
--- a/arch/hexagon/include/asm/pgtable.h
+++ b/arch/hexagon/include/asm/pgtable.h
@@ -431,9 +431,6 @@ static inline int pte_exec(pte_t pte)
 
 #define __pte_offset(address) (((address) >> PAGE_SHIFT) & (PTRS_PER_PTE - 1))
 
-/* I think this is in case we have page table caches; needed by init/main.c */
-#define pgtable_cache_init() do { } while (0)
-
 /*
  * Swap/file PTE definitions. If _PAGE_PRESENT is zero, the rest of the PTE is
  * interpreted as swap information. The remaining free bits are interpreted as
diff --git a/arch/hexagon/mm/Makefile b/arch/hexagon/mm/Makefile
index 1894263ae5bc..893838499591 100644
--- a/arch/hexagon/mm/Makefile
+++ b/arch/hexagon/mm/Makefile
@@ -3,5 +3,5 @@
 # Makefile for Hexagon memory management subsystem
 #
 
-obj-y := init.o pgalloc.o ioremap.o uaccess.o vm_fault.o cache.o
+obj-y := init.o ioremap.o uaccess.o vm_fault.o cache.o
 obj-y += copy_to_user.o copy_from_user.o strnlen_user.o vm_tlb.o
diff --git a/arch/hexagon/mm/pgalloc.c b/arch/hexagon/mm/pgalloc.c
deleted file mode 100644
index 4d4316140237..000000000000
--- a/arch/hexagon/mm/pgalloc.c
+++ /dev/null
@@ -1,10 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-only
-/*
- * Copyright (c) 2010-2011, The Linux Foundation. All rights reserved.
- */
-
-#include <linux/init.h>
-
-void __init pgtable_cache_init(void)
-{
-}
diff --git a/arch/ia64/include/asm/pgtable.h b/arch/ia64/include/asm/pgtable.h
index b1e7468eb65a..d602e7c622db 100644
--- a/arch/ia64/include/asm/pgtable.h
+++ b/arch/ia64/include/asm/pgtable.h
@@ -566,11 +566,6 @@ extern struct page *zero_page_memmap_ptr;
 #define KERNEL_TR_PAGE_SHIFT	_PAGE_SIZE_64M
 #define KERNEL_TR_PAGE_SIZE	(1 << KERNEL_TR_PAGE_SHIFT)
 
-/*
- * No page table caches to initialise
- */
-#define pgtable_cache_init() do { } while (0)
-
 /* These tell get_user_pages() that the first gate page is accessible from user-level. */
 #define FIXADDR_USER_START GATE_ADDR
 #ifdef HAVE_BUGGY_SEGREL
diff --git a/arch/m68k/include/asm/pgtable_mm.h b/arch/m68k/include/asm/pgtable_mm.h
index cc476c1d72e5..646c174fff99 100644
--- a/arch/m68k/include/asm/pgtable_mm.h
+++ b/arch/m68k/include/asm/pgtable_mm.h
@@ -176,9 +176,4 @@ pgprot_t pgprot_dmacoherent(pgprot_t prot);
 #include <asm-generic/pgtable.h>
 #endif /* !__ASSEMBLY__ */
 
-/*
- * No page table caches to initialise
- */
-#define pgtable_cache_init() do { } while (0)
-
 #endif /* _M68K_PGTABLE_H */
diff --git a/arch/m68k/include/asm/pgtable_no.h b/arch/m68k/include/asm/pgtable_no.h
index 69e271101223..c18165b0d904 100644
--- a/arch/m68k/include/asm/pgtable_no.h
+++ b/arch/m68k/include/asm/pgtable_no.h
@@ -45,11 +45,6 @@ extern void paging_init(void);
 #define ZERO_PAGE(vaddr)	(virt_to_page(0))
 
 /*
- * No page table caches to initialise.
- */
-#define pgtable_cache_init() do { } while (0)
-
-/*
  * All 32bit addresses are effectively valid for vmalloc...
  * Sort of meaningless for non-VM targets.
  */
diff --git a/arch/microblaze/include/asm/pgtable.h b/arch/microblaze/include/asm/pgtable.h
index 142d3f004848..954b69af451f 100644
--- a/arch/microblaze/include/asm/pgtable.h
+++ b/arch/microblaze/include/asm/pgtable.h
@@ -46,8 +46,6 @@ extern int mem_init_done;
 
 #define swapper_pg_dir ((pgd_t *) NULL)
 
-#define pgtable_cache_init() do {} while (0)
-
 #define arch_enter_lazy_cpu_mode()	do {} while (0)
 
 #define pgprot_noncached_wc(prot)	prot
@@ -526,11 +524,6 @@ extern unsigned long iopa(unsigned long addr);
 /* Needs to be defined here and not in linux/mm.h, as it is arch dependent */
 #define kern_addr_valid(addr)	(1)
 
-/*
- * No page table caches to initialise
- */
-#define pgtable_cache_init() do { } while (0)
-
 void do_page_fault(struct pt_regs *regs, unsigned long address,
 		   unsigned long error_code);
 
diff --git a/arch/mips/include/asm/pgtable.h b/arch/mips/include/asm/pgtable.h
index 4dca733d5076..f85bd5b15f51 100644
--- a/arch/mips/include/asm/pgtable.h
+++ b/arch/mips/include/asm/pgtable.h
@@ -661,9 +661,4 @@ pgprot_t phys_mem_access_prot(struct file *file, unsigned long pfn,
 #define HAVE_ARCH_UNMAPPED_AREA
 #define HAVE_ARCH_UNMAPPED_AREA_TOPDOWN
 
-/*
- * No page table caches to initialise
- */
-#define pgtable_cache_init() do { } while (0)
-
 #endif /* _ASM_PGTABLE_H */
diff --git a/arch/nds32/include/asm/pgtable.h b/arch/nds32/include/asm/pgtable.h
index c70cc56bec09..0588ec99725c 100644
--- a/arch/nds32/include/asm/pgtable.h
+++ b/arch/nds32/include/asm/pgtable.h
@@ -403,8 +403,6 @@ extern pgd_t swapper_pg_dir[PTRS_PER_PGD];
  * into virtual address `from'
  */
 
-#define pgtable_cache_init() do { } while (0)
-
 #endif /* !__ASSEMBLY__ */
 
 #endif /* _ASMNDS32_PGTABLE_H */
diff --git a/arch/nios2/include/asm/pgtable.h b/arch/nios2/include/asm/pgtable.h
index 95237b7f6fc1..99985d8b7166 100644
--- a/arch/nios2/include/asm/pgtable.h
+++ b/arch/nios2/include/asm/pgtable.h
@@ -291,8 +291,6 @@ static inline void pte_clear(struct mm_struct *mm,
 
 #include <asm-generic/pgtable.h>
 
-#define pgtable_cache_init() do { } while (0)
-
 extern void __init paging_init(void);
 extern void __init mmu_init(void);
 
diff --git a/arch/openrisc/include/asm/pgtable.h b/arch/openrisc/include/asm/pgtable.h
index 2fe9ff5b5d6f..248d22d8faa7 100644
--- a/arch/openrisc/include/asm/pgtable.h
+++ b/arch/openrisc/include/asm/pgtable.h
@@ -443,11 +443,6 @@ static inline void update_mmu_cache(struct vm_area_struct *vma,
 
 #include <asm-generic/pgtable.h>
 
-/*
- * No page table caches to initialise
- */
-#define pgtable_cache_init() do { } while (0)
-
 typedef pte_t *pte_addr_t;
 
 #endif /* __ASSEMBLY__ */
diff --git a/arch/parisc/include/asm/pgtable.h b/arch/parisc/include/asm/pgtable.h
index 6d58c1739b42..4ac374b3a99f 100644
--- a/arch/parisc/include/asm/pgtable.h
+++ b/arch/parisc/include/asm/pgtable.h
@@ -132,8 +132,6 @@ static inline void purge_tlb_entries(struct mm_struct *mm, unsigned long addr)
 #define PTRS_PER_PTE    (1UL << BITS_PER_PTE)
 
 /* Definitions for 2nd level */
-#define pgtable_cache_init()	do { } while (0)
-
 #define PMD_SHIFT	(PLD_SHIFT + BITS_PER_PTE)
 #define PMD_SIZE	(1UL << PMD_SHIFT)
 #define PMD_MASK	(~(PMD_SIZE-1))
diff --git a/arch/powerpc/include/asm/pgtable.h b/arch/powerpc/include/asm/pgtable.h
index 8b7865a2d576..4053b2ab427c 100644
--- a/arch/powerpc/include/asm/pgtable.h
+++ b/arch/powerpc/include/asm/pgtable.h
@@ -87,7 +87,6 @@ extern unsigned long ioremap_bot;
 unsigned long vmalloc_to_phys(void *vmalloc_addr);
 
 void pgtable_cache_add(unsigned int shift);
-void pgtable_cache_init(void);
 
 #if defined(CONFIG_STRICT_KERNEL_RWX) || defined(CONFIG_PPC32)
 void mark_initmem_nx(void);
diff --git a/arch/riscv/include/asm/pgtable.h b/arch/riscv/include/asm/pgtable.h
index 80905b27ee98..c60123f018f5 100644
--- a/arch/riscv/include/asm/pgtable.h
+++ b/arch/riscv/include/asm/pgtable.h
@@ -424,11 +424,6 @@ extern void *dtb_early_va;
 extern void setup_bootmem(void);
 extern void paging_init(void);
 
-static inline void pgtable_cache_init(void)
-{
-	/* No page table caches to initialize */
-}
-
 #define VMALLOC_SIZE     (KERN_VIRT_SIZE >> 1)
 #define VMALLOC_END      (PAGE_OFFSET - 1)
 #define VMALLOC_START    (PAGE_OFFSET - VMALLOC_SIZE)
diff --git a/arch/s390/include/asm/pgtable.h b/arch/s390/include/asm/pgtable.h
index 8f59454ac407..36c578c0ff96 100644
--- a/arch/s390/include/asm/pgtable.h
+++ b/arch/s390/include/asm/pgtable.h
@@ -1682,11 +1682,6 @@ extern void s390_reset_cmma(struct mm_struct *mm);
 #define HAVE_ARCH_UNMAPPED_AREA
 #define HAVE_ARCH_UNMAPPED_AREA_TOPDOWN
 
-/*
- * No page table caches to initialise
- */
-static inline void pgtable_cache_init(void) { }
-
 #include <asm-generic/pgtable.h>
 
 #endif /* _S390_PAGE_H */
diff --git a/arch/sh/include/asm/pgtable.h b/arch/sh/include/asm/pgtable.h
index 9085d1142fa3..cbd0f3c55a0c 100644
--- a/arch/sh/include/asm/pgtable.h
+++ b/arch/sh/include/asm/pgtable.h
@@ -123,11 +123,6 @@ typedef pte_t *pte_addr_t;
 
 #define pte_pfn(x)	((unsigned long)(((x).pte_low >> PAGE_SHIFT)))
 
-/*
- * Initialise the page table caches
- */
-extern void pgtable_cache_init(void);
-
 struct vm_area_struct;
 struct mm_struct;
 
diff --git a/arch/sh/mm/nommu.c b/arch/sh/mm/nommu.c
index cc779a90d917..dca946f426c6 100644
--- a/arch/sh/mm/nommu.c
+++ b/arch/sh/mm/nommu.c
@@ -97,7 +97,3 @@ void __init page_table_range_init(unsigned long start, unsigned long end,
 void __set_fixmap(enum fixed_addresses idx, unsigned long phys, pgprot_t prot)
 {
 }
-
-void pgtable_cache_init(void)
-{
-}
diff --git a/arch/sparc/include/asm/pgtable_32.h b/arch/sparc/include/asm/pgtable_32.h
index 4eebed6c6781..31da44826645 100644
--- a/arch/sparc/include/asm/pgtable_32.h
+++ b/arch/sparc/include/asm/pgtable_32.h
@@ -445,9 +445,4 @@ static inline int io_remap_pfn_range(struct vm_area_struct *vma,
 /* We provide our own get_unmapped_area to cope with VA holes for userland */
 #define HAVE_ARCH_UNMAPPED_AREA
 
-/*
- * No page table caches to initialise
- */
-#define pgtable_cache_init() do { } while (0)
-
 #endif /* !(_SPARC_PGTABLE_H) */
diff --git a/arch/sparc/include/asm/pgtable_64.h b/arch/sparc/include/asm/pgtable_64.h
index 1599de730532..b57f9c631eca 100644
--- a/arch/sparc/include/asm/pgtable_64.h
+++ b/arch/sparc/include/asm/pgtable_64.h
@@ -1135,7 +1135,6 @@ unsigned long get_fb_unmapped_area(struct file *filp, unsigned long,
 			      unsigned long);
 #define HAVE_ARCH_FB_UNMAPPED_AREA
 
-void pgtable_cache_init(void);
 void sun4v_register_fault_status(void);
 void sun4v_ktsb_register(void);
 void __init cheetah_ecache_flush_init(void);
diff --git a/arch/um/include/asm/pgtable.h b/arch/um/include/asm/pgtable.h
index e4d3ed980d82..36a44d58f373 100644
--- a/arch/um/include/asm/pgtable.h
+++ b/arch/um/include/asm/pgtable.h
@@ -32,8 +32,6 @@ extern pgd_t swapper_pg_dir[PTRS_PER_PGD];
 /* zero page used for uninitialized stuff */
 extern unsigned long *empty_zero_page;
 
-#define pgtable_cache_init() do ; while (0)
-
 /* Just any arbitrary offset to the start of the vmalloc VM area: the
  * current 8MB value just means that there will be a 8MB "hole" after the
  * physical memory until the kernel virtual memory starts. That means that
diff --git a/arch/unicore32/include/asm/pgtable.h b/arch/unicore32/include/asm/pgtable.h
index 126e961a8cb0..c8f7ba12f309 100644
--- a/arch/unicore32/include/asm/pgtable.h
+++ b/arch/unicore32/include/asm/pgtable.h
@@ -285,8 +285,6 @@ extern pgd_t swapper_pg_dir[PTRS_PER_PGD];
 
 #include <asm-generic/pgtable.h>
 
-#define pgtable_cache_init() do { } while (0)
-
 #endif /* !__ASSEMBLY__ */
 
 #endif /* __UNICORE_PGTABLE_H__ */
diff --git a/arch/x86/include/asm/pgtable_32.h b/arch/x86/include/asm/pgtable_32.h
index b9b9f8aa963e..0dca7f7aeff2 100644
--- a/arch/x86/include/asm/pgtable_32.h
+++ b/arch/x86/include/asm/pgtable_32.h
@@ -29,7 +29,6 @@ extern pgd_t swapper_pg_dir[1024];
 extern pgd_t initial_page_table[1024];
 extern pmd_t initial_pg_pmd[];
 
-static inline void pgtable_cache_init(void) { }
 void paging_init(void);
 void sync_initial_page_table(void);
 
diff --git a/arch/x86/include/asm/pgtable_64.h b/arch/x86/include/asm/pgtable_64.h
index a26d2d58b9c9..0b6c4042942a 100644
--- a/arch/x86/include/asm/pgtable_64.h
+++ b/arch/x86/include/asm/pgtable_64.h
@@ -241,8 +241,6 @@ extern void cleanup_highmap(void);
 #define HAVE_ARCH_UNMAPPED_AREA
 #define HAVE_ARCH_UNMAPPED_AREA_TOPDOWN
 
-#define pgtable_cache_init() do { } while (0)
-
 #define PAGE_AGP    PAGE_KERNEL_NOCACHE
 #define HAVE_PAGE_AGP 1
 
diff --git a/arch/x86/mm/pgtable.c b/arch/x86/mm/pgtable.c
index 44816ff6411f..463940faf52f 100644
--- a/arch/x86/mm/pgtable.c
+++ b/arch/x86/mm/pgtable.c
@@ -357,7 +357,7 @@ static void pgd_prepopulate_user_pmd(struct mm_struct *mm,
 
 static struct kmem_cache *pgd_cache;
 
-void __init pgd_cache_init(void)
+void __init pgtable_cache_init(void)
 {
 	/*
 	 * When PAE kernel is running as a Xen domain, it does not use
@@ -402,10 +402,6 @@ static inline void _pgd_free(pgd_t *pgd)
 }
 #else
 
-void __init pgd_cache_init(void)
-{
-}
-
 static inline pgd_t *_pgd_alloc(void)
 {
 	return (pgd_t *)__get_free_pages(GFP_PGTABLE_USER,
diff --git a/arch/xtensa/include/asm/pgtable.h b/arch/xtensa/include/asm/pgtable.h
index ce3ff5e591b9..3f7fe5a8c286 100644
--- a/arch/xtensa/include/asm/pgtable.h
+++ b/arch/xtensa/include/asm/pgtable.h
@@ -238,7 +238,6 @@ extern void paging_init(void);
 # define swapper_pg_dir NULL
 static inline void paging_init(void) { }
 #endif
-static inline void pgtable_cache_init(void) { }
 
 /*
  * The pmd contains the kernel virtual address of the pte page.
diff --git a/include/asm-generic/pgtable.h b/include/asm-generic/pgtable.h
index 75d9d68a6de7..fae6abb3d586 100644
--- a/include/asm-generic/pgtable.h
+++ b/include/asm-generic/pgtable.h
@@ -1126,7 +1126,7 @@ int phys_mem_access_prot_allowed(struct file *file, unsigned long pfn,
 static inline void init_espfix_bsp(void) { }
 #endif
 
-extern void __init pgd_cache_init(void);
+extern void __init pgtable_cache_init(void);
 
 #ifndef __HAVE_ARCH_PFN_MODIFY_ALLOWED
 static inline bool pfn_modify_allowed(unsigned long pfn, pgprot_t prot)
diff --git a/init/main.c b/init/main.c
index 3ca67e8b92fd..99a5f55e0d02 100644
--- a/init/main.c
+++ b/init/main.c
@@ -507,7 +507,7 @@ void __init __weak mem_encrypt_init(void) { }
 
 void __init __weak poking_init(void) { }
 
-void __init __weak pgd_cache_init(void) { }
+void __init __weak pgtable_cache_init(void) { }
 
 bool initcall_debug;
 core_param(initcall_debug, initcall_debug, bool, 0644);
@@ -565,7 +565,6 @@ static void __init mm_init(void)
 	init_espfix_bsp();
 	/* Should be run after espfix64 is set up. */
 	pti_init();
-	pgd_cache_init();
 }
 
 void __init __weak arch_call_rest_init(void)