From af1839eb4bd4fe079a125eb199205fceb6ae19e6 Mon Sep 17 00:00:00 2001 From: Catalin Marinas Date: Mon, 8 Oct 2012 16:28:08 -0700 Subject: Kconfig: clean up the long arch list for the UID16 config option Introduce HAVE_UID16 config option and select it in corresponding architecture Kconfig files. UID16 now only depends on HAVE_UID16. Signed-off-by: Catalin Marinas Acked-by: Geert Uytterhoeven Cc: Russell King Cc: Mike Frysinger Cc: Mikael Starvik Cc: Jesper Nilsson Cc: David Howells Cc: Yoshinori Sato Cc: Martin Schwidefsky Cc: Heiko Carstens Cc: Paul Mundt Cc: "David S. Miller" Cc: Jeff Dike Cc: Richard Weinberger Cc: Thomas Gleixner Cc: Ingo Molnar Cc: "H. Peter Anvin" Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- arch/arm/Kconfig | 1 + arch/arm64/Kconfig | 1 + arch/blackfin/Kconfig | 1 + arch/cris/Kconfig | 1 + arch/frv/Kconfig | 1 + arch/h8300/Kconfig | 1 + arch/m68k/Kconfig | 1 + arch/s390/Kconfig | 1 + arch/sh/Kconfig | 1 + arch/sparc/Kconfig | 2 ++ arch/um/Kconfig.common | 1 + arch/x86/Kconfig | 2 ++ 12 files changed, 14 insertions(+) (limited to 'arch') diff --git a/arch/arm/Kconfig b/arch/arm/Kconfig index 6d2f7f5c0036..5f5439672932 100644 --- a/arch/arm/Kconfig +++ b/arch/arm/Kconfig @@ -39,6 +39,7 @@ config ARM select HARDIRQS_SW_RESEND select GENERIC_IRQ_PROBE select GENERIC_IRQ_SHOW + select HAVE_UID16 select ARCH_WANT_IPC_PARSE_VERSION select HARDIRQS_SW_RESEND select CPU_PM if (SUSPEND || CPU_IDLE) diff --git a/arch/arm64/Kconfig b/arch/arm64/Kconfig index 767ba5685454..e61acae0d891 100644 --- a/arch/arm64/Kconfig +++ b/arch/arm64/Kconfig @@ -193,6 +193,7 @@ config COMPAT bool "Kernel support for 32-bit EL0" depends on !ARM64_64K_PAGES select COMPAT_BINFMT_ELF + select HAVE_UID16 help This option enables support for a 32-bit EL0 running under a 64-bit kernel at EL1. 
AArch32-specific components such as system calls, diff --git a/arch/blackfin/Kconfig b/arch/blackfin/Kconfig index c7092e6057c5..2169889c73ec 100644 --- a/arch/blackfin/Kconfig +++ b/arch/blackfin/Kconfig @@ -33,6 +33,7 @@ config BLACKFIN select HAVE_PERF_EVENTS select ARCH_HAVE_CUSTOM_GPIO_H select ARCH_WANT_OPTIONAL_GPIOLIB + select HAVE_UID16 select ARCH_WANT_IPC_PARSE_VERSION select HAVE_GENERIC_HARDIRQS select GENERIC_ATOMIC64 diff --git a/arch/cris/Kconfig b/arch/cris/Kconfig index 72bd5ae50a89..a118163b04ee 100644 --- a/arch/cris/Kconfig +++ b/arch/cris/Kconfig @@ -42,6 +42,7 @@ config CRIS select HAVE_IDE select GENERIC_ATOMIC64 select HAVE_GENERIC_HARDIRQS + select HAVE_UID16 select ARCH_WANT_IPC_PARSE_VERSION select GENERIC_IRQ_SHOW select GENERIC_IOMAP diff --git a/arch/frv/Kconfig b/arch/frv/Kconfig index 971c0a19facb..cc5709d18350 100644 --- a/arch/frv/Kconfig +++ b/arch/frv/Kconfig @@ -5,6 +5,7 @@ config FRV select HAVE_ARCH_TRACEHOOK select HAVE_IRQ_WORK select HAVE_PERF_EVENTS + select HAVE_UID16 select HAVE_GENERIC_HARDIRQS select GENERIC_IRQ_SHOW select ARCH_HAVE_NMI_SAFE_CMPXCHG diff --git a/arch/h8300/Kconfig b/arch/h8300/Kconfig index 5e8a0d9a09ce..90462eb23d02 100644 --- a/arch/h8300/Kconfig +++ b/arch/h8300/Kconfig @@ -3,6 +3,7 @@ config H8300 default y select HAVE_IDE select HAVE_GENERIC_HARDIRQS + select HAVE_UID16 select ARCH_WANT_IPC_PARSE_VERSION select GENERIC_IRQ_SHOW select GENERIC_CPU_DEVICES diff --git a/arch/m68k/Kconfig b/arch/m68k/Kconfig index b22df9410dce..3e2b2f66db60 100644 --- a/arch/m68k/Kconfig +++ b/arch/m68k/Kconfig @@ -6,6 +6,7 @@ config M68K select HAVE_GENERIC_HARDIRQS select GENERIC_IRQ_SHOW select GENERIC_ATOMIC64 + select HAVE_UID16 select ARCH_HAVE_NMI_SAFE_CMPXCHG if RMW_INSNS select GENERIC_CPU_DEVICES select GENERIC_STRNCPY_FROM_USER if MMU diff --git a/arch/s390/Kconfig b/arch/s390/Kconfig index c8af429991d9..baba37cfcf84 100644 --- a/arch/s390/Kconfig +++ b/arch/s390/Kconfig @@ -126,6 +126,7 @@ config S390 select ARCH_INLINE_WRITE_UNLOCK_BH select ARCH_INLINE_WRITE_UNLOCK_IRQ select ARCH_INLINE_WRITE_UNLOCK_IRQRESTORE + select HAVE_UID16 if 32BIT select ARCH_WANT_IPC_PARSE_VERSION select GENERIC_SMP_IDLE_THREAD select GENERIC_TIME_VSYSCALL diff --git a/arch/sh/Kconfig b/arch/sh/Kconfig index 36f5141e8041..f0c85e424777 100644 --- a/arch/sh/Kconfig +++ b/arch/sh/Kconfig @@ -21,6 +21,7 @@ config SUPERH select HAVE_KERNEL_LZMA select HAVE_KERNEL_XZ select HAVE_KERNEL_LZO + select HAVE_UID16 select ARCH_WANT_IPC_PARSE_VERSION select HAVE_SYSCALL_TRACEPOINTS select HAVE_REGS_AND_STACK_ACCESS_API diff --git a/arch/sparc/Kconfig b/arch/sparc/Kconfig index 67f1f6f5f4e1..e66481015d3b 100644 --- a/arch/sparc/Kconfig +++ b/arch/sparc/Kconfig @@ -42,6 +42,7 @@ config SPARC32 def_bool !64BIT select GENERIC_ATOMIC64 select CLZ_TAB + select HAVE_UID16 config SPARC64 def_bool 64BIT @@ -571,6 +572,7 @@ config COMPAT depends on SPARC64 default y select COMPAT_BINFMT_ELF + select HAVE_UID16 select ARCH_WANT_OLD_COMPAT_IPC config SYSVIPC_COMPAT diff --git a/arch/um/Kconfig.common b/arch/um/Kconfig.common index cb837c223922..648121b037d5 100644 --- a/arch/um/Kconfig.common +++ b/arch/um/Kconfig.common @@ -7,6 +7,7 @@ config UML bool default y select HAVE_GENERIC_HARDIRQS + select HAVE_UID16 select GENERIC_IRQ_SHOW select GENERIC_CPU_DEVICES select GENERIC_IO diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig index b72777ff32a9..fd5d7c2c2daa 100644 --- a/arch/x86/Kconfig +++ b/arch/x86/Kconfig @@ -10,6 +10,7 @@ config X86_32 def_bool y depends on !64BIT 
select CLKSRC_I8253 + select HAVE_UID16 config X86_64 def_bool y @@ -2168,6 +2169,7 @@ config IA32_EMULATION bool "IA32 Emulation" depends on X86_64 select COMPAT_BINFMT_ELF + select HAVE_UID16 ---help--- Include code to run legacy 32-bit programs under a 64-bit kernel. You should likely turn this on, unless you're -- cgit v1.2.2 From b69ec42b1b194cc88f04b3fbcda8d3f93182d6c3 Mon Sep 17 00:00:00 2001 From: Catalin Marinas Date: Mon, 8 Oct 2012 16:28:11 -0700 Subject: Kconfig: clean up the long arch list for the DEBUG_KMEMLEAK config option Introduce HAVE_DEBUG_KMEMLEAK config option and select it in corresponding architecture Kconfig files. DEBUG_KMEMLEAK now only depends on HAVE_DEBUG_KMEMLEAK. Signed-off-by: Catalin Marinas Cc: Russell King Cc: Michal Simek Cc: Ralf Baechle Cc: Benjamin Herrenschmidt Cc: Paul Mackerras Cc: Martin Schwidefsky Cc: Heiko Carstens Cc: Paul Mundt Cc: "David S. Miller" Cc: Chris Metcalf Cc: Thomas Gleixner Cc: Ingo Molnar Cc: "H. Peter Anvin" Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- arch/arm/Kconfig | 1 + arch/arm64/Kconfig | 1 + arch/microblaze/Kconfig | 1 + arch/mips/Kconfig | 1 + arch/powerpc/Kconfig | 1 + arch/s390/Kconfig | 1 + arch/sh/Kconfig | 1 + arch/sparc/Kconfig | 1 + arch/tile/Kconfig | 1 + arch/x86/Kconfig | 1 + 10 files changed, 10 insertions(+) (limited to 'arch') diff --git a/arch/arm/Kconfig b/arch/arm/Kconfig index 5f5439672932..2867a7742306 100644 --- a/arch/arm/Kconfig +++ b/arch/arm/Kconfig @@ -25,6 +25,7 @@ config ARM select HAVE_FUNCTION_GRAPH_TRACER if (!THUMB2_KERNEL) select ARCH_BINFMT_ELF_RANDOMIZE_PIE select HAVE_GENERIC_DMA_COHERENT + select HAVE_DEBUG_KMEMLEAK select HAVE_KERNEL_GZIP select HAVE_KERNEL_LZO select HAVE_KERNEL_LZMA diff --git a/arch/arm64/Kconfig b/arch/arm64/Kconfig index e61acae0d891..5dc9273781d6 100644 --- a/arch/arm64/Kconfig +++ b/arch/arm64/Kconfig @@ -10,6 +10,7 @@ config ARM64 select GENERIC_TIME_VSYSCALL select HARDIRQS_SW_RESEND select HAVE_ARCH_TRACEHOOK + select HAVE_DEBUG_KMEMLEAK select HAVE_DMA_API_DEBUG select HAVE_DMA_ATTRS select HAVE_GENERIC_DMA_COHERENT diff --git a/arch/microblaze/Kconfig b/arch/microblaze/Kconfig index 6133bed2b855..53fd94ab60f0 100644 --- a/arch/microblaze/Kconfig +++ b/arch/microblaze/Kconfig @@ -16,6 +16,7 @@ config MICROBLAZE select OF select OF_EARLY_FLATTREE select ARCH_WANT_IPC_PARSE_VERSION + select HAVE_DEBUG_KMEMLEAK select IRQ_DOMAIN select HAVE_GENERIC_HARDIRQS select GENERIC_IRQ_PROBE diff --git a/arch/mips/Kconfig b/arch/mips/Kconfig index faf65286574e..335115e5bdd9 100644 --- a/arch/mips/Kconfig +++ b/arch/mips/Kconfig @@ -17,6 +17,7 @@ config MIPS select HAVE_FUNCTION_GRAPH_TRACER select HAVE_KPROBES select HAVE_KRETPROBES + select HAVE_DEBUG_KMEMLEAK select ARCH_BINFMT_ELF_RANDOMIZE_PIE select RTC_LIB if !MACH_LOONGSON select GENERIC_ATOMIC64 if !64BIT diff --git a/arch/powerpc/Kconfig b/arch/powerpc/Kconfig index 4ce0be32d153..6a798a70a6d1 100644 --- a/arch/powerpc/Kconfig +++ b/arch/powerpc/Kconfig @@ -113,6 +113,7 @@ config PPC select HAVE_DMA_API_DEBUG select USE_GENERIC_SMP_HELPERS if SMP select HAVE_OPROFILE + select HAVE_DEBUG_KMEMLEAK select HAVE_SYSCALL_WRAPPERS if PPC64 select GENERIC_ATOMIC64 if PPC32 select ARCH_HAS_ATOMIC64_DEC_IF_POSITIVE diff --git a/arch/s390/Kconfig b/arch/s390/Kconfig index baba37cfcf84..8c6d7986f6d2 100644 --- a/arch/s390/Kconfig +++ b/arch/s390/Kconfig @@ -80,6 +80,7 @@ config S390 select HAVE_IRQ_WORK select HAVE_PERF_EVENTS select ARCH_HAVE_NMI_SAFE_CMPXCHG + select HAVE_DEBUG_KMEMLEAK select 
HAVE_KERNEL_GZIP select HAVE_KERNEL_BZIP2 select HAVE_KERNEL_LZMA diff --git a/arch/sh/Kconfig b/arch/sh/Kconfig index f0c85e424777..cfbf3e3c982b 100644 --- a/arch/sh/Kconfig +++ b/arch/sh/Kconfig @@ -16,6 +16,7 @@ config SUPERH select ARCH_HAVE_CUSTOM_GPIO_H select ARCH_HAVE_NMI_SAFE_CMPXCHG if (GUSA_RB || CPU_SH4A) select PERF_USE_VMALLOC + select HAVE_DEBUG_KMEMLEAK select HAVE_KERNEL_GZIP select HAVE_KERNEL_BZIP2 select HAVE_KERNEL_LZMA diff --git a/arch/sparc/Kconfig b/arch/sparc/Kconfig index e66481015d3b..274d6cf0ada2 100644 --- a/arch/sparc/Kconfig +++ b/arch/sparc/Kconfig @@ -60,6 +60,7 @@ config SPARC64 select HAVE_DYNAMIC_FTRACE select HAVE_FTRACE_MCOUNT_RECORD select HAVE_SYSCALL_TRACEPOINTS + select HAVE_DEBUG_KMEMLEAK select RTC_DRV_CMOS select RTC_DRV_BQ4802 select RTC_DRV_SUN4V diff --git a/arch/tile/Kconfig b/arch/tile/Kconfig index c9a3c1fe7297..9a0d77d3ba14 100644 --- a/arch/tile/Kconfig +++ b/arch/tile/Kconfig @@ -9,6 +9,7 @@ config TILE select GENERIC_FIND_FIRST_BIT select USE_GENERIC_SMP_HELPERS select CC_OPTIMIZE_FOR_SIZE + select HAVE_DEBUG_KMEMLEAK select HAVE_GENERIC_HARDIRQS select GENERIC_IRQ_PROBE select GENERIC_PENDING_IRQ if SMP diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig index fd5d7c2c2daa..3fea1848d955 100644 --- a/arch/x86/Kconfig +++ b/arch/x86/Kconfig @@ -66,6 +66,7 @@ config X86 select HAVE_PERF_EVENTS_NMI select HAVE_PERF_REGS select HAVE_PERF_USER_STACK_DUMP + select HAVE_DEBUG_KMEMLEAK select ANON_INODES select HAVE_ALIGNED_STRUCT_PAGE if SLUB && !M386 select HAVE_CMPXCHG_LOCAL if !M386 -- cgit v1.2.2 From 9b2a60c484715e2d2f07d8192fd9f18435cbc77c Mon Sep 17 00:00:00 2001 From: Catalin Marinas Date: Mon, 8 Oct 2012 16:28:13 -0700 Subject: Kconfig: clean up the long arch list for the DEBUG_BUGVERBOSE config option Introduce HAVE_DEBUG_BUGVERBOSE config option and select it in corresponding architecture Kconfig files. Architectures that already select GENERIC_BUG don't need to select HAVE_DEBUG_BUGVERBOSE. Signed-off-by: Catalin Marinas Acked-by: Geert Uytterhoeven Cc: David Howells Cc: Hirokazu Takata Cc: Paul Mundt Cc: "David S. 
Miller" Cc: Chris Metcalf Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- arch/arm64/Kconfig | 1 + arch/frv/Kconfig | 1 + arch/m32r/Kconfig | 1 + arch/m68k/Kconfig | 1 + arch/sh/Kconfig | 1 + arch/sparc/Kconfig | 1 + arch/tile/Kconfig | 1 + 7 files changed, 7 insertions(+) (limited to 'arch') diff --git a/arch/arm64/Kconfig b/arch/arm64/Kconfig index 5dc9273781d6..a30856058742 100644 --- a/arch/arm64/Kconfig +++ b/arch/arm64/Kconfig @@ -10,6 +10,7 @@ config ARM64 select GENERIC_TIME_VSYSCALL select HARDIRQS_SW_RESEND select HAVE_ARCH_TRACEHOOK + select HAVE_DEBUG_BUGVERBOSE select HAVE_DEBUG_KMEMLEAK select HAVE_DMA_API_DEBUG select HAVE_DMA_ATTRS diff --git a/arch/frv/Kconfig b/arch/frv/Kconfig index cc5709d18350..9d262645f667 100644 --- a/arch/frv/Kconfig +++ b/arch/frv/Kconfig @@ -8,6 +8,7 @@ config FRV select HAVE_UID16 select HAVE_GENERIC_HARDIRQS select GENERIC_IRQ_SHOW + select HAVE_DEBUG_BUGVERBOSE select ARCH_HAVE_NMI_SAFE_CMPXCHG select GENERIC_CPU_DEVICES select ARCH_WANT_IPC_PARSE_VERSION diff --git a/arch/m32r/Kconfig b/arch/m32r/Kconfig index 49498bbb9616..e875fc3ce9cb 100644 --- a/arch/m32r/Kconfig +++ b/arch/m32r/Kconfig @@ -8,6 +8,7 @@ config M32R select HAVE_KERNEL_BZIP2 select HAVE_KERNEL_LZMA select ARCH_WANT_IPC_PARSE_VERSION + select HAVE_DEBUG_BUGVERBOSE select HAVE_GENERIC_HARDIRQS select GENERIC_IRQ_PROBE select GENERIC_IRQ_SHOW diff --git a/arch/m68k/Kconfig b/arch/m68k/Kconfig index 3e2b2f66db60..dae1e7e16a37 100644 --- a/arch/m68k/Kconfig +++ b/arch/m68k/Kconfig @@ -3,6 +3,7 @@ config M68K default y select HAVE_IDE select HAVE_AOUT if MMU + select HAVE_DEBUG_BUGVERBOSE select HAVE_GENERIC_HARDIRQS select GENERIC_IRQ_SHOW select GENERIC_ATOMIC64 diff --git a/arch/sh/Kconfig b/arch/sh/Kconfig index cfbf3e3c982b..3b3e27a3ff2c 100644 --- a/arch/sh/Kconfig +++ b/arch/sh/Kconfig @@ -13,6 +13,7 @@ config SUPERH select HAVE_DMA_ATTRS select HAVE_IRQ_WORK select HAVE_PERF_EVENTS + select HAVE_DEBUG_BUGVERBOSE select ARCH_HAVE_CUSTOM_GPIO_H select ARCH_HAVE_NMI_SAFE_CMPXCHG if (GUSA_RB || CPU_SH4A) select PERF_USE_VMALLOC diff --git a/arch/sparc/Kconfig b/arch/sparc/Kconfig index 274d6cf0ada2..700a01adec3a 100644 --- a/arch/sparc/Kconfig +++ b/arch/sparc/Kconfig @@ -32,6 +32,7 @@ config SPARC select GENERIC_PCI_IOMAP select HAVE_NMI_WATCHDOG if SPARC64 select HAVE_BPF_JIT + select HAVE_DEBUG_BUGVERBOSE select GENERIC_SMP_IDLE_THREAD select GENERIC_CMOS_UPDATE select GENERIC_CLOCKEVENTS diff --git a/arch/tile/Kconfig b/arch/tile/Kconfig index 9a0d77d3ba14..df69d4296b4b 100644 --- a/arch/tile/Kconfig +++ b/arch/tile/Kconfig @@ -14,6 +14,7 @@ config TILE select GENERIC_IRQ_PROBE select GENERIC_PENDING_IRQ if SMP select GENERIC_IRQ_SHOW + select HAVE_DEBUG_BUGVERBOSE select HAVE_SYSCALL_WRAPPERS if TILEGX select SYS_HYPERVISOR select ARCH_HAVE_NMI_SAFE_CMPXCHG -- cgit v1.2.2 From 7ac57a89de958fbb5271dc504d0c25e34dbeec32 Mon Sep 17 00:00:00 2001 From: Catalin Marinas Date: Mon, 8 Oct 2012 16:28:16 -0700 Subject: Kconfig: clean up the "#if defined(arch)" list for exception-trace sysctl entry Introduce SYSCTL_EXCEPTION_TRACE config option and selec it in the architectures requiring support for the "exception-trace" debug_table entry in kernel/sysctl.c. Signed-off-by: Catalin Marinas Cc: Benjamin Herrenschmidt Cc: Paul Mackerras Cc: Martin Schwidefsky Cc: Heiko Carstens Cc: "David S. Miller" Cc: Chris Metcalf Cc: Thomas Gleixner Cc: Ingo Molnar Cc: "H. 
Peter Anvin" Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- arch/arm64/Kconfig | 1 + arch/powerpc/Kconfig | 1 + arch/s390/Kconfig | 1 + arch/sparc/Kconfig | 1 + arch/tile/Kconfig | 1 + arch/x86/Kconfig | 1 + 6 files changed, 6 insertions(+) (limited to 'arch') diff --git a/arch/arm64/Kconfig b/arch/arm64/Kconfig index a30856058742..7ff68c946073 100644 --- a/arch/arm64/Kconfig +++ b/arch/arm64/Kconfig @@ -28,6 +28,7 @@ config ARM64 select PERF_USE_VMALLOC select RTC_LIB select SPARSE_IRQ + select SYSCTL_EXCEPTION_TRACE help ARM 64-bit (AArch64) Linux support. diff --git a/arch/powerpc/Kconfig b/arch/powerpc/Kconfig index 6a798a70a6d1..df7edb887a04 100644 --- a/arch/powerpc/Kconfig +++ b/arch/powerpc/Kconfig @@ -99,6 +99,7 @@ config PPC select HAVE_DYNAMIC_FTRACE select HAVE_FUNCTION_TRACER select HAVE_FUNCTION_GRAPH_TRACER + select SYSCTL_EXCEPTION_TRACE select ARCH_WANT_OPTIONAL_GPIOLIB select HAVE_IDE select HAVE_IOREMAP_PROT diff --git a/arch/s390/Kconfig b/arch/s390/Kconfig index 8c6d7986f6d2..ceff7aef2477 100644 --- a/arch/s390/Kconfig +++ b/arch/s390/Kconfig @@ -68,6 +68,7 @@ config S390 select HAVE_FTRACE_MCOUNT_RECORD select HAVE_C_RECORDMCOUNT select HAVE_SYSCALL_TRACEPOINTS + select SYSCTL_EXCEPTION_TRACE select HAVE_DYNAMIC_FTRACE select HAVE_FUNCTION_GRAPH_TRACER select HAVE_REGS_AND_STACK_ACCESS_API diff --git a/arch/sparc/Kconfig b/arch/sparc/Kconfig index 700a01adec3a..e184075877d7 100644 --- a/arch/sparc/Kconfig +++ b/arch/sparc/Kconfig @@ -18,6 +18,7 @@ config SPARC select HAVE_OPROFILE select HAVE_ARCH_KGDB if !SMP || SPARC64 select HAVE_ARCH_TRACEHOOK + select SYSCTL_EXCEPTION_TRACE select ARCH_WANT_OPTIONAL_GPIOLIB select RTC_CLASS select RTC_DRV_M48T59 diff --git a/arch/tile/Kconfig b/arch/tile/Kconfig index df69d4296b4b..dc46490adca0 100644 --- a/arch/tile/Kconfig +++ b/arch/tile/Kconfig @@ -7,6 +7,7 @@ config TILE select HAVE_DMA_API_DEBUG select HAVE_KVM if !TILEGX select GENERIC_FIND_FIRST_BIT + select SYSCTL_EXCEPTION_TRACE select USE_GENERIC_SMP_HELPERS select CC_OPTIMIZE_FOR_SIZE select HAVE_DEBUG_KMEMLEAK diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig index 3fea1848d955..6119d6c7002e 100644 --- a/arch/x86/Kconfig +++ b/arch/x86/Kconfig @@ -47,6 +47,7 @@ config X86 select HAVE_FUNCTION_GRAPH_FP_TEST select HAVE_FUNCTION_TRACE_MCOUNT_TEST select HAVE_SYSCALL_TRACEPOINTS + select SYSCTL_EXCEPTION_TRACE select HAVE_KVM select HAVE_ARCH_KGDB select HAVE_ARCH_TRACEHOOK -- cgit v1.2.2 From b1a86e15dc0304366f50ba1720834bc419c801b1 Mon Sep 17 00:00:00 2001 From: Suresh Siddha Date: Mon, 8 Oct 2012 16:28:23 -0700 Subject: x86, pat: remove the dependency on 'vm_pgoff' in track/untrack pfn vma routines 'pfn' argument for track_pfn_vma_new() can be used for reserving the attribute for the pfn range. No need to depend on 'vm_pgoff' Similarly, untrack_pfn_vma() can depend on the 'pfn' argument if it is non-zero or can use follow_phys() to get the starting value of the pfn range. Also the non zero 'size' argument can be used instead of recomputing it from vma. This cleanup also prepares the ground for the track/untrack pfn vma routines to take over the ownership of setting PAT specific vm_flag in the 'vma'. [khlebnikov@openvz.org: Clear pfn to paddr conversion] Signed-off-by: Suresh Siddha Signed-off-by: Konstantin Khlebnikov Cc: Venkatesh Pallipadi Cc: H. Peter Anvin Cc: Nick Piggin Cc: Ingo Molnar Cc: Alexander Viro Cc: Carsten Otte Cc: Chris Metcalf Cc: Cyrill Gorcunov Cc: Eric Paris Cc: H. 
Peter Anvin Cc: Hugh Dickins Cc: Ingo Molnar Cc: James Morris Cc: Jason Baron Cc: Kentaro Takeda Cc: Matt Helsley Cc: Nick Piggin Cc: Oleg Nesterov Cc: Peter Zijlstra Cc: Robert Richter Cc: Tetsuo Handa Acked-by: Linus Torvalds Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- arch/x86/mm/pat.c | 33 +++++++++++++++++++-------------- 1 file changed, 19 insertions(+), 14 deletions(-) (limited to 'arch') diff --git a/arch/x86/mm/pat.c b/arch/x86/mm/pat.c index 3d68ef6d2266..de36c886cd38 100644 --- a/arch/x86/mm/pat.c +++ b/arch/x86/mm/pat.c @@ -704,21 +704,18 @@ int track_pfn_vma_copy(struct vm_area_struct *vma) int track_pfn_vma_new(struct vm_area_struct *vma, pgprot_t *prot, unsigned long pfn, unsigned long size) { + resource_size_t paddr = (resource_size_t)pfn << PAGE_SHIFT; unsigned long flags; - resource_size_t paddr; - unsigned long vma_size = vma->vm_end - vma->vm_start; - if (is_linear_pfn_mapping(vma)) { - /* reserve the whole chunk starting from vm_pgoff */ - paddr = (resource_size_t)vma->vm_pgoff << PAGE_SHIFT; - return reserve_pfn_range(paddr, vma_size, prot, 0); - } + /* reserve the whole chunk starting from paddr */ + if (is_linear_pfn_mapping(vma)) + return reserve_pfn_range(paddr, size, prot, 0); if (!pat_enabled) return 0; /* for vm_insert_pfn and friends, we set prot based on lookup */ - flags = lookup_memtype(pfn << PAGE_SHIFT); + flags = lookup_memtype(paddr); *prot = __pgprot((pgprot_val(vma->vm_page_prot) & (~_PAGE_CACHE_MASK)) | flags); @@ -728,20 +725,28 @@ int track_pfn_vma_new(struct vm_area_struct *vma, pgprot_t *prot, /* * untrack_pfn_vma is called while unmapping a pfnmap for a region. * untrack can be called for a specific region indicated by pfn and size or - * can be for the entire vma (in which case size can be zero). + * can be for the entire vma (in which case pfn, size are zero). */ void untrack_pfn_vma(struct vm_area_struct *vma, unsigned long pfn, unsigned long size) { resource_size_t paddr; - unsigned long vma_size = vma->vm_end - vma->vm_start; + unsigned long prot; - if (is_linear_pfn_mapping(vma)) { - /* free the whole chunk starting from vm_pgoff */ - paddr = (resource_size_t)vma->vm_pgoff << PAGE_SHIFT; - free_pfn_range(paddr, vma_size); + if (!is_linear_pfn_mapping(vma)) return; + + /* free the chunk starting from pfn or the whole chunk */ + paddr = (resource_size_t)pfn << PAGE_SHIFT; + if (!paddr && !size) { + if (follow_phys(vma, vma->vm_start, 0, &prot, &paddr)) { + WARN_ON_ONCE(1); + return; + } + + size = vma->vm_end - vma->vm_start; } + free_pfn_range(paddr, size); } pgprot_t pgprot_writecombine(pgprot_t prot) -- cgit v1.2.2 From 5180da410db6369d1f95c9014da1c9bc33fb043e Mon Sep 17 00:00:00 2001 From: Suresh Siddha Date: Mon, 8 Oct 2012 16:28:29 -0700 Subject: x86, pat: separate the pfn attribute tracking for remap_pfn_range and vm_insert_pfn With PAT enabled, vm_insert_pfn() looks up the existing pfn memory attribute and uses it. Expectation is that the driver reserves the memory attributes for the pfn before calling vm_insert_pfn(). remap_pfn_range() (when called for the whole vma) will setup a new attribute (based on the prot argument) for the specified pfn range. This addresses the legacy usage which typically calls remap_pfn_range() with a desired memory attribute. For ranges smaller than the vma size (which is typically not the case), remap_pfn_range() will use the existing memory attribute for the pfn range. Expose two different API's for these different behaviors. 
track_pfn_insert() for tracking the pfn attribute set by vm_insert_pfn() and track_pfn_remap() for the remap_pfn_range(). This cleanup also prepares the ground for the track/untrack pfn vma routines to take over the ownership of setting PAT specific vm_flag in the 'vma'. [khlebnikov@openvz.org: Clear checks in track_pfn_remap()] [akpm@linux-foundation.org: tweak a few comments] Signed-off-by: Suresh Siddha Signed-off-by: Konstantin Khlebnikov Cc: Venkatesh Pallipadi Cc: H. Peter Anvin Cc: Nick Piggin Cc: Ingo Molnar Cc: Alexander Viro Cc: Carsten Otte Cc: Chris Metcalf Cc: Cyrill Gorcunov Cc: Eric Paris Cc: Hugh Dickins Cc: James Morris Cc: Jason Baron Cc: Kentaro Takeda Cc: Konstantin Khlebnikov Cc: Matt Helsley Cc: Oleg Nesterov Cc: Peter Zijlstra Cc: Robert Richter Cc: Suresh Siddha Cc: Tetsuo Handa Acked-by: Linus Torvalds Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- arch/x86/mm/pat.c | 47 ++++++++++++++++++++++++++++++++++++----------- 1 file changed, 36 insertions(+), 11 deletions(-) (limited to 'arch') diff --git a/arch/x86/mm/pat.c b/arch/x86/mm/pat.c index de36c886cd38..74a702674e86 100644 --- a/arch/x86/mm/pat.c +++ b/arch/x86/mm/pat.c @@ -664,13 +664,13 @@ static void free_pfn_range(u64 paddr, unsigned long size) } /* - * track_pfn_vma_copy is called when vma that is covering the pfnmap gets + * track_pfn_copy is called when vma that is covering the pfnmap gets * copied through copy_page_range(). * * If the vma has a linear pfn mapping for the entire range, we get the prot * from pte and reserve the entire vma range with single reserve_pfn_range call. */ -int track_pfn_vma_copy(struct vm_area_struct *vma) +int track_pfn_copy(struct vm_area_struct *vma) { resource_size_t paddr; unsigned long prot; @@ -694,15 +694,12 @@ int track_pfn_vma_copy(struct vm_area_struct *vma) } /* - * track_pfn_vma_new is called when a _new_ pfn mapping is being established - * for physical range indicated by pfn and size. - * * prot is passed in as a parameter for the new mapping. If the vma has a * linear pfn mapping for the entire range reserve the entire vma range with * single reserve_pfn_range call. */ -int track_pfn_vma_new(struct vm_area_struct *vma, pgprot_t *prot, - unsigned long pfn, unsigned long size) +int track_pfn_remap(struct vm_area_struct *vma, pgprot_t *prot, + unsigned long pfn, unsigned long size) { resource_size_t paddr = (resource_size_t)pfn << PAGE_SHIFT; unsigned long flags; @@ -714,8 +711,36 @@ int track_pfn_vma_new(struct vm_area_struct *vma, pgprot_t *prot, if (!pat_enabled) return 0; - /* for vm_insert_pfn and friends, we set prot based on lookup */ + /* + * For anything smaller than the vma size we set prot based on the + * lookup. + */ flags = lookup_memtype(paddr); + + /* Check memtype for the remaining pages */ + while (size > PAGE_SIZE) { + size -= PAGE_SIZE; + paddr += PAGE_SIZE; + if (flags != lookup_memtype(paddr)) + return -EINVAL; + } + + *prot = __pgprot((pgprot_val(vma->vm_page_prot) & (~_PAGE_CACHE_MASK)) | + flags); + + return 0; +} + +int track_pfn_insert(struct vm_area_struct *vma, pgprot_t *prot, + unsigned long pfn) +{ + unsigned long flags; + + if (!pat_enabled) + return 0; + + /* Set prot based on lookup */ + flags = lookup_memtype((resource_size_t)pfn << PAGE_SHIFT); *prot = __pgprot((pgprot_val(vma->vm_page_prot) & (~_PAGE_CACHE_MASK)) | flags); @@ -723,12 +748,12 @@ int track_pfn_vma_new(struct vm_area_struct *vma, pgprot_t *prot, } /* - * untrack_pfn_vma is called while unmapping a pfnmap for a region. 
+ * untrack_pfn is called while unmapping a pfnmap for a region. * untrack can be called for a specific region indicated by pfn and size or * can be for the entire vma (in which case pfn, size are zero). */ -void untrack_pfn_vma(struct vm_area_struct *vma, unsigned long pfn, - unsigned long size) +void untrack_pfn(struct vm_area_struct *vma, unsigned long pfn, + unsigned long size) { resource_size_t paddr; unsigned long prot; -- cgit v1.2.2 From b3b9c2932c32e0692018ed5f12f3fd8c70eea8ce Mon Sep 17 00:00:00 2001 From: Konstantin Khlebnikov Date: Mon, 8 Oct 2012 16:28:34 -0700 Subject: mm, x86, pat: rework linear pfn-mmap tracking Replace the generic vma-flag VM_PFN_AT_MMAP with x86-only VM_PAT. We can toss mapping address from remap_pfn_range() into track_pfn_vma_new(), and collect all PAT-related logic together in arch/x86/. This patch also restores orignal frustration-free is_cow_mapping() check in remap_pfn_range(), as it was before commit v2.6.28-rc8-88-g3c8bb73 ("x86: PAT: store vm_pgoff for all linear_over_vma_region mappings - v3") is_linear_pfn_mapping() checks can be removed from mm/huge_memory.c, because it already handled by VM_PFNMAP in VM_NO_THP bit-mask. [suresh.b.siddha@intel.com: Reset the VM_PAT flag as part of untrack_pfn_vma()] Signed-off-by: Konstantin Khlebnikov Signed-off-by: Suresh Siddha Cc: Venkatesh Pallipadi Cc: H. Peter Anvin Cc: Nick Piggin Cc: Ingo Molnar Cc: Alexander Viro Cc: Carsten Otte Cc: Chris Metcalf Cc: Cyrill Gorcunov Cc: Eric Paris Cc: Hugh Dickins Cc: James Morris Cc: Jason Baron Cc: Kentaro Takeda Cc: Matt Helsley Cc: Oleg Nesterov Cc: Peter Zijlstra Cc: Robert Richter Cc: Tetsuo Handa Cc: Venkatesh Pallipadi Acked-by: Linus Torvalds Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- arch/x86/mm/pat.c | 17 ++++++++++++----- 1 file changed, 12 insertions(+), 5 deletions(-) (limited to 'arch') diff --git a/arch/x86/mm/pat.c b/arch/x86/mm/pat.c index 74a702674e86..0eb572eda406 100644 --- a/arch/x86/mm/pat.c +++ b/arch/x86/mm/pat.c @@ -677,7 +677,7 @@ int track_pfn_copy(struct vm_area_struct *vma) unsigned long vma_size = vma->vm_end - vma->vm_start; pgprot_t pgprot; - if (is_linear_pfn_mapping(vma)) { + if (vma->vm_flags & VM_PAT) { /* * reserve the whole chunk covered by vma. We need the * starting address and protection from pte. @@ -699,14 +699,20 @@ int track_pfn_copy(struct vm_area_struct *vma) * single reserve_pfn_range call. 
*/ int track_pfn_remap(struct vm_area_struct *vma, pgprot_t *prot, - unsigned long pfn, unsigned long size) + unsigned long pfn, unsigned long addr, unsigned long size) { resource_size_t paddr = (resource_size_t)pfn << PAGE_SHIFT; unsigned long flags; /* reserve the whole chunk starting from paddr */ - if (is_linear_pfn_mapping(vma)) - return reserve_pfn_range(paddr, size, prot, 0); + if (addr == vma->vm_start && size == (vma->vm_end - vma->vm_start)) { + int ret; + + ret = reserve_pfn_range(paddr, size, prot, 0); + if (!ret) + vma->vm_flags |= VM_PAT; + return ret; + } if (!pat_enabled) return 0; @@ -758,7 +764,7 @@ void untrack_pfn(struct vm_area_struct *vma, unsigned long pfn, resource_size_t paddr; unsigned long prot; - if (!is_linear_pfn_mapping(vma)) + if (!(vma->vm_flags & VM_PAT)) return; /* free the chunk starting from pfn or the whole chunk */ @@ -772,6 +778,7 @@ void untrack_pfn(struct vm_area_struct *vma, unsigned long pfn, size = vma->vm_end - vma->vm_start; } free_pfn_range(paddr, size); + vma->vm_flags &= ~VM_PAT; } pgprot_t pgprot_writecombine(pgprot_t prot) -- cgit v1.2.2 From 2dd8ad81e31d0d36a5d448329c646ab43eb17788 Mon Sep 17 00:00:00 2001 From: Konstantin Khlebnikov Date: Mon, 8 Oct 2012 16:28:51 -0700 Subject: mm: use mm->exe_file instead of first VM_EXECUTABLE vma->vm_file Some security modules and oprofile still uses VM_EXECUTABLE for retrieving a task's executable file. After this patch they will use mm->exe_file directly. mm->exe_file is protected with mm->mmap_sem, so locking stays the same. Signed-off-by: Konstantin Khlebnikov Acked-by: Chris Metcalf [arch/tile] Acked-by: Tetsuo Handa [tomoyo] Cc: Alexander Viro Cc: Carsten Otte Cc: Cyrill Gorcunov Cc: Eric Paris Cc: H. Peter Anvin Cc: Hugh Dickins Cc: Ingo Molnar Acked-by: James Morris Cc: Jason Baron Cc: Kentaro Takeda Cc: Matt Helsley Cc: Nick Piggin Cc: Oleg Nesterov Cc: Peter Zijlstra Cc: Robert Richter Cc: Suresh Siddha Cc: Venkatesh Pallipadi Acked-by: Linus Torvalds Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- arch/powerpc/oprofile/cell/spu_task_sync.c | 15 ++++----------- arch/tile/mm/elf.c | 19 +++++++------------ 2 files changed, 11 insertions(+), 23 deletions(-) (limited to 'arch') diff --git a/arch/powerpc/oprofile/cell/spu_task_sync.c b/arch/powerpc/oprofile/cell/spu_task_sync.c index 642fca137ccb..28f1af2db1f5 100644 --- a/arch/powerpc/oprofile/cell/spu_task_sync.c +++ b/arch/powerpc/oprofile/cell/spu_task_sync.c @@ -304,7 +304,7 @@ static inline unsigned long fast_get_dcookie(struct path *path) return cookie; } -/* Look up the dcookie for the task's first VM_EXECUTABLE mapping, +/* Look up the dcookie for the task's mm->exe_file, * which corresponds loosely to "application name". Also, determine * the offset for the SPU ELF object. 
If computed offset is * non-zero, it implies an embedded SPU object; otherwise, it's a @@ -321,7 +321,6 @@ get_exec_dcookie_and_offset(struct spu *spu, unsigned int *offsetp, { unsigned long app_cookie = 0; unsigned int my_offset = 0; - struct file *app = NULL; struct vm_area_struct *vma; struct mm_struct *mm = spu->mm; @@ -330,16 +329,10 @@ get_exec_dcookie_and_offset(struct spu *spu, unsigned int *offsetp, down_read(&mm->mmap_sem); - for (vma = mm->mmap; vma; vma = vma->vm_next) { - if (!vma->vm_file) - continue; - if (!(vma->vm_flags & VM_EXECUTABLE)) - continue; - app_cookie = fast_get_dcookie(&vma->vm_file->f_path); + if (mm->exe_file) { + app_cookie = fast_get_dcookie(&mm->exe_file->f_path); pr_debug("got dcookie for %s\n", - vma->vm_file->f_dentry->d_name.name); - app = vma->vm_file; - break; + mm->exe_file->f_dentry->d_name.name); } for (vma = mm->mmap; vma; vma = vma->vm_next) { diff --git a/arch/tile/mm/elf.c b/arch/tile/mm/elf.c index 758b6038c2b7..3cfa98bf9125 100644 --- a/arch/tile/mm/elf.c +++ b/arch/tile/mm/elf.c @@ -36,19 +36,14 @@ static void sim_notify_exec(const char *binary_name) } while (c); } -static int notify_exec(void) +static int notify_exec(struct mm_struct *mm) { int retval = 0; /* failure */ - struct vm_area_struct *vma = current->mm->mmap; - while (vma) { - if ((vma->vm_flags & VM_EXECUTABLE) && vma->vm_file) - break; - vma = vma->vm_next; - } - if (vma) { + + if (mm->exe_file) { char *buf = (char *) __get_free_page(GFP_KERNEL); if (buf) { - char *path = d_path(&vma->vm_file->f_path, + char *path = d_path(&mm->exe_file->f_path, buf, PAGE_SIZE); if (!IS_ERR(path)) { sim_notify_exec(path); @@ -106,16 +101,16 @@ int arch_setup_additional_pages(struct linux_binprm *bprm, unsigned long vdso_base; int retval = 0; + down_write(&mm->mmap_sem); + /* * Notify the simulator that an exec just occurred. * If we can't find the filename of the mapping, just use * whatever was passed as the linux_binprm filename. */ - if (!notify_exec()) + if (!notify_exec(mm)) sim_notify_exec(bprm->filename); - down_write(&mm->mmap_sem); - /* * MAYWRITE to allow gdb to COW and set breakpoints */ -- cgit v1.2.2 From 314e51b9851b4f4e8ab302243ff5a6fc6147f379 Mon Sep 17 00:00:00 2001 From: Konstantin Khlebnikov Date: Mon, 8 Oct 2012 16:29:02 -0700 Subject: mm: kill vma flag VM_RESERVED and mm->reserved_vm counter A long time ago, in v2.4, VM_RESERVED kept swapout process off VMA, currently it lost original meaning but still has some effects: | effect | alternative flags -+------------------------+--------------------------------------------- 1| account as reserved_vm | VM_IO 2| skip in core dump | VM_IO, VM_DONTDUMP 3| do not merge or expand | VM_IO, VM_DONTEXPAND, VM_HUGETLB, VM_PFNMAP 4| do not mlock | VM_IO, VM_DONTEXPAND, VM_HUGETLB, VM_PFNMAP This patch removes reserved_vm counter from mm_struct. Seems like nobody cares about it, it does not exported into userspace directly, it only reduces total_vm showed in proc. Thus VM_RESERVED can be replaced with VM_IO or pair VM_DONTEXPAND | VM_DONTDUMP. remap_pfn_range() and io_remap_pfn_range() set VM_IO|VM_DONTEXPAND|VM_DONTDUMP. remap_vmalloc_range() set VM_DONTEXPAND | VM_DONTDUMP. [akpm@linux-foundation.org: drivers/vfio/pci/vfio_pci.c fixup] Signed-off-by: Konstantin Khlebnikov Cc: Alexander Viro Cc: Carsten Otte Cc: Chris Metcalf Cc: Cyrill Gorcunov Cc: Eric Paris Cc: H. 
Peter Anvin Cc: Hugh Dickins Cc: Ingo Molnar Cc: James Morris Cc: Jason Baron Cc: Kentaro Takeda Cc: Matt Helsley Cc: Nick Piggin Cc: Oleg Nesterov Cc: Peter Zijlstra Cc: Robert Richter Cc: Suresh Siddha Cc: Tetsuo Handa Cc: Venkatesh Pallipadi Acked-by: Linus Torvalds Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- arch/alpha/kernel/pci-sysfs.c | 2 +- arch/ia64/kernel/perfmon.c | 2 +- arch/ia64/mm/init.c | 3 ++- arch/powerpc/kvm/book3s_hv.c | 2 +- arch/sparc/kernel/pci.c | 2 +- arch/unicore32/kernel/process.c | 2 +- arch/x86/xen/mmu.c | 3 +-- 7 files changed, 8 insertions(+), 8 deletions(-) (limited to 'arch') diff --git a/arch/alpha/kernel/pci-sysfs.c b/arch/alpha/kernel/pci-sysfs.c index 53649c7d0068..b51f7b4818cd 100644 --- a/arch/alpha/kernel/pci-sysfs.c +++ b/arch/alpha/kernel/pci-sysfs.c @@ -26,7 +26,7 @@ static int hose_mmap_page_range(struct pci_controller *hose, base = sparse ? hose->sparse_io_base : hose->dense_io_base; vma->vm_pgoff += base >> PAGE_SHIFT; - vma->vm_flags |= (VM_IO | VM_RESERVED); + vma->vm_flags |= VM_IO | VM_DONTEXPAND | VM_DONTDUMP; return io_remap_pfn_range(vma, vma->vm_start, vma->vm_pgoff, vma->vm_end - vma->vm_start, diff --git a/arch/ia64/kernel/perfmon.c b/arch/ia64/kernel/perfmon.c index f388b4e18a37..ea39eba61ef5 100644 --- a/arch/ia64/kernel/perfmon.c +++ b/arch/ia64/kernel/perfmon.c @@ -2307,7 +2307,7 @@ pfm_smpl_buffer_alloc(struct task_struct *task, struct file *filp, pfm_context_t */ vma->vm_mm = mm; vma->vm_file = get_file(filp); - vma->vm_flags = VM_READ| VM_MAYREAD |VM_RESERVED; + vma->vm_flags = VM_READ|VM_MAYREAD|VM_DONTEXPAND|VM_DONTDUMP; vma->vm_page_prot = PAGE_READONLY; /* XXX may need to change */ /* diff --git a/arch/ia64/mm/init.c b/arch/ia64/mm/init.c index 0eab454867a2..082e383c1b6f 100644 --- a/arch/ia64/mm/init.c +++ b/arch/ia64/mm/init.c @@ -138,7 +138,8 @@ ia64_init_addr_space (void) vma->vm_mm = current->mm; vma->vm_end = PAGE_SIZE; vma->vm_page_prot = __pgprot(pgprot_val(PAGE_READONLY) | _PAGE_MA_NAT); - vma->vm_flags = VM_READ | VM_MAYREAD | VM_IO | VM_RESERVED; + vma->vm_flags = VM_READ | VM_MAYREAD | VM_IO | + VM_DONTEXPAND | VM_DONTDUMP; down_write(¤t->mm->mmap_sem); if (insert_vm_struct(current->mm, vma)) { up_write(¤t->mm->mmap_sem); diff --git a/arch/powerpc/kvm/book3s_hv.c b/arch/powerpc/kvm/book3s_hv.c index 83e929e66f9d..721d4603a235 100644 --- a/arch/powerpc/kvm/book3s_hv.c +++ b/arch/powerpc/kvm/book3s_hv.c @@ -1183,7 +1183,7 @@ static const struct vm_operations_struct kvm_rma_vm_ops = { static int kvm_rma_mmap(struct file *file, struct vm_area_struct *vma) { - vma->vm_flags |= VM_RESERVED; + vma->vm_flags |= VM_DONTEXPAND | VM_DONTDUMP; vma->vm_ops = &kvm_rma_vm_ops; return 0; } diff --git a/arch/sparc/kernel/pci.c b/arch/sparc/kernel/pci.c index acc8c838ff72..75b31bcdeadf 100644 --- a/arch/sparc/kernel/pci.c +++ b/arch/sparc/kernel/pci.c @@ -779,7 +779,7 @@ static int __pci_mmap_make_offset(struct pci_dev *pdev, static void __pci_mmap_set_flags(struct pci_dev *dev, struct vm_area_struct *vma, enum pci_mmap_state mmap_state) { - vma->vm_flags |= (VM_IO | VM_RESERVED); + vma->vm_flags |= VM_IO | VM_DONTEXPAND | VM_DONTDUMP; } /* Set vm_page_prot of VMA, as appropriate for this architecture, for a pci diff --git a/arch/unicore32/kernel/process.c b/arch/unicore32/kernel/process.c index b6f0458c3143..b008586dad75 100644 --- a/arch/unicore32/kernel/process.c +++ b/arch/unicore32/kernel/process.c @@ -380,7 +380,7 @@ int vectors_user_mapping(void) return install_special_mapping(mm, 0xffff0000, PAGE_SIZE, 
VM_READ | VM_EXEC | VM_MAYREAD | VM_MAYEXEC | - VM_RESERVED, + VM_DONTEXPAND | VM_DONTDUMP, NULL); } diff --git a/arch/x86/xen/mmu.c b/arch/x86/xen/mmu.c index 5a16824cc2b3..fd28d86fe3d2 100644 --- a/arch/x86/xen/mmu.c +++ b/arch/x86/xen/mmu.c @@ -2451,8 +2451,7 @@ int xen_remap_domain_mfn_range(struct vm_area_struct *vma, prot = __pgprot(pgprot_val(prot) | _PAGE_IOMAP); - BUG_ON(!((vma->vm_flags & (VM_PFNMAP | VM_RESERVED | VM_IO)) == - (VM_PFNMAP | VM_RESERVED | VM_IO))); + BUG_ON(!((vma->vm_flags & (VM_PFNMAP | VM_IO)) == (VM_PFNMAP | VM_IO))); rmd.mfn = mfn; rmd.prot = prot; -- cgit v1.2.2 From 5d3a551c28c6669dc43be40d8fafafbc2ec8f42b Mon Sep 17 00:00:00 2001 From: Will Deacon Date: Mon, 8 Oct 2012 16:29:32 -0700 Subject: mm: hugetlb: add arch hook for clearing page flags before entering pool The core page allocator ensures that page flags are zeroed when freeing pages via free_pages_check. A number of architectures (ARM, PPC, MIPS) rely on this property to treat new pages as dirty with respect to the data cache and perform the appropriate flushing before mapping the pages into userspace. This can lead to cache synchronisation problems when using hugepages, since the allocator keeps its own pool of pages above the usual page allocator and does not reset the page flags when freeing a page into the pool. This patch adds a new architecture hook, arch_clear_hugepage_flags, so that architectures which rely on the page flags being in a particular state for fresh allocations can adjust the flags accordingly when a page is freed into the pool. Signed-off-by: Will Deacon Cc: Michal Hocko Reviewed-by: Michal Hocko Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- arch/ia64/include/asm/hugetlb.h | 4 ++++ arch/mips/include/asm/hugetlb.h | 4 ++++ arch/powerpc/include/asm/hugetlb.h | 4 ++++ arch/s390/include/asm/hugetlb.h | 1 + arch/sh/include/asm/hugetlb.h | 6 ++++++ arch/sparc/include/asm/hugetlb.h | 4 ++++ arch/tile/include/asm/hugetlb.h | 4 ++++ arch/x86/include/asm/hugetlb.h | 4 ++++ 8 files changed, 31 insertions(+) (limited to 'arch') diff --git a/arch/ia64/include/asm/hugetlb.h b/arch/ia64/include/asm/hugetlb.h index da55c63728e0..94eaa5bd5d0c 100644 --- a/arch/ia64/include/asm/hugetlb.h +++ b/arch/ia64/include/asm/hugetlb.h @@ -77,4 +77,8 @@ static inline void arch_release_hugepage(struct page *page) { } +static inline void arch_clear_hugepage_flags(struct page *page) +{ +} + #endif /* _ASM_IA64_HUGETLB_H */ diff --git a/arch/mips/include/asm/hugetlb.h b/arch/mips/include/asm/hugetlb.h index 58d36889f09b..bd94946a18f3 100644 --- a/arch/mips/include/asm/hugetlb.h +++ b/arch/mips/include/asm/hugetlb.h @@ -112,4 +112,8 @@ static inline void arch_release_hugepage(struct page *page) { } +static inline void arch_clear_hugepage_flags(struct page *page) +{ +} + #endif /* __ASM_HUGETLB_H */ diff --git a/arch/powerpc/include/asm/hugetlb.h b/arch/powerpc/include/asm/hugetlb.h index dfdb95bc59a5..62e11a32c4c2 100644 --- a/arch/powerpc/include/asm/hugetlb.h +++ b/arch/powerpc/include/asm/hugetlb.h @@ -151,6 +151,10 @@ static inline void arch_release_hugepage(struct page *page) { } +static inline void arch_clear_hugepage_flags(struct page *page) +{ +} + #else /* ! 
CONFIG_HUGETLB_PAGE */ static inline void flush_hugetlb_page(struct vm_area_struct *vma, unsigned long vmaddr) diff --git a/arch/s390/include/asm/hugetlb.h b/arch/s390/include/asm/hugetlb.h index 2d6e6e380564..fc322421b1cc 100644 --- a/arch/s390/include/asm/hugetlb.h +++ b/arch/s390/include/asm/hugetlb.h @@ -33,6 +33,7 @@ static inline int prepare_hugepage_range(struct file *file, } #define hugetlb_prefault_arch_hook(mm) do { } while (0) +#define arch_clear_hugepage_flags(page) do { } while (0) int arch_prepare_hugepage(struct page *page); void arch_release_hugepage(struct page *page); diff --git a/arch/sh/include/asm/hugetlb.h b/arch/sh/include/asm/hugetlb.h index 967068fb79ac..b3808c7d67b2 100644 --- a/arch/sh/include/asm/hugetlb.h +++ b/arch/sh/include/asm/hugetlb.h @@ -1,6 +1,7 @@ #ifndef _ASM_SH_HUGETLB_H #define _ASM_SH_HUGETLB_H +#include #include @@ -89,4 +90,9 @@ static inline void arch_release_hugepage(struct page *page) { } +static inline void arch_clear_hugepage_flags(struct page *page) +{ + clear_bit(PG_dcache_clean, &page->flags); +} + #endif /* _ASM_SH_HUGETLB_H */ diff --git a/arch/sparc/include/asm/hugetlb.h b/arch/sparc/include/asm/hugetlb.h index 177061064ee6..e7927c9758a1 100644 --- a/arch/sparc/include/asm/hugetlb.h +++ b/arch/sparc/include/asm/hugetlb.h @@ -82,4 +82,8 @@ static inline void arch_release_hugepage(struct page *page) { } +static inline void arch_clear_hugepage_flags(struct page *page) +{ +} + #endif /* _ASM_SPARC64_HUGETLB_H */ diff --git a/arch/tile/include/asm/hugetlb.h b/arch/tile/include/asm/hugetlb.h index b2042380a5aa..0f885af2b621 100644 --- a/arch/tile/include/asm/hugetlb.h +++ b/arch/tile/include/asm/hugetlb.h @@ -106,6 +106,10 @@ static inline void arch_release_hugepage(struct page *page) { } +static inline void arch_clear_hugepage_flags(struct page *page) +{ +} + #ifdef CONFIG_HUGETLB_SUPER_PAGES static inline pte_t arch_make_huge_pte(pte_t entry, struct vm_area_struct *vma, struct page *page, int writable) diff --git a/arch/x86/include/asm/hugetlb.h b/arch/x86/include/asm/hugetlb.h index 439a9acc132d..bdd35dbd0605 100644 --- a/arch/x86/include/asm/hugetlb.h +++ b/arch/x86/include/asm/hugetlb.h @@ -90,4 +90,8 @@ static inline void arch_release_hugepage(struct page *page) { } +static inline void arch_clear_hugepage_flags(struct page *page) +{ +} + #endif /* _ASM_X86_HUGETLB_H */ -- cgit v1.2.2 From 15626062f4a98279c59a2a5208c496cf65cbf8c0 Mon Sep 17 00:00:00 2001 From: Gerald Schaefer Date: Mon, 8 Oct 2012 16:30:04 -0700 Subject: thp, x86: introduce HAVE_ARCH_TRANSPARENT_HUGEPAGE Cleanup patch in preparation for transparent hugepage support on s390. Adding new architectures to the TRANSPARENT_HUGEPAGE config option can make the "depends" line rather ugly, like "depends on (X86 || (S390 && 64BIT)) && MMU". This patch adds a HAVE_ARCH_TRANSPARENT_HUGEPAGE instead. x86 already has MMU "def_bool y", so the MMU check is superfluous there and HAVE_ARCH_TRANSPARENT_HUGEPAGE can be selected in arch/x86/Kconfig. Signed-off-by: Gerald Schaefer Cc: Ingo Molnar Cc: Thomas Gleixner Cc: "H. 
Peter Anvin" Cc: Andrea Arcangeli Cc: Andi Kleen Cc: Hugh Dickins Cc: Hillf Danton Cc: Martin Schwidefsky Cc: Heiko Carstens Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- arch/Kconfig | 3 +++ arch/x86/Kconfig | 1 + 2 files changed, 4 insertions(+) (limited to 'arch') diff --git a/arch/Kconfig b/arch/Kconfig index a62965d057f6..550cce4dd648 100644 --- a/arch/Kconfig +++ b/arch/Kconfig @@ -313,4 +313,7 @@ config HAVE_IRQ_TIME_ACCOUNTING Archs need to ensure they use a high enough resolution clock to support irq time accounting and then call enable_sched_clock_irqtime(). +config HAVE_ARCH_TRANSPARENT_HUGEPAGE + bool + source "kernel/gcov/Kconfig" diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig index 6119d6c7002e..1ae94bcae5d9 100644 --- a/arch/x86/Kconfig +++ b/arch/x86/Kconfig @@ -88,6 +88,7 @@ config X86 select IRQ_FORCED_THREADING select USE_GENERIC_SMP_HELPERS if SMP select HAVE_BPF_JIT if X86_64 + select HAVE_ARCH_TRANSPARENT_HUGEPAGE select CLKEVT_I8253 select ARCH_HAVE_NMI_SAFE_CMPXCHG select GENERIC_IOMAP -- cgit v1.2.2 From 75077afbec1ac89178c1542b23a70d0f960b0aaf Mon Sep 17 00:00:00 2001 From: Gerald Schaefer Date: Mon, 8 Oct 2012 16:30:15 -0700 Subject: thp, s390: thp splitting backend for s390 This patch is part of the architecture backend for thp on s390. It provides the functions related to thp splitting, including serialization against gup. Unlike other archs, pmdp_splitting_flush() cannot use a tlb flushing operation to serialize against gup on s390, because that wouldn't be stopped by the disabled IRQs. So instead, smp_call_function() is called with an empty function, which will have the expected effect. Signed-off-by: Gerald Schaefer Cc: Andrea Arcangeli Cc: Andi Kleen Cc: Hugh Dickins Cc: Hillf Danton Cc: Martin Schwidefsky Cc: Heiko Carstens Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- arch/s390/include/asm/pgtable.h | 13 +++++++++++++ arch/s390/mm/gup.c | 11 ++++++++++- arch/s390/mm/pgtable.c | 18 ++++++++++++++++++ 3 files changed, 41 insertions(+), 1 deletion(-) (limited to 'arch') diff --git a/arch/s390/include/asm/pgtable.h b/arch/s390/include/asm/pgtable.h index 6bd7d7483017..c3f0775e71f1 100644 --- a/arch/s390/include/asm/pgtable.h +++ b/arch/s390/include/asm/pgtable.h @@ -347,6 +347,8 @@ extern struct page *vmemmap; #define _SEGMENT_ENTRY_LARGE 0x400 /* STE-format control, large page */ #define _SEGMENT_ENTRY_CO 0x100 /* change-recording override */ +#define _SEGMENT_ENTRY_SPLIT_BIT 0 /* THP splitting bit number */ +#define _SEGMENT_ENTRY_SPLIT (1UL << _SEGMENT_ENTRY_SPLIT_BIT) /* Page status table bits for virtualization */ #define RCP_ACC_BITS 0xf000000000000000UL @@ -506,6 +508,10 @@ static inline int pmd_bad(pmd_t pmd) return (pmd_val(pmd) & mask) != _SEGMENT_ENTRY; } +#define __HAVE_ARCH_PMDP_SPLITTING_FLUSH +extern void pmdp_splitting_flush(struct vm_area_struct *vma, + unsigned long addr, pmd_t *pmdp); + static inline int pte_none(pte_t pte) { return (pte_val(pte) & _PAGE_INVALID) && !(pte_val(pte) & _PAGE_SWT); @@ -1159,6 +1165,13 @@ static inline pmd_t *pmd_offset(pud_t *pud, unsigned long address) #define pte_offset_map(pmd, address) pte_offset_kernel(pmd, address) #define pte_unmap(pte) do { } while (0) +#ifdef CONFIG_TRANSPARENT_HUGEPAGE +static inline int pmd_trans_splitting(pmd_t pmd) +{ + return pmd_val(pmd) & _SEGMENT_ENTRY_SPLIT; +} +#endif /* CONFIG_TRANSPARENT_HUGEPAGE */ + /* * 31 bit swap entry format: * A page-table entry has some bits we have to treat in a special way. 
diff --git a/arch/s390/mm/gup.c b/arch/s390/mm/gup.c index eeaf8023851f..60acb93a4680 100644 --- a/arch/s390/mm/gup.c +++ b/arch/s390/mm/gup.c @@ -115,7 +115,16 @@ static inline int gup_pmd_range(pud_t *pudp, pud_t pud, unsigned long addr, pmd = *pmdp; barrier(); next = pmd_addr_end(addr, end); - if (pmd_none(pmd)) + /* + * The pmd_trans_splitting() check below explains why + * pmdp_splitting_flush() has to serialize with + * smp_call_function() against our disabled IRQs, to stop + * this gup-fast code from running while we set the + * splitting bit in the pmd. Returning zero will take + * the slow path that will call wait_split_huge_page() + * if the pmd is still in splitting state. + */ + if (pmd_none(pmd) || pmd_trans_splitting(pmd)) return 0; if (unlikely(pmd_huge(pmd))) { if (!gup_huge_pmd(pmdp, pmd, addr, next, diff --git a/arch/s390/mm/pgtable.c b/arch/s390/mm/pgtable.c index b402991e43d7..a6131d1fe6c0 100644 --- a/arch/s390/mm/pgtable.c +++ b/arch/s390/mm/pgtable.c @@ -866,3 +866,21 @@ bool kernel_page_present(struct page *page) return cc == 0; } #endif /* CONFIG_HIBERNATION && CONFIG_DEBUG_PAGEALLOC */ + +#ifdef CONFIG_TRANSPARENT_HUGEPAGE +static void pmdp_splitting_flush_sync(void *arg) +{ + /* Simply deliver the interrupt */ +} + +void pmdp_splitting_flush(struct vm_area_struct *vma, unsigned long address, + pmd_t *pmdp) +{ + VM_BUG_ON(address & ~HPAGE_PMD_MASK); + if (!test_and_set_bit(_SEGMENT_ENTRY_SPLIT_BIT, + (unsigned long *) pmdp)) { + /* need to serialize against gup-fast (IRQ disabled) */ + smp_call_function(pmdp_splitting_flush_sync, NULL, 1); + } +} +#endif /* CONFIG_TRANSPARENT_HUGEPAGE */ -- cgit v1.2.2 From 9501d09fa3c4ca18971083dfb0c9aa1afc85f19c Mon Sep 17 00:00:00 2001 From: Gerald Schaefer Date: Mon, 8 Oct 2012 16:30:18 -0700 Subject: thp, s390: thp pagetable pre-allocation for s390 This patch is part of the architecture backend for thp on s390. It provides the pagetable pre-allocation functions pgtable_trans_huge_deposit() and pgtable_trans_huge_withdraw(). Unlike other archs, s390 has no struct page * as pgtable_t, but rather a pointer to the page table. So instead of saving the pagetable pre- allocation list info inside the struct page, it is being saved within the pagetable itself. 
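For orientation, the generic THP code (mm/huge_memory.c, not shown in this arch-only diff) pairs these two hooks under mm->page_table_lock: a pte table is deposited when a huge pmd is established and withdrawn again when the huge pmd is zapped or split. A rough caller-side sketch, with variable names assumed and error handling omitted:

        spin_lock(&mm->page_table_lock);
        set_pmd_at(mm, haddr, pmd, entry);
        pgtable_trans_huge_deposit(mm, pgtable);        /* stash the pte table */
        spin_unlock(&mm->page_table_lock);

        /* ... later, on zap or split of the huge pmd ... */

        spin_lock(&mm->page_table_lock);
        pgtable = pgtable_trans_huge_withdraw(mm);      /* get it back */
        spin_unlock(&mm->page_table_lock);

On s390 the deposited tables are threaded through a list_head overlaid on the page table itself (see pgtable_trans_huge_deposit() in the diff below) rather than through struct page.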
Signed-off-by: Gerald Schaefer Cc: Andrea Arcangeli Cc: Andi Kleen Cc: Hugh Dickins Cc: Hillf Danton Cc: Martin Schwidefsky Cc: Heiko Carstens Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- arch/s390/include/asm/pgtable.h | 6 ++++++ arch/s390/mm/pgtable.c | 38 ++++++++++++++++++++++++++++++++++++++ 2 files changed, 44 insertions(+) (limited to 'arch') diff --git a/arch/s390/include/asm/pgtable.h b/arch/s390/include/asm/pgtable.h index c3f0775e71f1..2c8f00d31cfe 100644 --- a/arch/s390/include/asm/pgtable.h +++ b/arch/s390/include/asm/pgtable.h @@ -1166,6 +1166,12 @@ static inline pmd_t *pmd_offset(pud_t *pud, unsigned long address) #define pte_unmap(pte) do { } while (0) #ifdef CONFIG_TRANSPARENT_HUGEPAGE +#define __HAVE_ARCH_PGTABLE_DEPOSIT +extern void pgtable_trans_huge_deposit(struct mm_struct *mm, pgtable_t pgtable); + +#define __HAVE_ARCH_PGTABLE_WITHDRAW +extern pgtable_t pgtable_trans_huge_withdraw(struct mm_struct *mm); + static inline int pmd_trans_splitting(pmd_t pmd) { return pmd_val(pmd) & _SEGMENT_ENTRY_SPLIT; diff --git a/arch/s390/mm/pgtable.c b/arch/s390/mm/pgtable.c index a6131d1fe6c0..1ca371a06516 100644 --- a/arch/s390/mm/pgtable.c +++ b/arch/s390/mm/pgtable.c @@ -883,4 +883,42 @@ void pmdp_splitting_flush(struct vm_area_struct *vma, unsigned long address, smp_call_function(pmdp_splitting_flush_sync, NULL, 1); } } + +void pgtable_trans_huge_deposit(struct mm_struct *mm, pgtable_t pgtable) +{ + struct list_head *lh = (struct list_head *) pgtable; + + assert_spin_locked(&mm->page_table_lock); + + /* FIFO */ + if (!mm->pmd_huge_pte) + INIT_LIST_HEAD(lh); + else + list_add(lh, (struct list_head *) mm->pmd_huge_pte); + mm->pmd_huge_pte = pgtable; +} + +pgtable_t pgtable_trans_huge_withdraw(struct mm_struct *mm) +{ + struct list_head *lh; + pgtable_t pgtable; + pte_t *ptep; + + assert_spin_locked(&mm->page_table_lock); + + /* FIFO */ + pgtable = mm->pmd_huge_pte; + lh = (struct list_head *) pgtable; + if (list_empty(lh)) + mm->pmd_huge_pte = NULL; + else { + mm->pmd_huge_pte = (pgtable_t) lh->next; + list_del(lh); + } + ptep = (pte_t *) pgtable; + pte_val(*ptep) = _PAGE_TYPE_EMPTY; + ptep++; + pte_val(*ptep) = _PAGE_TYPE_EMPTY; + return pgtable; +} #endif /* CONFIG_TRANSPARENT_HUGEPAGE */ -- cgit v1.2.2 From 274023da1e8a49efa6fd9bf857f8557e5db44cdf Mon Sep 17 00:00:00 2001 From: Gerald Schaefer Date: Mon, 8 Oct 2012 16:30:21 -0700 Subject: thp, s390: disable thp for kvm host on s390 This patch is part of the architecture backend for thp on s390. It disables thp for kvm hosts, because there is no kvm host hugepage support so far. Existing thp mappings are split by follow_page() with FOLL_SPLIT, and future thp mappings are prevented by setting VM_NOHUGEPAGE in mm->def_flags. 
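The mm->def_flags approach covers future mappings because every new vma copies mm->def_flags into its vm_flags, and the generic THP paths (huge page faults and khugepaged) skip any vma with VM_NOHUGEPAGE set. A minimal sketch of that per-vma check follows; the helper name is invented for illustration and the real test lives in the generic THP headers, not in this patch:

        #include <linux/mm.h>

        /* illustration only, not a function added by this patch */
        static inline bool thp_allowed_for_vma_sketch(struct vm_area_struct *vma)
        {
                /*
                 * New vmas inherit mm->def_flags, so once s390_enable_sie()
                 * sets VM_NOHUGEPAGE there, every later mapping of this mm
                 * carries the flag and is left alone by the THP code.
                 */
                return !(vma->vm_flags & VM_NOHUGEPAGE);
        }

Existing mappings cannot be handled that way, which is why thp_split_mm() in the diff below walks them with follow_page(vma, addr, FOLL_SPLIT) first.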
Signed-off-by: Gerald Schaefer Cc: Andrea Arcangeli Cc: Andi Kleen Cc: Hugh Dickins Cc: Hillf Danton Cc: Martin Schwidefsky Cc: Heiko Carstens Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- arch/s390/mm/pgtable.c | 30 ++++++++++++++++++++++++++++++ 1 file changed, 30 insertions(+) (limited to 'arch') diff --git a/arch/s390/mm/pgtable.c b/arch/s390/mm/pgtable.c index 1ca371a06516..e86a523875eb 100644 --- a/arch/s390/mm/pgtable.c +++ b/arch/s390/mm/pgtable.c @@ -787,6 +787,30 @@ void tlb_remove_table(struct mmu_gather *tlb, void *table) tlb_table_flush(tlb); } +#ifdef CONFIG_TRANSPARENT_HUGEPAGE +void thp_split_vma(struct vm_area_struct *vma) +{ + unsigned long addr; + struct page *page; + + for (addr = vma->vm_start; addr < vma->vm_end; addr += PAGE_SIZE) { + page = follow_page(vma, addr, FOLL_SPLIT); + } +} + +void thp_split_mm(struct mm_struct *mm) +{ + struct vm_area_struct *vma = mm->mmap; + + while (vma != NULL) { + thp_split_vma(vma); + vma->vm_flags &= ~VM_HUGEPAGE; + vma->vm_flags |= VM_NOHUGEPAGE; + vma = vma->vm_next; + } +} +#endif /* CONFIG_TRANSPARENT_HUGEPAGE */ + /* * switch on pgstes for its userspace process (for kvm) */ @@ -824,6 +848,12 @@ int s390_enable_sie(void) if (!mm) return -ENOMEM; +#ifdef CONFIG_TRANSPARENT_HUGEPAGE + /* split thp mappings and disable thp for future mappings */ + thp_split_mm(mm); + mm->def_flags |= VM_NOHUGEPAGE; +#endif + /* Now lets check again if something happened */ task_lock(tsk); if (!tsk->mm || atomic_read(&tsk->mm->mm_users) > 1 || -- cgit v1.2.2 From 1ae1c1d09f220ded48ee9a7d91a65e94f95c4af1 Mon Sep 17 00:00:00 2001 From: Gerald Schaefer Date: Mon, 8 Oct 2012 16:30:24 -0700 Subject: thp, s390: architecture backend for thp on s390 This implements the architecture backend for transparent hugepages on s390. 
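The generic THP code only consumes the primitives added here and never touches segment table bits directly. For example, installing an anonymous huge page is composed from these constructors roughly along the following lines (core-mm sketch, slightly simplified, not part of this diff):

        /* core-mm style sketch; page, vma, mm, haddr, pmd, entry assumed in scope */
        entry = mk_pmd(page, vma->vm_page_prot);
        entry = pmd_mkhuge(pmd_mkdirty(pmd_mkwrite(entry)));
        set_pmd_at(mm, haddr, pmd, entry);

Because the s390 segment table entry has no dirty or referenced bit, pmd_mkdirty()/pmd_mkyoung() are no-ops here, and referenced/young state is instead probed and cleared per base page by the rrbe/rrbm based pmdp_test_and_clear_young() below.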
Signed-off-by: Gerald Schaefer Cc: Andrea Arcangeli Cc: Andi Kleen Cc: Hugh Dickins Cc: Hillf Danton Cc: Martin Schwidefsky Cc: Heiko Carstens Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- arch/s390/include/asm/hugetlb.h | 18 +--- arch/s390/include/asm/pgtable.h | 190 ++++++++++++++++++++++++++++++++++++++++ arch/s390/include/asm/setup.h | 5 +- arch/s390/include/asm/tlb.h | 1 + arch/s390/kernel/early.c | 2 + arch/s390/mm/pgtable.c | 22 +++++ 6 files changed, 220 insertions(+), 18 deletions(-) (limited to 'arch') diff --git a/arch/s390/include/asm/hugetlb.h b/arch/s390/include/asm/hugetlb.h index fc322421b1cc..593753ee07f3 100644 --- a/arch/s390/include/asm/hugetlb.h +++ b/arch/s390/include/asm/hugetlb.h @@ -78,23 +78,6 @@ static inline void __pmd_csp(pmd_t *pmdp) " csp %1,%3" : "=m" (*pmdp) : "d" (reg2), "d" (reg3), "d" (reg4), "m" (*pmdp) : "cc"); - pmd_val(*pmdp) = _SEGMENT_ENTRY_INV | _SEGMENT_ENTRY; -} - -static inline void __pmd_idte(unsigned long address, pmd_t *pmdp) -{ - unsigned long sto = (unsigned long) pmdp - - pmd_index(address) * sizeof(pmd_t); - - if (!(pmd_val(*pmdp) & _SEGMENT_ENTRY_INV)) { - asm volatile( - " .insn rrf,0xb98e0000,%2,%3,0,0" - : "=m" (*pmdp) - : "m" (*pmdp), "a" (sto), - "a" ((address & HPAGE_MASK)) - ); - } - pmd_val(*pmdp) = _SEGMENT_ENTRY_INV | _SEGMENT_ENTRY; } static inline void huge_ptep_invalidate(struct mm_struct *mm, @@ -106,6 +89,7 @@ static inline void huge_ptep_invalidate(struct mm_struct *mm, __pmd_idte(address, pmdp); else __pmd_csp(pmdp); + pmd_val(*pmdp) = _SEGMENT_ENTRY_INV | _SEGMENT_ENTRY; } static inline pte_t huge_ptep_get_and_clear(struct mm_struct *mm, diff --git a/arch/s390/include/asm/pgtable.h b/arch/s390/include/asm/pgtable.h index 2c8f00d31cfe..ed14fc2db6e0 100644 --- a/arch/s390/include/asm/pgtable.h +++ b/arch/s390/include/asm/pgtable.h @@ -350,6 +350,10 @@ extern struct page *vmemmap; #define _SEGMENT_ENTRY_SPLIT_BIT 0 /* THP splitting bit number */ #define _SEGMENT_ENTRY_SPLIT (1UL << _SEGMENT_ENTRY_SPLIT_BIT) +/* Set of bits not changed in pmd_modify */ +#define _SEGMENT_CHG_MASK (_SEGMENT_ENTRY_ORIGIN | _SEGMENT_ENTRY_LARGE \ + | _SEGMENT_ENTRY_SPLIT | _SEGMENT_ENTRY_CO) + /* Page status table bits for virtualization */ #define RCP_ACC_BITS 0xf000000000000000UL #define RCP_FP_BIT 0x0800000000000000UL @@ -512,6 +516,26 @@ static inline int pmd_bad(pmd_t pmd) extern void pmdp_splitting_flush(struct vm_area_struct *vma, unsigned long addr, pmd_t *pmdp); +#define __HAVE_ARCH_PMDP_SET_ACCESS_FLAGS +extern int pmdp_set_access_flags(struct vm_area_struct *vma, + unsigned long address, pmd_t *pmdp, + pmd_t entry, int dirty); + +#define __HAVE_ARCH_PMDP_CLEAR_YOUNG_FLUSH +extern int pmdp_clear_flush_young(struct vm_area_struct *vma, + unsigned long address, pmd_t *pmdp); + +#define __HAVE_ARCH_PMD_WRITE +static inline int pmd_write(pmd_t pmd) +{ + return (pmd_val(pmd) & _SEGMENT_ENTRY_RO) == 0; +} + +static inline int pmd_young(pmd_t pmd) +{ + return 0; +} + static inline int pte_none(pte_t pte) { return (pte_val(pte) & _PAGE_INVALID) && !(pte_val(pte) & _PAGE_SWT); @@ -1165,6 +1189,22 @@ static inline pmd_t *pmd_offset(pud_t *pud, unsigned long address) #define pte_offset_map(pmd, address) pte_offset_kernel(pmd, address) #define pte_unmap(pte) do { } while (0) +static inline void __pmd_idte(unsigned long address, pmd_t *pmdp) +{ + unsigned long sto = (unsigned long) pmdp - + pmd_index(address) * sizeof(pmd_t); + + if (!(pmd_val(*pmdp) & _SEGMENT_ENTRY_INV)) { + asm volatile( + " .insn rrf,0xb98e0000,%2,%3,0,0" + : 
"=m" (*pmdp) + : "m" (*pmdp), "a" (sto), + "a" ((address & HPAGE_MASK)) + : "cc" + ); + } +} + #ifdef CONFIG_TRANSPARENT_HUGEPAGE #define __HAVE_ARCH_PGTABLE_DEPOSIT extern void pgtable_trans_huge_deposit(struct mm_struct *mm, pgtable_t pgtable); @@ -1176,6 +1216,156 @@ static inline int pmd_trans_splitting(pmd_t pmd) { return pmd_val(pmd) & _SEGMENT_ENTRY_SPLIT; } + +static inline void set_pmd_at(struct mm_struct *mm, unsigned long addr, + pmd_t *pmdp, pmd_t entry) +{ + *pmdp = entry; +} + +static inline unsigned long massage_pgprot_pmd(pgprot_t pgprot) +{ + unsigned long pgprot_pmd = 0; + + if (pgprot_val(pgprot) & _PAGE_INVALID) { + if (pgprot_val(pgprot) & _PAGE_SWT) + pgprot_pmd |= _HPAGE_TYPE_NONE; + pgprot_pmd |= _SEGMENT_ENTRY_INV; + } + if (pgprot_val(pgprot) & _PAGE_RO) + pgprot_pmd |= _SEGMENT_ENTRY_RO; + return pgprot_pmd; +} + +static inline pmd_t pmd_modify(pmd_t pmd, pgprot_t newprot) +{ + pmd_val(pmd) &= _SEGMENT_CHG_MASK; + pmd_val(pmd) |= massage_pgprot_pmd(newprot); + return pmd; +} + +static inline pmd_t pmd_mkhuge(pmd_t pmd) +{ + pmd_val(pmd) |= _SEGMENT_ENTRY_LARGE; + return pmd; +} + +static inline pmd_t pmd_mkwrite(pmd_t pmd) +{ + pmd_val(pmd) &= ~_SEGMENT_ENTRY_RO; + return pmd; +} + +static inline pmd_t pmd_wrprotect(pmd_t pmd) +{ + pmd_val(pmd) |= _SEGMENT_ENTRY_RO; + return pmd; +} + +static inline pmd_t pmd_mkdirty(pmd_t pmd) +{ + /* No dirty bit in the segment table entry. */ + return pmd; +} + +static inline pmd_t pmd_mkold(pmd_t pmd) +{ + /* No referenced bit in the segment table entry. */ + return pmd; +} + +static inline pmd_t pmd_mkyoung(pmd_t pmd) +{ + /* No referenced bit in the segment table entry. */ + return pmd; +} + +#define __HAVE_ARCH_PMDP_TEST_AND_CLEAR_YOUNG +static inline int pmdp_test_and_clear_young(struct vm_area_struct *vma, + unsigned long address, pmd_t *pmdp) +{ + unsigned long pmd_addr = pmd_val(*pmdp) & HPAGE_MASK; + long tmp, rc; + int counter; + + rc = 0; + if (MACHINE_HAS_RRBM) { + counter = PTRS_PER_PTE >> 6; + asm volatile( + "0: .insn rre,0xb9ae0000,%0,%3\n" /* rrbm */ + " ogr %1,%0\n" + " la %3,0(%4,%3)\n" + " brct %2,0b\n" + : "=&d" (tmp), "+&d" (rc), "+d" (counter), + "+a" (pmd_addr) + : "a" (64 * 4096UL) : "cc"); + rc = !!rc; + } else { + counter = PTRS_PER_PTE; + asm volatile( + "0: rrbe 0,%2\n" + " la %2,0(%3,%2)\n" + " brc 12,1f\n" + " lhi %0,1\n" + "1: brct %1,0b\n" + : "+d" (rc), "+d" (counter), "+a" (pmd_addr) + : "a" (4096UL) : "cc"); + } + return rc; +} + +#define __HAVE_ARCH_PMDP_GET_AND_CLEAR +static inline pmd_t pmdp_get_and_clear(struct mm_struct *mm, + unsigned long address, pmd_t *pmdp) +{ + pmd_t pmd = *pmdp; + + __pmd_idte(address, pmdp); + pmd_clear(pmdp); + return pmd; +} + +#define __HAVE_ARCH_PMDP_CLEAR_FLUSH +static inline pmd_t pmdp_clear_flush(struct vm_area_struct *vma, + unsigned long address, pmd_t *pmdp) +{ + return pmdp_get_and_clear(vma->vm_mm, address, pmdp); +} + +#define __HAVE_ARCH_PMDP_INVALIDATE +static inline void pmdp_invalidate(struct vm_area_struct *vma, + unsigned long address, pmd_t *pmdp) +{ + __pmd_idte(address, pmdp); +} + +static inline pmd_t mk_pmd_phys(unsigned long physpage, pgprot_t pgprot) +{ + pmd_t __pmd; + pmd_val(__pmd) = physpage + massage_pgprot_pmd(pgprot); + return __pmd; +} + +#define pfn_pmd(pfn, pgprot) mk_pmd_phys(__pa((pfn) << PAGE_SHIFT), (pgprot)) +#define mk_pmd(page, pgprot) pfn_pmd(page_to_pfn(page), (pgprot)) + +static inline int pmd_trans_huge(pmd_t pmd) +{ + return pmd_val(pmd) & _SEGMENT_ENTRY_LARGE; +} + +static inline int has_transparent_hugepage(void) 
+{ + return MACHINE_HAS_HPAGE ? 1 : 0; +} + +static inline unsigned long pmd_pfn(pmd_t pmd) +{ + if (pmd_trans_huge(pmd)) + return pmd_val(pmd) >> HPAGE_SHIFT; + else + return pmd_val(pmd) >> PAGE_SHIFT; +} #endif /* CONFIG_TRANSPARENT_HUGEPAGE */ /* diff --git a/arch/s390/include/asm/setup.h b/arch/s390/include/asm/setup.h index 87b47ca954f1..8cfd731a18d8 100644 --- a/arch/s390/include/asm/setup.h +++ b/arch/s390/include/asm/setup.h @@ -81,6 +81,7 @@ extern unsigned int s390_user_mode; #define MACHINE_FLAG_SPP (1UL << 13) #define MACHINE_FLAG_TOPOLOGY (1UL << 14) #define MACHINE_FLAG_TE (1UL << 15) +#define MACHINE_FLAG_RRBM (1UL << 16) #define MACHINE_IS_VM (S390_lowcore.machine_flags & MACHINE_FLAG_VM) #define MACHINE_IS_KVM (S390_lowcore.machine_flags & MACHINE_FLAG_KVM) @@ -99,7 +100,8 @@ extern unsigned int s390_user_mode; #define MACHINE_HAS_PFMF (0) #define MACHINE_HAS_SPP (0) #define MACHINE_HAS_TOPOLOGY (0) -#define MACHINE_HAS_TE (0) +#define MACHINE_HAS_TE (0) +#define MACHINE_HAS_RRBM (0) #else /* CONFIG_64BIT */ #define MACHINE_HAS_IEEE (1) #define MACHINE_HAS_CSP (1) @@ -112,6 +114,7 @@ extern unsigned int s390_user_mode; #define MACHINE_HAS_SPP (S390_lowcore.machine_flags & MACHINE_FLAG_SPP) #define MACHINE_HAS_TOPOLOGY (S390_lowcore.machine_flags & MACHINE_FLAG_TOPOLOGY) #define MACHINE_HAS_TE (S390_lowcore.machine_flags & MACHINE_FLAG_TE) +#define MACHINE_HAS_RRBM (S390_lowcore.machine_flags & MACHINE_FLAG_RRBM) #endif /* CONFIG_64BIT */ #define ZFCPDUMP_HSA_SIZE (32UL<<20) diff --git a/arch/s390/include/asm/tlb.h b/arch/s390/include/asm/tlb.h index 06e5acbc84bd..b75d7d686684 100644 --- a/arch/s390/include/asm/tlb.h +++ b/arch/s390/include/asm/tlb.h @@ -137,6 +137,7 @@ static inline void pud_free_tlb(struct mmu_gather *tlb, pud_t *pud, #define tlb_start_vma(tlb, vma) do { } while (0) #define tlb_end_vma(tlb, vma) do { } while (0) #define tlb_remove_tlb_entry(tlb, ptep, addr) do { } while (0) +#define tlb_remove_pmd_tlb_entry(tlb, pmdp, addr) do { } while (0) #define tlb_migrate_finish(mm) do { } while (0) #endif /* _S390_TLB_H */ diff --git a/arch/s390/kernel/early.c b/arch/s390/kernel/early.c index 7f4717675c19..00d114445068 100644 --- a/arch/s390/kernel/early.c +++ b/arch/s390/kernel/early.c @@ -388,6 +388,8 @@ static __init void detect_machine_facilities(void) S390_lowcore.machine_flags |= MACHINE_FLAG_SPP; if (test_facility(50) && test_facility(73)) S390_lowcore.machine_flags |= MACHINE_FLAG_TE; + if (test_facility(66)) + S390_lowcore.machine_flags |= MACHINE_FLAG_RRBM; #endif } diff --git a/arch/s390/mm/pgtable.c b/arch/s390/mm/pgtable.c index e86a523875eb..c8188a18af05 100644 --- a/arch/s390/mm/pgtable.c +++ b/arch/s390/mm/pgtable.c @@ -898,6 +898,28 @@ bool kernel_page_present(struct page *page) #endif /* CONFIG_HIBERNATION && CONFIG_DEBUG_PAGEALLOC */ #ifdef CONFIG_TRANSPARENT_HUGEPAGE +int pmdp_clear_flush_young(struct vm_area_struct *vma, unsigned long address, + pmd_t *pmdp) +{ + VM_BUG_ON(address & ~HPAGE_PMD_MASK); + /* No need to flush TLB + * On s390 reference bits are in storage key and never in TLB */ + return pmdp_test_and_clear_young(vma, address, pmdp); +} + +int pmdp_set_access_flags(struct vm_area_struct *vma, + unsigned long address, pmd_t *pmdp, + pmd_t entry, int dirty) +{ + VM_BUG_ON(address & ~HPAGE_PMD_MASK); + + if (pmd_same(*pmdp, entry)) + return 0; + pmdp_invalidate(vma, address, pmdp); + set_pmd_at(vma->vm_mm, address, pmdp, entry); + return 1; +} + static void pmdp_splitting_flush_sync(void *arg) { /* Simply deliver the interrupt */ -- 
cgit v1.2.2 From 9d9e6f9703bbd642f3f2f807e6aaa642a4cbcec9 Mon Sep 17 00:00:00 2001 From: Michel Lespinasse Date: Mon, 8 Oct 2012 16:31:20 -0700 Subject: rbtree: remove prior augmented rbtree implementation convert arch/x86/mm/pat_rbtree.c to the proposed augmented rbtree api and remove the old augmented rbtree implementation. Signed-off-by: Michel Lespinasse Acked-by: Rik van Riel Cc: Peter Zijlstra Cc: Andrea Arcangeli Cc: David Woodhouse Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- arch/x86/mm/pat_rbtree.c | 65 ++++++++++++++++++++++++++++++++++-------------- 1 file changed, 46 insertions(+), 19 deletions(-) (limited to 'arch') diff --git a/arch/x86/mm/pat_rbtree.c b/arch/x86/mm/pat_rbtree.c index 8acaddd0fb21..7e1515bd4770 100644 --- a/arch/x86/mm/pat_rbtree.c +++ b/arch/x86/mm/pat_rbtree.c @@ -54,29 +54,57 @@ static u64 get_subtree_max_end(struct rb_node *node) return ret; } -/* Update 'subtree_max_end' for a node, based on node and its children */ -static void memtype_rb_augment_cb(struct rb_node *node, void *__unused) +static u64 compute_subtree_max_end(struct memtype *data) { - struct memtype *data; - u64 max_end, child_max_end; - - if (!node) - return; - - data = container_of(node, struct memtype, rb); - max_end = data->end; + u64 max_end = data->end, child_max_end; - child_max_end = get_subtree_max_end(node->rb_right); + child_max_end = get_subtree_max_end(data->rb.rb_right); if (child_max_end > max_end) max_end = child_max_end; - child_max_end = get_subtree_max_end(node->rb_left); + child_max_end = get_subtree_max_end(data->rb.rb_left); if (child_max_end > max_end) max_end = child_max_end; - data->subtree_max_end = max_end; + return max_end; +} + +/* Update 'subtree_max_end' for node and its parents */ +static void memtype_rb_propagate_cb(struct rb_node *node, struct rb_node *stop) +{ + while (node != stop) { + struct memtype *data = container_of(node, struct memtype, rb); + u64 subtree_max_end = compute_subtree_max_end(data); + if (data->subtree_max_end == subtree_max_end) + break; + data->subtree_max_end = subtree_max_end; + node = rb_parent(&data->rb); + } +} + +static void memtype_rb_copy_cb(struct rb_node *old, struct rb_node *new) +{ + struct memtype *old_data = container_of(old, struct memtype, rb); + struct memtype *new_data = container_of(new, struct memtype, rb); + + new_data->subtree_max_end = old_data->subtree_max_end; } +/* Update 'subtree_max_end' after tree rotation. 
old and new are the + * former and current subtree roots */ +static void memtype_rb_rotate_cb(struct rb_node *old, struct rb_node *new) +{ + struct memtype *old_data = container_of(old, struct memtype, rb); + struct memtype *new_data = container_of(new, struct memtype, rb); + + new_data->subtree_max_end = old_data->subtree_max_end; + old_data->subtree_max_end = compute_subtree_max_end(old_data); +} + +static const struct rb_augment_callbacks memtype_rb_augment_cb = { + memtype_rb_propagate_cb, memtype_rb_copy_cb, memtype_rb_rotate_cb +}; + /* Find the first (lowest start addr) overlapping range from rb tree */ static struct memtype *memtype_rb_lowest_match(struct rb_root *root, u64 start, u64 end) @@ -179,15 +207,17 @@ static void memtype_rb_insert(struct rb_root *root, struct memtype *newdata) struct memtype *data = container_of(*node, struct memtype, rb); parent = *node; + if (data->subtree_max_end < newdata->end) + data->subtree_max_end = newdata->end; if (newdata->start <= data->start) node = &((*node)->rb_left); else if (newdata->start > data->start) node = &((*node)->rb_right); } + newdata->subtree_max_end = newdata->end; rb_link_node(&newdata->rb, parent, node); - rb_insert_color(&newdata->rb, root); - rb_augment_insert(&newdata->rb, memtype_rb_augment_cb, NULL); + rb_insert_augmented(&newdata->rb, root, &memtype_rb_augment_cb); } int rbt_memtype_check_insert(struct memtype *new, unsigned long *ret_type) @@ -209,16 +239,13 @@ int rbt_memtype_check_insert(struct memtype *new, unsigned long *ret_type) struct memtype *rbt_memtype_erase(u64 start, u64 end) { - struct rb_node *deepest; struct memtype *data; data = memtype_rb_exact_match(&memtype_rbroot, start, end); if (!data) goto out; - deepest = rb_augment_erase_begin(&data->rb); - rb_erase(&data->rb, &memtype_rbroot); - rb_augment_erase_end(deepest, memtype_rb_augment_cb, NULL); + rb_erase_augmented(&data->rb, &memtype_rbroot, &memtype_rb_augment_cb); out: return data; } -- cgit v1.2.2 From 3908836aa77e3621aaf2101f2920e01d7c8460d6 Mon Sep 17 00:00:00 2001 From: Michel Lespinasse Date: Mon, 8 Oct 2012 16:31:21 -0700 Subject: rbtree: add RB_DECLARE_CALLBACKS() macro As proposed by Peter Zijlstra, this makes it easier to define the augmented rbtree callbacks. 
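The one-line macro call introduced in the diff below replaces the hand-written propagate/copy/rotate callbacks from the previous patch. As a hedged sketch of what that buys you, here is how it could be used for a made-up node type (the struct and all names here are hypothetical; only the pat_rbtree usage in the diff itself is from the actual patch):

#include <linux/rbtree.h>	/* RB_DECLARE_CALLBACKS lives here at this point
				 * in the series; a later patch in this section
				 * moves the augmented helpers to
				 * <linux/rbtree_augmented.h> */

/* Hypothetical node that tracks the maximum "end" value in its subtree. */
struct range_node {
	struct rb_node rb;
	u64 start, end;
	u64 subtree_max_end;		/* augmented value */
};

/* Recompute the augmented value of one node from itself and its children. */
static u64 range_compute_max_end(struct range_node *node)
{
	u64 max = node->end, child;

	if (node->rb.rb_left) {
		child = rb_entry(node->rb.rb_left,
				 struct range_node, rb)->subtree_max_end;
		if (child > max)
			max = child;
	}
	if (node->rb.rb_right) {
		child = rb_entry(node->rb.rb_right,
				 struct range_node, rb)->subtree_max_end;
		if (child > max)
			max = child;
	}
	return max;
}

/* One line generates the propagate/copy/rotate callbacks shown by hand above. */
RB_DECLARE_CALLBACKS(static, range_rb_cb, struct range_node, rb,
		     u64, subtree_max_end, range_compute_max_end)

Insert and erase paths then pass &range_rb_cb to rb_insert_augmented() and rb_erase_augmented(), just as the pat_rbtree.c conversion in the previous patch already does.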
Signed-off-by: Michel Lespinasse Cc: Rik van Riel Cc: Peter Zijlstra Cc: Andrea Arcangeli Cc: David Woodhouse Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- arch/x86/mm/pat_rbtree.c | 37 ++----------------------------------- 1 file changed, 2 insertions(+), 35 deletions(-) (limited to 'arch') diff --git a/arch/x86/mm/pat_rbtree.c b/arch/x86/mm/pat_rbtree.c index 7e1515bd4770..4d116959075d 100644 --- a/arch/x86/mm/pat_rbtree.c +++ b/arch/x86/mm/pat_rbtree.c @@ -69,41 +69,8 @@ static u64 compute_subtree_max_end(struct memtype *data) return max_end; } -/* Update 'subtree_max_end' for node and its parents */ -static void memtype_rb_propagate_cb(struct rb_node *node, struct rb_node *stop) -{ - while (node != stop) { - struct memtype *data = container_of(node, struct memtype, rb); - u64 subtree_max_end = compute_subtree_max_end(data); - if (data->subtree_max_end == subtree_max_end) - break; - data->subtree_max_end = subtree_max_end; - node = rb_parent(&data->rb); - } -} - -static void memtype_rb_copy_cb(struct rb_node *old, struct rb_node *new) -{ - struct memtype *old_data = container_of(old, struct memtype, rb); - struct memtype *new_data = container_of(new, struct memtype, rb); - - new_data->subtree_max_end = old_data->subtree_max_end; -} - -/* Update 'subtree_max_end' after tree rotation. old and new are the - * former and current subtree roots */ -static void memtype_rb_rotate_cb(struct rb_node *old, struct rb_node *new) -{ - struct memtype *old_data = container_of(old, struct memtype, rb); - struct memtype *new_data = container_of(new, struct memtype, rb); - - new_data->subtree_max_end = old_data->subtree_max_end; - old_data->subtree_max_end = compute_subtree_max_end(old_data); -} - -static const struct rb_augment_callbacks memtype_rb_augment_cb = { - memtype_rb_propagate_cb, memtype_rb_copy_cb, memtype_rb_rotate_cb -}; +RB_DECLARE_CALLBACKS(static, memtype_rb_augment_cb, struct memtype, rb, + u64, subtree_max_end, compute_subtree_max_end) /* Find the first (lowest start addr) overlapping range from rb tree */ static struct memtype *memtype_rb_lowest_match(struct rb_root *root, -- cgit v1.2.2 From 6b2dbba8b6ac4df26f72eda1e5ea7bab9f950e08 Mon Sep 17 00:00:00 2001 From: Michel Lespinasse Date: Mon, 8 Oct 2012 16:31:25 -0700 Subject: mm: replace vma prio_tree with an interval tree Implement an interval tree as a replacement for the VMA prio_tree. The algorithms are similar to lib/interval_tree.c; however that code can't be directly reused as the interval endpoints are not explicitly stored in the VMA. So instead, the common algorithm is moved into a template and the details (node type, how to get interval endpoints from the node, etc) are filled in using the C preprocessor. Once the interval tree functions are available, using them as a replacement to the VMA prio tree is a relatively simple, mechanical job. 
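To make the "template" idea concrete, here is a hedged sketch of instantiating it for a made-up node type (macro name and argument order as added by this series in interval_tree_generic.h; every identifier below is illustrative, not authoritative):

#include <linux/interval_tree_generic.h>	/* header assumed to be added by this series */

/* Hypothetical node storing a closed interval [start, last]. */
struct my_interval {
	struct rb_node rb;
	unsigned long start, last;
	unsigned long __subtree_last;	/* augmented: max "last" in this subtree */
};

/* The template only needs to know how to read the endpoints of a node. */
static inline unsigned long my_start(struct my_interval *node)
{
	return node->start;
}

static inline unsigned long my_last(struct my_interval *node)
{
	return node->last;
}

/*
 * Expands to my_tree_insert(), my_tree_remove(), my_tree_iter_first() and
 * my_tree_iter_next(), all built on the augmented rbtree machinery.
 */
INTERVAL_TREE_DEFINE(struct my_interval, rb, unsigned long, __subtree_last,
		     my_start, my_last, static, my_tree)

For the VMA case the endpoints are computed from vm_pgoff and the VMA size, which is why the arch callers converted below can drop the iterator object and simply switch from vma_prio_tree_foreach() to vma_interval_tree_foreach().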
Signed-off-by: Michel Lespinasse Cc: Rik van Riel Cc: Hillf Danton Cc: Peter Zijlstra Cc: Catalin Marinas Cc: Andrea Arcangeli Cc: David Woodhouse Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- arch/arm/mm/fault-armv.c | 3 +-- arch/arm/mm/flush.c | 3 +-- arch/parisc/kernel/cache.c | 3 +-- arch/x86/mm/hugetlbpage.c | 3 +-- 4 files changed, 4 insertions(+), 8 deletions(-) (limited to 'arch') diff --git a/arch/arm/mm/fault-armv.c b/arch/arm/mm/fault-armv.c index 7599e2625c7d..2a5907b5c8d2 100644 --- a/arch/arm/mm/fault-armv.c +++ b/arch/arm/mm/fault-armv.c @@ -134,7 +134,6 @@ make_coherent(struct address_space *mapping, struct vm_area_struct *vma, { struct mm_struct *mm = vma->vm_mm; struct vm_area_struct *mpnt; - struct prio_tree_iter iter; unsigned long offset; pgoff_t pgoff; int aliases = 0; @@ -147,7 +146,7 @@ make_coherent(struct address_space *mapping, struct vm_area_struct *vma, * cache coherency. */ flush_dcache_mmap_lock(mapping); - vma_prio_tree_foreach(mpnt, &iter, &mapping->i_mmap, pgoff, pgoff) { + vma_interval_tree_foreach(mpnt, &mapping->i_mmap, pgoff, pgoff) { /* * If this VMA is not in our MM, we can ignore it. * Note that we intentionally mask out the VMA diff --git a/arch/arm/mm/flush.c b/arch/arm/mm/flush.c index 40ca11ed6e5f..1c8f7f564175 100644 --- a/arch/arm/mm/flush.c +++ b/arch/arm/mm/flush.c @@ -196,7 +196,6 @@ static void __flush_dcache_aliases(struct address_space *mapping, struct page *p { struct mm_struct *mm = current->active_mm; struct vm_area_struct *mpnt; - struct prio_tree_iter iter; pgoff_t pgoff; /* @@ -208,7 +207,7 @@ static void __flush_dcache_aliases(struct address_space *mapping, struct page *p pgoff = page->index << (PAGE_CACHE_SHIFT - PAGE_SHIFT); flush_dcache_mmap_lock(mapping); - vma_prio_tree_foreach(mpnt, &iter, &mapping->i_mmap, pgoff, pgoff) { + vma_interval_tree_foreach(mpnt, &mapping->i_mmap, pgoff, pgoff) { unsigned long offset; /* diff --git a/arch/parisc/kernel/cache.c b/arch/parisc/kernel/cache.c index 9d181890a7e3..48e16dc20102 100644 --- a/arch/parisc/kernel/cache.c +++ b/arch/parisc/kernel/cache.c @@ -276,7 +276,6 @@ void flush_dcache_page(struct page *page) { struct address_space *mapping = page_mapping(page); struct vm_area_struct *mpnt; - struct prio_tree_iter iter; unsigned long offset; unsigned long addr, old_addr = 0; pgoff_t pgoff; @@ -299,7 +298,7 @@ void flush_dcache_page(struct page *page) * to flush one address here for them all to become coherent */ flush_dcache_mmap_lock(mapping); - vma_prio_tree_foreach(mpnt, &iter, &mapping->i_mmap, pgoff, pgoff) { + vma_interval_tree_foreach(mpnt, &mapping->i_mmap, pgoff, pgoff) { offset = (pgoff - mpnt->vm_pgoff) << PAGE_SHIFT; addr = mpnt->vm_start + offset; diff --git a/arch/x86/mm/hugetlbpage.c b/arch/x86/mm/hugetlbpage.c index b91e48512425..937bff5cdaa7 100644 --- a/arch/x86/mm/hugetlbpage.c +++ b/arch/x86/mm/hugetlbpage.c @@ -71,7 +71,6 @@ huge_pmd_share(struct mm_struct *mm, unsigned long addr, pud_t *pud) struct address_space *mapping = vma->vm_file->f_mapping; pgoff_t idx = ((addr - vma->vm_start) >> PAGE_SHIFT) + vma->vm_pgoff; - struct prio_tree_iter iter; struct vm_area_struct *svma; unsigned long saddr; pte_t *spte = NULL; @@ -81,7 +80,7 @@ huge_pmd_share(struct mm_struct *mm, unsigned long addr, pud_t *pud) return (pte_t *)pmd_alloc(mm, pud, addr); mutex_lock(&mapping->i_mmap_mutex); - vma_prio_tree_foreach(svma, &iter, &mapping->i_mmap, idx, idx) { + vma_interval_tree_foreach(svma, &mapping->i_mmap, idx, idx) { if (svma == vma) continue; -- cgit v1.2.2 
From 9c079add0d0f45220f4bb37febf0621137ec2d38 Mon Sep 17 00:00:00 2001 From: Michel Lespinasse Date: Mon, 8 Oct 2012 16:31:33 -0700 Subject: rbtree: move augmented rbtree functionality to rbtree_augmented.h Provide rb_insert_augmented() and rb_erase_augmented() through a new rbtree_augmented.h include file. rb_erase_augmented() is defined there as an __always_inline function, in order to allow inlining of augmented rbtree callbacks into it. Since this generates a relatively large function, each augmented rbtree user should make sure to have a single call site. Signed-off-by: Michel Lespinasse Cc: Rik van Riel Cc: Hillf Danton Cc: Peter Zijlstra Cc: Catalin Marinas Cc: Andrea Arcangeli Cc: David Woodhouse Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- arch/x86/mm/pat_rbtree.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'arch') diff --git a/arch/x86/mm/pat_rbtree.c b/arch/x86/mm/pat_rbtree.c index 4d116959075d..415f6c4ced36 100644 --- a/arch/x86/mm/pat_rbtree.c +++ b/arch/x86/mm/pat_rbtree.c @@ -12,7 +12,7 @@ #include #include #include -#include +#include #include #include -- cgit v1.2.2 From e79bee24fd6134f90af4228cfebd010136d67631 Mon Sep 17 00:00:00 2001 From: Shaohua Li Date: Mon, 8 Oct 2012 16:32:18 -0700 Subject: atomic: implement generic atomic_dec_if_positive() The x86 implementation of atomic_dec_if_positive is quite generic, so make it available to all architectures. This is needed for "swap: add a simple detector for inappropriate swapin readahead". [akpm@linux-foundation.org: do the "#define foo foo" trick in the conventional manner] Signed-off-by: Shaohua Li Cc: Stephen Rothwell Cc: "David S. Miller" Cc: Rik van Riel Cc: Ingo Molnar Cc: Thomas Gleixner Cc: "H. Peter Anvin" Cc: Benjamin Herrenschmidt Cc: Michal Simek Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- arch/microblaze/include/asm/atomic.h | 1 + arch/powerpc/include/asm/atomic.h | 1 + arch/x86/include/asm/atomic.h | 24 ------------------------ 3 files changed, 2 insertions(+), 24 deletions(-) (limited to 'arch') diff --git a/arch/microblaze/include/asm/atomic.h b/arch/microblaze/include/asm/atomic.h index 472d8bf726df..42ac382a09da 100644 --- a/arch/microblaze/include/asm/atomic.h +++ b/arch/microblaze/include/asm/atomic.h @@ -22,5 +22,6 @@ static inline int atomic_dec_if_positive(atomic_t *v) return res; } +#define atomic_dec_if_positive atomic_dec_if_positive #endif /* _ASM_MICROBLAZE_ATOMIC_H */ diff --git a/arch/powerpc/include/asm/atomic.h b/arch/powerpc/include/asm/atomic.h index da29032ae38f..e3b1d41c89be 100644 --- a/arch/powerpc/include/asm/atomic.h +++ b/arch/powerpc/include/asm/atomic.h @@ -268,6 +268,7 @@ static __inline__ int atomic_dec_if_positive(atomic_t *v) return t; } +#define atomic_dec_if_positive atomic_dec_if_positive #define smp_mb__before_atomic_dec() smp_mb() #define smp_mb__after_atomic_dec() smp_mb() diff --git a/arch/x86/include/asm/atomic.h b/arch/x86/include/asm/atomic.h index 250b8774c158..b6c3b821acf6 100644 --- a/arch/x86/include/asm/atomic.h +++ b/arch/x86/include/asm/atomic.h @@ -240,30 +240,6 @@ static inline int __atomic_add_unless(atomic_t *v, int a, int u) return c; } - -/* - * atomic_dec_if_positive - decrement by 1 if old value positive - * @v: pointer of type atomic_t - * - * The function returns the old value of *v minus 1, even if - * the atomic variable, v, was not decremented. 
- */ -static inline int atomic_dec_if_positive(atomic_t *v) -{ - int c, old, dec; - c = atomic_read(v); - for (;;) { - dec = c - 1; - if (unlikely(dec < 0)) - break; - old = atomic_cmpxchg((v), c, dec); - if (likely(old == c)) - break; - c = old; - } - return dec; -} - /** * atomic_inc_short - increment of a short integer * @v: pointer to type int -- cgit v1.2.2 From 45cac65b0fcd287ebb877b141d40ba9bbe8e5da7 Mon Sep 17 00:00:00 2001 From: Shaohua Li Date: Mon, 8 Oct 2012 16:32:19 -0700 Subject: readahead: fault retry breaks mmap file read random detection .fault now can retry. The retry can break state machine of .fault. In filemap_fault, if page is miss, ra->mmap_miss is increased. In the second try, since the page is in page cache now, ra->mmap_miss is decreased. And these are done in one fault, so we can't detect random mmap file access. Add a new flag to indicate .fault is tried once. In the second try, skip ra->mmap_miss decreasing. The filemap_fault state machine is ok with it. I only tested x86, didn't test other archs, but looks the change for other archs is obvious, but who knows :) Signed-off-by: Shaohua Li Cc: Rik van Riel Cc: Wu Fengguang Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- arch/arm/mm/fault.c | 1 + arch/avr32/mm/fault.c | 1 + arch/cris/mm/fault.c | 1 + arch/hexagon/mm/vm_fault.c | 1 + arch/ia64/mm/fault.c | 1 + arch/m68k/mm/fault.c | 1 + arch/microblaze/mm/fault.c | 1 + arch/mips/mm/fault.c | 1 + arch/openrisc/mm/fault.c | 1 + arch/powerpc/mm/fault.c | 1 + arch/s390/mm/fault.c | 1 + arch/sh/mm/fault.c | 1 + arch/sparc/mm/fault_32.c | 1 + arch/sparc/mm/fault_64.c | 1 + arch/tile/mm/fault.c | 1 + arch/um/kernel/trap.c | 1 + arch/x86/mm/fault.c | 1 + arch/xtensa/mm/fault.c | 1 + 18 files changed, 18 insertions(+) (limited to 'arch') diff --git a/arch/arm/mm/fault.c b/arch/arm/mm/fault.c index c3bd83450227..5dbf13f954f6 100644 --- a/arch/arm/mm/fault.c +++ b/arch/arm/mm/fault.c @@ -336,6 +336,7 @@ retry: /* Clear FAULT_FLAG_ALLOW_RETRY to avoid any risk * of starvation. 
*/ flags &= ~FAULT_FLAG_ALLOW_RETRY; + flags |= FAULT_FLAG_TRIED; goto retry; } } diff --git a/arch/avr32/mm/fault.c b/arch/avr32/mm/fault.c index b92e60958617..b2f2d2d66849 100644 --- a/arch/avr32/mm/fault.c +++ b/arch/avr32/mm/fault.c @@ -152,6 +152,7 @@ good_area: tsk->min_flt++; if (fault & VM_FAULT_RETRY) { flags &= ~FAULT_FLAG_ALLOW_RETRY; + flags |= FAULT_FLAG_TRIED; /* * No need to up_read(&mm->mmap_sem) as we would have diff --git a/arch/cris/mm/fault.c b/arch/cris/mm/fault.c index 45fd542cf173..73312ab6c696 100644 --- a/arch/cris/mm/fault.c +++ b/arch/cris/mm/fault.c @@ -186,6 +186,7 @@ retry: tsk->min_flt++; if (fault & VM_FAULT_RETRY) { flags &= ~FAULT_FLAG_ALLOW_RETRY; + flags |= FAULT_FLAG_TRIED; /* * No need to up_read(&mm->mmap_sem) as we would diff --git a/arch/hexagon/mm/vm_fault.c b/arch/hexagon/mm/vm_fault.c index 06695cc4fe58..513b74cb397e 100644 --- a/arch/hexagon/mm/vm_fault.c +++ b/arch/hexagon/mm/vm_fault.c @@ -113,6 +113,7 @@ good_area: current->min_flt++; if (fault & VM_FAULT_RETRY) { flags &= ~FAULT_FLAG_ALLOW_RETRY; + flags |= FAULT_FLAG_TRIED; goto retry; } } diff --git a/arch/ia64/mm/fault.c b/arch/ia64/mm/fault.c index 8443daf4f515..6cf0341f978e 100644 --- a/arch/ia64/mm/fault.c +++ b/arch/ia64/mm/fault.c @@ -184,6 +184,7 @@ retry: current->min_flt++; if (fault & VM_FAULT_RETRY) { flags &= ~FAULT_FLAG_ALLOW_RETRY; + flags |= FAULT_FLAG_TRIED; /* No need to up_read(&mm->mmap_sem) as we would * have already released it in __lock_page_or_retry diff --git a/arch/m68k/mm/fault.c b/arch/m68k/mm/fault.c index aeebbb7b30f0..a563727806bf 100644 --- a/arch/m68k/mm/fault.c +++ b/arch/m68k/mm/fault.c @@ -170,6 +170,7 @@ good_area: /* Clear FAULT_FLAG_ALLOW_RETRY to avoid any risk * of starvation. */ flags &= ~FAULT_FLAG_ALLOW_RETRY; + flags |= FAULT_FLAG_TRIED; /* * No need to up_read(&mm->mmap_sem) as we would diff --git a/arch/microblaze/mm/fault.c b/arch/microblaze/mm/fault.c index eb365d6795fa..714b35a9c4f7 100644 --- a/arch/microblaze/mm/fault.c +++ b/arch/microblaze/mm/fault.c @@ -233,6 +233,7 @@ good_area: current->min_flt++; if (fault & VM_FAULT_RETRY) { flags &= ~FAULT_FLAG_ALLOW_RETRY; + flags |= FAULT_FLAG_TRIED; /* * No need to up_read(&mm->mmap_sem) as we would diff --git a/arch/mips/mm/fault.c b/arch/mips/mm/fault.c index c14f6dfed995..9f513486af10 100644 --- a/arch/mips/mm/fault.c +++ b/arch/mips/mm/fault.c @@ -171,6 +171,7 @@ good_area: } if (fault & VM_FAULT_RETRY) { flags &= ~FAULT_FLAG_ALLOW_RETRY; + flags |= FAULT_FLAG_TRIED; /* * No need to up_read(&mm->mmap_sem) as we would diff --git a/arch/openrisc/mm/fault.c b/arch/openrisc/mm/fault.c index 40f850e9766c..e2bfafce66c5 100644 --- a/arch/openrisc/mm/fault.c +++ b/arch/openrisc/mm/fault.c @@ -183,6 +183,7 @@ good_area: tsk->min_flt++; if (fault & VM_FAULT_RETRY) { flags &= ~FAULT_FLAG_ALLOW_RETRY; + flags |= FAULT_FLAG_TRIED; /* No need to up_read(&mm->mmap_sem) as we would * have already released it in __lock_page_or_retry diff --git a/arch/powerpc/mm/fault.c b/arch/powerpc/mm/fault.c index 5495ebe983a2..0a6b28336eb0 100644 --- a/arch/powerpc/mm/fault.c +++ b/arch/powerpc/mm/fault.c @@ -451,6 +451,7 @@ good_area: /* Clear FAULT_FLAG_ALLOW_RETRY to avoid any risk * of starvation. 
*/ flags &= ~FAULT_FLAG_ALLOW_RETRY; + flags |= FAULT_FLAG_TRIED; goto retry; } } diff --git a/arch/s390/mm/fault.c b/arch/s390/mm/fault.c index ac9122ca1152..04ad4001a289 100644 --- a/arch/s390/mm/fault.c +++ b/arch/s390/mm/fault.c @@ -367,6 +367,7 @@ retry: /* Clear FAULT_FLAG_ALLOW_RETRY to avoid any risk * of starvation. */ flags &= ~FAULT_FLAG_ALLOW_RETRY; + flags |= FAULT_FLAG_TRIED; down_read(&mm->mmap_sem); goto retry; } diff --git a/arch/sh/mm/fault.c b/arch/sh/mm/fault.c index 3bdc1ad9a341..cbbdcad8fcb3 100644 --- a/arch/sh/mm/fault.c +++ b/arch/sh/mm/fault.c @@ -504,6 +504,7 @@ good_area: } if (fault & VM_FAULT_RETRY) { flags &= ~FAULT_FLAG_ALLOW_RETRY; + flags |= FAULT_FLAG_TRIED; /* * No need to up_read(&mm->mmap_sem) as we would diff --git a/arch/sparc/mm/fault_32.c b/arch/sparc/mm/fault_32.c index 77ac917be152..e98bfda205a2 100644 --- a/arch/sparc/mm/fault_32.c +++ b/arch/sparc/mm/fault_32.c @@ -265,6 +265,7 @@ good_area: } if (fault & VM_FAULT_RETRY) { flags &= ~FAULT_FLAG_ALLOW_RETRY; + flags |= FAULT_FLAG_TRIED; /* No need to up_read(&mm->mmap_sem) as we would * have already released it in __lock_page_or_retry diff --git a/arch/sparc/mm/fault_64.c b/arch/sparc/mm/fault_64.c index 1fe0429b6314..413d29263304 100644 --- a/arch/sparc/mm/fault_64.c +++ b/arch/sparc/mm/fault_64.c @@ -452,6 +452,7 @@ good_area: } if (fault & VM_FAULT_RETRY) { flags &= ~FAULT_FLAG_ALLOW_RETRY; + flags |= FAULT_FLAG_TRIED; /* No need to up_read(&mm->mmap_sem) as we would * have already released it in __lock_page_or_retry diff --git a/arch/tile/mm/fault.c b/arch/tile/mm/fault.c index 84ce7abbf5af..fe811fa5f1b9 100644 --- a/arch/tile/mm/fault.c +++ b/arch/tile/mm/fault.c @@ -454,6 +454,7 @@ good_area: tsk->min_flt++; if (fault & VM_FAULT_RETRY) { flags &= ~FAULT_FLAG_ALLOW_RETRY; + flags |= FAULT_FLAG_TRIED; /* * No need to up_read(&mm->mmap_sem) as we would diff --git a/arch/um/kernel/trap.c b/arch/um/kernel/trap.c index 0353b98ae35a..0f00e9c82080 100644 --- a/arch/um/kernel/trap.c +++ b/arch/um/kernel/trap.c @@ -89,6 +89,7 @@ good_area: current->min_flt++; if (fault & VM_FAULT_RETRY) { flags &= ~FAULT_FLAG_ALLOW_RETRY; + flags |= FAULT_FLAG_TRIED; goto retry; } diff --git a/arch/x86/mm/fault.c b/arch/x86/mm/fault.c index a530b230e7d7..8e13ecb41bee 100644 --- a/arch/x86/mm/fault.c +++ b/arch/x86/mm/fault.c @@ -1220,6 +1220,7 @@ good_area: /* Clear FAULT_FLAG_ALLOW_RETRY to avoid any risk * of starvation. */ flags &= ~FAULT_FLAG_ALLOW_RETRY; + flags |= FAULT_FLAG_TRIED; goto retry; } } diff --git a/arch/xtensa/mm/fault.c b/arch/xtensa/mm/fault.c index 5a74c53bc69c..2c2f710ed1dc 100644 --- a/arch/xtensa/mm/fault.c +++ b/arch/xtensa/mm/fault.c @@ -126,6 +126,7 @@ good_area: current->min_flt++; if (fault & VM_FAULT_RETRY) { flags &= ~FAULT_FLAG_ALLOW_RETRY; + flags |= FAULT_FLAG_TRIED; /* No need to up_read(&mm->mmap_sem) as we would * have already released it in __lock_page_or_retry -- cgit v1.2.2 From 7f1290f2f2a4d2c3f1b7ce8e87256e052ca23125 Mon Sep 17 00:00:00 2001 From: Jianguo Wu Date: Mon, 8 Oct 2012 16:33:06 -0700 Subject: mm: fix-up zone present pages I think zone->present_pages indicates pages that buddy system can management, it should be: zone->present_pages = spanned pages - absent pages - bootmem pages, but is now: zone->present_pages = spanned pages - absent pages - memmap pages. spanned pages: total size, including holes. absent pages: holes. bootmem pages: pages used in system boot, managed by bootmem allocator. memmap pages: pages used by page structs. 
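A rough worked example (all numbers invented for illustration, assuming 4KB pages and roughly 64 bytes per struct page): take a 2GB ZONE_NORMAL, i.e. spanned pages = 524288 with no holes (absent = 0), whose memmap -- about 524288 * 64 bytes, or 8192 pages -- happens to be allocated from ZONE_MOVABLE:

	intended: present = spanned - absent - bootmem used in this zone = 524288
	current:  present = spanned - absent - memmap pages              = 516096

so the current formula subtracts memmap pages that were never taken out of this zone at all.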
This may cause zone->present_pages less than it should be. For example, numa node 1 has ZONE_NORMAL and ZONE_MOVABLE, it's memmap and other bootmem will be allocated from ZONE_MOVABLE, so ZONE_NORMAL's present_pages should be spanned pages - absent pages, but now it also minus memmap pages(free_area_init_core), which are actually allocated from ZONE_MOVABLE. When offlining all memory of a zone, this will cause zone->present_pages less than 0, because present_pages is unsigned long type, it is actually a very large integer, it indirectly caused zone->watermark[WMARK_MIN] becomes a large integer(setup_per_zone_wmarks()), than cause totalreserve_pages become a large integer(calculate_totalreserve_pages()), and finally cause memory allocating failure when fork process(__vm_enough_memory()). [root@localhost ~]# dmesg -bash: fork: Cannot allocate memory I think the bug described in http://marc.info/?l=linux-mm&m=134502182714186&w=2 is also caused by wrong zone present pages. This patch intends to fix-up zone->present_pages when memory are freed to buddy system on x86_64 and IA64 platforms. Signed-off-by: Jianguo Wu Signed-off-by: Jiang Liu Reported-by: Petr Tesarik Tested-by: Petr Tesarik Cc: "Luck, Tony" Cc: Mel Gorman Cc: Yinghai Lu Cc: Minchan Kim Cc: Johannes Weiner Cc: David Rientjes Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- arch/ia64/mm/init.c | 1 + 1 file changed, 1 insertion(+) (limited to 'arch') diff --git a/arch/ia64/mm/init.c b/arch/ia64/mm/init.c index 082e383c1b6f..acd5b68e8871 100644 --- a/arch/ia64/mm/init.c +++ b/arch/ia64/mm/init.c @@ -637,6 +637,7 @@ mem_init (void) high_memory = __va(max_low_pfn * PAGE_SIZE); + reset_zone_present_pages(); for_each_online_pgdat(pgdat) if (pgdat->bdata->node_bootmem_map) totalram_pages += free_all_bootmem_node(pgdat); -- cgit v1.2.2 From 027ef6c87853b0a9df53175063028edb4950d476 Mon Sep 17 00:00:00 2001 From: Andrea Arcangeli Date: Mon, 8 Oct 2012 16:33:27 -0700 Subject: mm: thp: fix pmd_present for split_huge_page and PROT_NONE with THP In many places !pmd_present has been converted to pmd_none. For pmds that's equivalent and pmd_none is quicker so using pmd_none is better. However (unless we delete pmd_present) we should provide an accurate pmd_present too. This will avoid the risk of code thinking the pmd is non present because it's under __split_huge_page_map, see the pmd_mknotpresent there and the comment above it. If the page has been mprotected as PROT_NONE, it would also lead to a pmd_present false negative in the same way as the race with split_huge_page. Because the PSE bit stays on at all times (both during split_huge_page and when the _PAGE_PROTNONE bit get set), we could only check for the PSE bit, but checking the PROTNONE bit too is still good to remember pmd_present must always keep PROT_NONE into account. This explains a not reproducible BUG_ON that was seldom reported on the lists. The same issue is in pmd_large, it would go wrong with both PROT_NONE and if it races with split_huge_page. 
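A minimal sketch of the corrected test, unrolled case by case purely for illustration (x86 flag names as used in the patch below; the real change is the single return statement in pmd_present()):

static inline int pmd_present_sketch(pmd_t pmd)
{
	pmdval_t flags = pmd_flags(pmd);

	if (flags & _PAGE_PRESENT)	/* the ordinary, mapped case */
		return 1;
	if (flags & _PAGE_PROTNONE)	/* mprotect(PROT_NONE): present bit
					 * cleared, PROTNONE set instead */
		return 1;
	/*
	 * __split_huge_page_map() briefly clears the present bit via
	 * pmd_mknotpresent(), but _PAGE_PSE stays set the whole time,
	 * so a huge pmd caught mid-split must still read as present.
	 */
	return !!(flags & _PAGE_PSE);
}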
Signed-off-by: Andrea Arcangeli Acked-by: Rik van Riel Cc: Johannes Weiner Cc: Hugh Dickins Cc: Mel Gorman Cc: Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- arch/x86/include/asm/pgtable.h | 11 ++++++++--- 1 file changed, 8 insertions(+), 3 deletions(-) (limited to 'arch') diff --git a/arch/x86/include/asm/pgtable.h b/arch/x86/include/asm/pgtable.h index fc9948465293..a1f780d45f76 100644 --- a/arch/x86/include/asm/pgtable.h +++ b/arch/x86/include/asm/pgtable.h @@ -146,8 +146,7 @@ static inline unsigned long pmd_pfn(pmd_t pmd) static inline int pmd_large(pmd_t pte) { - return (pmd_flags(pte) & (_PAGE_PSE | _PAGE_PRESENT)) == - (_PAGE_PSE | _PAGE_PRESENT); + return pmd_flags(pte) & _PAGE_PSE; } #ifdef CONFIG_TRANSPARENT_HUGEPAGE @@ -415,7 +414,13 @@ static inline int pte_hidden(pte_t pte) static inline int pmd_present(pmd_t pmd) { - return pmd_flags(pmd) & _PAGE_PRESENT; + /* + * Checking for _PAGE_PSE is needed too because + * split_huge_page will temporarily clear the present bit (but + * the _PAGE_PSE flag will remain set at all times while the + * _PAGE_PRESENT bit is clear). + */ + return pmd_flags(pmd) & (_PAGE_PRESENT | _PAGE_PROTNONE | _PAGE_PSE); } static inline int pmd_none(pmd_t pmd) -- cgit v1.2.2 From d760afd4d2570653891f94e13b848e97150dc5a6 Mon Sep 17 00:00:00 2001 From: Yasuaki Ishimatsu Date: Mon, 8 Oct 2012 16:34:14 -0700 Subject: memory-hotplug: suppress "Trying to free nonexistent resource " warning When our x86 box calls __remove_pages(), release_mem_region() shows many warnings. And x86 box cannot unregister iomem_resource. "Trying to free nonexistent resource " release_mem_region() has been changed to be called in each PAGES_PER_SECTION by commit de7f0cba9678 ("memory hotplug: release memory regions in PAGES_PER_SECTION chunks"). Because powerpc registers iomem_resource in each PAGES_PER_SECTION chunk. But when I hot add memory on x86 box, iomem_resource is register in each _CRS not PAGES_PER_SECTION chunk. So x86 box unregisters iomem_resource. The patch fixes the problem. Signed-off-by: Yasuaki Ishimatsu Cc: David Rientjes Cc: Jiang Liu Cc: Len Brown Cc: Benjamin Herrenschmidt Cc: Paul Mackerras Cc: Christoph Lameter Cc: Minchan Kim Cc: Andrew Morton Cc: KOSAKI Motohiro Cc: Wen Congyang Cc: Dave Hansen Cc: Nathan Fontenot Cc: Badari Pulavarty Cc: Yasunori Goto Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- arch/powerpc/platforms/pseries/hotplug-memory.c | 13 +++++++++---- 1 file changed, 9 insertions(+), 4 deletions(-) (limited to 'arch') diff --git a/arch/powerpc/platforms/pseries/hotplug-memory.c b/arch/powerpc/platforms/pseries/hotplug-memory.c index 11d8e0544ac0..dc0a035e63bb 100644 --- a/arch/powerpc/platforms/pseries/hotplug-memory.c +++ b/arch/powerpc/platforms/pseries/hotplug-memory.c @@ -77,7 +77,8 @@ static int pseries_remove_memblock(unsigned long base, unsigned int memblock_siz { unsigned long start, start_pfn; struct zone *zone; - int ret; + int i, ret; + int sections_to_remove; start_pfn = base >> PAGE_SHIFT; @@ -97,9 +98,13 @@ static int pseries_remove_memblock(unsigned long base, unsigned int memblock_siz * to sysfs "state" file and we can't remove sysfs entries * while writing to it. So we have to defer it to here. 
*/ - ret = __remove_pages(zone, start_pfn, memblock_size >> PAGE_SHIFT); - if (ret) - return ret; + sections_to_remove = (memblock_size >> PAGE_SHIFT) / PAGES_PER_SECTION; + for (i = 0; i < sections_to_remove; i++) { + unsigned long pfn = start_pfn + i * PAGES_PER_SECTION; + ret = __remove_pages(zone, start_pfn, PAGES_PER_SECTION); + if (ret) + return ret; + } /* * Update memory regions for memory remove -- cgit v1.2.2 From 15b9350a177b9fb23b69e00f78f52890ee338880 Mon Sep 17 00:00:00 2001 From: David Miller Date: Mon, 8 Oct 2012 16:34:19 -0700 Subject: sparc64: Only support 4MB huge pages and 8KB base pages. Narrowing the scope of the page size configurations will make the transparent hugepage changes much simpler. In the end what we really want to do is have the kernel support multiple huge page sizes and use whatever is appropriate as the context dictactes. Signed-off-by: David S. Miller Cc: Andrea Arcangeli Cc: Johannes Weiner Cc: Gerald Schaefer Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- arch/sparc/Kconfig | 36 --------------------------------- arch/sparc/include/asm/mmu_64.h | 16 +-------------- arch/sparc/include/asm/page_64.h | 12 ----------- arch/sparc/include/asm/pgtable_64.h | 40 ++++++------------------------------- arch/sparc/mm/init_64.c | 9 ++------- arch/sparc/mm/tsb.c | 17 ---------------- 6 files changed, 9 insertions(+), 121 deletions(-) (limited to 'arch') diff --git a/arch/sparc/Kconfig b/arch/sparc/Kconfig index e184075877d7..91c780c973ba 100644 --- a/arch/sparc/Kconfig +++ b/arch/sparc/Kconfig @@ -230,25 +230,6 @@ config EARLYFB help Say Y here to enable a faster early framebuffer boot console. -choice - prompt "Kernel page size" if SPARC64 - default SPARC64_PAGE_SIZE_8KB - -config SPARC64_PAGE_SIZE_8KB - bool "8KB" - help - This lets you select the page size of the kernel. - - 8KB and 64KB work quite well, since SPARC ELF sections - provide for up to 64KB alignment. - - If you don't know what to do, choose 8KB. 
- -config SPARC64_PAGE_SIZE_64KB - bool "64KB" - -endchoice - config SECCOMP bool "Enable seccomp to safely compute untrusted bytecode" depends on SPARC64 && PROC_FS @@ -320,23 +301,6 @@ config GENERIC_LOCKBREAK default y depends on SPARC64 && SMP && PREEMPT -choice - prompt "SPARC64 Huge TLB Page Size" - depends on SPARC64 && HUGETLB_PAGE - default HUGETLB_PAGE_SIZE_4MB - -config HUGETLB_PAGE_SIZE_4MB - bool "4MB" - -config HUGETLB_PAGE_SIZE_512K - bool "512K" - -config HUGETLB_PAGE_SIZE_64K - depends on !SPARC64_PAGE_SIZE_64KB - bool "64K" - -endchoice - config NUMA bool "NUMA support" depends on SPARC64 && SMP diff --git a/arch/sparc/include/asm/mmu_64.h b/arch/sparc/include/asm/mmu_64.h index 9067dc500535..5fb97e19d81e 100644 --- a/arch/sparc/include/asm/mmu_64.h +++ b/arch/sparc/include/asm/mmu_64.h @@ -30,22 +30,8 @@ #define CTX_PGSZ_MASK ((CTX_PGSZ_BITS << CTX_PGSZ0_SHIFT) | \ (CTX_PGSZ_BITS << CTX_PGSZ1_SHIFT)) -#if defined(CONFIG_SPARC64_PAGE_SIZE_8KB) #define CTX_PGSZ_BASE CTX_PGSZ_8KB -#elif defined(CONFIG_SPARC64_PAGE_SIZE_64KB) -#define CTX_PGSZ_BASE CTX_PGSZ_64KB -#else -#error No page size specified in kernel configuration -#endif - -#if defined(CONFIG_HUGETLB_PAGE_SIZE_4MB) -#define CTX_PGSZ_HUGE CTX_PGSZ_4MB -#elif defined(CONFIG_HUGETLB_PAGE_SIZE_512K) -#define CTX_PGSZ_HUGE CTX_PGSZ_512KB -#elif defined(CONFIG_HUGETLB_PAGE_SIZE_64K) -#define CTX_PGSZ_HUGE CTX_PGSZ_64KB -#endif - +#define CTX_PGSZ_HUGE CTX_PGSZ_4MB #define CTX_PGSZ_KERN CTX_PGSZ_4MB /* Thus, when running on UltraSPARC-III+ and later, we use the following diff --git a/arch/sparc/include/asm/page_64.h b/arch/sparc/include/asm/page_64.h index f0d09b401036..b2df9b8b2d46 100644 --- a/arch/sparc/include/asm/page_64.h +++ b/arch/sparc/include/asm/page_64.h @@ -3,13 +3,7 @@ #include -#if defined(CONFIG_SPARC64_PAGE_SIZE_8KB) #define PAGE_SHIFT 13 -#elif defined(CONFIG_SPARC64_PAGE_SIZE_64KB) -#define PAGE_SHIFT 16 -#else -#error No page size specified in kernel configuration -#endif #define PAGE_SIZE (_AC(1,UL) << PAGE_SHIFT) #define PAGE_MASK (~(PAGE_SIZE-1)) @@ -21,13 +15,7 @@ #define DCACHE_ALIASING_POSSIBLE #endif -#if defined(CONFIG_HUGETLB_PAGE_SIZE_4MB) #define HPAGE_SHIFT 22 -#elif defined(CONFIG_HUGETLB_PAGE_SIZE_512K) -#define HPAGE_SHIFT 19 -#elif defined(CONFIG_HUGETLB_PAGE_SIZE_64K) -#define HPAGE_SHIFT 16 -#endif #ifdef CONFIG_HUGETLB_PAGE #define HPAGE_SIZE (_AC(1,UL) << HPAGE_SHIFT) diff --git a/arch/sparc/include/asm/pgtable_64.h b/arch/sparc/include/asm/pgtable_64.h index 61210db139fb..387d484c62f9 100644 --- a/arch/sparc/include/asm/pgtable_64.h +++ b/arch/sparc/include/asm/pgtable_64.h @@ -160,26 +160,11 @@ #define _PAGE_SZ8K_4V _AC(0x0000000000000000,UL) /* 8K Page */ #define _PAGE_SZALL_4V _AC(0x0000000000000007,UL) /* All pgsz bits */ -#if PAGE_SHIFT == 13 #define _PAGE_SZBITS_4U _PAGE_SZ8K_4U #define _PAGE_SZBITS_4V _PAGE_SZ8K_4V -#elif PAGE_SHIFT == 16 -#define _PAGE_SZBITS_4U _PAGE_SZ64K_4U -#define _PAGE_SZBITS_4V _PAGE_SZ64K_4V -#else -#error Wrong PAGE_SHIFT specified -#endif -#if defined(CONFIG_HUGETLB_PAGE_SIZE_4MB) #define _PAGE_SZHUGE_4U _PAGE_SZ4MB_4U #define _PAGE_SZHUGE_4V _PAGE_SZ4MB_4V -#elif defined(CONFIG_HUGETLB_PAGE_SIZE_512K) -#define _PAGE_SZHUGE_4U _PAGE_SZ512K_4U -#define _PAGE_SZHUGE_4V _PAGE_SZ512K_4V -#elif defined(CONFIG_HUGETLB_PAGE_SIZE_64K) -#define _PAGE_SZHUGE_4U _PAGE_SZ64K_4U -#define _PAGE_SZHUGE_4V _PAGE_SZ64K_4V -#endif /* These are actually filled in at boot time by sun4{u,v}_pgprot_init() */ #define __P000 __pgprot(0) @@ -218,7 +203,6 @@ extern 
unsigned long _PAGE_CACHE; extern unsigned long pg_iobits; extern unsigned long _PAGE_ALL_SZ_BITS; -extern unsigned long _PAGE_SZBITS; extern struct page *mem_map_zero; #define ZERO_PAGE(vaddr) (mem_map_zero) @@ -231,22 +215,9 @@ extern struct page *mem_map_zero; static inline pte_t pfn_pte(unsigned long pfn, pgprot_t prot) { unsigned long paddr = pfn << PAGE_SHIFT; - unsigned long sz_bits; - - sz_bits = 0UL; - if (_PAGE_SZBITS_4U != 0UL || _PAGE_SZBITS_4V != 0UL) { - __asm__ __volatile__( - "\n661: sethi %%uhi(%1), %0\n" - " sllx %0, 32, %0\n" - " .section .sun4v_2insn_patch, \"ax\"\n" - " .word 661b\n" - " mov %2, %0\n" - " nop\n" - " .previous\n" - : "=r" (sz_bits) - : "i" (_PAGE_SZBITS_4U), "i" (_PAGE_SZBITS_4V)); - } - return __pte(paddr | sz_bits | pgprot_val(prot)); + + BUILD_BUG_ON(_PAGE_SZBITS_4U != 0UL || _PAGE_SZBITS_4V != 0UL); + return __pte(paddr | pgprot_val(prot)); } #define mk_pte(page, pgprot) pfn_pte(page_to_pfn(page), (pgprot)) @@ -286,6 +257,7 @@ static inline pte_t pte_modify(pte_t pte, pgprot_t prot) * Note: We encode this into 3 sun4v 2-insn patch sequences. */ + BUILD_BUG_ON(_PAGE_SZBITS_4U != 0UL || _PAGE_SZBITS_4V != 0UL); __asm__ __volatile__( "\n661: sethi %%uhi(%2), %1\n" " sethi %%hi(%2), %0\n" @@ -307,10 +279,10 @@ static inline pte_t pte_modify(pte_t pte, pgprot_t prot) : "=r" (mask), "=r" (tmp) : "i" (_PAGE_PADDR_4U | _PAGE_MODIFIED_4U | _PAGE_ACCESSED_4U | _PAGE_CP_4U | _PAGE_CV_4U | _PAGE_E_4U | _PAGE_PRESENT_4U | - _PAGE_SZBITS_4U | _PAGE_SPECIAL), + _PAGE_SPECIAL), "i" (_PAGE_PADDR_4V | _PAGE_MODIFIED_4V | _PAGE_ACCESSED_4V | _PAGE_CP_4V | _PAGE_CV_4V | _PAGE_E_4V | _PAGE_PRESENT_4V | - _PAGE_SZBITS_4V | _PAGE_SPECIAL)); + _PAGE_SPECIAL)); return __pte((pte_val(pte) & mask) | (pgprot_val(prot) & ~mask)); } diff --git a/arch/sparc/mm/init_64.c b/arch/sparc/mm/init_64.c index 7a9b788c6ced..809eecf6c797 100644 --- a/arch/sparc/mm/init_64.c +++ b/arch/sparc/mm/init_64.c @@ -276,7 +276,6 @@ static inline void tsb_insert(struct tsb *ent, unsigned long tag, unsigned long } unsigned long _PAGE_ALL_SZ_BITS __read_mostly; -unsigned long _PAGE_SZBITS __read_mostly; static void flush_dcache(unsigned long pfn) { @@ -2275,8 +2274,7 @@ static void __init sun4u_pgprot_init(void) __ACCESS_BITS_4U | _PAGE_E_4U); #ifdef CONFIG_DEBUG_PAGEALLOC - kern_linear_pte_xor[0] = (_PAGE_VALID | _PAGE_SZBITS_4U) ^ - 0xfffff80000000000UL; + kern_linear_pte_xor[0] = _PAGE_VALID ^ 0xfffff80000000000UL; #else kern_linear_pte_xor[0] = (_PAGE_VALID | _PAGE_SZ4MB_4U) ^ 0xfffff80000000000UL; @@ -2287,7 +2285,6 @@ static void __init sun4u_pgprot_init(void) for (i = 1; i < 4; i++) kern_linear_pte_xor[i] = kern_linear_pte_xor[0]; - _PAGE_SZBITS = _PAGE_SZBITS_4U; _PAGE_ALL_SZ_BITS = (_PAGE_SZ4MB_4U | _PAGE_SZ512K_4U | _PAGE_SZ64K_4U | _PAGE_SZ8K_4U | _PAGE_SZ32MB_4U | _PAGE_SZ256MB_4U); @@ -2324,8 +2321,7 @@ static void __init sun4v_pgprot_init(void) _PAGE_CACHE = _PAGE_CACHE_4V; #ifdef CONFIG_DEBUG_PAGEALLOC - kern_linear_pte_xor[0] = (_PAGE_VALID | _PAGE_SZBITS_4V) ^ - 0xfffff80000000000UL; + kern_linear_pte_xor[0] = _PAGE_VALID ^ 0xfffff80000000000UL; #else kern_linear_pte_xor[0] = (_PAGE_VALID | _PAGE_SZ4MB_4V) ^ 0xfffff80000000000UL; @@ -2339,7 +2335,6 @@ static void __init sun4v_pgprot_init(void) pg_iobits = (_PAGE_VALID | _PAGE_PRESENT_4V | __DIRTY_BITS_4V | __ACCESS_BITS_4V | _PAGE_E_4V); - _PAGE_SZBITS = _PAGE_SZBITS_4V; _PAGE_ALL_SZ_BITS = (_PAGE_SZ16GB_4V | _PAGE_SZ2GB_4V | _PAGE_SZ256MB_4V | _PAGE_SZ32MB_4V | _PAGE_SZ4MB_4V | _PAGE_SZ512K_4V | diff --git a/arch/sparc/mm/tsb.c 
b/arch/sparc/mm/tsb.c index c52add79b83d..70e50ea2a5b6 100644 --- a/arch/sparc/mm/tsb.c +++ b/arch/sparc/mm/tsb.c @@ -90,29 +90,12 @@ void flush_tsb_user(struct tlb_batch *tb) spin_unlock_irqrestore(&mm->context.lock, flags); } -#if defined(CONFIG_SPARC64_PAGE_SIZE_8KB) #define HV_PGSZ_IDX_BASE HV_PGSZ_IDX_8K #define HV_PGSZ_MASK_BASE HV_PGSZ_MASK_8K -#elif defined(CONFIG_SPARC64_PAGE_SIZE_64KB) -#define HV_PGSZ_IDX_BASE HV_PGSZ_IDX_64K -#define HV_PGSZ_MASK_BASE HV_PGSZ_MASK_64K -#else -#error Broken base page size setting... -#endif #ifdef CONFIG_HUGETLB_PAGE -#if defined(CONFIG_HUGETLB_PAGE_SIZE_64K) -#define HV_PGSZ_IDX_HUGE HV_PGSZ_IDX_64K -#define HV_PGSZ_MASK_HUGE HV_PGSZ_MASK_64K -#elif defined(CONFIG_HUGETLB_PAGE_SIZE_512K) -#define HV_PGSZ_IDX_HUGE HV_PGSZ_IDX_512K -#define HV_PGSZ_MASK_HUGE HV_PGSZ_MASK_512K -#elif defined(CONFIG_HUGETLB_PAGE_SIZE_4MB) #define HV_PGSZ_IDX_HUGE HV_PGSZ_IDX_4MB #define HV_PGSZ_MASK_HUGE HV_PGSZ_MASK_4MB -#else -#error Broken huge page size setting... -#endif #endif static void setup_tsb_params(struct mm_struct *mm, unsigned long tsb_idx, unsigned long tsb_bytes) -- cgit v1.2.2 From 56a70b8c6acc73f5d9ec383d840909dd9e63c865 Mon Sep 17 00:00:00 2001 From: David Miller Date: Mon, 8 Oct 2012 16:34:20 -0700 Subject: sparc64: Halve the size of PTE tables The reason we want to do this is to facilitate transparent huge page support. Right now PMD's cover 8MB of address space, and our huge page size is 4MB. The current transparent hugepage support is not able to handle HPAGE_SIZE != PMD_SIZE. So make PTE tables be sized to half of a page instead of a full page. We can still map properly the whole supported virtual address range which on sparc64 requires 44 bits. Add a compile time CPP test which ensures that this requirement is always met. There is a minor inefficiency added by this change. We only use half of the page for PTE tables. It's not trivial to use only half of the page yet still get all of the pgtable_page_{ctor,dtor}() stuff working properly. It is doable, and that will come in a subsequent change. Signed-off-by: David S. Miller Cc: Andrea Arcangeli Cc: Johannes Weiner Cc: Gerald Schaefer Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- arch/sparc/include/asm/pgtable_64.h | 24 +++++++----------------- arch/sparc/include/asm/tsb.h | 4 ++-- 2 files changed, 9 insertions(+), 19 deletions(-) (limited to 'arch') diff --git a/arch/sparc/include/asm/pgtable_64.h b/arch/sparc/include/asm/pgtable_64.h index 387d484c62f9..a7b5091f3b13 100644 --- a/arch/sparc/include/asm/pgtable_64.h +++ b/arch/sparc/include/asm/pgtable_64.h @@ -45,40 +45,30 @@ #define vmemmap ((struct page *)VMEMMAP_BASE) -/* XXX All of this needs to be rethought so we can take advantage - * XXX cheetah's full 64-bit virtual address space, ie. no more hole - * XXX in the middle like on spitfire. -DaveM - */ -/* - * Given a virtual address, the lowest PAGE_SHIFT bits determine offset - * into the page; the next higher PAGE_SHIFT-3 bits determine the pte# - * in the proper pagetable (the -3 is from the 8 byte ptes, and each page - * table is a single page long). The next higher PMD_BITS determine pmd# - * in the proper pmdtable (where we must have PMD_BITS <= (PAGE_SHIFT-2) - * since the pmd entries are 4 bytes, and each pmd page is a single page - * long). Finally, the higher few bits determine pgde#. 
- */ - /* PMD_SHIFT determines the size of the area a second-level page * table can map */ -#define PMD_SHIFT (PAGE_SHIFT + (PAGE_SHIFT-3)) +#define PMD_SHIFT (PAGE_SHIFT + (PAGE_SHIFT-4)) #define PMD_SIZE (_AC(1,UL) << PMD_SHIFT) #define PMD_MASK (~(PMD_SIZE-1)) #define PMD_BITS (PAGE_SHIFT - 2) /* PGDIR_SHIFT determines what a third-level page table entry can map */ -#define PGDIR_SHIFT (PAGE_SHIFT + (PAGE_SHIFT-3) + PMD_BITS) +#define PGDIR_SHIFT (PAGE_SHIFT + (PAGE_SHIFT-4) + PMD_BITS) #define PGDIR_SIZE (_AC(1,UL) << PGDIR_SHIFT) #define PGDIR_MASK (~(PGDIR_SIZE-1)) #define PGDIR_BITS (PAGE_SHIFT - 2) +#if (PGDIR_SHIFT + PGDIR_BITS) != 44 +#error Page table parameters do not cover virtual address space properly. +#endif + #ifndef __ASSEMBLY__ #include /* Entries per page directory level. */ -#define PTRS_PER_PTE (1UL << (PAGE_SHIFT-3)) +#define PTRS_PER_PTE (1UL << (PAGE_SHIFT-4)) #define PTRS_PER_PMD (1UL << PMD_BITS) #define PTRS_PER_PGD (1UL << PGDIR_BITS) diff --git a/arch/sparc/include/asm/tsb.h b/arch/sparc/include/asm/tsb.h index 1a8afd1ad04f..6435924b4514 100644 --- a/arch/sparc/include/asm/tsb.h +++ b/arch/sparc/include/asm/tsb.h @@ -152,7 +152,7 @@ extern struct tsb_phys_patch_entry __tsb_phys_patch, __tsb_phys_patch_end; lduwa [REG1 + REG2] ASI_PHYS_USE_EC, REG1; \ brz,pn REG1, FAIL_LABEL; \ sllx VADDR, 64 - PMD_SHIFT, REG2; \ - srlx REG2, 64 - PAGE_SHIFT, REG2; \ + srlx REG2, 64 - (PAGE_SHIFT - 1), REG2; \ sllx REG1, 11, REG1; \ andn REG2, 0x7, REG2; \ add REG1, REG2, REG1; @@ -177,7 +177,7 @@ extern struct tsb_phys_patch_entry __tsb_phys_patch, __tsb_phys_patch_end; lduwa [REG1 + REG2] ASI_PHYS_USE_EC, REG1; \ brz,pn REG1, FAIL_LABEL; \ sllx VADDR, 64 - PMD_SHIFT, REG2; \ - srlx REG2, 64 - PAGE_SHIFT, REG2; \ + srlx REG2, 64 - (PAGE_SHIFT - 1), REG2; \ sllx REG1, 11, REG1; \ andn REG2, 0x7, REG2; \ add REG1, REG2, REG1; -- cgit v1.2.2 From c460bec78d9257a54bcc5f9d5fadf89f8c70dbd1 Mon Sep 17 00:00:00 2001 From: David Miller Date: Mon, 8 Oct 2012 16:34:22 -0700 Subject: sparc64: Eliminate PTE table memory wastage. We've split up the PTE tables so that they take up half a page instead of a full page. This is in order to facilitate transparent huge page support, which works much better if our PMDs cover 4MB instead of 8MB. What we do is have a one-behind cache for PTE table allocations in the mm struct. This logic triggers only on allocations. For example, we don't try to keep track of free'd up page table blocks in the style that the s390 port does. There were only two slightly annoying aspects to this change: 1) Changing pgtable_t to be a "pte_t *". There's all of this special logic in the TLB free paths that needed adjustments, as did the PMD populate interfaces. 2) init_new_context() needs to zap the pointer, since the mm struct just gets copied from the parent on fork. Signed-off-by: David S. 
Miller Cc: Andrea Arcangeli Cc: Johannes Weiner Cc: Gerald Schaefer Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- arch/sparc/include/asm/mmu_64.h | 1 + arch/sparc/include/asm/page_64.h | 2 +- arch/sparc/include/asm/pgalloc_64.h | 54 ++++--------------- arch/sparc/mm/init_64.c | 101 ++++++++++++++++++++++++++++++++++++ arch/sparc/mm/tsb.c | 9 ++++ 5 files changed, 123 insertions(+), 44 deletions(-) (limited to 'arch') diff --git a/arch/sparc/include/asm/mmu_64.h b/arch/sparc/include/asm/mmu_64.h index 5fb97e19d81e..31977c8dd942 100644 --- a/arch/sparc/include/asm/mmu_64.h +++ b/arch/sparc/include/asm/mmu_64.h @@ -93,6 +93,7 @@ typedef struct { spinlock_t lock; unsigned long sparc64_ctx_val; unsigned long huge_pte_count; + struct page *pgtable_page; struct tsb_config tsb_block[MM_NUM_TSBS]; struct hv_tsb_descr tsb_descr[MM_NUM_TSBS]; } mm_context_t; diff --git a/arch/sparc/include/asm/page_64.h b/arch/sparc/include/asm/page_64.h index b2df9b8b2d46..087a5c505c69 100644 --- a/arch/sparc/include/asm/page_64.h +++ b/arch/sparc/include/asm/page_64.h @@ -86,7 +86,7 @@ typedef unsigned long pgprot_t; #endif /* (STRICT_MM_TYPECHECKS) */ -typedef struct page *pgtable_t; +typedef pte_t *pgtable_t; #define TASK_UNMAPPED_BASE (test_thread_flag(TIF_32BIT) ? \ (_AC(0x0000000070000000,UL)) : \ diff --git a/arch/sparc/include/asm/pgalloc_64.h b/arch/sparc/include/asm/pgalloc_64.h index 40b2d7a7023d..0ebca93ef0f5 100644 --- a/arch/sparc/include/asm/pgalloc_64.h +++ b/arch/sparc/include/asm/pgalloc_64.h @@ -38,51 +38,20 @@ static inline void pmd_free(struct mm_struct *mm, pmd_t *pmd) kmem_cache_free(pgtable_cache, pmd); } -static inline pte_t *pte_alloc_one_kernel(struct mm_struct *mm, - unsigned long address) -{ - return (pte_t *)__get_free_page(GFP_KERNEL | __GFP_REPEAT | __GFP_ZERO); -} - -static inline pgtable_t pte_alloc_one(struct mm_struct *mm, - unsigned long address) -{ - struct page *page; - pte_t *pte; - - pte = pte_alloc_one_kernel(mm, address); - if (!pte) - return NULL; - page = virt_to_page(pte); - pgtable_page_ctor(page); - return page; -} - -static inline void pte_free_kernel(struct mm_struct *mm, pte_t *pte) -{ - free_page((unsigned long)pte); -} - -static inline void pte_free(struct mm_struct *mm, pgtable_t ptepage) -{ - pgtable_page_dtor(ptepage); - __free_page(ptepage); -} +extern pte_t *pte_alloc_one_kernel(struct mm_struct *mm, + unsigned long address); +extern pgtable_t pte_alloc_one(struct mm_struct *mm, + unsigned long address); +extern void pte_free_kernel(struct mm_struct *mm, pte_t *pte); +extern void pte_free(struct mm_struct *mm, pgtable_t ptepage); #define pmd_populate_kernel(MM, PMD, PTE) pmd_set(PMD, PTE) -#define pmd_populate(MM,PMD,PTE_PAGE) \ - pmd_populate_kernel(MM,PMD,page_address(PTE_PAGE)) -#define pmd_pgtable(pmd) pmd_page(pmd) +#define pmd_populate(MM, PMD, PTE) pmd_set(PMD, PTE) +#define pmd_pgtable(PMD) ((pte_t *)__pmd_page(PMD)) #define check_pgt_cache() do { } while (0) -static inline void pgtable_free(void *table, bool is_page) -{ - if (is_page) - free_page((unsigned long)table); - else - kmem_cache_free(pgtable_cache, table); -} +extern void pgtable_free(void *table, bool is_page); #ifdef CONFIG_SMP @@ -113,11 +82,10 @@ static inline void pgtable_free_tlb(struct mmu_gather *tlb, void *table, bool is } #endif /* !CONFIG_SMP */ -static inline void __pte_free_tlb(struct mmu_gather *tlb, struct page *ptepage, +static inline void __pte_free_tlb(struct mmu_gather *tlb, pte_t *pte, unsigned long address) { - pgtable_page_dtor(ptepage); - 
pgtable_free_tlb(tlb, page_address(ptepage), true); + pgtable_free_tlb(tlb, pte, true); } #define __pmd_free_tlb(tlb, pmd, addr) \ diff --git a/arch/sparc/mm/init_64.c b/arch/sparc/mm/init_64.c index 809eecf6c797..12ef4ea60c88 100644 --- a/arch/sparc/mm/init_64.c +++ b/arch/sparc/mm/init_64.c @@ -2467,3 +2467,104 @@ void __flush_tlb_all(void) __asm__ __volatile__("wrpr %0, 0, %%pstate" : : "r" (pstate)); } + +static pte_t *get_from_cache(struct mm_struct *mm) +{ + struct page *page; + pte_t *ret; + + spin_lock(&mm->page_table_lock); + page = mm->context.pgtable_page; + ret = NULL; + if (page) { + void *p = page_address(page); + + mm->context.pgtable_page = NULL; + + ret = (pte_t *) (p + (PAGE_SIZE / 2)); + } + spin_unlock(&mm->page_table_lock); + + return ret; +} + +static struct page *__alloc_for_cache(struct mm_struct *mm) +{ + struct page *page = alloc_page(GFP_KERNEL | __GFP_NOTRACK | + __GFP_REPEAT | __GFP_ZERO); + + if (page) { + spin_lock(&mm->page_table_lock); + if (!mm->context.pgtable_page) { + atomic_set(&page->_count, 2); + mm->context.pgtable_page = page; + } + spin_unlock(&mm->page_table_lock); + } + return page; +} + +pte_t *pte_alloc_one_kernel(struct mm_struct *mm, + unsigned long address) +{ + struct page *page; + pte_t *pte; + + pte = get_from_cache(mm); + if (pte) + return pte; + + page = __alloc_for_cache(mm); + if (page) + pte = (pte_t *) page_address(page); + + return pte; +} + +pgtable_t pte_alloc_one(struct mm_struct *mm, + unsigned long address) +{ + struct page *page; + pte_t *pte; + + pte = get_from_cache(mm); + if (pte) + return pte; + + page = __alloc_for_cache(mm); + if (page) { + pgtable_page_ctor(page); + pte = (pte_t *) page_address(page); + } + + return pte; +} + +void pte_free_kernel(struct mm_struct *mm, pte_t *pte) +{ + struct page *page = virt_to_page(pte); + if (put_page_testzero(page)) + free_hot_cold_page(page, 0); +} + +static void __pte_free(pgtable_t pte) +{ + struct page *page = virt_to_page(pte); + if (put_page_testzero(page)) { + pgtable_page_dtor(page); + free_hot_cold_page(page, 0); + } +} + +void pte_free(struct mm_struct *mm, pgtable_t pte) +{ + __pte_free(pte); +} + +void pgtable_free(void *table, bool is_page) +{ + if (is_page) + __pte_free(table); + else + kmem_cache_free(pgtable_cache, table); +} diff --git a/arch/sparc/mm/tsb.c b/arch/sparc/mm/tsb.c index 70e50ea2a5b6..a35ee832baf3 100644 --- a/arch/sparc/mm/tsb.c +++ b/arch/sparc/mm/tsb.c @@ -445,6 +445,8 @@ int init_new_context(struct task_struct *tsk, struct mm_struct *mm) mm->context.huge_pte_count = 0; #endif + mm->context.pgtable_page = NULL; + /* copy_mm() copies over the parent's mm_struct before calling * us, so we need to zero out the TSB pointer or else tsb_grow() * will be confused and think there is an older TSB to free up. @@ -483,10 +485,17 @@ static void tsb_destroy_one(struct tsb_config *tp) void destroy_context(struct mm_struct *mm) { unsigned long flags, i; + struct page *page; for (i = 0; i < MM_NUM_TSBS; i++) tsb_destroy_one(&mm->context.tsb_block[i]); + page = mm->context.pgtable_page; + if (page && put_page_testzero(page)) { + pgtable_page_dtor(page); + free_hot_cold_page(page, 0); + } + spin_lock_irqsave(&ctx_alloc_lock, flags); if (CTX_VALID(mm->context)) { -- cgit v1.2.2 From dbc9fdf063dc4f12af71d7858bd216170129822e Mon Sep 17 00:00:00 2001 From: David Miller Date: Mon, 8 Oct 2012 16:34:23 -0700 Subject: sparc64: Document PGD and PMD layout. 
We're going to be messing around with the PMD interpretation and layout for the sake of transparent huge pages, so we better clearly document what we're starting with. Signed-off-by: David S. Miller Cc: Andrea Arcangeli Cc: Johannes Weiner Cc: Gerald Schaefer Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- arch/sparc/include/asm/pgtable_64.h | 16 ++++++++++++---- arch/sparc/include/asm/tsb.h | 8 ++++---- 2 files changed, 16 insertions(+), 8 deletions(-) (limited to 'arch') diff --git a/arch/sparc/include/asm/pgtable_64.h b/arch/sparc/include/asm/pgtable_64.h index a7b5091f3b13..af3cd7a9e9ac 100644 --- a/arch/sparc/include/asm/pgtable_64.h +++ b/arch/sparc/include/asm/pgtable_64.h @@ -63,6 +63,14 @@ #error Page table parameters do not cover virtual address space properly. #endif +/* PMDs point to PTE tables which are 4K aligned. */ +#define PMD_PADDR _AC(0xfffffffe,UL) +#define PMD_PADDR_SHIFT _AC(11,UL) + +/* PGDs point to PMD tables which are 8K aligned. */ +#define PGD_PADDR _AC(0xfffffffc,UL) +#define PGD_PADDR_SHIFT _AC(11,UL) + #ifndef __ASSEMBLY__ #include @@ -581,14 +589,14 @@ static inline unsigned long pte_special(pte_t pte) } #define pmd_set(pmdp, ptep) \ - (pmd_val(*(pmdp)) = (__pa((unsigned long) (ptep)) >> 11UL)) + (pmd_val(*(pmdp)) = (__pa((unsigned long) (ptep)) >> PMD_PADDR_SHIFT)) #define pud_set(pudp, pmdp) \ - (pud_val(*(pudp)) = (__pa((unsigned long) (pmdp)) >> 11UL)) + (pud_val(*(pudp)) = (__pa((unsigned long) (pmdp)) >> PGD_PADDR_SHIFT)) #define __pmd_page(pmd) \ - ((unsigned long) __va((((unsigned long)pmd_val(pmd))<<11UL))) + ((unsigned long) __va((((unsigned long)pmd_val(pmd))< Date: Mon, 8 Oct 2012 16:34:25 -0700 Subject: mm: Add and use update_mmu_cache_pmd() in transparent huge page code. The transparent huge page code passes a PMD pointer in as the third argument of update_mmu_cache(), which expects a PTE pointer. This never got noticed because X86 implements update_mmu_cache() as a macro and thus we don't get any type checking, and X86 is the only architecture which supports transparent huge pages currently. Before other architectures can support transparent huge pages properly we need to add a new interface which will take a PMD pointer as the third argument rather than a PTE pointer. [akpm@linux-foundation.org: implement update_mm_cache_pmd() for s390] Signed-off-by: David S. Miller Cc: Andrea Arcangeli Cc: Johannes Weiner Cc: Gerald Schaefer Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- arch/s390/include/asm/pgtable.h | 1 + arch/x86/include/asm/pgtable_32.h | 1 + arch/x86/include/asm/pgtable_64.h | 1 + 3 files changed, 3 insertions(+) (limited to 'arch') diff --git a/arch/s390/include/asm/pgtable.h b/arch/s390/include/asm/pgtable.h index ed14fc2db6e0..979fe3dc0788 100644 --- a/arch/s390/include/asm/pgtable.h +++ b/arch/s390/include/asm/pgtable.h @@ -42,6 +42,7 @@ extern void fault_init(void); * tables contain all the necessary information. */ #define update_mmu_cache(vma, address, ptep) do { } while (0) +#define update_mmu_cache_pmd(vma, address, ptep) do { } while (0) /* * ZERO_PAGE is a global shared page that is always zero; used diff --git a/arch/x86/include/asm/pgtable_32.h b/arch/x86/include/asm/pgtable_32.h index 0c92113c4cb6..8faa215a503e 100644 --- a/arch/x86/include/asm/pgtable_32.h +++ b/arch/x86/include/asm/pgtable_32.h @@ -71,6 +71,7 @@ do { \ * tables contain all the necessary information. 
*/ #define update_mmu_cache(vma, address, ptep) do { } while (0) +#define update_mmu_cache_pmd(vma, address, pmd) do { } while (0) #endif /* !__ASSEMBLY__ */ diff --git a/arch/x86/include/asm/pgtable_64.h b/arch/x86/include/asm/pgtable_64.h index 8251be02301e..47356f9df82e 100644 --- a/arch/x86/include/asm/pgtable_64.h +++ b/arch/x86/include/asm/pgtable_64.h @@ -143,6 +143,7 @@ static inline int pgd_large(pgd_t pgd) { return 0; } #define pte_unmap(pte) ((void)(pte))/* NOP */ #define update_mmu_cache(vma, address, ptep) do { } while (0) +#define update_mmu_cache_pmd(vma, address, pmd) do { } while (0) /* Encode and de-code a swap entry */ #if _PAGE_BIT_FILE < _PAGE_BIT_PROTNONE -- cgit v1.2.2 From 9e695d2ecc8451cc2c1603d60b5c8e7f5581923a Mon Sep 17 00:00:00 2001 From: David Miller Date: Mon, 8 Oct 2012 16:34:29 -0700 Subject: sparc64: Support transparent huge pages. This is relatively easy since PMD's now cover exactly 4MB of memory. Our PMD entries are 32-bits each, so we use a special encoding. The lowest bit, PMD_ISHUGE, determines the interpretation. This is possible because sparc64's page tables are purely software entities so we can use whatever encoding scheme we want. We just have to make the TLB miss assembler page table walkers aware of the layout. set_pmd_at() works much like set_pte_at() but it has to operate in two regimes. In the first regime we are replacing a table of non-huge PTEs with a huge page, so we have to queue up TLB flushes based upon what mappings are valid in the PTE table. In the second regime we are going from huge-page to non-huge-page, and in that case we need only queue up a single TLB flush to push out the huge page mapping. We still have 5 bits remaining in the huge PMD encoding so we can very likely support any new pieces of THP state tracking that might get added in the future. With lots of help from Johannes Weiner. Signed-off-by: David S.
Miller Cc: Andrea Arcangeli Cc: Johannes Weiner Cc: Gerald Schaefer Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- arch/sparc/include/asm/hugetlb.h | 5 +- arch/sparc/include/asm/mmu_64.h | 2 +- arch/sparc/include/asm/mmu_context_64.h | 2 +- arch/sparc/include/asm/page_64.h | 7 +- arch/sparc/include/asm/pgalloc_64.h | 4 +- arch/sparc/include/asm/pgtable_64.h | 177 ++++++++++++++++++++++++++- arch/sparc/include/asm/tsb.h | 94 +++++++++++++-- arch/sparc/kernel/sun4v_tlb_miss.S | 2 +- arch/sparc/kernel/tsb.S | 9 +- arch/sparc/mm/fault_64.c | 4 +- arch/sparc/mm/hugetlbpage.c | 50 -------- arch/sparc/mm/init_64.c | 204 ++++++++++++++++++++++++++++++-- arch/sparc/mm/tlb.c | 118 +++++++++++++++--- arch/sparc/mm/tsb.c | 14 +-- 14 files changed, 582 insertions(+), 110 deletions(-) (limited to 'arch') diff --git a/arch/sparc/include/asm/hugetlb.h b/arch/sparc/include/asm/hugetlb.h index e7927c9758a1..8c5eed6d267f 100644 --- a/arch/sparc/include/asm/hugetlb.h +++ b/arch/sparc/include/asm/hugetlb.h @@ -10,7 +10,10 @@ void set_huge_pte_at(struct mm_struct *mm, unsigned long addr, pte_t huge_ptep_get_and_clear(struct mm_struct *mm, unsigned long addr, pte_t *ptep); -void hugetlb_prefault_arch_hook(struct mm_struct *mm); +static inline void hugetlb_prefault_arch_hook(struct mm_struct *mm) +{ + hugetlb_setup(mm); +} static inline int is_hugepage_only_range(struct mm_struct *mm, unsigned long addr, diff --git a/arch/sparc/include/asm/mmu_64.h b/arch/sparc/include/asm/mmu_64.h index 31977c8dd942..76092c4dd277 100644 --- a/arch/sparc/include/asm/mmu_64.h +++ b/arch/sparc/include/asm/mmu_64.h @@ -82,7 +82,7 @@ struct tsb_config { #define MM_TSB_BASE 0 -#ifdef CONFIG_HUGETLB_PAGE +#if defined(CONFIG_HUGETLB_PAGE) || defined(CONFIG_TRANSPARENT_HUGEPAGE) #define MM_TSB_HUGE 1 #define MM_NUM_TSBS 2 #else diff --git a/arch/sparc/include/asm/mmu_context_64.h b/arch/sparc/include/asm/mmu_context_64.h index a97fd085cebe..9191ca62ed9c 100644 --- a/arch/sparc/include/asm/mmu_context_64.h +++ b/arch/sparc/include/asm/mmu_context_64.h @@ -36,7 +36,7 @@ static inline void tsb_context_switch(struct mm_struct *mm) { __tsb_context_switch(__pa(mm->pgd), &mm->context.tsb_block[0], -#ifdef CONFIG_HUGETLB_PAGE +#if defined(CONFIG_HUGETLB_PAGE) || defined(CONFIG_TRANSPARENT_HUGEPAGE) (mm->context.tsb_block[1].tsb ? 
&mm->context.tsb_block[1] : NULL) diff --git a/arch/sparc/include/asm/page_64.h b/arch/sparc/include/asm/page_64.h index 087a5c505c69..4b39f74d6ca0 100644 --- a/arch/sparc/include/asm/page_64.h +++ b/arch/sparc/include/asm/page_64.h @@ -17,7 +17,7 @@ #define HPAGE_SHIFT 22 -#ifdef CONFIG_HUGETLB_PAGE +#if defined(CONFIG_HUGETLB_PAGE) || defined(CONFIG_TRANSPARENT_HUGEPAGE) #define HPAGE_SIZE (_AC(1,UL) << HPAGE_SHIFT) #define HPAGE_MASK (~(HPAGE_SIZE - 1UL)) #define HUGETLB_PAGE_ORDER (HPAGE_SHIFT - PAGE_SHIFT) @@ -26,6 +26,11 @@ #ifndef __ASSEMBLY__ +#if defined(CONFIG_HUGETLB_PAGE) || defined(CONFIG_TRANSPARENT_HUGEPAGE) +struct mm_struct; +extern void hugetlb_setup(struct mm_struct *mm); +#endif + #define WANT_PAGE_VIRTUAL extern void _clear_page(void *page); diff --git a/arch/sparc/include/asm/pgalloc_64.h b/arch/sparc/include/asm/pgalloc_64.h index 0ebca93ef0f5..bcfe063bce23 100644 --- a/arch/sparc/include/asm/pgalloc_64.h +++ b/arch/sparc/include/asm/pgalloc_64.h @@ -45,8 +45,8 @@ extern pgtable_t pte_alloc_one(struct mm_struct *mm, extern void pte_free_kernel(struct mm_struct *mm, pte_t *pte); extern void pte_free(struct mm_struct *mm, pgtable_t ptepage); -#define pmd_populate_kernel(MM, PMD, PTE) pmd_set(PMD, PTE) -#define pmd_populate(MM, PMD, PTE) pmd_set(PMD, PTE) +#define pmd_populate_kernel(MM, PMD, PTE) pmd_set(MM, PMD, PTE) +#define pmd_populate(MM, PMD, PTE) pmd_set(MM, PMD, PTE) #define pmd_pgtable(PMD) ((pte_t *)__pmd_page(PMD)) #define check_pgt_cache() do { } while (0) diff --git a/arch/sparc/include/asm/pgtable_64.h b/arch/sparc/include/asm/pgtable_64.h index af3cd7a9e9ac..95515f1e7cef 100644 --- a/arch/sparc/include/asm/pgtable_64.h +++ b/arch/sparc/include/asm/pgtable_64.h @@ -63,10 +63,31 @@ #error Page table parameters do not cover virtual address space properly. #endif +#if (PMD_SHIFT != HPAGE_SHIFT) +#error PMD_SHIFT must equal HPAGE_SHIFT for transparent huge pages. +#endif + /* PMDs point to PTE tables which are 4K aligned. */ #define PMD_PADDR _AC(0xfffffffe,UL) #define PMD_PADDR_SHIFT _AC(11,UL) +#ifdef CONFIG_TRANSPARENT_HUGEPAGE +#define PMD_ISHUGE _AC(0x00000001,UL) + +/* This is the PMD layout when PMD_ISHUGE is set. With 4MB huge + * pages, this frees up a bunch of bits in the layout that we can + * use for the protection settings and software metadata. + */ +#define PMD_HUGE_PADDR _AC(0xfffff800,UL) +#define PMD_HUGE_PROTBITS _AC(0x000007ff,UL) +#define PMD_HUGE_PRESENT _AC(0x00000400,UL) +#define PMD_HUGE_WRITE _AC(0x00000200,UL) +#define PMD_HUGE_DIRTY _AC(0x00000100,UL) +#define PMD_HUGE_ACCESSED _AC(0x00000080,UL) +#define PMD_HUGE_EXEC _AC(0x00000040,UL) +#define PMD_HUGE_SPLITTING _AC(0x00000020,UL) +#endif + /* PGDs point to PMD tables which are 8K aligned. */ #define PGD_PADDR _AC(0xfffffffc,UL) #define PGD_PADDR_SHIFT _AC(11,UL) @@ -219,6 +240,19 @@ static inline pte_t pfn_pte(unsigned long pfn, pgprot_t prot) } #define mk_pte(page, pgprot) pfn_pte(page_to_pfn(page), (pgprot)) +#ifdef CONFIG_TRANSPARENT_HUGEPAGE +extern pmd_t pfn_pmd(unsigned long page_nr, pgprot_t pgprot); +#define mk_pmd(page, pgprot) pfn_pmd(page_to_pfn(page), (pgprot)) + +extern pmd_t pmd_modify(pmd_t pmd, pgprot_t newprot); + +static inline pmd_t pmd_mkhuge(pmd_t pmd) +{ + /* Do nothing, mk_pmd() does this part. */ + return pmd; +} +#endif + /* This one can be done with two shifts. 
*/ static inline unsigned long pte_pfn(pte_t pte) { @@ -588,19 +622,130 @@ static inline unsigned long pte_special(pte_t pte) return pte_val(pte) & _PAGE_SPECIAL; } -#define pmd_set(pmdp, ptep) \ - (pmd_val(*(pmdp)) = (__pa((unsigned long) (ptep)) >> PMD_PADDR_SHIFT)) +#ifdef CONFIG_TRANSPARENT_HUGEPAGE +static inline int pmd_young(pmd_t pmd) +{ + return pmd_val(pmd) & PMD_HUGE_ACCESSED; +} + +static inline int pmd_write(pmd_t pmd) +{ + return pmd_val(pmd) & PMD_HUGE_WRITE; +} + +static inline unsigned long pmd_pfn(pmd_t pmd) +{ + unsigned long val = pmd_val(pmd) & PMD_HUGE_PADDR; + + return val >> (PAGE_SHIFT - PMD_PADDR_SHIFT); +} + +static inline int pmd_large(pmd_t pmd) +{ + return (pmd_val(pmd) & (PMD_ISHUGE | PMD_HUGE_PRESENT)) == + (PMD_ISHUGE | PMD_HUGE_PRESENT); +} + +static inline int pmd_trans_splitting(pmd_t pmd) +{ + return (pmd_val(pmd) & (PMD_ISHUGE|PMD_HUGE_SPLITTING)) == + (PMD_ISHUGE|PMD_HUGE_SPLITTING); +} + +static inline int pmd_trans_huge(pmd_t pmd) +{ + return pmd_val(pmd) & PMD_ISHUGE; +} + +#define has_transparent_hugepage() 1 + +static inline pmd_t pmd_mkold(pmd_t pmd) +{ + pmd_val(pmd) &= ~PMD_HUGE_ACCESSED; + return pmd; +} + +static inline pmd_t pmd_wrprotect(pmd_t pmd) +{ + pmd_val(pmd) &= ~PMD_HUGE_WRITE; + return pmd; +} + +static inline pmd_t pmd_mkdirty(pmd_t pmd) +{ + pmd_val(pmd) |= PMD_HUGE_DIRTY; + return pmd; +} + +static inline pmd_t pmd_mkyoung(pmd_t pmd) +{ + pmd_val(pmd) |= PMD_HUGE_ACCESSED; + return pmd; +} + +static inline pmd_t pmd_mkwrite(pmd_t pmd) +{ + pmd_val(pmd) |= PMD_HUGE_WRITE; + return pmd; +} + +static inline pmd_t pmd_mknotpresent(pmd_t pmd) +{ + pmd_val(pmd) &= ~PMD_HUGE_PRESENT; + return pmd; +} + +static inline pmd_t pmd_mksplitting(pmd_t pmd) +{ + pmd_val(pmd) |= PMD_HUGE_SPLITTING; + return pmd; +} + +extern pgprot_t pmd_pgprot(pmd_t entry); +#endif + +static inline int pmd_present(pmd_t pmd) +{ + return pmd_val(pmd) != 0U; +} + +#define pmd_none(pmd) (!pmd_val(pmd)) + +#ifdef CONFIG_TRANSPARENT_HUGEPAGE +extern void set_pmd_at(struct mm_struct *mm, unsigned long addr, + pmd_t *pmdp, pmd_t pmd); +#else +static inline void set_pmd_at(struct mm_struct *mm, unsigned long addr, + pmd_t *pmdp, pmd_t pmd) +{ + *pmdp = pmd; +} +#endif + +static inline void pmd_set(struct mm_struct *mm, pmd_t *pmdp, pte_t *ptep) +{ + unsigned long val = __pa((unsigned long) (ptep)) >> PMD_PADDR_SHIFT; + + pmd_val(*pmdp) = val; +} + #define pud_set(pudp, pmdp) \ (pud_val(*(pudp)) = (__pa((unsigned long) (pmdp)) >> PGD_PADDR_SHIFT)) -#define __pmd_page(pmd) \ - ((unsigned long) __va((((unsigned long)pmd_val(pmd))<> PAGE_SHIFT) & 0xffUL) diff --git a/arch/sparc/include/asm/tsb.h b/arch/sparc/include/asm/tsb.h index ef8cd1a174f1..b4c258de4443 100644 --- a/arch/sparc/include/asm/tsb.h +++ b/arch/sparc/include/asm/tsb.h @@ -157,10 +157,86 @@ extern struct tsb_phys_patch_entry __tsb_phys_patch, __tsb_phys_patch_end; andn REG2, 0x7, REG2; \ add REG1, REG2, REG1; - /* Do a user page table walk in MMU globals. Leaves physical PTE - * pointer in REG1. Jumps to FAIL_LABEL on early page table walk - * termination. Physical base of page tables is in PHYS_PGD which - * will not be modified. + /* This macro exists only to make the PMD translator below easier + * to read. It hides the ELF section switch for the sun4v code + * patching. 
+ */ +#define OR_PTE_BIT(REG, NAME) \ +661: or REG, _PAGE_##NAME##_4U, REG; \ + .section .sun4v_1insn_patch, "ax"; \ + .word 661b; \ + or REG, _PAGE_##NAME##_4V, REG; \ + .previous; + + /* Load into REG the PTE value for VALID, CACHE, and SZHUGE. */ +#define BUILD_PTE_VALID_SZHUGE_CACHE(REG) \ +661: sethi %uhi(_PAGE_VALID|_PAGE_SZHUGE_4U), REG; \ + .section .sun4v_1insn_patch, "ax"; \ + .word 661b; \ + sethi %uhi(_PAGE_VALID), REG; \ + .previous; \ + sllx REG, 32, REG; \ +661: or REG, _PAGE_CP_4U|_PAGE_CV_4U, REG; \ + .section .sun4v_1insn_patch, "ax"; \ + .word 661b; \ + or REG, _PAGE_CP_4V|_PAGE_CV_4V|_PAGE_SZHUGE_4V, REG; \ + .previous; + + /* PMD has been loaded into REG1, interpret the value, seeing + * if it is a HUGE PMD or a normal one. If it is not valid + * then jump to FAIL_LABEL. If it is a HUGE PMD, and it + * translates to a valid PTE, branch to PTE_LABEL. + * + * We translate the PMD by hand, one bit at a time, + * constructing the huge PTE. + * + * So we construct the PTE in REG2 as follows: + * + * 1) Extract the PMD PFN from REG1 and place it into REG2. + * + * 2) Translate PMD protection bits in REG1 into REG2, one bit + * at a time using andcc tests on REG1 and OR's into REG2. + * + * Only two bits to be concerned with here, EXEC and WRITE. + * Now REG1 is freed up and we can use it as a temporary. + * + * 3) Construct the VALID, CACHE, and page size PTE bits in + * REG1, OR with REG2 to form final PTE. + */ +#ifdef CONFIG_TRANSPARENT_HUGEPAGE +#define USER_PGTABLE_CHECK_PMD_HUGE(VADDR, REG1, REG2, FAIL_LABEL, PTE_LABEL) \ + brz,pn REG1, FAIL_LABEL; \ + andcc REG1, PMD_ISHUGE, %g0; \ + be,pt %xcc, 700f; \ + and REG1, PMD_HUGE_PRESENT|PMD_HUGE_ACCESSED, REG2; \ + cmp REG2, PMD_HUGE_PRESENT|PMD_HUGE_ACCESSED; \ + bne,pn %xcc, FAIL_LABEL; \ + andn REG1, PMD_HUGE_PROTBITS, REG2; \ + sllx REG2, PMD_PADDR_SHIFT, REG2; \ + /* REG2 now holds PFN << PAGE_SHIFT */ \ + andcc REG1, PMD_HUGE_EXEC, %g0; \ + bne,a,pt %xcc, 1f; \ + OR_PTE_BIT(REG2, EXEC); \ +1: andcc REG1, PMD_HUGE_WRITE, %g0; \ + bne,a,pt %xcc, 1f; \ + OR_PTE_BIT(REG2, W); \ + /* REG1 can now be clobbered, build final PTE */ \ +1: BUILD_PTE_VALID_SZHUGE_CACHE(REG1); \ + ba,pt %xcc, PTE_LABEL; \ + or REG1, REG2, REG1; \ +700: +#else +#define USER_PGTABLE_CHECK_PMD_HUGE(VADDR, REG1, REG2, FAIL_LABEL, PTE_LABEL) \ + brz,pn REG1, FAIL_LABEL; \ + nop; +#endif + + /* Do a user page table walk in MMU globals. Leaves final, + * valid, PTE value in REG1. Jumps to FAIL_LABEL on early + * page table walk termination or if the PTE is not valid. + * + * Physical base of page tables is in PHYS_PGD which will not + * be modified. * * VADDR will not be clobbered, but REG1 and REG2 will. */ @@ -175,12 +251,16 @@ extern struct tsb_phys_patch_entry __tsb_phys_patch, __tsb_phys_patch_end; sllx REG1, PGD_PADDR_SHIFT, REG1; \ andn REG2, 0x3, REG2; \ lduwa [REG1 + REG2] ASI_PHYS_USE_EC, REG1; \ - brz,pn REG1, FAIL_LABEL; \ - sllx VADDR, 64 - PMD_SHIFT, REG2; \ + USER_PGTABLE_CHECK_PMD_HUGE(VADDR, REG1, REG2, FAIL_LABEL, 800f) \ + sllx VADDR, 64 - PMD_SHIFT, REG2; \ srlx REG2, 64 - (PAGE_SHIFT - 1), REG2; \ sllx REG1, PMD_PADDR_SHIFT, REG1; \ andn REG2, 0x7, REG2; \ - add REG1, REG2, REG1; + add REG1, REG2, REG1; \ + ldxa [REG1] ASI_PHYS_USE_EC, REG1; \ + brgez,pn REG1, FAIL_LABEL; \ + nop; \ +800: /* Lookup a OBP mapping on VADDR in the prom_trans[] table at TL>0. * If no entry is found, FAIL_LABEL will be branched to. 
On success diff --git a/arch/sparc/kernel/sun4v_tlb_miss.S b/arch/sparc/kernel/sun4v_tlb_miss.S index e1fbf8c75787..bde867fd71e8 100644 --- a/arch/sparc/kernel/sun4v_tlb_miss.S +++ b/arch/sparc/kernel/sun4v_tlb_miss.S @@ -176,7 +176,7 @@ sun4v_tsb_miss_common: sub %g2, TRAP_PER_CPU_FAULT_INFO, %g2 -#ifdef CONFIG_HUGETLB_PAGE +#if defined(CONFIG_HUGETLB_PAGE) || defined(CONFIG_TRANSPARENT_HUGEPAGE) mov SCRATCHPAD_UTSBREG2, %g5 ldxa [%g5] ASI_SCRATCHPAD, %g5 cmp %g5, -1 diff --git a/arch/sparc/kernel/tsb.S b/arch/sparc/kernel/tsb.S index db15d123f054..d4bdc7a62375 100644 --- a/arch/sparc/kernel/tsb.S +++ b/arch/sparc/kernel/tsb.S @@ -49,7 +49,7 @@ tsb_miss_page_table_walk: /* Before committing to a full page table walk, * check the huge page TSB. */ -#ifdef CONFIG_HUGETLB_PAGE +#if defined(CONFIG_HUGETLB_PAGE) || defined(CONFIG_TRANSPARENT_HUGEPAGE) 661: ldx [%g7 + TRAP_PER_CPU_TSB_HUGE], %g5 nop @@ -110,12 +110,9 @@ tsb_miss_page_table_walk: tsb_miss_page_table_walk_sun4v_fastpath: USER_PGTABLE_WALK_TL1(%g4, %g7, %g5, %g2, tsb_do_fault) - /* Load and check PTE. */ - ldxa [%g5] ASI_PHYS_USE_EC, %g5 - brgez,pn %g5, tsb_do_fault - nop + /* Valid PTE is now in %g5. */ -#ifdef CONFIG_HUGETLB_PAGE +#if defined(CONFIG_HUGETLB_PAGE) || defined(CONFIG_TRANSPARENT_HUGEPAGE) 661: sethi %uhi(_PAGE_SZALL_4U), %g7 sllx %g7, 32, %g7 .section .sun4v_2insn_patch, "ax" diff --git a/arch/sparc/mm/fault_64.c b/arch/sparc/mm/fault_64.c index 413d29263304..2976dba1ebaf 100644 --- a/arch/sparc/mm/fault_64.c +++ b/arch/sparc/mm/fault_64.c @@ -465,13 +465,13 @@ good_area: up_read(&mm->mmap_sem); mm_rss = get_mm_rss(mm); -#ifdef CONFIG_HUGETLB_PAGE +#if defined(CONFIG_HUGETLB_PAGE) || defined(CONFIG_TRANSPARENT_HUGEPAGE) mm_rss -= (mm->context.huge_pte_count * (HPAGE_SIZE / PAGE_SIZE)); #endif if (unlikely(mm_rss > mm->context.tsb_block[MM_TSB_BASE].tsb_rss_limit)) tsb_grow(mm, MM_TSB_BASE, mm_rss); -#ifdef CONFIG_HUGETLB_PAGE +#if defined(CONFIG_HUGETLB_PAGE) || defined(CONFIG_TRANSPARENT_HUGEPAGE) mm_rss = mm->context.huge_pte_count; if (unlikely(mm_rss > mm->context.tsb_block[MM_TSB_HUGE].tsb_rss_limit)) diff --git a/arch/sparc/mm/hugetlbpage.c b/arch/sparc/mm/hugetlbpage.c index 07e14535375c..f76f83d5ac63 100644 --- a/arch/sparc/mm/hugetlbpage.c +++ b/arch/sparc/mm/hugetlbpage.c @@ -303,53 +303,3 @@ struct page *follow_huge_pmd(struct mm_struct *mm, unsigned long address, { return NULL; } - -static void context_reload(void *__data) -{ - struct mm_struct *mm = __data; - - if (mm == current->mm) - load_secondary_context(mm); -} - -void hugetlb_prefault_arch_hook(struct mm_struct *mm) -{ - struct tsb_config *tp = &mm->context.tsb_block[MM_TSB_HUGE]; - - if (likely(tp->tsb != NULL)) - return; - - tsb_grow(mm, MM_TSB_HUGE, 0); - tsb_context_switch(mm); - smp_tsb_sync(mm); - - /* On UltraSPARC-III+ and later, configure the second half of - * the Data-TLB for huge pages. - */ - if (tlb_type == cheetah_plus) { - unsigned long ctx; - - spin_lock(&ctx_alloc_lock); - ctx = mm->context.sparc64_ctx_val; - ctx &= ~CTX_PGSZ_MASK; - ctx |= CTX_PGSZ_BASE << CTX_PGSZ0_SHIFT; - ctx |= CTX_PGSZ_HUGE << CTX_PGSZ1_SHIFT; - - if (ctx != mm->context.sparc64_ctx_val) { - /* When changing the page size fields, we - * must perform a context flush so that no - * stale entries match. This flush must - * occur with the original context register - * settings. - */ - do_flush_tlb_mm(mm); - - /* Reload the context register of all processors - * also executing in this address space. 
- */ - mm->context.sparc64_ctx_val = ctx; - on_each_cpu(context_reload, mm, 0); - } - spin_unlock(&ctx_alloc_lock); - } -} diff --git a/arch/sparc/mm/init_64.c b/arch/sparc/mm/init_64.c index 12ef4ea60c88..9e28a118e6a4 100644 --- a/arch/sparc/mm/init_64.c +++ b/arch/sparc/mm/init_64.c @@ -306,12 +306,24 @@ static void flush_dcache(unsigned long pfn) } } +/* mm->context.lock must be held */ +static void __update_mmu_tsb_insert(struct mm_struct *mm, unsigned long tsb_index, + unsigned long tsb_hash_shift, unsigned long address, + unsigned long tte) +{ + struct tsb *tsb = mm->context.tsb_block[tsb_index].tsb; + unsigned long tag; + + tsb += ((address >> tsb_hash_shift) & + (mm->context.tsb_block[tsb_index].tsb_nentries - 1UL)); + tag = (address >> 22UL); + tsb_insert(tsb, tag, tte); +} + void update_mmu_cache(struct vm_area_struct *vma, unsigned long address, pte_t *ptep) { + unsigned long tsb_index, tsb_hash_shift, flags; struct mm_struct *mm; - struct tsb *tsb; - unsigned long tag, flags; - unsigned long tsb_index, tsb_hash_shift; pte_t pte = *ptep; if (tlb_type != hypervisor) { @@ -328,7 +340,7 @@ void update_mmu_cache(struct vm_area_struct *vma, unsigned long address, pte_t * spin_lock_irqsave(&mm->context.lock, flags); -#ifdef CONFIG_HUGETLB_PAGE +#if defined(CONFIG_HUGETLB_PAGE) || defined(CONFIG_TRANSPARENT_HUGEPAGE) if (mm->context.tsb_block[MM_TSB_HUGE].tsb != NULL) { if ((tlb_type == hypervisor && (pte_val(pte) & _PAGE_SZALL_4V) == _PAGE_SZHUGE_4V) || @@ -340,11 +352,8 @@ void update_mmu_cache(struct vm_area_struct *vma, unsigned long address, pte_t * } #endif - tsb = mm->context.tsb_block[tsb_index].tsb; - tsb += ((address >> tsb_hash_shift) & - (mm->context.tsb_block[tsb_index].tsb_nentries - 1UL)); - tag = (address >> 22UL); - tsb_insert(tsb, tag, pte_val(pte)); + __update_mmu_tsb_insert(mm, tsb_index, tsb_hash_shift, + address, pte_val(pte)); spin_unlock_irqrestore(&mm->context.lock, flags); } @@ -2568,3 +2577,180 @@ void pgtable_free(void *table, bool is_page) else kmem_cache_free(pgtable_cache, table); } + +#ifdef CONFIG_TRANSPARENT_HUGEPAGE +static pmd_t pmd_set_protbits(pmd_t pmd, pgprot_t pgprot, bool for_modify) +{ + if (pgprot_val(pgprot) & _PAGE_VALID) + pmd_val(pmd) |= PMD_HUGE_PRESENT; + if (tlb_type == hypervisor) { + if (pgprot_val(pgprot) & _PAGE_WRITE_4V) + pmd_val(pmd) |= PMD_HUGE_WRITE; + if (pgprot_val(pgprot) & _PAGE_EXEC_4V) + pmd_val(pmd) |= PMD_HUGE_EXEC; + + if (!for_modify) { + if (pgprot_val(pgprot) & _PAGE_ACCESSED_4V) + pmd_val(pmd) |= PMD_HUGE_ACCESSED; + if (pgprot_val(pgprot) & _PAGE_MODIFIED_4V) + pmd_val(pmd) |= PMD_HUGE_DIRTY; + } + } else { + if (pgprot_val(pgprot) & _PAGE_WRITE_4U) + pmd_val(pmd) |= PMD_HUGE_WRITE; + if (pgprot_val(pgprot) & _PAGE_EXEC_4U) + pmd_val(pmd) |= PMD_HUGE_EXEC; + + if (!for_modify) { + if (pgprot_val(pgprot) & _PAGE_ACCESSED_4U) + pmd_val(pmd) |= PMD_HUGE_ACCESSED; + if (pgprot_val(pgprot) & _PAGE_MODIFIED_4U) + pmd_val(pmd) |= PMD_HUGE_DIRTY; + } + } + + return pmd; +} + +pmd_t pfn_pmd(unsigned long page_nr, pgprot_t pgprot) +{ + pmd_t pmd; + + pmd_val(pmd) = (page_nr << ((PAGE_SHIFT - PMD_PADDR_SHIFT))); + pmd_val(pmd) |= PMD_ISHUGE; + pmd = pmd_set_protbits(pmd, pgprot, false); + return pmd; +} + +pmd_t pmd_modify(pmd_t pmd, pgprot_t newprot) +{ + pmd_val(pmd) &= ~(PMD_HUGE_PRESENT | + PMD_HUGE_WRITE | + PMD_HUGE_EXEC); + pmd = pmd_set_protbits(pmd, newprot, true); + return pmd; +} + +pgprot_t pmd_pgprot(pmd_t entry) +{ + unsigned long pte = 0; + + if (pmd_val(entry) & PMD_HUGE_PRESENT) + pte |= _PAGE_VALID; + + if 
(tlb_type == hypervisor) { + if (pmd_val(entry) & PMD_HUGE_PRESENT) + pte |= _PAGE_PRESENT_4V; + if (pmd_val(entry) & PMD_HUGE_EXEC) + pte |= _PAGE_EXEC_4V; + if (pmd_val(entry) & PMD_HUGE_WRITE) + pte |= _PAGE_W_4V; + if (pmd_val(entry) & PMD_HUGE_ACCESSED) + pte |= _PAGE_ACCESSED_4V; + if (pmd_val(entry) & PMD_HUGE_DIRTY) + pte |= _PAGE_MODIFIED_4V; + pte |= _PAGE_CP_4V|_PAGE_CV_4V; + } else { + if (pmd_val(entry) & PMD_HUGE_PRESENT) + pte |= _PAGE_PRESENT_4U; + if (pmd_val(entry) & PMD_HUGE_EXEC) + pte |= _PAGE_EXEC_4U; + if (pmd_val(entry) & PMD_HUGE_WRITE) + pte |= _PAGE_W_4U; + if (pmd_val(entry) & PMD_HUGE_ACCESSED) + pte |= _PAGE_ACCESSED_4U; + if (pmd_val(entry) & PMD_HUGE_DIRTY) + pte |= _PAGE_MODIFIED_4U; + pte |= _PAGE_CP_4U|_PAGE_CV_4U; + } + + return __pgprot(pte); +} + +void update_mmu_cache_pmd(struct vm_area_struct *vma, unsigned long addr, + pmd_t *pmd) +{ + unsigned long pte, flags; + struct mm_struct *mm; + pmd_t entry = *pmd; + pgprot_t prot; + + if (!pmd_large(entry) || !pmd_young(entry)) + return; + + pte = (pmd_val(entry) & ~PMD_HUGE_PROTBITS); + pte <<= PMD_PADDR_SHIFT; + pte |= _PAGE_VALID; + + prot = pmd_pgprot(entry); + + if (tlb_type == hypervisor) + pgprot_val(prot) |= _PAGE_SZHUGE_4V; + else + pgprot_val(prot) |= _PAGE_SZHUGE_4U; + + pte |= pgprot_val(prot); + + mm = vma->vm_mm; + + spin_lock_irqsave(&mm->context.lock, flags); + + if (mm->context.tsb_block[MM_TSB_HUGE].tsb != NULL) + __update_mmu_tsb_insert(mm, MM_TSB_HUGE, HPAGE_SHIFT, + addr, pte); + + spin_unlock_irqrestore(&mm->context.lock, flags); +} +#endif /* CONFIG_TRANSPARENT_HUGEPAGE */ + +#if defined(CONFIG_HUGETLB_PAGE) || defined(CONFIG_TRANSPARENT_HUGEPAGE) +static void context_reload(void *__data) +{ + struct mm_struct *mm = __data; + + if (mm == current->mm) + load_secondary_context(mm); +} + +void hugetlb_setup(struct mm_struct *mm) +{ + struct tsb_config *tp = &mm->context.tsb_block[MM_TSB_HUGE]; + + if (likely(tp->tsb != NULL)) + return; + + tsb_grow(mm, MM_TSB_HUGE, 0); + tsb_context_switch(mm); + smp_tsb_sync(mm); + + /* On UltraSPARC-III+ and later, configure the second half of + * the Data-TLB for huge pages. + */ + if (tlb_type == cheetah_plus) { + unsigned long ctx; + + spin_lock(&ctx_alloc_lock); + ctx = mm->context.sparc64_ctx_val; + ctx &= ~CTX_PGSZ_MASK; + ctx |= CTX_PGSZ_BASE << CTX_PGSZ0_SHIFT; + ctx |= CTX_PGSZ_HUGE << CTX_PGSZ1_SHIFT; + + if (ctx != mm->context.sparc64_ctx_val) { + /* When changing the page size fields, we + * must perform a context flush so that no + * stale entries match. This flush must + * occur with the original context register + * settings. + */ + do_flush_tlb_mm(mm); + + /* Reload the context register of all processors + * also executing in this address space. 
+ */ + mm->context.sparc64_ctx_val = ctx; + on_each_cpu(context_reload, mm, 0); + } + spin_unlock(&ctx_alloc_lock); + } +} +#endif diff --git a/arch/sparc/mm/tlb.c b/arch/sparc/mm/tlb.c index b1f279cd00bf..3e8fec391fe0 100644 --- a/arch/sparc/mm/tlb.c +++ b/arch/sparc/mm/tlb.c @@ -43,16 +43,37 @@ void flush_tlb_pending(void) put_cpu_var(tlb_batch); } -void tlb_batch_add(struct mm_struct *mm, unsigned long vaddr, - pte_t *ptep, pte_t orig, int fullmm) +static void tlb_batch_add_one(struct mm_struct *mm, unsigned long vaddr, + bool exec) { struct tlb_batch *tb = &get_cpu_var(tlb_batch); unsigned long nr; vaddr &= PAGE_MASK; - if (pte_exec(orig)) + if (exec) vaddr |= 0x1UL; + nr = tb->tlb_nr; + + if (unlikely(nr != 0 && mm != tb->mm)) { + flush_tlb_pending(); + nr = 0; + } + + if (nr == 0) + tb->mm = mm; + + tb->vaddrs[nr] = vaddr; + tb->tlb_nr = ++nr; + if (nr >= TLB_BATCH_NR) + flush_tlb_pending(); + + put_cpu_var(tlb_batch); +} + +void tlb_batch_add(struct mm_struct *mm, unsigned long vaddr, + pte_t *ptep, pte_t orig, int fullmm) +{ if (tlb_type != hypervisor && pte_dirty(orig)) { unsigned long paddr, pfn = pte_pfn(orig); @@ -77,26 +98,91 @@ void tlb_batch_add(struct mm_struct *mm, unsigned long vaddr, } no_cache_flush: + if (!fullmm) + tlb_batch_add_one(mm, vaddr, pte_exec(orig)); +} + +#ifdef CONFIG_TRANSPARENT_HUGEPAGE +static void tlb_batch_pmd_scan(struct mm_struct *mm, unsigned long vaddr, + pmd_t pmd, bool exec) +{ + unsigned long end; + pte_t *pte; + + pte = pte_offset_map(&pmd, vaddr); + end = vaddr + HPAGE_SIZE; + while (vaddr < end) { + if (pte_val(*pte) & _PAGE_VALID) + tlb_batch_add_one(mm, vaddr, exec); + pte++; + vaddr += PAGE_SIZE; + } + pte_unmap(pte); +} - if (fullmm) { - put_cpu_var(tlb_batch); +void set_pmd_at(struct mm_struct *mm, unsigned long addr, + pmd_t *pmdp, pmd_t pmd) +{ + pmd_t orig = *pmdp; + + *pmdp = pmd; + + if (mm == &init_mm) return; + + if ((pmd_val(pmd) ^ pmd_val(orig)) & PMD_ISHUGE) { + if (pmd_val(pmd) & PMD_ISHUGE) + mm->context.huge_pte_count++; + else + mm->context.huge_pte_count--; + if (mm->context.huge_pte_count == 1) + hugetlb_setup(mm); } - nr = tb->tlb_nr; + if (!pmd_none(orig)) { + bool exec = ((pmd_val(orig) & PMD_HUGE_EXEC) != 0); - if (unlikely(nr != 0 && mm != tb->mm)) { - flush_tlb_pending(); - nr = 0; + addr &= HPAGE_MASK; + if (pmd_val(orig) & PMD_ISHUGE) + tlb_batch_add_one(mm, addr, exec); + else + tlb_batch_pmd_scan(mm, addr, orig, exec); } +} - if (nr == 0) - tb->mm = mm; +void pgtable_trans_huge_deposit(struct mm_struct *mm, pgtable_t pgtable) +{ + struct list_head *lh = (struct list_head *) pgtable; - tb->vaddrs[nr] = vaddr; - tb->tlb_nr = ++nr; - if (nr >= TLB_BATCH_NR) - flush_tlb_pending(); + assert_spin_locked(&mm->page_table_lock); - put_cpu_var(tlb_batch); + /* FIFO */ + if (!mm->pmd_huge_pte) + INIT_LIST_HEAD(lh); + else + list_add(lh, (struct list_head *) mm->pmd_huge_pte); + mm->pmd_huge_pte = pgtable; +} + +pgtable_t pgtable_trans_huge_withdraw(struct mm_struct *mm) +{ + struct list_head *lh; + pgtable_t pgtable; + + assert_spin_locked(&mm->page_table_lock); + + /* FIFO */ + pgtable = mm->pmd_huge_pte; + lh = (struct list_head *) pgtable; + if (list_empty(lh)) + mm->pmd_huge_pte = NULL; + else { + mm->pmd_huge_pte = (pgtable_t) lh->next; + list_del(lh); + } + pte_val(pgtable[0]) = 0; + pte_val(pgtable[1]) = 0; + + return pgtable; } +#endif /* CONFIG_TRANSPARENT_HUGEPAGE */ diff --git a/arch/sparc/mm/tsb.c b/arch/sparc/mm/tsb.c index a35ee832baf3..7f6474347491 100644 --- a/arch/sparc/mm/tsb.c +++ b/arch/sparc/mm/tsb.c 
@@ -78,7 +78,7 @@ void flush_tsb_user(struct tlb_batch *tb) base = __pa(base); __flush_tsb_one(tb, PAGE_SHIFT, base, nentries); -#ifdef CONFIG_HUGETLB_PAGE +#if defined(CONFIG_HUGETLB_PAGE) || defined(CONFIG_TRANSPARENT_HUGEPAGE) if (mm->context.tsb_block[MM_TSB_HUGE].tsb) { base = (unsigned long) mm->context.tsb_block[MM_TSB_HUGE].tsb; nentries = mm->context.tsb_block[MM_TSB_HUGE].tsb_nentries; @@ -93,7 +93,7 @@ void flush_tsb_user(struct tlb_batch *tb) #define HV_PGSZ_IDX_BASE HV_PGSZ_IDX_8K #define HV_PGSZ_MASK_BASE HV_PGSZ_MASK_8K -#ifdef CONFIG_HUGETLB_PAGE +#if defined(CONFIG_HUGETLB_PAGE) || defined(CONFIG_TRANSPARENT_HUGEPAGE) #define HV_PGSZ_IDX_HUGE HV_PGSZ_IDX_4MB #define HV_PGSZ_MASK_HUGE HV_PGSZ_MASK_4MB #endif @@ -190,7 +190,7 @@ static void setup_tsb_params(struct mm_struct *mm, unsigned long tsb_idx, unsign case MM_TSB_BASE: hp->pgsz_idx = HV_PGSZ_IDX_BASE; break; -#ifdef CONFIG_HUGETLB_PAGE +#if defined(CONFIG_HUGETLB_PAGE) || defined(CONFIG_TRANSPARENT_HUGEPAGE) case MM_TSB_HUGE: hp->pgsz_idx = HV_PGSZ_IDX_HUGE; break; @@ -205,7 +205,7 @@ static void setup_tsb_params(struct mm_struct *mm, unsigned long tsb_idx, unsign case MM_TSB_BASE: hp->pgsz_mask = HV_PGSZ_MASK_BASE; break; -#ifdef CONFIG_HUGETLB_PAGE +#if defined(CONFIG_HUGETLB_PAGE) || defined(CONFIG_TRANSPARENT_HUGEPAGE) case MM_TSB_HUGE: hp->pgsz_mask = HV_PGSZ_MASK_HUGE; break; @@ -427,7 +427,7 @@ retry_tsb_alloc: int init_new_context(struct task_struct *tsk, struct mm_struct *mm) { -#ifdef CONFIG_HUGETLB_PAGE +#if defined(CONFIG_HUGETLB_PAGE) || defined(CONFIG_TRANSPARENT_HUGEPAGE) unsigned long huge_pte_count; #endif unsigned int i; @@ -436,7 +436,7 @@ int init_new_context(struct task_struct *tsk, struct mm_struct *mm) mm->context.sparc64_ctx_val = 0UL; -#ifdef CONFIG_HUGETLB_PAGE +#if defined(CONFIG_HUGETLB_PAGE) || defined(CONFIG_TRANSPARENT_HUGEPAGE) /* We reset it to zero because the fork() page copying * will re-increment the counters as the parent PTEs are * copied into the child address space. @@ -459,7 +459,7 @@ int init_new_context(struct task_struct *tsk, struct mm_struct *mm) */ tsb_grow(mm, MM_TSB_BASE, get_mm_rss(mm)); -#ifdef CONFIG_HUGETLB_PAGE +#if defined(CONFIG_HUGETLB_PAGE) || defined(CONFIG_TRANSPARENT_HUGEPAGE) if (unlikely(huge_pte_count)) tsb_grow(mm, MM_TSB_HUGE, huge_pte_count); #endif -- cgit v1.2.2
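
For reference, the following is a minimal, standalone user-space sketch (not kernel code) of the 32-bit huge-PMD encoding described in the "sparc64: Support transparent huge pages" commit message above. The PMD_* constants are copied from the patch; the helper names (encode_huge_pmd, decode_huge_pmd_pfn) and the example physical address are hypothetical and exist only to illustrate the pfn_pmd()/pmd_pfn() arithmetic.

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

/* Constants copied from the patch above (sparc64: 8KB base pages, 4MB huge pages). */
#define PAGE_SHIFT		13
#define PMD_PADDR_SHIFT		11
#define PMD_ISHUGE		0x00000001u
#define PMD_HUGE_PADDR		0xfffff800u
#define PMD_HUGE_PRESENT	0x00000400u
#define PMD_HUGE_WRITE		0x00000200u

/* Hypothetical helpers mirroring the pfn_pmd()/pmd_pfn() arithmetic in the patch. */
static uint32_t encode_huge_pmd(uint64_t pfn, uint32_t protbits)
{
	/* A 4MB huge page is 4MB-aligned, so (pfn << PAGE_SHIFT) >> PMD_PADDR_SHIFT
	 * leaves the low 11 bits clear for PMD_ISHUGE plus the protection bits. */
	return (uint32_t)(pfn << (PAGE_SHIFT - PMD_PADDR_SHIFT)) | PMD_ISHUGE | protbits;
}

static uint64_t decode_huge_pmd_pfn(uint32_t pmd)
{
	return (uint64_t)(pmd & PMD_HUGE_PADDR) >> (PAGE_SHIFT - PMD_PADDR_SHIFT);
}

int main(void)
{
	uint64_t paddr = 0x12c00000ull;	/* arbitrary 4MB-aligned physical address */
	uint64_t pfn = paddr >> PAGE_SHIFT;
	uint32_t pmd = encode_huge_pmd(pfn, PMD_HUGE_PRESENT | PMD_HUGE_WRITE);

	assert(pmd & PMD_ISHUGE);
	assert(decode_huge_pmd_pfn(pmd) == pfn);
	printf("pmd=0x%08x pfn=0x%llx\n", pmd, (unsigned long long)decode_huge_pmd_pfn(pmd));
	return 0;
}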
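
Similarly, here is a hedged C rendering of what the USER_PGTABLE_CHECK_PMD_HUGE assembler macro added to tsb.h computes: given a huge PMD, it builds the final PTE by extracting the physical address, translating the EXEC and WRITE software bits, and OR-ing in the valid, cacheable and 4MB page-size bits. The PTE_* values below are illustrative placeholders standing in for the boot-time-patched _PAGE_*_4U/_PAGE_*_4V bits, and the function name is hypothetical; only the PMD_* constants come from the patch.

#include <stdint.h>

#define PMD_PADDR_SHIFT		11
#define PMD_ISHUGE		0x00000001u
#define PMD_HUGE_PROTBITS	0x000007ffu
#define PMD_HUGE_PRESENT	0x00000400u
#define PMD_HUGE_WRITE		0x00000200u
#define PMD_HUGE_ACCESSED	0x00000080u
#define PMD_HUGE_EXEC		0x00000040u

/* Placeholder PTE bits; the real kernel patches in _PAGE_*_4U or _PAGE_*_4V
 * at boot depending on sun4u vs sun4v, so these positions are illustrative only. */
#define PTE_EXEC_BIT			(1ull << 59)
#define PTE_WRITE_BIT			(1ull << 1)
#define PTE_VALID_CACHE_SZHUGE_BITS	((1ull << 63) | 0x30ull)

/* Returns 0 when the walker should fall through to the fault path. */
static uint64_t huge_pmd_to_pte(uint32_t pmd)
{
	uint64_t pte;

	if (!(pmd & PMD_ISHUGE))
		return 0;	/* not huge: continue the normal PTE-table walk */
	if ((pmd & (PMD_HUGE_PRESENT | PMD_HUGE_ACCESSED)) !=
	    (PMD_HUGE_PRESENT | PMD_HUGE_ACCESSED))
		return 0;	/* not present or not young: take the fault path */

	/* 1) Physical address of the huge page (PFN << PAGE_SHIFT). */
	pte = (uint64_t)(pmd & ~PMD_HUGE_PROTBITS) << PMD_PADDR_SHIFT;
	/* 2) Translate the two software protection bits the walker cares about. */
	if (pmd & PMD_HUGE_EXEC)
		pte |= PTE_EXEC_BIT;
	if (pmd & PMD_HUGE_WRITE)
		pte |= PTE_WRITE_BIT;
	/* 3) Valid, cacheable and 4MB page-size bits complete the PTE. */
	return pte | PTE_VALID_CACHE_SZHUGE_BITS;
}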