author	Luis R. Rodriguez <mcgrof@suse.com>	2015-03-04 20:24:12 -0500
committer	Ingo Molnar <mingo@kernel.org>	2015-03-05 02:02:12 -0500
commit	e5008abe929c160d36e44b8c2b644d4330d2e389 (patch)
tree	615a41d918d9ce3c623716925b0d25a1fb543d65
parent	d9fd579c218e22c897f0f1b9e132af9b436cf445 (diff)
x86/mm: Simplify enabling direct_gbpages
direct_gbpages can be force-enabled as an early parameter and yet
never take effect when DEBUG_PAGEALLOC or KMEMCHECK is enabled. It
can also currently be enabled on x86_64 even when the CPU has no
support for the feature. In both cases PG_LEVEL_1G never actually
gets set, yet direct_gbpages is consulted elsewhere under the
assumption that PG_LEVEL_1G was set. Fix this by gathering in one
place all the requirements that make the feature sensible to enable,
and only when they all hold flip on PG_LEVEL_1G and leave
direct_gbpages set.

The feature is now only possible on sensible builds, as captured by
the new ENABLE_DIRECT_GBPAGES symbol. If the CPU supports it, it can
be enabled either through the DIRECT_GBPAGES option or with the
gbpages early kernel parameter; on a platform that supports it, it
can likewise always be force-disabled with nogbpages.
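
To make the new gating concrete, here is a stand-alone C sketch of the
decision this patch centralizes in init_gbpages(). The three inputs are
mocked stand-ins for the Kconfig symbol, the CPU feature bit, and the
gbpages/nogbpages command line, not the kernel's actual variables:

#include <stdio.h>

#define PG_LEVEL_1G 3	/* mirrors x86's enum pg_level */

/* Mocked inputs: stand-ins for Kconfig, CPUID and the boot command line. */
static int enable_direct_gbpages = 1;	/* CONFIG_ENABLE_DIRECT_GBPAGES */
static int cpu_has_gbpages = 1;		/* CPU advertises 1GB page support */
static int direct_gbpages = 1;		/* CONFIG_DIRECT_GBPAGES / "gbpages" */

static int page_size_mask;

static void init_gbpages(void)
{
	if (!enable_direct_gbpages) {	/* DEBUG_PAGEALLOC or KMEMCHECK build */
		direct_gbpages = 0;
		return;
	}
	if (direct_gbpages && cpu_has_gbpages)
		page_size_mask |= 1 << PG_LEVEL_1G;
	else
		direct_gbpages = 0;
}

int main(void)
{
	init_gbpages();
	/* Invariant after the patch: direct_gbpages != 0 iff the 1G bit is set. */
	printf("direct_gbpages=%d PG_LEVEL_1G=%d\n",
	       direct_gbpages, !!(page_size_mask & (1 << PG_LEVEL_1G)));
	return 0;
}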
Signed-off-by: Luis R. Rodriguez <mcgrof@suse.com>
Cc: Andrew Morton <akpm@linux-foundation.org>
Cc: Andy Lutomirski <luto@amacapital.net>
Cc: Borislav Petkov <bp@alien8.de>
Cc: Borislav Petkov <bp@suse.de>
Cc: Dave Hansen <dave.hansen@linux.intel.com>
Cc: David Vrabel <david.vrabel@citrix.com>
Cc: Dexuan Cui <decui@microsoft.com>
Cc: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Cc: H. Peter Anvin <hpa@zytor.com>
Cc: Jan Beulich <JBeulich@suse.com>
Cc: Joonsoo Kim <iamjoonsoo.kim@lge.com>
Cc: Juergen Gross <jgross@suse.com>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Pavel Machek <pavel@ucw.cz>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: Tony Lindgren <tony@atomide.com>
Cc: Toshi Kani <toshi.kani@hp.com>
Cc: Vlastimil Babka <vbabka@suse.cz>
Cc: Xishi Qiu <qiuxishi@huawei.com>
Cc: julia.lawall@lip6.fr
Link: http://lkml.kernel.org/r/1425518654-3403-3-git-send-email-mcgrof@do-not-panic.com
Signed-off-by: Ingo Molnar <mingo@kernel.org>
-rw-r--r--	arch/x86/Kconfig	18
-rw-r--r--	arch/x86/mm/init.c	17
-rw-r--r--	arch/x86/mm/pageattr.c	2
3 files changed, 22 insertions, 15 deletions
diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig
index c2fb8a87dccb..4d06e1c8294a 100644
--- a/arch/x86/Kconfig
+++ b/arch/x86/Kconfig
@@ -1299,14 +1299,22 @@ config ARCH_DMA_ADDR_T_64BIT
 	def_bool y
 	depends on X86_64 || HIGHMEM64G
 
+config ENABLE_DIRECT_GBPAGES
+	def_bool y
+	depends on X86_64 && !DEBUG_PAGEALLOC && !KMEMCHECK
+
 config DIRECT_GBPAGES
 	bool "Enable 1GB pages for kernel pagetables" if EXPERT
 	default y
-	depends on X86_64
+	depends on ENABLE_DIRECT_GBPAGES
 	---help---
-	  Allow the kernel linear mapping to use 1GB pages on CPUs that
-	  support it. This can improve the kernel's performance a tiny bit by
-	  reducing TLB pressure. If in doubt, say "Y".
+	  Enable by default the kernel linear mapping to use 1GB pages on CPUs
+	  that support it. This can improve the kernel's performance a tiny bit
+	  by reducing TLB pressure. If in doubt, say "Y". If you've disabled
+	  option but your platform is capable of handling support for this
+	  you can use the gbpages kernel parameter. Likewise if you've enabled
+	  this but you'd like to force disable this option you can use the
+	  nogbpages kernel parameter.
 
 # Common NUMA Features
 config NUMA
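
A note on how the new symbol is consumed: init.c tests it with a plain C
if rather than #ifdef, which works because boolean Kconfig options become
"#define CONFIG_FOO 1" when set and stay undefined otherwise, and
IS_ENABLED() folds that to a constant 0 or 1. A simplified, self-contained
sketch of the trick, modeled on include/linux/kconfig.h (the real macro
also checks the =m variant):

#include <stdio.h>

#define CONFIG_ENABLE_DIRECT_GBPAGES 1	/* comment out to model a disabled build */

#define __ARG_PLACEHOLDER_1 0,
#define __take_second_arg(ignored, val, ...) val
#define ____is_defined(arg1_or_junk) __take_second_arg(arg1_or_junk 1, 0)
#define ___is_defined(val) ____is_defined(__ARG_PLACEHOLDER_##val)
#define __is_defined(x) ___is_defined(x)
#define IS_ENABLED(option) __is_defined(option)

int main(void)
{
	/* Folds to a constant 0/1, so the dead branch is compiled out while
	 * still being parsed and type-checked, unlike an #ifdef block. */
	if (!IS_ENABLED(CONFIG_ENABLE_DIRECT_GBPAGES))
		printf("GB pages gated off at build time\n");
	else
		printf("GB pages possible on this build\n");
	return 0;
}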
diff --git a/arch/x86/mm/init.c b/arch/x86/mm/init.c
index 74f2b37fd073..2ce2c8e8c99c 100644
--- a/arch/x86/mm/init.c
+++ b/arch/x86/mm/init.c
@@ -131,16 +131,21 @@ void __init early_alloc_pgt_buf(void)
 
 int after_bootmem;
 
+static int page_size_mask;
+
 int direct_gbpages = IS_ENABLED(CONFIG_DIRECT_GBPAGES);
 
 static void __init init_gbpages(void)
 {
-#ifdef CONFIG_X86_64
-	if (direct_gbpages && cpu_has_gbpages)
+	if (!IS_ENABLED(CONFIG_ENABLE_DIRECT_GBPAGES)) {
+		direct_gbpages = 0;
+		return;
+	}
+	if (direct_gbpages && cpu_has_gbpages) {
 		printk(KERN_INFO "Using GB pages for direct mapping\n");
-	else
+		page_size_mask |= 1 << PG_LEVEL_1G;
+	} else
 		direct_gbpages = 0;
-#endif
 }
 
 struct map_range {
@@ -149,8 +154,6 @@ struct map_range {
 	unsigned page_size_mask;
 };
 
-static int page_size_mask;
-
 static void __init probe_page_size_mask(void)
 {
 	init_gbpages();
@@ -161,8 +164,6 @@ static void __init probe_page_size_mask(void)
 	 * This will simplify cpa(), which otherwise needs to support splitting
 	 * large pages into small in interrupt context, etc.
 	 */
-	if (direct_gbpages)
-		page_size_mask |= 1 << PG_LEVEL_1G;
 	if (cpu_has_pse)
 		page_size_mask |= 1 << PG_LEVEL_2M;
 #endif
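
With page_size_mask moved above init_gbpages(), the function can set the
PG_LEVEL_1G bit itself, and probe_page_size_mask() no longer re-derives
it from direct_gbpages. A small sketch of how such a mask is decoded
downstream (the enum values mirror x86's enum pg_level; the mask here is
a made-up example rather than one probed from hardware):

#include <stdio.h>

/* Values mirror x86's enum pg_level. */
enum pg_level { PG_LEVEL_NONE, PG_LEVEL_4K, PG_LEVEL_2M, PG_LEVEL_1G };

int main(void)
{
	/* Example mask: what init_gbpages()/probe_page_size_mask() would
	 * leave behind on a gbpages-capable CPU with PSE. */
	int page_size_mask = (1 << PG_LEVEL_2M) | (1 << PG_LEVEL_1G);

	if (page_size_mask & (1 << PG_LEVEL_1G))
		printf("direct mapping may use 1GB pages\n");
	if (page_size_mask & (1 << PG_LEVEL_2M))
		printf("direct mapping may use 2MB pages\n");
	return 0;
}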
diff --git a/arch/x86/mm/pageattr.c b/arch/x86/mm/pageattr.c
index 81e8282d8c2f..89af288ec674 100644
--- a/arch/x86/mm/pageattr.c
+++ b/arch/x86/mm/pageattr.c
@@ -81,11 +81,9 @@ void arch_report_meminfo(struct seq_file *m)
 	seq_printf(m, "DirectMap4M:    %8lu kB\n",
 			direct_pages_count[PG_LEVEL_2M] << 12);
 #endif
-#ifdef CONFIG_X86_64
 	if (direct_gbpages)
 		seq_printf(m, "DirectMap1G:    %8lu kB\n",
 			   direct_pages_count[PG_LEVEL_1G] << 20);
-#endif
 }
 #else
 static inline void split_page_count(int level) { }
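
These counters surface as the DirectMap* lines of /proc/meminfo; with
this change the 1G line simply stays hidden whenever direct_gbpages
ended up 0, no #ifdef needed. A short user-space sketch that reads them
back (x86-specific; the DirectMap1G line appears only when GB pages are
in use):

#include <stdio.h>
#include <string.h>

int main(void)
{
	FILE *f = fopen("/proc/meminfo", "r");
	char line[256];

	if (!f)
		return 1;
	while (fgets(line, sizeof(line), f))
		if (strncmp(line, "DirectMap", 9) == 0)
			fputs(line, stdout);	/* e.g. "DirectMap1G: ... kB" */
	fclose(f);
	return 0;
}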