Diffstat (limited to 'arch/x86/mm/init.c')
 arch/x86/mm/init.c | 68
 1 file changed, 31 insertions(+), 37 deletions(-)
diff --git a/arch/x86/mm/init.c b/arch/x86/mm/init.c
index 52417e771af9..1d553186c434 100644
--- a/arch/x86/mm/init.c
+++ b/arch/x86/mm/init.c
@@ -29,29 +29,33 @@
 
 /*
  * Tables translating between page_cache_type_t and pte encoding.
- * Minimal supported modes are defined statically, modified if more supported
- * cache modes are available.
- * Index into __cachemode2pte_tbl is the cachemode.
- * Index into __pte2cachemode_tbl are the caching attribute bits of the pte
- * (_PAGE_PWT, _PAGE_PCD, _PAGE_PAT) at index bit positions 0, 1, 2.
+ *
+ * Minimal supported modes are defined statically, they are modified
+ * during bootup if more supported cache modes are available.
+ *
+ * Index into __cachemode2pte_tbl[] is the cachemode.
+ *
+ * Index into __pte2cachemode_tbl[] are the caching attribute bits of the pte
+ * (_PAGE_PWT, _PAGE_PCD, _PAGE_PAT) at index bit positions 0, 1, 2.
  */
 uint16_t __cachemode2pte_tbl[_PAGE_CACHE_MODE_NUM] = {
-        [_PAGE_CACHE_MODE_WB]        = 0,
-        [_PAGE_CACHE_MODE_WC]        = _PAGE_PWT,
-        [_PAGE_CACHE_MODE_UC_MINUS]  = _PAGE_PCD,
-        [_PAGE_CACHE_MODE_UC]        = _PAGE_PCD | _PAGE_PWT,
-        [_PAGE_CACHE_MODE_WT]        = _PAGE_PCD,
-        [_PAGE_CACHE_MODE_WP]        = _PAGE_PCD,
+        [_PAGE_CACHE_MODE_WB      ]  = 0         | 0        ,
+        [_PAGE_CACHE_MODE_WC      ]  = _PAGE_PWT | 0        ,
+        [_PAGE_CACHE_MODE_UC_MINUS]  = 0         | _PAGE_PCD,
+        [_PAGE_CACHE_MODE_UC      ]  = _PAGE_PWT | _PAGE_PCD,
+        [_PAGE_CACHE_MODE_WT      ]  = 0         | _PAGE_PCD,
+        [_PAGE_CACHE_MODE_WP      ]  = 0         | _PAGE_PCD,
 };
 EXPORT_SYMBOL(__cachemode2pte_tbl);
+
 uint8_t __pte2cachemode_tbl[8] = {
-        [__pte2cm_idx(0)] = _PAGE_CACHE_MODE_WB,
-        [__pte2cm_idx(_PAGE_PWT)] = _PAGE_CACHE_MODE_WC,
-        [__pte2cm_idx(_PAGE_PCD)] = _PAGE_CACHE_MODE_UC_MINUS,
-        [__pte2cm_idx(_PAGE_PWT | _PAGE_PCD)] = _PAGE_CACHE_MODE_UC,
-        [__pte2cm_idx(_PAGE_PAT)] = _PAGE_CACHE_MODE_WB,
-        [__pte2cm_idx(_PAGE_PWT | _PAGE_PAT)] = _PAGE_CACHE_MODE_WC,
-        [__pte2cm_idx(_PAGE_PCD | _PAGE_PAT)] = _PAGE_CACHE_MODE_UC_MINUS,
-        [__pte2cm_idx(_PAGE_PWT | _PAGE_PCD | _PAGE_PAT)] = _PAGE_CACHE_MODE_UC,
+        [__pte2cm_idx( 0        | 0         | 0        )] = _PAGE_CACHE_MODE_WB,
+        [__pte2cm_idx(_PAGE_PWT | 0         | 0        )] = _PAGE_CACHE_MODE_WC,
+        [__pte2cm_idx( 0        | _PAGE_PCD | 0        )] = _PAGE_CACHE_MODE_UC_MINUS,
+        [__pte2cm_idx(_PAGE_PWT | _PAGE_PCD | 0        )] = _PAGE_CACHE_MODE_UC,
+        [__pte2cm_idx( 0        | 0         | _PAGE_PAT)] = _PAGE_CACHE_MODE_WB,
+        [__pte2cm_idx(_PAGE_PWT | 0         | _PAGE_PAT)] = _PAGE_CACHE_MODE_WC,
+        [__pte2cm_idx( 0        | _PAGE_PCD | _PAGE_PAT)] = _PAGE_CACHE_MODE_UC_MINUS,
+        [__pte2cm_idx(_PAGE_PWT | _PAGE_PCD | _PAGE_PAT)] = _PAGE_CACHE_MODE_UC,
 };
 EXPORT_SYMBOL(__pte2cachemode_tbl);
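The reworded comment spells out the indexing scheme: __cachemode2pte_tbl[] is indexed by cache mode, while __pte2cachemode_tbl[] is indexed by the three pte caching-attribute bits folded down to index bits 0, 1 and 2. As a rough, standalone illustration of that folding (not the kernel's __pte2cm_idx() macro itself; the pte bit positions 3, 4 and 7 for PWT, PCD and PAT are assumed here):

#include <stdint.h>
#include <stdio.h>

/* Assumed x86 pte bit positions for the caching attributes. */
#define PTE_BIT_PWT  3
#define PTE_BIT_PCD  4
#define PTE_BIT_PAT  7

/* Fold PWT/PCD/PAT into a 3-bit table index: PWT -> bit 0, PCD -> bit 1, PAT -> bit 2. */
static unsigned int pte_to_cachemode_index(uint64_t pte)
{
        return (unsigned int)(((pte >> PTE_BIT_PWT) & 1) |
                              (((pte >> PTE_BIT_PCD) & 1) << 1) |
                              (((pte >> PTE_BIT_PAT) & 1) << 2));
}

int main(void)
{
        /* PWT and PCD set, PAT clear -> index 3, i.e. the UC slot in the table above. */
        uint64_t pte = (1ULL << PTE_BIT_PWT) | (1ULL << PTE_BIT_PCD);

        printf("index = %u\n", pte_to_cachemode_index(pte));
        return 0;
}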
@@ -131,21 +135,7 @@ void __init early_alloc_pgt_buf(void)
 
 int after_bootmem;
 
-int direct_gbpages
-#ifdef CONFIG_DIRECT_GBPAGES
-                                = 1
-#endif
-;
-
-static void __init init_gbpages(void)
-{
-#ifdef CONFIG_X86_64
-        if (direct_gbpages && cpu_has_gbpages)
-                printk(KERN_INFO "Using GB pages for direct mapping\n");
-        else
-                direct_gbpages = 0;
-#endif
-}
+early_param_on_off("gbpages", "nogbpages", direct_gbpages, CONFIG_X86_DIRECT_GBPAGES);
 
 struct map_range {
         unsigned long start;
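The early_param_on_off() line above collapses the old #ifdef-based initializer and the init_gbpages() helper into a single declaration: the "gbpages" boot option forces direct_gbpages on, "nogbpages" forces it off, and CONFIG_X86_DIRECT_GBPAGES supplies the default. As a sketch only, this is roughly what such a pairing looks like when open-coded with plain early_param() callbacks (the handler names are illustrative, not taken from the kernel):

#include <linux/init.h>

extern int direct_gbpages;      /* default assumed to come from the Kconfig option */

static int __init parse_gbpages_on(char *arg)
{
        direct_gbpages = 1;
        return 0;
}
early_param("gbpages", parse_gbpages_on);

static int __init parse_gbpages_off(char *arg)
{
        direct_gbpages = 0;
        return 0;
}
early_param("nogbpages", parse_gbpages_off);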
@@ -157,16 +147,12 @@ static int page_size_mask;
 
 static void __init probe_page_size_mask(void)
 {
-        init_gbpages();
-
 #if !defined(CONFIG_DEBUG_PAGEALLOC) && !defined(CONFIG_KMEMCHECK)
         /*
          * For CONFIG_DEBUG_PAGEALLOC, identity mapping will use small pages.
          * This will simplify cpa(), which otherwise needs to support splitting
          * large pages into small in interrupt context, etc.
          */
-        if (direct_gbpages)
-                page_size_mask |= 1 << PG_LEVEL_1G;
         if (cpu_has_pse)
                 page_size_mask |= 1 << PG_LEVEL_2M;
 #endif
@@ -181,6 +167,14 @@ static void __init probe_page_size_mask(void)
                 __supported_pte_mask |= _PAGE_GLOBAL;
         } else
                 __supported_pte_mask &= ~_PAGE_GLOBAL;
+
+        /* Enable 1 GB linear kernel mappings if available: */
+        if (direct_gbpages && cpu_has_gbpages) {
+                printk(KERN_INFO "Using GB pages for direct mapping\n");
+                page_size_mask |= 1 << PG_LEVEL_1G;
+        } else {
+                direct_gbpages = 0;
+        }
 }
 
 #ifdef CONFIG_X86_32
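With this hunk, probe_page_size_mask() both prints the "Using GB pages" message and sets PG_LEVEL_1G in page_size_mask in one place, instead of splitting that work with init_gbpages(). The mask is consulted later when the direct mapping is laid out, with each set level permitting a larger mapping granule. A minimal standalone sketch of that idea, using illustrative level constants and sizes rather than the kernel's actual PG_LEVEL_* values or range-splitting logic:

#include <stdio.h>

/* Illustrative page-level bits, standing in for PG_LEVEL_2M / PG_LEVEL_1G. */
enum { LEVEL_2M = 1, LEVEL_1G = 2 };

/* Pick the largest mapping size the probed mask allows. */
static unsigned long granule_for(unsigned int page_size_mask)
{
        if (page_size_mask & (1U << LEVEL_1G))
                return 1UL << 30;       /* 1 GB pages */
        if (page_size_mask & (1U << LEVEL_2M))
                return 1UL << 21;       /* 2 MB pages */
        return 1UL << 12;               /* 4 KB base pages */
}

int main(void)
{
        unsigned int mask = (1U << LEVEL_2M) | (1U << LEVEL_1G);

        printf("mapping granule: %lu bytes\n", granule_for(mask));
        return 0;
}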