author    Suresh Siddha <suresh.b.siddha@intel.com>    2008-09-23 17:00:38 -0400
committer Ingo Molnar <mingo@elte.hu>    2008-10-10 13:29:13 -0400
commit    a2699e477b8e6b17d4da64916f766dd5a2576c9c (patch)
tree      44d9c0840cec212070a94f4097442c7cc7957522 /arch/x86/mm/init_32.c
parent    3a85e770aa77e4f1a4096275c97b64c10cd7323e (diff)
x86, cpa: make the kernel physical mapping initialization a two pass sequence
In the first pass, the kernel physical mapping will be set up using large or
small pages, but with the same PTE attributes as those set up by the early
boot code in head_[32|64].S.

After flushing the TLBs, we go through the second pass, which sets up the
direct-mapped PTEs with the appropriate attributes (like NX, GLOBAL etc.)
which are runtime detectable.

This two-pass mechanism conforms to the TLB app note which says:

"Software should not write to a paging-structure entry in a way that
would change, for any linear address, both the page size and either the
page frame or attributes."

Signed-off-by: Suresh Siddha <suresh.b.siddha@intel.com>
Cc: Suresh Siddha <suresh.b.siddha@intel.com>
Cc: arjan@linux.intel.com
Cc: venkatesh.pallipadi@intel.com
Cc: jeremy@goop.org
Signed-off-by: Ingo Molnar <mingo@elte.hu>
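[Editorial note: the following sketch is illustrative and not part of the commit.]
Stripped of the paging details, the patch's control flow is: walk the whole
range writing entries with conservative early attributes, flush the TLBs, then
repeat the identical walk rewriting only the attributes while the page frame
(and page size) stay fixed. Below is a minimal, compilable C sketch of that
shape under stated assumptions: struct toy_pte, ATTR_EARLY/ATTR_FINAL and
toy_flush_tlb() are invented stand-ins, not kernel interfaces; only the
mapping_iter / repeat: / goto repeat structure mirrors the actual patch.

#include <stdio.h>
#include <stdint.h>

#define NR_ENTRIES	8
#define ATTR_EARLY	0x1u	/* stand-in for the boot-time attributes from head_32.S */
#define ATTR_FINAL	0x3u	/* stand-in for runtime-detected bits (NX, GLOBAL, ...) */

struct toy_pte {
	uint32_t frame;		/* page frame: decided once, in pass 1 */
	uint32_t attrs;		/* attributes: rewritten in pass 2 */
};

static struct toy_pte table[NR_ENTRIES];

static void toy_flush_tlb(void)
{
	/* stand-in for __flush_tlb_all(): nothing cached from pass 1 survives */
	printf("-- tlb flush between passes --\n");
}

int main(void)
{
	int mapping_iter = 1;
	int i;

repeat:
	for (i = 0; i < NR_ENTRIES; i++) {
		/* the frame (and, in the real code, the page size) never
		 * changes after pass 1; only the attributes differ */
		table[i].frame = (uint32_t)i;
		table[i].attrs = (mapping_iter == 1) ? ATTR_EARLY : ATTR_FINAL;
	}
	printf("pass %d: entry 0 -> frame %u, attrs %#x\n",
	       mapping_iter, table[0].frame, table[0].attrs);

	if (mapping_iter == 1) {
		toy_flush_tlb();	/* flush before the attributes change */
		mapping_iter = 2;
		goto repeat;		/* second pass rewrites attributes only */
	}
	return 0;
}

Because the flush sits between the passes, no TLB entry can ever observe a
paging-structure entry whose page size changed together with its frame or
attributes, which is exactly the app-note constraint quoted above.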
Diffstat (limited to 'arch/x86/mm/init_32.c')
-rw-r--r--	arch/x86/mm/init_32.c	65
1 file changed, 60 insertions(+), 5 deletions(-)
diff --git a/arch/x86/mm/init_32.c b/arch/x86/mm/init_32.c
index d37f29376b0c..9b5f7d7049d0 100644
--- a/arch/x86/mm/init_32.c
+++ b/arch/x86/mm/init_32.c
@@ -194,11 +194,30 @@ static void __init kernel_physical_mapping_init(pgd_t *pgd_base,
 	pgd_t *pgd;
 	pmd_t *pmd;
 	pte_t *pte;
-	unsigned pages_2m = 0, pages_4k = 0;
+	unsigned pages_2m, pages_4k;
+	int mapping_iter;
+
+	/*
+	 * First iteration will setup identity mapping using large/small pages
+	 * based on use_pse, with other attributes same as set by
+	 * the early code in head_32.S
+	 *
+	 * Second iteration will setup the appropriate attributes (NX, GLOBAL..)
+	 * as desired for the kernel identity mapping.
+	 *
+	 * This two pass mechanism conforms to the TLB app note which says:
+	 *
+	 *     "Software should not write to a paging-structure entry in a way
+	 *      that would change, for any linear address, both the page size
+	 *      and either the page frame or attributes."
+	 */
+	mapping_iter = 1;
 
 	if (!cpu_has_pse)
 		use_pse = 0;
 
+repeat:
+	pages_2m = pages_4k = 0;
 	pfn = start_pfn;
 	pgd_idx = pgd_index((pfn<<PAGE_SHIFT) + PAGE_OFFSET);
 	pgd = pgd_base + pgd_idx;
@@ -224,6 +243,13 @@ static void __init kernel_physical_mapping_init(pgd_t *pgd_base,
 			if (use_pse) {
 				unsigned int addr2;
 				pgprot_t prot = PAGE_KERNEL_LARGE;
+				/*
+				 * first pass will use the same initial
+				 * identity mapping attribute + _PAGE_PSE.
+				 */
+				pgprot_t init_prot =
+					__pgprot(PTE_IDENT_ATTR |
+						 _PAGE_PSE);
 
 				addr2 = (pfn + PTRS_PER_PTE-1) * PAGE_SIZE +
 					PAGE_OFFSET + PAGE_SIZE-1;
@@ -233,7 +259,10 @@ static void __init kernel_physical_mapping_init(pgd_t *pgd_base,
 					prot = PAGE_KERNEL_LARGE_EXEC;
 
 				pages_2m++;
-				set_pmd(pmd, pfn_pmd(pfn, prot));
+				if (mapping_iter == 1)
+					set_pmd(pmd, pfn_pmd(pfn, init_prot));
+				else
+					set_pmd(pmd, pfn_pmd(pfn, prot));
 
 				pfn += PTRS_PER_PTE;
 				continue;
@@ -245,17 +274,43 @@ static void __init kernel_physical_mapping_init(pgd_t *pgd_base,
 			for (; pte_ofs < PTRS_PER_PTE && pfn < end_pfn;
 			     pte++, pfn++, pte_ofs++, addr += PAGE_SIZE) {
 				pgprot_t prot = PAGE_KERNEL;
+				/*
+				 * first pass will use the same initial
+				 * identity mapping attribute.
+				 */
+				pgprot_t init_prot = __pgprot(PTE_IDENT_ATTR);
 
 				if (is_kernel_text(addr))
 					prot = PAGE_KERNEL_EXEC;
 
 				pages_4k++;
-				set_pte(pte, pfn_pte(pfn, prot));
+				if (mapping_iter == 1)
+					set_pte(pte, pfn_pte(pfn, init_prot));
+				else
+					set_pte(pte, pfn_pte(pfn, prot));
 			}
 		}
 	}
-	update_page_count(PG_LEVEL_2M, pages_2m);
-	update_page_count(PG_LEVEL_4K, pages_4k);
+	if (mapping_iter == 1) {
+		/*
+		 * update direct mapping page count only in the first
+		 * iteration.
+		 */
+		update_page_count(PG_LEVEL_2M, pages_2m);
+		update_page_count(PG_LEVEL_4K, pages_4k);
+
+		/*
+		 * local global flush tlb, which will flush the previous
+		 * mappings present in both small and large page TLB's.
+		 */
+		__flush_tlb_all();
+
+		/*
+		 * Second iteration will set the actual desired PTE attributes.
+		 */
+		mapping_iter = 2;
+		goto repeat;
+	}
 }
 
 /*