author    Thomas Garnier <thgarnie@google.com>    2016-06-21 20:46:59 -0400
committer Ingo Molnar <mingo@kernel.org>          2016-07-08 11:33:46 -0400
commit    59b3d0206d74a700069e49160e8194b2ca93b703
tree      b6501cd124d668335f29d59740035058e076815a
parent    d899a7d146a2ed8a7e6c2f61bcd232908bcbaabc
x86/mm: Update physical mapping variable names
Change the variable names in kernel_physical_mapping_init() and related
functions to correctly reflect physical and virtual memory addresses.
Also add comments on each function to describe usage and alignment
constraints.

Signed-off-by: Thomas Garnier <thgarnie@google.com>
Signed-off-by: Kees Cook <keescook@chromium.org>
Cc: Alexander Kuleshov <kuleshovmail@gmail.com>
Cc: Alexander Popov <alpopov@ptsecurity.com>
Cc: Andrew Morton <akpm@linux-foundation.org>
Cc: Andy Lutomirski <luto@kernel.org>
Cc: Aneesh Kumar K.V <aneesh.kumar@linux.vnet.ibm.com>
Cc: Baoquan He <bhe@redhat.com>
Cc: Boris Ostrovsky <boris.ostrovsky@oracle.com>
Cc: Borislav Petkov <bp@alien8.de>
Cc: Borislav Petkov <bp@suse.de>
Cc: Brian Gerst <brgerst@gmail.com>
Cc: Christian Borntraeger <borntraeger@de.ibm.com>
Cc: Dan Williams <dan.j.williams@intel.com>
Cc: Dave Hansen <dave.hansen@linux.intel.com>
Cc: Dave Young <dyoung@redhat.com>
Cc: Denys Vlasenko <dvlasenk@redhat.com>
Cc: Dmitry Vyukov <dvyukov@google.com>
Cc: H. Peter Anvin <hpa@zytor.com>
Cc: Jan Beulich <JBeulich@suse.com>
Cc: Joerg Roedel <jroedel@suse.de>
Cc: Jonathan Corbet <corbet@lwn.net>
Cc: Josh Poimboeuf <jpoimboe@redhat.com>
Cc: Juergen Gross <jgross@suse.com>
Cc: Kirill A. Shutemov <kirill.shutemov@linux.intel.com>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Lv Zheng <lv.zheng@intel.com>
Cc: Mark Salter <msalter@redhat.com>
Cc: Martin Schwidefsky <schwidefsky@de.ibm.com>
Cc: Matt Fleming <matt@codeblueprint.co.uk>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Stephen Smalley <sds@tycho.nsa.gov>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: Toshi Kani <toshi.kani@hpe.com>
Cc: Xiao Guangrong <guangrong.xiao@linux.intel.com>
Cc: Yinghai Lu <yinghai@kernel.org>
Cc: kernel-hardening@lists.openwall.com
Cc: linux-doc@vger.kernel.org
Link: http://lkml.kernel.org/r/1466556426-32664-3-git-send-email-keescook@chromium.org
Signed-off-by: Ingo Molnar <mingo@kernel.org>
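[Editor's note: the rename below follows one pattern at every page-table level:
the loop cursor is a physical address (paddr), each iteration steps to the start
of the next page-sized region (paddr_next), and the function returns the last
physical address it mapped (paddr_last). The user-space program below is a
minimal sketch of that loop shape, not kernel code: map_range() is a
hypothetical stand-in for phys_pte_init(), the 4 KiB PAGE_SIZE is an assumption
for illustration, and the actual PTE write is elided as a comment.

/*
 * Minimal user-space sketch of the paddr/paddr_next/paddr_last naming
 * pattern this patch adopts. Not kernel code; PAGE_SIZE is assumed 4 KiB.
 */
#include <stdio.h>

#define PAGE_SIZE 4096UL
#define PAGE_MASK (~(PAGE_SIZE - 1))

static unsigned long map_range(unsigned long paddr, unsigned long paddr_end)
{
	unsigned long paddr_next;
	unsigned long paddr_last = paddr_end;

	for (; paddr < paddr_end; paddr = paddr_next) {
		/* Step to the next page boundary, as phys_pte_init() does. */
		paddr_next = (paddr & PAGE_MASK) + PAGE_SIZE;

		/* set_pte(...) would install the mapping here in the kernel. */
		paddr_last = (paddr & PAGE_MASK) + PAGE_SIZE;
	}
	return paddr_last;
}

int main(void)
{
	/* A three-page range starting mid-page: the last mapped address is 0x4000. */
	printf("paddr_last = %#lx\n", map_range(0x1800, 0x4000));
	return 0;
}

Run against a range that starts mid-page, it returns 0x4000, mirroring how the
kernel loop rounds the cursor down to a page boundary before stepping.]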
Diffstat (limited to 'arch/x86/mm')
-rw-r--r--  arch/x86/mm/init_64.c  162
1 file changed, 96 insertions(+), 66 deletions(-)
diff --git a/arch/x86/mm/init_64.c b/arch/x86/mm/init_64.c
index bce2e5d9edd4..6714712bd5da 100644
--- a/arch/x86/mm/init_64.c
+++ b/arch/x86/mm/init_64.c
@@ -328,22 +328,30 @@ void __init cleanup_highmap(void)
 	}
 }
 
+/*
+ * Create PTE level page table mapping for physical addresses.
+ * It returns the last physical address mapped.
+ */
 static unsigned long __meminit
-phys_pte_init(pte_t *pte_page, unsigned long addr, unsigned long end,
+phys_pte_init(pte_t *pte_page, unsigned long paddr, unsigned long paddr_end,
 	      pgprot_t prot)
 {
-	unsigned long pages = 0, next;
-	unsigned long last_map_addr = end;
+	unsigned long pages = 0, paddr_next;
+	unsigned long paddr_last = paddr_end;
+	pte_t *pte;
 	int i;
 
-	pte_t *pte = pte_page + pte_index(addr);
+	pte = pte_page + pte_index(paddr);
+	i = pte_index(paddr);
 
-	for (i = pte_index(addr); i < PTRS_PER_PTE; i++, addr = next, pte++) {
-		next = (addr & PAGE_MASK) + PAGE_SIZE;
-		if (addr >= end) {
+	for (; i < PTRS_PER_PTE; i++, paddr = paddr_next, pte++) {
+		paddr_next = (paddr & PAGE_MASK) + PAGE_SIZE;
+		if (paddr >= paddr_end) {
 			if (!after_bootmem &&
-			    !e820_any_mapped(addr & PAGE_MASK, next, E820_RAM) &&
-			    !e820_any_mapped(addr & PAGE_MASK, next, E820_RESERVED_KERN))
+			    !e820_any_mapped(paddr & PAGE_MASK, paddr_next,
+					     E820_RAM) &&
+			    !e820_any_mapped(paddr & PAGE_MASK, paddr_next,
+					     E820_RESERVED_KERN))
 				set_pte(pte, __pte(0));
 			continue;
 		}
@@ -361,37 +369,44 @@ phys_pte_init(pte_t *pte_page, unsigned long addr, unsigned long end,
 		}
 
 		if (0)
-			printk("   pte=%p addr=%lx pte=%016lx\n",
-			       pte, addr, pfn_pte(addr >> PAGE_SHIFT, PAGE_KERNEL).pte);
+			pr_info("   pte=%p addr=%lx pte=%016lx\n", pte, paddr,
+				pfn_pte(paddr >> PAGE_SHIFT, PAGE_KERNEL).pte);
 		pages++;
-		set_pte(pte, pfn_pte(addr >> PAGE_SHIFT, prot));
-		last_map_addr = (addr & PAGE_MASK) + PAGE_SIZE;
+		set_pte(pte, pfn_pte(paddr >> PAGE_SHIFT, prot));
+		paddr_last = (paddr & PAGE_MASK) + PAGE_SIZE;
 	}
 
 	update_page_count(PG_LEVEL_4K, pages);
 
-	return last_map_addr;
+	return paddr_last;
 }
 
+/*
+ * Create PMD level page table mapping for physical addresses. The virtual
+ * and physical address have to be aligned at this level.
+ * It returns the last physical address mapped.
+ */
 static unsigned long __meminit
-phys_pmd_init(pmd_t *pmd_page, unsigned long address, unsigned long end,
+phys_pmd_init(pmd_t *pmd_page, unsigned long paddr, unsigned long paddr_end,
 	      unsigned long page_size_mask, pgprot_t prot)
 {
-	unsigned long pages = 0, next;
-	unsigned long last_map_addr = end;
+	unsigned long pages = 0, paddr_next;
+	unsigned long paddr_last = paddr_end;
 
-	int i = pmd_index(address);
+	int i = pmd_index(paddr);
 
-	for (; i < PTRS_PER_PMD; i++, address = next) {
-		pmd_t *pmd = pmd_page + pmd_index(address);
+	for (; i < PTRS_PER_PMD; i++, paddr = paddr_next) {
+		pmd_t *pmd = pmd_page + pmd_index(paddr);
 		pte_t *pte;
 		pgprot_t new_prot = prot;
 
-		next = (address & PMD_MASK) + PMD_SIZE;
-		if (address >= end) {
+		paddr_next = (paddr & PMD_MASK) + PMD_SIZE;
+		if (paddr >= paddr_end) {
 			if (!after_bootmem &&
-			    !e820_any_mapped(address & PMD_MASK, next, E820_RAM) &&
-			    !e820_any_mapped(address & PMD_MASK, next, E820_RESERVED_KERN))
+			    !e820_any_mapped(paddr & PMD_MASK, paddr_next,
+					     E820_RAM) &&
+			    !e820_any_mapped(paddr & PMD_MASK, paddr_next,
+					     E820_RESERVED_KERN))
 				set_pmd(pmd, __pmd(0));
 			continue;
 		}
@@ -400,8 +415,8 @@ phys_pmd_init(pmd_t *pmd_page, unsigned long address, unsigned long end,
 			if (!pmd_large(*pmd)) {
 				spin_lock(&init_mm.page_table_lock);
 				pte = (pte_t *)pmd_page_vaddr(*pmd);
-				last_map_addr = phys_pte_init(pte, address,
-								end, prot);
+				paddr_last = phys_pte_init(pte, paddr,
+							   paddr_end, prot);
 				spin_unlock(&init_mm.page_table_lock);
 				continue;
 			}
@@ -420,7 +435,7 @@ phys_pmd_init(pmd_t *pmd_page, unsigned long address, unsigned long end,
 		if (page_size_mask & (1 << PG_LEVEL_2M)) {
 			if (!after_bootmem)
 				pages++;
-			last_map_addr = next;
+			paddr_last = paddr_next;
 			continue;
 		}
 		new_prot = pte_pgprot(pte_clrhuge(*(pte_t *)pmd));
@@ -430,42 +445,49 @@ phys_pmd_init(pmd_t *pmd_page, unsigned long address, unsigned long end,
 			pages++;
 			spin_lock(&init_mm.page_table_lock);
 			set_pte((pte_t *)pmd,
-				pfn_pte((address & PMD_MASK) >> PAGE_SHIFT,
+				pfn_pte((paddr & PMD_MASK) >> PAGE_SHIFT,
 					__pgprot(pgprot_val(prot) | _PAGE_PSE)));
 			spin_unlock(&init_mm.page_table_lock);
-			last_map_addr = next;
+			paddr_last = paddr_next;
 			continue;
 		}
 
 		pte = alloc_low_page();
-		last_map_addr = phys_pte_init(pte, address, end, new_prot);
+		paddr_last = phys_pte_init(pte, paddr, paddr_end, new_prot);
 
 		spin_lock(&init_mm.page_table_lock);
 		pmd_populate_kernel(&init_mm, pmd, pte);
 		spin_unlock(&init_mm.page_table_lock);
 	}
 	update_page_count(PG_LEVEL_2M, pages);
-	return last_map_addr;
+	return paddr_last;
 }
 
+/*
+ * Create PUD level page table mapping for physical addresses. The virtual
+ * and physical address have to be aligned at this level.
+ * It returns the last physical address mapped.
+ */
 static unsigned long __meminit
-phys_pud_init(pud_t *pud_page, unsigned long addr, unsigned long end,
+phys_pud_init(pud_t *pud_page, unsigned long paddr, unsigned long paddr_end,
 	      unsigned long page_size_mask)
 {
-	unsigned long pages = 0, next;
-	unsigned long last_map_addr = end;
-	int i = pud_index(addr);
+	unsigned long pages = 0, paddr_next;
+	unsigned long paddr_last = paddr_end;
+	int i = pud_index(paddr);
 
-	for (; i < PTRS_PER_PUD; i++, addr = next) {
-		pud_t *pud = pud_page + pud_index(addr);
+	for (; i < PTRS_PER_PUD; i++, paddr = paddr_next) {
+		pud_t *pud = pud_page + pud_index(paddr);
 		pmd_t *pmd;
 		pgprot_t prot = PAGE_KERNEL;
 
-		next = (addr & PUD_MASK) + PUD_SIZE;
-		if (addr >= end) {
+		paddr_next = (paddr & PUD_MASK) + PUD_SIZE;
+		if (paddr >= paddr_end) {
 			if (!after_bootmem &&
-			    !e820_any_mapped(addr & PUD_MASK, next, E820_RAM) &&
-			    !e820_any_mapped(addr & PUD_MASK, next, E820_RESERVED_KERN))
+			    !e820_any_mapped(paddr & PUD_MASK, paddr_next,
+					     E820_RAM) &&
+			    !e820_any_mapped(paddr & PUD_MASK, paddr_next,
+					     E820_RESERVED_KERN))
 				set_pud(pud, __pud(0));
 			continue;
 		}
@@ -473,8 +495,10 @@ phys_pud_init(pud_t *pud_page, unsigned long addr, unsigned long end,
 		if (pud_val(*pud)) {
 			if (!pud_large(*pud)) {
 				pmd = pmd_offset(pud, 0);
-				last_map_addr = phys_pmd_init(pmd, addr, end,
-							 page_size_mask, prot);
+				paddr_last = phys_pmd_init(pmd, paddr,
+							   paddr_end,
+							   page_size_mask,
+							   prot);
 				__flush_tlb_all();
 				continue;
 			}
@@ -493,7 +517,7 @@ phys_pud_init(pud_t *pud_page, unsigned long addr, unsigned long end,
 		if (page_size_mask & (1 << PG_LEVEL_1G)) {
 			if (!after_bootmem)
 				pages++;
-			last_map_addr = next;
+			paddr_last = paddr_next;
 			continue;
 		}
 		prot = pte_pgprot(pte_clrhuge(*(pte_t *)pud));
@@ -503,16 +527,16 @@ phys_pud_init(pud_t *pud_page, unsigned long addr, unsigned long end,
 			pages++;
 			spin_lock(&init_mm.page_table_lock);
 			set_pte((pte_t *)pud,
-				pfn_pte((addr & PUD_MASK) >> PAGE_SHIFT,
+				pfn_pte((paddr & PUD_MASK) >> PAGE_SHIFT,
 					PAGE_KERNEL_LARGE));
 			spin_unlock(&init_mm.page_table_lock);
-			last_map_addr = next;
+			paddr_last = paddr_next;
 			continue;
 		}
 
 		pmd = alloc_low_page();
-		last_map_addr = phys_pmd_init(pmd, addr, end, page_size_mask,
-					      prot);
+		paddr_last = phys_pmd_init(pmd, paddr, paddr_end,
+					   page_size_mask, prot);
 
 		spin_lock(&init_mm.page_table_lock);
 		pud_populate(&init_mm, pud, pmd);
@@ -522,38 +546,44 @@ phys_pud_init(pud_t *pud_page, unsigned long addr, unsigned long end,
 
 	update_page_count(PG_LEVEL_1G, pages);
 
-	return last_map_addr;
+	return paddr_last;
 }
 
+/*
+ * Create page table mapping for the physical memory for specific physical
+ * addresses. The virtual and physical addresses have to be aligned on PUD level
+ * down. It returns the last physical address mapped.
+ */
 unsigned long __meminit
-kernel_physical_mapping_init(unsigned long start,
-			     unsigned long end,
+kernel_physical_mapping_init(unsigned long paddr_start,
+			     unsigned long paddr_end,
 			     unsigned long page_size_mask)
 {
 	bool pgd_changed = false;
-	unsigned long next, last_map_addr = end;
-	unsigned long addr;
+	unsigned long vaddr, vaddr_start, vaddr_end, vaddr_next, paddr_last;
 
-	start = (unsigned long)__va(start);
-	end = (unsigned long)__va(end);
-	addr = start;
+	paddr_last = paddr_end;
+	vaddr = (unsigned long)__va(paddr_start);
+	vaddr_end = (unsigned long)__va(paddr_end);
+	vaddr_start = vaddr;
 
-	for (; start < end; start = next) {
-		pgd_t *pgd = pgd_offset_k(start);
+	for (; vaddr < vaddr_end; vaddr = vaddr_next) {
+		pgd_t *pgd = pgd_offset_k(vaddr);
 		pud_t *pud;
 
-		next = (start & PGDIR_MASK) + PGDIR_SIZE;
+		vaddr_next = (vaddr & PGDIR_MASK) + PGDIR_SIZE;
 
 		if (pgd_val(*pgd)) {
 			pud = (pud_t *)pgd_page_vaddr(*pgd);
-			last_map_addr = phys_pud_init(pud, __pa(start),
-						 __pa(end), page_size_mask);
+			paddr_last = phys_pud_init(pud, __pa(vaddr),
						   __pa(vaddr_end),
+						   page_size_mask);
 			continue;
 		}
 
 		pud = alloc_low_page();
-		last_map_addr = phys_pud_init(pud, __pa(start), __pa(end),
+		paddr_last = phys_pud_init(pud, __pa(vaddr), __pa(vaddr_end),
 					      page_size_mask);
 
 		spin_lock(&init_mm.page_table_lock);
 		pgd_populate(&init_mm, pgd, pud);
@@ -562,11 +592,11 @@ kernel_physical_mapping_init(unsigned long start,
 	}
 
 	if (pgd_changed)
-		sync_global_pgds(addr, end - 1, 0);
+		sync_global_pgds(vaddr_start, vaddr_end - 1, 0);
 
 	__flush_tlb_all();
 
-	return last_map_addr;
+	return paddr_last;
 }
 
 #ifndef CONFIG_NUMA