diff options
author | Suzuki K. Poulose <suzuki.poulose@arm.com> | 2015-10-19 09:19:28 -0400 |
---|---|---|
committer | Catalin Marinas <catalin.marinas@arm.com> | 2015-10-19 12:52:36 -0400 |
commit | b433dce056d3814dc4b33e5a8a533d6401ffcfb0 (patch) | |
tree | fdc010394500646237015d7f28af40c94ea323f3 | |
parent | 87d1587bef394cd8a77dbca8cc92885fe7041b8f (diff) |
arm64: Handle section maps for swapper/idmap
We use section maps with 4K page size to create the swapper/idmaps.
So far we have used !64K or 4K checks to handle the case where we
use the section maps.
This patch adds a new symbol, ARM64_SWAPPER_USES_SECTION_MAPS, to
handle cases where we use section maps, instead of using the page size
symbols.
Cc: Will Deacon <will.deacon@arm.com>
Signed-off-by: Suzuki K. Poulose <suzuki.poulose@arm.com>
Reviewed-by: Ard Biesheuvel <ard.biesheuvel@linaro.org>
Acked-by: Mark Rutland <mark.rutland@arm.com>
Signed-off-by: Catalin Marinas <catalin.marinas@arm.com>
-rw-r--r-- | arch/arm64/include/asm/kernel-pgtable.h | 37 | ||||
-rw-r--r-- | arch/arm64/mm/mmu.c | 74 |
2 files changed, 59 insertions(+), 52 deletions(-)
diff --git a/arch/arm64/include/asm/kernel-pgtable.h b/arch/arm64/include/asm/kernel-pgtable.h index 3c92fa7bad2b..4e08faabd9e4 100644 --- a/arch/arm64/include/asm/kernel-pgtable.h +++ b/arch/arm64/include/asm/kernel-pgtable.h | |||
@@ -19,6 +19,19 @@ | |||
19 | #ifndef __ASM_KERNEL_PGTABLE_H | 19 | #ifndef __ASM_KERNEL_PGTABLE_H |
20 | #define __ASM_KERNEL_PGTABLE_H | 20 | #define __ASM_KERNEL_PGTABLE_H |
21 | 21 | ||
22 | |||
23 | /* | ||
24 | * The linear mapping and the start of memory are both 2M aligned (per | ||
25 | * the arm64 booting.txt requirements). Hence we can use section mapping | ||
26 | * with 4K (section size = 2M) but not with 16K (section size = 32M) or | ||
27 | * 64K (section size = 512M). | ||
28 | */ | ||
29 | #ifdef CONFIG_ARM64_4K_PAGES | ||
30 | #define ARM64_SWAPPER_USES_SECTION_MAPS 1 | ||
31 | #else | ||
32 | #define ARM64_SWAPPER_USES_SECTION_MAPS 0 | ||
33 | #endif | ||
34 | |||
22 | /* | 35 | /* |
23 | * The idmap and swapper page tables need some space reserved in the kernel | 36 | * The idmap and swapper page tables need some space reserved in the kernel |
24 | * image. Both require pgd, pud (4 levels only) and pmd tables to (section) | 37 | * image. Both require pgd, pud (4 levels only) and pmd tables to (section) |
@@ -28,26 +41,28 @@ | |||
28 | * could be increased on the fly if system RAM is out of reach for the default | 41 | * could be increased on the fly if system RAM is out of reach for the default |
29 | * VA range, so 3 pages are reserved in all cases. | 42 | * VA range, so 3 pages are reserved in all cases. |
30 | */ | 43 | */ |
31 | #ifdef CONFIG_ARM64_64K_PAGES | 44 | #if ARM64_SWAPPER_USES_SECTION_MAPS |
32 | #define SWAPPER_PGTABLE_LEVELS (CONFIG_PGTABLE_LEVELS) | ||
33 | #else | ||
34 | #define SWAPPER_PGTABLE_LEVELS (CONFIG_PGTABLE_LEVELS - 1) | 45 | #define SWAPPER_PGTABLE_LEVELS (CONFIG_PGTABLE_LEVELS - 1) |
46 | #else | ||
47 | #define SWAPPER_PGTABLE_LEVELS (CONFIG_PGTABLE_LEVELS) | ||
35 | #endif | 48 | #endif |
36 | 49 | ||
37 | #define SWAPPER_DIR_SIZE (SWAPPER_PGTABLE_LEVELS * PAGE_SIZE) | 50 | #define SWAPPER_DIR_SIZE (SWAPPER_PGTABLE_LEVELS * PAGE_SIZE) |
38 | #define IDMAP_DIR_SIZE (3 * PAGE_SIZE) | 51 | #define IDMAP_DIR_SIZE (3 * PAGE_SIZE) |
39 | 52 | ||
40 | /* Initial memory map size */ | 53 | /* Initial memory map size */ |
41 | #ifdef CONFIG_ARM64_64K_PAGES | 54 | #if ARM64_SWAPPER_USES_SECTION_MAPS |
42 | #define SWAPPER_BLOCK_SHIFT PAGE_SHIFT | ||
43 | #define SWAPPER_BLOCK_SIZE PAGE_SIZE | ||
44 | #define SWAPPER_TABLE_SHIFT PMD_SHIFT | ||
45 | #else | ||
46 | #define SWAPPER_BLOCK_SHIFT SECTION_SHIFT | 55 | #define SWAPPER_BLOCK_SHIFT SECTION_SHIFT |
47 | #define SWAPPER_BLOCK_SIZE SECTION_SIZE | 56 | #define SWAPPER_BLOCK_SIZE SECTION_SIZE |
48 | #define SWAPPER_TABLE_SHIFT PUD_SHIFT | 57 | #define SWAPPER_TABLE_SHIFT PUD_SHIFT |
58 | #else | ||
59 | #define SWAPPER_BLOCK_SHIFT PAGE_SHIFT | ||
60 | #define SWAPPER_BLOCK_SIZE PAGE_SIZE | ||
61 | #define SWAPPER_TABLE_SHIFT PMD_SHIFT | ||
49 | #endif | 62 | #endif |
50 | 63 | ||
64 | /* The size of the initial kernel direct mapping */ | ||
65 | #define SWAPPER_INIT_MAP_SIZE (_AC(1, UL) << SWAPPER_TABLE_SHIFT) | ||
51 | 66 | ||
52 | /* | 67 | /* |
53 | * Initial memory map attributes. | 68 | * Initial memory map attributes. |
@@ -55,10 +70,10 @@ | |||
55 | #define SWAPPER_PTE_FLAGS (PTE_TYPE_PAGE | PTE_AF | PTE_SHARED) | 70 | #define SWAPPER_PTE_FLAGS (PTE_TYPE_PAGE | PTE_AF | PTE_SHARED) |
56 | #define SWAPPER_PMD_FLAGS (PMD_TYPE_SECT | PMD_SECT_AF | PMD_SECT_S) | 71 | #define SWAPPER_PMD_FLAGS (PMD_TYPE_SECT | PMD_SECT_AF | PMD_SECT_S) |
57 | 72 | ||
58 | #ifdef CONFIG_ARM64_64K_PAGES | 73 | #if ARM64_SWAPPER_USES_SECTION_MAPS |
59 | #define SWAPPER_MM_MMUFLAGS (PTE_ATTRINDX(MT_NORMAL) | SWAPPER_PTE_FLAGS) | ||
60 | #else | ||
61 | #define SWAPPER_MM_MMUFLAGS (PMD_ATTRINDX(MT_NORMAL) | SWAPPER_PMD_FLAGS) | 74 | #define SWAPPER_MM_MMUFLAGS (PMD_ATTRINDX(MT_NORMAL) | SWAPPER_PMD_FLAGS) |
75 | #else | ||
76 | #define SWAPPER_MM_MMUFLAGS (PTE_ATTRINDX(MT_NORMAL) | SWAPPER_PTE_FLAGS) | ||
62 | #endif | 77 | #endif |
63 | 78 | ||
64 | 79 | ||
diff --git a/arch/arm64/mm/mmu.c b/arch/arm64/mm/mmu.c index eed6d52f5e54..c2fa6b56613c 100644 --- a/arch/arm64/mm/mmu.c +++ b/arch/arm64/mm/mmu.c | |||
@@ -32,6 +32,7 @@ | |||
32 | 32 | ||
33 | #include <asm/cputype.h> | 33 | #include <asm/cputype.h> |
34 | #include <asm/fixmap.h> | 34 | #include <asm/fixmap.h> |
35 | #include <asm/kernel-pgtable.h> | ||
35 | #include <asm/sections.h> | 36 | #include <asm/sections.h> |
36 | #include <asm/setup.h> | 37 | #include <asm/setup.h> |
37 | #include <asm/sizes.h> | 38 | #include <asm/sizes.h> |
@@ -406,14 +407,11 @@ static void __init map_mem(void) | |||
406 | * memory addressable from the initial direct kernel mapping. | 407 | * memory addressable from the initial direct kernel mapping. |
407 | * | 408 | * |
408 | * The initial direct kernel mapping, located at swapper_pg_dir, gives | 409 | * The initial direct kernel mapping, located at swapper_pg_dir, gives |
409 | * us PUD_SIZE (4K pages) or PMD_SIZE (64K pages) memory starting from | 410 | * us PUD_SIZE (with SECTION maps) or PMD_SIZE (without SECTION maps, |
410 | * PHYS_OFFSET (which must be aligned to 2MB as per | 411 | * memory starting from PHYS_OFFSET (which must be aligned to 2MB as |
411 | * Documentation/arm64/booting.txt). | 412 | * per Documentation/arm64/booting.txt). |
412 | */ | 413 | */ |
413 | if (IS_ENABLED(CONFIG_ARM64_64K_PAGES)) | 414 | limit = PHYS_OFFSET + SWAPPER_INIT_MAP_SIZE; |
414 | limit = PHYS_OFFSET + PMD_SIZE; | ||
415 | else | ||
416 | limit = PHYS_OFFSET + PUD_SIZE; | ||
417 | memblock_set_current_limit(limit); | 415 | memblock_set_current_limit(limit); |
418 | 416 | ||
419 | /* map all the memory banks */ | 417 | /* map all the memory banks */ |
@@ -424,21 +422,24 @@ static void __init map_mem(void) | |||
424 | if (start >= end) | 422 | if (start >= end) |
425 | break; | 423 | break; |
426 | 424 | ||
427 | #ifndef CONFIG_ARM64_64K_PAGES | 425 | if (ARM64_SWAPPER_USES_SECTION_MAPS) { |
428 | /* | 426 | /* |
429 | * For the first memory bank align the start address and | 427 | * For the first memory bank align the start address and |
430 | * current memblock limit to prevent create_mapping() from | 428 | * current memblock limit to prevent create_mapping() from |
431 | * allocating pte page tables from unmapped memory. | 429 | * allocating pte page tables from unmapped memory. With |
432 | * When 64K pages are enabled, the pte page table for the | 430 | * the section maps, if the first block doesn't end on section |
433 | * first PGDIR_SIZE is already present in swapper_pg_dir. | 431 | * size boundary, create_mapping() will try to allocate a pte |
434 | */ | 432 | * page, which may be returned from an unmapped area. |
435 | if (start < limit) | 433 | * When section maps are not used, the pte page table for the |
436 | start = ALIGN(start, PMD_SIZE); | 434 | * current limit is already present in swapper_pg_dir. |
437 | if (end < limit) { | 435 | */ |
438 | limit = end & PMD_MASK; | 436 | if (start < limit) |
439 | memblock_set_current_limit(limit); | 437 | start = ALIGN(start, SECTION_SIZE); |
438 | if (end < limit) { | ||
439 | limit = end & SECTION_MASK; | ||
440 | memblock_set_current_limit(limit); | ||
441 | } | ||
440 | } | 442 | } |
441 | #endif | ||
442 | __map_memblock(start, end); | 443 | __map_memblock(start, end); |
443 | } | 444 | } |
444 | 445 | ||
@@ -551,12 +552,12 @@ int kern_addr_valid(unsigned long addr) | |||
551 | return pfn_valid(pte_pfn(*pte)); | 552 | return pfn_valid(pte_pfn(*pte)); |
552 | } | 553 | } |
553 | #ifdef CONFIG_SPARSEMEM_VMEMMAP | 554 | #ifdef CONFIG_SPARSEMEM_VMEMMAP |
554 | #ifdef CONFIG_ARM64_64K_PAGES | 555 | #if !ARM64_SWAPPER_USES_SECTION_MAPS |
555 | int __meminit vmemmap_populate(unsigned long start, unsigned long end, int node) | 556 | int __meminit vmemmap_populate(unsigned long start, unsigned long end, int node) |
556 | { | 557 | { |
557 | return vmemmap_populate_basepages(start, end, node); | 558 | return vmemmap_populate_basepages(start, end, node); |
558 | } | 559 | } |
559 | #else /* !CONFIG_ARM64_64K_PAGES */ | 560 | #else /* !ARM64_SWAPPER_USES_SECTION_MAPS */ |
560 | int __meminit vmemmap_populate(unsigned long start, unsigned long end, int node) | 561 | int __meminit vmemmap_populate(unsigned long start, unsigned long end, int node) |
561 | { | 562 | { |
562 | unsigned long addr = start; | 563 | unsigned long addr = start; |
@@ -691,7 +692,7 @@ void *__init fixmap_remap_fdt(phys_addr_t dt_phys) | |||
691 | { | 692 | { |
692 | const u64 dt_virt_base = __fix_to_virt(FIX_FDT); | 693 | const u64 dt_virt_base = __fix_to_virt(FIX_FDT); |
693 | pgprot_t prot = PAGE_KERNEL | PTE_RDONLY; | 694 | pgprot_t prot = PAGE_KERNEL | PTE_RDONLY; |
694 | int granularity, size, offset; | 695 | int size, offset; |
695 | void *dt_virt; | 696 | void *dt_virt; |
696 | 697 | ||
697 | /* | 698 | /* |
@@ -717,24 +718,15 @@ void *__init fixmap_remap_fdt(phys_addr_t dt_phys) | |||
717 | */ | 718 | */ |
718 | BUILD_BUG_ON(dt_virt_base % SZ_2M); | 719 | BUILD_BUG_ON(dt_virt_base % SZ_2M); |
719 | 720 | ||
720 | if (IS_ENABLED(CONFIG_ARM64_64K_PAGES)) { | 721 | BUILD_BUG_ON(__fix_to_virt(FIX_FDT_END) >> SWAPPER_TABLE_SHIFT != |
721 | BUILD_BUG_ON(__fix_to_virt(FIX_FDT_END) >> PMD_SHIFT != | 722 | __fix_to_virt(FIX_BTMAP_BEGIN) >> SWAPPER_TABLE_SHIFT); |
722 | __fix_to_virt(FIX_BTMAP_BEGIN) >> PMD_SHIFT); | ||
723 | |||
724 | granularity = PAGE_SIZE; | ||
725 | } else { | ||
726 | BUILD_BUG_ON(__fix_to_virt(FIX_FDT_END) >> PUD_SHIFT != | ||
727 | __fix_to_virt(FIX_BTMAP_BEGIN) >> PUD_SHIFT); | ||
728 | |||
729 | granularity = PMD_SIZE; | ||
730 | } | ||
731 | 723 | ||
732 | offset = dt_phys % granularity; | 724 | offset = dt_phys % SWAPPER_BLOCK_SIZE; |
733 | dt_virt = (void *)dt_virt_base + offset; | 725 | dt_virt = (void *)dt_virt_base + offset; |
734 | 726 | ||
735 | /* map the first chunk so we can read the size from the header */ | 727 | /* map the first chunk so we can read the size from the header */ |
736 | create_mapping(round_down(dt_phys, granularity), dt_virt_base, | 728 | create_mapping(round_down(dt_phys, SWAPPER_BLOCK_SIZE), dt_virt_base, |
737 | granularity, prot); | 729 | SWAPPER_BLOCK_SIZE, prot); |
738 | 730 | ||
739 | if (fdt_check_header(dt_virt) != 0) | 731 | if (fdt_check_header(dt_virt) != 0) |
740 | return NULL; | 732 | return NULL; |
@@ -743,9 +735,9 @@ void *__init fixmap_remap_fdt(phys_addr_t dt_phys) | |||
743 | if (size > MAX_FDT_SIZE) | 735 | if (size > MAX_FDT_SIZE) |
744 | return NULL; | 736 | return NULL; |
745 | 737 | ||
746 | if (offset + size > granularity) | 738 | if (offset + size > SWAPPER_BLOCK_SIZE) |
747 | create_mapping(round_down(dt_phys, granularity), dt_virt_base, | 739 | create_mapping(round_down(dt_phys, SWAPPER_BLOCK_SIZE), dt_virt_base, |
748 | round_up(offset + size, granularity), prot); | 740 | round_up(offset + size, SWAPPER_BLOCK_SIZE), prot); |
749 | 741 | ||
750 | memblock_reserve(dt_phys, size); | 742 | memblock_reserve(dt_phys, size); |
751 | 743 | ||