author     Reza Arbab <arbab@linux.vnet.ibm.com>    2017-01-16 14:07:43 -0500
committer  Michael Ellerman <mpe@ellerman.id.au>    2017-01-30 21:54:18 -0500
commit     b5200ec9edf038459619fce9988842efa751a2c5
tree       9ecb40992af97b8afbf1f9a833eb634bf1118fc5
parent     023b13a50183d9cfc4fc5a66cb1f773ace22024c
powerpc/mm: refactor radix physical page mapping
Move the page mapping code in radix_init_pgtable() into a separate
function that will also be used for memory hotplug.
The current goto loop progressively decreases its mapping size as it
covers the tail of a range whose end is unaligned. Change this to a for
loop which can do the same for both ends of the range.
Signed-off-by: Reza Arbab <arbab@linux.vnet.ibm.com>
Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
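The effect of the new loop on a range that is unaligned at both ends can be sketched in a few lines of standalone C. This is illustrative only: pick_mapping_size() and the SZ_* constants are stand-ins for the kernel's PUD_SIZE/PMD_SIZE/PAGE_SIZE and mmu_psize_defs checks, and the printf mirrors what print_mapping() reports.

    #include <stdio.h>

    #define SZ_4K   0x1000UL
    #define SZ_2M   0x200000UL
    #define SZ_1G   0x40000000UL

    #define IS_ALIGNED(x, a)        (((x) & ((a) - 1)) == 0)

    /* Largest mapping whose alignment and size both fit the remaining gap. */
    static unsigned long pick_mapping_size(unsigned long addr, unsigned long end)
    {
            unsigned long gap = end - addr;

            if (IS_ALIGNED(addr, SZ_1G) && gap >= SZ_1G)
                    return SZ_1G;
            if (IS_ALIGNED(addr, SZ_2M) && gap >= SZ_2M)
                    return SZ_2M;
            return SZ_4K;
    }

    int main(void)
    {
            /* A range unaligned at both ends: 2M-aligned start, 4K tail. */
            unsigned long start = SZ_2M, end = 2 * SZ_1G + SZ_4K;
            unsigned long addr, size = 0, prev;

            for (addr = start; addr < end; addr += size) {
                    prev = size;
                    size = pick_mapping_size(addr, end);
                    /* Report each run of same-sized mappings, as print_mapping() does. */
                    if (size != prev) {
                            if (addr > start)
                                    printf("0x%lx - 0x%lx with 0x%lx\n",
                                           start, addr, prev);
                            start = addr;
                    }
            }
            printf("0x%lx - 0x%lx with 0x%lx\n", start, addr, size);
            return 0;
    }

With these numbers it prints three ranges: 2M mappings up to the first 1G boundary, a 1G mapping across the middle, and a single 4K page for the tail - the size fallback the old goto loop applied only to the tail of a range.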
 arch/powerpc/mm/pgtable-radix.c | 88 ++++++++++++++++++++++-----------------
 1 file changed, 50 insertions(+), 38 deletions(-)
diff --git a/arch/powerpc/mm/pgtable-radix.c b/arch/powerpc/mm/pgtable-radix.c
index 086522b7c60f..c0365eca7f81 100644
--- a/arch/powerpc/mm/pgtable-radix.c
+++ b/arch/powerpc/mm/pgtable-radix.c
@@ -108,54 +108,66 @@ set_the_pte:
 	return 0;
 }
 
+static inline void __meminit print_mapping(unsigned long start,
+					   unsigned long end,
+					   unsigned long size)
+{
+	if (end <= start)
+		return;
+
+	pr_info("Mapped range 0x%lx - 0x%lx with 0x%lx\n", start, end, size);
+}
+
+static int __meminit create_physical_mapping(unsigned long start,
+					     unsigned long end)
+{
+	unsigned long addr, mapping_size = 0;
+
+	start = _ALIGN_UP(start, PAGE_SIZE);
+	for (addr = start; addr < end; addr += mapping_size) {
+		unsigned long gap, previous_size;
+		int rc;
+
+		gap = end - addr;
+		previous_size = mapping_size;
+
+		if (IS_ALIGNED(addr, PUD_SIZE) && gap >= PUD_SIZE &&
+		    mmu_psize_defs[MMU_PAGE_1G].shift)
+			mapping_size = PUD_SIZE;
+		else if (IS_ALIGNED(addr, PMD_SIZE) && gap >= PMD_SIZE &&
+			 mmu_psize_defs[MMU_PAGE_2M].shift)
+			mapping_size = PMD_SIZE;
+		else
+			mapping_size = PAGE_SIZE;
+
+		if (mapping_size != previous_size) {
+			print_mapping(start, addr, previous_size);
+			start = addr;
+		}
+
+		rc = radix__map_kernel_page((unsigned long)__va(addr), addr,
+					    PAGE_KERNEL_X, mapping_size);
+		if (rc)
+			return rc;
+	}
+
+	print_mapping(start, addr, mapping_size);
+	return 0;
+}
+
 static void __init radix_init_pgtable(void)
 {
-	int loop_count;
-	u64 base, end, start_addr;
 	unsigned long rts_field;
 	struct memblock_region *reg;
-	unsigned long linear_page_size;
 
 	/* We don't support slb for radix */
 	mmu_slb_size = 0;
 	/*
 	 * Create the linear mapping, using standard page size for now
 	 */
-	loop_count = 0;
-	for_each_memblock(memory, reg) {
-
-		start_addr = reg->base;
-
-redo:
-		if (loop_count < 1 && mmu_psize_defs[MMU_PAGE_1G].shift)
-			linear_page_size = PUD_SIZE;
-		else if (loop_count < 2 && mmu_psize_defs[MMU_PAGE_2M].shift)
-			linear_page_size = PMD_SIZE;
-		else
-			linear_page_size = PAGE_SIZE;
-
-		base = _ALIGN_UP(start_addr, linear_page_size);
-		end = _ALIGN_DOWN(reg->base + reg->size, linear_page_size);
-
-		pr_info("Mapping range 0x%lx - 0x%lx with 0x%lx\n",
-			(unsigned long)base, (unsigned long)end,
-			linear_page_size);
-
-		while (base < end) {
-			radix__map_kernel_page((unsigned long)__va(base),
-					       base, PAGE_KERNEL_X,
-					       linear_page_size);
-			base += linear_page_size;
-		}
-		/*
-		 * map the rest using lower page size
-		 */
-		if (end < reg->base + reg->size) {
-			start_addr = end;
-			loop_count++;
-			goto redo;
-		}
-	}
+	for_each_memblock(memory, reg)
+		WARN_ON(create_physical_mapping(reg->base,
+						reg->base + reg->size));
 	/*
 	 * Allocate Partition table and process table for the
 	 * host.