Diffstat (limited to 'arch/sh/mm/init.c')
-rw-r--r--  arch/sh/mm/init.c | 173
1 file changed, 169 insertions, 4 deletions
diff --git a/arch/sh/mm/init.c b/arch/sh/mm/init.c
index c505de61a5c..46f84de6246 100644
--- a/arch/sh/mm/init.c
+++ b/arch/sh/mm/init.c
@@ -2,7 +2,7 @@
  * linux/arch/sh/mm/init.c
  *
  * Copyright (C) 1999  Niibe Yutaka
- * Copyright (C) 2002 - 2007  Paul Mundt
+ * Copyright (C) 2002 - 2010  Paul Mundt
  *
  * Based on linux/arch/i386/mm/init.c:
  *  Copyright (C) 1995  Linus Torvalds
@@ -16,17 +16,31 @@
 #include <linux/pagemap.h>
 #include <linux/percpu.h>
 #include <linux/io.h>
+#include <linux/lmb.h>
 #include <linux/dma-mapping.h>
 #include <asm/mmu_context.h>
+#include <asm/mmzone.h>
+#include <asm/kexec.h>
 #include <asm/tlb.h>
 #include <asm/cacheflush.h>
 #include <asm/sections.h>
+#include <asm/setup.h>
 #include <asm/cache.h>
 #include <asm/sizes.h>
 
 DEFINE_PER_CPU(struct mmu_gather, mmu_gathers);
 pgd_t swapper_pg_dir[PTRS_PER_PGD];
 
+void __init generic_mem_init(void)
+{
+        lmb_add(__MEMORY_START, __MEMORY_SIZE);
+}
+
+void __init __weak plat_mem_setup(void)
+{
+        /* Nothing to see here, move along. */
+}
+
 #ifdef CONFIG_MMU
 static pte_t *__get_pte_phys(unsigned long addr)
 {
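
The two helpers added at the end of this hunk set up the new default boot memory path: generic_mem_init() registers the platform's main RAM bank with LMB, and the __weak plat_mem_setup() stub lets boards with no special requirements omit the hook entirely. Because the stub is weak, any board that defines its own plat_mem_setup() overrides it at link time. A minimal sketch of such an override (the second bank's base and size are made-up values, not part of this patch):

    /* Hypothetical board file: a non-weak definition replaces the stub. */
    #include <linux/init.h>
    #include <linux/lmb.h>

    void __init plat_mem_setup(void)
    {
            /* Register a second RAM bank beyond the one generic_mem_init() added. */
            lmb_add(0x0c000000, 0x04000000);        /* 64MB at 0x0c000000, made up */
    }
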
@@ -152,15 +166,166 @@ void __init page_table_range_init(unsigned long start, unsigned long end,
 }
 #endif /* CONFIG_MMU */
 
-/*
- * paging_init() sets up the page tables
- */
+void __init allocate_pgdat(unsigned int nid)
+{
+        unsigned long start_pfn, end_pfn;
+#ifdef CONFIG_NEED_MULTIPLE_NODES
+        unsigned long phys;
+#endif
+
+        get_pfn_range_for_nid(nid, &start_pfn, &end_pfn);
+
+#ifdef CONFIG_NEED_MULTIPLE_NODES
+        phys = __lmb_alloc_base(sizeof(struct pglist_data),
+                                SMP_CACHE_BYTES, end_pfn << PAGE_SHIFT);
+        /* Retry with all of system memory */
+        if (!phys)
+                phys = __lmb_alloc_base(sizeof(struct pglist_data),
+                                        SMP_CACHE_BYTES, lmb_end_of_DRAM());
+        if (!phys)
+                panic("Can't allocate pgdat for node %d\n", nid);
+
+        NODE_DATA(nid) = __va(phys);
+        memset(NODE_DATA(nid), 0, sizeof(struct pglist_data));
+
+        NODE_DATA(nid)->bdata = &bootmem_node_data[nid];
+#endif
+
+        NODE_DATA(nid)->node_start_pfn = start_pfn;
+        NODE_DATA(nid)->node_spanned_pages = end_pfn - start_pfn;
+}
+
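The allocation in allocate_pgdat() is deliberately two-stage: the first __lmb_alloc_base() call caps the search at end_pfn << PAGE_SHIFT so the pglist_data lands in memory belonging to the node it describes, and only if that fails does the search widen to lmb_end_of_DRAM(). Unlike lmb_alloc_base(), __lmb_alloc_base() returns 0 on failure rather than panicking, which is what makes the retry possible. The same pattern pulled out as a standalone helper for illustration (the helper name is ours, not the patch's):

    /* Sketch: node-local allocation with a whole-of-DRAM fallback. */
    static u64 __init alloc_preferring_limit(u64 size, u64 align, u64 local_limit)
    {
            u64 phys = __lmb_alloc_base(size, align, local_limit);
            if (!phys)
                    phys = __lmb_alloc_base(size, align, lmb_end_of_DRAM());
            return phys;    /* 0 if both attempts failed */
    }
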
+static void __init bootmem_init_one_node(unsigned int nid)
+{
+        unsigned long total_pages, paddr;
+        unsigned long end_pfn;
+        struct pglist_data *p;
+        int i;
+
+        p = NODE_DATA(nid);
+
+        /* Nothing to do.. */
+        if (!p->node_spanned_pages)
+                return;
+
+        end_pfn = p->node_start_pfn + p->node_spanned_pages;
+
+        total_pages = bootmem_bootmap_pages(p->node_spanned_pages);
+
+        paddr = lmb_alloc(total_pages << PAGE_SHIFT, PAGE_SIZE);
+        if (!paddr)
+                panic("Can't allocate bootmap for nid[%d]\n", nid);
+
+        init_bootmem_node(p, paddr >> PAGE_SHIFT, p->node_start_pfn, end_pfn);
+
+        free_bootmem_with_active_regions(nid, end_pfn);
+
+        /*
+         * XXX Handle initial reservations for the system memory node
+         * only for the moment, we'll refactor this later for handling
+         * reservations in other nodes.
+         */
+        if (nid == 0) {
+                /* Reserve the sections we're already using. */
+                for (i = 0; i < lmb.reserved.cnt; i++)
+                        reserve_bootmem(lmb.reserved.region[i].base,
+                                        lmb_size_bytes(&lmb.reserved, i),
+                                        BOOTMEM_DEFAULT);
+        }
+
+        sparse_memory_present_with_active_regions(nid);
+}
+
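bootmem_bootmap_pages() converts a node's page span into the number of whole pages needed for a one-bit-per-page bitmap, and lmb_alloc() then carves that bitmap out of LMB-managed memory before init_bootmem_node() hands it to the bootmem allocator. A back-of-envelope check of the sizing, assuming a 4KB PAGE_SIZE (illustrative numbers only):

    unsigned long span         = (64 << 20) >> 12;             /* 64MB node => 16384 pages */
    unsigned long bitmap_bytes = (span + 7) / 8;               /* one bit per page => 2048 bytes */
    unsigned long bitmap_pages = (bitmap_bytes + 4095) / 4096; /* fits in a single page */
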
+static void __init do_init_bootmem(void)
+{
+        int i;
+
+        /* Add active regions with valid PFNs. */
+        for (i = 0; i < lmb.memory.cnt; i++) {
+                unsigned long start_pfn, end_pfn;
+                start_pfn = lmb.memory.region[i].base >> PAGE_SHIFT;
+                end_pfn = start_pfn + lmb_size_pages(&lmb.memory, i);
+                __add_active_range(0, start_pfn, end_pfn);
+        }
+
+        /* All of system RAM sits in node 0 for the non-NUMA case */
+        allocate_pgdat(0);
+        node_set_online(0);
+
+        plat_mem_setup();
+
+        for_each_online_node(i)
+                bootmem_init_one_node(i);
+
+        sparse_init();
+}
+
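The ordering inside do_init_bootmem() is deliberate: every LMB memory region is first recorded as an active range of node 0, node 0's pgdat is allocated and the node set online, and only then does plat_mem_setup() run, so a NUMA platform can bring additional nodes online before the for_each_online_node() loop initializes bootmem on each of them. Extending the earlier hypothetical override, such a platform might do something like this (node 1's PFN bounds are placeholders, not real symbols):

    /* Sketch: a platform bringing a second memory node online. */
    void __init plat_mem_setup(void)
    {
            __add_active_range(1, node1_start_pfn, node1_end_pfn);  /* placeholder PFNs */
            allocate_pgdat(1);
            node_set_online(1);
    }
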
+static void __init early_reserve_mem(void)
+{
+        unsigned long start_pfn;
+
+        /*
+         * Partially used pages are not usable - thus
+         * we are rounding upwards:
+         */
+        start_pfn = PFN_UP(__pa(_end));
+
+        /*
+         * Reserve the kernel text and Reserve the bootmem bitmap. We do
+         * this in two steps (first step was init_bootmem()), because
+         * this catches the (definitely buggy) case of us accidentally
+         * initializing the bootmem allocator with an invalid RAM area.
+         */
+        lmb_reserve(__MEMORY_START + CONFIG_ZERO_PAGE_OFFSET,
+                    (PFN_PHYS(start_pfn) + PAGE_SIZE - 1) -
+                    (__MEMORY_START + CONFIG_ZERO_PAGE_OFFSET));
+
+        /*
+         * Reserve physical pages below CONFIG_ZERO_PAGE_OFFSET.
+         */
+        if (CONFIG_ZERO_PAGE_OFFSET != 0)
+                lmb_reserve(__MEMORY_START, CONFIG_ZERO_PAGE_OFFSET);
+
+        /*
+         * Handle additional early reservations
+         */
+        check_for_initrd();
+        reserve_crashkernel();
+}
+
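early_reserve_mem() computes the first PFN past the kernel image with PFN_UP(), which <linux/pfn.h> defines as (((x) + PAGE_SIZE - 1) >> PAGE_SHIFT); rounding up rather than down keeps the partially used final page of the image reserved.

    /*
     * Worked example, 4KB pages, hypothetical end address:
     *   __pa(_end)         = 0x00501234
     *   PFN_UP(0x00501234) = (0x00501234 + 0xfff) >> 12 = 0x502
     * so the reservation extends through page frame 0x501 inclusive,
     * covering the partially used tail page of the kernel image.
     */
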
 void __init paging_init(void)
 {
 	unsigned long max_zone_pfns[MAX_NR_ZONES];
 	unsigned long vaddr, end;
 	int nid;
 
+        lmb_init();
+
+        sh_mv.mv_mem_init();
+
+        early_reserve_mem();
+
+        lmb_enforce_memory_limit(memory_limit);
+        lmb_analyze();
+
+        lmb_dump_all();
+
+        /*
+         * Determine low and high memory ranges:
+         */
+        max_low_pfn = max_pfn = lmb_end_of_DRAM() >> PAGE_SHIFT;
+        min_low_pfn = __MEMORY_START >> PAGE_SHIFT;
+
+        nodes_clear(node_online_map);
+
+        memory_start = (unsigned long)__va(__MEMORY_START);
+        memory_end = memory_start + (memory_limit ?: lmb_phys_mem_size());
+
+        uncached_init();
+        pmb_init();
+        do_init_bootmem();
+        ioremap_fixed_init();
+
 	/* We don't need to map the kernel through the TLB, as
 	 * it is permanatly mapped using P1. So clear the
 	 * entire pgd. */
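
The preamble added to paging_init() establishes the LMB bring-up order: lmb_init() readies the region tables, the machine vector's mv_mem_init() hook (generic_mem_init() unless a board overrides it) registers RAM, early_reserve_mem() pins down the kernel image, the zero-page area, the initrd, and any crashkernel region, and only then is a mem= limit enforced before lmb_analyze() and lmb_dump_all() finalize and print the layout. Note that memory_end falls back to lmb_phys_mem_size() through GCC's binary ?: extension when no memory limit was given. A board selects its memory-init hook through the machine vector; a minimal sketch, with a made-up board name:

    /* Sketch: wiring generic_mem_init() into a hypothetical machvec. */
    static struct sh_machine_vector mv_myboard __initmv = {
            .mv_name        = "myboard",
            .mv_mem_init    = generic_mem_init,
    };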