author		Jiaxun Yang <jiaxun.yang@flygoat.com>	2019-08-19 10:23:12 -0400
committer	Paul Burton <paul.burton@mips.com>	2019-08-23 10:40:14 -0400
commit		a5718fe8f70f33b1b5b47a153057cfdd19684598 (patch)
tree		be2e4c4c0e9f6aef7cc58716cc3347e1c4ee6025
parent		a121d6e0caf0982579a1be7f4ad4dcf45e36052b (diff)
MIPS: mm: Drop boot_mem_map
Initialize the MAARs from the system RAM resource map and replace
page_is_ram() with memblock_is_memory().

Signed-off-by: Jiaxun Yang <jiaxun.yang@flygoat.com>
[paul.burton@mips.com:
  - Fix bad MAAR address calculations.
  - Use ALIGN() & define maar_align to make it clearer what's going on
    with address manipulations.
  - Drop the new 'used' field from struct maar_config.
  - Rework the RAM walk to avoid iterating over the cfg array needlessly
    to find the first unused entry, then count used entries at the end.
    Instead just keep the count as we go.]
Signed-off-by: Paul Burton <paul.burton@mips.com>
Cc: linux-mips@vger.kernel.org
Cc: yasha.che3@gmail.com
Cc: aurelien@aurel32.net
Cc: sfr@canb.auug.org.au
Cc: fancer.lancer@gmail.com
Cc: matt.redfearn@mips.com
Cc: chenhc@lemote.com
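As context for the "MAAR address calculations" note above, here is a minimal
userspace sketch of the bound rounding that the new maar_res_walk() in the
diff below performs. The page size, the MIPS_MAAR_ADDR_SHIFT value and the
example PFN range are illustrative assumptions, not taken from this patch,
and the kernel's ALIGN()/ALIGN_DOWN()/PFN_PHYS() helpers are approximated
with local macros.

/*
 * Sketch of the MAAR bound rounding. Assumptions: 4 KiB pages and
 * MIPS_MAAR_ADDR_SHIFT == 12, which gives the same 64 KiB MAAR granularity
 * as the old 0xffff-based rounding; the PFN range is made up.
 */
#include <stdio.h>
#include <stdint.h>

#define PAGE_SHIFT		12			/* assumed 4 KiB pages */
#define PFN_PHYS(pfn)		((uint64_t)(pfn) << PAGE_SHIFT)
#define ALIGN(x, a)		(((x) + ((a) - 1)) & ~((uint64_t)(a) - 1))
#define ALIGN_DOWN(x, a)	((x) & ~((uint64_t)(a) - 1))

int main(void)
{
	/* Hypothetical RAM region: PFNs 0x100 .. 0x1233 inclusive */
	uint64_t start_pfn = 0x100, nr_pages = 0x1134;
	/* MAARs hold addresses >> 4, so align to BIT(MIPS_MAAR_ADDR_SHIFT + 4) */
	uint64_t maar_align = 1ULL << (12 + 4);

	/* Round the lower bound up and the upper bound down, as the patch does */
	uint64_t lower = ALIGN(PFN_PHYS(start_pfn), maar_align);
	uint64_t upper = ALIGN_DOWN(PFN_PHYS(start_pfn + nr_pages), maar_align) - 1;

	printf("lower=0x%llx upper=0x%llx\n",
	       (unsigned long long)lower, (unsigned long long)upper);
	return 0;
}

With these example inputs the sketch prints lower=0x100000 upper=0x122ffff:
the region is shrunk inward to 64 KiB boundaries, so a MAAR pair never covers
more than the RAM that is actually present.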
-rw-r--r--	arch/mips/mm/init.c	94
1 file changed, 37 insertions, 57 deletions
diff --git a/arch/mips/mm/init.c b/arch/mips/mm/init.c
index 8a038b30d3c4..e9e1104e0567 100644
--- a/arch/mips/mm/init.c
+++ b/arch/mips/mm/init.c
@@ -269,37 +269,46 @@ void __init fixrange_init(unsigned long start, unsigned long end,
 #endif
 }
 
-unsigned __weak platform_maar_init(unsigned num_pairs)
+struct maar_walk_info {
+	struct maar_config cfg[16];
+	unsigned int num_cfg;
+};
+
+static int maar_res_walk(unsigned long start_pfn, unsigned long nr_pages,
+			 void *data)
 {
-	struct maar_config cfg[BOOT_MEM_MAP_MAX];
-	unsigned i, num_configured, num_cfg = 0;
-
-	for (i = 0; i < boot_mem_map.nr_map; i++) {
-		switch (boot_mem_map.map[i].type) {
-		case BOOT_MEM_RAM:
-		case BOOT_MEM_INIT_RAM:
-			break;
-		default:
-			continue;
-		}
+	struct maar_walk_info *wi = data;
+	struct maar_config *cfg = &wi->cfg[wi->num_cfg];
+	unsigned int maar_align;
 
-		/* Round lower up */
-		cfg[num_cfg].lower = boot_mem_map.map[i].addr;
-		cfg[num_cfg].lower = (cfg[num_cfg].lower + 0xffff) & ~0xffff;
+	/* MAAR registers hold physical addresses right shifted by 4 bits */
+	maar_align = BIT(MIPS_MAAR_ADDR_SHIFT + 4);
 
-		/* Round upper down */
-		cfg[num_cfg].upper = boot_mem_map.map[i].addr +
-				     boot_mem_map.map[i].size;
-		cfg[num_cfg].upper = (cfg[num_cfg].upper & ~0xffff) - 1;
+	/* Fill in the MAAR config entry */
+	cfg->lower = ALIGN(PFN_PHYS(start_pfn), maar_align);
+	cfg->upper = ALIGN_DOWN(PFN_PHYS(start_pfn + nr_pages), maar_align) - 1;
+	cfg->attrs = MIPS_MAAR_S;
 
-		cfg[num_cfg].attrs = MIPS_MAAR_S;
-		num_cfg++;
-	}
+	/* Ensure we don't overflow the cfg array */
+	if (!WARN_ON(wi->num_cfg >= ARRAY_SIZE(wi->cfg)))
+		wi->num_cfg++;
+
+	return 0;
+}
+
+
+unsigned __weak platform_maar_init(unsigned num_pairs)
+{
+	unsigned int num_configured;
+	struct maar_walk_info wi;
+
+	wi.num_cfg = 0;
+	walk_system_ram_range(0, max_pfn, &wi, maar_res_walk);
 
-	num_configured = maar_config(cfg, num_cfg, num_pairs);
-	if (num_configured < num_cfg)
-		pr_warn("Not enough MAAR pairs (%u) for all bootmem regions (%u)\n",
-			num_pairs, num_cfg);
+	num_configured = maar_config(wi.cfg, wi.num_cfg, num_pairs);
+	if (num_configured < wi.num_cfg)
+		pr_warn("Not enough MAAR pairs (%u) for all memory regions (%u)\n",
+			num_pairs, wi.num_cfg);
 
 	return num_configured;
 }
@@ -382,33 +391,6 @@ void maar_init(void)
 }
 
 #ifndef CONFIG_NEED_MULTIPLE_NODES
-int page_is_ram(unsigned long pagenr)
-{
-	int i;
-
-	for (i = 0; i < boot_mem_map.nr_map; i++) {
-		unsigned long addr, end;
-
-		switch (boot_mem_map.map[i].type) {
-		case BOOT_MEM_RAM:
-		case BOOT_MEM_INIT_RAM:
-			break;
-		default:
-			/* not usable memory */
-			continue;
-		}
-
-		addr = PFN_UP(boot_mem_map.map[i].addr);
-		end = PFN_DOWN(boot_mem_map.map[i].addr +
-			       boot_mem_map.map[i].size);
-
-		if (pagenr >= addr && pagenr < end)
-			return 1;
-	}
-
-	return 0;
-}
-
 void __init paging_init(void)
 {
 	unsigned long max_zone_pfns[MAX_NR_ZONES];
@@ -443,7 +425,7 @@ void __init paging_init(void)
 static struct kcore_list kcore_kseg0;
 #endif
 
-static inline void mem_init_free_highmem(void)
+static inline void __init mem_init_free_highmem(void)
 {
 #ifdef CONFIG_HIGHMEM
 	unsigned long tmp;
@@ -452,9 +434,7 @@ static inline void mem_init_free_highmem(void)
 		return;
 
 	for (tmp = highstart_pfn; tmp < highend_pfn; tmp++) {
-		struct page *page = pfn_to_page(tmp);
-
-		if (!page_is_ram(tmp))
+		if (!memblock_is_memory(PFN_PHYS(tmp)))
 			SetPageReserved(page);
 		else
 			free_highmem_page(page);