author	Lennert Buytenhek <buytenh@wantstofly.org>	2008-08-04 19:56:13 -0400
committer	Lennert Buytenhek <buytenh@marvell.com>	2008-08-09 09:38:15 -0400
commit	60296c71f6c5063e3c1f1d2619ca0b60940162e7 (patch)
tree	9b66e8716d83c3d350829376d75212798b2c72c6
parent	09c0ed2e6eb87613b2670c15ba771ac671db19d9 (diff)
[ARM] prevent crashing when too much RAM installed
This patch will truncate and/or ignore memory banks if their kernel
direct mappings would (partially) overlap with the vmalloc area or the
mappings between the vmalloc area and the address space top, to prevent
crashing during early boot if there happens to be more RAM installed
than we are expecting.

Since the start of the vmalloc area is not at a fixed address (but the
vmalloc end address is, via the per-platform VMALLOC_END define), a
default area of 128M is reserved for vmalloc mappings, which can be
shrunk or enlarged by passing an appropriate vmalloc= command line
option as it is done on x86.

On a board with a 3:1 user:kernel split, VMALLOC_END at 0xfe000000,
two 512M RAM banks and vmalloc=128M (the default), this patch gives:

	Truncating RAM at 20000000-3fffffff to -35ffffff (vmalloc region overlap).
	Memory: 512MB 352MB = 864MB total

On a board with a 3:1 user:kernel split, VMALLOC_END at 0xfe800000,
two 256M RAM banks and vmalloc=768M, this patch gives:

	Truncating RAM at 00000000-0fffffff to -0e7fffff (vmalloc region overlap).
	Ignoring RAM at 10000000-1fffffff (vmalloc region overlap).

Signed-off-by: Lennert Buytenhek <buytenh@marvell.com>
Tested-by: Riku Voipio <riku.voipio@iki.fi>
-rw-r--r--	arch/arm/include/asm/memory.h	8
-rw-r--r--	arch/arm/kernel/setup.c	13
-rw-r--r--	arch/arm/mm/mmu.c	50
3 files changed, 71 insertions, 0 deletions
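
The truncation arithmetic quoted in the two example boots above can be checked outside the kernel. The following stand-alone C sketch (not part of the patch) mirrors the decision made by check_membank_valid() in the diff below; it assumes a flat offset mapping where phys_to_virt(p) is p + PAGE_OFFSET, a 3:1 split with PAGE_OFFSET 0xc0000000, RAM starting at physical address 0, and a hypothetical check_bank() helper introduced only for illustration.

/*
 * User-space sketch only: not kernel code.  Models the overlap check
 * from check_membank_valid() with phys_to_virt(p) == p + PAGE_OFFSET,
 * PAGE_OFFSET 0xc0000000 (3:1 split) and RAM at physical address 0.
 */
#include <stdio.h>
#include <stdint.h>
#include <inttypes.h>

#define PAGE_OFFSET	UINT32_C(0xc0000000)

static uint32_t phys_to_virt(uint32_t phys)
{
	return phys + PAGE_OFFSET;	/* 32-bit wrap-around is intentional */
}

/* Hypothetical helper: applies the patch's decision to one RAM bank. */
static void check_bank(uint32_t start, uint32_t size,
		       uint32_t vmalloc_end, uint32_t vmalloc_reserve)
{
	uint32_t vmalloc_min = vmalloc_end - vmalloc_reserve;

	/* Bank maps entirely at or above VMALLOC_MIN: ignore it. */
	if (phys_to_virt(start) >= vmalloc_min) {
		printf("Ignoring RAM at %08" PRIx32 "-%08" PRIx32
		       " (vmalloc region overlap).\n",
		       start, start + size - 1);
		return;
	}

	/* Bank crosses VMALLOC_MIN (or wraps past 4GB): truncate it. */
	if (phys_to_virt(start + size) < phys_to_virt(start) ||
	    phys_to_virt(start + size) > vmalloc_min) {
		uint32_t newsize = vmalloc_min - phys_to_virt(start);

		printf("Truncating RAM at %08" PRIx32 "-%08" PRIx32
		       " to -%08" PRIx32 " (vmalloc region overlap).\n",
		       start, start + size - 1, start + newsize - 1);
		return;
	}

	printf("Keeping RAM at %08" PRIx32 "-%08" PRIx32 " unchanged.\n",
	       start, start + size - 1);
}

int main(void)
{
	/* First example: VMALLOC_END 0xfe000000, vmalloc=128M, two 512M banks. */
	check_bank(0x00000000, 0x20000000, 0xfe000000, 128u << 20);
	check_bank(0x20000000, 0x20000000, 0xfe000000, 128u << 20);

	/* Second example: VMALLOC_END 0xfe800000, vmalloc=768M, two 256M banks. */
	check_bank(0x00000000, 0x10000000, 0xfe800000, 768u << 20);
	check_bank(0x10000000, 0x10000000, 0xfe800000, 768u << 20);

	return 0;
}

Built with any hosted C compiler, this should reproduce the "Truncating"/"Ignoring" lines quoted in the commit message for both example boards; on real hardware the reserve itself is changed by appending a vmalloc= option, such as vmalloc=768M in the second example, to the kernel command line.
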
diff --git a/arch/arm/include/asm/memory.h b/arch/arm/include/asm/memory.h
index 1e070a2b561a..7bcd69a9a88c 100644
--- a/arch/arm/include/asm/memory.h
+++ b/arch/arm/include/asm/memory.h
@@ -150,6 +150,14 @@
 #endif
 
 /*
+ * Amount of memory reserved for the vmalloc() area, and minimum
+ * address for vmalloc mappings.
+ */
+extern unsigned long vmalloc_reserve;
+
+#define VMALLOC_MIN		(void *)(VMALLOC_END - vmalloc_reserve)
+
+/*
  * PFNs are used to describe any physical page; this means
  * PFN 0 == physical address 0.
  *
diff --git a/arch/arm/kernel/setup.c b/arch/arm/kernel/setup.c
index 38f0e7940a13..2ca7038b67a7 100644
--- a/arch/arm/kernel/setup.c
+++ b/arch/arm/kernel/setup.c
@@ -81,6 +81,8 @@ EXPORT_SYMBOL(system_serial_high);
 unsigned int elf_hwcap;
 EXPORT_SYMBOL(elf_hwcap);
 
+unsigned long __initdata vmalloc_reserve = 128 << 20;
+
 
 #ifdef MULTI_CPU
 struct processor processor;
@@ -501,6 +503,17 @@ static void __init early_mem(char **p)
 __early_param("mem=", early_mem);
 
 /*
+ * vmalloc=size forces the vmalloc area to be exactly 'size'
+ * bytes. This can be used to increase (or decrease) the vmalloc
+ * area - the default is 128m.
+ */
+static void __init early_vmalloc(char **arg)
+{
+	vmalloc_reserve = memparse(*arg, arg);
+}
+__early_param("vmalloc=", early_vmalloc);
+
+/*
  * Initial parsing of the command line.
  */
 static void __init parse_cmdline(char **cmdline_p, char *from)
diff --git a/arch/arm/mm/mmu.c b/arch/arm/mm/mmu.c
index 2d6d682c206a..25d9a11eb617 100644
--- a/arch/arm/mm/mmu.c
+++ b/arch/arm/mm/mmu.c
@@ -568,6 +568,55 @@ void __init iotable_init(struct map_desc *io_desc, int nr)
 		create_mapping(io_desc + i);
 }
 
+static int __init check_membank_valid(struct membank *mb)
+{
+	/*
+	 * Check whether this memory region has non-zero size.
+	 */
+	if (mb->size == 0)
+		return 0;
+
+	/*
+	 * Check whether this memory region would entirely overlap
+	 * the vmalloc area.
+	 */
+	if (phys_to_virt(mb->start) >= VMALLOC_MIN) {
+		printk(KERN_NOTICE "Ignoring RAM at %.8lx-%.8lx "
+			"(vmalloc region overlap).\n",
+			mb->start, mb->start + mb->size - 1);
+		return 0;
+	}
+
+	/*
+	 * Check whether this memory region would partially overlap
+	 * the vmalloc area.
+	 */
+	if (phys_to_virt(mb->start + mb->size) < phys_to_virt(mb->start) ||
+	    phys_to_virt(mb->start + mb->size) > VMALLOC_MIN) {
+		unsigned long newsize = VMALLOC_MIN - phys_to_virt(mb->start);
+
+		printk(KERN_NOTICE "Truncating RAM at %.8lx-%.8lx "
+			"to -%.8lx (vmalloc region overlap).\n",
+			mb->start, mb->start + mb->size - 1,
+			mb->start + newsize - 1);
+		mb->size = newsize;
+	}
+
+	return 1;
+}
+
+static void __init sanity_check_meminfo(struct meminfo *mi)
+{
+	int i;
+	int j;
+
+	for (i = 0, j = 0; i < mi->nr_banks; i++) {
+		if (check_membank_valid(&mi->bank[i]))
+			mi->bank[j++] = mi->bank[i];
+	}
+	mi->nr_banks = j;
+}
+
 static inline void prepare_page_table(struct meminfo *mi)
 {
 	unsigned long addr;
@@ -753,6 +802,7 @@ void __init paging_init(struct meminfo *mi, struct machine_desc *mdesc)
 	void *zero_page;
 
 	build_mem_type_table();
+	sanity_check_meminfo(mi);
 	prepare_page_table(mi);
 	bootmem_init(mi);
 	devicemaps_init(mdesc);