author	Russell King <rmk@dyn-67.arm.linux.org.uk>	2008-09-30 14:31:44 -0400
committer	Russell King <rmk+kernel@arm.linux.org.uk>	2008-09-30 16:34:16 -0400
commit	6c5da7aced798c7781f054a76c769b85f0173561 (patch)
tree	a4713f081e16183b6ed00368658ee77bcca83cf6 /arch
parent	eca73214c9c50e290b8dc823b41730b01788872d (diff)
[ARM] mm: move vmalloc= parsing to arch/arm/mm/mmu.c
There's no point scattering this around the tree; the parsing of the parameter might as well live beside the code which uses it. That also means we can make vmalloc_reserve a static variable.

Signed-off-by: Russell King <rmk+kernel@arm.linux.org.uk>
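For context, the early_vmalloc() hook moved by this patch hands its argument to memparse(), which accepts a plain byte count or a size with a K/M/G suffix. The sketch below is a stand-alone user-space approximation of that parsing style, purely for illustration; parse_size() is a hypothetical helper, not the kernel's memparse().

/* Illustrative user-space approximation of memparse()-style size parsing;
 * not kernel code. */
#include <stdio.h>
#include <stdlib.h>

static unsigned long parse_size(const char *arg)
{
	char *end;
	unsigned long val = strtoul(arg, &end, 0);	/* base 0: decimal or 0x... */

	switch (*end) {				/* scale by any K/M/G suffix */
	case 'G': case 'g':
		val <<= 10;
		/* fall through */
	case 'M': case 'm':
		val <<= 10;
		/* fall through */
	case 'K': case 'k':
		val <<= 10;
	}
	return val;
}

int main(void)
{
	/* a "vmalloc=256M" boot argument would hand "256M" to the parser */
	printf("%lu bytes\n", parse_size("256M"));
	return 0;
}

So booting with, say, vmalloc=256M (an arbitrary example value) reserves 256MB for the vmalloc area; the relocated early_vmalloc() below additionally clamps the result to a minimum of SZ_16M.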
Diffstat (limited to 'arch')
-rw-r--r--	arch/arm/include/asm/memory.h	 8
-rw-r--r--	arch/arm/kernel/setup.c	13
-rw-r--r--	arch/arm/mm/mmu.c	22
3 files changed, 22 insertions(+), 21 deletions(-)
diff --git a/arch/arm/include/asm/memory.h b/arch/arm/include/asm/memory.h
index 7e8d22fef29c..7834adbe1774 100644
--- a/arch/arm/include/asm/memory.h
+++ b/arch/arm/include/asm/memory.h
@@ -140,14 +140,6 @@
 #endif
 
 /*
- * Amount of memory reserved for the vmalloc() area, and minimum
- * address for vmalloc mappings.
- */
-extern unsigned long vmalloc_reserve;
-
-#define VMALLOC_MIN	(void *)(VMALLOC_END - vmalloc_reserve)
-
-/*
  * PFNs are used to describe any physical page; this means
  * PFN 0 == physical address 0.
  *
diff --git a/arch/arm/kernel/setup.c b/arch/arm/kernel/setup.c
index 2f5d3641f2ed..e4a975fa2d7e 100644
--- a/arch/arm/kernel/setup.c
+++ b/arch/arm/kernel/setup.c
@@ -84,8 +84,6 @@ EXPORT_SYMBOL(system_serial_high);
 unsigned int elf_hwcap;
 EXPORT_SYMBOL(elf_hwcap);
 
-unsigned long __initdata vmalloc_reserve = 128 << 20;
-
 
 #ifdef MULTI_CPU
 struct processor processor;
@@ -415,17 +413,6 @@ static void __init early_mem(char **p)
 __early_param("mem=", early_mem);
 
 /*
- * vmalloc=size forces the vmalloc area to be exactly 'size'
- * bytes. This can be used to increase (or decrease) the vmalloc
- * area - the default is 128m.
- */
-static void __init early_vmalloc(char **arg)
-{
-	vmalloc_reserve = memparse(*arg, arg);
-}
-__early_param("vmalloc=", early_vmalloc);
-
-/*
  * Initial parsing of the command line.
  */
 static void __init parse_cmdline(char **cmdline_p, char *from)
diff --git a/arch/arm/mm/mmu.c b/arch/arm/mm/mmu.c
index f03ad87f6baa..e7af83e569d7 100644
--- a/arch/arm/mm/mmu.c
+++ b/arch/arm/mm/mmu.c
@@ -566,6 +566,28 @@ void __init iotable_init(struct map_desc *io_desc, int nr)
 		create_mapping(io_desc + i);
 }
 
+static unsigned long __initdata vmalloc_reserve = SZ_128M;
+
+/*
+ * vmalloc=size forces the vmalloc area to be exactly 'size'
+ * bytes. This can be used to increase (or decrease) the vmalloc
+ * area - the default is 128m.
+ */
+static void __init early_vmalloc(char **arg)
+{
+	vmalloc_reserve = memparse(*arg, arg);
+
+	if (vmalloc_reserve < SZ_16M) {
+		vmalloc_reserve = SZ_16M;
+		printk(KERN_WARNING
+			"vmalloc area too small, limiting to %luMB\n",
+			vmalloc_reserve >> 20);
+	}
+}
+__early_param("vmalloc=", early_vmalloc);
+
+#define VMALLOC_MIN	(void *)(VMALLOC_END - vmalloc_reserve)
+
 static int __init check_membank_valid(struct membank *mb)
 {
 	/*