author		Russell King <rmk+kernel@arm.linux.org.uk>	2010-02-25 17:09:41 -0500
committer	Russell King <rmk+kernel@arm.linux.org.uk>	2010-02-25 17:09:41 -0500
commit		2741ecb4ce5c2d430b5c44b0a169038338c21df5 (patch)
tree		4aa71d7551184ee88f32c7f3660d821133058c32 /arch/arm/mm
parent		bc85e585c6d0fab4bde12d60964b2f25802c3163 (diff)
parent		5de813b6cd06460b337f9da9afe316823cf3ef45 (diff)
Merge branch 'misc2' into devel
Diffstat (limited to 'arch/arm/mm')
-rw-r--r--  arch/arm/mm/Kconfig       |   8
-rw-r--r--  arch/arm/mm/alignment.c   |   6
-rw-r--r--  arch/arm/mm/cache-l2x0.c  |  72
-rw-r--r--  arch/arm/mm/context.c     | 124
-rw-r--r--  arch/arm/mm/dma-mapping.c |   3
-rw-r--r--  arch/arm/mm/init.c        | 113
-rw-r--r--  arch/arm/mm/ioremap.c     |  57
-rw-r--r--  arch/arm/mm/mmu.c         |  41
-rw-r--r--  arch/arm/mm/nommu.c       |  12
9 files changed, 343 insertions, 93 deletions
diff --git a/arch/arm/mm/Kconfig b/arch/arm/mm/Kconfig
index baf638487a2d..c4ed9f93f646 100644
--- a/arch/arm/mm/Kconfig
+++ b/arch/arm/mm/Kconfig
@@ -399,7 +399,7 @@ config CPU_V6
 config CPU_32v6K
 	bool "Support ARM V6K processor extensions" if !SMP
 	depends on CPU_V6
-	default y if SMP && !ARCH_MX3
+	default y if SMP && !(ARCH_MX3 || ARCH_OMAP2)
 	help
 	  Say Y here if your ARMv6 processor supports the 'K' extension.
 	  This enables the kernel to use some instructions not present
@@ -410,7 +410,7 @@ config CPU_32v6K
 # ARMv7
 config CPU_V7
 	bool "Support ARM V7 processor" if ARCH_INTEGRATOR || MACH_REALVIEW_EB || MACH_REALVIEW_PBX
-	select CPU_32v6K
+	select CPU_32v6K if !ARCH_OMAP2
 	select CPU_32v7
 	select CPU_ABRT_EV7
 	select CPU_PABRT_V7
@@ -754,7 +754,7 @@ config CACHE_FEROCEON_L2_WRITETHROUGH
 config CACHE_L2X0
 	bool "Enable the L2x0 outer cache controller"
 	depends on REALVIEW_EB_ARM11MP || MACH_REALVIEW_PB11MP || MACH_REALVIEW_PB1176 || \
-		   REALVIEW_EB_A9MP || ARCH_MX35 || ARCH_MX31 || MACH_REALVIEW_PBX || ARCH_NOMADIK
+		   REALVIEW_EB_A9MP || ARCH_MX35 || ARCH_MX31 || MACH_REALVIEW_PBX || ARCH_NOMADIK || ARCH_OMAP4
 	default y
 	select OUTER_CACHE
 	help
@@ -779,5 +779,5 @@ config CACHE_XSC3L2
 
 config ARM_L1_CACHE_SHIFT
 	int
-	default 6 if ARCH_OMAP3 || ARCH_S5PC1XX
+	default 6 if ARM_L1_CACHE_SHIFT_6
 	default 5
diff --git a/arch/arm/mm/alignment.c b/arch/arm/mm/alignment.c
index b270d6228fe2..0c5eb6983cef 100644
--- a/arch/arm/mm/alignment.c
+++ b/arch/arm/mm/alignment.c
@@ -898,11 +898,7 @@ static int __init alignment_init(void)
 #ifdef CONFIG_PROC_FS
 	struct proc_dir_entry *res;
 
-	res = proc_mkdir("cpu", NULL);
-	if (!res)
-		return -ENOMEM;
-
-	res = create_proc_entry("alignment", S_IWUSR | S_IRUGO, res);
+	res = create_proc_entry("cpu/alignment", S_IWUSR | S_IRUGO, NULL);
 	if (!res)
 		return -ENOMEM;
 
diff --git a/arch/arm/mm/cache-l2x0.c b/arch/arm/mm/cache-l2x0.c
index cb8fc6573b1b..07334632d3e2 100644
--- a/arch/arm/mm/cache-l2x0.c
+++ b/arch/arm/mm/cache-l2x0.c
@@ -42,6 +42,57 @@ static inline void cache_sync(void)
 	cache_wait(base + L2X0_CACHE_SYNC, 1);
 }
 
+static inline void l2x0_clean_line(unsigned long addr)
+{
+	void __iomem *base = l2x0_base;
+	cache_wait(base + L2X0_CLEAN_LINE_PA, 1);
+	writel(addr, base + L2X0_CLEAN_LINE_PA);
+}
+
+static inline void l2x0_inv_line(unsigned long addr)
+{
+	void __iomem *base = l2x0_base;
+	cache_wait(base + L2X0_INV_LINE_PA, 1);
+	writel(addr, base + L2X0_INV_LINE_PA);
+}
+
+#ifdef CONFIG_PL310_ERRATA_588369
+static void debug_writel(unsigned long val)
+{
+	extern void omap_smc1(u32 fn, u32 arg);
+
+	/*
+	 * Texas Instrument secure monitor api to modify the
+	 * PL310 Debug Control Register.
+	 */
+	omap_smc1(0x100, val);
+}
+
+static inline void l2x0_flush_line(unsigned long addr)
+{
+	void __iomem *base = l2x0_base;
+
+	/* Clean by PA followed by Invalidate by PA */
+	cache_wait(base + L2X0_CLEAN_LINE_PA, 1);
+	writel(addr, base + L2X0_CLEAN_LINE_PA);
+	cache_wait(base + L2X0_INV_LINE_PA, 1);
+	writel(addr, base + L2X0_INV_LINE_PA);
+}
+#else
+
+/* Optimised out for non-errata case */
+static inline void debug_writel(unsigned long val)
+{
+}
+
+static inline void l2x0_flush_line(unsigned long addr)
+{
+	void __iomem *base = l2x0_base;
+	cache_wait(base + L2X0_CLEAN_INV_LINE_PA, 1);
+	writel(addr, base + L2X0_CLEAN_INV_LINE_PA);
+}
+#endif
+
 static inline void l2x0_inv_all(void)
 {
 	unsigned long flags;
@@ -62,23 +113,24 @@ static void l2x0_inv_range(unsigned long start, unsigned long end)
 	spin_lock_irqsave(&l2x0_lock, flags);
 	if (start & (CACHE_LINE_SIZE - 1)) {
 		start &= ~(CACHE_LINE_SIZE - 1);
-		cache_wait(base + L2X0_CLEAN_INV_LINE_PA, 1);
-		writel(start, base + L2X0_CLEAN_INV_LINE_PA);
+		debug_writel(0x03);
+		l2x0_flush_line(start);
+		debug_writel(0x00);
 		start += CACHE_LINE_SIZE;
 	}
 
 	if (end & (CACHE_LINE_SIZE - 1)) {
 		end &= ~(CACHE_LINE_SIZE - 1);
-		cache_wait(base + L2X0_CLEAN_INV_LINE_PA, 1);
-		writel(end, base + L2X0_CLEAN_INV_LINE_PA);
+		debug_writel(0x03);
+		l2x0_flush_line(end);
+		debug_writel(0x00);
 	}
 
 	while (start < end) {
 		unsigned long blk_end = start + min(end - start, 4096UL);
 
 		while (start < blk_end) {
-			cache_wait(base + L2X0_INV_LINE_PA, 1);
-			writel(start, base + L2X0_INV_LINE_PA);
+			l2x0_inv_line(start);
 			start += CACHE_LINE_SIZE;
 		}
 
@@ -103,8 +155,7 @@ static void l2x0_clean_range(unsigned long start, unsigned long end)
 		unsigned long blk_end = start + min(end - start, 4096UL);
 
 		while (start < blk_end) {
-			cache_wait(base + L2X0_CLEAN_LINE_PA, 1);
-			writel(start, base + L2X0_CLEAN_LINE_PA);
+			l2x0_clean_line(start);
 			start += CACHE_LINE_SIZE;
 		}
 
@@ -128,11 +179,12 @@ static void l2x0_flush_range(unsigned long start, unsigned long end)
 	while (start < end) {
 		unsigned long blk_end = start + min(end - start, 4096UL);
 
+		debug_writel(0x03);
 		while (start < blk_end) {
-			cache_wait(base + L2X0_CLEAN_INV_LINE_PA, 1);
-			writel(start, base + L2X0_CLEAN_INV_LINE_PA);
+			l2x0_flush_line(start);
 			start += CACHE_LINE_SIZE;
 		}
+		debug_writel(0x00);
 
 		if (blk_end < end) {
 			spin_unlock_irqrestore(&l2x0_lock, flags);
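
The net effect of the cache-l2x0.c changes is that the single clean-and-invalidate-by-PA operation, which PL310 erratum 588369 makes unreliable, is replaced by a clean-by-PA followed by an invalidate-by-PA, with write-back and cache linefills disabled around the pair via the PL310 Debug Control Register (written through the OMAP secure monitor). A condensed sketch of that per-line flush path, assuming the register offsets from asm/hardware/cache-l2x0.h and the omap_smc1() helper used in the diff:

#include <linux/io.h>
#include <linux/types.h>
#include <asm/hardware/cache-l2x0.h>

extern void omap_smc1(u32 fn, u32 arg);	/* TI secure monitor call, as in the diff */

static inline void l2x0_wait(void __iomem *reg)
{
	while (readl(reg) & 1)		/* wait for the previous line operation to finish */
		;
}

/* Erratum 588369 workaround: split clean+invalidate into two steps, with the
 * debug register's disable-write-back and disable-linefill bits (0x03) set. */
static void l2x0_flush_line_588369(void __iomem *base, unsigned long addr)
{
	omap_smc1(0x100, 0x03);
	l2x0_wait(base + L2X0_CLEAN_LINE_PA);
	writel(addr, base + L2X0_CLEAN_LINE_PA);
	l2x0_wait(base + L2X0_INV_LINE_PA);
	writel(addr, base + L2X0_INV_LINE_PA);
	omap_smc1(0x100, 0x00);
}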
diff --git a/arch/arm/mm/context.c b/arch/arm/mm/context.c
index a9e22e31eaa1..b0ee9ba3cfab 100644
--- a/arch/arm/mm/context.c
+++ b/arch/arm/mm/context.c
@@ -10,12 +10,17 @@
 #include <linux/init.h>
 #include <linux/sched.h>
 #include <linux/mm.h>
+#include <linux/smp.h>
+#include <linux/percpu.h>
 
 #include <asm/mmu_context.h>
 #include <asm/tlbflush.h>
 
 static DEFINE_SPINLOCK(cpu_asid_lock);
 unsigned int cpu_last_asid = ASID_FIRST_VERSION;
+#ifdef CONFIG_SMP
+DEFINE_PER_CPU(struct mm_struct *, current_mm);
+#endif
 
 /*
  * We fork()ed a process, and we need a new context for the child
@@ -26,13 +31,109 @@ unsigned int cpu_last_asid = ASID_FIRST_VERSION;
 void __init_new_context(struct task_struct *tsk, struct mm_struct *mm)
 {
 	mm->context.id = 0;
+	spin_lock_init(&mm->context.id_lock);
 }
 
+static void flush_context(void)
+{
+	/* set the reserved ASID before flushing the TLB */
+	asm("mcr	p15, 0, %0, c13, c0, 1\n" : : "r" (0));
+	isb();
+	local_flush_tlb_all();
+	if (icache_is_vivt_asid_tagged()) {
+		__flush_icache_all();
+		dsb();
+	}
+}
+
+#ifdef CONFIG_SMP
+
+static void set_mm_context(struct mm_struct *mm, unsigned int asid)
+{
+	unsigned long flags;
+
+	/*
+	 * Locking needed for multi-threaded applications where the
+	 * same mm->context.id could be set from different CPUs during
+	 * the broadcast. This function is also called via IPI so the
+	 * mm->context.id_lock has to be IRQ-safe.
+	 */
+	spin_lock_irqsave(&mm->context.id_lock, flags);
+	if (likely((mm->context.id ^ cpu_last_asid) >> ASID_BITS)) {
+		/*
+		 * Old version of ASID found. Set the new one and
+		 * reset mm_cpumask(mm).
+		 */
+		mm->context.id = asid;
+		cpumask_clear(mm_cpumask(mm));
+	}
+	spin_unlock_irqrestore(&mm->context.id_lock, flags);
+
+	/*
+	 * Set the mm_cpumask(mm) bit for the current CPU.
+	 */
+	cpumask_set_cpu(smp_processor_id(), mm_cpumask(mm));
+}
+
+/*
+ * Reset the ASID on the current CPU. This function call is broadcast
+ * from the CPU handling the ASID rollover and holding cpu_asid_lock.
+ */
+static void reset_context(void *info)
+{
+	unsigned int asid;
+	unsigned int cpu = smp_processor_id();
+	struct mm_struct *mm = per_cpu(current_mm, cpu);
+
+	/*
+	 * Check if a current_mm was set on this CPU as it might still
+	 * be in the early booting stages and using the reserved ASID.
+	 */
+	if (!mm)
+		return;
+
+	smp_rmb();
+	asid = cpu_last_asid + cpu + 1;
+
+	flush_context();
+	set_mm_context(mm, asid);
+
+	/* set the new ASID */
+	asm("mcr	p15, 0, %0, c13, c0, 1\n" : : "r" (mm->context.id));
+	isb();
+}
+
+#else
+
+static inline void set_mm_context(struct mm_struct *mm, unsigned int asid)
+{
+	mm->context.id = asid;
+	cpumask_copy(mm_cpumask(mm), cpumask_of(smp_processor_id()));
+}
+
+#endif
+
 void __new_context(struct mm_struct *mm)
 {
 	unsigned int asid;
 
 	spin_lock(&cpu_asid_lock);
+#ifdef CONFIG_SMP
+	/*
+	 * Check the ASID again, in case the change was broadcast from
+	 * another CPU before we acquired the lock.
+	 */
+	if (unlikely(((mm->context.id ^ cpu_last_asid) >> ASID_BITS) == 0)) {
+		cpumask_set_cpu(smp_processor_id(), mm_cpumask(mm));
+		spin_unlock(&cpu_asid_lock);
+		return;
+	}
+#endif
+	/*
+	 * At this point, it is guaranteed that the current mm (with
+	 * an old ASID) isn't active on any other CPU since the ASIDs
+	 * are changed simultaneously via IPI.
+	 */
 	asid = ++cpu_last_asid;
 	if (asid == 0)
 		asid = cpu_last_asid = ASID_FIRST_VERSION;
@@ -42,20 +143,15 @@ void __new_context(struct mm_struct *mm)
 	 * to start a new version and flush the TLB.
 	 */
 	if (unlikely((asid & ~ASID_MASK) == 0)) {
-		asid = ++cpu_last_asid;
-		/* set the reserved ASID before flushing the TLB */
-		asm("mcr	p15, 0, %0, c13, c0, 1	@ set reserved context ID\n"
-		    :
-		    : "r" (0));
-		isb();
-		flush_tlb_all();
-		if (icache_is_vivt_asid_tagged()) {
-			__flush_icache_all();
-			dsb();
-		}
+		asid = cpu_last_asid + smp_processor_id() + 1;
+		flush_context();
+#ifdef CONFIG_SMP
+		smp_wmb();
+		smp_call_function(reset_context, NULL, 1);
+#endif
+		cpu_last_asid += NR_CPUS;
 	}
-	spin_unlock(&cpu_asid_lock);
 
-	cpumask_copy(mm_cpumask(mm), cpumask_of(smp_processor_id()));
-	mm->context.id = asid;
+	set_mm_context(mm, asid);
+	spin_unlock(&cpu_asid_lock);
 }
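
Both the early-return check in __new_context() and the test in set_mm_context() rely on the same trick: the bits of context.id above ASID_BITS act as a generation (version) number, so (mm->context.id ^ cpu_last_asid) >> ASID_BITS is non-zero exactly when the mm still carries an ASID from an older generation and needs a fresh one. A small standalone model of that test (ASID_BITS = 8 and ASID_FIRST_VERSION = 1 << 8 are assumptions matching the usual ARM definitions, not part of this diff):

#include <stdio.h>

#define ASID_BITS		8		/* assumed value, as in asm/mmu_context.h */
#define ASID_FIRST_VERSION	(1u << ASID_BITS)

/* Non-zero when 'id' belongs to an older ASID generation than the allocator. */
static unsigned int asid_is_stale(unsigned int id, unsigned int cpu_last_asid)
{
	return (id ^ cpu_last_asid) >> ASID_BITS;
}

int main(void)
{
	unsigned int cpu_last_asid = ASID_FIRST_VERSION + 5;	/* generation 1, ASID 5 */

	printf("%u\n", asid_is_stale(ASID_FIRST_VERSION + 3, cpu_last_asid));	/* 0: same generation */
	printf("%u\n", asid_is_stale(3, cpu_last_asid));			/* non-zero: pre-rollover id */
	return 0;
}

On rollover, the CPU that exhausted the space takes cpu_last_asid + smp_processor_id() + 1 for itself, the IPI handler gives every other CPU cpu_last_asid + cpu + 1 for its current mm, and cpu_last_asid then jumps by NR_CPUS so the per-CPU values cannot collide.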
diff --git a/arch/arm/mm/dma-mapping.c b/arch/arm/mm/dma-mapping.c
index 64daef2173bd..0da7eccf7749 100644
--- a/arch/arm/mm/dma-mapping.c
+++ b/arch/arm/mm/dma-mapping.c
@@ -29,9 +29,6 @@
 #error "CONSISTENT_DMA_SIZE must be multiple of 2MiB"
 #endif
 
-#define CONSISTENT_END	(0xffe00000)
-#define CONSISTENT_BASE	(CONSISTENT_END - CONSISTENT_DMA_SIZE)
-
 #define CONSISTENT_OFFSET(x)	(((unsigned long)(x) - CONSISTENT_BASE) >> PAGE_SHIFT)
 #define CONSISTENT_PTE_INDEX(x) (((unsigned long)(x) - CONSISTENT_BASE) >> PGDIR_SHIFT)
 #define NUM_CONSISTENT_PTES (CONSISTENT_DMA_SIZE >> PGDIR_SHIFT)
diff --git a/arch/arm/mm/init.c b/arch/arm/mm/init.c
index a04ffbbbe253..7829cb5425f5 100644
--- a/arch/arm/mm/init.c
+++ b/arch/arm/mm/init.c
@@ -23,6 +23,7 @@
 #include <asm/setup.h>
 #include <asm/sizes.h>
 #include <asm/tlb.h>
+#include <asm/fixmap.h>
 
 #include <asm/mach/arch.h>
 #include <asm/mach/map.h>
@@ -32,19 +33,21 @@
 static unsigned long phys_initrd_start __initdata = 0;
 static unsigned long phys_initrd_size __initdata = 0;
 
-static void __init early_initrd(char **p)
+static int __init early_initrd(char *p)
 {
 	unsigned long start, size;
+	char *endp;
 
-	start = memparse(*p, p);
-	if (**p == ',') {
-		size = memparse((*p) + 1, p);
+	start = memparse(p, &endp);
+	if (*endp == ',') {
+		size = memparse(endp + 1, NULL);
 
 		phys_initrd_start = start;
 		phys_initrd_size = size;
 	}
+	return 0;
 }
-__early_param("initrd=", early_initrd);
+early_param("initrd", early_initrd);
 
 static int __init parse_tag_initrd(const struct tag *tag)
 {
@@ -560,7 +563,7 @@ static void __init free_unused_memmap_node(int node, struct meminfo *mi)
  */
 void __init mem_init(void)
 {
-	unsigned int codesize, datasize, initsize;
+	unsigned long reserved_pages, free_pages;
 	int i, node;
 
 #ifndef CONFIG_DISCONTIGMEM
@@ -596,6 +599,33 @@ void __init mem_init(void)
 	totalram_pages += totalhigh_pages;
 #endif
 
+	reserved_pages = free_pages = 0;
+
+	for_each_online_node(node) {
+		pg_data_t *n = NODE_DATA(node);
+		struct page *map = pgdat_page_nr(n, 0) - n->node_start_pfn;
+
+		for_each_nodebank(i, &meminfo, node) {
+			struct membank *bank = &meminfo.bank[i];
+			unsigned int pfn1, pfn2;
+			struct page *page, *end;
+
+			pfn1 = bank_pfn_start(bank);
+			pfn2 = bank_pfn_end(bank);
+
+			page = map + pfn1;
+			end = map + pfn2;
+
+			do {
+				if (PageReserved(page))
+					reserved_pages++;
+				else if (!page_count(page))
+					free_pages++;
+				page++;
+			} while (page < end);
+		}
+	}
+
 	/*
 	 * Since our memory may not be contiguous, calculate the
 	 * real number of pages we have in this system
@@ -608,16 +638,71 @@ void __init mem_init(void)
 	}
 	printk(" = %luMB total\n", num_physpages >> (20 - PAGE_SHIFT));
 
-	codesize = _etext - _text;
-	datasize = _end - _data;
-	initsize = __init_end - __init_begin;
-
-	printk(KERN_NOTICE "Memory: %luKB available (%dK code, "
-		"%dK data, %dK init, %luK highmem)\n",
-		nr_free_pages() << (PAGE_SHIFT-10), codesize >> 10,
-		datasize >> 10, initsize >> 10,
+	printk(KERN_NOTICE "Memory: %luk/%luk available, %luk reserved, %luK highmem\n",
+		nr_free_pages() << (PAGE_SHIFT-10),
+		free_pages << (PAGE_SHIFT-10),
+		reserved_pages << (PAGE_SHIFT-10),
 		totalhigh_pages << (PAGE_SHIFT-10));
 
+#define MLK(b, t) b, t, ((t) - (b)) >> 10
+#define MLM(b, t) b, t, ((t) - (b)) >> 20
+#define MLK_ROUNDUP(b, t) b, t, DIV_ROUND_UP(((t) - (b)), SZ_1K)
+
+	printk(KERN_NOTICE "Virtual kernel memory layout:\n"
+			"    vector  : 0x%08lx - 0x%08lx   (%4ld kB)\n"
+			"    fixmap  : 0x%08lx - 0x%08lx   (%4ld kB)\n"
+#ifdef CONFIG_MMU
+			"    DMA     : 0x%08lx - 0x%08lx   (%4ld MB)\n"
+#endif
+			"    vmalloc : 0x%08lx - 0x%08lx   (%4ld MB)\n"
+			"    lowmem  : 0x%08lx - 0x%08lx   (%4ld MB)\n"
+#ifdef CONFIG_HIGHMEM
+			"    pkmap   : 0x%08lx - 0x%08lx   (%4ld MB)\n"
+#endif
+			"    modules : 0x%08lx - 0x%08lx   (%4ld MB)\n"
+			"      .init : 0x%p" " - 0x%p" "   (%4d kB)\n"
+			"      .text : 0x%p" " - 0x%p" "   (%4d kB)\n"
+			"      .data : 0x%p" " - 0x%p" "   (%4d kB)\n",
+
+			MLK(UL(CONFIG_VECTORS_BASE), UL(CONFIG_VECTORS_BASE) +
+				(PAGE_SIZE)),
+			MLK(FIXADDR_START, FIXADDR_TOP),
+#ifdef CONFIG_MMU
+			MLM(CONSISTENT_BASE, CONSISTENT_END),
+#endif
+			MLM(VMALLOC_START, VMALLOC_END),
+			MLM(PAGE_OFFSET, (unsigned long)high_memory),
+#ifdef CONFIG_HIGHMEM
+			MLM(PKMAP_BASE, (PKMAP_BASE) + (LAST_PKMAP) *
+				(PAGE_SIZE)),
+#endif
+			MLM(MODULES_VADDR, MODULES_END),
+
+			MLK_ROUNDUP(__init_begin, __init_end),
+			MLK_ROUNDUP(_text, _etext),
+			MLK_ROUNDUP(_data, _edata));
+
+#undef MLK
+#undef MLM
+#undef MLK_ROUNDUP
+
+	/*
+	 * Check boundaries twice: Some fundamental inconsistencies can
+	 * be detected at build time already.
+	 */
+#ifdef CONFIG_MMU
+	BUILD_BUG_ON(VMALLOC_END > CONSISTENT_BASE);
+	BUG_ON(VMALLOC_END > CONSISTENT_BASE);
+
+	BUILD_BUG_ON(TASK_SIZE > MODULES_VADDR);
+	BUG_ON(TASK_SIZE > MODULES_VADDR);
+#endif
+
+#ifdef CONFIG_HIGHMEM
+	BUILD_BUG_ON(PKMAP_BASE + LAST_PKMAP * PAGE_SIZE > PAGE_OFFSET);
+	BUG_ON(PKMAP_BASE + LAST_PKMAP * PAGE_SIZE > PAGE_OFFSET);
+#endif
+
 	if (PAGE_SIZE >= 16384 && num_physpages <= 128) {
 		extern int sysctl_overcommit_memory;
 		/*
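
The MLK/MLM/MLK_ROUNDUP helpers above keep the layout printk readable by expanding to three printf arguments at once: the start address, the end address, and the size scaled to kB or MB, so each macro use fills one "0x%08lx - 0x%08lx (%4ld ..)" slot in the format string. A standalone illustration with made-up addresses:

#include <stdio.h>

#define MLK(b, t) b, t, ((t) - (b)) >> 10	/* start, end, size in kB */
#define MLM(b, t) b, t, ((t) - (b)) >> 20	/* start, end, size in MB */

int main(void)
{
	unsigned long lowmem_start = 0xc0000000UL, lowmem_end = 0xe0000000UL;

	/* One MLM() use supplies all three arguments for the three conversions. */
	printf("    lowmem  : 0x%08lx - 0x%08lx   (%4ld MB)\n",
	       MLM(lowmem_start, lowmem_end));
	return 0;
}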
diff --git a/arch/arm/mm/ioremap.c b/arch/arm/mm/ioremap.c
index 0ab75c60f7cf..28c8b950ef04 100644
--- a/arch/arm/mm/ioremap.c
+++ b/arch/arm/mm/ioremap.c
@@ -139,8 +139,8 @@ void __check_kvm_seq(struct mm_struct *mm)
  * which requires the new ioremap'd region to be referenced, the CPU will
  * reference the _old_ region.
  *
- * Note that get_vm_area() allocates a guard 4K page, so we need to mask
- * the size back to 1MB aligned or we will overflow in the loop below.
+ * Note that get_vm_area_caller() allocates a guard 4K page, so we need to
+ * mask the size back to 1MB aligned or we will overflow in the loop below.
  */
 static void unmap_area_sections(unsigned long virt, unsigned long size)
 {
@@ -254,22 +254,8 @@ remap_area_supersections(unsigned long virt, unsigned long pfn,
 }
 #endif
 
-
-/*
- * Remap an arbitrary physical address space into the kernel virtual
- * address space. Needed when the kernel wants to access high addresses
- * directly.
- *
- * NOTE! We need to allow non-page-aligned mappings too: we will obviously
- * have to convert them into an offset in a page-aligned mapping, but the
- * caller shouldn't need to know that small detail.
- *
- * 'flags' are the extra L_PTE_ flags that you want to specify for this
- * mapping. See <asm/pgtable.h> for more information.
- */
-void __iomem *
-__arm_ioremap_pfn(unsigned long pfn, unsigned long offset, size_t size,
-		  unsigned int mtype)
+void __iomem * __arm_ioremap_pfn_caller(unsigned long pfn,
+	unsigned long offset, size_t size, unsigned int mtype, void *caller)
 {
 	const struct mem_type *type;
 	int err;
@@ -291,7 +277,7 @@ __arm_ioremap_pfn(unsigned long pfn, unsigned long offset, size_t size,
 	 */
 	size = PAGE_ALIGN(offset + size);
 
-	area = get_vm_area(size, VM_IOREMAP);
+	area = get_vm_area_caller(size, VM_IOREMAP, caller);
 	if (!area)
 		return NULL;
 	addr = (unsigned long)area->addr;
@@ -318,10 +304,9 @@ __arm_ioremap_pfn(unsigned long pfn, unsigned long offset, size_t size,
 	flush_cache_vmap(addr, addr + size);
 	return (void __iomem *) (offset + addr);
 }
-EXPORT_SYMBOL(__arm_ioremap_pfn);
 
-void __iomem *
-__arm_ioremap(unsigned long phys_addr, size_t size, unsigned int mtype)
+void __iomem *__arm_ioremap_caller(unsigned long phys_addr, size_t size,
+	unsigned int mtype, void *caller)
 {
 	unsigned long last_addr;
 	unsigned long offset = phys_addr & ~PAGE_MASK;
@@ -334,7 +319,33 @@ __arm_ioremap(unsigned long phys_addr, size_t size, unsigned int mtype)
 	if (!size || last_addr < phys_addr)
 		return NULL;
 
-	return __arm_ioremap_pfn(pfn, offset, size, mtype);
+	return __arm_ioremap_pfn_caller(pfn, offset, size, mtype,
+			caller);
+}
+
+/*
+ * Remap an arbitrary physical address space into the kernel virtual
+ * address space. Needed when the kernel wants to access high addresses
+ * directly.
+ *
+ * NOTE! We need to allow non-page-aligned mappings too: we will obviously
+ * have to convert them into an offset in a page-aligned mapping, but the
+ * caller shouldn't need to know that small detail.
+ */
+void __iomem *
+__arm_ioremap_pfn(unsigned long pfn, unsigned long offset, size_t size,
+		  unsigned int mtype)
+{
+	return __arm_ioremap_pfn_caller(pfn, offset, size, mtype,
+			__builtin_return_address(0));
+}
+EXPORT_SYMBOL(__arm_ioremap_pfn);
+
+void __iomem *
+__arm_ioremap(unsigned long phys_addr, size_t size, unsigned int mtype)
+{
+	return __arm_ioremap_caller(phys_addr, size, mtype,
+			__builtin_return_address(0));
 }
 EXPORT_SYMBOL(__arm_ioremap);
 
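
The ioremap.c restructuring threads a caller pointer down to get_vm_area_caller(), so the vm_struct records the address of whoever called __arm_ioremap()/__arm_ioremap_pfn() rather than a line inside the ioremap internals; that is the address /proc/vmallocinfo resolves to a symbol. The idiom is simply to capture __builtin_return_address(0) in the thin exported wrappers and pass it through. A minimal standalone illustration of the same pattern (the function names here are invented for the example):

#include <stdio.h>
#include <stddef.h>

/* Worker takes an explicit 'caller' so the mapping can be attributed to it. */
static void *map_region_caller(unsigned long phys, size_t size, void *caller)
{
	printf("mapping %#lx (+%zu bytes) requested from %p\n", phys, size, caller);
	return NULL;	/* a real implementation would return the mapping */
}

/* Thin public wrapper: its own return address is the real caller. */
static void *map_region(unsigned long phys, size_t size)
{
	return map_region_caller(phys, size, __builtin_return_address(0));
}

int main(void)
{
	map_region(0x48000000UL, 4096);	/* attributed to main(), not to map_region() */
	return 0;
}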
diff --git a/arch/arm/mm/mmu.c b/arch/arm/mm/mmu.c
index 1708da82da96..88f5d71248d9 100644
--- a/arch/arm/mm/mmu.c
+++ b/arch/arm/mm/mmu.c
@@ -100,18 +100,17 @@ static struct cachepolicy cache_policies[] __initdata = {
  * writebuffer to be turned off. (Note: the write
  * buffer should not be on and the cache off).
  */
-static void __init early_cachepolicy(char **p)
+static int __init early_cachepolicy(char *p)
 {
 	int i;
 
 	for (i = 0; i < ARRAY_SIZE(cache_policies); i++) {
 		int len = strlen(cache_policies[i].policy);
 
-		if (memcmp(*p, cache_policies[i].policy, len) == 0) {
+		if (memcmp(p, cache_policies[i].policy, len) == 0) {
 			cachepolicy = i;
 			cr_alignment &= ~cache_policies[i].cr_mask;
 			cr_no_alignment &= ~cache_policies[i].cr_mask;
-			*p += len;
 			break;
 		}
 	}
@@ -130,36 +129,37 @@ static void __init early_cachepolicy(char **p)
 	}
 	flush_cache_all();
 	set_cr(cr_alignment);
+	return 0;
 }
-__early_param("cachepolicy=", early_cachepolicy);
+early_param("cachepolicy", early_cachepolicy);
 
-static void __init early_nocache(char **__unused)
+static int __init early_nocache(char *__unused)
 {
 	char *p = "buffered";
 	printk(KERN_WARNING "nocache is deprecated; use cachepolicy=%s\n", p);
-	early_cachepolicy(&p);
+	early_cachepolicy(p);
+	return 0;
 }
-__early_param("nocache", early_nocache);
+early_param("nocache", early_nocache);
 
-static void __init early_nowrite(char **__unused)
+static int __init early_nowrite(char *__unused)
 {
 	char *p = "uncached";
 	printk(KERN_WARNING "nowb is deprecated; use cachepolicy=%s\n", p);
-	early_cachepolicy(&p);
+	early_cachepolicy(p);
+	return 0;
 }
-__early_param("nowb", early_nowrite);
+early_param("nowb", early_nowrite);
 
-static void __init early_ecc(char **p)
+static int __init early_ecc(char *p)
 {
-	if (memcmp(*p, "on", 2) == 0) {
+	if (memcmp(p, "on", 2) == 0)
 		ecc_mask = PMD_PROTECTION;
-		*p += 2;
-	} else if (memcmp(*p, "off", 3) == 0) {
+	else if (memcmp(p, "off", 3) == 0)
 		ecc_mask = 0;
-		*p += 3;
-	}
+	return 0;
 }
-__early_param("ecc=", early_ecc);
+early_param("ecc", early_ecc);
 
 static int __init noalign_setup(char *__unused)
 {
@@ -670,9 +670,9 @@ static unsigned long __initdata vmalloc_reserve = SZ_128M;
  * bytes. This can be used to increase (or decrease) the vmalloc
  * area - the default is 128m.
  */
-static void __init early_vmalloc(char **arg)
+static int __init early_vmalloc(char *arg)
 {
-	vmalloc_reserve = memparse(*arg, arg);
+	vmalloc_reserve = memparse(arg, NULL);
 
 	if (vmalloc_reserve < SZ_16M) {
 		vmalloc_reserve = SZ_16M;
@@ -687,8 +687,9 @@ static void __init early_vmalloc(char **arg)
 			"vmalloc area is too big, limiting to %luMB\n",
 			vmalloc_reserve >> 20);
 	}
+	return 0;
 }
-__early_param("vmalloc=", early_vmalloc);
+early_param("vmalloc", early_vmalloc);
 
 #define VMALLOC_MIN	(void *)(VMALLOC_END - vmalloc_reserve)
 
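
The mmu.c and init.c hunks convert the old ARM-private __early_param hooks, which received a char ** and had to advance it past the value, to the generic early_param() interface: the handler now receives just the value string ("initrd=start,size" arrives as "start,size"), parses it with memparse()/memcmp(), and returns 0 once the option is consumed. A sketch of the resulting handler shape for a hypothetical "foo=" option (the option name and variable are illustrative only):

#include <linux/init.h>
#include <linux/kernel.h>

static unsigned long foo_size __initdata = 16 << 20;	/* default 16MB */

/* early_param() handlers get only the value portion of "foo=<size>". */
static int __init early_foo(char *p)
{
	foo_size = memparse(p, NULL);
	return 0;
}
early_param("foo", early_foo);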
diff --git a/arch/arm/mm/nommu.c b/arch/arm/mm/nommu.c
index 374a8311bc84..9bfeb6b9509a 100644
--- a/arch/arm/mm/nommu.c
+++ b/arch/arm/mm/nommu.c
@@ -74,6 +74,12 @@ void __iomem *__arm_ioremap_pfn(unsigned long pfn, unsigned long offset,
 }
 EXPORT_SYMBOL(__arm_ioremap_pfn);
 
+void __iomem *__arm_ioremap_pfn_caller(unsigned long pfn, unsigned long offset,
+			size_t size, unsigned int mtype, void *caller)
+{
+	return __arm_ioremap_pfn(pfn, offset, size, mtype);
+}
+
 void __iomem *__arm_ioremap(unsigned long phys_addr, size_t size,
 			    unsigned int mtype)
 {
@@ -81,6 +87,12 @@ void __iomem *__arm_ioremap(unsigned long phys_addr, size_t size,
 }
 EXPORT_SYMBOL(__arm_ioremap);
 
+void __iomem *__arm_ioremap_caller(unsigned long phys_addr, size_t size,
+				   unsigned int mtype, void *caller)
+{
+	return __arm_ioremap(phys_addr, size, mtype);
+}
+
 void __iounmap(volatile void __iomem *addr)
 {
 }