Diffstat (limited to 'arch/arm/mm')
-rw-r--r--	arch/arm/mm/Kconfig		|  71
-rw-r--r--	arch/arm/mm/Makefile		|   1
-rw-r--r--	arch/arm/mm/abort-ev7.S		|  21
-rw-r--r--	arch/arm/mm/alignment.c		|  93
-rw-r--r--	arch/arm/mm/cache-l2x0.c	|  74
-rw-r--r--	arch/arm/mm/cache-v6.S		|  19
-rw-r--r--	arch/arm/mm/cache-v7.S		|   8
-rw-r--r--	arch/arm/mm/copypage-fa.c	|   2
-rw-r--r--	arch/arm/mm/copypage-feroceon.c	|   4
-rw-r--r--	arch/arm/mm/copypage-v4wb.c	|   4
-rw-r--r--	arch/arm/mm/copypage-v4wt.c	|   4
-rw-r--r--	arch/arm/mm/copypage-v6.c	|   9
-rw-r--r--	arch/arm/mm/copypage-xsc3.c	|   4
-rw-r--r--	arch/arm/mm/discontig.c		|  45
-rw-r--r--	arch/arm/mm/dma-mapping.c	|  40
-rw-r--r--	arch/arm/mm/fault-armv.c	|   3
-rw-r--r--	arch/arm/mm/fault.c		|  54
-rw-r--r--	arch/arm/mm/flush.c		|  25
-rw-r--r--	arch/arm/mm/highmem.c		| 101
-rw-r--r--	arch/arm/mm/init.c		| 467
-rw-r--r--	arch/arm/mm/ioremap.c		|  80
-rw-r--r--	arch/arm/mm/mm.h		|   7
-rw-r--r--	arch/arm/mm/mmap.c		|   4
-rw-r--r--	arch/arm/mm/mmu.c		| 226
-rw-r--r--	arch/arm/mm/nommu.c		|  35
-rw-r--r--	arch/arm/mm/pgd.c		|   1
-rw-r--r--	arch/arm/mm/proc-arm1020.S	|   6
-rw-r--r--	arch/arm/mm/proc-arm1020e.S	|   6
-rw-r--r--	arch/arm/mm/proc-arm1022.S	|   6
-rw-r--r--	arch/arm/mm/proc-arm1026.S	|   6
-rw-r--r--	arch/arm/mm/proc-arm6_7.S	|   2
-rw-r--r--	arch/arm/mm/proc-arm720.S	|   6
-rw-r--r--	arch/arm/mm/proc-arm740.S	|   6
-rw-r--r--	arch/arm/mm/proc-arm7tdmi.S	|   2
-rw-r--r--	arch/arm/mm/proc-arm920.S	|  10
-rw-r--r--	arch/arm/mm/proc-arm922.S	|  10
-rw-r--r--	arch/arm/mm/proc-arm925.S	|   6
-rw-r--r--	arch/arm/mm/proc-arm926.S	|   6
-rw-r--r--	arch/arm/mm/proc-arm940.S	|   6
-rw-r--r--	arch/arm/mm/proc-arm946.S	|   6
-rw-r--r--	arch/arm/mm/proc-arm9tdmi.S	|   2
-rw-r--r--	arch/arm/mm/proc-fa526.S	|   6
-rw-r--r--	arch/arm/mm/proc-feroceon.S	|   7
-rw-r--r--	arch/arm/mm/proc-mohawk.S	|   6
-rw-r--r--	arch/arm/mm/proc-sa110.S	|   8
-rw-r--r--	arch/arm/mm/proc-sa1100.S	|   8
-rw-r--r--	arch/arm/mm/proc-v6.S		|  10
-rw-r--r--	arch/arm/mm/proc-v7.S		|   7
-rw-r--r--	arch/arm/mm/proc-xsc3.S		|   6
-rw-r--r--	arch/arm/mm/proc-xscale.S	|   6
-rw-r--r--	arch/arm/mm/tlb-v7.S		|   8
-rw-r--r--	arch/arm/mm/vmregion.c		|   5
-rw-r--r--	arch/arm/mm/vmregion.h		|   2
53 files changed, 756 insertions(+), 811 deletions(-)
diff --git a/arch/arm/mm/Kconfig b/arch/arm/mm/Kconfig
index c4ed9f93f646..87ec141fcaa6 100644
--- a/arch/arm/mm/Kconfig
+++ b/arch/arm/mm/Kconfig
@@ -572,6 +572,8 @@ config CPU_TLB_V6
 config CPU_TLB_V7
 	bool
 
+config VERIFY_PERMISSION_FAULT
+	bool
 endif
 
 config CPU_HAS_ASID
@@ -715,17 +717,6 @@ config TLS_REG_EMUL
 	  a few prototypes like that in existence) and therefore access to
 	  that required register must be emulated.
 
-config HAS_TLS_REG
-	bool
-	depends on !TLS_REG_EMUL
-	default y if SMP || CPU_32v7
-	help
-	  This selects support for the CP15 thread register.
-	  It is defined to be available on some ARMv6 processors (including
-	  all SMP capable ARMv6's) or later processors. User space may
-	  assume directly accessing that register and always obtain the
-	  expected value only on ARMv7 and above.
-
 config NEEDS_SYSCALL_FOR_CMPXCHG
 	bool
 	help
@@ -733,9 +724,34 @@ config NEEDS_SYSCALL_FOR_CMPXCHG
 	  Forget about fast user space cmpxchg support.
 	  It is just not possible.
 
+config DMA_CACHE_RWFO
+	bool "Enable read/write for ownership DMA cache maintenance"
+	depends on CPU_V6 && SMP
+	default y
+	help
+	  The Snoop Control Unit on ARM11MPCore does not detect the
+	  cache maintenance operations and the dma_{map,unmap}_area()
+	  functions may leave stale cache entries on other CPUs. By
+	  enabling this option, Read or Write For Ownership in the ARMv6
+	  DMA cache maintenance functions is performed. These LDR/STR
+	  instructions change the cache line state to shared or modified
+	  so that the cache operation has the desired effect.
+
+	  Note that the workaround is only valid on processors that do
+	  not perform speculative loads into the D-cache. For such
+	  processors, if cache maintenance operations are not broadcast
+	  in hardware, other workarounds are needed (e.g. cache
+	  maintenance broadcasting in software via FIQ).
+
 config OUTER_CACHE
 	bool
 
+config OUTER_CACHE_SYNC
+	bool
+	help
+	  The outer cache has a outer_cache_fns.sync function pointer
+	  that can be used to drain the write buffer of the outer cache.
+
 config CACHE_FEROCEON_L2
 	bool "Enable the Feroceon L2 cache controller"
 	depends on ARCH_KIRKWOOD || ARCH_MV78XX0
@@ -754,15 +770,17 @@ config CACHE_FEROCEON_L2_WRITETHROUGH
 config CACHE_L2X0
 	bool "Enable the L2x0 outer cache controller"
 	depends on REALVIEW_EB_ARM11MP || MACH_REALVIEW_PB11MP || MACH_REALVIEW_PB1176 || \
-		   REALVIEW_EB_A9MP || ARCH_MX35 || ARCH_MX31 || MACH_REALVIEW_PBX || ARCH_NOMADIK || ARCH_OMAP4
+		   REALVIEW_EB_A9MP || ARCH_MX35 || ARCH_MX31 || MACH_REALVIEW_PBX || \
+		   ARCH_NOMADIK || ARCH_OMAP4 || ARCH_U8500 || ARCH_VEXPRESS_CA9X4
 	default y
 	select OUTER_CACHE
+	select OUTER_CACHE_SYNC
 	help
 	  This option enables the L2x0 PrimeCell.
 
 config CACHE_TAUROS2
 	bool "Enable the Tauros2 L2 cache controller"
-	depends on ARCH_DOVE
+	depends on (ARCH_DOVE || ARCH_MMP)
 	default y
 	select OUTER_CACHE
 	help
@@ -781,3 +799,30 @@ config ARM_L1_CACHE_SHIFT
 	int
 	default 6 if ARM_L1_CACHE_SHIFT_6
 	default 5
+
+config ARM_DMA_MEM_BUFFERABLE
+	bool "Use non-cacheable memory for DMA" if CPU_V6 && !CPU_V7
+	depends on !(MACH_REALVIEW_PB1176 || REALVIEW_EB_ARM11MP || \
+		     MACH_REALVIEW_PB11MP)
+	default y if CPU_V6 || CPU_V7
+	help
+	  Historically, the kernel has used strongly ordered mappings to
+	  provide DMA coherent memory.  With the advent of ARMv7, mapping
+	  memory with differing types results in unpredictable behaviour,
+	  so on these CPUs, this option is forced on.
+
+	  Multiple mappings with differing attributes is also unpredictable
+	  on ARMv6 CPUs, but since they do not have aggressive speculative
+	  prefetch, no harm appears to occur.
+
+	  However, drivers may be missing the necessary barriers for ARMv6,
+	  and therefore turning this on may result in unpredictable driver
+	  behaviour.  Therefore, we offer this as an option.
+
+	  You are recommended say 'Y' here and debug any affected drivers.
+
+config ARCH_HAS_BARRIERS
+	bool
+	help
+	  This option allows the use of custom mandatory barriers
+	  included via the mach/barriers.h file.
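
The OUTER_CACHE_SYNC option added above advertises a sync hook on the outer-cache operations table, which the l2x0 changes later in this diff fill in. A rough sketch of the shape involved (the outer_sync() wrapper shown here is illustrative, not part of this diff):

	struct outer_cache_fns {
		void (*inv_range)(unsigned long, unsigned long);
		void (*clean_range)(unsigned long, unsigned long);
		void (*flush_range)(unsigned long, unsigned long);
		void (*sync)(void);	/* drain the outer cache write buffer */
	};

	extern struct outer_cache_fns outer_cache;

	static inline void outer_sync(void)
	{
		if (outer_cache.sync)	/* no-op when no L2 sync hook is set */
			outer_cache.sync();
	}

Mandatory barriers can then call such a wrapper so that a dsb also drains the PL310 write buffer.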
diff --git a/arch/arm/mm/Makefile b/arch/arm/mm/Makefile
index e8d34a80851c..d63b6c413758 100644
--- a/arch/arm/mm/Makefile
+++ b/arch/arm/mm/Makefile
@@ -15,7 +15,6 @@ endif
15obj-$(CONFIG_MODULES) += proc-syms.o 15obj-$(CONFIG_MODULES) += proc-syms.o
16 16
17obj-$(CONFIG_ALIGNMENT_TRAP) += alignment.o 17obj-$(CONFIG_ALIGNMENT_TRAP) += alignment.o
18obj-$(CONFIG_DISCONTIGMEM) += discontig.o
19obj-$(CONFIG_HIGHMEM) += highmem.o 18obj-$(CONFIG_HIGHMEM) += highmem.o
20 19
21obj-$(CONFIG_CPU_ABRT_NOMMU) += abort-nommu.o 20obj-$(CONFIG_CPU_ABRT_NOMMU) += abort-nommu.o
diff --git a/arch/arm/mm/abort-ev7.S b/arch/arm/mm/abort-ev7.S
index 2e6dc040c654..ec88b157d3bb 100644
--- a/arch/arm/mm/abort-ev7.S
+++ b/arch/arm/mm/abort-ev7.S
@@ -29,5 +29,26 @@ ENTRY(v7_early_abort)
	 * V6 code adjusts the returned DFSR.
	 * New designs should not need to patch up faults.
	 */
+
+#if defined(CONFIG_VERIFY_PERMISSION_FAULT)
+	/*
+	 * Detect erroneous permission failures and fix
+	 */
+	ldr	r3, =0x40d			@ On permission fault
+	and	r3, r1, r3
+	cmp	r3, #0x0d
+	movne	pc, lr
+
+	mcr	p15, 0, r0, c7, c8, 0		@ Retranslate FAR
+	isb
+	mrc	p15, 0, r2, c7, c4, 0		@ Read the PAR
+	and	r3, r2, #0x7b			@ On translation fault
+	cmp	r3, #0x0b
+	movne	pc, lr
+	bic	r1, r1, #0xf			@ Fix up FSR FS[5:0]
+	and	r2, r2, #0x7e
+	orr	r1, r1, r2, LSR #1
+#endif
+
 	mov	pc, lr
 ENDPROC(v7_early_abort)
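
In C terms, the CONFIG_VERIFY_PERMISSION_FAULT block above performs roughly the following check (a sketch for clarity only; the constants come from the assembly, the variable names do not):

	/* dfsr is r1; par is the PAR read back after retranslating the FAR (r2). */
	uint32_t fs = dfsr & 0x40d;			/* keep FS[10] and FS[3:0] */
	if (fs == 0x0d &&				/* reported as permission fault */
	    (par & 0x7b) == 0x0b) {			/* but PAR says translation fault */
		dfsr &= ~0xf;				/* fix up FSR FS[5:0] ...       */
		dfsr |= (par & 0x7e) >> 1;		/* ... from the PAR fault status */
	}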
diff --git a/arch/arm/mm/alignment.c b/arch/arm/mm/alignment.c
index edddd66faac6..d073b64ae87e 100644
--- a/arch/arm/mm/alignment.c
+++ b/arch/arm/mm/alignment.c
@@ -17,6 +17,7 @@
17#include <linux/errno.h> 17#include <linux/errno.h>
18#include <linux/string.h> 18#include <linux/string.h>
19#include <linux/proc_fs.h> 19#include <linux/proc_fs.h>
20#include <linux/seq_file.h>
20#include <linux/init.h> 21#include <linux/init.h>
21#include <linux/sched.h> 22#include <linux/sched.h>
22#include <linux/uaccess.h> 23#include <linux/uaccess.h>
@@ -94,36 +95,29 @@ static const char *usermode_action[] = {
94 "signal+warn" 95 "signal+warn"
95}; 96};
96 97
97static int 98static int alignment_proc_show(struct seq_file *m, void *v)
98proc_alignment_read(char *page, char **start, off_t off, int count, int *eof,
99 void *data)
100{ 99{
101 char *p = page; 100 seq_printf(m, "User:\t\t%lu\n", ai_user);
102 int len; 101 seq_printf(m, "System:\t\t%lu\n", ai_sys);
103 102 seq_printf(m, "Skipped:\t%lu\n", ai_skipped);
104 p += sprintf(p, "User:\t\t%lu\n", ai_user); 103 seq_printf(m, "Half:\t\t%lu\n", ai_half);
105 p += sprintf(p, "System:\t\t%lu\n", ai_sys); 104 seq_printf(m, "Word:\t\t%lu\n", ai_word);
106 p += sprintf(p, "Skipped:\t%lu\n", ai_skipped);
107 p += sprintf(p, "Half:\t\t%lu\n", ai_half);
108 p += sprintf(p, "Word:\t\t%lu\n", ai_word);
109 if (cpu_architecture() >= CPU_ARCH_ARMv5TE) 105 if (cpu_architecture() >= CPU_ARCH_ARMv5TE)
110 p += sprintf(p, "DWord:\t\t%lu\n", ai_dword); 106 seq_printf(m, "DWord:\t\t%lu\n", ai_dword);
111 p += sprintf(p, "Multi:\t\t%lu\n", ai_multi); 107 seq_printf(m, "Multi:\t\t%lu\n", ai_multi);
112 p += sprintf(p, "User faults:\t%i (%s)\n", ai_usermode, 108 seq_printf(m, "User faults:\t%i (%s)\n", ai_usermode,
113 usermode_action[ai_usermode]); 109 usermode_action[ai_usermode]);
114 110
115 len = (p - page) - off; 111 return 0;
116 if (len < 0) 112}
117 len = 0;
118
119 *eof = (len <= count) ? 1 : 0;
120 *start = page + off;
121 113
122 return len; 114static int alignment_proc_open(struct inode *inode, struct file *file)
115{
116 return single_open(file, alignment_proc_show, NULL);
123} 117}
124 118
125static int proc_alignment_write(struct file *file, const char __user *buffer, 119static ssize_t alignment_proc_write(struct file *file, const char __user *buffer,
126 unsigned long count, void *data) 120 size_t count, loff_t *pos)
127{ 121{
128 char mode; 122 char mode;
129 123
@@ -136,6 +130,13 @@ static int proc_alignment_write(struct file *file, const char __user *buffer,
136 return count; 130 return count;
137} 131}
138 132
133static const struct file_operations alignment_proc_fops = {
134 .open = alignment_proc_open,
135 .read = seq_read,
136 .llseek = seq_lseek,
137 .release = single_release,
138 .write = alignment_proc_write,
139};
139#endif /* CONFIG_PROC_FS */ 140#endif /* CONFIG_PROC_FS */
140 141
141union offset_union { 142union offset_union {
@@ -166,15 +167,15 @@ union offset_union {
166 THUMB( "1: "ins" %1, [%2]\n" ) \ 167 THUMB( "1: "ins" %1, [%2]\n" ) \
167 THUMB( " add %2, %2, #1\n" ) \ 168 THUMB( " add %2, %2, #1\n" ) \
168 "2:\n" \ 169 "2:\n" \
169 " .section .fixup,\"ax\"\n" \ 170 " .pushsection .fixup,\"ax\"\n" \
170 " .align 2\n" \ 171 " .align 2\n" \
171 "3: mov %0, #1\n" \ 172 "3: mov %0, #1\n" \
172 " b 2b\n" \ 173 " b 2b\n" \
173 " .previous\n" \ 174 " .popsection\n" \
174 " .section __ex_table,\"a\"\n" \ 175 " .pushsection __ex_table,\"a\"\n" \
175 " .align 3\n" \ 176 " .align 3\n" \
176 " .long 1b, 3b\n" \ 177 " .long 1b, 3b\n" \
177 " .previous\n" \ 178 " .popsection\n" \
178 : "=r" (err), "=&r" (val), "=r" (addr) \ 179 : "=r" (err), "=&r" (val), "=r" (addr) \
179 : "0" (err), "2" (addr)) 180 : "0" (err), "2" (addr))
180 181
@@ -226,16 +227,16 @@ union offset_union {
226 " mov %1, %1, "NEXT_BYTE"\n" \ 227 " mov %1, %1, "NEXT_BYTE"\n" \
227 "2: "ins" %1, [%2]\n" \ 228 "2: "ins" %1, [%2]\n" \
228 "3:\n" \ 229 "3:\n" \
229 " .section .fixup,\"ax\"\n" \ 230 " .pushsection .fixup,\"ax\"\n" \
230 " .align 2\n" \ 231 " .align 2\n" \
231 "4: mov %0, #1\n" \ 232 "4: mov %0, #1\n" \
232 " b 3b\n" \ 233 " b 3b\n" \
233 " .previous\n" \ 234 " .popsection\n" \
234 " .section __ex_table,\"a\"\n" \ 235 " .pushsection __ex_table,\"a\"\n" \
235 " .align 3\n" \ 236 " .align 3\n" \
236 " .long 1b, 4b\n" \ 237 " .long 1b, 4b\n" \
237 " .long 2b, 4b\n" \ 238 " .long 2b, 4b\n" \
238 " .previous\n" \ 239 " .popsection\n" \
239 : "=r" (err), "=&r" (v), "=&r" (a) \ 240 : "=r" (err), "=&r" (v), "=&r" (a) \
240 : "0" (err), "1" (v), "2" (a)); \ 241 : "0" (err), "1" (v), "2" (a)); \
241 if (err) \ 242 if (err) \
@@ -266,18 +267,18 @@ union offset_union {
266 " mov %1, %1, "NEXT_BYTE"\n" \ 267 " mov %1, %1, "NEXT_BYTE"\n" \
267 "4: "ins" %1, [%2]\n" \ 268 "4: "ins" %1, [%2]\n" \
268 "5:\n" \ 269 "5:\n" \
269 " .section .fixup,\"ax\"\n" \ 270 " .pushsection .fixup,\"ax\"\n" \
270 " .align 2\n" \ 271 " .align 2\n" \
271 "6: mov %0, #1\n" \ 272 "6: mov %0, #1\n" \
272 " b 5b\n" \ 273 " b 5b\n" \
273 " .previous\n" \ 274 " .popsection\n" \
274 " .section __ex_table,\"a\"\n" \ 275 " .pushsection __ex_table,\"a\"\n" \
275 " .align 3\n" \ 276 " .align 3\n" \
276 " .long 1b, 6b\n" \ 277 " .long 1b, 6b\n" \
277 " .long 2b, 6b\n" \ 278 " .long 2b, 6b\n" \
278 " .long 3b, 6b\n" \ 279 " .long 3b, 6b\n" \
279 " .long 4b, 6b\n" \ 280 " .long 4b, 6b\n" \
280 " .previous\n" \ 281 " .popsection\n" \
281 : "=r" (err), "=&r" (v), "=&r" (a) \ 282 : "=r" (err), "=&r" (v), "=&r" (a) \
282 : "0" (err), "1" (v), "2" (a)); \ 283 : "0" (err), "1" (v), "2" (a)); \
283 if (err) \ 284 if (err) \
@@ -901,12 +902,10 @@ static int __init alignment_init(void)
901#ifdef CONFIG_PROC_FS 902#ifdef CONFIG_PROC_FS
902 struct proc_dir_entry *res; 903 struct proc_dir_entry *res;
903 904
904 res = create_proc_entry("cpu/alignment", S_IWUSR | S_IRUGO, NULL); 905 res = proc_create("cpu/alignment", S_IWUSR | S_IRUGO, NULL,
906 &alignment_proc_fops);
905 if (!res) 907 if (!res)
906 return -ENOMEM; 908 return -ENOMEM;
907
908 res->read_proc = proc_alignment_read;
909 res->write_proc = proc_alignment_write;
910#endif 909#endif
911 910
912 /* 911 /*
@@ -925,8 +924,20 @@ static int __init alignment_init(void)
925 ai_usermode = UM_FIXUP; 924 ai_usermode = UM_FIXUP;
926 } 925 }
927 926
928 hook_fault_code(1, do_alignment, SIGILL, "alignment exception"); 927 hook_fault_code(1, do_alignment, SIGBUS, BUS_ADRALN,
929 hook_fault_code(3, do_alignment, SIGILL, "alignment exception"); 928 "alignment exception");
929
930 /*
931 * ARMv6K and ARMv7 use fault status 3 (0b00011) as Access Flag section
932 * fault, not as alignment error.
933 *
934 * TODO: handle ARMv6K properly. Runtime check for 'K' extension is
935 * needed.
936 */
937 if (cpu_architecture() <= CPU_ARCH_ARMv6) {
938 hook_fault_code(3, do_alignment, SIGBUS, BUS_ADRALN,
939 "alignment exception");
940 }
930 941
931 return 0; 942 return 0;
932} 943}
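
The alignment.c conversion above follows the standard single_open() pattern for small procfs files. A self-contained sketch of the same shape, with generic placeholder names (not kernel code from this diff):

	#include <linux/module.h>
	#include <linux/proc_fs.h>
	#include <linux/seq_file.h>

	static int demo_show(struct seq_file *m, void *v)
	{
		seq_printf(m, "value:\t%d\n", 42);	/* one seq_printf() per row */
		return 0;
	}

	static int demo_open(struct inode *inode, struct file *file)
	{
		return single_open(file, demo_show, NULL);
	}

	static const struct file_operations demo_fops = {
		.open		= demo_open,
		.read		= seq_read,	/* seq_file does the buffering   */
		.llseek		= seq_lseek,	/* and offset handling that the  */
		.release	= single_release, /* old read_proc did by hand   */
	};

	static int __init demo_init(void)
	{
		return proc_create("demo", S_IRUGO, NULL, &demo_fops) ? 0 : -ENOMEM;
	}
	module_init(demo_init);

The gain is exactly what the diff shows: all of the manual page/offset/eof arithmetic in the old read_proc handler disappears.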
diff --git a/arch/arm/mm/cache-l2x0.c b/arch/arm/mm/cache-l2x0.c
index 07334632d3e2..9982eb385c0f 100644
--- a/arch/arm/mm/cache-l2x0.c
+++ b/arch/arm/mm/cache-l2x0.c
@@ -27,18 +27,19 @@
27 27
28static void __iomem *l2x0_base; 28static void __iomem *l2x0_base;
29static DEFINE_SPINLOCK(l2x0_lock); 29static DEFINE_SPINLOCK(l2x0_lock);
30static uint32_t l2x0_way_mask; /* Bitmask of active ways */
30 31
31static inline void cache_wait(void __iomem *reg, unsigned long mask) 32static inline void cache_wait(void __iomem *reg, unsigned long mask)
32{ 33{
33 /* wait for the operation to complete */ 34 /* wait for the operation to complete */
34 while (readl(reg) & mask) 35 while (readl_relaxed(reg) & mask)
35 ; 36 ;
36} 37}
37 38
38static inline void cache_sync(void) 39static inline void cache_sync(void)
39{ 40{
40 void __iomem *base = l2x0_base; 41 void __iomem *base = l2x0_base;
41 writel(0, base + L2X0_CACHE_SYNC); 42 writel_relaxed(0, base + L2X0_CACHE_SYNC);
42 cache_wait(base + L2X0_CACHE_SYNC, 1); 43 cache_wait(base + L2X0_CACHE_SYNC, 1);
43} 44}
44 45
@@ -46,14 +47,14 @@ static inline void l2x0_clean_line(unsigned long addr)
46{ 47{
47 void __iomem *base = l2x0_base; 48 void __iomem *base = l2x0_base;
48 cache_wait(base + L2X0_CLEAN_LINE_PA, 1); 49 cache_wait(base + L2X0_CLEAN_LINE_PA, 1);
49 writel(addr, base + L2X0_CLEAN_LINE_PA); 50 writel_relaxed(addr, base + L2X0_CLEAN_LINE_PA);
50} 51}
51 52
52static inline void l2x0_inv_line(unsigned long addr) 53static inline void l2x0_inv_line(unsigned long addr)
53{ 54{
54 void __iomem *base = l2x0_base; 55 void __iomem *base = l2x0_base;
55 cache_wait(base + L2X0_INV_LINE_PA, 1); 56 cache_wait(base + L2X0_INV_LINE_PA, 1);
56 writel(addr, base + L2X0_INV_LINE_PA); 57 writel_relaxed(addr, base + L2X0_INV_LINE_PA);
57} 58}
58 59
59#ifdef CONFIG_PL310_ERRATA_588369 60#ifdef CONFIG_PL310_ERRATA_588369
@@ -74,9 +75,9 @@ static inline void l2x0_flush_line(unsigned long addr)
74 75
75 /* Clean by PA followed by Invalidate by PA */ 76 /* Clean by PA followed by Invalidate by PA */
76 cache_wait(base + L2X0_CLEAN_LINE_PA, 1); 77 cache_wait(base + L2X0_CLEAN_LINE_PA, 1);
77 writel(addr, base + L2X0_CLEAN_LINE_PA); 78 writel_relaxed(addr, base + L2X0_CLEAN_LINE_PA);
78 cache_wait(base + L2X0_INV_LINE_PA, 1); 79 cache_wait(base + L2X0_INV_LINE_PA, 1);
79 writel(addr, base + L2X0_INV_LINE_PA); 80 writel_relaxed(addr, base + L2X0_INV_LINE_PA);
80} 81}
81#else 82#else
82 83
@@ -89,18 +90,27 @@ static inline void l2x0_flush_line(unsigned long addr)
89{ 90{
90 void __iomem *base = l2x0_base; 91 void __iomem *base = l2x0_base;
91 cache_wait(base + L2X0_CLEAN_INV_LINE_PA, 1); 92 cache_wait(base + L2X0_CLEAN_INV_LINE_PA, 1);
92 writel(addr, base + L2X0_CLEAN_INV_LINE_PA); 93 writel_relaxed(addr, base + L2X0_CLEAN_INV_LINE_PA);
93} 94}
94#endif 95#endif
95 96
97static void l2x0_cache_sync(void)
98{
99 unsigned long flags;
100
101 spin_lock_irqsave(&l2x0_lock, flags);
102 cache_sync();
103 spin_unlock_irqrestore(&l2x0_lock, flags);
104}
105
96static inline void l2x0_inv_all(void) 106static inline void l2x0_inv_all(void)
97{ 107{
98 unsigned long flags; 108 unsigned long flags;
99 109
100 /* invalidate all ways */ 110 /* invalidate all ways */
101 spin_lock_irqsave(&l2x0_lock, flags); 111 spin_lock_irqsave(&l2x0_lock, flags);
102 writel(0xff, l2x0_base + L2X0_INV_WAY); 112 writel_relaxed(l2x0_way_mask, l2x0_base + L2X0_INV_WAY);
103 cache_wait(l2x0_base + L2X0_INV_WAY, 0xff); 113 cache_wait(l2x0_base + L2X0_INV_WAY, l2x0_way_mask);
104 cache_sync(); 114 cache_sync();
105 spin_unlock_irqrestore(&l2x0_lock, flags); 115 spin_unlock_irqrestore(&l2x0_lock, flags);
106} 116}
@@ -199,32 +209,62 @@ static void l2x0_flush_range(unsigned long start, unsigned long end)
199void __init l2x0_init(void __iomem *base, __u32 aux_val, __u32 aux_mask) 209void __init l2x0_init(void __iomem *base, __u32 aux_val, __u32 aux_mask)
200{ 210{
201 __u32 aux; 211 __u32 aux;
212 __u32 cache_id;
213 int ways;
214 const char *type;
202 215
203 l2x0_base = base; 216 l2x0_base = base;
204 217
218 cache_id = readl_relaxed(l2x0_base + L2X0_CACHE_ID);
219 aux = readl_relaxed(l2x0_base + L2X0_AUX_CTRL);
220
221 aux &= aux_mask;
222 aux |= aux_val;
223
224 /* Determine the number of ways */
225 switch (cache_id & L2X0_CACHE_ID_PART_MASK) {
226 case L2X0_CACHE_ID_PART_L310:
227 if (aux & (1 << 16))
228 ways = 16;
229 else
230 ways = 8;
231 type = "L310";
232 break;
233 case L2X0_CACHE_ID_PART_L210:
234 ways = (aux >> 13) & 0xf;
235 type = "L210";
236 break;
237 default:
238 /* Assume unknown chips have 8 ways */
239 ways = 8;
240 type = "L2x0 series";
241 break;
242 }
243
244 l2x0_way_mask = (1 << ways) - 1;
245
205 /* 246 /*
206 * Check if l2x0 controller is already enabled. 247 * Check if l2x0 controller is already enabled.
207 * If you are booting from non-secure mode 248 * If you are booting from non-secure mode
208 * accessing the below registers will fault. 249 * accessing the below registers will fault.
209 */ 250 */
210 if (!(readl(l2x0_base + L2X0_CTRL) & 1)) { 251 if (!(readl_relaxed(l2x0_base + L2X0_CTRL) & 1)) {
211 252
212 /* l2x0 controller is disabled */ 253 /* l2x0 controller is disabled */
213 254 writel_relaxed(aux, l2x0_base + L2X0_AUX_CTRL);
214 aux = readl(l2x0_base + L2X0_AUX_CTRL);
215 aux &= aux_mask;
216 aux |= aux_val;
217 writel(aux, l2x0_base + L2X0_AUX_CTRL);
218 255
219 l2x0_inv_all(); 256 l2x0_inv_all();
220 257
221 /* enable L2X0 */ 258 /* enable L2X0 */
222 writel(1, l2x0_base + L2X0_CTRL); 259 writel_relaxed(1, l2x0_base + L2X0_CTRL);
223 } 260 }
224 261
225 outer_cache.inv_range = l2x0_inv_range; 262 outer_cache.inv_range = l2x0_inv_range;
226 outer_cache.clean_range = l2x0_clean_range; 263 outer_cache.clean_range = l2x0_clean_range;
227 outer_cache.flush_range = l2x0_flush_range; 264 outer_cache.flush_range = l2x0_flush_range;
265 outer_cache.sync = l2x0_cache_sync;
228 266
229 printk(KERN_INFO "L2X0 cache controller enabled\n"); 267 printk(KERN_INFO "%s cache controller enabled\n", type);
268 printk(KERN_INFO "l2x0: %d ways, CACHE_ID 0x%08x, AUX_CTRL 0x%08x\n",
269 ways, cache_id, aux);
230} 270}
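
Worked example for the way-mask logic above: on an L310 with the associativity bit (AUX_CTRL bit 16) set, ways = 16, so

	l2x0_way_mask = (1 << 16) - 1;	/* 0xffff: one bit per way */

and l2x0_inv_all() now writes 0xffff to L2X0_INV_WAY. The old code always wrote the 8-way constant 0xff, which would have left half of a 16-way L310 untouched during invalidate-by-way.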
diff --git a/arch/arm/mm/cache-v6.S b/arch/arm/mm/cache-v6.S
index 9d89c67a1cc3..86aa689ef1aa 100644
--- a/arch/arm/mm/cache-v6.S
+++ b/arch/arm/mm/cache-v6.S
@@ -211,6 +211,10 @@ v6_dma_inv_range:
 	mcrne	p15, 0, r1, c7, c15, 1		@ clean & invalidate unified line
 #endif
 1:
+#ifdef CONFIG_DMA_CACHE_RWFO
+	ldr	r2, [r0]			@ read for ownership
+	str	r2, [r0]			@ write for ownership
+#endif
 #ifdef HARVARD_CACHE
 	mcr	p15, 0, r0, c7, c6, 1		@ invalidate D line
 #else
@@ -231,6 +235,9 @@ v6_dma_inv_range:
 v6_dma_clean_range:
 	bic	r0, r0, #D_CACHE_LINE_SIZE - 1
 1:
+#ifdef CONFIG_DMA_CACHE_RWFO
+	ldr	r2, [r0]			@ read for ownership
+#endif
 #ifdef HARVARD_CACHE
 	mcr	p15, 0, r0, c7, c10, 1		@ clean D line
 #else
@@ -251,6 +258,10 @@ v6_dma_clean_range:
 ENTRY(v6_dma_flush_range)
 	bic	r0, r0, #D_CACHE_LINE_SIZE - 1
 1:
+#ifdef CONFIG_DMA_CACHE_RWFO
+	ldr	r2, [r0]			@ read for ownership
+	str	r2, [r0]			@ write for ownership
+#endif
 #ifdef HARVARD_CACHE
 	mcr	p15, 0, r0, c7, c14, 1		@ clean & invalidate D line
 #else
@@ -273,7 +284,13 @@ ENTRY(v6_dma_map_area)
 	add	r1, r1, r0
 	teq	r2, #DMA_FROM_DEVICE
 	beq	v6_dma_inv_range
+#ifndef CONFIG_DMA_CACHE_RWFO
 	b	v6_dma_clean_range
+#else
+	teq	r2, #DMA_TO_DEVICE
+	beq	v6_dma_clean_range
+	b	v6_dma_flush_range
+#endif
 ENDPROC(v6_dma_map_area)
 
 /*
@@ -283,9 +300,11 @@ ENDPROC(v6_dma_map_area)
  * - dir	- DMA direction
  */
 ENTRY(v6_dma_unmap_area)
+#ifndef CONFIG_DMA_CACHE_RWFO
 	add	r1, r1, r0
 	teq	r2, #DMA_TO_DEVICE
 	bne	v6_dma_inv_range
+#endif
 	mov	pc, lr
 ENDPROC(v6_dma_unmap_area)
 
diff --git a/arch/arm/mm/cache-v7.S b/arch/arm/mm/cache-v7.S
index bcd64f265870..37c8157e116e 100644
--- a/arch/arm/mm/cache-v7.S
+++ b/arch/arm/mm/cache-v7.S
@@ -91,7 +91,11 @@ ENTRY(v7_flush_kern_cache_all)
91 THUMB( stmfd sp!, {r4-r7, r9-r11, lr} ) 91 THUMB( stmfd sp!, {r4-r7, r9-r11, lr} )
92 bl v7_flush_dcache_all 92 bl v7_flush_dcache_all
93 mov r0, #0 93 mov r0, #0
94#ifdef CONFIG_SMP
95 mcr p15, 0, r0, c7, c1, 0 @ invalidate I-cache inner shareable
96#else
94 mcr p15, 0, r0, c7, c5, 0 @ I+BTB cache invalidate 97 mcr p15, 0, r0, c7, c5, 0 @ I+BTB cache invalidate
98#endif
95 ARM( ldmfd sp!, {r4-r5, r7, r9-r11, lr} ) 99 ARM( ldmfd sp!, {r4-r5, r7, r9-r11, lr} )
96 THUMB( ldmfd sp!, {r4-r7, r9-r11, lr} ) 100 THUMB( ldmfd sp!, {r4-r7, r9-r11, lr} )
97 mov pc, lr 101 mov pc, lr
@@ -167,7 +171,11 @@ ENTRY(v7_coherent_user_range)
167 cmp r0, r1 171 cmp r0, r1
168 blo 1b 172 blo 1b
169 mov r0, #0 173 mov r0, #0
174#ifdef CONFIG_SMP
175 mcr p15, 0, r0, c7, c1, 6 @ invalidate BTB Inner Shareable
176#else
170 mcr p15, 0, r0, c7, c5, 6 @ invalidate BTB 177 mcr p15, 0, r0, c7, c5, 6 @ invalidate BTB
178#endif
171 dsb 179 dsb
172 isb 180 isb
173 mov pc, lr 181 mov pc, lr
diff --git a/arch/arm/mm/copypage-fa.c b/arch/arm/mm/copypage-fa.c
index b2a6008b0111..d2852e1635b1 100644
--- a/arch/arm/mm/copypage-fa.c
+++ b/arch/arm/mm/copypage-fa.c
@@ -40,7 +40,7 @@ fa_copy_user_page(void *kto, const void *kfrom)
40} 40}
41 41
42void fa_copy_user_highpage(struct page *to, struct page *from, 42void fa_copy_user_highpage(struct page *to, struct page *from,
43 unsigned long vaddr) 43 unsigned long vaddr, struct vm_area_struct *vma)
44{ 44{
45 void *kto, *kfrom; 45 void *kto, *kfrom;
46 46
diff --git a/arch/arm/mm/copypage-feroceon.c b/arch/arm/mm/copypage-feroceon.c
index 5eb4fd93893d..ac163de7dc01 100644
--- a/arch/arm/mm/copypage-feroceon.c
+++ b/arch/arm/mm/copypage-feroceon.c
@@ -18,7 +18,7 @@ feroceon_copy_user_page(void *kto, const void *kfrom)
18{ 18{
19 asm("\ 19 asm("\
20 stmfd sp!, {r4-r9, lr} \n\ 20 stmfd sp!, {r4-r9, lr} \n\
21 mov ip, %0 \n\ 21 mov ip, %2 \n\
221: mov lr, r1 \n\ 221: mov lr, r1 \n\
23 ldmia r1!, {r2 - r9} \n\ 23 ldmia r1!, {r2 - r9} \n\
24 pld [lr, #32] \n\ 24 pld [lr, #32] \n\
@@ -64,7 +64,7 @@ feroceon_copy_user_page(void *kto, const void *kfrom)
64 mcr p15, 0, ip, c7, c10, 4 @ drain WB\n\ 64 mcr p15, 0, ip, c7, c10, 4 @ drain WB\n\
65 ldmfd sp!, {r4-r9, pc}" 65 ldmfd sp!, {r4-r9, pc}"
66 : 66 :
67 : "I" (PAGE_SIZE)); 67 : "r" (kto), "r" (kfrom), "I" (PAGE_SIZE));
68} 68}
69 69
70void feroceon_copy_user_highpage(struct page *to, struct page *from, 70void feroceon_copy_user_highpage(struct page *to, struct page *from,
diff --git a/arch/arm/mm/copypage-v4wb.c b/arch/arm/mm/copypage-v4wb.c
index 7c2eb55cd4a9..cb589cbb2b6c 100644
--- a/arch/arm/mm/copypage-v4wb.c
+++ b/arch/arm/mm/copypage-v4wb.c
@@ -27,7 +27,7 @@ v4wb_copy_user_page(void *kto, const void *kfrom)
27{ 27{
28 asm("\ 28 asm("\
29 stmfd sp!, {r4, lr} @ 2\n\ 29 stmfd sp!, {r4, lr} @ 2\n\
30 mov r2, %0 @ 1\n\ 30 mov r2, %2 @ 1\n\
31 ldmia r1!, {r3, r4, ip, lr} @ 4\n\ 31 ldmia r1!, {r3, r4, ip, lr} @ 4\n\
321: mcr p15, 0, r0, c7, c6, 1 @ 1 invalidate D line\n\ 321: mcr p15, 0, r0, c7, c6, 1 @ 1 invalidate D line\n\
33 stmia r0!, {r3, r4, ip, lr} @ 4\n\ 33 stmia r0!, {r3, r4, ip, lr} @ 4\n\
@@ -44,7 +44,7 @@ v4wb_copy_user_page(void *kto, const void *kfrom)
44 mcr p15, 0, r1, c7, c10, 4 @ 1 drain WB\n\ 44 mcr p15, 0, r1, c7, c10, 4 @ 1 drain WB\n\
45 ldmfd sp!, {r4, pc} @ 3" 45 ldmfd sp!, {r4, pc} @ 3"
46 : 46 :
47 : "I" (PAGE_SIZE / 64)); 47 : "r" (kto), "r" (kfrom), "I" (PAGE_SIZE / 64));
48} 48}
49 49
50void v4wb_copy_user_highpage(struct page *to, struct page *from, 50void v4wb_copy_user_highpage(struct page *to, struct page *from,
diff --git a/arch/arm/mm/copypage-v4wt.c b/arch/arm/mm/copypage-v4wt.c
index 172e6a55458e..30c7d048a324 100644
--- a/arch/arm/mm/copypage-v4wt.c
+++ b/arch/arm/mm/copypage-v4wt.c
@@ -25,7 +25,7 @@ v4wt_copy_user_page(void *kto, const void *kfrom)
25{ 25{
26 asm("\ 26 asm("\
27 stmfd sp!, {r4, lr} @ 2\n\ 27 stmfd sp!, {r4, lr} @ 2\n\
28 mov r2, %0 @ 1\n\ 28 mov r2, %2 @ 1\n\
29 ldmia r1!, {r3, r4, ip, lr} @ 4\n\ 29 ldmia r1!, {r3, r4, ip, lr} @ 4\n\
301: stmia r0!, {r3, r4, ip, lr} @ 4\n\ 301: stmia r0!, {r3, r4, ip, lr} @ 4\n\
31 ldmia r1!, {r3, r4, ip, lr} @ 4+1\n\ 31 ldmia r1!, {r3, r4, ip, lr} @ 4+1\n\
@@ -40,7 +40,7 @@ v4wt_copy_user_page(void *kto, const void *kfrom)
40 mcr p15, 0, r2, c7, c7, 0 @ flush ID cache\n\ 40 mcr p15, 0, r2, c7, c7, 0 @ flush ID cache\n\
41 ldmfd sp!, {r4, pc} @ 3" 41 ldmfd sp!, {r4, pc} @ 3"
42 : 42 :
43 : "I" (PAGE_SIZE / 64)); 43 : "r" (kto), "r" (kfrom), "I" (PAGE_SIZE / 64));
44} 44}
45 45
46void v4wt_copy_user_highpage(struct page *to, struct page *from, 46void v4wt_copy_user_highpage(struct page *to, struct page *from,
diff --git a/arch/arm/mm/copypage-v6.c b/arch/arm/mm/copypage-v6.c
index 8bca4dea6dfa..f55fa1044f72 100644
--- a/arch/arm/mm/copypage-v6.c
+++ b/arch/arm/mm/copypage-v6.c
@@ -41,14 +41,7 @@ static void v6_copy_user_highpage_nonaliasing(struct page *to,
41 kfrom = kmap_atomic(from, KM_USER0); 41 kfrom = kmap_atomic(from, KM_USER0);
42 kto = kmap_atomic(to, KM_USER1); 42 kto = kmap_atomic(to, KM_USER1);
43 copy_page(kto, kfrom); 43 copy_page(kto, kfrom);
44#ifdef CONFIG_HIGHMEM 44 __cpuc_flush_dcache_area(kto, PAGE_SIZE);
45 /*
46 * kmap_atomic() doesn't set the page virtual address, and
47 * kunmap_atomic() takes care of cache flushing already.
48 */
49 if (page_address(to) != NULL)
50#endif
51 __cpuc_flush_dcache_area(kto, PAGE_SIZE);
52 kunmap_atomic(kto, KM_USER1); 45 kunmap_atomic(kto, KM_USER1);
53 kunmap_atomic(kfrom, KM_USER0); 46 kunmap_atomic(kfrom, KM_USER0);
54} 47}
diff --git a/arch/arm/mm/copypage-xsc3.c b/arch/arm/mm/copypage-xsc3.c
index 747ad4140fc7..f9cde0702f1e 100644
--- a/arch/arm/mm/copypage-xsc3.c
+++ b/arch/arm/mm/copypage-xsc3.c
@@ -34,7 +34,7 @@ xsc3_mc_copy_user_page(void *kto, const void *kfrom)
34{ 34{
35 asm("\ 35 asm("\
36 stmfd sp!, {r4, r5, lr} \n\ 36 stmfd sp!, {r4, r5, lr} \n\
37 mov lr, %0 \n\ 37 mov lr, %2 \n\
38 \n\ 38 \n\
39 pld [r1, #0] \n\ 39 pld [r1, #0] \n\
40 pld [r1, #32] \n\ 40 pld [r1, #32] \n\
@@ -67,7 +67,7 @@ xsc3_mc_copy_user_page(void *kto, const void *kfrom)
67 \n\ 67 \n\
68 ldmfd sp!, {r4, r5, pc}" 68 ldmfd sp!, {r4, r5, pc}"
69 : 69 :
70 : "I" (PAGE_SIZE / 64 - 1)); 70 : "r" (kto), "r" (kfrom), "I" (PAGE_SIZE / 64 - 1));
71} 71}
72 72
73void xsc3_mc_copy_user_highpage(struct page *to, struct page *from, 73void xsc3_mc_copy_user_highpage(struct page *to, struct page *from,
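
All of the copypage changes above share one shape: the source and destination pointers are now passed to the inline assembly as explicit "r" input operands instead of being assumed to still live in r0/r1, so the templates reference %2 (the operand after kto and kfrom) rather than %0. A minimal standalone illustration of the constraint idiom (not code from this diff; it uses "+r" for the pointers the asm advances):

	/* Copy 32 bytes; the compiler picks registers for dst/src. */
	static void copy32(void *dst, const void *src)
	{
		asm volatile(
			"ldmia	%1!, {r2-r9}\n\t"	/* load 32 bytes, advance src */
			"stmia	%0!, {r2-r9}"		/* store 32 bytes, advance dst */
			: "+r" (dst), "+r" (src)
			:
			: "r2", "r3", "r4", "r5", "r6", "r7",
			  "r8", "r9", "memory");
	}

Relying on arguments happening to sit in r0/r1, as the old code did, breaks as soon as the compiler inlines or reorders the surrounding function.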
diff --git a/arch/arm/mm/discontig.c b/arch/arm/mm/discontig.c
deleted file mode 100644
index c8c0c4b0f0a3..000000000000
--- a/arch/arm/mm/discontig.c
+++ /dev/null
@@ -1,45 +0,0 @@
-/*
- * linux/arch/arm/mm/discontig.c
- *
- * Discontiguous memory support.
- *
- * Initial code: Copyright (C) 1999-2000 Nicolas Pitre
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- */
-#include <linux/module.h>
-#include <linux/mmzone.h>
-#include <linux/bootmem.h>
-
-#if MAX_NUMNODES != 4 && MAX_NUMNODES != 16
-# error Fix Me Please
-#endif
-
-/*
- * Our node_data structure for discontiguous memory.
- */
-
-pg_data_t discontig_node_data[MAX_NUMNODES] = {
-  { .bdata = &bootmem_node_data[0] },
-  { .bdata = &bootmem_node_data[1] },
-  { .bdata = &bootmem_node_data[2] },
-  { .bdata = &bootmem_node_data[3] },
-#if MAX_NUMNODES == 16
-  { .bdata = &bootmem_node_data[4] },
-  { .bdata = &bootmem_node_data[5] },
-  { .bdata = &bootmem_node_data[6] },
-  { .bdata = &bootmem_node_data[7] },
-  { .bdata = &bootmem_node_data[8] },
-  { .bdata = &bootmem_node_data[9] },
-  { .bdata = &bootmem_node_data[10] },
-  { .bdata = &bootmem_node_data[11] },
-  { .bdata = &bootmem_node_data[12] },
-  { .bdata = &bootmem_node_data[13] },
-  { .bdata = &bootmem_node_data[14] },
-  { .bdata = &bootmem_node_data[15] },
-#endif
-};
-
-EXPORT_SYMBOL(discontig_node_data);
diff --git a/arch/arm/mm/dma-mapping.c b/arch/arm/mm/dma-mapping.c
index 0da7eccf7749..c704eed63c5d 100644
--- a/arch/arm/mm/dma-mapping.c
+++ b/arch/arm/mm/dma-mapping.c
@@ -11,7 +11,7 @@
11 */ 11 */
12#include <linux/module.h> 12#include <linux/module.h>
13#include <linux/mm.h> 13#include <linux/mm.h>
14#include <linux/slab.h> 14#include <linux/gfp.h>
15#include <linux/errno.h> 15#include <linux/errno.h>
16#include <linux/list.h> 16#include <linux/list.h>
17#include <linux/init.h> 17#include <linux/init.h>
@@ -24,15 +24,6 @@
24#include <asm/tlbflush.h> 24#include <asm/tlbflush.h>
25#include <asm/sizes.h> 25#include <asm/sizes.h>
26 26
27/* Sanity check size */
28#if (CONSISTENT_DMA_SIZE % SZ_2M)
29#error "CONSISTENT_DMA_SIZE must be multiple of 2MiB"
30#endif
31
32#define CONSISTENT_OFFSET(x) (((unsigned long)(x) - CONSISTENT_BASE) >> PAGE_SHIFT)
33#define CONSISTENT_PTE_INDEX(x) (((unsigned long)(x) - CONSISTENT_BASE) >> PGDIR_SHIFT)
34#define NUM_CONSISTENT_PTES (CONSISTENT_DMA_SIZE >> PGDIR_SHIFT)
35
36static u64 get_coherent_dma_mask(struct device *dev) 27static u64 get_coherent_dma_mask(struct device *dev)
37{ 28{
38 u64 mask = ISA_DMA_THRESHOLD; 29 u64 mask = ISA_DMA_THRESHOLD;
@@ -123,6 +114,15 @@ static void __dma_free_buffer(struct page *page, size_t size)
123} 114}
124 115
125#ifdef CONFIG_MMU 116#ifdef CONFIG_MMU
117/* Sanity check size */
118#if (CONSISTENT_DMA_SIZE % SZ_2M)
119#error "CONSISTENT_DMA_SIZE must be multiple of 2MiB"
120#endif
121
122#define CONSISTENT_OFFSET(x) (((unsigned long)(x) - CONSISTENT_BASE) >> PAGE_SHIFT)
123#define CONSISTENT_PTE_INDEX(x) (((unsigned long)(x) - CONSISTENT_BASE) >> PGDIR_SHIFT)
124#define NUM_CONSISTENT_PTES (CONSISTENT_DMA_SIZE >> PGDIR_SHIFT)
125
126/* 126/*
127 * These are the page tables (2MB each) covering uncached, DMA consistent allocations 127 * These are the page tables (2MB each) covering uncached, DMA consistent allocations
128 */ 128 */
@@ -183,6 +183,8 @@ static void *
183__dma_alloc_remap(struct page *page, size_t size, gfp_t gfp, pgprot_t prot) 183__dma_alloc_remap(struct page *page, size_t size, gfp_t gfp, pgprot_t prot)
184{ 184{
185 struct arm_vmregion *c; 185 struct arm_vmregion *c;
186 size_t align;
187 int bit;
186 188
187 if (!consistent_pte[0]) { 189 if (!consistent_pte[0]) {
188 printk(KERN_ERR "%s: not initialised\n", __func__); 190 printk(KERN_ERR "%s: not initialised\n", __func__);
@@ -191,9 +193,20 @@ __dma_alloc_remap(struct page *page, size_t size, gfp_t gfp, pgprot_t prot)
191 } 193 }
192 194
193 /* 195 /*
196 * Align the virtual region allocation - maximum alignment is
197 * a section size, minimum is a page size. This helps reduce
198 * fragmentation of the DMA space, and also prevents allocations
199 * smaller than a section from crossing a section boundary.
200 */
201 bit = fls(size - 1) + 1;
202 if (bit > SECTION_SHIFT)
203 bit = SECTION_SHIFT;
204 align = 1 << bit;
205
206 /*
194 * Allocate a virtual address in the consistent mapping region. 207 * Allocate a virtual address in the consistent mapping region.
195 */ 208 */
196 c = arm_vmregion_alloc(&consistent_head, size, 209 c = arm_vmregion_alloc(&consistent_head, align, size,
197 gfp & ~(__GFP_DMA | __GFP_HIGHMEM)); 210 gfp & ~(__GFP_DMA | __GFP_HIGHMEM));
198 if (c) { 211 if (c) {
199 pte_t *pte; 212 pte_t *pte;
@@ -464,6 +477,11 @@ static void dma_cache_maint_page(struct page *page, unsigned long offset,
464 vaddr += offset; 477 vaddr += offset;
465 op(vaddr, len, dir); 478 op(vaddr, len, dir);
466 kunmap_high(page); 479 kunmap_high(page);
480 } else if (cache_is_vipt()) {
481 pte_t saved_pte;
482 vaddr = kmap_high_l1_vipt(page, &saved_pte);
483 op(vaddr + offset, len, dir);
484 kunmap_high_l1_vipt(page, saved_pte);
467 } 485 }
468 } else { 486 } else {
469 vaddr = page_address(page) + offset; 487 vaddr = page_address(page) + offset;
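
The new alignment logic in __dma_alloc_remap() rounds each virtual allocation up to a power-of-two boundary, capped at one section. Tracing the arithmetic exactly as written in the hunk (assuming classic 1MiB sections, i.e. SECTION_SHIFT = 20):

	size_t size = SZ_64K;		/* example request */
	int bit = fls(size - 1) + 1;	/* fls(0xffff) = 16, so bit = 17 */
	if (bit > SECTION_SHIFT)
		bit = SECTION_SHIFT;	/* large requests are capped at 1MiB */
	size_t align = 1 << bit;	/* here: 128KiB */

The point, per the comment in the hunk, is that sub-section allocations can never straddle a section boundary, which reduces fragmentation of the consistent DMA space.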
diff --git a/arch/arm/mm/fault-armv.c b/arch/arm/mm/fault-armv.c
index c9b97e9836a2..9b906dec1ca1 100644
--- a/arch/arm/mm/fault-armv.c
+++ b/arch/arm/mm/fault-armv.c
@@ -16,6 +16,7 @@
16#include <linux/vmalloc.h> 16#include <linux/vmalloc.h>
17#include <linux/init.h> 17#include <linux/init.h>
18#include <linux/pagemap.h> 18#include <linux/pagemap.h>
19#include <linux/gfp.h>
19 20
20#include <asm/bugs.h> 21#include <asm/bugs.h>
21#include <asm/cacheflush.h> 22#include <asm/cacheflush.h>
@@ -133,8 +134,6 @@ make_coherent(struct address_space *mapping, struct vm_area_struct *vma,
133 flush_dcache_mmap_unlock(mapping); 134 flush_dcache_mmap_unlock(mapping);
134 if (aliases) 135 if (aliases)
135 do_adjust_pte(vma, addr, pfn, ptep); 136 do_adjust_pte(vma, addr, pfn, ptep);
136 else
137 flush_cache_page(vma, addr, pfn);
138} 137}
139 138
140/* 139/*
diff --git a/arch/arm/mm/fault.c b/arch/arm/mm/fault.c
index 9d40c341e07e..23b0b03af5ea 100644
--- a/arch/arm/mm/fault.c
+++ b/arch/arm/mm/fault.c
@@ -393,6 +393,9 @@ do_translation_fault(unsigned long addr, unsigned int fsr,
 	if (addr < TASK_SIZE)
 		return do_page_fault(addr, fsr, regs);
 
+	if (user_mode(regs))
+		goto bad_area;
+
 	index = pgd_index(addr);
 
 	/*
@@ -410,7 +413,16 @@ do_translation_fault(unsigned long addr, unsigned int fsr,
 	pmd_k = pmd_offset(pgd_k, addr);
 	pmd = pmd_offset(pgd, addr);
 
-	if (pmd_none(*pmd_k))
+	/*
+	 * On ARM one Linux PGD entry contains two hardware entries (see page
+	 * tables layout in pgtable.h). We normally guarantee that we always
+	 * fill both L1 entries. But create_mapping() doesn't follow the rule.
+	 * It can create individual L1 entries, so here we have to call
+	 * pmd_none() check for the entry really corresponded to address, not
+	 * for the first of pair.
+	 */
+	index = (addr >> SECTION_SHIFT) & 1;
+	if (pmd_none(pmd_k[index]))
 		goto bad_area;
 
 	copy_pmd(pmd, pmd_k);
@@ -460,9 +472,9 @@ static struct fsr_info {
 	 * defines these to be "precise" aborts.
 	 */
 	{ do_bad,		SIGSEGV, 0,		"vector exception"		   },
-	{ do_bad,		SIGILL,	 BUS_ADRALN,	"alignment exception"		   },
+	{ do_bad,		SIGBUS,	 BUS_ADRALN,	"alignment exception"		   },
 	{ do_bad,		SIGKILL, 0,		"terminal exception"		   },
-	{ do_bad,		SIGILL,	 BUS_ADRALN,	"alignment exception"		   },
+	{ do_bad,		SIGBUS,	 BUS_ADRALN,	"alignment exception"		   },
 	{ do_bad,		SIGBUS,	 0,		"external abort on linefetch"	   },
 	{ do_translation_fault,	SIGSEGV, SEGV_MAPERR,	"section translation fault"	   },
 	{ do_bad,		SIGBUS,	 0,		"external abort on linefetch"	   },
@@ -500,13 +512,15 @@ static struct fsr_info {
 
 void __init
 hook_fault_code(int nr, int (*fn)(unsigned long, unsigned int, struct pt_regs *),
-		int sig, const char *name)
+		int sig, int code, const char *name)
 {
-	if (nr >= 0 && nr < ARRAY_SIZE(fsr_info)) {
-		fsr_info[nr].fn   = fn;
-		fsr_info[nr].sig  = sig;
-		fsr_info[nr].name = name;
-	}
+	if (nr < 0 || nr >= ARRAY_SIZE(fsr_info))
+		BUG();
+
+	fsr_info[nr].fn   = fn;
+	fsr_info[nr].sig  = sig;
+	fsr_info[nr].code = code;
+	fsr_info[nr].name = name;
 }
 
 /*
@@ -586,3 +600,25 @@ do_PrefetchAbort(unsigned long addr, unsigned int ifsr, struct pt_regs *regs)
 	arm_notify_die("", regs, &info, ifsr, 0);
 }
 
+static int __init exceptions_init(void)
+{
+	if (cpu_architecture() >= CPU_ARCH_ARMv6) {
+		hook_fault_code(4, do_translation_fault, SIGSEGV, SEGV_MAPERR,
+				"I-cache maintenance fault");
+	}
+
+	if (cpu_architecture() >= CPU_ARCH_ARMv7) {
+		/*
+		 * TODO: Access flag faults introduced in ARMv6K.
+		 * Runtime check for 'K' extension is needed
+		 */
+		hook_fault_code(3, do_bad, SIGSEGV, SEGV_MAPERR,
+				"section access flag fault");
+		hook_fault_code(6, do_bad, SIGSEGV, SEGV_MAPERR,
+				"section access flag fault");
+	}
+
+	return 0;
+}
+
+arch_initcall(exceptions_init);
diff --git a/arch/arm/mm/flush.c b/arch/arm/mm/flush.c
index e34f095e2090..c6844cb9b508 100644
--- a/arch/arm/mm/flush.c
+++ b/arch/arm/mm/flush.c
@@ -13,6 +13,7 @@
13 13
14#include <asm/cacheflush.h> 14#include <asm/cacheflush.h>
15#include <asm/cachetype.h> 15#include <asm/cachetype.h>
16#include <asm/highmem.h>
16#include <asm/smp_plat.h> 17#include <asm/smp_plat.h>
17#include <asm/system.h> 18#include <asm/system.h>
18#include <asm/tlbflush.h> 19#include <asm/tlbflush.h>
@@ -152,21 +153,25 @@ void copy_to_user_page(struct vm_area_struct *vma, struct page *page,
152 153
153void __flush_dcache_page(struct address_space *mapping, struct page *page) 154void __flush_dcache_page(struct address_space *mapping, struct page *page)
154{ 155{
155 void *addr = page_address(page);
156
157 /* 156 /*
158 * Writeback any data associated with the kernel mapping of this 157 * Writeback any data associated with the kernel mapping of this
159 * page. This ensures that data in the physical page is mutually 158 * page. This ensures that data in the physical page is mutually
160 * coherent with the kernels mapping. 159 * coherent with the kernels mapping.
161 */ 160 */
162#ifdef CONFIG_HIGHMEM 161 if (!PageHighMem(page)) {
163 /* 162 __cpuc_flush_dcache_area(page_address(page), PAGE_SIZE);
164 * kmap_atomic() doesn't set the page virtual address, and 163 } else {
165 * kunmap_atomic() takes care of cache flushing already. 164 void *addr = kmap_high_get(page);
166 */ 165 if (addr) {
167 if (addr) 166 __cpuc_flush_dcache_area(addr, PAGE_SIZE);
168#endif 167 kunmap_high(page);
169 __cpuc_flush_dcache_area(addr, PAGE_SIZE); 168 } else if (cache_is_vipt()) {
169 pte_t saved_pte;
170 addr = kmap_high_l1_vipt(page, &saved_pte);
171 __cpuc_flush_dcache_area(addr, PAGE_SIZE);
172 kunmap_high_l1_vipt(page, saved_pte);
173 }
174 }
170 175
171 /* 176 /*
172 * If this is a page cache page, and we have an aliasing VIPT cache, 177 * If this is a page cache page, and we have an aliasing VIPT cache,
diff --git a/arch/arm/mm/highmem.c b/arch/arm/mm/highmem.c
index 2be1ec7c1b41..6ab244062b4a 100644
--- a/arch/arm/mm/highmem.c
+++ b/arch/arm/mm/highmem.c
@@ -48,7 +48,16 @@ void *kmap_atomic(struct page *page, enum km_type type)
48 48
49 debug_kmap_atomic(type); 49 debug_kmap_atomic(type);
50 50
51 kmap = kmap_high_get(page); 51#ifdef CONFIG_DEBUG_HIGHMEM
52 /*
53 * There is no cache coherency issue when non VIVT, so force the
54 * dedicated kmap usage for better debugging purposes in that case.
55 */
56 if (!cache_is_vivt())
57 kmap = NULL;
58 else
59#endif
60 kmap = kmap_high_get(page);
52 if (kmap) 61 if (kmap)
53 return kmap; 62 return kmap;
54 63
@@ -79,7 +88,8 @@ void kunmap_atomic(void *kvaddr, enum km_type type)
79 unsigned int idx = type + KM_TYPE_NR * smp_processor_id(); 88 unsigned int idx = type + KM_TYPE_NR * smp_processor_id();
80 89
81 if (kvaddr >= (void *)FIXADDR_START) { 90 if (kvaddr >= (void *)FIXADDR_START) {
82 __cpuc_flush_dcache_area((void *)vaddr, PAGE_SIZE); 91 if (cache_is_vivt())
92 __cpuc_flush_dcache_area((void *)vaddr, PAGE_SIZE);
83#ifdef CONFIG_DEBUG_HIGHMEM 93#ifdef CONFIG_DEBUG_HIGHMEM
84 BUG_ON(vaddr != __fix_to_virt(FIX_KMAP_BEGIN + idx)); 94 BUG_ON(vaddr != __fix_to_virt(FIX_KMAP_BEGIN + idx));
85 set_pte_ext(TOP_PTE(vaddr), __pte(0), 0); 95 set_pte_ext(TOP_PTE(vaddr), __pte(0), 0);
@@ -124,3 +134,90 @@ struct page *kmap_atomic_to_page(const void *ptr)
124 pte = TOP_PTE(vaddr); 134 pte = TOP_PTE(vaddr);
125 return pte_page(*pte); 135 return pte_page(*pte);
126} 136}
137
138#ifdef CONFIG_CPU_CACHE_VIPT
139
140#include <linux/percpu.h>
141
142/*
143 * The VIVT cache of a highmem page is always flushed before the page
144 * is unmapped. Hence unmapped highmem pages need no cache maintenance
145 * in that case.
146 *
147 * However unmapped pages may still be cached with a VIPT cache, and
148 * it is not possible to perform cache maintenance on them using physical
149 * addresses unfortunately. So we have no choice but to set up a temporary
150 * virtual mapping for that purpose.
151 *
152 * Yet this VIPT cache maintenance may be triggered from DMA support
153 * functions which are possibly called from interrupt context. As we don't
154 * want to keep interrupt disabled all the time when such maintenance is
155 * taking place, we therefore allow for some reentrancy by preserving and
156 * restoring the previous fixmap entry before the interrupted context is
157 * resumed. If the reentrancy depth is 0 then there is no need to restore
158 * the previous fixmap, and leaving the current one in place allow it to
159 * be reused the next time without a TLB flush (common with DMA).
160 */
161
162static DEFINE_PER_CPU(int, kmap_high_l1_vipt_depth);
163
164void *kmap_high_l1_vipt(struct page *page, pte_t *saved_pte)
165{
166 unsigned int idx, cpu;
167 int *depth;
168 unsigned long vaddr, flags;
169 pte_t pte, *ptep;
170
171 if (!in_interrupt())
172 preempt_disable();
173
174 cpu = smp_processor_id();
175 depth = &per_cpu(kmap_high_l1_vipt_depth, cpu);
176
177 idx = KM_L1_CACHE + KM_TYPE_NR * cpu;
178 vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
179 ptep = TOP_PTE(vaddr);
180 pte = mk_pte(page, kmap_prot);
181
182 raw_local_irq_save(flags);
183 (*depth)++;
184 if (pte_val(*ptep) == pte_val(pte)) {
185 *saved_pte = pte;
186 } else {
187 *saved_pte = *ptep;
188 set_pte_ext(ptep, pte, 0);
189 local_flush_tlb_kernel_page(vaddr);
190 }
191 raw_local_irq_restore(flags);
192
193 return (void *)vaddr;
194}
195
196void kunmap_high_l1_vipt(struct page *page, pte_t saved_pte)
197{
198 unsigned int idx, cpu = smp_processor_id();
199 int *depth = &per_cpu(kmap_high_l1_vipt_depth, cpu);
200 unsigned long vaddr, flags;
201 pte_t pte, *ptep;
202
203 idx = KM_L1_CACHE + KM_TYPE_NR * cpu;
204 vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
205 ptep = TOP_PTE(vaddr);
206 pte = mk_pte(page, kmap_prot);
207
208 BUG_ON(pte_val(*ptep) != pte_val(pte));
209 BUG_ON(*depth <= 0);
210
211 raw_local_irq_save(flags);
212 (*depth)--;
213 if (*depth != 0 && pte_val(pte) != pte_val(saved_pte)) {
214 set_pte_ext(ptep, saved_pte, 0);
215 local_flush_tlb_kernel_page(vaddr);
216 }
217 raw_local_irq_restore(flags);
218
219 if (!in_interrupt())
220 preempt_enable();
221}
222
223#endif /* CONFIG_CPU_CACHE_VIPT */
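
The save/restore pair added above is used bracket-style around a cache operation on an otherwise unmapped highmem page, exactly as the dma-mapping.c hunk earlier does. The usage shape is:

	pte_t saved_pte;
	void *vaddr;

	vaddr = kmap_high_l1_vipt(page, &saved_pte);	/* may reuse the fixmap slot */
	__cpuc_flush_dcache_area(vaddr, PAGE_SIZE);	/* any line-by-line cache op */
	kunmap_high_l1_vipt(page, saved_pte);		/* restores an interrupted mapping */

Because the previous fixmap PTE is saved and restored, the pair is safe to nest from interrupt context, and the depth-0 fast path leaves the mapping in place so repeated DMA maintenance on the same page avoids a TLB flush.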
diff --git a/arch/arm/mm/init.c b/arch/arm/mm/init.c
index 7829cb5425f5..7185b00650fe 100644
--- a/arch/arm/mm/init.c
+++ b/arch/arm/mm/init.c
@@ -15,8 +15,9 @@
15#include <linux/mman.h> 15#include <linux/mman.h>
16#include <linux/nodemask.h> 16#include <linux/nodemask.h>
17#include <linux/initrd.h> 17#include <linux/initrd.h>
18#include <linux/sort.h>
19#include <linux/highmem.h> 18#include <linux/highmem.h>
19#include <linux/gfp.h>
20#include <linux/memblock.h>
20 21
21#include <asm/mach-types.h> 22#include <asm/mach-types.h>
22#include <asm/sections.h> 23#include <asm/sections.h>
@@ -79,41 +80,37 @@ struct meminfo meminfo;
79void show_mem(void) 80void show_mem(void)
80{ 81{
81 int free = 0, total = 0, reserved = 0; 82 int free = 0, total = 0, reserved = 0;
82 int shared = 0, cached = 0, slab = 0, node, i; 83 int shared = 0, cached = 0, slab = 0, i;
83 struct meminfo * mi = &meminfo; 84 struct meminfo * mi = &meminfo;
84 85
85 printk("Mem-info:\n"); 86 printk("Mem-info:\n");
86 show_free_areas(); 87 show_free_areas();
87 for_each_online_node(node) { 88
88 pg_data_t *n = NODE_DATA(node); 89 for_each_bank (i, mi) {
89 struct page *map = pgdat_page_nr(n, 0) - n->node_start_pfn; 90 struct membank *bank = &mi->bank[i];
90 91 unsigned int pfn1, pfn2;
91 for_each_nodebank (i,mi,node) { 92 struct page *page, *end;
92 struct membank *bank = &mi->bank[i]; 93
93 unsigned int pfn1, pfn2; 94 pfn1 = bank_pfn_start(bank);
94 struct page *page, *end; 95 pfn2 = bank_pfn_end(bank);
95 96
96 pfn1 = bank_pfn_start(bank); 97 page = pfn_to_page(pfn1);
97 pfn2 = bank_pfn_end(bank); 98 end = pfn_to_page(pfn2 - 1) + 1;
98 99
99 page = map + pfn1; 100 do {
100 end = map + pfn2; 101 total++;
101 102 if (PageReserved(page))
102 do { 103 reserved++;
103 total++; 104 else if (PageSwapCache(page))
104 if (PageReserved(page)) 105 cached++;
105 reserved++; 106 else if (PageSlab(page))
106 else if (PageSwapCache(page)) 107 slab++;
107 cached++; 108 else if (!page_count(page))
108 else if (PageSlab(page)) 109 free++;
109 slab++; 110 else
110 else if (!page_count(page)) 111 shared += page_count(page) - 1;
111 free++; 112 page++;
112 else 113 } while (page < end);
113 shared += page_count(page) - 1;
114 page++;
115 } while (page < end);
116 }
117 } 114 }
118 115
119 printk("%d pages of RAM\n", total); 116 printk("%d pages of RAM\n", total);
@@ -124,7 +121,7 @@ void show_mem(void)
124 printk("%d pages swap cached\n", cached); 121 printk("%d pages swap cached\n", cached);
125} 122}
126 123
127static void __init find_node_limits(int node, struct meminfo *mi, 124static void __init find_limits(struct meminfo *mi,
128 unsigned long *min, unsigned long *max_low, unsigned long *max_high) 125 unsigned long *min, unsigned long *max_low, unsigned long *max_high)
129{ 126{
130 int i; 127 int i;
@@ -132,7 +129,7 @@ static void __init find_node_limits(int node, struct meminfo *mi,
132 *min = -1UL; 129 *min = -1UL;
133 *max_low = *max_high = 0; 130 *max_low = *max_high = 0;
134 131
135 for_each_nodebank(i, mi, node) { 132 for_each_bank (i, mi) {
136 struct membank *bank = &mi->bank[i]; 133 struct membank *bank = &mi->bank[i];
137 unsigned long start, end; 134 unsigned long start, end;
138 135
@@ -150,179 +147,64 @@ static void __init find_node_limits(int node, struct meminfo *mi,
150 } 147 }
151} 148}
152 149
153/* 150static void __init arm_bootmem_init(struct meminfo *mi,
154 * FIXME: We really want to avoid allocating the bootmap bitmap
155 * over the top of the initrd. Hopefully, this is located towards
156 * the start of a bank, so if we allocate the bootmap bitmap at
157 * the end, we won't clash.
158 */
159static unsigned int __init
160find_bootmap_pfn(int node, struct meminfo *mi, unsigned int bootmap_pages)
161{
162 unsigned int start_pfn, i, bootmap_pfn;
163
164 start_pfn = PAGE_ALIGN(__pa(_end)) >> PAGE_SHIFT;
165 bootmap_pfn = 0;
166
167 for_each_nodebank(i, mi, node) {
168 struct membank *bank = &mi->bank[i];
169 unsigned int start, end;
170
171 start = bank_pfn_start(bank);
172 end = bank_pfn_end(bank);
173
174 if (end < start_pfn)
175 continue;
176
177 if (start < start_pfn)
178 start = start_pfn;
179
180 if (end <= start)
181 continue;
182
183 if (end - start >= bootmap_pages) {
184 bootmap_pfn = start;
185 break;
186 }
187 }
188
189 if (bootmap_pfn == 0)
190 BUG();
191
192 return bootmap_pfn;
193}
194
195static int __init check_initrd(struct meminfo *mi)
196{
197 int initrd_node = -2;
198#ifdef CONFIG_BLK_DEV_INITRD
199 unsigned long end = phys_initrd_start + phys_initrd_size;
200
201 /*
202 * Make sure that the initrd is within a valid area of
203 * memory.
204 */
205 if (phys_initrd_size) {
206 unsigned int i;
207
208 initrd_node = -1;
209
210 for (i = 0; i < mi->nr_banks; i++) {
211 struct membank *bank = &mi->bank[i];
212 if (bank_phys_start(bank) <= phys_initrd_start &&
213 end <= bank_phys_end(bank))
214 initrd_node = bank->node;
215 }
216 }
217
218 if (initrd_node == -1) {
219 printk(KERN_ERR "INITRD: 0x%08lx+0x%08lx extends beyond "
220 "physical memory - disabling initrd\n",
221 phys_initrd_start, phys_initrd_size);
222 phys_initrd_start = phys_initrd_size = 0;
223 }
224#endif
225
226 return initrd_node;
227}
228
229static inline void map_memory_bank(struct membank *bank)
230{
231#ifdef CONFIG_MMU
232 struct map_desc map;
233
234 map.pfn = bank_pfn_start(bank);
235 map.virtual = __phys_to_virt(bank_phys_start(bank));
236 map.length = bank_phys_size(bank);
237 map.type = MT_MEMORY;
238
239 create_mapping(&map);
240#endif
241}
242
243static void __init bootmem_init_node(int node, struct meminfo *mi,
244 unsigned long start_pfn, unsigned long end_pfn) 151 unsigned long start_pfn, unsigned long end_pfn)
245{ 152{
246 unsigned long boot_pfn;
247 unsigned int boot_pages; 153 unsigned int boot_pages;
154 phys_addr_t bitmap;
248 pg_data_t *pgdat; 155 pg_data_t *pgdat;
249 int i; 156 int i;
250 157
251 /* 158 /*
252 * Map the memory banks for this node. 159 * Allocate the bootmem bitmap page. This must be in a region
253 */ 160 * of memory which has already been mapped.
254 for_each_nodebank(i, mi, node) {
255 struct membank *bank = &mi->bank[i];
256
257 if (!bank->highmem)
258 map_memory_bank(bank);
259 }
260
261 /*
262 * Allocate the bootmem bitmap page.
263 */ 161 */
264 boot_pages = bootmem_bootmap_pages(end_pfn - start_pfn); 162 boot_pages = bootmem_bootmap_pages(end_pfn - start_pfn);
265 boot_pfn = find_bootmap_pfn(node, mi, boot_pages); 163 bitmap = memblock_alloc_base(boot_pages << PAGE_SHIFT, L1_CACHE_BYTES,
164 __pfn_to_phys(end_pfn));
266 165
267 /* 166 /*
268 * Initialise the bootmem allocator for this node, handing the 167 * Initialise the bootmem allocator, handing the
269 * memory banks over to bootmem. 168 * memory banks over to bootmem.
270 */ 169 */
271 node_set_online(node); 170 node_set_online(0);
272 pgdat = NODE_DATA(node); 171 pgdat = NODE_DATA(0);
273 init_bootmem_node(pgdat, boot_pfn, start_pfn, end_pfn); 172 init_bootmem_node(pgdat, __phys_to_pfn(bitmap), start_pfn, end_pfn);
274 173
275 for_each_nodebank(i, mi, node) { 174 for_each_bank(i, mi) {
276 struct membank *bank = &mi->bank[i]; 175 struct membank *bank = &mi->bank[i];
277 if (!bank->highmem) 176 if (!bank->highmem)
278 free_bootmem_node(pgdat, bank_phys_start(bank), bank_phys_size(bank)); 177 free_bootmem(bank_phys_start(bank), bank_phys_size(bank));
279 } 178 }
280 179
281 /* 180 /*
282 * Reserve the bootmem bitmap for this node. 181 * Reserve the memblock reserved regions in bootmem.
283 */ 182 */
284 reserve_bootmem_node(pgdat, boot_pfn << PAGE_SHIFT, 183 for (i = 0; i < memblock.reserved.cnt; i++) {
285 boot_pages << PAGE_SHIFT, BOOTMEM_DEFAULT); 184 phys_addr_t start = memblock_start_pfn(&memblock.reserved, i);
286} 185 if (start >= start_pfn &&
287 186 memblock_end_pfn(&memblock.reserved, i) <= end_pfn)
288static void __init bootmem_reserve_initrd(int node) 187 reserve_bootmem_node(pgdat, __pfn_to_phys(start),
289{ 188 memblock_size_bytes(&memblock.reserved, i),
290#ifdef CONFIG_BLK_DEV_INITRD 189 BOOTMEM_DEFAULT);
291 pg_data_t *pgdat = NODE_DATA(node);
292 int res;
293
294 res = reserve_bootmem_node(pgdat, phys_initrd_start,
295 phys_initrd_size, BOOTMEM_EXCLUSIVE);
296
297 if (res == 0) {
298 initrd_start = __phys_to_virt(phys_initrd_start);
299 initrd_end = initrd_start + phys_initrd_size;
300 } else {
301 printk(KERN_ERR
302 "INITRD: 0x%08lx+0x%08lx overlaps in-use "
303 "memory region - disabling initrd\n",
304 phys_initrd_start, phys_initrd_size);
305 } 190 }
306#endif
307} 191}
308 192
309static void __init bootmem_free_node(int node, struct meminfo *mi) 193static void __init arm_bootmem_free(struct meminfo *mi, unsigned long min,
194 unsigned long max_low, unsigned long max_high)
310{ 195{
311 unsigned long zone_size[MAX_NR_ZONES], zhole_size[MAX_NR_ZONES]; 196 unsigned long zone_size[MAX_NR_ZONES], zhole_size[MAX_NR_ZONES];
312 unsigned long min, max_low, max_high;
313 int i; 197 int i;
314 198
315 find_node_limits(node, mi, &min, &max_low, &max_high);
316
317 /* 199 /*
318 * initialise the zones within this node. 200 * initialise the zones.
319 */ 201 */
320 memset(zone_size, 0, sizeof(zone_size)); 202 memset(zone_size, 0, sizeof(zone_size));
321 203
322 /* 204 /*
323 * The size of this node has already been determined. If we need 205 * The memory size has already been determined. If we need
324 * to do anything fancy with the allocation of this memory to the 206 * to do anything fancy with the allocation of this memory
325 * zones, now is the time to do it. 207 * to the zones, now is the time to do it.
326 */ 208 */
327 zone_size[0] = max_low - min; 209 zone_size[0] = max_low - min;
328#ifdef CONFIG_HIGHMEM 210#ifdef CONFIG_HIGHMEM
@@ -330,11 +212,11 @@ static void __init bootmem_free_node(int node, struct meminfo *mi)
330#endif 212#endif
331 213
332 /* 214 /*
333 * For each bank in this node, calculate the size of the holes. 215 * Calculate the size of the holes.
334 * holes = node_size - sum(bank_sizes_in_node) 216 * holes = node_size - sum(bank_sizes)
335 */ 217 */
336 memcpy(zhole_size, zone_size, sizeof(zhole_size)); 218 memcpy(zhole_size, zone_size, sizeof(zhole_size));
337 for_each_nodebank(i, mi, node) { 219 for_each_bank(i, mi) {
338 int idx = 0; 220 int idx = 0;
339#ifdef CONFIG_HIGHMEM 221#ifdef CONFIG_HIGHMEM
340 if (mi->bank[i].highmem) 222 if (mi->bank[i].highmem)
@@ -347,24 +229,23 @@ static void __init bootmem_free_node(int node, struct meminfo *mi)
347 * Adjust the sizes according to any special requirements for 229 * Adjust the sizes according to any special requirements for
348 * this machine type. 230 * this machine type.
349 */ 231 */
350 arch_adjust_zones(node, zone_size, zhole_size); 232 arch_adjust_zones(zone_size, zhole_size);
351 233
352 free_area_init_node(node, zone_size, min, zhole_size); 234 free_area_init_node(0, zone_size, min, zhole_size);
353} 235}
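
arm_bootmem_free() sizes the lowmem zone as the spanned pfn range and then derives the hole count by subtracting the bank sizes, exactly as the loop above does. A self-contained sketch of that arithmetic with two invented banks:

    #include <stdio.h>

    struct bank { unsigned long start_pfn, end_pfn; };

    int main(void)
    {
        /* two invented lowmem banks with a hole between them */
        struct bank banks[] = { { 0x100, 0x180 }, { 0x1c0, 0x200 } };
        unsigned long min = 0x100, max_low = 0x200;

        unsigned long zone_size = max_low - min;    /* spanned pages */
        unsigned long zhole_size = zone_size;

        /* holes = span - sum(bank sizes), as in the loop above */
        for (int i = 0; i < 2; i++)
            zhole_size -= banks[i].end_pfn - banks[i].start_pfn;

        printf("zone: %lu pages, holes: %lu pages\n", zone_size, zhole_size);
        return 0;
    }
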
354 236
355#ifndef CONFIG_SPARSEMEM 237#ifndef CONFIG_SPARSEMEM
356int pfn_valid(unsigned long pfn) 238int pfn_valid(unsigned long pfn)
357{ 239{
358 struct meminfo *mi = &meminfo; 240 struct memblock_region *mem = &memblock.memory;
359 unsigned int left = 0, right = mi->nr_banks; 241 unsigned int left = 0, right = mem->cnt;
360 242
361 do { 243 do {
362 unsigned int mid = (right + left) / 2; 244 unsigned int mid = (right + left) / 2;
363 struct membank *bank = &mi->bank[mid];
364 245
365 if (pfn < bank_pfn_start(bank)) 246 if (pfn < memblock_start_pfn(mem, mid))
366 right = mid; 247 right = mid;
367 else if (pfn >= bank_pfn_end(bank)) 248 else if (pfn >= memblock_end_pfn(mem, mid))
368 left = mid + 1; 249 left = mid + 1;
369 else 250 else
370 return 1; 251 return 1;
@@ -373,82 +254,69 @@ int pfn_valid(unsigned long pfn)
373} 254}
374EXPORT_SYMBOL(pfn_valid); 255EXPORT_SYMBOL(pfn_valid);
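
The rewritten pfn_valid() binary-searches the sorted, non-overlapping memblock regions rather than scanning meminfo banks. A standalone userspace rendering of the same search follows; the region values are invented, and the kernel indexes memblock.memory instead of a local array:

    #include <assert.h>
    #include <stdio.h>

    struct region { unsigned long start, end; };  /* [start, end) in pfns */

    /* sorted, non-overlapping regions; the values are invented */
    static const struct region mem[] = {
        { 0x60000, 0x68000 },
        { 0x70000, 0x78000 },
        { 0x80000, 0x90000 },
    };

    static int pfn_valid(unsigned long pfn)
    {
        unsigned int left = 0, right = sizeof(mem) / sizeof(mem[0]);

        do {
            unsigned int mid = (left + right) / 2;

            if (pfn < mem[mid].start)
                right = mid;        /* search the lower half */
            else if (pfn >= mem[mid].end)
                left = mid + 1;     /* search the upper half */
            else
                return 1;           /* pfn lies inside this region */
        } while (left < right);
        return 0;
    }

    int main(void)
    {
        assert(pfn_valid(0x60000));
        assert(!pfn_valid(0x68000));    /* region end is exclusive */
        assert(!pfn_valid(0x69000));    /* hole between regions */
        assert(pfn_valid(0x8ffff));
        puts("pfn_valid sketch ok");
        return 0;
    }
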
375 256
376static void arm_memory_present(struct meminfo *mi, int node) 257static void arm_memory_present(void)
377{ 258{
378} 259}
379#else 260#else
380static void arm_memory_present(struct meminfo *mi, int node) 261static void arm_memory_present(void)
381{ 262{
382 int i; 263 int i;
383 for_each_nodebank(i, mi, node) { 264 for (i = 0; i < memblock.memory.cnt; i++)
384 struct membank *bank = &mi->bank[i]; 265 memory_present(0, memblock_start_pfn(&memblock.memory, i),
385 memory_present(node, bank_pfn_start(bank), bank_pfn_end(bank)); 266 memblock_end_pfn(&memblock.memory, i));
386 }
387} 267}
388#endif 268#endif
389 269
390static int __init meminfo_cmp(const void *_a, const void *_b) 270void __init arm_memblock_init(struct meminfo *mi, struct machine_desc *mdesc)
391{ 271{
392 const struct membank *a = _a, *b = _b; 272 int i;
393 long cmp = bank_pfn_start(a) - bank_pfn_start(b); 273
394 return cmp < 0 ? -1 : cmp > 0 ? 1 : 0; 274 memblock_init();
275 for (i = 0; i < mi->nr_banks; i++)
276 memblock_add(mi->bank[i].start, mi->bank[i].size);
277
278 /* Register the kernel text, kernel data and initrd with memblock. */
279#ifdef CONFIG_XIP_KERNEL
280 memblock_reserve(__pa(_data), _end - _data);
281#else
282 memblock_reserve(__pa(_stext), _end - _stext);
283#endif
284#ifdef CONFIG_BLK_DEV_INITRD
285 if (phys_initrd_size) {
286 memblock_reserve(phys_initrd_start, phys_initrd_size);
287
288 /* Now convert initrd to virtual addresses */
289 initrd_start = __phys_to_virt(phys_initrd_start);
290 initrd_end = initrd_start + phys_initrd_size;
291 }
292#endif
293
294 arm_mm_memblock_reserve();
295
296 /* reserve any platform specific memblock areas */
297 if (mdesc->reserve)
298 mdesc->reserve();
299
300 memblock_analyze();
301 memblock_dump_all();
395} 302}
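
arm_memblock_init() establishes the ordering that matters here: the banks are added first, then the kernel image, initrd, page tables and any mdesc->reserve() ranges are reserved, and only then is the memory analyzed and handed on. What memblock provides at this stage is a bounded allocator, as in the memblock_alloc_base() call earlier. Below is a toy top-down allocator in that spirit; all addresses are invented, and real memblock walks its free ranges rather than probing downwards like this:

    #include <assert.h>
    #include <stdio.h>

    #define MEM_START 0x60000000UL
    #define MEM_END   0x70000000UL

    static struct { unsigned long start, end; } resv[16];
    static int nresv;

    static int overlaps(unsigned long s, unsigned long e, int i)
    {
        return s < resv[i].end && resv[i].start < e;
    }

    /* allocate 'size' bytes below 'limit', stepping down past clashes */
    static unsigned long alloc_base(unsigned long size, unsigned long align,
                                    unsigned long limit)
    {
        unsigned long base = (limit < MEM_END ? limit : MEM_END) - size;

        base &= ~(align - 1);
        while (base >= MEM_START) {
            int clash = -1;
            for (int i = 0; i < nresv; i++)
                if (overlaps(base, base + size, i))
                    clash = i;
            if (clash < 0) {            /* free: record and return */
                resv[nresv].start = base;
                resv[nresv].end = base + size;
                nresv++;
                return base;
            }
            if (resv[clash].start < MEM_START + size)
                return 0;               /* nothing fits below */
            base = (resv[clash].start - size) & ~(align - 1);
        }
        return 0;
    }

    int main(void)
    {
        /* pretend the kernel image reserved itself first */
        resv[nresv].start = 0x60008000UL;
        resv[nresv].end   = 0x60508000UL;
        nresv++;

        unsigned long bitmap = alloc_base(0x8000, 0x1000, 0x68000000UL);
        assert(bitmap && bitmap + 0x8000 <= 0x68000000UL);
        printf("bootmem bitmap at 0x%08lx\n", bitmap);
        return 0;
    }
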
396 303
397void __init bootmem_init(void) 304void __init bootmem_init(void)
398{ 305{
399 struct meminfo *mi = &meminfo; 306 struct meminfo *mi = &meminfo;
400 unsigned long min, max_low, max_high; 307 unsigned long min, max_low, max_high;
401 int node, initrd_node;
402 308
403 sort(&mi->bank, mi->nr_banks, sizeof(mi->bank[0]), meminfo_cmp, NULL); 309 max_low = max_high = 0;
404 310
405 /* 311 find_limits(mi, &min, &max_low, &max_high);
406 * Locate which node contains the ramdisk image, if any.
407 */
408 initrd_node = check_initrd(mi);
409 312
410 max_low = max_high = 0; 313 arm_bootmem_init(mi, min, max_low);
411 314
412 /* 315 /*
413 * Run through each node initialising the bootmem allocator. 316 * Sparsemem tries to allocate bootmem in memory_present(),
317 * so must be done after the fixed reservations
414 */ 318 */
415 for_each_node(node) { 319 arm_memory_present();
416 unsigned long node_low, node_high;
417
418 find_node_limits(node, mi, &min, &node_low, &node_high);
419
420 if (node_low > max_low)
421 max_low = node_low;
422 if (node_high > max_high)
423 max_high = node_high;
424
425 /*
426 * If there is no memory in this node, ignore it.
427 * (We can't have nodes which have no lowmem)
428 */
429 if (node_low == 0)
430 continue;
431
432 bootmem_init_node(node, mi, min, node_low);
433
434 /*
435 * Reserve any special node zero regions.
436 */
437 if (node == 0)
438 reserve_node_zero(NODE_DATA(node));
439
440 /*
441 * If the initrd is in this node, reserve its memory.
442 */
443 if (node == initrd_node)
444 bootmem_reserve_initrd(node);
445
446 /*
447 * Sparsemem tries to allocate bootmem in memory_present(),
448 * so must be done after the fixed reservations
449 */
450 arm_memory_present(mi, node);
451 }
452 320
453 /* 321 /*
454 * sparse_init() needs the bootmem allocator up and running. 322 * sparse_init() needs the bootmem allocator up and running.
@@ -456,12 +324,11 @@ void __init bootmem_init(void)
456 sparse_init(); 324 sparse_init();
457 325
458 /* 326 /*
459 * Now free memory in each node - free_area_init_node needs 327 * Now free the memory - free_area_init_node needs
460 * the sparse mem_map arrays initialized by sparse_init() 328 * the sparse mem_map arrays initialized by sparse_init()
461 * for memmap_init_zone(), otherwise all PFNs are invalid. 329 * for memmap_init_zone(), otherwise all PFNs are invalid.
462 */ 330 */
463 for_each_node(node) 331 arm_bootmem_free(mi, min, max_low, max_high);
464 bootmem_free_node(node, mi);
465 332
466 high_memory = __va((max_low << PAGE_SHIFT) - 1) + 1; 333 high_memory = __va((max_low << PAGE_SHIFT) - 1) + 1;
467 334
@@ -496,7 +363,7 @@ static inline int free_area(unsigned long pfn, unsigned long end, char *s)
496} 363}
497 364
498static inline void 365static inline void
499free_memmap(int node, unsigned long start_pfn, unsigned long end_pfn) 366free_memmap(unsigned long start_pfn, unsigned long end_pfn)
500{ 367{
501 struct page *start_pg, *end_pg; 368 struct page *start_pg, *end_pg;
502 unsigned long pg, pgend; 369 unsigned long pg, pgend;
@@ -519,40 +386,39 @@ free_memmap(int node, unsigned long start_pfn, unsigned long end_pfn)
519 * free the section of the memmap array. 386 * free the section of the memmap array.
520 */ 387 */
521 if (pg < pgend) 388 if (pg < pgend)
522 free_bootmem_node(NODE_DATA(node), pg, pgend - pg); 389 free_bootmem(pg, pgend - pg);
523} 390}
524 391
525/* 392/*
526 * The mem_map array can get very big. Free the unused area of the memory map. 393 * The mem_map array can get very big. Free the unused area of the memory map.
527 */ 394 */
528static void __init free_unused_memmap_node(int node, struct meminfo *mi) 395static void __init free_unused_memmap(struct meminfo *mi)
529{ 396{
530 unsigned long bank_start, prev_bank_end = 0; 397 unsigned long bank_start, prev_bank_end = 0;
531 unsigned int i; 398 unsigned int i;
532 399
533 /* 400 /*
534 * [FIXME] This relies on each bank being in address order. This 401 * This relies on each bank being in address order.
 535 * may not be the case, especially if the user has provided the 402 * The banks are sorted previously in paging_init().
536 * information on the command line.
537 */ 403 */
538 for_each_nodebank(i, mi, node) { 404 for_each_bank(i, mi) {
539 struct membank *bank = &mi->bank[i]; 405 struct membank *bank = &mi->bank[i];
540 406
541 bank_start = bank_pfn_start(bank); 407 bank_start = bank_pfn_start(bank);
542 if (bank_start < prev_bank_end) {
543 printk(KERN_ERR "MEM: unordered memory banks. "
544 "Not freeing memmap.\n");
545 break;
546 }
547 408
548 /* 409 /*
549 * If we had a previous bank, and there is a space 410 * If we had a previous bank, and there is a space
550 * between the current bank and the previous, free it. 411 * between the current bank and the previous, free it.
551 */ 412 */
552 if (prev_bank_end && prev_bank_end != bank_start) 413 if (prev_bank_end && prev_bank_end < bank_start)
553 free_memmap(node, prev_bank_end, bank_start); 414 free_memmap(prev_bank_end, bank_start);
554 415
555 prev_bank_end = bank_pfn_end(bank); 416 /*
417 * Align up here since the VM subsystem insists that the
418 * memmap entries are valid from the bank end aligned to
419 * MAX_ORDER_NR_PAGES.
420 */
421 prev_bank_end = ALIGN(bank_pfn_end(bank), MAX_ORDER_NR_PAGES);
556 } 422 }
557} 423}
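
Two behavioural points in the rewritten free_unused_memmap(): the unordered-banks bailout is gone because the banks are now sorted up front, and the previous bank end is rounded up to MAX_ORDER_NR_PAGES so the buddy allocator can still touch memmap entries out to that boundary. A sketch with invented, sorted banks (the MAX_ORDER_NR_PAGES value is assumed):

    #include <stdio.h>

    #define MAX_ORDER_NR_PAGES 1024UL   /* assumed: 4 MiB of 4 KiB pages */
    #define ALIGN(x, a) (((x) + (a) - 1) & ~((a) - 1))

    struct bank { unsigned long start_pfn, end_pfn; };

    int main(void)
    {
        /* invented banks, already sorted by start pfn */
        struct bank banks[] = { { 0x10000, 0x10300 }, { 0x20000, 0x28000 } };
        unsigned long prev_bank_end = 0;

        for (int i = 0; i < 2; i++) {
            unsigned long bank_start = banks[i].start_pfn;

            /* free the memmap covering the hole, if there is one */
            if (prev_bank_end && prev_bank_end < bank_start)
                printf("free memmap for pfns [0x%lx, 0x%lx)\n",
                       prev_bank_end, bank_start);

            /* keep memmap valid out to a MAX_ORDER boundary */
            prev_bank_end = ALIGN(banks[i].end_pfn, MAX_ORDER_NR_PAGES);
        }
        return 0;
    }
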
558 424
@@ -564,21 +430,19 @@ static void __init free_unused_memmap_node(int node, struct meminfo *mi)
564void __init mem_init(void) 430void __init mem_init(void)
565{ 431{
566 unsigned long reserved_pages, free_pages; 432 unsigned long reserved_pages, free_pages;
567 int i, node; 433 int i;
434#ifdef CONFIG_HAVE_TCM
435 /* These pointers are filled in on TCM detection */
436 extern u32 dtcm_end;
437 extern u32 itcm_end;
438#endif
568 439
569#ifndef CONFIG_DISCONTIGMEM
570 max_mapnr = pfn_to_page(max_pfn + PHYS_PFN_OFFSET) - mem_map; 440 max_mapnr = pfn_to_page(max_pfn + PHYS_PFN_OFFSET) - mem_map;
571#endif
572 441
573 /* this will put all unused low memory onto the freelists */ 442 /* this will put all unused low memory onto the freelists */
574 for_each_online_node(node) { 443 free_unused_memmap(&meminfo);
575 pg_data_t *pgdat = NODE_DATA(node);
576
577 free_unused_memmap_node(node, &meminfo);
578 444
579 if (pgdat->node_spanned_pages != 0) 445 totalram_pages += free_all_bootmem();
580 totalram_pages += free_all_bootmem_node(pgdat);
581 }
582 446
583#ifdef CONFIG_SA1111 447#ifdef CONFIG_SA1111
584 /* now that our DMA memory is actually so designated, we can free it */ 448 /* now that our DMA memory is actually so designated, we can free it */
@@ -588,42 +452,35 @@ void __init mem_init(void)
588 452
589#ifdef CONFIG_HIGHMEM 453#ifdef CONFIG_HIGHMEM
590 /* set highmem page free */ 454 /* set highmem page free */
591 for_each_online_node(node) { 455 for_each_bank (i, &meminfo) {
592 for_each_nodebank (i, &meminfo, node) { 456 unsigned long start = bank_pfn_start(&meminfo.bank[i]);
593 unsigned long start = bank_pfn_start(&meminfo.bank[i]); 457 unsigned long end = bank_pfn_end(&meminfo.bank[i]);
594 unsigned long end = bank_pfn_end(&meminfo.bank[i]); 458 if (start >= max_low_pfn + PHYS_PFN_OFFSET)
595 if (start >= max_low_pfn + PHYS_PFN_OFFSET) 459 totalhigh_pages += free_area(start, end, NULL);
596 totalhigh_pages += free_area(start, end, NULL);
597 }
598 } 460 }
599 totalram_pages += totalhigh_pages; 461 totalram_pages += totalhigh_pages;
600#endif 462#endif
601 463
602 reserved_pages = free_pages = 0; 464 reserved_pages = free_pages = 0;
603 465
604 for_each_online_node(node) { 466 for_each_bank(i, &meminfo) {
605 pg_data_t *n = NODE_DATA(node); 467 struct membank *bank = &meminfo.bank[i];
606 struct page *map = pgdat_page_nr(n, 0) - n->node_start_pfn; 468 unsigned int pfn1, pfn2;
607 469 struct page *page, *end;
608 for_each_nodebank(i, &meminfo, node) { 470
609 struct membank *bank = &meminfo.bank[i]; 471 pfn1 = bank_pfn_start(bank);
610 unsigned int pfn1, pfn2; 472 pfn2 = bank_pfn_end(bank);
611 struct page *page, *end; 473
612 474 page = pfn_to_page(pfn1);
613 pfn1 = bank_pfn_start(bank); 475 end = pfn_to_page(pfn2 - 1) + 1;
614 pfn2 = bank_pfn_end(bank); 476
615 477 do {
616 page = map + pfn1; 478 if (PageReserved(page))
617 end = map + pfn2; 479 reserved_pages++;
618 480 else if (!page_count(page))
619 do { 481 free_pages++;
620 if (PageReserved(page)) 482 page++;
621 reserved_pages++; 483 } while (page < end);
622 else if (!page_count(page))
623 free_pages++;
624 page++;
625 } while (page < end);
626 }
627 } 484 }
628 485
629 /* 486 /*
@@ -650,6 +507,10 @@ void __init mem_init(void)
650 507
651 printk(KERN_NOTICE "Virtual kernel memory layout:\n" 508 printk(KERN_NOTICE "Virtual kernel memory layout:\n"
652 " vector : 0x%08lx - 0x%08lx (%4ld kB)\n" 509 " vector : 0x%08lx - 0x%08lx (%4ld kB)\n"
510#ifdef CONFIG_HAVE_TCM
511 " DTCM : 0x%08lx - 0x%08lx (%4ld kB)\n"
512 " ITCM : 0x%08lx - 0x%08lx (%4ld kB)\n"
513#endif
653 " fixmap : 0x%08lx - 0x%08lx (%4ld kB)\n" 514 " fixmap : 0x%08lx - 0x%08lx (%4ld kB)\n"
654#ifdef CONFIG_MMU 515#ifdef CONFIG_MMU
655 " DMA : 0x%08lx - 0x%08lx (%4ld MB)\n" 516 " DMA : 0x%08lx - 0x%08lx (%4ld MB)\n"
@@ -666,6 +527,10 @@ void __init mem_init(void)
666 527
667 MLK(UL(CONFIG_VECTORS_BASE), UL(CONFIG_VECTORS_BASE) + 528 MLK(UL(CONFIG_VECTORS_BASE), UL(CONFIG_VECTORS_BASE) +
668 (PAGE_SIZE)), 529 (PAGE_SIZE)),
530#ifdef CONFIG_HAVE_TCM
531 MLK(DTCM_OFFSET, (unsigned long) dtcm_end),
532 MLK(ITCM_OFFSET, (unsigned long) itcm_end),
533#endif
669 MLK(FIXADDR_START, FIXADDR_TOP), 534 MLK(FIXADDR_START, FIXADDR_TOP),
670#ifdef CONFIG_MMU 535#ifdef CONFIG_MMU
671 MLM(CONSISTENT_BASE, CONSISTENT_END), 536 MLM(CONSISTENT_BASE, CONSISTENT_END),
@@ -717,10 +582,10 @@ void __init mem_init(void)
717void free_initmem(void) 582void free_initmem(void)
718{ 583{
719#ifdef CONFIG_HAVE_TCM 584#ifdef CONFIG_HAVE_TCM
720 extern char *__tcm_start, *__tcm_end; 585 extern char __tcm_start, __tcm_end;
721 586
722 totalram_pages += free_area(__phys_to_pfn(__pa(__tcm_start)), 587 totalram_pages += free_area(__phys_to_pfn(__pa(&__tcm_start)),
723 __phys_to_pfn(__pa(__tcm_end)), 588 __phys_to_pfn(__pa(&__tcm_end)),
724 "TCM link"); 589 "TCM link");
725#endif 590#endif
726 591
diff --git a/arch/arm/mm/ioremap.c b/arch/arm/mm/ioremap.c
index 28c8b950ef04..ab506272b2d3 100644
--- a/arch/arm/mm/ioremap.c
+++ b/arch/arm/mm/ioremap.c
@@ -42,78 +42,11 @@
42 */ 42 */
43#define VM_ARM_SECTION_MAPPING 0x80000000 43#define VM_ARM_SECTION_MAPPING 0x80000000
44 44
45static int remap_area_pte(pmd_t *pmd, unsigned long addr, unsigned long end,
46 unsigned long phys_addr, const struct mem_type *type)
47{
48 pgprot_t prot = __pgprot(type->prot_pte);
49 pte_t *pte;
50
51 pte = pte_alloc_kernel(pmd, addr);
52 if (!pte)
53 return -ENOMEM;
54
55 do {
56 if (!pte_none(*pte))
57 goto bad;
58
59 set_pte_ext(pte, pfn_pte(phys_addr >> PAGE_SHIFT, prot), 0);
60 phys_addr += PAGE_SIZE;
61 } while (pte++, addr += PAGE_SIZE, addr != end);
62 return 0;
63
64 bad:
65 printk(KERN_CRIT "remap_area_pte: page already exists\n");
66 BUG();
67}
68
69static inline int remap_area_pmd(pgd_t *pgd, unsigned long addr,
70 unsigned long end, unsigned long phys_addr,
71 const struct mem_type *type)
72{
73 unsigned long next;
74 pmd_t *pmd;
75 int ret = 0;
76
77 pmd = pmd_alloc(&init_mm, pgd, addr);
78 if (!pmd)
79 return -ENOMEM;
80
81 do {
82 next = pmd_addr_end(addr, end);
83 ret = remap_area_pte(pmd, addr, next, phys_addr, type);
84 if (ret)
85 return ret;
86 phys_addr += next - addr;
87 } while (pmd++, addr = next, addr != end);
88 return ret;
89}
90
91static int remap_area_pages(unsigned long start, unsigned long pfn,
92 size_t size, const struct mem_type *type)
93{
94 unsigned long addr = start;
95 unsigned long next, end = start + size;
96 unsigned long phys_addr = __pfn_to_phys(pfn);
97 pgd_t *pgd;
98 int err = 0;
99
100 BUG_ON(addr >= end);
101 pgd = pgd_offset_k(addr);
102 do {
103 next = pgd_addr_end(addr, end);
104 err = remap_area_pmd(pgd, addr, next, phys_addr, type);
105 if (err)
106 break;
107 phys_addr += next - addr;
108 } while (pgd++, addr = next, addr != end);
109
110 return err;
111}
112
113int ioremap_page(unsigned long virt, unsigned long phys, 45int ioremap_page(unsigned long virt, unsigned long phys,
114 const struct mem_type *mtype) 46 const struct mem_type *mtype)
115{ 47{
116 return remap_area_pages(virt, __phys_to_pfn(phys), PAGE_SIZE, mtype); 48 return ioremap_page_range(virt, virt + PAGE_SIZE, phys,
49 __pgprot(mtype->prot_pte));
117} 50}
118EXPORT_SYMBOL(ioremap_page); 51EXPORT_SYMBOL(ioremap_page);
119 52
@@ -268,6 +201,12 @@ void __iomem * __arm_ioremap_pfn_caller(unsigned long pfn,
268 if (pfn >= 0x100000 && (__pfn_to_phys(pfn) & ~SUPERSECTION_MASK)) 201 if (pfn >= 0x100000 && (__pfn_to_phys(pfn) & ~SUPERSECTION_MASK))
269 return NULL; 202 return NULL;
270 203
204 /*
205 * Don't allow RAM to be mapped - this causes problems with ARMv6+
206 */
207 if (WARN_ON(pfn_valid(pfn)))
208 return NULL;
209
271 type = get_mem_type(mtype); 210 type = get_mem_type(mtype);
272 if (!type) 211 if (!type)
273 return NULL; 212 return NULL;
@@ -294,7 +233,8 @@ void __iomem * __arm_ioremap_pfn_caller(unsigned long pfn,
294 err = remap_area_sections(addr, pfn, size, type); 233 err = remap_area_sections(addr, pfn, size, type);
295 } else 234 } else
296#endif 235#endif
297 err = remap_area_pages(addr, pfn, size, type); 236 err = ioremap_page_range(addr, addr + size, __pfn_to_phys(pfn),
237 __pgprot(type->prot_pte));
298 238
299 if (err) { 239 if (err) {
300 vunmap((void *)addr); 240 vunmap((void *)addr);
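
The ioremap changes are twofold: system RAM is now refused outright, since remapping RAM with device attributes is unpredictable on ARMv6+, and the open-coded pgd/pmd/pte walker is dropped in favour of the generic ioremap_page_range(). A toy single-level model of that behaviour; the RAM window and the table are invented:

    #include <stdio.h>

    #define NENTRIES 256

    /* toy single-level "page table": virtual page -> physical page */
    static unsigned long pt[NENTRIES];

    /* stand-in for pfn_valid(): one invented RAM window */
    static int pfn_is_ram(unsigned long pfn)
    {
        return pfn >= 0x60000 && pfn < 0x70000;
    }

    static int toy_ioremap(unsigned long vpage, unsigned long pfn,
                           unsigned long npages)
    {
        if (pfn_is_ram(pfn))
            return -1;          /* refuse RAM, as the WARN_ON check does */
        for (unsigned long i = 0; i < npages; i++)
            pt[vpage + i] = pfn + i;    /* generic range mapping */
        return 0;
    }

    int main(void)
    {
        if (toy_ioremap(0, 0x60000, 1))
            puts("refused to remap RAM");
        if (!toy_ioremap(0, 0x48000, 4))
            puts("mapped 4 device pages");
        return 0;
    }
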
diff --git a/arch/arm/mm/mm.h b/arch/arm/mm/mm.h
index a888363398f8..6630620380a4 100644
--- a/arch/arm/mm/mm.h
+++ b/arch/arm/mm/mm.h
@@ -28,10 +28,5 @@ extern void __flush_dcache_page(struct address_space *mapping, struct page *page
28 28
29#endif 29#endif
30 30
31struct map_desc;
32struct meminfo;
33struct pglist_data;
34
35void __init create_mapping(struct map_desc *md);
36void __init bootmem_init(void); 31void __init bootmem_init(void);
37void reserve_node_zero(struct pglist_data *pgdat); 32void arm_mm_memblock_reserve(void);
diff --git a/arch/arm/mm/mmap.c b/arch/arm/mm/mmap.c
index f5abc51c5a07..4f5b39687df5 100644
--- a/arch/arm/mm/mmap.c
+++ b/arch/arm/mm/mmap.c
@@ -7,6 +7,7 @@
7#include <linux/shm.h> 7#include <linux/shm.h>
8#include <linux/sched.h> 8#include <linux/sched.h>
9#include <linux/io.h> 9#include <linux/io.h>
10#include <linux/random.h>
10#include <asm/cputype.h> 11#include <asm/cputype.h>
11#include <asm/system.h> 12#include <asm/system.h>
12 13
@@ -80,6 +81,9 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr,
80 start_addr = addr = TASK_UNMAPPED_BASE; 81 start_addr = addr = TASK_UNMAPPED_BASE;
81 mm->cached_hole_size = 0; 82 mm->cached_hole_size = 0;
82 } 83 }
84 /* 8 bits of randomness in 20 address space bits */
85 if (current->flags & PF_RANDOMIZE)
86 addr += (get_random_int() % (1 << 8)) << PAGE_SHIFT;
83 87
84full_search: 88full_search:
85 if (do_align) 89 if (do_align)
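
The randomization added above offsets the search base by up to 255 pages: 8 random bits shifted left by PAGE_SHIFT, which with 4 KiB pages perturbs mmap bases within 1 MiB, hence "8 bits of randomness in 20 address space bits". A sketch using rand() as a stand-in for get_random_int(); the base address is invented:

    #include <stdio.h>
    #include <stdlib.h>
    #include <time.h>

    #define PAGE_SHIFT 12

    int main(void)
    {
        unsigned long base = 0x40000000UL;  /* stand-in TASK_UNMAPPED_BASE */

        srand((unsigned int)time(NULL));    /* get_random_int() stand-in */
        unsigned long off = (unsigned long)(rand() % (1 << 8)) << PAGE_SHIFT;

        printf("offset 0x%05lx of max 0x%05lx, base 0x%08lx\n",
               off, ((1UL << 8) - 1) << PAGE_SHIFT, base + off);
        return 0;
    }
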
diff --git a/arch/arm/mm/mmu.c b/arch/arm/mm/mmu.c
index 9d4da6ac28eb..6e1c4f6a2b3f 100644
--- a/arch/arm/mm/mmu.c
+++ b/arch/arm/mm/mmu.c
@@ -11,12 +11,12 @@
11#include <linux/kernel.h> 11#include <linux/kernel.h>
12#include <linux/errno.h> 12#include <linux/errno.h>
13#include <linux/init.h> 13#include <linux/init.h>
14#include <linux/bootmem.h>
15#include <linux/mman.h> 14#include <linux/mman.h>
16#include <linux/nodemask.h> 15#include <linux/nodemask.h>
16#include <linux/memblock.h>
17#include <linux/sort.h>
17 18
18#include <asm/cputype.h> 19#include <asm/cputype.h>
19#include <asm/mach-types.h>
20#include <asm/sections.h> 20#include <asm/sections.h>
21#include <asm/cachetype.h> 21#include <asm/cachetype.h>
22#include <asm/setup.h> 22#include <asm/setup.h>
@@ -257,6 +257,19 @@ static struct mem_type mem_types[] = {
257 .prot_sect = PMD_TYPE_SECT | PMD_SECT_AP_WRITE, 257 .prot_sect = PMD_TYPE_SECT | PMD_SECT_AP_WRITE,
258 .domain = DOMAIN_KERNEL, 258 .domain = DOMAIN_KERNEL,
259 }, 259 },
260 [MT_MEMORY_DTCM] = {
261 .prot_pte = L_PTE_PRESENT | L_PTE_YOUNG |
262 L_PTE_DIRTY | L_PTE_WRITE,
263 .prot_l1 = PMD_TYPE_TABLE,
264 .prot_sect = PMD_TYPE_SECT | PMD_SECT_XN,
265 .domain = DOMAIN_KERNEL,
266 },
267 [MT_MEMORY_ITCM] = {
268 .prot_pte = L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_DIRTY |
269 L_PTE_USER | L_PTE_EXEC,
270 .prot_l1 = PMD_TYPE_TABLE,
271 .domain = DOMAIN_IO,
272 },
260}; 273};
261 274
262const struct mem_type *get_mem_type(unsigned int type) 275const struct mem_type *get_mem_type(unsigned int type)
@@ -420,6 +433,10 @@ static void __init build_mem_type_table(void)
420 user_pgprot |= L_PTE_SHARED; 433 user_pgprot |= L_PTE_SHARED;
421 kern_pgprot |= L_PTE_SHARED; 434 kern_pgprot |= L_PTE_SHARED;
422 vecs_pgprot |= L_PTE_SHARED; 435 vecs_pgprot |= L_PTE_SHARED;
436 mem_types[MT_DEVICE_WC].prot_sect |= PMD_SECT_S;
437 mem_types[MT_DEVICE_WC].prot_pte |= L_PTE_SHARED;
438 mem_types[MT_DEVICE_CACHED].prot_sect |= PMD_SECT_S;
439 mem_types[MT_DEVICE_CACHED].prot_pte |= L_PTE_SHARED;
423 mem_types[MT_MEMORY].prot_sect |= PMD_SECT_S; 440 mem_types[MT_MEMORY].prot_sect |= PMD_SECT_S;
424 mem_types[MT_MEMORY_NONCACHED].prot_sect |= PMD_SECT_S; 441 mem_types[MT_MEMORY_NONCACHED].prot_sect |= PMD_SECT_S;
425#endif 442#endif
@@ -483,18 +500,28 @@ static void __init build_mem_type_table(void)
483 500
484#define vectors_base() (vectors_high() ? 0xffff0000 : 0) 501#define vectors_base() (vectors_high() ? 0xffff0000 : 0)
485 502
486static void __init alloc_init_pte(pmd_t *pmd, unsigned long addr, 503static void __init *early_alloc(unsigned long sz)
487 unsigned long end, unsigned long pfn,
488 const struct mem_type *type)
489{ 504{
490 pte_t *pte; 505 void *ptr = __va(memblock_alloc(sz, sz));
506 memset(ptr, 0, sz);
507 return ptr;
508}
491 509
510static pte_t * __init early_pte_alloc(pmd_t *pmd, unsigned long addr, unsigned long prot)
511{
492 if (pmd_none(*pmd)) { 512 if (pmd_none(*pmd)) {
493 pte = alloc_bootmem_low_pages(2 * PTRS_PER_PTE * sizeof(pte_t)); 513 pte_t *pte = early_alloc(2 * PTRS_PER_PTE * sizeof(pte_t));
494 __pmd_populate(pmd, __pa(pte) | type->prot_l1); 514 __pmd_populate(pmd, __pa(pte) | prot);
495 } 515 }
516 BUG_ON(pmd_bad(*pmd));
517 return pte_offset_kernel(pmd, addr);
518}
496 519
497 pte = pte_offset_kernel(pmd, addr); 520static void __init alloc_init_pte(pmd_t *pmd, unsigned long addr,
521 unsigned long end, unsigned long pfn,
522 const struct mem_type *type)
523{
524 pte_t *pte = early_pte_alloc(pmd, addr, type->prot_l1);
498 do { 525 do {
499 set_pte_ext(pte, pfn_pte(pfn, __pgprot(type->prot_pte)), 0); 526 set_pte_ext(pte, pfn_pte(pfn, __pgprot(type->prot_pte)), 0);
500 pfn++; 527 pfn++;
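
early_pte_alloc() factors out the populate-on-first-use pattern: if the pmd is empty, allocate a zeroed table via early_alloc() (which passes the size as the alignment, so tables come back naturally aligned) and point the pmd at it; otherwise reuse what is there. A userspace analogue with a toy two-level table; the sizes and names are invented:

    #include <assert.h>
    #include <stdio.h>
    #include <stdlib.h>

    #define L2_ENTRIES 256

    /* toy first level: 16 slots, each pointing at an on-demand table */
    static unsigned long *l1[16];

    static unsigned long *pte_alloc(unsigned int idx)
    {
        if (!l1[idx])                   /* the "pmd_none(*pmd)" case */
            l1[idx] = calloc(L2_ENTRIES, sizeof(unsigned long));
        assert(l1[idx]);                /* table must be valid before use */
        return l1[idx];
    }

    int main(void)
    {
        unsigned long *t1 = pte_alloc(3);
        unsigned long *t2 = pte_alloc(3);   /* second call reuses the table */

        t1[7] = 0xdeadbeefUL;
        printf("same table: %s, entry 7: 0x%lx\n",
               t1 == t2 ? "yes" : "no", t2[7]);
        free(t1);
        return 0;
    }
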
@@ -599,7 +626,7 @@ static void __init create_36bit_mapping(struct map_desc *md,
599 * offsets, and we take full advantage of sections and 626 * offsets, and we take full advantage of sections and
600 * supersections. 627 * supersections.
601 */ 628 */
602void __init create_mapping(struct map_desc *md) 629static void __init create_mapping(struct map_desc *md)
603{ 630{
604 unsigned long phys, addr, length, end; 631 unsigned long phys, addr, length, end;
605 const struct mem_type *type; 632 const struct mem_type *type;
@@ -663,7 +690,7 @@ void __init iotable_init(struct map_desc *io_desc, int nr)
663 create_mapping(io_desc + i); 690 create_mapping(io_desc + i);
664} 691}
665 692
666static unsigned long __initdata vmalloc_reserve = SZ_128M; 693static void * __initdata vmalloc_min = (void *)(VMALLOC_END - SZ_128M);
667 694
668/* 695/*
669 * vmalloc=size forces the vmalloc area to be exactly 'size' 696 * vmalloc=size forces the vmalloc area to be exactly 'size'
@@ -672,7 +699,7 @@ static unsigned long __initdata vmalloc_reserve = SZ_128M;
672 */ 699 */
673static int __init early_vmalloc(char *arg) 700static int __init early_vmalloc(char *arg)
674{ 701{
675 vmalloc_reserve = memparse(arg, NULL); 702 unsigned long vmalloc_reserve = memparse(arg, NULL);
676 703
677 if (vmalloc_reserve < SZ_16M) { 704 if (vmalloc_reserve < SZ_16M) {
678 vmalloc_reserve = SZ_16M; 705 vmalloc_reserve = SZ_16M;
@@ -687,22 +714,26 @@ static int __init early_vmalloc(char *arg)
687 "vmalloc area is too big, limiting to %luMB\n", 714 "vmalloc area is too big, limiting to %luMB\n",
688 vmalloc_reserve >> 20); 715 vmalloc_reserve >> 20);
689 } 716 }
717
718 vmalloc_min = (void *)(VMALLOC_END - vmalloc_reserve);
690 return 0; 719 return 0;
691} 720}
692early_param("vmalloc", early_vmalloc); 721early_param("vmalloc", early_vmalloc);
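
early_vmalloc() now folds the parsed size straight into vmalloc_min instead of keeping a global reserve. The hunk shows the lower clamp to 16 MiB; the upper clamp used below, VMALLOC_END - (PAGE_OFFSET + SZ_32M), is an assumption about the elided check, as are the typical ARM constants:

    #include <stdio.h>

    /* Typical ARM layout, assumed for the demo; the upper clamp below
     * mirrors what the elided kernel check is believed to do. */
    #define PAGE_OFFSET 0xC0000000UL
    #define VMALLOC_END 0xF8000000UL
    #define SZ_16M      0x01000000UL
    #define SZ_32M      0x02000000UL

    static unsigned long clamp_vmalloc_reserve(unsigned long reserve)
    {
        if (reserve < SZ_16M)
            reserve = SZ_16M;                   /* area too small: raise */
        if (reserve > VMALLOC_END - (PAGE_OFFSET + SZ_32M))
            reserve = VMALLOC_END - (PAGE_OFFSET + SZ_32M); /* too big */
        return reserve;
    }

    int main(void)
    {
        unsigned long reserve = clamp_vmalloc_reserve(0x18000000UL); /* 384M */
        void *vmalloc_min = (void *)(VMALLOC_END - reserve);

        printf("reserve %luMB, vmalloc_min %p\n", reserve >> 20, vmalloc_min);
        return 0;
    }
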
693 722
694#define VMALLOC_MIN (void *)(VMALLOC_END - vmalloc_reserve) 723phys_addr_t lowmem_end_addr;
695 724
696static void __init sanity_check_meminfo(void) 725static void __init sanity_check_meminfo(void)
697{ 726{
698 int i, j, highmem = 0; 727 int i, j, highmem = 0;
699 728
729 lowmem_end_addr = __pa(vmalloc_min - 1) + 1;
730
700 for (i = 0, j = 0; i < meminfo.nr_banks; i++) { 731 for (i = 0, j = 0; i < meminfo.nr_banks; i++) {
701 struct membank *bank = &meminfo.bank[j]; 732 struct membank *bank = &meminfo.bank[j];
702 *bank = meminfo.bank[i]; 733 *bank = meminfo.bank[i];
703 734
704#ifdef CONFIG_HIGHMEM 735#ifdef CONFIG_HIGHMEM
705 if (__va(bank->start) > VMALLOC_MIN || 736 if (__va(bank->start) > vmalloc_min ||
706 __va(bank->start) < (void *)PAGE_OFFSET) 737 __va(bank->start) < (void *)PAGE_OFFSET)
707 highmem = 1; 738 highmem = 1;
708 739
@@ -712,8 +743,8 @@ static void __init sanity_check_meminfo(void)
712 * Split those memory banks which are partially overlapping 743 * Split those memory banks which are partially overlapping
713 * the vmalloc area greatly simplifying things later. 744 * the vmalloc area greatly simplifying things later.
714 */ 745 */
715 if (__va(bank->start) < VMALLOC_MIN && 746 if (__va(bank->start) < vmalloc_min &&
716 bank->size > VMALLOC_MIN - __va(bank->start)) { 747 bank->size > vmalloc_min - __va(bank->start)) {
717 if (meminfo.nr_banks >= NR_BANKS) { 748 if (meminfo.nr_banks >= NR_BANKS) {
718 printk(KERN_CRIT "NR_BANKS too low, " 749 printk(KERN_CRIT "NR_BANKS too low, "
719 "ignoring high memory\n"); 750 "ignoring high memory\n");
@@ -722,12 +753,12 @@ static void __init sanity_check_meminfo(void)
722 (meminfo.nr_banks - i) * sizeof(*bank)); 753 (meminfo.nr_banks - i) * sizeof(*bank));
723 meminfo.nr_banks++; 754 meminfo.nr_banks++;
724 i++; 755 i++;
725 bank[1].size -= VMALLOC_MIN - __va(bank->start); 756 bank[1].size -= vmalloc_min - __va(bank->start);
726 bank[1].start = __pa(VMALLOC_MIN - 1) + 1; 757 bank[1].start = __pa(vmalloc_min - 1) + 1;
727 bank[1].highmem = highmem = 1; 758 bank[1].highmem = highmem = 1;
728 j++; 759 j++;
729 } 760 }
730 bank->size = VMALLOC_MIN - __va(bank->start); 761 bank->size = vmalloc_min - __va(bank->start);
731 } 762 }
732#else 763#else
733 bank->highmem = highmem; 764 bank->highmem = highmem;
@@ -736,7 +767,7 @@ static void __init sanity_check_meminfo(void)
736 * Check whether this memory bank would entirely overlap 767 * Check whether this memory bank would entirely overlap
737 * the vmalloc area. 768 * the vmalloc area.
738 */ 769 */
739 if (__va(bank->start) >= VMALLOC_MIN || 770 if (__va(bank->start) >= vmalloc_min ||
740 __va(bank->start) < (void *)PAGE_OFFSET) { 771 __va(bank->start) < (void *)PAGE_OFFSET) {
741 printk(KERN_NOTICE "Ignoring RAM at %.8lx-%.8lx " 772 printk(KERN_NOTICE "Ignoring RAM at %.8lx-%.8lx "
742 "(vmalloc region overlap).\n", 773 "(vmalloc region overlap).\n",
@@ -748,9 +779,9 @@ static void __init sanity_check_meminfo(void)
748 * Check whether this memory bank would partially overlap 779 * Check whether this memory bank would partially overlap
749 * the vmalloc area. 780 * the vmalloc area.
750 */ 781 */
751 if (__va(bank->start + bank->size) > VMALLOC_MIN || 782 if (__va(bank->start + bank->size) > vmalloc_min ||
752 __va(bank->start + bank->size) < __va(bank->start)) { 783 __va(bank->start + bank->size) < __va(bank->start)) {
753 unsigned long newsize = VMALLOC_MIN - __va(bank->start); 784 unsigned long newsize = vmalloc_min - __va(bank->start);
754 printk(KERN_NOTICE "Truncating RAM at %.8lx-%.8lx " 785 printk(KERN_NOTICE "Truncating RAM at %.8lx-%.8lx "
755 "to -%.8lx (vmalloc region overlap).\n", 786 "to -%.8lx (vmalloc region overlap).\n",
756 bank->start, bank->start + bank->size - 1, 787 bank->start, bank->start + bank->size - 1,
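
With CONFIG_HIGHMEM, a bank that straddles the vmalloc boundary is split in place: the low part keeps its start, and the remainder becomes a highmem bank beginning at the boundary. The kernel compares virtual addresses against vmalloc_min; this sketch does the equivalent arithmetic on physical addresses, with an invented 768 MiB bank and a 512 MiB lowmem limit:

    #include <stdio.h>

    struct bank { unsigned long start, size; int highmem; };

    int main(void)
    {
        /* assumed: 768 MiB bank at 0x60000000, lowmem ends at +512 MiB */
        unsigned long lowmem_end = 0x80000000UL;
        struct bank b = { 0x60000000UL, 0x30000000UL, 0 };
        struct bank out[2];
        int n = 0;

        if (b.start < lowmem_end && b.start + b.size > lowmem_end) {
            out[n] = b;                         /* lowmem part */
            out[n++].size = lowmem_end - b.start;
            out[n] = b;                         /* highmem remainder */
            out[n].start = lowmem_end;
            out[n].size = b.start + b.size - lowmem_end;
            out[n++].highmem = 1;
        } else {
            out[n++] = b;
        }

        for (int i = 0; i < n; i++)
            printf("bank %d: 0x%08lx + 0x%08lx%s\n", i, out[i].start,
                   out[i].size, out[i].highmem ? " (highmem)" : "");
        return 0;
    }
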
@@ -822,100 +853,23 @@ static inline void prepare_page_table(void)
822} 853}
823 854
824/* 855/*
825 * Reserve the various regions of node 0 856 * Reserve the special regions of memory
826 */ 857 */
827void __init reserve_node_zero(pg_data_t *pgdat) 858void __init arm_mm_memblock_reserve(void)
828{ 859{
829 unsigned long res_size = 0;
830
831 /*
832 * Register the kernel text and data with bootmem.
833 * Note that this can only be in node 0.
834 */
835#ifdef CONFIG_XIP_KERNEL
836 reserve_bootmem_node(pgdat, __pa(_data), _end - _data,
837 BOOTMEM_DEFAULT);
838#else
839 reserve_bootmem_node(pgdat, __pa(_stext), _end - _stext,
840 BOOTMEM_DEFAULT);
841#endif
842
843 /* 860 /*
844 * Reserve the page tables. These are already in use, 861 * Reserve the page tables. These are already in use,
845 * and can only be in node 0. 862 * and can only be in node 0.
846 */ 863 */
847 reserve_bootmem_node(pgdat, __pa(swapper_pg_dir), 864 memblock_reserve(__pa(swapper_pg_dir), PTRS_PER_PGD * sizeof(pgd_t));
848 PTRS_PER_PGD * sizeof(pgd_t), BOOTMEM_DEFAULT);
849
850 /*
851 * Hmm... This should go elsewhere, but we really really need to
852 * stop things allocating the low memory; ideally we need a better
853 * implementation of GFP_DMA which does not assume that DMA-able
854 * memory starts at zero.
855 */
856 if (machine_is_integrator() || machine_is_cintegrator())
857 res_size = __pa(swapper_pg_dir) - PHYS_OFFSET;
858
859 /*
860 * These should likewise go elsewhere. They pre-reserve the
861 * screen memory region at the start of main system memory.
862 */
863 if (machine_is_edb7211())
864 res_size = 0x00020000;
865 if (machine_is_p720t())
866 res_size = 0x00014000;
867
868 /* H1940 and RX3715 need to reserve this for suspend */
869
870 if (machine_is_h1940() || machine_is_rx3715()) {
871 reserve_bootmem_node(pgdat, 0x30003000, 0x1000,
872 BOOTMEM_DEFAULT);
873 reserve_bootmem_node(pgdat, 0x30081000, 0x1000,
874 BOOTMEM_DEFAULT);
875 }
876
877 if (machine_is_palmld() || machine_is_palmtx()) {
878 reserve_bootmem_node(pgdat, 0xa0000000, 0x1000,
879 BOOTMEM_EXCLUSIVE);
880 reserve_bootmem_node(pgdat, 0xa0200000, 0x1000,
881 BOOTMEM_EXCLUSIVE);
882 }
883
884 if (machine_is_treo680() || machine_is_centro()) {
885 reserve_bootmem_node(pgdat, 0xa0000000, 0x1000,
886 BOOTMEM_EXCLUSIVE);
887 reserve_bootmem_node(pgdat, 0xa2000000, 0x1000,
888 BOOTMEM_EXCLUSIVE);
889 }
890
891 if (machine_is_palmt5())
892 reserve_bootmem_node(pgdat, 0xa0200000, 0x1000,
893 BOOTMEM_EXCLUSIVE);
894
895 /*
896 * U300 - This platform family can share physical memory
897 * between two ARM cpus, one running Linux and the other
898 * running another OS.
899 */
900 if (machine_is_u300()) {
901#ifdef CONFIG_MACH_U300_SINGLE_RAM
902#if ((CONFIG_MACH_U300_ACCESS_MEM_SIZE & 1) == 1) && \
903 CONFIG_MACH_U300_2MB_ALIGNMENT_FIX
904 res_size = 0x00100000;
905#endif
906#endif
907 }
908 865
909#ifdef CONFIG_SA1111 866#ifdef CONFIG_SA1111
910 /* 867 /*
911 * Because of the SA1111 DMA bug, we want to preserve our 868 * Because of the SA1111 DMA bug, we want to preserve our
912 * precious DMA-able memory... 869 * precious DMA-able memory...
913 */ 870 */
914 res_size = __pa(swapper_pg_dir) - PHYS_OFFSET; 871 memblock_reserve(PHYS_OFFSET, __pa(swapper_pg_dir) - PHYS_OFFSET);
915#endif 872#endif
916 if (res_size)
917 reserve_bootmem_node(pgdat, PHYS_OFFSET, res_size,
918 BOOTMEM_DEFAULT);
919} 873}
920 874
921/* 875/*
@@ -934,7 +888,7 @@ static void __init devicemaps_init(struct machine_desc *mdesc)
934 /* 888 /*
935 * Allocate the vector page early. 889 * Allocate the vector page early.
936 */ 890 */
937 vectors = alloc_bootmem_low_pages(PAGE_SIZE); 891 vectors = early_alloc(PAGE_SIZE);
938 892
939 for (addr = VMALLOC_END; addr; addr += PGDIR_SIZE) 893 for (addr = VMALLOC_END; addr; addr += PGDIR_SIZE)
940 pmd_clear(pmd_off_k(addr)); 894 pmd_clear(pmd_off_k(addr));
@@ -1005,14 +959,44 @@ static void __init devicemaps_init(struct machine_desc *mdesc)
1005static void __init kmap_init(void) 959static void __init kmap_init(void)
1006{ 960{
1007#ifdef CONFIG_HIGHMEM 961#ifdef CONFIG_HIGHMEM
1008 pmd_t *pmd = pmd_off_k(PKMAP_BASE); 962 pkmap_page_table = early_pte_alloc(pmd_off_k(PKMAP_BASE),
1009 pte_t *pte = alloc_bootmem_low_pages(2 * PTRS_PER_PTE * sizeof(pte_t)); 963 PKMAP_BASE, _PAGE_KERNEL_TABLE);
1010 BUG_ON(!pmd_none(*pmd) || !pte);
1011 __pmd_populate(pmd, __pa(pte) | _PAGE_KERNEL_TABLE);
1012 pkmap_page_table = pte + PTRS_PER_PTE;
1013#endif 964#endif
1014} 965}
1015 966
967static inline void map_memory_bank(struct membank *bank)
968{
969 struct map_desc map;
970
971 map.pfn = bank_pfn_start(bank);
972 map.virtual = __phys_to_virt(bank_phys_start(bank));
973 map.length = bank_phys_size(bank);
974 map.type = MT_MEMORY;
975
976 create_mapping(&map);
977}
978
979static void __init map_lowmem(void)
980{
981 struct meminfo *mi = &meminfo;
982 int i;
983
984 /* Map all the lowmem memory banks. */
985 for (i = 0; i < mi->nr_banks; i++) {
986 struct membank *bank = &mi->bank[i];
987
988 if (!bank->highmem)
989 map_memory_bank(bank);
990 }
991}
992
993static int __init meminfo_cmp(const void *_a, const void *_b)
994{
995 const struct membank *a = _a, *b = _b;
996 long cmp = bank_pfn_start(a) - bank_pfn_start(b);
997 return cmp < 0 ? -1 : cmp > 0 ? 1 : 0;
998}
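
meminfo_cmp() reduces the long difference to -1/0/1 so the result survives the implicit conversion to the int that sort() expects. The same comparator drives qsort() in this standalone sketch; the bank values are invented:

    #include <stdio.h>
    #include <stdlib.h>

    struct membank { unsigned long start_pfn, size; };

    static int meminfo_cmp(const void *_a, const void *_b)
    {
        const struct membank *a = _a, *b = _b;
        long cmp = (long)(a->start_pfn - b->start_pfn);
        return cmp < 0 ? -1 : cmp > 0 ? 1 : 0;
    }

    int main(void)
    {
        struct membank banks[] = {
            { 0x80000, 0x8000 }, { 0x60000, 0x8000 }, { 0x70000, 0x8000 },
        };

        qsort(banks, 3, sizeof(banks[0]), meminfo_cmp);
        for (int i = 0; i < 3; i++)
            printf("bank %d starts at pfn 0x%lx\n", i, banks[i].start_pfn);
        return 0;
    }
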
999
1016/* 1000/*
1017 * paging_init() sets up the page tables, initialises the zone memory 1001 * paging_init() sets up the page tables, initialises the zone memory
1018 * maps, and sets up the zero page, bad page and bad page tables. 1002 * maps, and sets up the zero page, bad page and bad page tables.
@@ -1021,20 +1005,22 @@ void __init paging_init(struct machine_desc *mdesc)
1021{ 1005{
1022 void *zero_page; 1006 void *zero_page;
1023 1007
1008 sort(&meminfo.bank, meminfo.nr_banks, sizeof(meminfo.bank[0]), meminfo_cmp, NULL);
1009
1024 build_mem_type_table(); 1010 build_mem_type_table();
1025 sanity_check_meminfo(); 1011 sanity_check_meminfo();
1026 prepare_page_table(); 1012 prepare_page_table();
1027 bootmem_init(); 1013 map_lowmem();
1028 devicemaps_init(mdesc); 1014 devicemaps_init(mdesc);
1029 kmap_init(); 1015 kmap_init();
1030 1016
1031 top_pmd = pmd_off_k(0xffff0000); 1017 top_pmd = pmd_off_k(0xffff0000);
1032 1018
1033 /* 1019 /* allocate the zero page. */
1034 * allocate the zero page. Note that this always succeeds and 1020 zero_page = early_alloc(PAGE_SIZE);
1035 * returns a zeroed result. 1021
1036 */ 1022 bootmem_init();
1037 zero_page = alloc_bootmem_low_pages(PAGE_SIZE); 1023
1038 empty_zero_page = virt_to_page(zero_page); 1024 empty_zero_page = virt_to_page(zero_page);
1039 __flush_dcache_page(NULL, empty_zero_page); 1025 __flush_dcache_page(NULL, empty_zero_page);
1040} 1026}
@@ -1050,10 +1036,12 @@ void setup_mm_for_reboot(char mode)
1050 pgd_t *pgd; 1036 pgd_t *pgd;
1051 int i; 1037 int i;
1052 1038
1053 if (current->mm && current->mm->pgd) 1039 /*
 1054 pgd = current->mm->pgd; 1040 * We need access to the user-mode page tables here. For kernel threads
1055 else 1041 * we don't have any user-mode mappings so we use the context that we
1056 pgd = init_mm.pgd; 1042 * "borrowed".
1043 */
1044 pgd = current->active_mm->pgd;
1057 1045
1058 base_pmdval = PMD_SECT_AP_WRITE | PMD_SECT_AP_READ | PMD_TYPE_SECT; 1046 base_pmdval = PMD_SECT_AP_WRITE | PMD_SECT_AP_READ | PMD_TYPE_SECT;
1059 if (cpu_architecture() <= CPU_ARCH_ARMv5TEJ && !cpu_is_xscale()) 1047 if (cpu_architecture() <= CPU_ARCH_ARMv5TEJ && !cpu_is_xscale())
diff --git a/arch/arm/mm/nommu.c b/arch/arm/mm/nommu.c
index 9bfeb6b9509a..687d02319a41 100644
--- a/arch/arm/mm/nommu.c
+++ b/arch/arm/mm/nommu.c
@@ -6,8 +6,8 @@
6#include <linux/module.h> 6#include <linux/module.h>
7#include <linux/mm.h> 7#include <linux/mm.h>
8#include <linux/pagemap.h> 8#include <linux/pagemap.h>
9#include <linux/bootmem.h>
10#include <linux/io.h> 9#include <linux/io.h>
10#include <linux/memblock.h>
11 11
12#include <asm/cacheflush.h> 12#include <asm/cacheflush.h>
13#include <asm/sections.h> 13#include <asm/sections.h>
@@ -17,30 +17,14 @@
17 17
18#include "mm.h" 18#include "mm.h"
19 19
20/* 20void __init arm_mm_memblock_reserve(void)
21 * Reserve the various regions of node 0
22 */
23void __init reserve_node_zero(pg_data_t *pgdat)
24{ 21{
25 /* 22 /*
26 * Register the kernel text and data with bootmem.
27 * Note that this can only be in node 0.
28 */
29#ifdef CONFIG_XIP_KERNEL
30 reserve_bootmem_node(pgdat, __pa(_data), _end - _data,
31 BOOTMEM_DEFAULT);
32#else
33 reserve_bootmem_node(pgdat, __pa(_stext), _end - _stext,
34 BOOTMEM_DEFAULT);
35#endif
36
37 /*
38 * Register the exception vector page. 23 * Register the exception vector page.
 39 * On some architectures DRAM at address zero holds the vectors, and 24 * On some architectures DRAM at address zero holds the vectors, and
 40 * alloc_page() there returns a valid page that compares equal to NULL. 25 * alloc_page() there returns a valid page that compares equal to NULL.
41 */ 26 */
42 reserve_bootmem_node(pgdat, CONFIG_VECTORS_BASE, PAGE_SIZE, 27 memblock_reserve(CONFIG_VECTORS_BASE, PAGE_SIZE);
43 BOOTMEM_DEFAULT);
44} 28}
45 29
46/* 30/*
@@ -65,6 +49,15 @@ void flush_dcache_page(struct page *page)
65} 49}
66EXPORT_SYMBOL(flush_dcache_page); 50EXPORT_SYMBOL(flush_dcache_page);
67 51
52void copy_to_user_page(struct vm_area_struct *vma, struct page *page,
53 unsigned long uaddr, void *dst, const void *src,
54 unsigned long len)
55{
56 memcpy(dst, src, len);
57 if (vma->vm_flags & VM_EXEC)
58 __cpuc_coherent_user_range(uaddr, uaddr + len);
59}
60
68void __iomem *__arm_ioremap_pfn(unsigned long pfn, unsigned long offset, 61void __iomem *__arm_ioremap_pfn(unsigned long pfn, unsigned long offset,
69 size_t size, unsigned int mtype) 62 size_t size, unsigned int mtype)
70{ 63{
@@ -87,8 +80,8 @@ void __iomem *__arm_ioremap(unsigned long phys_addr, size_t size,
87} 80}
88EXPORT_SYMBOL(__arm_ioremap); 81EXPORT_SYMBOL(__arm_ioremap);
89 82
90void __iomem *__arm_ioremap(unsigned long phys_addr, size_t size, 83void __iomem *__arm_ioremap_caller(unsigned long phys_addr, size_t size,
91 unsigned int mtype, void *caller) 84 unsigned int mtype, void *caller)
92{ 85{
93 return __arm_ioremap(phys_addr, size, mtype); 86 return __arm_ioremap(phys_addr, size, mtype);
94} 87}
diff --git a/arch/arm/mm/pgd.c b/arch/arm/mm/pgd.c
index 2690146161ba..be5f58e153bf 100644
--- a/arch/arm/mm/pgd.c
+++ b/arch/arm/mm/pgd.c
@@ -8,6 +8,7 @@
8 * published by the Free Software Foundation. 8 * published by the Free Software Foundation.
9 */ 9 */
10#include <linux/mm.h> 10#include <linux/mm.h>
11#include <linux/gfp.h>
11#include <linux/highmem.h> 12#include <linux/highmem.h>
12 13
13#include <asm/pgalloc.h> 14#include <asm/pgalloc.h>
diff --git a/arch/arm/mm/proc-arm1020.S b/arch/arm/mm/proc-arm1020.S
index 72507c630ceb..203a4e944d9e 100644
--- a/arch/arm/mm/proc-arm1020.S
+++ b/arch/arm/mm/proc-arm1020.S
@@ -79,15 +79,11 @@ ENTRY(cpu_arm1020_proc_init)
79 * cpu_arm1020_proc_fin() 79 * cpu_arm1020_proc_fin()
80 */ 80 */
81ENTRY(cpu_arm1020_proc_fin) 81ENTRY(cpu_arm1020_proc_fin)
82 stmfd sp!, {lr}
83 mov ip, #PSR_F_BIT | PSR_I_BIT | SVC_MODE
84 msr cpsr_c, ip
85 bl arm1020_flush_kern_cache_all
86 mrc p15, 0, r0, c1, c0, 0 @ ctrl register 82 mrc p15, 0, r0, c1, c0, 0 @ ctrl register
87 bic r0, r0, #0x1000 @ ...i............ 83 bic r0, r0, #0x1000 @ ...i............
88 bic r0, r0, #0x000e @ ............wca. 84 bic r0, r0, #0x000e @ ............wca.
89 mcr p15, 0, r0, c1, c0, 0 @ disable caches 85 mcr p15, 0, r0, c1, c0, 0 @ disable caches
90 ldmfd sp!, {pc} 86 mov pc, lr
91 87
92/* 88/*
93 * cpu_arm1020_reset(loc) 89 * cpu_arm1020_reset(loc)
diff --git a/arch/arm/mm/proc-arm1020e.S b/arch/arm/mm/proc-arm1020e.S
index d27829805609..1a511e765909 100644
--- a/arch/arm/mm/proc-arm1020e.S
+++ b/arch/arm/mm/proc-arm1020e.S
@@ -79,15 +79,11 @@ ENTRY(cpu_arm1020e_proc_init)
79 * cpu_arm1020e_proc_fin() 79 * cpu_arm1020e_proc_fin()
80 */ 80 */
81ENTRY(cpu_arm1020e_proc_fin) 81ENTRY(cpu_arm1020e_proc_fin)
82 stmfd sp!, {lr}
83 mov ip, #PSR_F_BIT | PSR_I_BIT | SVC_MODE
84 msr cpsr_c, ip
85 bl arm1020e_flush_kern_cache_all
86 mrc p15, 0, r0, c1, c0, 0 @ ctrl register 82 mrc p15, 0, r0, c1, c0, 0 @ ctrl register
87 bic r0, r0, #0x1000 @ ...i............ 83 bic r0, r0, #0x1000 @ ...i............
88 bic r0, r0, #0x000e @ ............wca. 84 bic r0, r0, #0x000e @ ............wca.
89 mcr p15, 0, r0, c1, c0, 0 @ disable caches 85 mcr p15, 0, r0, c1, c0, 0 @ disable caches
90 ldmfd sp!, {pc} 86 mov pc, lr
91 87
92/* 88/*
93 * cpu_arm1020e_reset(loc) 89 * cpu_arm1020e_reset(loc)
diff --git a/arch/arm/mm/proc-arm1022.S b/arch/arm/mm/proc-arm1022.S
index ce13e4a827de..1ffa4eb9c34f 100644
--- a/arch/arm/mm/proc-arm1022.S
+++ b/arch/arm/mm/proc-arm1022.S
@@ -68,15 +68,11 @@ ENTRY(cpu_arm1022_proc_init)
68 * cpu_arm1022_proc_fin() 68 * cpu_arm1022_proc_fin()
69 */ 69 */
70ENTRY(cpu_arm1022_proc_fin) 70ENTRY(cpu_arm1022_proc_fin)
71 stmfd sp!, {lr}
72 mov ip, #PSR_F_BIT | PSR_I_BIT | SVC_MODE
73 msr cpsr_c, ip
74 bl arm1022_flush_kern_cache_all
75 mrc p15, 0, r0, c1, c0, 0 @ ctrl register 71 mrc p15, 0, r0, c1, c0, 0 @ ctrl register
76 bic r0, r0, #0x1000 @ ...i............ 72 bic r0, r0, #0x1000 @ ...i............
77 bic r0, r0, #0x000e @ ............wca. 73 bic r0, r0, #0x000e @ ............wca.
78 mcr p15, 0, r0, c1, c0, 0 @ disable caches 74 mcr p15, 0, r0, c1, c0, 0 @ disable caches
79 ldmfd sp!, {pc} 75 mov pc, lr
80 76
81/* 77/*
82 * cpu_arm1022_reset(loc) 78 * cpu_arm1022_reset(loc)
diff --git a/arch/arm/mm/proc-arm1026.S b/arch/arm/mm/proc-arm1026.S
index 636672a29c6d..5697c34b95b0 100644
--- a/arch/arm/mm/proc-arm1026.S
+++ b/arch/arm/mm/proc-arm1026.S
@@ -68,15 +68,11 @@ ENTRY(cpu_arm1026_proc_init)
68 * cpu_arm1026_proc_fin() 68 * cpu_arm1026_proc_fin()
69 */ 69 */
70ENTRY(cpu_arm1026_proc_fin) 70ENTRY(cpu_arm1026_proc_fin)
71 stmfd sp!, {lr}
72 mov ip, #PSR_F_BIT | PSR_I_BIT | SVC_MODE
73 msr cpsr_c, ip
74 bl arm1026_flush_kern_cache_all
75 mrc p15, 0, r0, c1, c0, 0 @ ctrl register 71 mrc p15, 0, r0, c1, c0, 0 @ ctrl register
76 bic r0, r0, #0x1000 @ ...i............ 72 bic r0, r0, #0x1000 @ ...i............
77 bic r0, r0, #0x000e @ ............wca. 73 bic r0, r0, #0x000e @ ............wca.
78 mcr p15, 0, r0, c1, c0, 0 @ disable caches 74 mcr p15, 0, r0, c1, c0, 0 @ disable caches
79 ldmfd sp!, {pc} 75 mov pc, lr
80 76
81/* 77/*
82 * cpu_arm1026_reset(loc) 78 * cpu_arm1026_reset(loc)
diff --git a/arch/arm/mm/proc-arm6_7.S b/arch/arm/mm/proc-arm6_7.S
index 795dc615f43b..64e0b327c7c5 100644
--- a/arch/arm/mm/proc-arm6_7.S
+++ b/arch/arm/mm/proc-arm6_7.S
@@ -184,8 +184,6 @@ ENTRY(cpu_arm7_proc_init)
184 184
185ENTRY(cpu_arm6_proc_fin) 185ENTRY(cpu_arm6_proc_fin)
186ENTRY(cpu_arm7_proc_fin) 186ENTRY(cpu_arm7_proc_fin)
187 mov r0, #PSR_F_BIT | PSR_I_BIT | SVC_MODE
188 msr cpsr_c, r0
189 mov r0, #0x31 @ ....S..DP...M 187 mov r0, #0x31 @ ....S..DP...M
190 mcr p15, 0, r0, c1, c0, 0 @ disable caches 188 mcr p15, 0, r0, c1, c0, 0 @ disable caches
191 mov pc, lr 189 mov pc, lr
diff --git a/arch/arm/mm/proc-arm720.S b/arch/arm/mm/proc-arm720.S
index 0b62de244666..9d96824134fc 100644
--- a/arch/arm/mm/proc-arm720.S
+++ b/arch/arm/mm/proc-arm720.S
@@ -54,15 +54,11 @@ ENTRY(cpu_arm720_proc_init)
54 mov pc, lr 54 mov pc, lr
55 55
56ENTRY(cpu_arm720_proc_fin) 56ENTRY(cpu_arm720_proc_fin)
57 stmfd sp!, {lr}
58 mov ip, #PSR_F_BIT | PSR_I_BIT | SVC_MODE
59 msr cpsr_c, ip
60 mrc p15, 0, r0, c1, c0, 0 57 mrc p15, 0, r0, c1, c0, 0
61 bic r0, r0, #0x1000 @ ...i............ 58 bic r0, r0, #0x1000 @ ...i............
62 bic r0, r0, #0x000e @ ............wca. 59 bic r0, r0, #0x000e @ ............wca.
63 mcr p15, 0, r0, c1, c0, 0 @ disable caches 60 mcr p15, 0, r0, c1, c0, 0 @ disable caches
64 mcr p15, 0, r1, c7, c7, 0 @ invalidate cache 61 mov pc, lr
65 ldmfd sp!, {pc}
66 62
67/* 63/*
68 * Function: arm720_proc_do_idle(void) 64 * Function: arm720_proc_do_idle(void)
diff --git a/arch/arm/mm/proc-arm740.S b/arch/arm/mm/proc-arm740.S
index 01860cdeb2ec..6c1a9ab059ae 100644
--- a/arch/arm/mm/proc-arm740.S
+++ b/arch/arm/mm/proc-arm740.S
@@ -36,15 +36,11 @@ ENTRY(cpu_arm740_switch_mm)
36 * cpu_arm740_proc_fin() 36 * cpu_arm740_proc_fin()
37 */ 37 */
38ENTRY(cpu_arm740_proc_fin) 38ENTRY(cpu_arm740_proc_fin)
39 stmfd sp!, {lr}
40 mov ip, #PSR_F_BIT | PSR_I_BIT | SVC_MODE
41 msr cpsr_c, ip
42 mrc p15, 0, r0, c1, c0, 0 39 mrc p15, 0, r0, c1, c0, 0
43 bic r0, r0, #0x3f000000 @ bank/f/lock/s 40 bic r0, r0, #0x3f000000 @ bank/f/lock/s
44 bic r0, r0, #0x0000000c @ w-buffer/cache 41 bic r0, r0, #0x0000000c @ w-buffer/cache
45 mcr p15, 0, r0, c1, c0, 0 @ disable caches 42 mcr p15, 0, r0, c1, c0, 0 @ disable caches
46 mcr p15, 0, r0, c7, c0, 0 @ invalidate cache 43 mov pc, lr
47 ldmfd sp!, {pc}
48 44
49/* 45/*
50 * cpu_arm740_reset(loc) 46 * cpu_arm740_reset(loc)
diff --git a/arch/arm/mm/proc-arm7tdmi.S b/arch/arm/mm/proc-arm7tdmi.S
index 1201b9863829..6a850dbba22e 100644
--- a/arch/arm/mm/proc-arm7tdmi.S
+++ b/arch/arm/mm/proc-arm7tdmi.S
@@ -36,8 +36,6 @@ ENTRY(cpu_arm7tdmi_switch_mm)
36 * cpu_arm7tdmi_proc_fin() 36 * cpu_arm7tdmi_proc_fin()
37 */ 37 */
38ENTRY(cpu_arm7tdmi_proc_fin) 38ENTRY(cpu_arm7tdmi_proc_fin)
39 mov r0, #PSR_F_BIT | PSR_I_BIT | SVC_MODE
40 msr cpsr_c, r0
41 mov pc, lr 39 mov pc, lr
42 40
43/* 41/*
diff --git a/arch/arm/mm/proc-arm920.S b/arch/arm/mm/proc-arm920.S
index 8be81992645d..86f80aa56216 100644
--- a/arch/arm/mm/proc-arm920.S
+++ b/arch/arm/mm/proc-arm920.S
@@ -69,19 +69,11 @@ ENTRY(cpu_arm920_proc_init)
69 * cpu_arm920_proc_fin() 69 * cpu_arm920_proc_fin()
70 */ 70 */
71ENTRY(cpu_arm920_proc_fin) 71ENTRY(cpu_arm920_proc_fin)
72 stmfd sp!, {lr}
73 mov ip, #PSR_F_BIT | PSR_I_BIT | SVC_MODE
74 msr cpsr_c, ip
75#ifndef CONFIG_CPU_DCACHE_WRITETHROUGH
76 bl arm920_flush_kern_cache_all
77#else
78 bl v4wt_flush_kern_cache_all
79#endif
80 mrc p15, 0, r0, c1, c0, 0 @ ctrl register 72 mrc p15, 0, r0, c1, c0, 0 @ ctrl register
81 bic r0, r0, #0x1000 @ ...i............ 73 bic r0, r0, #0x1000 @ ...i............
82 bic r0, r0, #0x000e @ ............wca. 74 bic r0, r0, #0x000e @ ............wca.
83 mcr p15, 0, r0, c1, c0, 0 @ disable caches 75 mcr p15, 0, r0, c1, c0, 0 @ disable caches
84 ldmfd sp!, {pc} 76 mov pc, lr
85 77
86/* 78/*
87 * cpu_arm920_reset(loc) 79 * cpu_arm920_reset(loc)
diff --git a/arch/arm/mm/proc-arm922.S b/arch/arm/mm/proc-arm922.S
index c0ff8e4b1074..f76ce9b62883 100644
--- a/arch/arm/mm/proc-arm922.S
+++ b/arch/arm/mm/proc-arm922.S
@@ -71,19 +71,11 @@ ENTRY(cpu_arm922_proc_init)
71 * cpu_arm922_proc_fin() 71 * cpu_arm922_proc_fin()
72 */ 72 */
73ENTRY(cpu_arm922_proc_fin) 73ENTRY(cpu_arm922_proc_fin)
74 stmfd sp!, {lr}
75 mov ip, #PSR_F_BIT | PSR_I_BIT | SVC_MODE
76 msr cpsr_c, ip
77#ifndef CONFIG_CPU_DCACHE_WRITETHROUGH
78 bl arm922_flush_kern_cache_all
79#else
80 bl v4wt_flush_kern_cache_all
81#endif
82 mrc p15, 0, r0, c1, c0, 0 @ ctrl register 74 mrc p15, 0, r0, c1, c0, 0 @ ctrl register
83 bic r0, r0, #0x1000 @ ...i............ 75 bic r0, r0, #0x1000 @ ...i............
84 bic r0, r0, #0x000e @ ............wca. 76 bic r0, r0, #0x000e @ ............wca.
85 mcr p15, 0, r0, c1, c0, 0 @ disable caches 77 mcr p15, 0, r0, c1, c0, 0 @ disable caches
86 ldmfd sp!, {pc} 78 mov pc, lr
87 79
88/* 80/*
89 * cpu_arm922_reset(loc) 81 * cpu_arm922_reset(loc)
diff --git a/arch/arm/mm/proc-arm925.S b/arch/arm/mm/proc-arm925.S
index 3c6cffe400f6..657bd3f7c153 100644
--- a/arch/arm/mm/proc-arm925.S
+++ b/arch/arm/mm/proc-arm925.S
@@ -92,15 +92,11 @@ ENTRY(cpu_arm925_proc_init)
92 * cpu_arm925_proc_fin() 92 * cpu_arm925_proc_fin()
93 */ 93 */
94ENTRY(cpu_arm925_proc_fin) 94ENTRY(cpu_arm925_proc_fin)
95 stmfd sp!, {lr}
96 mov ip, #PSR_F_BIT | PSR_I_BIT | SVC_MODE
97 msr cpsr_c, ip
98 bl arm925_flush_kern_cache_all
99 mrc p15, 0, r0, c1, c0, 0 @ ctrl register 95 mrc p15, 0, r0, c1, c0, 0 @ ctrl register
100 bic r0, r0, #0x1000 @ ...i............ 96 bic r0, r0, #0x1000 @ ...i............
101 bic r0, r0, #0x000e @ ............wca. 97 bic r0, r0, #0x000e @ ............wca.
102 mcr p15, 0, r0, c1, c0, 0 @ disable caches 98 mcr p15, 0, r0, c1, c0, 0 @ disable caches
103 ldmfd sp!, {pc} 99 mov pc, lr
104 100
105/* 101/*
106 * cpu_arm925_reset(loc) 102 * cpu_arm925_reset(loc)
diff --git a/arch/arm/mm/proc-arm926.S b/arch/arm/mm/proc-arm926.S
index 75b707c9cce1..73f1f3c68910 100644
--- a/arch/arm/mm/proc-arm926.S
+++ b/arch/arm/mm/proc-arm926.S
@@ -61,15 +61,11 @@ ENTRY(cpu_arm926_proc_init)
61 * cpu_arm926_proc_fin() 61 * cpu_arm926_proc_fin()
62 */ 62 */
63ENTRY(cpu_arm926_proc_fin) 63ENTRY(cpu_arm926_proc_fin)
64 stmfd sp!, {lr}
65 mov ip, #PSR_F_BIT | PSR_I_BIT | SVC_MODE
66 msr cpsr_c, ip
67 bl arm926_flush_kern_cache_all
68 mrc p15, 0, r0, c1, c0, 0 @ ctrl register 64 mrc p15, 0, r0, c1, c0, 0 @ ctrl register
69 bic r0, r0, #0x1000 @ ...i............ 65 bic r0, r0, #0x1000 @ ...i............
70 bic r0, r0, #0x000e @ ............wca. 66 bic r0, r0, #0x000e @ ............wca.
71 mcr p15, 0, r0, c1, c0, 0 @ disable caches 67 mcr p15, 0, r0, c1, c0, 0 @ disable caches
72 ldmfd sp!, {pc} 68 mov pc, lr
73 69
74/* 70/*
75 * cpu_arm926_reset(loc) 71 * cpu_arm926_reset(loc)
diff --git a/arch/arm/mm/proc-arm940.S b/arch/arm/mm/proc-arm940.S
index 1af1657819eb..fffb061a45a5 100644
--- a/arch/arm/mm/proc-arm940.S
+++ b/arch/arm/mm/proc-arm940.S
@@ -37,15 +37,11 @@ ENTRY(cpu_arm940_switch_mm)
37 * cpu_arm940_proc_fin() 37 * cpu_arm940_proc_fin()
38 */ 38 */
39ENTRY(cpu_arm940_proc_fin) 39ENTRY(cpu_arm940_proc_fin)
40 stmfd sp!, {lr}
41 mov ip, #PSR_F_BIT | PSR_I_BIT | SVC_MODE
42 msr cpsr_c, ip
43 bl arm940_flush_kern_cache_all
44 mrc p15, 0, r0, c1, c0, 0 @ ctrl register 40 mrc p15, 0, r0, c1, c0, 0 @ ctrl register
45 bic r0, r0, #0x00001000 @ i-cache 41 bic r0, r0, #0x00001000 @ i-cache
46 bic r0, r0, #0x00000004 @ d-cache 42 bic r0, r0, #0x00000004 @ d-cache
47 mcr p15, 0, r0, c1, c0, 0 @ disable caches 43 mcr p15, 0, r0, c1, c0, 0 @ disable caches
48 ldmfd sp!, {pc} 44 mov pc, lr
49 45
50/* 46/*
51 * cpu_arm940_reset(loc) 47 * cpu_arm940_reset(loc)
diff --git a/arch/arm/mm/proc-arm946.S b/arch/arm/mm/proc-arm946.S
index 1664b6aaff79..249a6053760a 100644
--- a/arch/arm/mm/proc-arm946.S
+++ b/arch/arm/mm/proc-arm946.S
@@ -44,15 +44,11 @@ ENTRY(cpu_arm946_switch_mm)
44 * cpu_arm946_proc_fin() 44 * cpu_arm946_proc_fin()
45 */ 45 */
46ENTRY(cpu_arm946_proc_fin) 46ENTRY(cpu_arm946_proc_fin)
47 stmfd sp!, {lr}
48 mov ip, #PSR_F_BIT | PSR_I_BIT | SVC_MODE
49 msr cpsr_c, ip
50 bl arm946_flush_kern_cache_all
51 mrc p15, 0, r0, c1, c0, 0 @ ctrl register 47 mrc p15, 0, r0, c1, c0, 0 @ ctrl register
52 bic r0, r0, #0x00001000 @ i-cache 48 bic r0, r0, #0x00001000 @ i-cache
53 bic r0, r0, #0x00000004 @ d-cache 49 bic r0, r0, #0x00000004 @ d-cache
54 mcr p15, 0, r0, c1, c0, 0 @ disable caches 50 mcr p15, 0, r0, c1, c0, 0 @ disable caches
55 ldmfd sp!, {pc} 51 mov pc, lr
56 52
57/* 53/*
58 * cpu_arm946_reset(loc) 54 * cpu_arm946_reset(loc)
diff --git a/arch/arm/mm/proc-arm9tdmi.S b/arch/arm/mm/proc-arm9tdmi.S
index 28545c29dbcd..db475667fac2 100644
--- a/arch/arm/mm/proc-arm9tdmi.S
+++ b/arch/arm/mm/proc-arm9tdmi.S
@@ -36,8 +36,6 @@ ENTRY(cpu_arm9tdmi_switch_mm)
36 * cpu_arm9tdmi_proc_fin() 36 * cpu_arm9tdmi_proc_fin()
37 */ 37 */
38ENTRY(cpu_arm9tdmi_proc_fin) 38ENTRY(cpu_arm9tdmi_proc_fin)
39 mov r0, #PSR_F_BIT | PSR_I_BIT | SVC_MODE
40 msr cpsr_c, r0
41 mov pc, lr 39 mov pc, lr
42 40
43/* 41/*
diff --git a/arch/arm/mm/proc-fa526.S b/arch/arm/mm/proc-fa526.S
index 08f5ac237ad4..7803fdf70029 100644
--- a/arch/arm/mm/proc-fa526.S
+++ b/arch/arm/mm/proc-fa526.S
@@ -39,17 +39,13 @@ ENTRY(cpu_fa526_proc_init)
39 * cpu_fa526_proc_fin() 39 * cpu_fa526_proc_fin()
40 */ 40 */
41ENTRY(cpu_fa526_proc_fin) 41ENTRY(cpu_fa526_proc_fin)
42 stmfd sp!, {lr}
43 mov ip, #PSR_F_BIT | PSR_I_BIT | SVC_MODE
44 msr cpsr_c, ip
45 bl fa_flush_kern_cache_all
46 mrc p15, 0, r0, c1, c0, 0 @ ctrl register 42 mrc p15, 0, r0, c1, c0, 0 @ ctrl register
47 bic r0, r0, #0x1000 @ ...i............ 43 bic r0, r0, #0x1000 @ ...i............
48 bic r0, r0, #0x000e @ ............wca. 44 bic r0, r0, #0x000e @ ............wca.
49 mcr p15, 0, r0, c1, c0, 0 @ disable caches 45 mcr p15, 0, r0, c1, c0, 0 @ disable caches
50 nop 46 nop
51 nop 47 nop
52 ldmfd sp!, {pc} 48 mov pc, lr
53 49
54/* 50/*
55 * cpu_fa526_reset(loc) 51 * cpu_fa526_reset(loc)
diff --git a/arch/arm/mm/proc-feroceon.S b/arch/arm/mm/proc-feroceon.S
index 53e632343849..b304d0104a4e 100644
--- a/arch/arm/mm/proc-feroceon.S
+++ b/arch/arm/mm/proc-feroceon.S
@@ -75,11 +75,6 @@ ENTRY(cpu_feroceon_proc_init)
75 * cpu_feroceon_proc_fin() 75 * cpu_feroceon_proc_fin()
76 */ 76 */
77ENTRY(cpu_feroceon_proc_fin) 77ENTRY(cpu_feroceon_proc_fin)
78 stmfd sp!, {lr}
79 mov ip, #PSR_F_BIT | PSR_I_BIT | SVC_MODE
80 msr cpsr_c, ip
81 bl feroceon_flush_kern_cache_all
82
83#if defined(CONFIG_CACHE_FEROCEON_L2) && \ 78#if defined(CONFIG_CACHE_FEROCEON_L2) && \
84 !defined(CONFIG_CACHE_FEROCEON_L2_WRITETHROUGH) 79 !defined(CONFIG_CACHE_FEROCEON_L2_WRITETHROUGH)
85 mov r0, #0 80 mov r0, #0
@@ -91,7 +86,7 @@ ENTRY(cpu_feroceon_proc_fin)
91 bic r0, r0, #0x1000 @ ...i............ 86 bic r0, r0, #0x1000 @ ...i............
92 bic r0, r0, #0x000e @ ............wca. 87 bic r0, r0, #0x000e @ ............wca.
93 mcr p15, 0, r0, c1, c0, 0 @ disable caches 88 mcr p15, 0, r0, c1, c0, 0 @ disable caches
94 ldmfd sp!, {pc} 89 mov pc, lr
95 90
96/* 91/*
97 * cpu_feroceon_reset(loc) 92 * cpu_feroceon_reset(loc)
diff --git a/arch/arm/mm/proc-mohawk.S b/arch/arm/mm/proc-mohawk.S
index caa31154e7db..5f6892fcc167 100644
--- a/arch/arm/mm/proc-mohawk.S
+++ b/arch/arm/mm/proc-mohawk.S
@@ -51,15 +51,11 @@ ENTRY(cpu_mohawk_proc_init)
51 * cpu_mohawk_proc_fin() 51 * cpu_mohawk_proc_fin()
52 */ 52 */
53ENTRY(cpu_mohawk_proc_fin) 53ENTRY(cpu_mohawk_proc_fin)
54 stmfd sp!, {lr}
55 mov ip, #PSR_F_BIT | PSR_I_BIT | SVC_MODE
56 msr cpsr_c, ip
57 bl mohawk_flush_kern_cache_all
58 mrc p15, 0, r0, c1, c0, 0 @ ctrl register 54 mrc p15, 0, r0, c1, c0, 0 @ ctrl register
59 bic r0, r0, #0x1800 @ ...iz........... 55 bic r0, r0, #0x1800 @ ...iz...........
60 bic r0, r0, #0x0006 @ .............ca. 56 bic r0, r0, #0x0006 @ .............ca.
61 mcr p15, 0, r0, c1, c0, 0 @ disable caches 57 mcr p15, 0, r0, c1, c0, 0 @ disable caches
62 ldmfd sp!, {pc} 58 mov pc, lr
63 59
64/* 60/*
65 * cpu_mohawk_reset(loc) 61 * cpu_mohawk_reset(loc)
diff --git a/arch/arm/mm/proc-sa110.S b/arch/arm/mm/proc-sa110.S
index 7b706b389906..a201eb04b5e1 100644
--- a/arch/arm/mm/proc-sa110.S
+++ b/arch/arm/mm/proc-sa110.S
@@ -44,17 +44,13 @@ ENTRY(cpu_sa110_proc_init)
44 * cpu_sa110_proc_fin() 44 * cpu_sa110_proc_fin()
45 */ 45 */
46ENTRY(cpu_sa110_proc_fin) 46ENTRY(cpu_sa110_proc_fin)
47 stmfd sp!, {lr} 47 mov r0, #0
48 mov ip, #PSR_F_BIT | PSR_I_BIT | SVC_MODE
49 msr cpsr_c, ip
50 bl v4wb_flush_kern_cache_all @ clean caches
511: mov r0, #0
52 mcr p15, 0, r0, c15, c2, 2 @ Disable clock switching 48 mcr p15, 0, r0, c15, c2, 2 @ Disable clock switching
53 mrc p15, 0, r0, c1, c0, 0 @ ctrl register 49 mrc p15, 0, r0, c1, c0, 0 @ ctrl register
54 bic r0, r0, #0x1000 @ ...i............ 50 bic r0, r0, #0x1000 @ ...i............
55 bic r0, r0, #0x000e @ ............wca. 51 bic r0, r0, #0x000e @ ............wca.
56 mcr p15, 0, r0, c1, c0, 0 @ disable caches 52 mcr p15, 0, r0, c1, c0, 0 @ disable caches
57 ldmfd sp!, {pc} 53 mov pc, lr
58 54
59/* 55/*
60 * cpu_sa110_reset(loc) 56 * cpu_sa110_reset(loc)
diff --git a/arch/arm/mm/proc-sa1100.S b/arch/arm/mm/proc-sa1100.S
index ee7700242c19..7ddc4805bf97 100644
--- a/arch/arm/mm/proc-sa1100.S
+++ b/arch/arm/mm/proc-sa1100.S
@@ -45,7 +45,7 @@ ENTRY(cpu_sa1100_proc_init)
45 mcr p15, 0, r0, c9, c0, 5 @ Allow read-buffer operations from userland 45 mcr p15, 0, r0, c9, c0, 5 @ Allow read-buffer operations from userland
46 mov pc, lr 46 mov pc, lr
47 47
48 .previous 48 .section .text
49 49
50/* 50/*
51 * cpu_sa1100_proc_fin() 51 * cpu_sa1100_proc_fin()
@@ -55,16 +55,12 @@ ENTRY(cpu_sa1100_proc_init)
55 * - Clean and turn off caches. 55 * - Clean and turn off caches.
56 */ 56 */
57ENTRY(cpu_sa1100_proc_fin) 57ENTRY(cpu_sa1100_proc_fin)
58 stmfd sp!, {lr}
59 mov ip, #PSR_F_BIT | PSR_I_BIT | SVC_MODE
60 msr cpsr_c, ip
61 bl v4wb_flush_kern_cache_all
62 mcr p15, 0, ip, c15, c2, 2 @ Disable clock switching 58 mcr p15, 0, ip, c15, c2, 2 @ Disable clock switching
63 mrc p15, 0, r0, c1, c0, 0 @ ctrl register 59 mrc p15, 0, r0, c1, c0, 0 @ ctrl register
64 bic r0, r0, #0x1000 @ ...i............ 60 bic r0, r0, #0x1000 @ ...i............
65 bic r0, r0, #0x000e @ ............wca. 61 bic r0, r0, #0x000e @ ............wca.
66 mcr p15, 0, r0, c1, c0, 0 @ disable caches 62 mcr p15, 0, r0, c1, c0, 0 @ disable caches
67 ldmfd sp!, {pc} 63 mov pc, lr
68 64
69/* 65/*
70 * cpu_sa1100_reset(loc) 66 * cpu_sa1100_reset(loc)
diff --git a/arch/arm/mm/proc-v6.S b/arch/arm/mm/proc-v6.S
index 7a5337ed7d68..22aac8515196 100644
--- a/arch/arm/mm/proc-v6.S
+++ b/arch/arm/mm/proc-v6.S
@@ -42,14 +42,11 @@ ENTRY(cpu_v6_proc_init)
 	mov	pc, lr
 
 ENTRY(cpu_v6_proc_fin)
-	stmfd	sp!, {lr}
-	cpsid	if				@ disable interrupts
-	bl	v6_flush_kern_cache_all
 	mrc	p15, 0, r0, c1, c0, 0		@ ctrl register
 	bic	r0, r0, #0x1000			@ ...i............
 	bic	r0, r0, #0x0006			@ .............ca.
 	mcr	p15, 0, r0, c1, c0, 0		@ disable caches
-	ldmfd	sp!, {pc}
+	mov	pc, lr
 
 /*
  * cpu_v6_reset(loc)
@@ -239,7 +236,8 @@ __v6_proc_info:
 	b	__v6_setup
 	.long	cpu_arch_name
 	.long	cpu_elf_name
-	.long	HWCAP_SWP|HWCAP_HALF|HWCAP_THUMB|HWCAP_FAST_MULT|HWCAP_EDSP|HWCAP_JAVA
+	/* See also feat_v6_fixup() for HWCAP_TLS */
+	.long	HWCAP_SWP|HWCAP_HALF|HWCAP_THUMB|HWCAP_FAST_MULT|HWCAP_EDSP|HWCAP_JAVA|HWCAP_TLS
 	.long	cpu_v6_name
 	.long	v6_processor_functions
 	.long	v6wbi_tlb_fns
@@ -262,7 +260,7 @@ __pj4_v6_proc_info:
 	b	__v6_setup
 	.long	cpu_arch_name
 	.long	cpu_elf_name
-	.long	HWCAP_SWP|HWCAP_HALF|HWCAP_THUMB|HWCAP_FAST_MULT|HWCAP_EDSP
+	.long	HWCAP_SWP|HWCAP_HALF|HWCAP_THUMB|HWCAP_FAST_MULT|HWCAP_EDSP|HWCAP_TLS
 	.long	cpu_pj4_name
 	.long	v6_processor_functions
 	.long	v6wbi_tlb_fns
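Note: HWCAP_TLS is now advertised unconditionally in the v6 proc_info entries,
with the new comment deferring to feat_v6_fixup(). A hedged C sketch of the
idea behind that fixup (the exact revision check below is an assumption about
it, not part of this diff): ARM1136 parts before r1p0 lack a working TLS
register, so the hwcap has to be masked off again at boot:

	#include <asm/cputype.h>	/* read_cpuid_id() */

	static void __init feat_v6_fixup(void)
	{
		int id = read_cpuid_id();

		if ((id & 0xff0f0000) != 0x41070000)	/* ARM Ltd. ARM11xx? */
			return;

		/* HWCAP_TLS is only valid on 1136 r1p0 and later */
		if ((((id >> 4) & 0xfff) == 0xb36) && (((id >> 20) & 3) == 0))
			elf_hwcap &= ~HWCAP_TLS;
	}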
diff --git a/arch/arm/mm/proc-v7.S b/arch/arm/mm/proc-v7.S
index 7aaf88a3b7aa..6a8506d99ee9 100644
--- a/arch/arm/mm/proc-v7.S
+++ b/arch/arm/mm/proc-v7.S
@@ -45,14 +45,11 @@ ENTRY(cpu_v7_proc_init)
 ENDPROC(cpu_v7_proc_init)
 
 ENTRY(cpu_v7_proc_fin)
-	stmfd	sp!, {lr}
-	cpsid	if				@ disable interrupts
-	bl	v7_flush_kern_cache_all
 	mrc	p15, 0, r0, c1, c0, 0		@ ctrl register
 	bic	r0, r0, #0x1000			@ ...i............
 	bic	r0, r0, #0x0006			@ .............ca.
 	mcr	p15, 0, r0, c1, c0, 0		@ disable caches
-	ldmfd	sp!, {pc}
+	mov	pc, lr
 ENDPROC(cpu_v7_proc_fin)
 
 /*
@@ -344,7 +341,7 @@ __v7_proc_info:
 	b	__v7_setup
 	.long	cpu_arch_name
 	.long	cpu_elf_name
-	.long	HWCAP_SWP|HWCAP_HALF|HWCAP_THUMB|HWCAP_FAST_MULT|HWCAP_EDSP
+	.long	HWCAP_SWP|HWCAP_HALF|HWCAP_THUMB|HWCAP_FAST_MULT|HWCAP_EDSP|HWCAP_TLS
 	.long	cpu_v7_name
 	.long	v7_processor_functions
 	.long	v7wbi_tlb_fns
diff --git a/arch/arm/mm/proc-xsc3.S b/arch/arm/mm/proc-xsc3.S
index e5797f1c1db7..361a51e49030 100644
--- a/arch/arm/mm/proc-xsc3.S
+++ b/arch/arm/mm/proc-xsc3.S
@@ -90,15 +90,11 @@ ENTRY(cpu_xsc3_proc_init)
  * cpu_xsc3_proc_fin()
  */
 ENTRY(cpu_xsc3_proc_fin)
-	str	lr, [sp, #-4]!
-	mov	r0, #PSR_F_BIT|PSR_I_BIT|SVC_MODE
-	msr	cpsr_c, r0
-	bl	xsc3_flush_kern_cache_all	@ clean caches
 	mrc	p15, 0, r0, c1, c0, 0		@ ctrl register
 	bic	r0, r0, #0x1800			@ ...IZ...........
 	bic	r0, r0, #0x0006			@ .............CA.
 	mcr	p15, 0, r0, c1, c0, 0		@ disable caches
-	ldr	pc, [sp], #4
+	mov	pc, lr
 
 /*
  * cpu_xsc3_reset(loc)
diff --git a/arch/arm/mm/proc-xscale.S b/arch/arm/mm/proc-xscale.S
index 63037e2162f2..14075979bcba 100644
--- a/arch/arm/mm/proc-xscale.S
+++ b/arch/arm/mm/proc-xscale.S
@@ -124,15 +124,11 @@ ENTRY(cpu_xscale_proc_init)
  * cpu_xscale_proc_fin()
  */
 ENTRY(cpu_xscale_proc_fin)
-	str	lr, [sp, #-4]!
-	mov	r0, #PSR_F_BIT|PSR_I_BIT|SVC_MODE
-	msr	cpsr_c, r0
-	bl	xscale_flush_kern_cache_all	@ clean caches
 	mrc	p15, 0, r0, c1, c0, 0		@ ctrl register
 	bic	r0, r0, #0x1800			@ ...IZ...........
 	bic	r0, r0, #0x0006			@ .............CA.
 	mcr	p15, 0, r0, c1, c0, 0		@ disable caches
-	ldr	pc, [sp], #4
+	mov	pc, lr
 
 /*
  * cpu_xscale_reset(loc)
diff --git a/arch/arm/mm/tlb-v7.S b/arch/arm/mm/tlb-v7.S
index 0cb1848bd876..f3f288a9546d 100644
--- a/arch/arm/mm/tlb-v7.S
+++ b/arch/arm/mm/tlb-v7.S
@@ -50,7 +50,11 @@ ENTRY(v7wbi_flush_user_tlb_range)
 	cmp	r0, r1
 	blo	1b
 	mov	ip, #0
+#ifdef CONFIG_SMP
+	mcr	p15, 0, ip, c7, c1, 6		@ flush BTAC/BTB Inner Shareable
+#else
 	mcr	p15, 0, ip, c7, c5, 6		@ flush BTAC/BTB
+#endif
 	dsb
 	mov	pc, lr
 ENDPROC(v7wbi_flush_user_tlb_range)
@@ -79,7 +83,11 @@ ENTRY(v7wbi_flush_kern_tlb_range)
 	cmp	r0, r1
 	blo	1b
 	mov	r2, #0
+#ifdef CONFIG_SMP
+	mcr	p15, 0, r2, c7, c1, 6		@ flush BTAC/BTB Inner Shareable
+#else
 	mcr	p15, 0, r2, c7, c5, 6		@ flush BTAC/BTB
+#endif
 	dsb
 	isb
 	mov	pc, lr
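Note: on SMP the branch-predictor invalidate must reach the Inner Shareable
domain (BPIALLIS, CRm=c1) rather than stay CPU-local (BPIALL, CRm=c5), since
another core may be the next to fetch from the just-unmapped addresses. A C
inline-assembly sketch of the same choice (the helper name is illustrative,
not from this diff):

	static inline void flush_btb(void)	/* hypothetical helper */
	{
	#ifdef CONFIG_SMP
		/* BPIALLIS: invalidate branch predictors, Inner Shareable */
		asm volatile("mcr p15, 0, %0, c7, c1, 6" : : "r" (0));
	#else
		/* BPIALL: invalidate branch predictors on this CPU only */
		asm volatile("mcr p15, 0, %0, c7, c5, 6" : : "r" (0));
	#endif
		asm volatile("dsb");	/* complete before further fetches */
	}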
diff --git a/arch/arm/mm/vmregion.c b/arch/arm/mm/vmregion.c
index 19e09bdb1b8a..935993e1b1ef 100644
--- a/arch/arm/mm/vmregion.c
+++ b/arch/arm/mm/vmregion.c
@@ -35,7 +35,8 @@
  */
 
 struct arm_vmregion *
-arm_vmregion_alloc(struct arm_vmregion_head *head, size_t size, gfp_t gfp)
+arm_vmregion_alloc(struct arm_vmregion_head *head, size_t align,
+		   size_t size, gfp_t gfp)
 {
 	unsigned long addr = head->vm_start, end = head->vm_end - size;
 	unsigned long flags;
@@ -58,7 +59,7 @@ arm_vmregion_alloc(struct arm_vmregion_head *head, size_t size, gfp_t gfp)
 			goto nospc;
 		if ((addr + size) <= c->vm_start)
 			goto found;
-		addr = ALIGN(c->vm_end, align);
 		if (addr > end)
 			goto nospc;
 	}
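Note: ALIGN() rounds a value up to the next multiple of a power-of-two
boundary, so the gap after c->vm_end is now only considered from the next
aligned address onwards. The equivalent arithmetic, for reference (macro name
is illustrative; the kernel's own ALIGN() does the same thing):

	/* ALIGN(x, a) for power-of-two a, as used above */
	#define ALIGN_EXAMPLE(x, a)	(((x) + (a) - 1) & ~((a) - 1))
	/* e.g. ALIGN_EXAMPLE(0x12345, 0x1000) == 0x13000 */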
diff --git a/arch/arm/mm/vmregion.h b/arch/arm/mm/vmregion.h
index 6b2cdbdf3a85..15e9f044db9f 100644
--- a/arch/arm/mm/vmregion.h
+++ b/arch/arm/mm/vmregion.h
@@ -21,7 +21,7 @@ struct arm_vmregion {
 	int		vm_active;
 };
 
-struct arm_vmregion *arm_vmregion_alloc(struct arm_vmregion_head *, size_t, gfp_t);
+struct arm_vmregion *arm_vmregion_alloc(struct arm_vmregion_head *, size_t, size_t, gfp_t);
 struct arm_vmregion *arm_vmregion_find(struct arm_vmregion_head *, unsigned long);
 struct arm_vmregion *arm_vmregion_find_remove(struct arm_vmregion_head *, unsigned long);
 void arm_vmregion_free(struct arm_vmregion_head *, struct arm_vmregion *);
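Note: with the extra size_t, every caller must now pass an explicit alignment
as the second argument. A hedged example of an updated call site (the head
variable and SZ_1M alignment shown are assumptions for illustration, not taken
from this diff):

	struct arm_vmregion *c;

	/* ask for a 1MiB-aligned region of 'size' bytes */
	c = arm_vmregion_alloc(&consistent_head, SZ_1M, size, GFP_KERNEL);
	if (!c)
		return NULL;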