author		Linus Torvalds <torvalds@linux-foundation.org>	2010-03-01 12:15:15 -0500
committer	Linus Torvalds <torvalds@linux-foundation.org>	2010-03-01 12:15:15 -0500
commit		ac0f6f927db539e03e1f3f61bcd4ed57d5cde7a9 (patch)
tree		816e5ac643b15c2050c64a7075f0f7e13d86ea09 /arch/arm/mm
parent		b1bf9368407ae7e89d8a005bb40beb70a41df539 (diff)
parent		9f33be2c3a80bdc2cc08342dd77fac87652e0548 (diff)
Merge branch 'for-linus' of master.kernel.org:/home/rmk/linux-2.6-arm
* 'for-linus' of master.kernel.org:/home/rmk/linux-2.6-arm: (100 commits)
ARM: Eliminate decompressor -Dstatic= PIC hack
ARM: 5958/1: ARM: U300: fix inverted clk round rate
ARM: 5956/1: misplaced parentheses
ARM: 5955/1: ep93xx: move timer defines into core.c and document
ARM: 5954/1: ep93xx: move gpio interrupt support to gpio.c
ARM: 5953/1: ep93xx: fix broken build of clock.c
ARM: 5952/1: ARM: MM: Add ARM_L1_CACHE_SHIFT_6 for handle inside each ARCH Kconfig
ARM: 5949/1: NUC900 add gpio virtual memory map
ARM: 5948/1: Enable timer0 to time4 clock support for nuc910
ARM: 5940/2: ARM: MMCI: remove custom DBG macro and printk
ARM: make_coherent(): fix problems with highpte, part 2
MM: Pass a PTE pointer to update_mmu_cache() rather than the PTE itself
ARM: 5945/1: ep93xx: include correct irq.h in core.c
ARM: 5933/1: amba-pl011: support hardware flow control
ARM: 5930/1: Add PKMAP area description to memory.txt.
ARM: 5929/1: Add checks to detect overlap of memory regions.
ARM: 5928/1: Change type of VMALLOC_END to unsigned long.
ARM: 5927/1: Make delimiters of DMA area globally visibly.
ARM: 5926/1: Add "Virtual kernel memory..." printout.
ARM: 5920/1: OMAP4: Enable L2 Cache
...
Fix up trivial conflict in arch/arm/mach-mx25/clock.c
Diffstat (limited to 'arch/arm/mm')
41 files changed, 1143 insertions, 361 deletions
diff --git a/arch/arm/mm/Kconfig b/arch/arm/mm/Kconfig
index baf638487a2d..c4ed9f93f646 100644
--- a/arch/arm/mm/Kconfig
+++ b/arch/arm/mm/Kconfig
@@ -399,7 +399,7 @@ config CPU_V6
 config CPU_32v6K
 	bool "Support ARM V6K processor extensions" if !SMP
 	depends on CPU_V6
-	default y if SMP && !ARCH_MX3
+	default y if SMP && !(ARCH_MX3 || ARCH_OMAP2)
 	help
 	  Say Y here if your ARMv6 processor supports the 'K' extension.
 	  This enables the kernel to use some instructions not present
@@ -410,7 +410,7 @@ config CPU_32v6K
 # ARMv7
 config CPU_V7
 	bool "Support ARM V7 processor" if ARCH_INTEGRATOR || MACH_REALVIEW_EB || MACH_REALVIEW_PBX
-	select CPU_32v6K
+	select CPU_32v6K if !ARCH_OMAP2
 	select CPU_32v7
 	select CPU_ABRT_EV7
 	select CPU_PABRT_V7
@@ -754,7 +754,7 @@ config CACHE_FEROCEON_L2_WRITETHROUGH
 config CACHE_L2X0
 	bool "Enable the L2x0 outer cache controller"
 	depends on REALVIEW_EB_ARM11MP || MACH_REALVIEW_PB11MP || MACH_REALVIEW_PB1176 || \
-		   REALVIEW_EB_A9MP || ARCH_MX35 || ARCH_MX31 || MACH_REALVIEW_PBX || ARCH_NOMADIK
+		   REALVIEW_EB_A9MP || ARCH_MX35 || ARCH_MX31 || MACH_REALVIEW_PBX || ARCH_NOMADIK || ARCH_OMAP4
 	default y
 	select OUTER_CACHE
 	help
@@ -779,5 +779,5 @@ config CACHE_XSC3L2
 
 config ARM_L1_CACHE_SHIFT
 	int
-	default 6 if ARCH_OMAP3 || ARCH_S5PC1XX
+	default 6 if ARM_L1_CACHE_SHIFT_6
 	default 5
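The ARM_L1_CACHE_SHIFT hunk replaces a growing per-platform list (ARCH_OMAP3 || ARCH_S5PC1XX) with a single ARM_L1_CACHE_SHIFT_6 symbol that each machine Kconfig now selects for itself, so new 64-byte-line platforms no longer need to patch this file. The value feeds the generic cache-geometry macros; a sketch of how it is consumed, based on the arch/arm/include/asm/cache.h of this era (worth re-checking against the tree):

    /* Sketch: assumed layout of arch/arm/include/asm/cache.h around 2.6.34. */
    #define L1_CACHE_SHIFT	CONFIG_ARM_L1_CACHE_SHIFT	/* 5 or 6 */
    #define L1_CACHE_BYTES	(1 << L1_CACHE_SHIFT)		/* 32 or 64 bytes */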
diff --git a/arch/arm/mm/alignment.c b/arch/arm/mm/alignment.c
index 62820eda84d9..edddd66faac6 100644
--- a/arch/arm/mm/alignment.c
+++ b/arch/arm/mm/alignment.c
@@ -901,11 +901,7 @@ static int __init alignment_init(void)
 #ifdef CONFIG_PROC_FS
 	struct proc_dir_entry *res;
 
-	res = proc_mkdir("cpu", NULL);
-	if (!res)
-		return -ENOMEM;
-
-	res = create_proc_entry("alignment", S_IWUSR | S_IRUGO, res);
+	res = create_proc_entry("cpu/alignment", S_IWUSR | S_IRUGO, NULL);
 	if (!res)
 		return -ENOMEM;
 
diff --git a/arch/arm/mm/cache-fa.S b/arch/arm/mm/cache-fa.S
index a89444a3c016..7148e53e6078 100644
--- a/arch/arm/mm/cache-fa.S
+++ b/arch/arm/mm/cache-fa.S
@@ -157,7 +157,7 @@ ENTRY(fa_flush_kern_dcache_area)
  *	- start  - virtual start address
  *	- end	 - virtual end address
  */
-ENTRY(fa_dma_inv_range)
+fa_dma_inv_range:
 	tst	r0, #CACHE_DLINESIZE - 1
 	bic	r0, r0, #CACHE_DLINESIZE - 1
 	mcrne	p15, 0, r0, c7, c14, 1		@ clean & invalidate D entry
@@ -180,7 +180,7 @@ ENTRY(fa_dma_inv_range)
  *	- start  - virtual start address
  *	- end	 - virtual end address
  */
-ENTRY(fa_dma_clean_range)
+fa_dma_clean_range:
 	bic	r0, r0, #CACHE_DLINESIZE - 1
 1:	mcr	p15, 0, r0, c7, c10, 1		@ clean D entry
 	add	r0, r0, #CACHE_DLINESIZE
@@ -205,6 +205,30 @@ ENTRY(fa_dma_flush_range)
 	mcr	p15, 0, r0, c7, c10, 4		@ drain write buffer
 	mov	pc, lr
 
+/*
+ *	dma_map_area(start, size, dir)
+ *	- start	- kernel virtual start address
+ *	- size	- size of region
+ *	- dir	- DMA direction
+ */
+ENTRY(fa_dma_map_area)
+	add	r1, r1, r0
+	cmp	r2, #DMA_TO_DEVICE
+	beq	fa_dma_clean_range
+	bcs	fa_dma_inv_range
+	b	fa_dma_flush_range
+ENDPROC(fa_dma_map_area)
+
+/*
+ *	dma_unmap_area(start, size, dir)
+ *	- start	- kernel virtual start address
+ *	- size	- size of region
+ *	- dir	- DMA direction
+ */
+ENTRY(fa_dma_unmap_area)
+	mov	pc, lr
+ENDPROC(fa_dma_unmap_area)
+
 	__INITDATA
 
 	.type	fa_cache_fns, #object
@@ -215,7 +239,7 @@ ENTRY(fa_cache_fns)
 	.long	fa_coherent_kern_range
 	.long	fa_coherent_user_range
 	.long	fa_flush_kern_dcache_area
-	.long	fa_dma_inv_range
-	.long	fa_dma_clean_range
+	.long	fa_dma_map_area
+	.long	fa_dma_unmap_area
 	.long	fa_dma_flush_range
 	.size	fa_cache_fns, . - fa_cache_fns
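The new fa_dma_map_area entry dispatches on the DMA direction passed in r2. Since enum dma_data_direction numbers DMA_BIDIRECTIONAL as 0, DMA_TO_DEVICE as 1 and DMA_FROM_DEVICE as 2, the cmp/beq/bcs sequence picks a clean for TO_DEVICE, an invalidate for FROM_DEVICE, and a full flush for BIDIRECTIONAL. A C rendering of the same dispatch, illustrative only (the real code stays in assembly so it can tail-call the range routines):

    /* Illustrative C equivalent of the fa_dma_map_area dispatch;
     * the range helpers stand for the assembly routines above. */
    static void fa_dma_map_area(unsigned long start, size_t size, int dir)
    {
    	unsigned long end = start + size;	/* add r1, r1, r0 */

    	if (dir == DMA_TO_DEVICE)		/* beq: writeback only */
    		fa_dma_clean_range(start, end);
    	else if (dir == DMA_FROM_DEVICE)	/* bcs: invalidate only */
    		fa_dma_inv_range(start, end);
    	else					/* DMA_BIDIRECTIONAL */
    		fa_dma_flush_range(start, end);
    }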
diff --git a/arch/arm/mm/cache-l2x0.c b/arch/arm/mm/cache-l2x0.c
index cb8fc6573b1b..07334632d3e2 100644
--- a/arch/arm/mm/cache-l2x0.c
+++ b/arch/arm/mm/cache-l2x0.c
@@ -42,6 +42,57 @@ static inline void cache_sync(void)
 	cache_wait(base + L2X0_CACHE_SYNC, 1);
 }
 
+static inline void l2x0_clean_line(unsigned long addr)
+{
+	void __iomem *base = l2x0_base;
+	cache_wait(base + L2X0_CLEAN_LINE_PA, 1);
+	writel(addr, base + L2X0_CLEAN_LINE_PA);
+}
+
+static inline void l2x0_inv_line(unsigned long addr)
+{
+	void __iomem *base = l2x0_base;
+	cache_wait(base + L2X0_INV_LINE_PA, 1);
+	writel(addr, base + L2X0_INV_LINE_PA);
+}
+
+#ifdef CONFIG_PL310_ERRATA_588369
+static void debug_writel(unsigned long val)
+{
+	extern void omap_smc1(u32 fn, u32 arg);
+
+	/*
+	 * Texas Instrument secure monitor api to modify the
+	 * PL310 Debug Control Register.
+	 */
+	omap_smc1(0x100, val);
+}
+
+static inline void l2x0_flush_line(unsigned long addr)
+{
+	void __iomem *base = l2x0_base;
+
+	/* Clean by PA followed by Invalidate by PA */
+	cache_wait(base + L2X0_CLEAN_LINE_PA, 1);
+	writel(addr, base + L2X0_CLEAN_LINE_PA);
+	cache_wait(base + L2X0_INV_LINE_PA, 1);
+	writel(addr, base + L2X0_INV_LINE_PA);
+}
+#else
+
+/* Optimised out for non-errata case */
+static inline void debug_writel(unsigned long val)
+{
+}
+
+static inline void l2x0_flush_line(unsigned long addr)
+{
+	void __iomem *base = l2x0_base;
+	cache_wait(base + L2X0_CLEAN_INV_LINE_PA, 1);
+	writel(addr, base + L2X0_CLEAN_INV_LINE_PA);
+}
+#endif
+
 static inline void l2x0_inv_all(void)
 {
 	unsigned long flags;
@@ -62,23 +113,24 @@ static void l2x0_inv_range(unsigned long start, unsigned long end)
 	spin_lock_irqsave(&l2x0_lock, flags);
 	if (start & (CACHE_LINE_SIZE - 1)) {
 		start &= ~(CACHE_LINE_SIZE - 1);
-		cache_wait(base + L2X0_CLEAN_INV_LINE_PA, 1);
-		writel(start, base + L2X0_CLEAN_INV_LINE_PA);
+		debug_writel(0x03);
+		l2x0_flush_line(start);
+		debug_writel(0x00);
 		start += CACHE_LINE_SIZE;
 	}
 
 	if (end & (CACHE_LINE_SIZE - 1)) {
 		end &= ~(CACHE_LINE_SIZE - 1);
-		cache_wait(base + L2X0_CLEAN_INV_LINE_PA, 1);
-		writel(end, base + L2X0_CLEAN_INV_LINE_PA);
+		debug_writel(0x03);
+		l2x0_flush_line(end);
+		debug_writel(0x00);
 	}
 
 	while (start < end) {
 		unsigned long blk_end = start + min(end - start, 4096UL);
 
 		while (start < blk_end) {
-			cache_wait(base + L2X0_INV_LINE_PA, 1);
-			writel(start, base + L2X0_INV_LINE_PA);
+			l2x0_inv_line(start);
 			start += CACHE_LINE_SIZE;
 		}
 
@@ -103,8 +155,7 @@ static void l2x0_clean_range(unsigned long start, unsigned long end)
 		unsigned long blk_end = start + min(end - start, 4096UL);
 
 		while (start < blk_end) {
-			cache_wait(base + L2X0_CLEAN_LINE_PA, 1);
-			writel(start, base + L2X0_CLEAN_LINE_PA);
+			l2x0_clean_line(start);
 			start += CACHE_LINE_SIZE;
 		}
 
@@ -128,11 +179,12 @@ static void l2x0_flush_range(unsigned long start, unsigned long end)
 	while (start < end) {
 		unsigned long blk_end = start + min(end - start, 4096UL);
 
+		debug_writel(0x03);
 		while (start < blk_end) {
-			cache_wait(base + L2X0_CLEAN_INV_LINE_PA, 1);
-			writel(start, base + L2X0_CLEAN_INV_LINE_PA);
+			l2x0_flush_line(start);
 			start += CACHE_LINE_SIZE;
 		}
+		debug_writel(0x00);
 
 		if (blk_end < end) {
 			spin_unlock_irqrestore(&l2x0_lock, flags);
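Under CONFIG_PL310_ERRATA_588369 the atomic clean-and-invalidate-by-PA operation is avoided: l2x0_flush_line() splits it into a clean by PA followed by an invalidate by PA, and the callers bracket the sequence with debug_writel(0x03)/debug_writel(0x00). The 0x03 value presumably sets the two low bits of the PL310 Debug Control Register (disabling cache linefills and forcing write-through for the duration), which on OMAP4 can only be written through the secure monitor, hence omap_smc1(). The bracketing pattern, schematically:

    /* Sketch of the errata-safe flush used above; the meaning of 0x03
     * (disable linefill + write-back) is an assumption from the errata. */
    debug_writel(0x03);		/* enter errata-safe mode */
    l2x0_flush_line(addr);	/* clean by PA, then invalidate by PA */
    debug_writel(0x00);		/* restore normal operation */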
diff --git a/arch/arm/mm/cache-v3.S b/arch/arm/mm/cache-v3.S
index 2a482731ea36..c2ff3c599fee 100644
--- a/arch/arm/mm/cache-v3.S
+++ b/arch/arm/mm/cache-v3.S
@@ -84,20 +84,6 @@ ENTRY(v3_flush_kern_dcache_area)
 	/* FALLTHROUGH */
 
 /*
- *	dma_inv_range(start, end)
- *
- *	Invalidate (discard) the specified virtual address range.
- *	May not write back any entries.  If 'start' or 'end'
- *	are not cache line aligned, those lines must be written
- *	back.
- *
- *	- start  - virtual start address
- *	- end	 - virtual end address
- */
-ENTRY(v3_dma_inv_range)
-	/* FALLTHROUGH */
-
-/*
  *	dma_flush_range(start, end)
  *
  *	Clean and invalidate the specified virtual address range.
@@ -108,18 +94,29 @@ ENTRY(v3_dma_inv_range)
 ENTRY(v3_dma_flush_range)
 	mov	r0, #0
 	mcr	p15, 0, r0, c7, c0, 0	@ flush ID cache
+	mov	pc, lr
+
+/*
+ *	dma_unmap_area(start, size, dir)
+ *	- start	- kernel virtual start address
+ *	- size	- size of region
+ *	- dir	- DMA direction
+ */
+ENTRY(v3_dma_unmap_area)
+	teq	r2, #DMA_TO_DEVICE
+	bne	v3_dma_flush_range
 	/* FALLTHROUGH */
 
 /*
- *	dma_clean_range(start, end)
- *
- *	Clean (write back) the specified virtual address range.
- *
- *	- start  - virtual start address
- *	- end	 - virtual end address
+ *	dma_map_area(start, size, dir)
+ *	- start	- kernel virtual start address
+ *	- size	- size of region
+ *	- dir	- DMA direction
  */
-ENTRY(v3_dma_clean_range)
+ENTRY(v3_dma_map_area)
 	mov	pc, lr
+ENDPROC(v3_dma_unmap_area)
+ENDPROC(v3_dma_map_area)
 
 	__INITDATA
 
@@ -131,7 +128,7 @@ ENTRY(v3_cache_fns)
 	.long	v3_coherent_kern_range
 	.long	v3_coherent_user_range
 	.long	v3_flush_kern_dcache_area
-	.long	v3_dma_inv_range
-	.long	v3_dma_clean_range
+	.long	v3_dma_map_area
+	.long	v3_dma_unmap_area
 	.long	v3_dma_flush_range
 	.size	v3_cache_fns, . - v3_cache_fns
diff --git a/arch/arm/mm/cache-v4.S b/arch/arm/mm/cache-v4.S
index 5c7da3e372e9..4810f7e3e813 100644
--- a/arch/arm/mm/cache-v4.S
+++ b/arch/arm/mm/cache-v4.S
@@ -94,20 +94,6 @@ ENTRY(v4_flush_kern_dcache_area)
 	/* FALLTHROUGH */
 
 /*
- *	dma_inv_range(start, end)
- *
- *	Invalidate (discard) the specified virtual address range.
- *	May not write back any entries.  If 'start' or 'end'
- *	are not cache line aligned, those lines must be written
- *	back.
- *
- *	- start  - virtual start address
- *	- end	 - virtual end address
- */
-ENTRY(v4_dma_inv_range)
-	/* FALLTHROUGH */
-
-/*
  *	dma_flush_range(start, end)
  *
  *	Clean and invalidate the specified virtual address range.
@@ -120,18 +106,29 @@ ENTRY(v4_dma_flush_range)
 	mov	r0, #0
 	mcr	p15, 0, r0, c7, c7, 0		@ flush ID cache
 #endif
+	mov	pc, lr
+
+/*
+ *	dma_unmap_area(start, size, dir)
+ *	- start	- kernel virtual start address
+ *	- size	- size of region
+ *	- dir	- DMA direction
+ */
+ENTRY(v4_dma_unmap_area)
+	teq	r2, #DMA_TO_DEVICE
+	bne	v4_dma_flush_range
 	/* FALLTHROUGH */
 
 /*
- *	dma_clean_range(start, end)
- *
- *	Clean (write back) the specified virtual address range.
- *
- *	- start  - virtual start address
- *	- end	 - virtual end address
+ *	dma_map_area(start, size, dir)
+ *	- start	- kernel virtual start address
+ *	- size	- size of region
+ *	- dir	- DMA direction
  */
-ENTRY(v4_dma_clean_range)
+ENTRY(v4_dma_map_area)
 	mov	pc, lr
+ENDPROC(v4_dma_unmap_area)
+ENDPROC(v4_dma_map_area)
 
 	__INITDATA
 
@@ -143,7 +140,7 @@ ENTRY(v4_cache_fns)
 	.long	v4_coherent_kern_range
 	.long	v4_coherent_user_range
 	.long	v4_flush_kern_dcache_area
-	.long	v4_dma_inv_range
-	.long	v4_dma_clean_range
+	.long	v4_dma_map_area
+	.long	v4_dma_unmap_area
 	.long	v4_dma_flush_range
 	.size	v4_cache_fns, . - v4_cache_fns
diff --git a/arch/arm/mm/cache-v4wb.S b/arch/arm/mm/cache-v4wb.S
index 3dbedf1ec0e7..df8368afa102 100644
--- a/arch/arm/mm/cache-v4wb.S
+++ b/arch/arm/mm/cache-v4wb.S
@@ -173,7 +173,7 @@ ENTRY(v4wb_coherent_user_range)
  *	- start  - virtual start address
  *	- end	 - virtual end address
  */
-ENTRY(v4wb_dma_inv_range)
+v4wb_dma_inv_range:
 	tst	r0, #CACHE_DLINESIZE - 1
 	bic	r0, r0, #CACHE_DLINESIZE - 1
 	mcrne	p15, 0, r0, c7, c10, 1		@ clean D entry
@@ -194,7 +194,7 @@ ENTRY(v4wb_dma_inv_range)
  *	- start  - virtual start address
  *	- end	 - virtual end address
  */
-ENTRY(v4wb_dma_clean_range)
+v4wb_dma_clean_range:
 	bic	r0, r0, #CACHE_DLINESIZE - 1
 1:	mcr	p15, 0, r0, c7, c10, 1		@ clean D entry
 	add	r0, r0, #CACHE_DLINESIZE
@@ -216,6 +216,30 @@ ENTRY(v4wb_dma_clean_range)
 	.globl	v4wb_dma_flush_range
 	.set	v4wb_dma_flush_range, v4wb_coherent_kern_range
 
+/*
+ *	dma_map_area(start, size, dir)
+ *	- start	- kernel virtual start address
+ *	- size	- size of region
+ *	- dir	- DMA direction
+ */
+ENTRY(v4wb_dma_map_area)
+	add	r1, r1, r0
+	cmp	r2, #DMA_TO_DEVICE
+	beq	v4wb_dma_clean_range
+	bcs	v4wb_dma_inv_range
+	b	v4wb_dma_flush_range
+ENDPROC(v4wb_dma_map_area)
+
+/*
+ *	dma_unmap_area(start, size, dir)
+ *	- start	- kernel virtual start address
+ *	- size	- size of region
+ *	- dir	- DMA direction
+ */
+ENTRY(v4wb_dma_unmap_area)
+	mov	pc, lr
+ENDPROC(v4wb_dma_unmap_area)
+
 	__INITDATA
 
 	.type	v4wb_cache_fns, #object
@@ -226,7 +250,7 @@ ENTRY(v4wb_cache_fns)
 	.long	v4wb_coherent_kern_range
 	.long	v4wb_coherent_user_range
 	.long	v4wb_flush_kern_dcache_area
-	.long	v4wb_dma_inv_range
-	.long	v4wb_dma_clean_range
+	.long	v4wb_dma_map_area
+	.long	v4wb_dma_unmap_area
 	.long	v4wb_dma_flush_range
 	.size	v4wb_cache_fns, . - v4wb_cache_fns
diff --git a/arch/arm/mm/cache-v4wt.S b/arch/arm/mm/cache-v4wt.S
index b3b7410270b4..45c70312f43b 100644
--- a/arch/arm/mm/cache-v4wt.S
+++ b/arch/arm/mm/cache-v4wt.S
@@ -142,23 +142,12 @@ ENTRY(v4wt_flush_kern_dcache_area)
  *	- start  - virtual start address
  *	- end	 - virtual end address
  */
-ENTRY(v4wt_dma_inv_range)
+v4wt_dma_inv_range:
 	bic	r0, r0, #CACHE_DLINESIZE - 1
 1:	mcr	p15, 0, r0, c7, c6, 1		@ invalidate D entry
 	add	r0, r0, #CACHE_DLINESIZE
 	cmp	r0, r1
 	blo	1b
-	/* FALLTHROUGH */
-
-/*
- *	dma_clean_range(start, end)
- *
- *	Clean the specified virtual address range.
- *
- *	- start  - virtual start address
- *	- end	 - virtual end address
- */
-ENTRY(v4wt_dma_clean_range)
 	mov	pc, lr
 
 /*
@@ -172,6 +161,29 @@ ENTRY(v4wt_dma_clean_range)
 	.globl	v4wt_dma_flush_range
 	.equ	v4wt_dma_flush_range, v4wt_dma_inv_range
 
+/*
+ *	dma_unmap_area(start, size, dir)
+ *	- start	- kernel virtual start address
+ *	- size	- size of region
+ *	- dir	- DMA direction
+ */
+ENTRY(v4wt_dma_unmap_area)
+	add	r1, r1, r0
+	teq	r2, #DMA_TO_DEVICE
+	bne	v4wt_dma_inv_range
+	/* FALLTHROUGH */
+
+/*
+ *	dma_map_area(start, size, dir)
+ *	- start	- kernel virtual start address
+ *	- size	- size of region
+ *	- dir	- DMA direction
+ */
+ENTRY(v4wt_dma_map_area)
+	mov	pc, lr
+ENDPROC(v4wt_dma_unmap_area)
+ENDPROC(v4wt_dma_map_area)
+
 	__INITDATA
 
 	.type	v4wt_cache_fns, #object
@@ -182,7 +194,7 @@ ENTRY(v4wt_cache_fns)
 	.long	v4wt_coherent_kern_range
 	.long	v4wt_coherent_user_range
 	.long	v4wt_flush_kern_dcache_area
-	.long	v4wt_dma_inv_range
-	.long	v4wt_dma_clean_range
+	.long	v4wt_dma_map_area
+	.long	v4wt_dma_unmap_area
 	.long	v4wt_dma_flush_range
 	.size	v4wt_cache_fns, . - v4wt_cache_fns
diff --git a/arch/arm/mm/cache-v6.S b/arch/arm/mm/cache-v6.S
index 4ba0a24ce6f5..9d89c67a1cc3 100644
--- a/arch/arm/mm/cache-v6.S
+++ b/arch/arm/mm/cache-v6.S
@@ -195,7 +195,7 @@ ENTRY(v6_flush_kern_dcache_area)
 *	- start   - virtual start address of region
 *	- end     - virtual end address of region
 */
-ENTRY(v6_dma_inv_range)
+v6_dma_inv_range:
 	tst	r0, #D_CACHE_LINE_SIZE - 1
 	bic	r0, r0, #D_CACHE_LINE_SIZE - 1
 #ifdef HARVARD_CACHE
@@ -228,7 +228,7 @@ ENTRY(v6_dma_inv_range)
 *	- start   - virtual start address of region
 *	- end     - virtual end address of region
 */
-ENTRY(v6_dma_clean_range)
+v6_dma_clean_range:
 	bic	r0, r0, #D_CACHE_LINE_SIZE - 1
 1:
 #ifdef HARVARD_CACHE
@@ -263,6 +263,32 @@ ENTRY(v6_dma_flush_range)
 	mcr	p15, 0, r0, c7, c10, 4		@ drain write buffer
 	mov	pc, lr
 
+/*
+ * dma_map_area(start, size, dir)
+ * - start	- kernel virtual start address
+ * - size	- size of region
+ * - dir	- DMA direction
+ */
+ENTRY(v6_dma_map_area)
+	add	r1, r1, r0
+	teq	r2, #DMA_FROM_DEVICE
+	beq	v6_dma_inv_range
+	b	v6_dma_clean_range
+ENDPROC(v6_dma_map_area)
+
+/*
+ * dma_unmap_area(start, size, dir)
+ * - start	- kernel virtual start address
+ * - size	- size of region
+ * - dir	- DMA direction
+ */
+ENTRY(v6_dma_unmap_area)
+	add	r1, r1, r0
+	teq	r2, #DMA_TO_DEVICE
+	bne	v6_dma_inv_range
+	mov	pc, lr
+ENDPROC(v6_dma_unmap_area)
+
 	__INITDATA
 
 	.type	v6_cache_fns, #object
@@ -273,7 +299,7 @@ ENTRY(v6_cache_fns)
 	.long	v6_coherent_kern_range
 	.long	v6_coherent_user_range
 	.long	v6_flush_kern_dcache_area
-	.long	v6_dma_inv_range
-	.long	v6_dma_clean_range
+	.long	v6_dma_map_area
+	.long	v6_dma_unmap_area
 	.long	v6_dma_flush_range
 	.size	v6_cache_fns, . - v6_cache_fns
diff --git a/arch/arm/mm/cache-v7.S b/arch/arm/mm/cache-v7.S
index 9073db849fb4..bcd64f265870 100644
--- a/arch/arm/mm/cache-v7.S
+++ b/arch/arm/mm/cache-v7.S
@@ -216,7 +216,7 @@ ENDPROC(v7_flush_kern_dcache_area)
 *	- start   - virtual start address of region
 *	- end     - virtual end address of region
 */
-ENTRY(v7_dma_inv_range)
+v7_dma_inv_range:
 	dcache_line_size r2, r3
 	sub	r3, r2, #1
 	tst	r0, r3
@@ -240,7 +240,7 @@ ENDPROC(v7_dma_inv_range)
 *	- start   - virtual start address of region
 *	- end     - virtual end address of region
 */
-ENTRY(v7_dma_clean_range)
+v7_dma_clean_range:
 	dcache_line_size r2, r3
 	sub	r3, r2, #1
 	bic	r0, r0, r3
@@ -271,6 +271,32 @@ ENTRY(v7_dma_flush_range)
 	mov	pc, lr
 ENDPROC(v7_dma_flush_range)
 
+/*
+ * dma_map_area(start, size, dir)
+ * - start	- kernel virtual start address
+ * - size	- size of region
+ * - dir	- DMA direction
+ */
+ENTRY(v7_dma_map_area)
+	add	r1, r1, r0
+	teq	r2, #DMA_FROM_DEVICE
+	beq	v7_dma_inv_range
+	b	v7_dma_clean_range
+ENDPROC(v7_dma_map_area)
+
+/*
+ * dma_unmap_area(start, size, dir)
+ * - start	- kernel virtual start address
+ * - size	- size of region
+ * - dir	- DMA direction
+ */
+ENTRY(v7_dma_unmap_area)
+	add	r1, r1, r0
+	teq	r2, #DMA_TO_DEVICE
+	bne	v7_dma_inv_range
+	mov	pc, lr
+ENDPROC(v7_dma_unmap_area)
+
 	__INITDATA
 
 	.type	v7_cache_fns, #object
@@ -281,7 +307,7 @@ ENTRY(v7_cache_fns)
 	.long	v7_coherent_kern_range
 	.long	v7_coherent_user_range
 	.long	v7_flush_kern_dcache_area
-	.long	v7_dma_inv_range
-	.long	v7_dma_clean_range
+	.long	v7_dma_map_area
+	.long	v7_dma_unmap_area
 	.long	v7_dma_flush_range
 	.size	v7_cache_fns, . - v7_cache_fns
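Note the asymmetry shared by the v6 and v7 implementations: at map time the buffer is cleaned (TO_DEVICE, BIDIRECTIONAL) or invalidated (FROM_DEVICE), while at unmap time only non-TO_DEVICE buffers are invalidated again, which guards against lines pulled into the cache speculatively while the device owned the buffer. In rough C form, a sketch only, not the kernel's actual API:

    /* Rough C equivalent of the v6/v7 map/unmap policy; the range
     * helpers stand for the assembly routines in these files. */
    static void dma_map_area(unsigned long start, size_t size, int dir)
    {
    	if (dir == DMA_FROM_DEVICE)
    		dma_inv_range(start, start + size);	/* device will write */
    	else
    		dma_clean_range(start, start + size);	/* push dirty lines out */
    }

    static void dma_unmap_area(unsigned long start, size_t size, int dir)
    {
    	if (dir != DMA_TO_DEVICE)			/* CPU will read */
    		dma_inv_range(start, start + size);	/* drop speculative fills */
    }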
diff --git a/arch/arm/mm/context.c b/arch/arm/mm/context.c
index a9e22e31eaa1..b0ee9ba3cfab 100644
--- a/arch/arm/mm/context.c
+++ b/arch/arm/mm/context.c
@@ -10,12 +10,17 @@
 #include <linux/init.h>
 #include <linux/sched.h>
 #include <linux/mm.h>
+#include <linux/smp.h>
+#include <linux/percpu.h>
 
 #include <asm/mmu_context.h>
 #include <asm/tlbflush.h>
 
 static DEFINE_SPINLOCK(cpu_asid_lock);
 unsigned int cpu_last_asid = ASID_FIRST_VERSION;
+#ifdef CONFIG_SMP
+DEFINE_PER_CPU(struct mm_struct *, current_mm);
+#endif
 
 /*
  * We fork()ed a process, and we need a new context for the child
@@ -26,13 +31,109 @@ unsigned int cpu_last_asid = ASID_FIRST_VERSION;
 void __init_new_context(struct task_struct *tsk, struct mm_struct *mm)
 {
 	mm->context.id = 0;
+	spin_lock_init(&mm->context.id_lock);
 }
 
+static void flush_context(void)
+{
+	/* set the reserved ASID before flushing the TLB */
+	asm("mcr	p15, 0, %0, c13, c0, 1\n" : : "r" (0));
+	isb();
+	local_flush_tlb_all();
+	if (icache_is_vivt_asid_tagged()) {
+		__flush_icache_all();
+		dsb();
+	}
+}
+
+#ifdef CONFIG_SMP
+
+static void set_mm_context(struct mm_struct *mm, unsigned int asid)
+{
+	unsigned long flags;
+
+	/*
+	 * Locking needed for multi-threaded applications where the
+	 * same mm->context.id could be set from different CPUs during
+	 * the broadcast. This function is also called via IPI so the
+	 * mm->context.id_lock has to be IRQ-safe.
+	 */
+	spin_lock_irqsave(&mm->context.id_lock, flags);
+	if (likely((mm->context.id ^ cpu_last_asid) >> ASID_BITS)) {
+		/*
+		 * Old version of ASID found. Set the new one and
+		 * reset mm_cpumask(mm).
+		 */
+		mm->context.id = asid;
+		cpumask_clear(mm_cpumask(mm));
+	}
+	spin_unlock_irqrestore(&mm->context.id_lock, flags);
+
+	/*
+	 * Set the mm_cpumask(mm) bit for the current CPU.
+	 */
+	cpumask_set_cpu(smp_processor_id(), mm_cpumask(mm));
+}
+
+/*
+ * Reset the ASID on the current CPU. This function call is broadcast
+ * from the CPU handling the ASID rollover and holding cpu_asid_lock.
+ */
+static void reset_context(void *info)
+{
+	unsigned int asid;
+	unsigned int cpu = smp_processor_id();
+	struct mm_struct *mm = per_cpu(current_mm, cpu);
+
+	/*
+	 * Check if a current_mm was set on this CPU as it might still
+	 * be in the early booting stages and using the reserved ASID.
+	 */
+	if (!mm)
+		return;
+
+	smp_rmb();
+	asid = cpu_last_asid + cpu + 1;
+
+	flush_context();
+	set_mm_context(mm, asid);
+
+	/* set the new ASID */
+	asm("mcr	p15, 0, %0, c13, c0, 1\n" : : "r" (mm->context.id));
+	isb();
+}
+
+#else
+
+static inline void set_mm_context(struct mm_struct *mm, unsigned int asid)
+{
+	mm->context.id = asid;
+	cpumask_copy(mm_cpumask(mm), cpumask_of(smp_processor_id()));
+}
+
+#endif
+
 void __new_context(struct mm_struct *mm)
 {
 	unsigned int asid;
 
 	spin_lock(&cpu_asid_lock);
+#ifdef CONFIG_SMP
+	/*
+	 * Check the ASID again, in case the change was broadcast from
+	 * another CPU before we acquired the lock.
+	 */
+	if (unlikely(((mm->context.id ^ cpu_last_asid) >> ASID_BITS) == 0)) {
+		cpumask_set_cpu(smp_processor_id(), mm_cpumask(mm));
+		spin_unlock(&cpu_asid_lock);
+		return;
+	}
+#endif
+	/*
+	 * At this point, it is guaranteed that the current mm (with
+	 * an old ASID) isn't active on any other CPU since the ASIDs
+	 * are changed simultaneously via IPI.
+	 */
 	asid = ++cpu_last_asid;
 	if (asid == 0)
 		asid = cpu_last_asid = ASID_FIRST_VERSION;
@@ -42,20 +143,15 @@ void __new_context(struct mm_struct *mm)
 	 * to start a new version and flush the TLB.
 	 */
 	if (unlikely((asid & ~ASID_MASK) == 0)) {
-		asid = ++cpu_last_asid;
-		/* set the reserved ASID before flushing the TLB */
-		asm("mcr	p15, 0, %0, c13, c0, 1	@ set reserved context ID\n"
-		    :
-		    : "r" (0));
-		isb();
-		flush_tlb_all();
-		if (icache_is_vivt_asid_tagged()) {
-			__flush_icache_all();
-			dsb();
-		}
+		asid = cpu_last_asid + smp_processor_id() + 1;
+		flush_context();
+#ifdef CONFIG_SMP
+		smp_wmb();
+		smp_call_function(reset_context, NULL, 1);
+#endif
+		cpu_last_asid += NR_CPUS;
 	}
-	spin_unlock(&cpu_asid_lock);
 
-	cpumask_copy(mm_cpumask(mm), cpumask_of(smp_processor_id()));
-	mm->context.id = asid;
+	set_mm_context(mm, asid);
+	spin_unlock(&cpu_asid_lock);
 }
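The recurring test ((mm->context.id ^ cpu_last_asid) >> ASID_BITS) compares only the generation ("version") bits of the ASID: the low ASID_BITS identify the context in hardware, the high bits count rollovers. A small sketch of the check (on ARMv6/v7 ASID_BITS is 8, so the hardware ASID is the low byte; treat the exact widths as an assumption to verify against asm/mmu_context.h):

    /* Sketch: the high bits of context.id act as a generation counter. */
    #define ASID_BITS	8	/* assumed; per arch/arm/include/asm/mmu_context.h */

    static int asid_is_stale(unsigned int id, unsigned int last_asid)
    {
    	/*
    	 * Non-zero iff the generation bits differ, i.e. this mm's ASID
    	 * was handed out before the most recent rollover and must not
    	 * be loaded into the CPU as-is.
    	 */
    	return (id ^ last_asid) >> ASID_BITS;
    }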
diff --git a/arch/arm/mm/copypage-feroceon.c b/arch/arm/mm/copypage-feroceon.c
index 70997d5bee2d..5eb4fd93893d 100644
--- a/arch/arm/mm/copypage-feroceon.c
+++ b/arch/arm/mm/copypage-feroceon.c
@@ -68,12 +68,13 @@ feroceon_copy_user_page(void *kto, const void *kfrom)
 }
 
 void feroceon_copy_user_highpage(struct page *to, struct page *from,
-	unsigned long vaddr)
+	unsigned long vaddr, struct vm_area_struct *vma)
 {
 	void *kto, *kfrom;
 
 	kto = kmap_atomic(to, KM_USER0);
 	kfrom = kmap_atomic(from, KM_USER1);
+	flush_cache_page(vma, vaddr, page_to_pfn(from));
 	feroceon_copy_user_page(kto, kfrom);
 	kunmap_atomic(kfrom, KM_USER1);
 	kunmap_atomic(kto, KM_USER0);
diff --git a/arch/arm/mm/copypage-v3.c b/arch/arm/mm/copypage-v3.c
index de9c06854ad7..f72303e1d804 100644
--- a/arch/arm/mm/copypage-v3.c
+++ b/arch/arm/mm/copypage-v3.c
@@ -38,7 +38,7 @@ v3_copy_user_page(void *kto, const void *kfrom)
 }
 
 void v3_copy_user_highpage(struct page *to, struct page *from,
-	unsigned long vaddr)
+	unsigned long vaddr, struct vm_area_struct *vma)
 {
 	void *kto, *kfrom;
 
diff --git a/arch/arm/mm/copypage-v4mc.c b/arch/arm/mm/copypage-v4mc.c
index 7370a7142b04..598c51ad5071 100644
--- a/arch/arm/mm/copypage-v4mc.c
+++ b/arch/arm/mm/copypage-v4mc.c
@@ -69,7 +69,7 @@ mc_copy_user_page(void *from, void *to)
 }
 
 void v4_mc_copy_user_highpage(struct page *to, struct page *from,
-	unsigned long vaddr)
+	unsigned long vaddr, struct vm_area_struct *vma)
 {
 	void *kto = kmap_atomic(to, KM_USER1);
 
diff --git a/arch/arm/mm/copypage-v4wb.c b/arch/arm/mm/copypage-v4wb.c
index 9ab098414227..7c2eb55cd4a9 100644
--- a/arch/arm/mm/copypage-v4wb.c
+++ b/arch/arm/mm/copypage-v4wb.c
@@ -48,12 +48,13 @@ v4wb_copy_user_page(void *kto, const void *kfrom)
 }
 
 void v4wb_copy_user_highpage(struct page *to, struct page *from,
-	unsigned long vaddr)
+	unsigned long vaddr, struct vm_area_struct *vma)
 {
 	void *kto, *kfrom;
 
 	kto = kmap_atomic(to, KM_USER0);
 	kfrom = kmap_atomic(from, KM_USER1);
+	flush_cache_page(vma, vaddr, page_to_pfn(from));
 	v4wb_copy_user_page(kto, kfrom);
 	kunmap_atomic(kfrom, KM_USER1);
 	kunmap_atomic(kto, KM_USER0);
diff --git a/arch/arm/mm/copypage-v4wt.c b/arch/arm/mm/copypage-v4wt.c
index 300efafd6643..172e6a55458e 100644
--- a/arch/arm/mm/copypage-v4wt.c
+++ b/arch/arm/mm/copypage-v4wt.c
@@ -44,7 +44,7 @@ v4wt_copy_user_page(void *kto, const void *kfrom)
 }
 
 void v4wt_copy_user_highpage(struct page *to, struct page *from,
-	unsigned long vaddr)
+	unsigned long vaddr, struct vm_area_struct *vma)
 {
 	void *kto, *kfrom;
 
diff --git a/arch/arm/mm/copypage-v6.c b/arch/arm/mm/copypage-v6.c
index 0fa1319273de..8bca4dea6dfa 100644
--- a/arch/arm/mm/copypage-v6.c
+++ b/arch/arm/mm/copypage-v6.c
@@ -34,7 +34,7 @@ static DEFINE_SPINLOCK(v6_lock);
  * attack the kernel's existing mapping of these pages.
  */
 static void v6_copy_user_highpage_nonaliasing(struct page *to,
-	struct page *from, unsigned long vaddr)
+	struct page *from, unsigned long vaddr, struct vm_area_struct *vma)
 {
 	void *kto, *kfrom;
 
@@ -81,7 +81,7 @@ static void discard_old_kernel_data(void *kto)
  * Copy the page, taking account of the cache colour.
  */
 static void v6_copy_user_highpage_aliasing(struct page *to,
-	struct page *from, unsigned long vaddr)
+	struct page *from, unsigned long vaddr, struct vm_area_struct *vma)
 {
 	unsigned int offset = CACHE_COLOUR(vaddr);
 	unsigned long kfrom, kto;
diff --git a/arch/arm/mm/copypage-xsc3.c b/arch/arm/mm/copypage-xsc3.c
index bc4525f5ab23..747ad4140fc7 100644
--- a/arch/arm/mm/copypage-xsc3.c
+++ b/arch/arm/mm/copypage-xsc3.c
@@ -71,12 +71,13 @@ xsc3_mc_copy_user_page(void *kto, const void *kfrom)
 }
 
 void xsc3_mc_copy_user_highpage(struct page *to, struct page *from,
-	unsigned long vaddr)
+	unsigned long vaddr, struct vm_area_struct *vma)
 {
 	void *kto, *kfrom;
 
 	kto = kmap_atomic(to, KM_USER0);
 	kfrom = kmap_atomic(from, KM_USER1);
+	flush_cache_page(vma, vaddr, page_to_pfn(from));
 	xsc3_mc_copy_user_page(kto, kfrom);
 	kunmap_atomic(kfrom, KM_USER1);
 	kunmap_atomic(kto, KM_USER0);
diff --git a/arch/arm/mm/copypage-xscale.c b/arch/arm/mm/copypage-xscale.c
index 76824d3e966a..9920c0ae2096 100644
--- a/arch/arm/mm/copypage-xscale.c
+++ b/arch/arm/mm/copypage-xscale.c
@@ -91,7 +91,7 @@ mc_copy_user_page(void *from, void *to)
 }
 
 void xscale_mc_copy_user_highpage(struct page *to, struct page *from,
-	unsigned long vaddr)
+	unsigned long vaddr, struct vm_area_struct *vma)
 {
 	void *kto = kmap_atomic(to, KM_USER1);
 
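All of the copy_user_highpage implementations above gain a struct vm_area_struct * argument; the VIVT write-back variants use it to flush_cache_page() the source page's user mapping before copying, so the kernel-side read sees current data. The method table they plug into, as it should look after this series (assumed to live in arch/arm/include/asm/page.h; verify against the tree):

    struct cpu_user_fns {
    	void (*cpu_clear_user_highpage)(struct page *page, unsigned long vaddr);
    	void (*cpu_copy_user_highpage)(struct page *to, struct page *from,
    			unsigned long vaddr, struct vm_area_struct *vma);
    };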
diff --git a/arch/arm/mm/dma-mapping.c b/arch/arm/mm/dma-mapping.c
index 26325cb5d368..0da7eccf7749 100644
--- a/arch/arm/mm/dma-mapping.c
+++ b/arch/arm/mm/dma-mapping.c
@@ -29,9 +29,6 @@
 #error "CONSISTENT_DMA_SIZE must be multiple of 2MiB"
 #endif
 
-#define CONSISTENT_END	(0xffe00000)
-#define CONSISTENT_BASE	(CONSISTENT_END - CONSISTENT_DMA_SIZE)
-
 #define CONSISTENT_OFFSET(x)	(((unsigned long)(x) - CONSISTENT_BASE) >> PAGE_SHIFT)
 #define CONSISTENT_PTE_INDEX(x) (((unsigned long)(x) - CONSISTENT_BASE) >> PGDIR_SHIFT)
 #define NUM_CONSISTENT_PTES (CONSISTENT_DMA_SIZE >> PGDIR_SHIFT)
@@ -404,78 +401,44 @@ EXPORT_SYMBOL(dma_free_coherent);
  * platforms with CONFIG_DMABOUNCE.
  * Use the driver DMA support - see dma-mapping.h (dma_sync_*)
  */
-void dma_cache_maint(const void *start, size_t size, int direction)
+void ___dma_single_cpu_to_dev(const void *kaddr, size_t size,
+	enum dma_data_direction dir)
 {
-	void (*inner_op)(const void *, const void *);
-	void (*outer_op)(unsigned long, unsigned long);
-
-	BUG_ON(!virt_addr_valid(start) || !virt_addr_valid(start + size - 1));
-
-	switch (direction) {
-	case DMA_FROM_DEVICE:		/* invalidate only */
-		inner_op = dmac_inv_range;
-		outer_op = outer_inv_range;
-		break;
-	case DMA_TO_DEVICE:		/* writeback only */
-		inner_op = dmac_clean_range;
-		outer_op = outer_clean_range;
-		break;
-	case DMA_BIDIRECTIONAL:		/* writeback and invalidate */
-		inner_op = dmac_flush_range;
-		outer_op = outer_flush_range;
-		break;
-	default:
-		BUG();
-	}
+	unsigned long paddr;
+
+	BUG_ON(!virt_addr_valid(kaddr) || !virt_addr_valid(kaddr + size - 1));
 
-	inner_op(start, start + size);
-	outer_op(__pa(start), __pa(start) + size);
+	dmac_map_area(kaddr, size, dir);
+
+	paddr = __pa(kaddr);
+	if (dir == DMA_FROM_DEVICE) {
+		outer_inv_range(paddr, paddr + size);
+	} else {
+		outer_clean_range(paddr, paddr + size);
+	}
+	/* FIXME: non-speculating: flush on bidirectional mappings? */
 }
-EXPORT_SYMBOL(dma_cache_maint);
+EXPORT_SYMBOL(___dma_single_cpu_to_dev);
 
-static void dma_cache_maint_contiguous(struct page *page, unsigned long offset,
-	size_t size, int direction)
+void ___dma_single_dev_to_cpu(const void *kaddr, size_t size,
+	enum dma_data_direction dir)
 {
-	void *vaddr;
-	unsigned long paddr;
-	void (*inner_op)(const void *, const void *);
-	void (*outer_op)(unsigned long, unsigned long);
-
-	switch (direction) {
-	case DMA_FROM_DEVICE:		/* invalidate only */
-		inner_op = dmac_inv_range;
-		outer_op = outer_inv_range;
-		break;
-	case DMA_TO_DEVICE:		/* writeback only */
-		inner_op = dmac_clean_range;
-		outer_op = outer_clean_range;
-		break;
-	case DMA_BIDIRECTIONAL:		/* writeback and invalidate */
-		inner_op = dmac_flush_range;
-		outer_op = outer_flush_range;
-		break;
-	default:
-		BUG();
-	}
+	BUG_ON(!virt_addr_valid(kaddr) || !virt_addr_valid(kaddr + size - 1));
 
-	if (!PageHighMem(page)) {
-		vaddr = page_address(page) + offset;
-		inner_op(vaddr, vaddr + size);
-	} else {
-		vaddr = kmap_high_get(page);
-		if (vaddr) {
-			vaddr += offset;
-			inner_op(vaddr, vaddr + size);
-			kunmap_high(page);
-		}
+	/* FIXME: non-speculating: not required */
+	/* don't bother invalidating if DMA to device */
+	if (dir != DMA_TO_DEVICE) {
+		unsigned long paddr = __pa(kaddr);
+		outer_inv_range(paddr, paddr + size);
 	}
 
-	paddr = page_to_phys(page) + offset;
-	outer_op(paddr, paddr + size);
+	dmac_unmap_area(kaddr, size, dir);
 }
+EXPORT_SYMBOL(___dma_single_dev_to_cpu);
 
-void dma_cache_maint_page(struct page *page, unsigned long offset,
-	size_t size, int dir)
+static void dma_cache_maint_page(struct page *page, unsigned long offset,
+	size_t size, enum dma_data_direction dir,
+	void (*op)(const void *, size_t, int))
 {
 	/*
 	 * A single sg entry may refer to multiple physically contiguous
@@ -486,20 +449,62 @@ void dma_cache_maint_page(struct page *page, unsigned long offset,
 	size_t left = size;
 	do {
 		size_t len = left;
-		if (PageHighMem(page) && len + offset > PAGE_SIZE) {
-			if (offset >= PAGE_SIZE) {
-				page += offset / PAGE_SIZE;
-				offset %= PAGE_SIZE;
+		void *vaddr;
+
+		if (PageHighMem(page)) {
+			if (len + offset > PAGE_SIZE) {
+				if (offset >= PAGE_SIZE) {
+					page += offset / PAGE_SIZE;
+					offset %= PAGE_SIZE;
+				}
+				len = PAGE_SIZE - offset;
 			}
-			len = PAGE_SIZE - offset;
+			vaddr = kmap_high_get(page);
+			if (vaddr) {
+				vaddr += offset;
+				op(vaddr, len, dir);
+				kunmap_high(page);
+			}
+		} else {
+			vaddr = page_address(page) + offset;
+			op(vaddr, len, dir);
 		}
-		dma_cache_maint_contiguous(page, offset, len, dir);
 		offset = 0;
 		page++;
 		left -= len;
 	} while (left);
 }
-EXPORT_SYMBOL(dma_cache_maint_page);
+
+void ___dma_page_cpu_to_dev(struct page *page, unsigned long off,
+	size_t size, enum dma_data_direction dir)
+{
+	unsigned long paddr;
+
+	dma_cache_maint_page(page, off, size, dir, dmac_map_area);
+
+	paddr = page_to_phys(page) + off;
+	if (dir == DMA_FROM_DEVICE) {
+		outer_inv_range(paddr, paddr + size);
+	} else {
+		outer_clean_range(paddr, paddr + size);
+	}
+	/* FIXME: non-speculating: flush on bidirectional mappings? */
+}
+EXPORT_SYMBOL(___dma_page_cpu_to_dev);
+
+void ___dma_page_dev_to_cpu(struct page *page, unsigned long off,
+	size_t size, enum dma_data_direction dir)
+{
+	unsigned long paddr = page_to_phys(page) + off;
+
+	/* FIXME: non-speculating: not required */
+	/* don't bother invalidating if DMA to device */
+	if (dir != DMA_TO_DEVICE)
+		outer_inv_range(paddr, paddr + size);
+
+	dma_cache_maint_page(page, off, size, dir, dmac_unmap_area);
+}
+EXPORT_SYMBOL(___dma_page_dev_to_cpu);
 
 /**
  * dma_map_sg - map a set of SG buffers for streaming mode DMA
@@ -573,8 +578,12 @@ void dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg,
 	int i;
 
 	for_each_sg(sg, s, nents, i) {
-		dmabounce_sync_for_cpu(dev, sg_dma_address(s), 0,
-			sg_dma_len(s), dir);
+		if (!dmabounce_sync_for_cpu(dev, sg_dma_address(s), 0,
+					    sg_dma_len(s), dir))
+			continue;
+
+		__dma_page_dev_to_cpu(sg_page(s), s->offset,
+				      s->length, dir);
 	}
 }
 EXPORT_SYMBOL(dma_sync_sg_for_cpu);
@@ -597,9 +606,8 @@ void dma_sync_sg_for_device(struct device *dev, struct scatterlist *sg,
 			sg_dma_len(s), dir))
 			continue;
 
-		if (!arch_is_coherent())
-			dma_cache_maint_page(sg_page(s), s->offset,
-				s->length, dir);
+		__dma_page_cpu_to_dev(sg_page(s), s->offset,
+				      s->length, dir);
 	}
 }
 EXPORT_SYMBOL(dma_sync_sg_for_device);
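These ___dma_*_cpu_to_dev/___dma_*_dev_to_cpu helpers are the arch backend for the generic streaming DMA calls a driver makes; the ownership handoff they implement looks like this from the driver side. A minimal sketch with hypothetical device/buffer names:

    /* Minimal streaming-DMA sketch; dev, buf, len, start_device_dma()
     * and the completion are all hypothetical placeholders. */
    dma_addr_t handle = dma_map_single(dev, buf, len, DMA_FROM_DEVICE);
    if (dma_mapping_error(dev, handle))
    	return -ENOMEM;

    start_device_dma(dev, handle, len);	/* device now owns the buffer */
    wait_for_completion(&done);

    /*
     * Hand the buffer back to the CPU: on ARM this path ends up in
     * ___dma_page_dev_to_cpu(), invalidating any speculative fills.
     */
    dma_unmap_single(dev, handle, len, DMA_FROM_DEVICE);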
diff --git a/arch/arm/mm/fault-armv.c b/arch/arm/mm/fault-armv.c index 56ee15321b00..c9b97e9836a2 100644 --- a/arch/arm/mm/fault-armv.c +++ b/arch/arm/mm/fault-armv.c | |||
@@ -36,28 +36,12 @@ static unsigned long shared_pte_mask = L_PTE_MT_BUFFERABLE; | |||
36 | * Therefore those configurations which might call adjust_pte (those | 36 | * Therefore those configurations which might call adjust_pte (those |
37 | * without CONFIG_CPU_CACHE_VIPT) cannot support split page_table_lock. | 37 | * without CONFIG_CPU_CACHE_VIPT) cannot support split page_table_lock. |
38 | */ | 38 | */ |
39 | static int adjust_pte(struct vm_area_struct *vma, unsigned long address) | 39 | static int do_adjust_pte(struct vm_area_struct *vma, unsigned long address, |
40 | unsigned long pfn, pte_t *ptep) | ||
40 | { | 41 | { |
41 | pgd_t *pgd; | 42 | pte_t entry = *ptep; |
42 | pmd_t *pmd; | ||
43 | pte_t *pte, entry; | ||
44 | int ret; | 43 | int ret; |
45 | 44 | ||
46 | pgd = pgd_offset(vma->vm_mm, address); | ||
47 | if (pgd_none(*pgd)) | ||
48 | goto no_pgd; | ||
49 | if (pgd_bad(*pgd)) | ||
50 | goto bad_pgd; | ||
51 | |||
52 | pmd = pmd_offset(pgd, address); | ||
53 | if (pmd_none(*pmd)) | ||
54 | goto no_pmd; | ||
55 | if (pmd_bad(*pmd)) | ||
56 | goto bad_pmd; | ||
57 | |||
58 | pte = pte_offset_map(pmd, address); | ||
59 | entry = *pte; | ||
60 | |||
61 | /* | 45 | /* |
62 | * If this page is present, it's actually being shared. | 46 | * If this page is present, it's actually being shared. |
63 | */ | 47 | */ |
@@ -68,33 +52,55 @@ static int adjust_pte(struct vm_area_struct *vma, unsigned long address) | |||
68 | * fault (ie, is old), we can safely ignore any issues. | 52 | * fault (ie, is old), we can safely ignore any issues. |
69 | */ | 53 | */ |
70 | if (ret && (pte_val(entry) & L_PTE_MT_MASK) != shared_pte_mask) { | 54 | if (ret && (pte_val(entry) & L_PTE_MT_MASK) != shared_pte_mask) { |
71 | unsigned long pfn = pte_pfn(entry); | ||
72 | flush_cache_page(vma, address, pfn); | 55 | flush_cache_page(vma, address, pfn); |
73 | outer_flush_range((pfn << PAGE_SHIFT), | 56 | outer_flush_range((pfn << PAGE_SHIFT), |
74 | (pfn << PAGE_SHIFT) + PAGE_SIZE); | 57 | (pfn << PAGE_SHIFT) + PAGE_SIZE); |
75 | pte_val(entry) &= ~L_PTE_MT_MASK; | 58 | pte_val(entry) &= ~L_PTE_MT_MASK; |
76 | pte_val(entry) |= shared_pte_mask; | 59 | pte_val(entry) |= shared_pte_mask; |
77 | set_pte_at(vma->vm_mm, address, pte, entry); | 60 | set_pte_at(vma->vm_mm, address, ptep, entry); |
78 | flush_tlb_page(vma, address); | 61 | flush_tlb_page(vma, address); |
79 | } | 62 | } |
80 | pte_unmap(pte); | 63 | |
81 | return ret; | 64 | return ret; |
65 | } | ||
66 | |||
67 | static int adjust_pte(struct vm_area_struct *vma, unsigned long address, | ||
68 | unsigned long pfn) | ||
69 | { | ||
70 | spinlock_t *ptl; | ||
71 | pgd_t *pgd; | ||
72 | pmd_t *pmd; | ||
73 | pte_t *pte; | ||
74 | int ret; | ||
75 | |||
76 | pgd = pgd_offset(vma->vm_mm, address); | ||
77 | if (pgd_none_or_clear_bad(pgd)) | ||
78 | return 0; | ||
79 | |||
80 | pmd = pmd_offset(pgd, address); | ||
81 | if (pmd_none_or_clear_bad(pmd)) | ||
82 | return 0; | ||
82 | 83 | ||
83 | bad_pgd: | 84 | /* |
84 | pgd_ERROR(*pgd); | 85 | * This is called while another page table is mapped, so we |
85 | pgd_clear(pgd); | 86 | * must use the nested version. This also means we need to |
86 | no_pgd: | 87 | * open-code the spin-locking. |
87 | return 0; | 88 | */ |
88 | 89 | ptl = pte_lockptr(vma->vm_mm, pmd); | |
89 | bad_pmd: | 90 | pte = pte_offset_map_nested(pmd, address); |
90 | pmd_ERROR(*pmd); | 91 | spin_lock(ptl); |
91 | pmd_clear(pmd); | 92 | |
92 | no_pmd: | 93 | ret = do_adjust_pte(vma, address, pfn, pte); |
93 | return 0; | 94 | |
95 | spin_unlock(ptl); | ||
96 | pte_unmap_nested(pte); | ||
97 | |||
98 | return ret; | ||
94 | } | 99 | } |
95 | 100 | ||
96 | static void | 101 | static void |
97 | make_coherent(struct address_space *mapping, struct vm_area_struct *vma, unsigned long addr, unsigned long pfn) | 102 | make_coherent(struct address_space *mapping, struct vm_area_struct *vma, |
103 | unsigned long addr, pte_t *ptep, unsigned long pfn) | ||
98 | { | 104 | { |
99 | struct mm_struct *mm = vma->vm_mm; | 105 | struct mm_struct *mm = vma->vm_mm; |
100 | struct vm_area_struct *mpnt; | 106 | struct vm_area_struct *mpnt; |
@@ -122,11 +128,11 @@ make_coherent(struct address_space *mapping, struct vm_area_struct *vma, unsigne | |||
122 | if (!(mpnt->vm_flags & VM_MAYSHARE)) | 128 | if (!(mpnt->vm_flags & VM_MAYSHARE)) |
123 | continue; | 129 | continue; |
124 | offset = (pgoff - mpnt->vm_pgoff) << PAGE_SHIFT; | 130 | offset = (pgoff - mpnt->vm_pgoff) << PAGE_SHIFT; |
125 | aliases += adjust_pte(mpnt, mpnt->vm_start + offset); | 131 | aliases += adjust_pte(mpnt, mpnt->vm_start + offset, pfn); |
126 | } | 132 | } |
127 | flush_dcache_mmap_unlock(mapping); | 133 | flush_dcache_mmap_unlock(mapping); |
128 | if (aliases) | 134 | if (aliases) |
129 | adjust_pte(vma, addr); | 135 | do_adjust_pte(vma, addr, pfn, ptep); |
130 | else | 136 | else |
131 | flush_cache_page(vma, addr, pfn); | 137 | flush_cache_page(vma, addr, pfn); |
132 | } | 138 | } |
@@ -144,9 +150,10 @@ make_coherent(struct address_space *mapping, struct vm_area_struct *vma, unsigne | |||
144 | * | 150 | * |
145 | * Note that the pte lock will be held. | 151 | * Note that the pte lock will be held. |
146 | */ | 152 | */ |
147 | void update_mmu_cache(struct vm_area_struct *vma, unsigned long addr, pte_t pte) | 153 | void update_mmu_cache(struct vm_area_struct *vma, unsigned long addr, |
154 | pte_t *ptep) | ||
148 | { | 155 | { |
149 | unsigned long pfn = pte_pfn(pte); | 156 | unsigned long pfn = pte_pfn(*ptep); |
150 | struct address_space *mapping; | 157 | struct address_space *mapping; |
151 | struct page *page; | 158 | struct page *page; |
152 | 159 | ||
@@ -168,7 +175,7 @@ void update_mmu_cache(struct vm_area_struct *vma, unsigned long addr, pte_t pte) | |||
168 | #endif | 175 | #endif |
169 | if (mapping) { | 176 | if (mapping) { |
170 | if (cache_is_vivt()) | 177 | if (cache_is_vivt()) |
171 | make_coherent(mapping, vma, addr, pfn); | 178 | make_coherent(mapping, vma, addr, ptep, pfn); |
172 | else if (vma->vm_flags & VM_EXEC) | 179 | else if (vma->vm_flags & VM_EXEC) |
173 | __flush_icache_all(); | 180 | __flush_icache_all(); |
174 | } | 181 | } |
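The fault-armv.c hunk above splits the old adjust_pte() in two: do_adjust_pte() performs the actual PTE rewrite, while adjust_pte() does the page-table walk, replacing the goto-label error handling with pgd_none_or_clear_bad()/pmd_none_or_clear_bad(). Because update_mmu_cache() runs with the faulting PTE already mapped (a second kmap on highpte kernels), the walk must map the alias's PTE with the nested variant and open-code the PTE spinlock. The same walk, restated as a kernel-context sketch with explanatory comments added:

```c
static int adjust_pte(struct vm_area_struct *vma, unsigned long address,
		      unsigned long pfn)
{
	spinlock_t *ptl;
	pgd_t *pgd;
	pmd_t *pmd;
	pte_t *pte;
	int ret;

	pgd = pgd_offset(vma->vm_mm, address);
	if (pgd_none_or_clear_bad(pgd))		/* logs and clears a bad entry */
		return 0;

	pmd = pmd_offset(pgd, address);
	if (pmd_none_or_clear_bad(pmd))
		return 0;

	/*
	 * A PTE in another page table is touched while the faulting PTE
	 * is still mapped, so use the second kmap slot
	 * (pte_offset_map_nested) and take the page-table lock by hand
	 * instead of via pte_offset_map_lock().
	 */
	ptl = pte_lockptr(vma->vm_mm, pmd);
	pte = pte_offset_map_nested(pmd, address);
	spin_lock(ptl);

	ret = do_adjust_pte(vma, address, pfn, pte);	/* rewrite under lock */

	spin_unlock(ptl);
	pte_unmap_nested(pte);

	return ret;
}
```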
diff --git a/arch/arm/mm/fault.c b/arch/arm/mm/fault.c index 10e06801afb3..9d40c341e07e 100644 --- a/arch/arm/mm/fault.c +++ b/arch/arm/mm/fault.c | |||
@@ -18,6 +18,7 @@ | |||
18 | #include <linux/page-flags.h> | 18 | #include <linux/page-flags.h> |
19 | #include <linux/sched.h> | 19 | #include <linux/sched.h> |
20 | #include <linux/highmem.h> | 20 | #include <linux/highmem.h> |
21 | #include <linux/perf_event.h> | ||
21 | 22 | ||
22 | #include <asm/system.h> | 23 | #include <asm/system.h> |
23 | #include <asm/pgtable.h> | 24 | #include <asm/pgtable.h> |
@@ -302,6 +303,12 @@ do_page_fault(unsigned long addr, unsigned int fsr, struct pt_regs *regs) | |||
302 | fault = __do_page_fault(mm, addr, fsr, tsk); | 303 | fault = __do_page_fault(mm, addr, fsr, tsk); |
303 | up_read(&mm->mmap_sem); | 304 | up_read(&mm->mmap_sem); |
304 | 305 | ||
306 | perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS, 1, 0, regs, addr); | ||
307 | if (fault & VM_FAULT_MAJOR) | ||
308 | perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MAJ, 1, 0, regs, addr); | ||
309 | else if (fault & VM_FAULT_MINOR) | ||
310 | perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MIN, 1, 0, regs, addr); | ||
311 | |||
305 | /* | 312 | /* |
306 | * Handle the "normal" case first - VM_FAULT_MAJOR / VM_FAULT_MINOR | 313 | * Handle the "normal" case first - VM_FAULT_MAJOR / VM_FAULT_MINOR |
307 | */ | 314 | */ |
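The fault.c hunk wires the ARM fault handler into the kernel's software perf counters: every fault bumps PERF_COUNT_SW_PAGE_FAULTS, and exactly one of the _MAJ/_MIN counters is bumped depending on the VM_FAULT_* result. A minimal, hypothetical user-space consumer of these counters via perf_event_open(2) might look like this (the workload and error handling are placeholders):

```c
#include <linux/perf_event.h>
#include <sys/syscall.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>

int main(void)
{
	struct perf_event_attr attr;
	long long count;
	int fd;

	memset(&attr, 0, sizeof(attr));
	attr.size = sizeof(attr);
	attr.type = PERF_TYPE_SOFTWARE;
	attr.config = PERF_COUNT_SW_PAGE_FAULTS;  /* or ..._MAJ / ..._MIN */

	/* measure this process (pid 0) on any CPU (-1) */
	fd = syscall(__NR_perf_event_open, &attr, 0, -1, -1, 0);
	if (fd < 0)
		return 1;

	/* ... run the workload to be measured ... */

	read(fd, &count, sizeof(count));
	printf("page faults: %lld\n", count);
	close(fd);
	return 0;
}
```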
diff --git a/arch/arm/mm/flush.c b/arch/arm/mm/flush.c index 6f3a4b7a3b82..e34f095e2090 100644 --- a/arch/arm/mm/flush.c +++ b/arch/arm/mm/flush.c | |||
@@ -13,6 +13,7 @@ | |||
13 | 13 | ||
14 | #include <asm/cacheflush.h> | 14 | #include <asm/cacheflush.h> |
15 | #include <asm/cachetype.h> | 15 | #include <asm/cachetype.h> |
16 | #include <asm/smp_plat.h> | ||
16 | #include <asm/system.h> | 17 | #include <asm/system.h> |
17 | #include <asm/tlbflush.h> | 18 | #include <asm/tlbflush.h> |
18 | 19 | ||
@@ -87,13 +88,26 @@ void flush_cache_page(struct vm_area_struct *vma, unsigned long user_addr, unsig | |||
87 | if (vma->vm_flags & VM_EXEC && icache_is_vivt_asid_tagged()) | 88 | if (vma->vm_flags & VM_EXEC && icache_is_vivt_asid_tagged()) |
88 | __flush_icache_all(); | 89 | __flush_icache_all(); |
89 | } | 90 | } |
91 | #else | ||
92 | #define flush_pfn_alias(pfn,vaddr) do { } while (0) | ||
93 | #endif | ||
90 | 94 | ||
95 | #ifdef CONFIG_SMP | ||
96 | static void flush_ptrace_access_other(void *args) | ||
97 | { | ||
98 | __flush_icache_all(); | ||
99 | } | ||
100 | #endif | ||
101 | |||
102 | static | ||
91 | void flush_ptrace_access(struct vm_area_struct *vma, struct page *page, | 103 | void flush_ptrace_access(struct vm_area_struct *vma, struct page *page, |
92 | unsigned long uaddr, void *kaddr, | 104 | unsigned long uaddr, void *kaddr, unsigned long len) |
93 | unsigned long len, int write) | ||
94 | { | 105 | { |
95 | if (cache_is_vivt()) { | 106 | if (cache_is_vivt()) { |
96 | vivt_flush_ptrace_access(vma, page, uaddr, kaddr, len, write); | 107 | if (cpumask_test_cpu(smp_processor_id(), mm_cpumask(vma->vm_mm))) { |
108 | unsigned long addr = (unsigned long)kaddr; | ||
109 | __cpuc_coherent_kern_range(addr, addr + len); | ||
110 | } | ||
97 | return; | 111 | return; |
98 | } | 112 | } |
99 | 113 | ||
@@ -104,16 +118,37 @@ void flush_ptrace_access(struct vm_area_struct *vma, struct page *page, | |||
104 | } | 118 | } |
105 | 119 | ||
106 | /* VIPT non-aliasing cache */ | 120 | /* VIPT non-aliasing cache */ |
107 | if (cpumask_test_cpu(smp_processor_id(), mm_cpumask(vma->vm_mm)) && | 121 | if (vma->vm_flags & VM_EXEC) { |
108 | vma->vm_flags & VM_EXEC) { | ||
109 | unsigned long addr = (unsigned long)kaddr; | 122 | unsigned long addr = (unsigned long)kaddr; |
110 | /* only flushing the kernel mapping on non-aliasing VIPT */ | ||
111 | __cpuc_coherent_kern_range(addr, addr + len); | 123 | __cpuc_coherent_kern_range(addr, addr + len); |
124 | #ifdef CONFIG_SMP | ||
125 | if (cache_ops_need_broadcast()) | ||
126 | smp_call_function(flush_ptrace_access_other, | ||
127 | NULL, 1); | ||
128 | #endif | ||
112 | } | 129 | } |
113 | } | 130 | } |
114 | #else | 131 | |
115 | #define flush_pfn_alias(pfn,vaddr) do { } while (0) | 132 | /* |
133 | * Copy user data from/to a page which is mapped into a different | ||
134 | * process's address space. Really, we want to allow our "user ||
135 | * space" model to handle this. | ||
136 | * | ||
137 | * Note that this code needs to run on the current CPU. | ||
138 | */ | ||
139 | void copy_to_user_page(struct vm_area_struct *vma, struct page *page, | ||
140 | unsigned long uaddr, void *dst, const void *src, | ||
141 | unsigned long len) | ||
142 | { | ||
143 | #ifdef CONFIG_SMP | ||
144 | preempt_disable(); | ||
116 | #endif | 145 | #endif |
146 | memcpy(dst, src, len); | ||
147 | flush_ptrace_access(vma, page, uaddr, dst, len); | ||
148 | #ifdef CONFIG_SMP | ||
149 | preempt_enable(); | ||
150 | #endif | ||
151 | } | ||
117 | 152 | ||
118 | void __flush_dcache_page(struct address_space *mapping, struct page *page) | 153 | void __flush_dcache_page(struct address_space *mapping, struct page *page) |
119 | { | 154 | { |
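The flush.c changes make flush_ptrace_access() static and route it through a new ARM-specific copy_to_user_page(): the cache maintenance must run on the CPU that performed the write, hence the preempt_disable()/preempt_enable() pair on SMP, and on SMP cores whose cache operations are not broadcast in hardware the I-cache invalidation is pushed to the other CPUs with smp_call_function(). The same pattern in a condensed kernel-context sketch (coherent_write() is an invented name; the helpers are the ones used in the diff):

```c
static void remote_icache_flush(void *unused)
{
	__flush_icache_all();			/* runs on each other CPU */
}

static void coherent_write(void *dst, const void *src, size_t len)
{
	preempt_disable();			/* pin to the writing CPU */
	memcpy(dst, src, len);
	__cpuc_coherent_kern_range((unsigned long)dst,
				   (unsigned long)dst + len);
	if (cache_ops_need_broadcast())		/* hardware won't broadcast */
		smp_call_function(remote_icache_flush, NULL, 1);
	preempt_enable();
}
```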
diff --git a/arch/arm/mm/init.c b/arch/arm/mm/init.c index a04ffbbbe253..7829cb5425f5 100644 --- a/arch/arm/mm/init.c +++ b/arch/arm/mm/init.c | |||
@@ -23,6 +23,7 @@ | |||
23 | #include <asm/setup.h> | 23 | #include <asm/setup.h> |
24 | #include <asm/sizes.h> | 24 | #include <asm/sizes.h> |
25 | #include <asm/tlb.h> | 25 | #include <asm/tlb.h> |
26 | #include <asm/fixmap.h> | ||
26 | 27 | ||
27 | #include <asm/mach/arch.h> | 28 | #include <asm/mach/arch.h> |
28 | #include <asm/mach/map.h> | 29 | #include <asm/mach/map.h> |
@@ -32,19 +33,21 @@ | |||
32 | static unsigned long phys_initrd_start __initdata = 0; | 33 | static unsigned long phys_initrd_start __initdata = 0; |
33 | static unsigned long phys_initrd_size __initdata = 0; | 34 | static unsigned long phys_initrd_size __initdata = 0; |
34 | 35 | ||
35 | static void __init early_initrd(char **p) | 36 | static int __init early_initrd(char *p) |
36 | { | 37 | { |
37 | unsigned long start, size; | 38 | unsigned long start, size; |
39 | char *endp; | ||
38 | 40 | ||
39 | start = memparse(*p, p); | 41 | start = memparse(p, &endp); |
40 | if (**p == ',') { | 42 | if (*endp == ',') { |
41 | size = memparse((*p) + 1, p); | 43 | size = memparse(endp + 1, NULL); |
42 | 44 | ||
43 | phys_initrd_start = start; | 45 | phys_initrd_start = start; |
44 | phys_initrd_size = size; | 46 | phys_initrd_size = size; |
45 | } | 47 | } |
48 | return 0; | ||
46 | } | 49 | } |
47 | __early_param("initrd=", early_initrd); | 50 | early_param("initrd", early_initrd); |
48 | 51 | ||
49 | static int __init parse_tag_initrd(const struct tag *tag) | 52 | static int __init parse_tag_initrd(const struct tag *tag) |
50 | { | 53 | { |
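This hunk is part of the tree-wide switch from ARM's private __early_param() to the generic early_param(): handlers now receive the text after '=' directly, return an int status, and no longer advance a char ** cursor, so memparse() reports where it stopped through an explicit endp pointer instead. The conversion pattern, shown on a hypothetical "foo=" parameter:

```c
static unsigned long foo_start __initdata;
static unsigned long foo_size __initdata;

static int __init early_foo(char *p)
{
	char *endp;

	foo_start = memparse(p, &endp);		/* accepts K/M/G suffixes */
	if (*endp == ',')
		foo_size = memparse(endp + 1, NULL);
	return 0;				/* 0 = parameter consumed */
}
early_param("foo", early_foo);
```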
@@ -560,7 +563,7 @@ static void __init free_unused_memmap_node(int node, struct meminfo *mi) | |||
560 | */ | 563 | */ |
561 | void __init mem_init(void) | 564 | void __init mem_init(void) |
562 | { | 565 | { |
563 | unsigned int codesize, datasize, initsize; | 566 | unsigned long reserved_pages, free_pages; |
564 | int i, node; | 567 | int i, node; |
565 | 568 | ||
566 | #ifndef CONFIG_DISCONTIGMEM | 569 | #ifndef CONFIG_DISCONTIGMEM |
@@ -596,6 +599,33 @@ void __init mem_init(void) | |||
596 | totalram_pages += totalhigh_pages; | 599 | totalram_pages += totalhigh_pages; |
597 | #endif | 600 | #endif |
598 | 601 | ||
602 | reserved_pages = free_pages = 0; | ||
603 | |||
604 | for_each_online_node(node) { | ||
605 | pg_data_t *n = NODE_DATA(node); | ||
606 | struct page *map = pgdat_page_nr(n, 0) - n->node_start_pfn; | ||
607 | |||
608 | for_each_nodebank(i, &meminfo, node) { | ||
609 | struct membank *bank = &meminfo.bank[i]; | ||
610 | unsigned int pfn1, pfn2; | ||
611 | struct page *page, *end; | ||
612 | |||
613 | pfn1 = bank_pfn_start(bank); | ||
614 | pfn2 = bank_pfn_end(bank); | ||
615 | |||
616 | page = map + pfn1; | ||
617 | end = map + pfn2; | ||
618 | |||
619 | do { | ||
620 | if (PageReserved(page)) | ||
621 | reserved_pages++; | ||
622 | else if (!page_count(page)) | ||
623 | free_pages++; | ||
624 | page++; | ||
625 | } while (page < end); | ||
626 | } | ||
627 | } | ||
628 | |||
599 | /* | 629 | /* |
600 | * Since our memory may not be contiguous, calculate the | 630 | * Since our memory may not be contiguous, calculate the |
601 | * real number of pages we have in this system | 631 | * real number of pages we have in this system |
@@ -608,16 +638,71 @@ void __init mem_init(void) | |||
608 | } | 638 | } |
609 | printk(" = %luMB total\n", num_physpages >> (20 - PAGE_SHIFT)); | 639 | printk(" = %luMB total\n", num_physpages >> (20 - PAGE_SHIFT)); |
610 | 640 | ||
611 | codesize = _etext - _text; | 641 | printk(KERN_NOTICE "Memory: %luk/%luk available, %luk reserved, %luK highmem\n", |
612 | datasize = _end - _data; | 642 | nr_free_pages() << (PAGE_SHIFT-10), |
613 | initsize = __init_end - __init_begin; | 643 | free_pages << (PAGE_SHIFT-10), |
614 | 644 | reserved_pages << (PAGE_SHIFT-10), | |
615 | printk(KERN_NOTICE "Memory: %luKB available (%dK code, " | ||
616 | "%dK data, %dK init, %luK highmem)\n", | ||
617 | nr_free_pages() << (PAGE_SHIFT-10), codesize >> 10, | ||
618 | datasize >> 10, initsize >> 10, | ||
619 | totalhigh_pages << (PAGE_SHIFT-10)); | 645 | totalhigh_pages << (PAGE_SHIFT-10)); |
620 | 646 | ||
647 | #define MLK(b, t) b, t, ((t) - (b)) >> 10 | ||
648 | #define MLM(b, t) b, t, ((t) - (b)) >> 20 | ||
649 | #define MLK_ROUNDUP(b, t) b, t, DIV_ROUND_UP(((t) - (b)), SZ_1K) | ||
650 | |||
651 | printk(KERN_NOTICE "Virtual kernel memory layout:\n" | ||
652 | " vector : 0x%08lx - 0x%08lx (%4ld kB)\n" | ||
653 | " fixmap : 0x%08lx - 0x%08lx (%4ld kB)\n" | ||
654 | #ifdef CONFIG_MMU | ||
655 | " DMA : 0x%08lx - 0x%08lx (%4ld MB)\n" | ||
656 | #endif | ||
657 | " vmalloc : 0x%08lx - 0x%08lx (%4ld MB)\n" | ||
658 | " lowmem : 0x%08lx - 0x%08lx (%4ld MB)\n" | ||
659 | #ifdef CONFIG_HIGHMEM | ||
660 | " pkmap : 0x%08lx - 0x%08lx (%4ld MB)\n" | ||
661 | #endif | ||
662 | " modules : 0x%08lx - 0x%08lx (%4ld MB)\n" | ||
663 | " .init : 0x%p" " - 0x%p" " (%4d kB)\n" | ||
664 | " .text : 0x%p" " - 0x%p" " (%4d kB)\n" | ||
665 | " .data : 0x%p" " - 0x%p" " (%4d kB)\n", | ||
666 | |||
667 | MLK(UL(CONFIG_VECTORS_BASE), UL(CONFIG_VECTORS_BASE) + | ||
668 | (PAGE_SIZE)), | ||
669 | MLK(FIXADDR_START, FIXADDR_TOP), | ||
670 | #ifdef CONFIG_MMU | ||
671 | MLM(CONSISTENT_BASE, CONSISTENT_END), | ||
672 | #endif | ||
673 | MLM(VMALLOC_START, VMALLOC_END), | ||
674 | MLM(PAGE_OFFSET, (unsigned long)high_memory), | ||
675 | #ifdef CONFIG_HIGHMEM | ||
676 | MLM(PKMAP_BASE, (PKMAP_BASE) + (LAST_PKMAP) * | ||
677 | (PAGE_SIZE)), | ||
678 | #endif | ||
679 | MLM(MODULES_VADDR, MODULES_END), | ||
680 | |||
681 | MLK_ROUNDUP(__init_begin, __init_end), | ||
682 | MLK_ROUNDUP(_text, _etext), | ||
683 | MLK_ROUNDUP(_data, _edata)); | ||
684 | |||
685 | #undef MLK | ||
686 | #undef MLM | ||
687 | #undef MLK_ROUNDUP | ||
688 | |||
689 | /* | ||
690 | * Check boundaries twice: Some fundamental inconsistencies can | ||
691 | * be detected at build time already. | ||
692 | */ | ||
693 | #ifdef CONFIG_MMU | ||
694 | BUILD_BUG_ON(VMALLOC_END > CONSISTENT_BASE); | ||
695 | BUG_ON(VMALLOC_END > CONSISTENT_BASE); | ||
696 | |||
697 | BUILD_BUG_ON(TASK_SIZE > MODULES_VADDR); | ||
698 | BUG_ON(TASK_SIZE > MODULES_VADDR); | ||
699 | #endif | ||
700 | |||
701 | #ifdef CONFIG_HIGHMEM | ||
702 | BUILD_BUG_ON(PKMAP_BASE + LAST_PKMAP * PAGE_SIZE > PAGE_OFFSET); | ||
703 | BUG_ON(PKMAP_BASE + LAST_PKMAP * PAGE_SIZE > PAGE_OFFSET); | ||
704 | #endif | ||
705 | |||
621 | if (PAGE_SIZE >= 16384 && num_physpages <= 128) { | 706 | if (PAGE_SIZE >= 16384 && num_physpages <= 128) { |
622 | extern int sysctl_overcommit_memory; | 707 | extern int sysctl_overcommit_memory; |
623 | /* | 708 | /* |
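The new boundary checks in mem_init() are deliberately issued twice: BUILD_BUG_ON() rejects an inconsistent memory layout at compile time whenever both sides are compile-time constants, while the paired BUG_ON() still catches configurations where a limit only becomes known at boot. BUILD_BUG_ON() of this era reduces to a negative array size; a standalone illustration:

```c
#include <stdio.h>

/* True condition => char[-1] => compile error, not a runtime panic. */
#define MY_BUILD_BUG_ON(cond) ((void)sizeof(char[1 - 2 * !!(cond)]))

int main(void)
{
	MY_BUILD_BUG_ON(sizeof(long) < sizeof(int));	/* false: compiles */
	/* MY_BUILD_BUG_ON(1); -- would fail the build */
	printf("layout checks passed at compile time\n");
	return 0;
}
```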
diff --git a/arch/arm/mm/ioremap.c b/arch/arm/mm/ioremap.c index 0ab75c60f7cf..28c8b950ef04 100644 --- a/arch/arm/mm/ioremap.c +++ b/arch/arm/mm/ioremap.c | |||
@@ -139,8 +139,8 @@ void __check_kvm_seq(struct mm_struct *mm) | |||
139 | * which requires the new ioremap'd region to be referenced, the CPU will | 139 | * which requires the new ioremap'd region to be referenced, the CPU will |
140 | * reference the _old_ region. | 140 | * reference the _old_ region. |
141 | * | 141 | * |
142 | * Note that get_vm_area() allocates a guard 4K page, so we need to mask | 142 | * Note that get_vm_area_caller() allocates a guard 4K page, so we need to |
143 | * the size back to 1MB aligned or we will overflow in the loop below. | 143 | * mask the size back to 1MB aligned or we will overflow in the loop below. |
144 | */ | 144 | */ |
145 | static void unmap_area_sections(unsigned long virt, unsigned long size) | 145 | static void unmap_area_sections(unsigned long virt, unsigned long size) |
146 | { | 146 | { |
@@ -254,22 +254,8 @@ remap_area_supersections(unsigned long virt, unsigned long pfn, | |||
254 | } | 254 | } |
255 | #endif | 255 | #endif |
256 | 256 | ||
257 | 257 | void __iomem * __arm_ioremap_pfn_caller(unsigned long pfn, | |
258 | /* | 258 | unsigned long offset, size_t size, unsigned int mtype, void *caller) |
259 | * Remap an arbitrary physical address space into the kernel virtual | ||
260 | * address space. Needed when the kernel wants to access high addresses | ||
261 | * directly. | ||
262 | * | ||
263 | * NOTE! We need to allow non-page-aligned mappings too: we will obviously | ||
264 | * have to convert them into an offset in a page-aligned mapping, but the | ||
265 | * caller shouldn't need to know that small detail. | ||
266 | * | ||
267 | * 'flags' are the extra L_PTE_ flags that you want to specify for this | ||
268 | * mapping. See <asm/pgtable.h> for more information. | ||
269 | */ | ||
270 | void __iomem * | ||
271 | __arm_ioremap_pfn(unsigned long pfn, unsigned long offset, size_t size, | ||
272 | unsigned int mtype) | ||
273 | { | 259 | { |
274 | const struct mem_type *type; | 260 | const struct mem_type *type; |
275 | int err; | 261 | int err; |
@@ -291,7 +277,7 @@ __arm_ioremap_pfn(unsigned long pfn, unsigned long offset, size_t size, | |||
291 | */ | 277 | */ |
292 | size = PAGE_ALIGN(offset + size); | 278 | size = PAGE_ALIGN(offset + size); |
293 | 279 | ||
294 | area = get_vm_area(size, VM_IOREMAP); | 280 | area = get_vm_area_caller(size, VM_IOREMAP, caller); |
295 | if (!area) | 281 | if (!area) |
296 | return NULL; | 282 | return NULL; |
297 | addr = (unsigned long)area->addr; | 283 | addr = (unsigned long)area->addr; |
@@ -318,10 +304,9 @@ __arm_ioremap_pfn(unsigned long pfn, unsigned long offset, size_t size, | |||
318 | flush_cache_vmap(addr, addr + size); | 304 | flush_cache_vmap(addr, addr + size); |
319 | return (void __iomem *) (offset + addr); | 305 | return (void __iomem *) (offset + addr); |
320 | } | 306 | } |
321 | EXPORT_SYMBOL(__arm_ioremap_pfn); | ||
322 | 307 | ||
323 | void __iomem * | 308 | void __iomem *__arm_ioremap_caller(unsigned long phys_addr, size_t size, |
324 | __arm_ioremap(unsigned long phys_addr, size_t size, unsigned int mtype) | 309 | unsigned int mtype, void *caller) |
325 | { | 310 | { |
326 | unsigned long last_addr; | 311 | unsigned long last_addr; |
327 | unsigned long offset = phys_addr & ~PAGE_MASK; | 312 | unsigned long offset = phys_addr & ~PAGE_MASK; |
@@ -334,7 +319,33 @@ __arm_ioremap(unsigned long phys_addr, size_t size, unsigned int mtype) | |||
334 | if (!size || last_addr < phys_addr) | 319 | if (!size || last_addr < phys_addr) |
335 | return NULL; | 320 | return NULL; |
336 | 321 | ||
337 | return __arm_ioremap_pfn(pfn, offset, size, mtype); | 322 | return __arm_ioremap_pfn_caller(pfn, offset, size, mtype, |
323 | caller); | ||
324 | } | ||
325 | |||
326 | /* | ||
327 | * Remap an arbitrary physical address space into the kernel virtual | ||
328 | * address space. Needed when the kernel wants to access high addresses | ||
329 | * directly. | ||
330 | * | ||
331 | * NOTE! We need to allow non-page-aligned mappings too: we will obviously | ||
332 | * have to convert them into an offset in a page-aligned mapping, but the | ||
333 | * caller shouldn't need to know that small detail. | ||
334 | */ | ||
335 | void __iomem * | ||
336 | __arm_ioremap_pfn(unsigned long pfn, unsigned long offset, size_t size, | ||
337 | unsigned int mtype) | ||
338 | { | ||
339 | return __arm_ioremap_pfn_caller(pfn, offset, size, mtype, | ||
340 | __builtin_return_address(0)); | ||
341 | } | ||
342 | EXPORT_SYMBOL(__arm_ioremap_pfn); | ||
343 | |||
344 | void __iomem * | ||
345 | __arm_ioremap(unsigned long phys_addr, size_t size, unsigned int mtype) | ||
346 | { | ||
347 | return __arm_ioremap_caller(phys_addr, size, mtype, | ||
348 | __builtin_return_address(0)); | ||
338 | } | 349 | } |
339 | EXPORT_SYMBOL(__arm_ioremap); | 350 | EXPORT_SYMBOL(__arm_ioremap); |
340 | 351 | ||
diff --git a/arch/arm/mm/mmu.c b/arch/arm/mm/mmu.c index 761ffede6a23..9d4da6ac28eb 100644 --- a/arch/arm/mm/mmu.c +++ b/arch/arm/mm/mmu.c | |||
@@ -100,18 +100,17 @@ static struct cachepolicy cache_policies[] __initdata = { | |||
100 | * writebuffer to be turned off. (Note: the write | 100 | * writebuffer to be turned off. (Note: the write |
101 | * buffer should not be on and the cache off). | 101 | * buffer should not be on and the cache off). |
102 | */ | 102 | */ |
103 | static void __init early_cachepolicy(char **p) | 103 | static int __init early_cachepolicy(char *p) |
104 | { | 104 | { |
105 | int i; | 105 | int i; |
106 | 106 | ||
107 | for (i = 0; i < ARRAY_SIZE(cache_policies); i++) { | 107 | for (i = 0; i < ARRAY_SIZE(cache_policies); i++) { |
108 | int len = strlen(cache_policies[i].policy); | 108 | int len = strlen(cache_policies[i].policy); |
109 | 109 | ||
110 | if (memcmp(*p, cache_policies[i].policy, len) == 0) { | 110 | if (memcmp(p, cache_policies[i].policy, len) == 0) { |
111 | cachepolicy = i; | 111 | cachepolicy = i; |
112 | cr_alignment &= ~cache_policies[i].cr_mask; | 112 | cr_alignment &= ~cache_policies[i].cr_mask; |
113 | cr_no_alignment &= ~cache_policies[i].cr_mask; | 113 | cr_no_alignment &= ~cache_policies[i].cr_mask; |
114 | *p += len; | ||
115 | break; | 114 | break; |
116 | } | 115 | } |
117 | } | 116 | } |
@@ -130,36 +129,37 @@ static void __init early_cachepolicy(char **p) | |||
130 | } | 129 | } |
131 | flush_cache_all(); | 130 | flush_cache_all(); |
132 | set_cr(cr_alignment); | 131 | set_cr(cr_alignment); |
132 | return 0; | ||
133 | } | 133 | } |
134 | __early_param("cachepolicy=", early_cachepolicy); | 134 | early_param("cachepolicy", early_cachepolicy); |
135 | 135 | ||
136 | static void __init early_nocache(char **__unused) | 136 | static int __init early_nocache(char *__unused) |
137 | { | 137 | { |
138 | char *p = "buffered"; | 138 | char *p = "buffered"; |
139 | printk(KERN_WARNING "nocache is deprecated; use cachepolicy=%s\n", p); | 139 | printk(KERN_WARNING "nocache is deprecated; use cachepolicy=%s\n", p); |
140 | early_cachepolicy(&p); | 140 | early_cachepolicy(p); |
141 | return 0; | ||
141 | } | 142 | } |
142 | __early_param("nocache", early_nocache); | 143 | early_param("nocache", early_nocache); |
143 | 144 | ||
144 | static void __init early_nowrite(char **__unused) | 145 | static int __init early_nowrite(char *__unused) |
145 | { | 146 | { |
146 | char *p = "uncached"; | 147 | char *p = "uncached"; |
147 | printk(KERN_WARNING "nowb is deprecated; use cachepolicy=%s\n", p); | 148 | printk(KERN_WARNING "nowb is deprecated; use cachepolicy=%s\n", p); |
148 | early_cachepolicy(&p); | 149 | early_cachepolicy(p); |
150 | return 0; | ||
149 | } | 151 | } |
150 | __early_param("nowb", early_nowrite); | 152 | early_param("nowb", early_nowrite); |
151 | 153 | ||
152 | static void __init early_ecc(char **p) | 154 | static int __init early_ecc(char *p) |
153 | { | 155 | { |
154 | if (memcmp(*p, "on", 2) == 0) { | 156 | if (memcmp(p, "on", 2) == 0) |
155 | ecc_mask = PMD_PROTECTION; | 157 | ecc_mask = PMD_PROTECTION; |
156 | *p += 2; | 158 | else if (memcmp(p, "off", 3) == 0) |
157 | } else if (memcmp(*p, "off", 3) == 0) { | ||
158 | ecc_mask = 0; | 159 | ecc_mask = 0; |
159 | *p += 3; | 160 | return 0; |
160 | } | ||
161 | } | 161 | } |
162 | __early_param("ecc=", early_ecc); | 162 | early_param("ecc", early_ecc); |
163 | 163 | ||
164 | static int __init noalign_setup(char *__unused) | 164 | static int __init noalign_setup(char *__unused) |
165 | { | 165 | { |
@@ -670,9 +670,9 @@ static unsigned long __initdata vmalloc_reserve = SZ_128M; | |||
670 | * bytes. This can be used to increase (or decrease) the vmalloc | 670 | * bytes. This can be used to increase (or decrease) the vmalloc |
671 | * area - the default is 128m. | 671 | * area - the default is 128m. |
672 | */ | 672 | */ |
673 | static void __init early_vmalloc(char **arg) | 673 | static int __init early_vmalloc(char *arg) |
674 | { | 674 | { |
675 | vmalloc_reserve = memparse(*arg, arg); | 675 | vmalloc_reserve = memparse(arg, NULL); |
676 | 676 | ||
677 | if (vmalloc_reserve < SZ_16M) { | 677 | if (vmalloc_reserve < SZ_16M) { |
678 | vmalloc_reserve = SZ_16M; | 678 | vmalloc_reserve = SZ_16M; |
@@ -687,8 +687,9 @@ static void __init early_vmalloc(char **arg) | |||
687 | "vmalloc area is too big, limiting to %luMB\n", | 687 | "vmalloc area is too big, limiting to %luMB\n", |
688 | vmalloc_reserve >> 20); | 688 | vmalloc_reserve >> 20); |
689 | } | 689 | } |
690 | return 0; | ||
690 | } | 691 | } |
691 | __early_param("vmalloc=", early_vmalloc); | 692 | early_param("vmalloc", early_vmalloc); |
692 | 693 | ||
693 | #define VMALLOC_MIN (void *)(VMALLOC_END - vmalloc_reserve) | 694 | #define VMALLOC_MIN (void *)(VMALLOC_END - vmalloc_reserve) |
694 | 695 | ||
diff --git a/arch/arm/mm/nommu.c b/arch/arm/mm/nommu.c index 374a8311bc84..9bfeb6b9509a 100644 --- a/arch/arm/mm/nommu.c +++ b/arch/arm/mm/nommu.c | |||
@@ -74,6 +74,12 @@ void __iomem *__arm_ioremap_pfn(unsigned long pfn, unsigned long offset, | |||
74 | } | 74 | } |
75 | EXPORT_SYMBOL(__arm_ioremap_pfn); | 75 | EXPORT_SYMBOL(__arm_ioremap_pfn); |
76 | 76 | ||
77 | void __iomem *__arm_ioremap_pfn_caller(unsigned long pfn, unsigned long offset, | ||
78 | size_t size, unsigned int mtype, void *caller) | ||
79 | { | ||
80 | return __arm_ioremap_pfn(pfn, offset, size, mtype); | ||
81 | } | ||
82 | |||
77 | void __iomem *__arm_ioremap(unsigned long phys_addr, size_t size, | 83 | void __iomem *__arm_ioremap(unsigned long phys_addr, size_t size, |
78 | unsigned int mtype) | 84 | unsigned int mtype) |
79 | { | 85 | { |
@@ -81,6 +87,12 @@ void __iomem *__arm_ioremap(unsigned long phys_addr, size_t size, | |||
81 | } | 87 | } |
82 | EXPORT_SYMBOL(__arm_ioremap); | 88 | EXPORT_SYMBOL(__arm_ioremap); |
83 | 89 | ||
90 | void __iomem *__arm_ioremap_caller(unsigned long phys_addr, size_t size, ||
91 | unsigned int mtype, void *caller) | ||
92 | { | ||
93 | return __arm_ioremap(phys_addr, size, mtype); | ||
94 | } | ||
95 | |||
84 | void __iounmap(volatile void __iomem *addr) | 96 | void __iounmap(volatile void __iomem *addr) |
85 | { | 97 | { |
86 | } | 98 | } |
diff --git a/arch/arm/mm/proc-arm1020.S b/arch/arm/mm/proc-arm1020.S index 8012e24282b2..72507c630ceb 100644 --- a/arch/arm/mm/proc-arm1020.S +++ b/arch/arm/mm/proc-arm1020.S | |||
@@ -265,7 +265,7 @@ ENTRY(arm1020_flush_kern_dcache_area) | |||
265 | * | 265 | * |
266 | * (same as v4wb) | 266 | * (same as v4wb) |
267 | */ | 267 | */ |
268 | ENTRY(arm1020_dma_inv_range) | 268 | arm1020_dma_inv_range: |
269 | mov ip, #0 | 269 | mov ip, #0 |
270 | #ifndef CONFIG_CPU_DCACHE_DISABLE | 270 | #ifndef CONFIG_CPU_DCACHE_DISABLE |
271 | tst r0, #CACHE_DLINESIZE - 1 | 271 | tst r0, #CACHE_DLINESIZE - 1 |
@@ -295,7 +295,7 @@ ENTRY(arm1020_dma_inv_range) | |||
295 | * | 295 | * |
296 | * (same as v4wb) | 296 | * (same as v4wb) |
297 | */ | 297 | */ |
298 | ENTRY(arm1020_dma_clean_range) | 298 | arm1020_dma_clean_range: |
299 | mov ip, #0 | 299 | mov ip, #0 |
300 | #ifndef CONFIG_CPU_DCACHE_DISABLE | 300 | #ifndef CONFIG_CPU_DCACHE_DISABLE |
301 | bic r0, r0, #CACHE_DLINESIZE - 1 | 301 | bic r0, r0, #CACHE_DLINESIZE - 1 |
@@ -330,6 +330,30 @@ ENTRY(arm1020_dma_flush_range) | |||
330 | mcr p15, 0, ip, c7, c10, 4 @ drain WB | 330 | mcr p15, 0, ip, c7, c10, 4 @ drain WB |
331 | mov pc, lr | 331 | mov pc, lr |
332 | 332 | ||
333 | /* | ||
334 | * dma_map_area(start, size, dir) | ||
335 | * - start - kernel virtual start address | ||
336 | * - size - size of region | ||
337 | * - dir - DMA direction | ||
338 | */ | ||
339 | ENTRY(arm1020_dma_map_area) | ||
340 | add r1, r1, r0 | ||
341 | cmp r2, #DMA_TO_DEVICE | ||
342 | beq arm1020_dma_clean_range | ||
343 | bcs arm1020_dma_inv_range | ||
344 | b arm1020_dma_flush_range | ||
345 | ENDPROC(arm1020_dma_map_area) | ||
346 | |||
347 | /* | ||
348 | * dma_unmap_area(start, size, dir) | ||
349 | * - start - kernel virtual start address | ||
350 | * - size - size of region | ||
351 | * - dir - DMA direction | ||
352 | */ | ||
353 | ENTRY(arm1020_dma_unmap_area) | ||
354 | mov pc, lr | ||
355 | ENDPROC(arm1020_dma_unmap_area) | ||
356 | |||
333 | ENTRY(arm1020_cache_fns) | 357 | ENTRY(arm1020_cache_fns) |
334 | .long arm1020_flush_kern_cache_all | 358 | .long arm1020_flush_kern_cache_all |
335 | .long arm1020_flush_user_cache_all | 359 | .long arm1020_flush_user_cache_all |
@@ -337,8 +361,8 @@ ENTRY(arm1020_cache_fns) | |||
337 | .long arm1020_coherent_kern_range | 361 | .long arm1020_coherent_kern_range |
338 | .long arm1020_coherent_user_range | 362 | .long arm1020_coherent_user_range |
339 | .long arm1020_flush_kern_dcache_area | 363 | .long arm1020_flush_kern_dcache_area |
340 | .long arm1020_dma_inv_range | 364 | .long arm1020_dma_map_area |
341 | .long arm1020_dma_clean_range | 365 | .long arm1020_dma_unmap_area |
342 | .long arm1020_dma_flush_range | 366 | .long arm1020_dma_flush_range |
343 | 367 | ||
344 | .align 5 | 368 | .align 5 |
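From here down, every per-CPU proc-*.S file gets the same template: the dma_inv_range/dma_clean_range routines become local labels and leave the cache_fns table, reached only through the new dma_map_area/dma_unmap_area pair. The map routine first turns (start, size) into (start, end) with `add r1, r1, r0`, then dispatches on the DMA direction: after `cmp r2, #DMA_TO_DEVICE`, `beq` takes the clean path (== DMA_TO_DEVICE), `bcs` the invalidate path (unsigned >=, i.e. DMA_FROM_DEVICE), and the fall-through flushes for DMA_BIDIRECTIONAL. An equivalent, runnable C rendering (the *_range stubs stand in for the per-CPU cache routines):

```c
#include <stdio.h>

enum dma_data_direction {
	DMA_BIDIRECTIONAL = 0, DMA_TO_DEVICE = 1, DMA_FROM_DEVICE = 2,
};

static void clean_range(unsigned long s, unsigned long e) { printf("clean %#lx-%#lx\n", s, e); }
static void inv_range(unsigned long s, unsigned long e)   { printf("inv   %#lx-%#lx\n", s, e); }
static void flush_range(unsigned long s, unsigned long e) { printf("flush %#lx-%#lx\n", s, e); }

static void dma_map_area(unsigned long start, unsigned long size, int dir)
{
	unsigned long end = start + size;	/* add r1, r1, r0 */

	if (dir == DMA_TO_DEVICE)		/* beq: CPU wrote, device reads */
		clean_range(start, end);
	else if (dir > DMA_TO_DEVICE)		/* bcs: device wrote, CPU reads */
		inv_range(start, end);
	else					/* DMA_BIDIRECTIONAL */
		flush_range(start, end);
}

int main(void)
{
	dma_map_area(0x8000, 0x1000, DMA_FROM_DEVICE);
	return 0;
}
```

dma_unmap_area is a plain `mov pc, lr` (a no-op return) on these write-back cores, since all maintenance is done at map time.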
diff --git a/arch/arm/mm/proc-arm1020e.S b/arch/arm/mm/proc-arm1020e.S index 41fe25d234f5..d27829805609 100644 --- a/arch/arm/mm/proc-arm1020e.S +++ b/arch/arm/mm/proc-arm1020e.S | |||
@@ -258,7 +258,7 @@ ENTRY(arm1020e_flush_kern_dcache_area) | |||
258 | * | 258 | * |
259 | * (same as v4wb) | 259 | * (same as v4wb) |
260 | */ | 260 | */ |
261 | ENTRY(arm1020e_dma_inv_range) | 261 | arm1020e_dma_inv_range: |
262 | mov ip, #0 | 262 | mov ip, #0 |
263 | #ifndef CONFIG_CPU_DCACHE_DISABLE | 263 | #ifndef CONFIG_CPU_DCACHE_DISABLE |
264 | tst r0, #CACHE_DLINESIZE - 1 | 264 | tst r0, #CACHE_DLINESIZE - 1 |
@@ -284,7 +284,7 @@ ENTRY(arm1020e_dma_inv_range) | |||
284 | * | 284 | * |
285 | * (same as v4wb) | 285 | * (same as v4wb) |
286 | */ | 286 | */ |
287 | ENTRY(arm1020e_dma_clean_range) | 287 | arm1020e_dma_clean_range: |
288 | mov ip, #0 | 288 | mov ip, #0 |
289 | #ifndef CONFIG_CPU_DCACHE_DISABLE | 289 | #ifndef CONFIG_CPU_DCACHE_DISABLE |
290 | bic r0, r0, #CACHE_DLINESIZE - 1 | 290 | bic r0, r0, #CACHE_DLINESIZE - 1 |
@@ -316,6 +316,30 @@ ENTRY(arm1020e_dma_flush_range) | |||
316 | mcr p15, 0, ip, c7, c10, 4 @ drain WB | 316 | mcr p15, 0, ip, c7, c10, 4 @ drain WB |
317 | mov pc, lr | 317 | mov pc, lr |
318 | 318 | ||
319 | /* | ||
320 | * dma_map_area(start, size, dir) | ||
321 | * - start - kernel virtual start address | ||
322 | * - size - size of region | ||
323 | * - dir - DMA direction | ||
324 | */ | ||
325 | ENTRY(arm1020e_dma_map_area) | ||
326 | add r1, r1, r0 | ||
327 | cmp r2, #DMA_TO_DEVICE | ||
328 | beq arm1020e_dma_clean_range | ||
329 | bcs arm1020e_dma_inv_range | ||
330 | b arm1020e_dma_flush_range | ||
331 | ENDPROC(arm1020e_dma_map_area) | ||
332 | |||
333 | /* | ||
334 | * dma_unmap_area(start, size, dir) | ||
335 | * - start - kernel virtual start address | ||
336 | * - size - size of region | ||
337 | * - dir - DMA direction | ||
338 | */ | ||
339 | ENTRY(arm1020e_dma_unmap_area) | ||
340 | mov pc, lr | ||
341 | ENDPROC(arm1020e_dma_unmap_area) | ||
342 | |||
319 | ENTRY(arm1020e_cache_fns) | 343 | ENTRY(arm1020e_cache_fns) |
320 | .long arm1020e_flush_kern_cache_all | 344 | .long arm1020e_flush_kern_cache_all |
321 | .long arm1020e_flush_user_cache_all | 345 | .long arm1020e_flush_user_cache_all |
@@ -323,8 +347,8 @@ ENTRY(arm1020e_cache_fns) | |||
323 | .long arm1020e_coherent_kern_range | 347 | .long arm1020e_coherent_kern_range |
324 | .long arm1020e_coherent_user_range | 348 | .long arm1020e_coherent_user_range |
325 | .long arm1020e_flush_kern_dcache_area | 349 | .long arm1020e_flush_kern_dcache_area |
326 | .long arm1020e_dma_inv_range | 350 | .long arm1020e_dma_map_area |
327 | .long arm1020e_dma_clean_range | 351 | .long arm1020e_dma_unmap_area |
328 | .long arm1020e_dma_flush_range | 352 | .long arm1020e_dma_flush_range |
329 | 353 | ||
330 | .align 5 | 354 | .align 5 |
diff --git a/arch/arm/mm/proc-arm1022.S b/arch/arm/mm/proc-arm1022.S index 20a5b1b31a70..ce13e4a827de 100644 --- a/arch/arm/mm/proc-arm1022.S +++ b/arch/arm/mm/proc-arm1022.S | |||
@@ -247,7 +247,7 @@ ENTRY(arm1022_flush_kern_dcache_area) | |||
247 | * | 247 | * |
248 | * (same as v4wb) | 248 | * (same as v4wb) |
249 | */ | 249 | */ |
250 | ENTRY(arm1022_dma_inv_range) | 250 | arm1022_dma_inv_range: |
251 | mov ip, #0 | 251 | mov ip, #0 |
252 | #ifndef CONFIG_CPU_DCACHE_DISABLE | 252 | #ifndef CONFIG_CPU_DCACHE_DISABLE |
253 | tst r0, #CACHE_DLINESIZE - 1 | 253 | tst r0, #CACHE_DLINESIZE - 1 |
@@ -273,7 +273,7 @@ ENTRY(arm1022_dma_inv_range) | |||
273 | * | 273 | * |
274 | * (same as v4wb) | 274 | * (same as v4wb) |
275 | */ | 275 | */ |
276 | ENTRY(arm1022_dma_clean_range) | 276 | arm1022_dma_clean_range: |
277 | mov ip, #0 | 277 | mov ip, #0 |
278 | #ifndef CONFIG_CPU_DCACHE_DISABLE | 278 | #ifndef CONFIG_CPU_DCACHE_DISABLE |
279 | bic r0, r0, #CACHE_DLINESIZE - 1 | 279 | bic r0, r0, #CACHE_DLINESIZE - 1 |
@@ -305,6 +305,30 @@ ENTRY(arm1022_dma_flush_range) | |||
305 | mcr p15, 0, ip, c7, c10, 4 @ drain WB | 305 | mcr p15, 0, ip, c7, c10, 4 @ drain WB |
306 | mov pc, lr | 306 | mov pc, lr |
307 | 307 | ||
308 | /* | ||
309 | * dma_map_area(start, size, dir) | ||
310 | * - start - kernel virtual start address | ||
311 | * - size - size of region | ||
312 | * - dir - DMA direction | ||
313 | */ | ||
314 | ENTRY(arm1022_dma_map_area) | ||
315 | add r1, r1, r0 | ||
316 | cmp r2, #DMA_TO_DEVICE | ||
317 | beq arm1022_dma_clean_range | ||
318 | bcs arm1022_dma_inv_range | ||
319 | b arm1022_dma_flush_range | ||
320 | ENDPROC(arm1022_dma_map_area) | ||
321 | |||
322 | /* | ||
323 | * dma_unmap_area(start, size, dir) | ||
324 | * - start - kernel virtual start address | ||
325 | * - size - size of region | ||
326 | * - dir - DMA direction | ||
327 | */ | ||
328 | ENTRY(arm1022_dma_unmap_area) | ||
329 | mov pc, lr | ||
330 | ENDPROC(arm1022_dma_unmap_area) | ||
331 | |||
308 | ENTRY(arm1022_cache_fns) | 332 | ENTRY(arm1022_cache_fns) |
309 | .long arm1022_flush_kern_cache_all | 333 | .long arm1022_flush_kern_cache_all |
310 | .long arm1022_flush_user_cache_all | 334 | .long arm1022_flush_user_cache_all |
@@ -312,8 +336,8 @@ ENTRY(arm1022_cache_fns) | |||
312 | .long arm1022_coherent_kern_range | 336 | .long arm1022_coherent_kern_range |
313 | .long arm1022_coherent_user_range | 337 | .long arm1022_coherent_user_range |
314 | .long arm1022_flush_kern_dcache_area | 338 | .long arm1022_flush_kern_dcache_area |
315 | .long arm1022_dma_inv_range | 339 | .long arm1022_dma_map_area |
316 | .long arm1022_dma_clean_range | 340 | .long arm1022_dma_unmap_area |
317 | .long arm1022_dma_flush_range | 341 | .long arm1022_dma_flush_range |
318 | 342 | ||
319 | .align 5 | 343 | .align 5 |
diff --git a/arch/arm/mm/proc-arm1026.S b/arch/arm/mm/proc-arm1026.S index 96aedb10fcc4..636672a29c6d 100644 --- a/arch/arm/mm/proc-arm1026.S +++ b/arch/arm/mm/proc-arm1026.S | |||
@@ -241,7 +241,7 @@ ENTRY(arm1026_flush_kern_dcache_area) | |||
241 | * | 241 | * |
242 | * (same as v4wb) | 242 | * (same as v4wb) |
243 | */ | 243 | */ |
244 | ENTRY(arm1026_dma_inv_range) | 244 | arm1026_dma_inv_range: |
245 | mov ip, #0 | 245 | mov ip, #0 |
246 | #ifndef CONFIG_CPU_DCACHE_DISABLE | 246 | #ifndef CONFIG_CPU_DCACHE_DISABLE |
247 | tst r0, #CACHE_DLINESIZE - 1 | 247 | tst r0, #CACHE_DLINESIZE - 1 |
@@ -267,7 +267,7 @@ ENTRY(arm1026_dma_inv_range) | |||
267 | * | 267 | * |
268 | * (same as v4wb) | 268 | * (same as v4wb) |
269 | */ | 269 | */ |
270 | ENTRY(arm1026_dma_clean_range) | 270 | arm1026_dma_clean_range: |
271 | mov ip, #0 | 271 | mov ip, #0 |
272 | #ifndef CONFIG_CPU_DCACHE_DISABLE | 272 | #ifndef CONFIG_CPU_DCACHE_DISABLE |
273 | bic r0, r0, #CACHE_DLINESIZE - 1 | 273 | bic r0, r0, #CACHE_DLINESIZE - 1 |
@@ -299,6 +299,30 @@ ENTRY(arm1026_dma_flush_range) | |||
299 | mcr p15, 0, ip, c7, c10, 4 @ drain WB | 299 | mcr p15, 0, ip, c7, c10, 4 @ drain WB |
300 | mov pc, lr | 300 | mov pc, lr |
301 | 301 | ||
302 | /* | ||
303 | * dma_map_area(start, size, dir) | ||
304 | * - start - kernel virtual start address | ||
305 | * - size - size of region | ||
306 | * - dir - DMA direction | ||
307 | */ | ||
308 | ENTRY(arm1026_dma_map_area) | ||
309 | add r1, r1, r0 | ||
310 | cmp r2, #DMA_TO_DEVICE | ||
311 | beq arm1026_dma_clean_range | ||
312 | bcs arm1026_dma_inv_range | ||
313 | b arm1026_dma_flush_range | ||
314 | ENDPROC(arm1026_dma_map_area) | ||
315 | |||
316 | /* | ||
317 | * dma_unmap_area(start, size, dir) | ||
318 | * - start - kernel virtual start address | ||
319 | * - size - size of region | ||
320 | * - dir - DMA direction | ||
321 | */ | ||
322 | ENTRY(arm1026_dma_unmap_area) | ||
323 | mov pc, lr | ||
324 | ENDPROC(arm1026_dma_unmap_area) | ||
325 | |||
302 | ENTRY(arm1026_cache_fns) | 326 | ENTRY(arm1026_cache_fns) |
303 | .long arm1026_flush_kern_cache_all | 327 | .long arm1026_flush_kern_cache_all |
304 | .long arm1026_flush_user_cache_all | 328 | .long arm1026_flush_user_cache_all |
@@ -306,8 +330,8 @@ ENTRY(arm1026_cache_fns) | |||
306 | .long arm1026_coherent_kern_range | 330 | .long arm1026_coherent_kern_range |
307 | .long arm1026_coherent_user_range | 331 | .long arm1026_coherent_user_range |
308 | .long arm1026_flush_kern_dcache_area | 332 | .long arm1026_flush_kern_dcache_area |
309 | .long arm1026_dma_inv_range | 333 | .long arm1026_dma_map_area |
310 | .long arm1026_dma_clean_range | 334 | .long arm1026_dma_unmap_area |
311 | .long arm1026_dma_flush_range | 335 | .long arm1026_dma_flush_range |
312 | 336 | ||
313 | .align 5 | 337 | .align 5 |
diff --git a/arch/arm/mm/proc-arm920.S b/arch/arm/mm/proc-arm920.S index 471669e2d7cb..8be81992645d 100644 --- a/arch/arm/mm/proc-arm920.S +++ b/arch/arm/mm/proc-arm920.S | |||
@@ -239,7 +239,7 @@ ENTRY(arm920_flush_kern_dcache_area) | |||
239 | * | 239 | * |
240 | * (same as v4wb) | 240 | * (same as v4wb) |
241 | */ | 241 | */ |
242 | ENTRY(arm920_dma_inv_range) | 242 | arm920_dma_inv_range: |
243 | tst r0, #CACHE_DLINESIZE - 1 | 243 | tst r0, #CACHE_DLINESIZE - 1 |
244 | bic r0, r0, #CACHE_DLINESIZE - 1 | 244 | bic r0, r0, #CACHE_DLINESIZE - 1 |
245 | mcrne p15, 0, r0, c7, c10, 1 @ clean D entry | 245 | mcrne p15, 0, r0, c7, c10, 1 @ clean D entry |
@@ -262,7 +262,7 @@ ENTRY(arm920_dma_inv_range) | |||
262 | * | 262 | * |
263 | * (same as v4wb) | 263 | * (same as v4wb) |
264 | */ | 264 | */ |
265 | ENTRY(arm920_dma_clean_range) | 265 | arm920_dma_clean_range: |
266 | bic r0, r0, #CACHE_DLINESIZE - 1 | 266 | bic r0, r0, #CACHE_DLINESIZE - 1 |
267 | 1: mcr p15, 0, r0, c7, c10, 1 @ clean D entry | 267 | 1: mcr p15, 0, r0, c7, c10, 1 @ clean D entry |
268 | add r0, r0, #CACHE_DLINESIZE | 268 | add r0, r0, #CACHE_DLINESIZE |
@@ -288,6 +288,30 @@ ENTRY(arm920_dma_flush_range) | |||
288 | mcr p15, 0, r0, c7, c10, 4 @ drain WB | 288 | mcr p15, 0, r0, c7, c10, 4 @ drain WB |
289 | mov pc, lr | 289 | mov pc, lr |
290 | 290 | ||
291 | /* | ||
292 | * dma_map_area(start, size, dir) | ||
293 | * - start - kernel virtual start address | ||
294 | * - size - size of region | ||
295 | * - dir - DMA direction | ||
296 | */ | ||
297 | ENTRY(arm920_dma_map_area) | ||
298 | add r1, r1, r0 | ||
299 | cmp r2, #DMA_TO_DEVICE | ||
300 | beq arm920_dma_clean_range | ||
301 | bcs arm920_dma_inv_range | ||
302 | b arm920_dma_flush_range | ||
303 | ENDPROC(arm920_dma_map_area) | ||
304 | |||
305 | /* | ||
306 | * dma_unmap_area(start, size, dir) | ||
307 | * - start - kernel virtual start address | ||
308 | * - size - size of region | ||
309 | * - dir - DMA direction | ||
310 | */ | ||
311 | ENTRY(arm920_dma_unmap_area) | ||
312 | mov pc, lr | ||
313 | ENDPROC(arm920_dma_unmap_area) | ||
314 | |||
291 | ENTRY(arm920_cache_fns) | 315 | ENTRY(arm920_cache_fns) |
292 | .long arm920_flush_kern_cache_all | 316 | .long arm920_flush_kern_cache_all |
293 | .long arm920_flush_user_cache_all | 317 | .long arm920_flush_user_cache_all |
@@ -295,8 +319,8 @@ ENTRY(arm920_cache_fns) | |||
295 | .long arm920_coherent_kern_range | 319 | .long arm920_coherent_kern_range |
296 | .long arm920_coherent_user_range | 320 | .long arm920_coherent_user_range |
297 | .long arm920_flush_kern_dcache_area | 321 | .long arm920_flush_kern_dcache_area |
298 | .long arm920_dma_inv_range | 322 | .long arm920_dma_map_area |
299 | .long arm920_dma_clean_range | 323 | .long arm920_dma_unmap_area |
300 | .long arm920_dma_flush_range | 324 | .long arm920_dma_flush_range |
301 | 325 | ||
302 | #endif | 326 | #endif |
diff --git a/arch/arm/mm/proc-arm922.S b/arch/arm/mm/proc-arm922.S index ee111b00fa41..c0ff8e4b1074 100644 --- a/arch/arm/mm/proc-arm922.S +++ b/arch/arm/mm/proc-arm922.S | |||
@@ -241,7 +241,7 @@ ENTRY(arm922_flush_kern_dcache_area) | |||
241 | * | 241 | * |
242 | * (same as v4wb) | 242 | * (same as v4wb) |
243 | */ | 243 | */ |
244 | ENTRY(arm922_dma_inv_range) | 244 | arm922_dma_inv_range: |
245 | tst r0, #CACHE_DLINESIZE - 1 | 245 | tst r0, #CACHE_DLINESIZE - 1 |
246 | bic r0, r0, #CACHE_DLINESIZE - 1 | 246 | bic r0, r0, #CACHE_DLINESIZE - 1 |
247 | mcrne p15, 0, r0, c7, c10, 1 @ clean D entry | 247 | mcrne p15, 0, r0, c7, c10, 1 @ clean D entry |
@@ -264,7 +264,7 @@ ENTRY(arm922_dma_inv_range) | |||
264 | * | 264 | * |
265 | * (same as v4wb) | 265 | * (same as v4wb) |
266 | */ | 266 | */ |
267 | ENTRY(arm922_dma_clean_range) | 267 | arm922_dma_clean_range: |
268 | bic r0, r0, #CACHE_DLINESIZE - 1 | 268 | bic r0, r0, #CACHE_DLINESIZE - 1 |
269 | 1: mcr p15, 0, r0, c7, c10, 1 @ clean D entry | 269 | 1: mcr p15, 0, r0, c7, c10, 1 @ clean D entry |
270 | add r0, r0, #CACHE_DLINESIZE | 270 | add r0, r0, #CACHE_DLINESIZE |
@@ -290,6 +290,30 @@ ENTRY(arm922_dma_flush_range) | |||
290 | mcr p15, 0, r0, c7, c10, 4 @ drain WB | 290 | mcr p15, 0, r0, c7, c10, 4 @ drain WB |
291 | mov pc, lr | 291 | mov pc, lr |
292 | 292 | ||
293 | /* | ||
294 | * dma_map_area(start, size, dir) | ||
295 | * - start - kernel virtual start address | ||
296 | * - size - size of region | ||
297 | * - dir - DMA direction | ||
298 | */ | ||
299 | ENTRY(arm922_dma_map_area) | ||
300 | add r1, r1, r0 | ||
301 | cmp r2, #DMA_TO_DEVICE | ||
302 | beq arm922_dma_clean_range | ||
303 | bcs arm922_dma_inv_range | ||
304 | b arm922_dma_flush_range | ||
305 | ENDPROC(arm922_dma_map_area) | ||
306 | |||
307 | /* | ||
308 | * dma_unmap_area(start, size, dir) | ||
309 | * - start - kernel virtual start address | ||
310 | * - size - size of region | ||
311 | * - dir - DMA direction | ||
312 | */ | ||
313 | ENTRY(arm922_dma_unmap_area) | ||
314 | mov pc, lr | ||
315 | ENDPROC(arm922_dma_unmap_area) | ||
316 | |||
293 | ENTRY(arm922_cache_fns) | 317 | ENTRY(arm922_cache_fns) |
294 | .long arm922_flush_kern_cache_all | 318 | .long arm922_flush_kern_cache_all |
295 | .long arm922_flush_user_cache_all | 319 | .long arm922_flush_user_cache_all |
@@ -297,8 +321,8 @@ ENTRY(arm922_cache_fns) | |||
297 | .long arm922_coherent_kern_range | 321 | .long arm922_coherent_kern_range |
298 | .long arm922_coherent_user_range | 322 | .long arm922_coherent_user_range |
299 | .long arm922_flush_kern_dcache_area | 323 | .long arm922_flush_kern_dcache_area |
300 | .long arm922_dma_inv_range | 324 | .long arm922_dma_map_area |
301 | .long arm922_dma_clean_range | 325 | .long arm922_dma_unmap_area |
302 | .long arm922_dma_flush_range | 326 | .long arm922_dma_flush_range |
303 | 327 | ||
304 | #endif | 328 | #endif |
diff --git a/arch/arm/mm/proc-arm925.S b/arch/arm/mm/proc-arm925.S index 8deb5bde58e4..3c6cffe400f6 100644 --- a/arch/arm/mm/proc-arm925.S +++ b/arch/arm/mm/proc-arm925.S | |||
@@ -283,7 +283,7 @@ ENTRY(arm925_flush_kern_dcache_area) | |||
283 | * | 283 | * |
284 | * (same as v4wb) | 284 | * (same as v4wb) |
285 | */ | 285 | */ |
286 | ENTRY(arm925_dma_inv_range) | 286 | arm925_dma_inv_range: |
287 | #ifndef CONFIG_CPU_DCACHE_WRITETHROUGH | 287 | #ifndef CONFIG_CPU_DCACHE_WRITETHROUGH |
288 | tst r0, #CACHE_DLINESIZE - 1 | 288 | tst r0, #CACHE_DLINESIZE - 1 |
289 | mcrne p15, 0, r0, c7, c10, 1 @ clean D entry | 289 | mcrne p15, 0, r0, c7, c10, 1 @ clean D entry |
@@ -308,7 +308,7 @@ ENTRY(arm925_dma_inv_range) | |||
308 | * | 308 | * |
309 | * (same as v4wb) | 309 | * (same as v4wb) |
310 | */ | 310 | */ |
311 | ENTRY(arm925_dma_clean_range) | 311 | arm925_dma_clean_range: |
312 | #ifndef CONFIG_CPU_DCACHE_WRITETHROUGH | 312 | #ifndef CONFIG_CPU_DCACHE_WRITETHROUGH |
313 | bic r0, r0, #CACHE_DLINESIZE - 1 | 313 | bic r0, r0, #CACHE_DLINESIZE - 1 |
314 | 1: mcr p15, 0, r0, c7, c10, 1 @ clean D entry | 314 | 1: mcr p15, 0, r0, c7, c10, 1 @ clean D entry |
@@ -341,6 +341,30 @@ ENTRY(arm925_dma_flush_range) | |||
341 | mcr p15, 0, r0, c7, c10, 4 @ drain WB | 341 | mcr p15, 0, r0, c7, c10, 4 @ drain WB |
342 | mov pc, lr | 342 | mov pc, lr |
343 | 343 | ||
344 | /* | ||
345 | * dma_map_area(start, size, dir) | ||
346 | * - start - kernel virtual start address | ||
347 | * - size - size of region | ||
348 | * - dir - DMA direction | ||
349 | */ | ||
350 | ENTRY(arm925_dma_map_area) | ||
351 | add r1, r1, r0 | ||
352 | cmp r2, #DMA_TO_DEVICE | ||
353 | beq arm925_dma_clean_range | ||
354 | bcs arm925_dma_inv_range | ||
355 | b arm925_dma_flush_range | ||
356 | ENDPROC(arm925_dma_map_area) | ||
357 | |||
358 | /* | ||
359 | * dma_unmap_area(start, size, dir) | ||
360 | * - start - kernel virtual start address | ||
361 | * - size - size of region | ||
362 | * - dir - DMA direction | ||
363 | */ | ||
364 | ENTRY(arm925_dma_unmap_area) | ||
365 | mov pc, lr | ||
366 | ENDPROC(arm925_dma_unmap_area) | ||
367 | |||
344 | ENTRY(arm925_cache_fns) | 368 | ENTRY(arm925_cache_fns) |
345 | .long arm925_flush_kern_cache_all | 369 | .long arm925_flush_kern_cache_all |
346 | .long arm925_flush_user_cache_all | 370 | .long arm925_flush_user_cache_all |
@@ -348,8 +372,8 @@ ENTRY(arm925_cache_fns) | |||
348 | .long arm925_coherent_kern_range | 372 | .long arm925_coherent_kern_range |
349 | .long arm925_coherent_user_range | 373 | .long arm925_coherent_user_range |
350 | .long arm925_flush_kern_dcache_area | 374 | .long arm925_flush_kern_dcache_area |
351 | .long arm925_dma_inv_range | 375 | .long arm925_dma_map_area |
352 | .long arm925_dma_clean_range | 376 | .long arm925_dma_unmap_area |
353 | .long arm925_dma_flush_range | 377 | .long arm925_dma_flush_range |
354 | 378 | ||
355 | ENTRY(cpu_arm925_dcache_clean_area) | 379 | ENTRY(cpu_arm925_dcache_clean_area) |
diff --git a/arch/arm/mm/proc-arm926.S b/arch/arm/mm/proc-arm926.S index 64db6e275a44..75b707c9cce1 100644 --- a/arch/arm/mm/proc-arm926.S +++ b/arch/arm/mm/proc-arm926.S | |||
@@ -246,7 +246,7 @@ ENTRY(arm926_flush_kern_dcache_area) | |||
246 | * | 246 | * |
247 | * (same as v4wb) | 247 | * (same as v4wb) |
248 | */ | 248 | */ |
249 | ENTRY(arm926_dma_inv_range) | 249 | arm926_dma_inv_range: |
250 | #ifndef CONFIG_CPU_DCACHE_WRITETHROUGH | 250 | #ifndef CONFIG_CPU_DCACHE_WRITETHROUGH |
251 | tst r0, #CACHE_DLINESIZE - 1 | 251 | tst r0, #CACHE_DLINESIZE - 1 |
252 | mcrne p15, 0, r0, c7, c10, 1 @ clean D entry | 252 | mcrne p15, 0, r0, c7, c10, 1 @ clean D entry |
@@ -271,7 +271,7 @@ ENTRY(arm926_dma_inv_range) | |||
271 | * | 271 | * |
272 | * (same as v4wb) | 272 | * (same as v4wb) |
273 | */ | 273 | */ |
274 | ENTRY(arm926_dma_clean_range) | 274 | arm926_dma_clean_range: |
275 | #ifndef CONFIG_CPU_DCACHE_WRITETHROUGH | 275 | #ifndef CONFIG_CPU_DCACHE_WRITETHROUGH |
276 | bic r0, r0, #CACHE_DLINESIZE - 1 | 276 | bic r0, r0, #CACHE_DLINESIZE - 1 |
277 | 1: mcr p15, 0, r0, c7, c10, 1 @ clean D entry | 277 | 1: mcr p15, 0, r0, c7, c10, 1 @ clean D entry |
@@ -304,6 +304,30 @@ ENTRY(arm926_dma_flush_range) | |||
304 | mcr p15, 0, r0, c7, c10, 4 @ drain WB | 304 | mcr p15, 0, r0, c7, c10, 4 @ drain WB |
305 | mov pc, lr | 305 | mov pc, lr |
306 | 306 | ||
307 | /* | ||
308 | * dma_map_area(start, size, dir) | ||
309 | * - start - kernel virtual start address | ||
310 | * - size - size of region | ||
311 | * - dir - DMA direction | ||
312 | */ | ||
313 | ENTRY(arm926_dma_map_area) | ||
314 | add r1, r1, r0 | ||
315 | cmp r2, #DMA_TO_DEVICE | ||
316 | beq arm926_dma_clean_range | ||
317 | bcs arm926_dma_inv_range | ||
318 | b arm926_dma_flush_range | ||
319 | ENDPROC(arm926_dma_map_area) | ||
320 | |||
321 | /* | ||
322 | * dma_unmap_area(start, size, dir) | ||
323 | * - start - kernel virtual start address | ||
324 | * - size - size of region | ||
325 | * - dir - DMA direction | ||
326 | */ | ||
327 | ENTRY(arm926_dma_unmap_area) | ||
328 | mov pc, lr | ||
329 | ENDPROC(arm926_dma_unmap_area) | ||
330 | |||
307 | ENTRY(arm926_cache_fns) | 331 | ENTRY(arm926_cache_fns) |
308 | .long arm926_flush_kern_cache_all | 332 | .long arm926_flush_kern_cache_all |
309 | .long arm926_flush_user_cache_all | 333 | .long arm926_flush_user_cache_all |
@@ -311,8 +335,8 @@ ENTRY(arm926_cache_fns) | |||
311 | .long arm926_coherent_kern_range | 335 | .long arm926_coherent_kern_range |
312 | .long arm926_coherent_user_range | 336 | .long arm926_coherent_user_range |
313 | .long arm926_flush_kern_dcache_area | 337 | .long arm926_flush_kern_dcache_area |
314 | .long arm926_dma_inv_range | 338 | .long arm926_dma_map_area |
315 | .long arm926_dma_clean_range | 339 | .long arm926_dma_unmap_area |
316 | .long arm926_dma_flush_range | 340 | .long arm926_dma_flush_range |
317 | 341 | ||
318 | ENTRY(cpu_arm926_dcache_clean_area) | 342 | ENTRY(cpu_arm926_dcache_clean_area) |
diff --git a/arch/arm/mm/proc-arm940.S b/arch/arm/mm/proc-arm940.S index 8196b9f401fb..1af1657819eb 100644 --- a/arch/arm/mm/proc-arm940.S +++ b/arch/arm/mm/proc-arm940.S | |||
@@ -171,7 +171,7 @@ ENTRY(arm940_flush_kern_dcache_area) | |||
171 | * - start - virtual start address | 171 | * - start - virtual start address |
172 | * - end - virtual end address | 172 | * - end - virtual end address |
173 | */ | 173 | */ |
174 | ENTRY(arm940_dma_inv_range) | 174 | arm940_dma_inv_range: |
175 | mov ip, #0 | 175 | mov ip, #0 |
176 | mov r1, #(CACHE_DSEGMENTS - 1) << 4 @ 4 segments | 176 | mov r1, #(CACHE_DSEGMENTS - 1) << 4 @ 4 segments |
177 | 1: orr r3, r1, #(CACHE_DENTRIES - 1) << 26 @ 64 entries | 177 | 1: orr r3, r1, #(CACHE_DENTRIES - 1) << 26 @ 64 entries |
@@ -192,7 +192,7 @@ ENTRY(arm940_dma_inv_range) | |||
192 | * - start - virtual start address | 192 | * - start - virtual start address |
193 | * - end - virtual end address | 193 | * - end - virtual end address |
194 | */ | 194 | */ |
195 | ENTRY(arm940_dma_clean_range) | 195 | arm940_dma_clean_range: |
196 | ENTRY(cpu_arm940_dcache_clean_area) | 196 | ENTRY(cpu_arm940_dcache_clean_area) |
197 | mov ip, #0 | 197 | mov ip, #0 |
198 | #ifndef CONFIG_CPU_DCACHE_WRITETHROUGH | 198 | #ifndef CONFIG_CPU_DCACHE_WRITETHROUGH |
@@ -233,6 +233,30 @@ ENTRY(arm940_dma_flush_range) | |||
233 | mcr p15, 0, ip, c7, c10, 4 @ drain WB | 233 | mcr p15, 0, ip, c7, c10, 4 @ drain WB |
234 | mov pc, lr | 234 | mov pc, lr |
235 | 235 | ||
236 | /* | ||
237 | * dma_map_area(start, size, dir) | ||
238 | * - start - kernel virtual start address | ||
239 | * - size - size of region | ||
240 | * - dir - DMA direction | ||
241 | */ | ||
242 | ENTRY(arm940_dma_map_area) | ||
243 | add r1, r1, r0 | ||
244 | cmp r2, #DMA_TO_DEVICE | ||
245 | beq arm940_dma_clean_range | ||
246 | bcs arm940_dma_inv_range | ||
247 | b arm940_dma_flush_range | ||
248 | ENDPROC(arm940_dma_map_area) | ||
249 | |||
250 | /* | ||
251 | * dma_unmap_area(start, size, dir) | ||
252 | * - start - kernel virtual start address | ||
253 | * - size - size of region | ||
254 | * - dir - DMA direction | ||
255 | */ | ||
256 | ENTRY(arm940_dma_unmap_area) | ||
257 | mov pc, lr | ||
258 | ENDPROC(arm940_dma_unmap_area) | ||
259 | |||
236 | ENTRY(arm940_cache_fns) | 260 | ENTRY(arm940_cache_fns) |
237 | .long arm940_flush_kern_cache_all | 261 | .long arm940_flush_kern_cache_all |
238 | .long arm940_flush_user_cache_all | 262 | .long arm940_flush_user_cache_all |
@@ -240,8 +264,8 @@ ENTRY(arm940_cache_fns) | |||
240 | .long arm940_coherent_kern_range | 264 | .long arm940_coherent_kern_range |
241 | .long arm940_coherent_user_range | 265 | .long arm940_coherent_user_range |
242 | .long arm940_flush_kern_dcache_area | 266 | .long arm940_flush_kern_dcache_area |
243 | .long arm940_dma_inv_range | 267 | .long arm940_dma_map_area |
244 | .long arm940_dma_clean_range | 268 | .long arm940_dma_unmap_area |
245 | .long arm940_dma_flush_range | 269 | .long arm940_dma_flush_range |
246 | 270 | ||
247 | __INIT | 271 | __INIT |
diff --git a/arch/arm/mm/proc-arm946.S b/arch/arm/mm/proc-arm946.S index 9a951239c86c..1664b6aaff79 100644 --- a/arch/arm/mm/proc-arm946.S +++ b/arch/arm/mm/proc-arm946.S | |||
@@ -215,7 +215,7 @@ ENTRY(arm946_flush_kern_dcache_area) | |||
215 | * - end - virtual end address | 215 | * - end - virtual end address |
216 | * (same as arm926) | 216 | * (same as arm926) |
217 | */ | 217 | */ |
218 | ENTRY(arm946_dma_inv_range) | 218 | arm946_dma_inv_range: |
219 | #ifndef CONFIG_CPU_DCACHE_WRITETHROUGH | 219 | #ifndef CONFIG_CPU_DCACHE_WRITETHROUGH |
220 | tst r0, #CACHE_DLINESIZE - 1 | 220 | tst r0, #CACHE_DLINESIZE - 1 |
221 | mcrne p15, 0, r0, c7, c10, 1 @ clean D entry | 221 | mcrne p15, 0, r0, c7, c10, 1 @ clean D entry |
@@ -240,7 +240,7 @@ ENTRY(arm946_dma_inv_range) | |||
240 | * | 240 | * |
241 | * (same as arm926) | 241 | * (same as arm926) |
242 | */ | 242 | */ |
243 | ENTRY(arm946_dma_clean_range) | 243 | arm946_dma_clean_range: |
244 | #ifndef CONFIG_CPU_DCACHE_WRITETHROUGH | 244 | #ifndef CONFIG_CPU_DCACHE_WRITETHROUGH |
245 | bic r0, r0, #CACHE_DLINESIZE - 1 | 245 | bic r0, r0, #CACHE_DLINESIZE - 1 |
246 | 1: mcr p15, 0, r0, c7, c10, 1 @ clean D entry | 246 | 1: mcr p15, 0, r0, c7, c10, 1 @ clean D entry |
@@ -275,6 +275,30 @@ ENTRY(arm946_dma_flush_range) | |||
275 | mcr p15, 0, r0, c7, c10, 4 @ drain WB | 275 | mcr p15, 0, r0, c7, c10, 4 @ drain WB |
276 | mov pc, lr | 276 | mov pc, lr |
277 | 277 | ||
278 | /* | ||
279 | * dma_map_area(start, size, dir) | ||
280 | * - start - kernel virtual start address | ||
281 | * - size - size of region | ||
282 | * - dir - DMA direction | ||
283 | */ | ||
284 | ENTRY(arm946_dma_map_area) | ||
285 | add r1, r1, r0 | ||
286 | cmp r2, #DMA_TO_DEVICE | ||
287 | beq arm946_dma_clean_range | ||
288 | bcs arm946_dma_inv_range | ||
289 | b arm946_dma_flush_range | ||
290 | ENDPROC(arm946_dma_map_area) | ||
291 | |||
292 | /* | ||
293 | * dma_unmap_area(start, size, dir) | ||
294 | * - start - kernel virtual start address | ||
295 | * - size - size of region | ||
296 | * - dir - DMA direction | ||
297 | */ | ||
298 | ENTRY(arm946_dma_unmap_area) | ||
299 | mov pc, lr | ||
300 | ENDPROC(arm946_dma_unmap_area) | ||
301 | |||
278 | ENTRY(arm946_cache_fns) | 302 | ENTRY(arm946_cache_fns) |
279 | .long arm946_flush_kern_cache_all | 303 | .long arm946_flush_kern_cache_all |
280 | .long arm946_flush_user_cache_all | 304 | .long arm946_flush_user_cache_all |
@@ -282,8 +306,8 @@ ENTRY(arm946_cache_fns) | |||
282 | .long arm946_coherent_kern_range | 306 | .long arm946_coherent_kern_range |
283 | .long arm946_coherent_user_range | 307 | .long arm946_coherent_user_range |
284 | .long arm946_flush_kern_dcache_area | 308 | .long arm946_flush_kern_dcache_area |
285 | .long arm946_dma_inv_range | 309 | .long arm946_dma_map_area |
286 | .long arm946_dma_clean_range | 310 | .long arm946_dma_unmap_area |
287 | .long arm946_dma_flush_range | 311 | .long arm946_dma_flush_range |
288 | 312 | ||
289 | 313 | ||
diff --git a/arch/arm/mm/proc-feroceon.S b/arch/arm/mm/proc-feroceon.S index dbc39383e66a..53e632343849 100644 --- a/arch/arm/mm/proc-feroceon.S +++ b/arch/arm/mm/proc-feroceon.S | |||
@@ -274,7 +274,7 @@ ENTRY(feroceon_range_flush_kern_dcache_area) | |||
274 | * (same as v4wb) | 274 | * (same as v4wb) |
275 | */ | 275 | */ |
276 | .align 5 | 276 | .align 5 |
277 | ENTRY(feroceon_dma_inv_range) | 277 | feroceon_dma_inv_range: |
278 | tst r0, #CACHE_DLINESIZE - 1 | 278 | tst r0, #CACHE_DLINESIZE - 1 |
279 | bic r0, r0, #CACHE_DLINESIZE - 1 | 279 | bic r0, r0, #CACHE_DLINESIZE - 1 |
280 | mcrne p15, 0, r0, c7, c10, 1 @ clean D entry | 280 | mcrne p15, 0, r0, c7, c10, 1 @ clean D entry |
@@ -288,7 +288,7 @@ ENTRY(feroceon_dma_inv_range) | |||
288 | mov pc, lr | 288 | mov pc, lr |
289 | 289 | ||
290 | .align 5 | 290 | .align 5 |
291 | ENTRY(feroceon_range_dma_inv_range) | 291 | feroceon_range_dma_inv_range: |
292 | mrs r2, cpsr | 292 | mrs r2, cpsr |
293 | tst r0, #CACHE_DLINESIZE - 1 | 293 | tst r0, #CACHE_DLINESIZE - 1 |
294 | mcrne p15, 0, r0, c7, c10, 1 @ clean D entry | 294 | mcrne p15, 0, r0, c7, c10, 1 @ clean D entry |
@@ -314,7 +314,7 @@ ENTRY(feroceon_range_dma_inv_range) | |||
314 | * (same as v4wb) | 314 | * (same as v4wb) |
315 | */ | 315 | */ |
316 | .align 5 | 316 | .align 5 |
317 | ENTRY(feroceon_dma_clean_range) | 317 | feroceon_dma_clean_range: |
318 | bic r0, r0, #CACHE_DLINESIZE - 1 | 318 | bic r0, r0, #CACHE_DLINESIZE - 1 |
319 | 1: mcr p15, 0, r0, c7, c10, 1 @ clean D entry | 319 | 1: mcr p15, 0, r0, c7, c10, 1 @ clean D entry |
320 | add r0, r0, #CACHE_DLINESIZE | 320 | add r0, r0, #CACHE_DLINESIZE |
@@ -324,7 +324,7 @@ ENTRY(feroceon_dma_clean_range) | |||
324 | mov pc, lr | 324 | mov pc, lr |
325 | 325 | ||
326 | .align 5 | 326 | .align 5 |
327 | ENTRY(feroceon_range_dma_clean_range) | 327 | feroceon_range_dma_clean_range: |
328 | mrs r2, cpsr | 328 | mrs r2, cpsr |
329 | cmp r1, r0 | 329 | cmp r1, r0 |
330 | subne r1, r1, #1 @ top address is inclusive | 330 | subne r1, r1, #1 @ top address is inclusive |
@@ -367,6 +367,44 @@ ENTRY(feroceon_range_dma_flush_range) | |||
367 | mcr p15, 0, r0, c7, c10, 4 @ drain WB | 367 | mcr p15, 0, r0, c7, c10, 4 @ drain WB |
368 | mov pc, lr | 368 | mov pc, lr |
369 | 369 | ||
370 | /* | ||
371 | * dma_map_area(start, size, dir) | ||
372 | * - start - kernel virtual start address | ||
373 | * - size - size of region | ||
374 | * - dir - DMA direction | ||
375 | */ | ||
376 | ENTRY(feroceon_dma_map_area) | ||
377 | add r1, r1, r0 | ||
378 | cmp r2, #DMA_TO_DEVICE | ||
379 | beq feroceon_dma_clean_range | ||
380 | bcs feroceon_dma_inv_range | ||
381 | b feroceon_dma_flush_range | ||
382 | ENDPROC(feroceon_dma_map_area) | ||
383 | |||
384 | /* | ||
385 | * dma_map_area(start, size, dir) | ||
386 | * - start - kernel virtual start address | ||
387 | * - size - size of region | ||
388 | * - dir - DMA direction | ||
389 | */ | ||
390 | ENTRY(feroceon_range_dma_map_area) | ||
391 | add r1, r1, r0 | ||
392 | cmp r2, #DMA_TO_DEVICE | ||
393 | beq feroceon_range_dma_clean_range | ||
394 | bcs feroceon_range_dma_inv_range | ||
395 | b feroceon_range_dma_flush_range | ||
396 | ENDPROC(feroceon_range_dma_map_area) | ||
397 | |||
398 | /* | ||
399 | * dma_unmap_area(start, size, dir) | ||
400 | * - start - kernel virtual start address | ||
401 | * - size - size of region | ||
402 | * - dir - DMA direction | ||
403 | */ | ||
404 | ENTRY(feroceon_dma_unmap_area) | ||
405 | mov pc, lr | ||
406 | ENDPROC(feroceon_dma_unmap_area) | ||
407 | |||
370 | ENTRY(feroceon_cache_fns) | 408 | ENTRY(feroceon_cache_fns) |
371 | .long feroceon_flush_kern_cache_all | 409 | .long feroceon_flush_kern_cache_all |
372 | .long feroceon_flush_user_cache_all | 410 | .long feroceon_flush_user_cache_all |
@@ -374,8 +412,8 @@ ENTRY(feroceon_cache_fns) | |||
374 | .long feroceon_coherent_kern_range | 412 | .long feroceon_coherent_kern_range |
375 | .long feroceon_coherent_user_range | 413 | .long feroceon_coherent_user_range |
376 | .long feroceon_flush_kern_dcache_area | 414 | .long feroceon_flush_kern_dcache_area |
377 | .long feroceon_dma_inv_range | 415 | .long feroceon_dma_map_area |
378 | .long feroceon_dma_clean_range | 416 | .long feroceon_dma_unmap_area |
379 | .long feroceon_dma_flush_range | 417 | .long feroceon_dma_flush_range |
380 | 418 | ||
381 | ENTRY(feroceon_range_cache_fns) | 419 | ENTRY(feroceon_range_cache_fns) |
@@ -385,8 +423,8 @@ ENTRY(feroceon_range_cache_fns) | |||
385 | .long feroceon_coherent_kern_range | 423 | .long feroceon_coherent_kern_range |
386 | .long feroceon_coherent_user_range | 424 | .long feroceon_coherent_user_range |
387 | .long feroceon_range_flush_kern_dcache_area | 425 | .long feroceon_range_flush_kern_dcache_area |
388 | .long feroceon_range_dma_inv_range | 426 | .long feroceon_range_dma_map_area |
389 | .long feroceon_range_dma_clean_range | 427 | .long feroceon_dma_unmap_area |
390 | .long feroceon_range_dma_flush_range | 428 | .long feroceon_range_dma_flush_range |
391 | 429 | ||
392 | .align 5 | 430 | .align 5 |
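Each new *_dma_map_area entry first turns the (start, size) pair into the (start, end) pair the old range routines expect, then branches on the direction argument. A rough C equivalent of feroceon_dma_map_area above (a sketch of the assembly, not code from the patch; direction constants per the generic DMA API):

    void feroceon_dma_map_area(const void *start, size_t size, int dir)
    {
            const void *end = start + size;         /* add r1, r1, r0 */

            if (dir == DMA_TO_DEVICE)               /* beq */
                    feroceon_dma_clean_range(start, end);
            else if (dir > DMA_TO_DEVICE)           /* bcs: DMA_FROM_DEVICE */
                    feroceon_dma_inv_range(start, end);
            else                                    /* DMA_BIDIRECTIONAL */
                    feroceon_dma_flush_range(start, end);
    }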
diff --git a/arch/arm/mm/proc-mohawk.S b/arch/arm/mm/proc-mohawk.S index 9674d36cc97d..caa31154e7db 100644 --- a/arch/arm/mm/proc-mohawk.S +++ b/arch/arm/mm/proc-mohawk.S | |||
@@ -218,7 +218,7 @@ ENTRY(mohawk_flush_kern_dcache_area) | |||
218 | * | 218 | * |
219 | * (same as v4wb) | 219 | * (same as v4wb) |
220 | */ | 220 | */ |
221 | ENTRY(mohawk_dma_inv_range) | 221 | mohawk_dma_inv_range: |
222 | tst r0, #CACHE_DLINESIZE - 1 | 222 | tst r0, #CACHE_DLINESIZE - 1 |
223 | mcrne p15, 0, r0, c7, c10, 1 @ clean D entry | 223 | mcrne p15, 0, r0, c7, c10, 1 @ clean D entry |
224 | tst r1, #CACHE_DLINESIZE - 1 | 224 | tst r1, #CACHE_DLINESIZE - 1 |
@@ -241,7 +241,7 @@ ENTRY(mohawk_dma_inv_range) | |||
241 | * | 241 | * |
242 | * (same as v4wb) | 242 | * (same as v4wb) |
243 | */ | 243 | */ |
244 | ENTRY(mohawk_dma_clean_range) | 244 | mohawk_dma_clean_range: |
245 | bic r0, r0, #CACHE_DLINESIZE - 1 | 245 | bic r0, r0, #CACHE_DLINESIZE - 1 |
246 | 1: mcr p15, 0, r0, c7, c10, 1 @ clean D entry | 246 | 1: mcr p15, 0, r0, c7, c10, 1 @ clean D entry |
247 | add r0, r0, #CACHE_DLINESIZE | 247 | add r0, r0, #CACHE_DLINESIZE |
@@ -268,6 +268,30 @@ ENTRY(mohawk_dma_flush_range) | |||
268 | mcr p15, 0, r0, c7, c10, 4 @ drain WB | 268 | mcr p15, 0, r0, c7, c10, 4 @ drain WB |
269 | mov pc, lr | 269 | mov pc, lr |
270 | 270 | ||
271 | /* | ||
272 | * dma_map_area(start, size, dir) | ||
273 | * - start - kernel virtual start address | ||
274 | * - size - size of region | ||
275 | * - dir - DMA direction | ||
276 | */ | ||
277 | ENTRY(mohawk_dma_map_area) | ||
278 | add r1, r1, r0 | ||
279 | cmp r2, #DMA_TO_DEVICE | ||
280 | beq mohawk_dma_clean_range | ||
281 | bcs mohawk_dma_inv_range | ||
282 | b mohawk_dma_flush_range | ||
283 | ENDPROC(mohawk_dma_map_area) | ||
284 | |||
285 | /* | ||
286 | * dma_unmap_area(start, size, dir) | ||
287 | * - start - kernel virtual start address | ||
288 | * - size - size of region | ||
289 | * - dir - DMA direction | ||
290 | */ | ||
291 | ENTRY(mohawk_dma_unmap_area) | ||
292 | mov pc, lr | ||
293 | ENDPROC(mohawk_dma_unmap_area) | ||
294 | |||
271 | ENTRY(mohawk_cache_fns) | 295 | ENTRY(mohawk_cache_fns) |
272 | .long mohawk_flush_kern_cache_all | 296 | .long mohawk_flush_kern_cache_all |
273 | .long mohawk_flush_user_cache_all | 297 | .long mohawk_flush_user_cache_all |
@@ -275,8 +299,8 @@ ENTRY(mohawk_cache_fns) | |||
275 | .long mohawk_coherent_kern_range | 299 | .long mohawk_coherent_kern_range |
276 | .long mohawk_coherent_user_range | 300 | .long mohawk_coherent_user_range |
277 | .long mohawk_flush_kern_dcache_area | 301 | .long mohawk_flush_kern_dcache_area |
278 | .long mohawk_dma_inv_range | 302 | .long mohawk_dma_map_area |
279 | .long mohawk_dma_clean_range | 303 | .long mohawk_dma_unmap_area |
280 | .long mohawk_dma_flush_range | 304 | .long mohawk_dma_flush_range |
281 | 305 | ||
282 | ENTRY(cpu_mohawk_dcache_clean_area) | 306 | ENTRY(cpu_mohawk_dcache_clean_area) |
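As with arm946 and feroceon, mohawk_dma_unmap_area is a bare mov pc, lr: these cores do not speculatively fetch into the data cache, so all maintenance can be done up front in dma_map_area and nothing is left to undo at unmap time. In C terms the hook is an intentional no-op (sketch only; the rationale is inferred from the surrounding series, not stated in this hunk):

    #include <stddef.h>

    /* Intentional no-op: every clean/invalidate needed for the transfer
     * was already issued by mohawk_dma_map_area before the DMA started. */
    void mohawk_dma_unmap_area(const void *start, size_t size, int dir)
    {
            (void)start;
            (void)size;
            (void)dir;
    }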
diff --git a/arch/arm/mm/proc-xsc3.S b/arch/arm/mm/proc-xsc3.S index 8e4f6dca8997..e5797f1c1db7 100644 --- a/arch/arm/mm/proc-xsc3.S +++ b/arch/arm/mm/proc-xsc3.S | |||
@@ -257,7 +257,7 @@ ENTRY(xsc3_flush_kern_dcache_area) | |||
257 | * - start - virtual start address | 257 | * - start - virtual start address |
258 | * - end - virtual end address | 258 | * - end - virtual end address |
259 | */ | 259 | */ |
260 | ENTRY(xsc3_dma_inv_range) | 260 | xsc3_dma_inv_range: |
261 | tst r0, #CACHELINESIZE - 1 | 261 | tst r0, #CACHELINESIZE - 1 |
262 | bic r0, r0, #CACHELINESIZE - 1 | 262 | bic r0, r0, #CACHELINESIZE - 1 |
263 | mcrne p15, 0, r0, c7, c10, 1 @ clean L1 D line | 263 | mcrne p15, 0, r0, c7, c10, 1 @ clean L1 D line |
@@ -278,7 +278,7 @@ ENTRY(xsc3_dma_inv_range) | |||
278 | * - start - virtual start address | 278 | * - start - virtual start address |
279 | * - end - virtual end address | 279 | * - end - virtual end address |
280 | */ | 280 | */ |
281 | ENTRY(xsc3_dma_clean_range) | 281 | xsc3_dma_clean_range: |
282 | bic r0, r0, #CACHELINESIZE - 1 | 282 | bic r0, r0, #CACHELINESIZE - 1 |
283 | 1: mcr p15, 0, r0, c7, c10, 1 @ clean L1 D line | 283 | 1: mcr p15, 0, r0, c7, c10, 1 @ clean L1 D line |
284 | add r0, r0, #CACHELINESIZE | 284 | add r0, r0, #CACHELINESIZE |
@@ -304,6 +304,30 @@ ENTRY(xsc3_dma_flush_range) | |||
304 | mcr p15, 0, r0, c7, c10, 4 @ data write barrier | 304 | mcr p15, 0, r0, c7, c10, 4 @ data write barrier |
305 | mov pc, lr | 305 | mov pc, lr |
306 | 306 | ||
307 | /* | ||
308 | * dma_map_area(start, size, dir) | ||
309 | * - start - kernel virtual start address | ||
310 | * - size - size of region | ||
311 | * - dir - DMA direction | ||
312 | */ | ||
313 | ENTRY(xsc3_dma_map_area) | ||
314 | add r1, r1, r0 | ||
315 | cmp r2, #DMA_TO_DEVICE | ||
316 | beq xsc3_dma_clean_range | ||
317 | bcs xsc3_dma_inv_range | ||
318 | b xsc3_dma_flush_range | ||
319 | ENDPROC(xsc3_dma_map_area) | ||
320 | |||
321 | /* | ||
322 | * dma_unmap_area(start, size, dir) | ||
323 | * - start - kernel virtual start address | ||
324 | * - size - size of region | ||
325 | * - dir - DMA direction | ||
326 | */ | ||
327 | ENTRY(xsc3_dma_unmap_area) | ||
328 | mov pc, lr | ||
329 | ENDPROC(xsc3_dma_unmap_area) | ||
330 | |||
307 | ENTRY(xsc3_cache_fns) | 331 | ENTRY(xsc3_cache_fns) |
308 | .long xsc3_flush_kern_cache_all | 332 | .long xsc3_flush_kern_cache_all |
309 | .long xsc3_flush_user_cache_all | 333 | .long xsc3_flush_user_cache_all |
@@ -311,8 +335,8 @@ ENTRY(xsc3_cache_fns) | |||
311 | .long xsc3_coherent_kern_range | 335 | .long xsc3_coherent_kern_range |
312 | .long xsc3_coherent_user_range | 336 | .long xsc3_coherent_user_range |
313 | .long xsc3_flush_kern_dcache_area | 337 | .long xsc3_flush_kern_dcache_area |
314 | .long xsc3_dma_inv_range | 338 | .long xsc3_dma_map_area |
315 | .long xsc3_dma_clean_range | 339 | .long xsc3_dma_unmap_area |
316 | .long xsc3_dma_flush_range | 340 | .long xsc3_dma_flush_range |
317 | 341 | ||
318 | ENTRY(cpu_xsc3_dcache_clean_area) | 342 | ENTRY(cpu_xsc3_dcache_clean_area) |
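The cmp r2, #DMA_TO_DEVICE / beq / bcs sequence used by every map routine leans on the numeric ordering of the generic DMA direction constants. A standalone illustration of that three-way split (enum values as defined by the generic DMA API, DMA_NONE omitted; the printf dispatch mirrors the branches):

    #include <stdio.h>

    enum dma_data_direction {
            DMA_BIDIRECTIONAL = 0,  /* falls through -> *_dma_flush_range */
            DMA_TO_DEVICE     = 1,  /* beq           -> *_dma_clean_range */
            DMA_FROM_DEVICE   = 2,  /* bcs           -> *_dma_inv_range   */
    };

    int main(void)
    {
            for (int dir = DMA_BIDIRECTIONAL; dir <= DMA_FROM_DEVICE; dir++)
                    printf("dir=%d -> %s\n", dir,
                           dir == DMA_TO_DEVICE ? "clean" :
                           dir >  DMA_TO_DEVICE ? "invalidate" : "flush");
            return 0;
    }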
diff --git a/arch/arm/mm/proc-xscale.S b/arch/arm/mm/proc-xscale.S index 93df47265f2d..63037e2162f2 100644 --- a/arch/arm/mm/proc-xscale.S +++ b/arch/arm/mm/proc-xscale.S | |||
@@ -315,7 +315,7 @@ ENTRY(xscale_flush_kern_dcache_area) | |||
315 | * - start - virtual start address | 315 | * - start - virtual start address |
316 | * - end - virtual end address | 316 | * - end - virtual end address |
317 | */ | 317 | */ |
318 | ENTRY(xscale_dma_inv_range) | 318 | xscale_dma_inv_range: |
319 | tst r0, #CACHELINESIZE - 1 | 319 | tst r0, #CACHELINESIZE - 1 |
320 | bic r0, r0, #CACHELINESIZE - 1 | 320 | bic r0, r0, #CACHELINESIZE - 1 |
321 | mcrne p15, 0, r0, c7, c10, 1 @ clean D entry | 321 | mcrne p15, 0, r0, c7, c10, 1 @ clean D entry |
@@ -336,7 +336,7 @@ ENTRY(xscale_dma_inv_range) | |||
336 | * - start - virtual start address | 336 | * - start - virtual start address |
337 | * - end - virtual end address | 337 | * - end - virtual end address |
338 | */ | 338 | */ |
339 | ENTRY(xscale_dma_clean_range) | 339 | xscale_dma_clean_range: |
340 | bic r0, r0, #CACHELINESIZE - 1 | 340 | bic r0, r0, #CACHELINESIZE - 1 |
341 | 1: mcr p15, 0, r0, c7, c10, 1 @ clean D entry | 341 | 1: mcr p15, 0, r0, c7, c10, 1 @ clean D entry |
342 | add r0, r0, #CACHELINESIZE | 342 | add r0, r0, #CACHELINESIZE |
@@ -363,6 +363,43 @@ ENTRY(xscale_dma_flush_range) | |||
363 | mcr p15, 0, r0, c7, c10, 4 @ Drain Write (& Fill) Buffer | 363 | mcr p15, 0, r0, c7, c10, 4 @ Drain Write (& Fill) Buffer |
364 | mov pc, lr | 364 | mov pc, lr |
365 | 365 | ||
366 | /* | ||
367 | * dma_map_area(start, size, dir) | ||
368 | * - start - kernel virtual start address | ||
369 | * - size - size of region | ||
370 | * - dir - DMA direction | ||
371 | */ | ||
372 | ENTRY(xscale_dma_map_area) | ||
373 | add r1, r1, r0 | ||
374 | cmp r2, #DMA_TO_DEVICE | ||
375 | beq xscale_dma_clean_range | ||
376 | bcs xscale_dma_inv_range | ||
377 | b xscale_dma_flush_range | ||
378 | ENDPROC(xscale_dma_map_area) | ||
379 | |||
380 | /* | ||
381 | * dma_map_area(start, size, dir) | ||
382 | * - start - kernel virtual start address | ||
383 | * - size - size of region | ||
384 | * - dir - DMA direction | ||
385 | */ | ||
386 | ENTRY(xscale_dma_a0_map_area) | ||
387 | add r1, r1, r0 | ||
388 | teq r2, #DMA_TO_DEVICE | ||
389 | beq xscale_dma_clean_range | ||
390 | b xscale_dma_flush_range | ||
391 | ENDPROC(xscale_dma_a0_map_area) | ||
392 | |||
393 | /* | ||
394 | * dma_unmap_area(start, size, dir) | ||
395 | * - start - kernel virtual start address | ||
396 | * - size - size of region | ||
397 | * - dir - DMA direction | ||
398 | */ | ||
399 | ENTRY(xscale_dma_unmap_area) | ||
400 | mov pc, lr | ||
401 | ENDPROC(xscale_dma_unmap_area) | ||
402 | |||
366 | ENTRY(xscale_cache_fns) | 403 | ENTRY(xscale_cache_fns) |
367 | .long xscale_flush_kern_cache_all | 404 | .long xscale_flush_kern_cache_all |
368 | .long xscale_flush_user_cache_all | 405 | .long xscale_flush_user_cache_all |
@@ -370,8 +407,8 @@ ENTRY(xscale_cache_fns) | |||
370 | .long xscale_coherent_kern_range | 407 | .long xscale_coherent_kern_range |
371 | .long xscale_coherent_user_range | 408 | .long xscale_coherent_user_range |
372 | .long xscale_flush_kern_dcache_area | 409 | .long xscale_flush_kern_dcache_area |
373 | .long xscale_dma_inv_range | 410 | .long xscale_dma_map_area |
374 | .long xscale_dma_clean_range | 411 | .long xscale_dma_unmap_area |
375 | .long xscale_dma_flush_range | 412 | .long xscale_dma_flush_range |
376 | 413 | ||
377 | /* | 414 | /* |
@@ -394,8 +431,8 @@ ENTRY(xscale_80200_A0_A1_cache_fns) | |||
394 | .long xscale_coherent_kern_range | 431 | .long xscale_coherent_kern_range |
395 | .long xscale_coherent_user_range | 432 | .long xscale_coherent_user_range |
396 | .long xscale_flush_kern_dcache_area | 433 | .long xscale_flush_kern_dcache_area |
397 | .long xscale_dma_flush_range | 434 | .long xscale_dma_a0_map_area |
398 | .long xscale_dma_clean_range | 435 | .long xscale_dma_unmap_area |
399 | .long xscale_dma_flush_range | 436 | .long xscale_dma_flush_range |
400 | 437 | ||
401 | ENTRY(cpu_xscale_dcache_clean_area) | 438 | ENTRY(cpu_xscale_dcache_clean_area) |
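The 80200 A0/A1 table gets its own xscale_dma_a0_map_area variant: it only distinguishes DMA_TO_DEVICE (clean) and falls back to a full clean-plus-invalidate for everything else, consistent with the old table, where the invalidate slot already held xscale_dma_flush_range for these steppings. A rough C rendering (sketch of the assembly above, not code from the patch):

    void xscale_dma_a0_map_area(const void *start, size_t size, int dir)
    {
            const void *end = start + size;     /* add r1, r1, r0 */

            if (dir == DMA_TO_DEVICE)           /* teq/beq */
                    xscale_dma_clean_range(start, end);
            else                                /* FROM_DEVICE, BIDIRECTIONAL */
                    xscale_dma_flush_range(start, end); /* never invalidate-only */
    }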