Diffstat (limited to 'arch/arm/mm')
 arch/arm/mm/abort-macro.S |   2
 arch/arm/mm/alignment.c   |  56
 arch/arm/mm/cache-l2x0.c  | 235
 arch/arm/mm/cache-v7.S    |  20
 arch/arm/mm/dma-mapping.c |   8
 arch/arm/mm/fault.c       |   1
 arch/arm/mm/init.c        |  11
 arch/arm/mm/mm.h          |   4
 arch/arm/mm/mmu.c         |  18
 arch/arm/mm/proc-arm920.S |   2
 arch/arm/mm/proc-arm926.S |   2
 arch/arm/mm/proc-arm946.S |   3
 arch/arm/mm/proc-sa1100.S |  10
 arch/arm/mm/proc-v6.S     |  16
 arch/arm/mm/proc-v7.S     |   8
 arch/arm/mm/proc-xsc3.S   |   6
 16 files changed, 357 insertions(+), 45 deletions(-)
diff --git a/arch/arm/mm/abort-macro.S b/arch/arm/mm/abort-macro.S
index 52162d59407a..2cbf68ef0e83 100644
--- a/arch/arm/mm/abort-macro.S
+++ b/arch/arm/mm/abort-macro.S
@@ -17,7 +17,7 @@
 	cmp	\tmp, # 0x5600			@ Is it ldrsb?
 	orreq	\tmp, \tmp, #1 << 11		@ Set L-bit if yes
 	tst	\tmp, #1 << 11			@ L = 0 -> write
-	orreq	\psr, \psr, #1 << 11		@ yes.
+	orreq	\fsr, \fsr, #1 << 11		@ yes.
 	b	do_DataAbort
not_thumb:
 	.endm
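
The one-character fix above is consequential: the Thumb write/read test OR-ed its result into \psr (the saved program status register) instead of \fsr, so the abort handler never saw Thumb stores flagged as writes, and the PSR copy picked up a stray bit. A C model of the classification, illustrative only, assuming op holds the already-masked opcode bits the macro keeps in \tmp:

/* Illustrative model of the macro's test; not kernel code. */
static void classify_thumb_abort(unsigned short op, unsigned int *fsr)
{
	if (op == 0x5600)		/* Is it ldrsb? */
		op |= 1 << 11;		/* Set L-bit if yes */
	if (!(op & (1 << 11)))		/* L = 0 -> write */
		*fsr |= 1 << 11;	/* report a write fault */
}
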
diff --git a/arch/arm/mm/alignment.c b/arch/arm/mm/alignment.c
index be7c638b648b..c335c76e0d88 100644
--- a/arch/arm/mm/alignment.c
+++ b/arch/arm/mm/alignment.c
@@ -22,6 +22,7 @@
 #include <linux/sched.h>
 #include <linux/uaccess.h>
 
+#include <asm/system.h>
 #include <asm/unaligned.h>
 
 #include "fault.h"
@@ -85,6 +86,33 @@ core_param(alignment, ai_usermode, int, 0600);
 #define UM_FIXUP	(1 << 1)
 #define UM_SIGNAL	(1 << 2)
 
+/* Return true if and only if the ARMv6 unaligned access model is in use. */
+static bool cpu_is_v6_unaligned(void)
+{
+	return cpu_architecture() >= CPU_ARCH_ARMv6 && (cr_alignment & CR_U);
+}
+
+static int safe_usermode(int new_usermode, bool warn)
+{
+	/*
+	 * ARMv6 and later CPUs can perform unaligned accesses for
+	 * most single load and store instructions up to word size.
+	 * LDM, STM, LDRD and STRD still need to be handled.
+	 *
+	 * Ignoring the alignment fault is not an option on these
+	 * CPUs since we spin re-faulting the instruction without
+	 * making any progress.
+	 */
+	if (cpu_is_v6_unaligned() && !(new_usermode & (UM_FIXUP | UM_SIGNAL))) {
+		new_usermode |= UM_FIXUP;
+
+		if (warn)
+			printk(KERN_WARNING "alignment: ignoring faults is unsafe on this CPU.  Defaulting to fixup mode.\n");
+	}
+
+	return new_usermode;
+}
+
 #ifdef CONFIG_PROC_FS
 static const char *usermode_action[] = {
 	"ignored",
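
safe_usermode() centralises the rule that "ignore" is not a viable user mode on CPUs using the ARMv6 unaligned-access model: without fixup or signalling, the faulting instruction simply re-faults forever. A hedged sketch of its effect on a v6+ CPU with CR_U set (UM_WARN is assumed to be the (1 << 0) mode bit defined alongside UM_FIXUP and UM_SIGNAL):

/* mode 0 ("ignored") is promoted to fixup, with a warning */
int m0 = safe_usermode(0, true);		/* == UM_FIXUP */
/* "warn" alone gains fixup too */
int m1 = safe_usermode(UM_WARN, true);		/* == UM_WARN | UM_FIXUP */
/* "signal" already makes progress, so it is left untouched */
int m4 = safe_usermode(UM_SIGNAL, true);	/* == UM_SIGNAL */
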
@@ -125,7 +153,7 @@ static ssize_t alignment_proc_write(struct file *file, const char __user *buffer
 		if (get_user(mode, buffer))
 			return -EFAULT;
 		if (mode >= '0' && mode <= '5')
-			ai_usermode = mode - '0';
+			ai_usermode = safe_usermode(mode - '0', true);
 	}
 	return count;
 }
@@ -886,9 +914,16 @@ do_alignment(unsigned long addr, unsigned int fsr, struct pt_regs *regs)
 	if (ai_usermode & UM_FIXUP)
 		goto fixup;
 
-	if (ai_usermode & UM_SIGNAL)
-		force_sig(SIGBUS, current);
-	else {
+	if (ai_usermode & UM_SIGNAL) {
+		siginfo_t si;
+
+		si.si_signo = SIGBUS;
+		si.si_errno = 0;
+		si.si_code = BUS_ADRALN;
+		si.si_addr = (void __user *)addr;
+
+		force_sig_info(si.si_signo, &si, current);
+	} else {
 		/*
 		 * We're about to disable the alignment trap and return to
 		 * user space. But if an interrupt occurs before actually
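
Replacing the bare force_sig() with force_sig_info() hands the task a fully populated siginfo, so a handler can see why and where it faulted. A hedged userspace sketch (assumes /proc/cpu/alignment has been put into a signal mode, and that the compiler actually emits an unaligned multi-word access here):

#include <signal.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>

static void on_sigbus(int sig, siginfo_t *si, void *uc)
{
	/* With the change above, si_code reads BUS_ADRALN and si_addr
	 * points at the misaligned address. */
	printf("SIGBUS: code=%d addr=%p\n", si->si_code, si->si_addr);
	_exit(0);
}

int main(void)
{
	struct sigaction sa;
	static char buf[16];
	volatile unsigned long long *p =
		(volatile unsigned long long *)(buf + 1);	/* misaligned */

	memset(&sa, 0, sizeof(sa));
	sa.sa_sigaction = on_sigbus;
	sa.sa_flags = SA_SIGINFO;
	sigaction(SIGBUS, &sa, NULL);

	return (int)*p;		/* LDRD/LDM-class access may fault here */
}
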
@@ -926,20 +961,11 @@ static int __init alignment_init(void)
 		return -ENOMEM;
 #endif
 
-	/*
-	 * ARMv6 and later CPUs can perform unaligned accesses for
-	 * most single load and store instructions up to word size.
-	 * LDM, STM, LDRD and STRD still need to be handled.
-	 *
-	 * Ignoring the alignment fault is not an option on these
-	 * CPUs since we spin re-faulting the instruction without
-	 * making any progress.
-	 */
-	if (cpu_architecture() >= CPU_ARCH_ARMv6 && (cr_alignment & CR_U)) {
+	if (cpu_is_v6_unaligned()) {
 		cr_alignment &= ~CR_A;
 		cr_no_alignment &= ~CR_A;
 		set_cr(cr_alignment);
-		ai_usermode = UM_FIXUP;
+		ai_usermode = safe_usermode(ai_usermode, false);
 	}
 
 	hook_fault_code(1, do_alignment, SIGBUS, BUS_ADRALN,
diff --git a/arch/arm/mm/cache-l2x0.c b/arch/arm/mm/cache-l2x0.c
index 44c086710d2b..3f9b9980478e 100644
--- a/arch/arm/mm/cache-l2x0.c
+++ b/arch/arm/mm/cache-l2x0.c
@@ -16,9 +16,12 @@
  * along with this program; if not, write to the Free Software
  * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
  */
+#include <linux/err.h>
 #include <linux/init.h>
 #include <linux/spinlock.h>
 #include <linux/io.h>
+#include <linux/of.h>
+#include <linux/of_address.h>
 
 #include <asm/cacheflush.h>
 #include <asm/hardware/cache-l2x0.h>
@@ -30,11 +33,19 @@ static DEFINE_SPINLOCK(l2x0_lock);
 static uint32_t l2x0_way_mask;	/* Bitmask of active ways */
 static uint32_t l2x0_size;
 
+struct l2x0_regs l2x0_saved_regs;
+
+struct l2x0_of_data {
+	void (*setup)(const struct device_node *, __u32 *, __u32 *);
+	void (*save)(void);
+	void (*resume)(void);
+};
+
 static inline void cache_wait_way(void __iomem *reg, unsigned long mask)
 {
 	/* wait for cache operation by line or way to complete */
 	while (readl_relaxed(reg) & mask)
-		;
+		cpu_relax();
 }
 
 #ifdef CONFIG_CACHE_PL310
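
Besides the l2x0_saved_regs save area (used by the resume hooks added further down), the way/line wait loop gains cpu_relax(), which acts as a compiler barrier and lets the core idle politely while polling. A bounded variant, as a sketch only — the cache_wait_way_timeout() helper below is hypothetical and not part of this patch:

/* Hypothetical bounded poll; the kernel's cache_wait_way() spins forever. */
static int cache_wait_way_timeout(void __iomem *reg, unsigned long mask,
				  unsigned int loops)
{
	while (readl_relaxed(reg) & mask) {
		if (!loops--)
			return -ETIMEDOUT;	/* needs <linux/errno.h> */
		cpu_relax();
	}
	return 0;
}
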
@@ -277,6 +288,25 @@ static void l2x0_disable(void)
 	spin_unlock_irqrestore(&l2x0_lock, flags);
 }
 
+static void l2x0_unlock(__u32 cache_id)
+{
+	int lockregs;
+	int i;
+
+	if (cache_id == L2X0_CACHE_ID_PART_L310)
+		lockregs = 8;
+	else
+		/* L210 and unknown types */
+		lockregs = 1;
+
+	for (i = 0; i < lockregs; i++) {
+		writel_relaxed(0x0, l2x0_base + L2X0_LOCKDOWN_WAY_D_BASE +
+			       i * L2X0_LOCKDOWN_STRIDE);
+		writel_relaxed(0x0, l2x0_base + L2X0_LOCKDOWN_WAY_I_BASE +
+			       i * L2X0_LOCKDOWN_STRIDE);
+	}
+}
+
 void __init l2x0_init(void __iomem *base, __u32 aux_val, __u32 aux_mask)
 {
 	__u32 aux;
@@ -328,10 +358,14 @@ void __init l2x0_init(void __iomem *base, __u32 aux_val, __u32 aux_mask)
 	 * accessing the below registers will fault.
 	 */
 	if (!(readl_relaxed(l2x0_base + L2X0_CTRL) & 1)) {
+		/* Make sure that I&D is not locked down when starting */
+		l2x0_unlock(cache_id);
 
 		/* l2x0 controller is disabled */
 		writel_relaxed(aux, l2x0_base + L2X0_AUX_CTRL);
 
+		l2x0_saved_regs.aux_ctrl = aux;
+
 		l2x0_inv_all();
 
 		/* enable L2X0 */
@@ -351,3 +385,202 @@ void __init l2x0_init(void __iomem *base, __u32 aux_val, __u32 aux_mask)
 	printk(KERN_INFO "l2x0: %d ways, CACHE_ID 0x%08x, AUX_CTRL 0x%08x, Cache size: %d B\n",
 			ways, cache_id, aux, l2x0_size);
 }
+
+#ifdef CONFIG_OF
+static void __init l2x0_of_setup(const struct device_node *np,
+				 __u32 *aux_val, __u32 *aux_mask)
+{
+	u32 data[2] = { 0, 0 };
+	u32 tag = 0;
+	u32 dirty = 0;
+	u32 val = 0, mask = 0;
+
+	of_property_read_u32(np, "arm,tag-latency", &tag);
+	if (tag) {
+		mask |= L2X0_AUX_CTRL_TAG_LATENCY_MASK;
+		val |= (tag - 1) << L2X0_AUX_CTRL_TAG_LATENCY_SHIFT;
+	}
+
+	of_property_read_u32_array(np, "arm,data-latency",
+				   data, ARRAY_SIZE(data));
+	if (data[0] && data[1]) {
+		mask |= L2X0_AUX_CTRL_DATA_RD_LATENCY_MASK |
+			L2X0_AUX_CTRL_DATA_WR_LATENCY_MASK;
+		val |= ((data[0] - 1) << L2X0_AUX_CTRL_DATA_RD_LATENCY_SHIFT) |
+		       ((data[1] - 1) << L2X0_AUX_CTRL_DATA_WR_LATENCY_SHIFT);
+	}
+
+	of_property_read_u32(np, "arm,dirty-latency", &dirty);
+	if (dirty) {
+		mask |= L2X0_AUX_CTRL_DIRTY_LATENCY_MASK;
+		val |= (dirty - 1) << L2X0_AUX_CTRL_DIRTY_LATENCY_SHIFT;
+	}
+
+	*aux_val &= ~mask;
+	*aux_val |= val;
+	*aux_mask &= ~mask;
+}
+
+static void __init pl310_of_setup(const struct device_node *np,
+				  __u32 *aux_val, __u32 *aux_mask)
+{
+	u32 data[3] = { 0, 0, 0 };
+	u32 tag[3] = { 0, 0, 0 };
+	u32 filter[2] = { 0, 0 };
+
+	of_property_read_u32_array(np, "arm,tag-latency", tag, ARRAY_SIZE(tag));
+	if (tag[0] && tag[1] && tag[2])
+		writel_relaxed(
+			((tag[0] - 1) << L2X0_LATENCY_CTRL_RD_SHIFT) |
+			((tag[1] - 1) << L2X0_LATENCY_CTRL_WR_SHIFT) |
+			((tag[2] - 1) << L2X0_LATENCY_CTRL_SETUP_SHIFT),
+			l2x0_base + L2X0_TAG_LATENCY_CTRL);
+
+	of_property_read_u32_array(np, "arm,data-latency",
+				   data, ARRAY_SIZE(data));
+	if (data[0] && data[1] && data[2])
+		writel_relaxed(
+			((data[0] - 1) << L2X0_LATENCY_CTRL_RD_SHIFT) |
+			((data[1] - 1) << L2X0_LATENCY_CTRL_WR_SHIFT) |
+			((data[2] - 1) << L2X0_LATENCY_CTRL_SETUP_SHIFT),
+			l2x0_base + L2X0_DATA_LATENCY_CTRL);
+
+	of_property_read_u32_array(np, "arm,filter-ranges",
+				   filter, ARRAY_SIZE(filter));
+	if (filter[1]) {
+		writel_relaxed(ALIGN(filter[0] + filter[1], SZ_1M),
+			       l2x0_base + L2X0_ADDR_FILTER_END);
+		writel_relaxed((filter[0] & ~(SZ_1M - 1)) | L2X0_ADDR_FILTER_EN,
+			       l2x0_base + L2X0_ADDR_FILTER_START);
+	}
+}
+
+static void __init pl310_save(void)
+{
+	u32 l2x0_revision = readl_relaxed(l2x0_base + L2X0_CACHE_ID) &
+		L2X0_CACHE_ID_RTL_MASK;
+
+	l2x0_saved_regs.tag_latency = readl_relaxed(l2x0_base +
+		L2X0_TAG_LATENCY_CTRL);
+	l2x0_saved_regs.data_latency = readl_relaxed(l2x0_base +
+		L2X0_DATA_LATENCY_CTRL);
+	l2x0_saved_regs.filter_end = readl_relaxed(l2x0_base +
+		L2X0_ADDR_FILTER_END);
+	l2x0_saved_regs.filter_start = readl_relaxed(l2x0_base +
+		L2X0_ADDR_FILTER_START);
+
+	if (l2x0_revision >= L2X0_CACHE_ID_RTL_R2P0) {
+		/*
+		 * From r2p0, there is Prefetch offset/control register
+		 */
+		l2x0_saved_regs.prefetch_ctrl = readl_relaxed(l2x0_base +
+			L2X0_PREFETCH_CTRL);
+		/*
+		 * From r3p0, there is Power control register
+		 */
+		if (l2x0_revision >= L2X0_CACHE_ID_RTL_R3P0)
+			l2x0_saved_regs.pwr_ctrl = readl_relaxed(l2x0_base +
+				L2X0_POWER_CTRL);
+	}
+}
+
+static void l2x0_resume(void)
+{
+	if (!(readl_relaxed(l2x0_base + L2X0_CTRL) & 1)) {
+		/* restore aux ctrl and enable l2 */
+		l2x0_unlock(readl_relaxed(l2x0_base + L2X0_CACHE_ID));
+
+		writel_relaxed(l2x0_saved_regs.aux_ctrl, l2x0_base +
+			L2X0_AUX_CTRL);
+
+		l2x0_inv_all();
+
+		writel_relaxed(1, l2x0_base + L2X0_CTRL);
+	}
+}
+
+static void pl310_resume(void)
+{
+	u32 l2x0_revision;
+
+	if (!(readl_relaxed(l2x0_base + L2X0_CTRL) & 1)) {
+		/* restore pl310 setup */
+		writel_relaxed(l2x0_saved_regs.tag_latency,
+			l2x0_base + L2X0_TAG_LATENCY_CTRL);
+		writel_relaxed(l2x0_saved_regs.data_latency,
+			l2x0_base + L2X0_DATA_LATENCY_CTRL);
+		writel_relaxed(l2x0_saved_regs.filter_end,
+			l2x0_base + L2X0_ADDR_FILTER_END);
+		writel_relaxed(l2x0_saved_regs.filter_start,
+			l2x0_base + L2X0_ADDR_FILTER_START);
+
+		l2x0_revision = readl_relaxed(l2x0_base + L2X0_CACHE_ID) &
+			L2X0_CACHE_ID_RTL_MASK;
+
+		if (l2x0_revision >= L2X0_CACHE_ID_RTL_R2P0) {
+			writel_relaxed(l2x0_saved_regs.prefetch_ctrl,
+				l2x0_base + L2X0_PREFETCH_CTRL);
+			if (l2x0_revision >= L2X0_CACHE_ID_RTL_R3P0)
+				writel_relaxed(l2x0_saved_regs.pwr_ctrl,
+					l2x0_base + L2X0_POWER_CTRL);
+		}
+	}
+
+	l2x0_resume();
+}
+
+static const struct l2x0_of_data pl310_data = {
+	pl310_of_setup,
+	pl310_save,
+	pl310_resume,
+};
+
+static const struct l2x0_of_data l2x0_data = {
+	l2x0_of_setup,
+	NULL,
+	l2x0_resume,
+};
+
+static const struct of_device_id l2x0_ids[] __initconst = {
+	{ .compatible = "arm,pl310-cache", .data = (void *)&pl310_data },
+	{ .compatible = "arm,l220-cache", .data = (void *)&l2x0_data },
+	{ .compatible = "arm,l210-cache", .data = (void *)&l2x0_data },
+	{}
+};
+
+int __init l2x0_of_init(__u32 aux_val, __u32 aux_mask)
+{
+	struct device_node *np;
+	struct l2x0_of_data *data;
+	struct resource res;
+
+	np = of_find_matching_node(NULL, l2x0_ids);
+	if (!np)
+		return -ENODEV;
+
+	if (of_address_to_resource(np, 0, &res))
+		return -ENODEV;
+
+	l2x0_base = ioremap(res.start, resource_size(&res));
+	if (!l2x0_base)
+		return -ENOMEM;
+
+	l2x0_saved_regs.phy_base = res.start;
+
+	data = of_match_node(l2x0_ids, np)->data;
+
+	/* L2 configuration can only be changed if the cache is disabled */
+	if (!(readl_relaxed(l2x0_base + L2X0_CTRL) & 1)) {
+		if (data->setup)
+			data->setup(np, &aux_val, &aux_mask);
+	}
+
+	if (data->save)
+		data->save();
+
+	l2x0_init(l2x0_base, aux_val, aux_mask);
+
+	outer_cache.resume = data->resume;
+	return 0;
+}
+#endif
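
With l2x0_of_init() in place, a DT platform can probe, configure and register the outer cache in one call: the aux_val/aux_mask arguments are merged with whatever the "arm,tag-latency", "arm,data-latency", "arm,dirty-latency" and "arm,filter-ranges" properties request, and the matched entry's save/resume hooks are wired into outer_cache. A sketch of a caller — the board name and init hook are hypothetical:

/* Hypothetical machine init bringing up the L2 from the device tree. */
#include <asm/hardware/cache-l2x0.h>

static void __init myboard_init_machine(void)
{
	/* ... register platform devices ... */

#ifdef CONFIG_CACHE_L2X0
	/* aux_val = 0, aux_mask = ~0: keep hardware/DT defaults unchanged */
	l2x0_of_init(0, ~0UL);
#endif
}
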
diff --git a/arch/arm/mm/cache-v7.S b/arch/arm/mm/cache-v7.S
index 3b24bfa3b828..07c4bc8ea0a4 100644
--- a/arch/arm/mm/cache-v7.S
+++ b/arch/arm/mm/cache-v7.S
@@ -174,6 +174,10 @@ ENTRY(v7_coherent_user_range)
 	dcache_line_size r2, r3
 	sub	r3, r2, #1
 	bic	r12, r0, r3
+#ifdef CONFIG_ARM_ERRATA_764369
+	ALT_SMP(W(dsb))
+	ALT_UP(W(nop))
+#endif
1:
 USER(	mcr	p15, 0, r12, c7, c11, 1	)	@ clean D line to the point of unification
 	add	r12, r12, r2
@@ -223,6 +227,10 @@ ENTRY(v7_flush_kern_dcache_area)
 	add	r1, r0, r1
 	sub	r3, r2, #1
 	bic	r0, r0, r3
+#ifdef CONFIG_ARM_ERRATA_764369
+	ALT_SMP(W(dsb))
+	ALT_UP(W(nop))
+#endif
1:
 	mcr	p15, 0, r0, c7, c14, 1		@ clean & invalidate D line / unified line
 	add	r0, r0, r2
@@ -247,6 +255,10 @@ v7_dma_inv_range:
 	sub	r3, r2, #1
 	tst	r0, r3
 	bic	r0, r0, r3
+#ifdef CONFIG_ARM_ERRATA_764369
+	ALT_SMP(W(dsb))
+	ALT_UP(W(nop))
+#endif
 	mcrne	p15, 0, r0, c7, c14, 1		@ clean & invalidate D / U line
 
 	tst	r1, r3
@@ -270,6 +282,10 @@ v7_dma_clean_range:
 	dcache_line_size r2, r3
 	sub	r3, r2, #1
 	bic	r0, r0, r3
+#ifdef CONFIG_ARM_ERRATA_764369
+	ALT_SMP(W(dsb))
+	ALT_UP(W(nop))
+#endif
1:
 	mcr	p15, 0, r0, c7, c10, 1		@ clean D / U line
 	add	r0, r0, r2
@@ -288,6 +304,10 @@ ENTRY(v7_dma_flush_range)
 	dcache_line_size r2, r3
 	sub	r3, r2, #1
 	bic	r0, r0, r3
+#ifdef CONFIG_ARM_ERRATA_764369
+	ALT_SMP(W(dsb))
+	ALT_UP(W(nop))
+#endif
1:
 	mcr	p15, 0, r0, c7, c14, 1		@ clean & invalidate D / U line
 	add	r0, r0, r2
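
All five maintenance-by-MVA paths receive the same guarded barrier: on Cortex-A9 MPCore parts affected by erratum 764369, a DSB must precede a cache maintenance operation by MVA for it to reliably observe a prior store to the same line; ALT_SMP/ALT_UP patches the barrier down to a NOP on uniprocessor kernels. The C-level shape of the workaround, as a sketch with a hypothetical helper (the kernel does this in assembly, as above):

/* Conceptual sketch only; clean_dcache_line() is hypothetical. */
static inline void clean_dcache_line(const void *va)
{
#ifdef CONFIG_ARM_ERRATA_764369
	dsb();	/* make the preceding store visible to the maintenance op */
#endif
	asm volatile("mcr p15, 0, %0, c7, c10, 1" : : "r" (va)); /* clean D line */
}
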
diff --git a/arch/arm/mm/dma-mapping.c b/arch/arm/mm/dma-mapping.c
index 0a0a1e7c20d2..235eb775fc78 100644
--- a/arch/arm/mm/dma-mapping.c
+++ b/arch/arm/mm/dma-mapping.c
@@ -123,8 +123,8 @@ static void __dma_free_buffer(struct page *page, size_t size)
 #endif
 
 #define CONSISTENT_OFFSET(x)	(((unsigned long)(x) - CONSISTENT_BASE) >> PAGE_SHIFT)
-#define CONSISTENT_PTE_INDEX(x) (((unsigned long)(x) - CONSISTENT_BASE) >> PGDIR_SHIFT)
-#define NUM_CONSISTENT_PTES (CONSISTENT_DMA_SIZE >> PGDIR_SHIFT)
+#define CONSISTENT_PTE_INDEX(x) (((unsigned long)(x) - CONSISTENT_BASE) >> PMD_SHIFT)
+#define NUM_CONSISTENT_PTES (CONSISTENT_DMA_SIZE >> PMD_SHIFT)
 
 /*
  * These are the page tables (2MB each) covering uncached, DMA consistent allocations
@@ -183,7 +183,7 @@ static int __init consistent_init(void)
 		}
 
 		consistent_pte[i++] = pte;
-		base += (1 << PGDIR_SHIFT);
+		base += PMD_SIZE;
 	} while (base < CONSISTENT_END);
 
 	return ret;
@@ -324,6 +324,8 @@ __dma_alloc(struct device *dev, size_t size, dma_addr_t *handle, gfp_t gfp,
 
 	if (addr)
 		*handle = pfn_to_dma(dev, page_to_pfn(page));
+	else
+		__dma_free_buffer(page, size);
 
 	return addr;
 }
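
Two distinct fixes in dma-mapping.c: the consistent-PTE bookkeeping now uses PMD_SHIFT/PMD_SIZE — identical to PGDIR_SHIFT on classic two-level ARM, so no behavioural change, but correct once the page-table levels diverge — and __dma_alloc() no longer leaks the backing pages when the remap step fails. The unwind pattern, sketched with a hypothetical remap helper:

/* Sketch of the restored error path; remap_uncached() is hypothetical. */
static void *alloc_coherent_sketch(struct device *dev, size_t size, gfp_t gfp)
{
	struct page *page = __dma_alloc_buffer(dev, size, gfp);
	void *addr;

	if (!page)
		return NULL;

	addr = remap_uncached(page, size);
	if (!addr)
		__dma_free_buffer(page, size);	/* the unwind this hunk adds */

	return addr;
}
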
diff --git a/arch/arm/mm/fault.c b/arch/arm/mm/fault.c
index 3b5ea68acbb8..aa33949fef60 100644
--- a/arch/arm/mm/fault.c
+++ b/arch/arm/mm/fault.c
@@ -20,6 +20,7 @@
 #include <linux/highmem.h>
 #include <linux/perf_event.h>
 
+#include <asm/exception.h>
 #include <asm/system.h>
 #include <asm/pgtable.h>
 #include <asm/tlbflush.h>
diff --git a/arch/arm/mm/init.c b/arch/arm/mm/init.c
index 2fee782077c1..f8037ba338ac 100644
--- a/arch/arm/mm/init.c
+++ b/arch/arm/mm/init.c
@@ -298,7 +298,7 @@ static void __init arm_bootmem_free(unsigned long min, unsigned long max_low,
 #ifdef CONFIG_HAVE_ARCH_PFN_VALID
 int pfn_valid(unsigned long pfn)
 {
-	return memblock_is_memory(pfn << PAGE_SHIFT);
+	return memblock_is_memory(__pfn_to_phys(pfn));
 }
 EXPORT_SYMBOL(pfn_valid);
 #endif
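
The old expression computed pfn << PAGE_SHIFT in unsigned long, which truncates on a 32-bit kernel once RAM sits at or above 4GB; __pfn_to_phys() widens to phys_addr_t before shifting. A worked example, assuming 4KB pages and a 64-bit phys_addr_t:

unsigned long pfn = 0x100000;			/* the page at physical 4GB */
unsigned long bad = pfn << PAGE_SHIFT;		/* 0x0 -- wrapped to zero */
phys_addr_t good = __pfn_to_phys(pfn);		/* 0x100000000 -- correct */
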
@@ -441,7 +441,7 @@ static inline int free_area(unsigned long pfn, unsigned long end, char *s)
 static inline void poison_init_mem(void *s, size_t count)
 {
 	u32 *p = (u32 *)s;
-	while ((count = count - 4))
+	for (; count != 0; count -= 4)
 		*p++ = 0xe7fddef0;
 }
 
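
The rewritten loop fixes two defects at once: with while ((count = count - 4)), a count of 4 decrements to 0 before the body runs — in general the old loop always wrote one word too few — and a count of 0 wraps the size_t to SIZE_MAX - 3, spraying 0xe7fddef0 (an undefined instruction, so stray jumps into freed init memory trap) far past the buffer. A runnable demonstration of the short-write half:

#include <stdio.h>

int main(void)
{
	unsigned int words = 0, count = 8;	/* expect 2 poisoned words */

	while ((count = count - 4))		/* the old loop shape */
		words++;
	printf("old loop poisoned %u of 2 words\n", words);	/* prints 1 */
	return 0;
}
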
@@ -496,6 +496,13 @@ static void __init free_unused_memmap(struct meminfo *mi)
 		 */
 		bank_start = min(bank_start,
 				 ALIGN(prev_bank_end, PAGES_PER_SECTION));
+#else
+		/*
+		 * Align down here since the VM subsystem insists that the
+		 * memmap entries are valid from the bank start aligned to
+		 * MAX_ORDER_NR_PAGES.
+		 */
+		bank_start = round_down(bank_start, MAX_ORDER_NR_PAGES);
 #endif
 		/*
 		 * If we had a previous bank, and there is a space
diff --git a/arch/arm/mm/mm.h b/arch/arm/mm/mm.h
index 010566799c80..ad7cce3bc431 100644
--- a/arch/arm/mm/mm.h
+++ b/arch/arm/mm/mm.h
@@ -12,8 +12,8 @@ static inline pmd_t *pmd_off_k(unsigned long virt)
 
 struct mem_type {
 	pteval_t prot_pte;
-	unsigned int prot_l1;
-	unsigned int prot_sect;
+	pmdval_t prot_l1;
+	pmdval_t prot_sect;
 	unsigned int domain;
 };
 
diff --git a/arch/arm/mm/mmu.c b/arch/arm/mm/mmu.c
index 594d677b92c8..226f1804be12 100644
--- a/arch/arm/mm/mmu.c
+++ b/arch/arm/mm/mmu.c
@@ -60,7 +60,7 @@ EXPORT_SYMBOL(pgprot_kernel);
 struct cachepolicy {
 	const char	policy[16];
 	unsigned int	cr_mask;
-	unsigned int	pmd;
+	pmdval_t	pmd;
 	pteval_t	pte;
 };
 
@@ -288,7 +288,7 @@ static void __init build_mem_type_table(void)
 {
 	struct cachepolicy *cp;
 	unsigned int cr = get_cr();
-	unsigned int user_pgprot, kern_pgprot, vecs_pgprot;
+	pteval_t user_pgprot, kern_pgprot, vecs_pgprot;
 	int cpu_arch = cpu_architecture();
 	int i;
 
@@ -863,14 +863,14 @@ static inline void prepare_page_table(void)
 	/*
 	 * Clear out all the mappings below the kernel image.
 	 */
-	for (addr = 0; addr < MODULES_VADDR; addr += PGDIR_SIZE)
+	for (addr = 0; addr < MODULES_VADDR; addr += PMD_SIZE)
 		pmd_clear(pmd_off_k(addr));
 
 #ifdef CONFIG_XIP_KERNEL
 	/* The XIP kernel is mapped in the module area -- skip over it */
-	addr = ((unsigned long)_etext + PGDIR_SIZE - 1) & PGDIR_MASK;
+	addr = ((unsigned long)_etext + PMD_SIZE - 1) & PMD_MASK;
 #endif
-	for ( ; addr < PAGE_OFFSET; addr += PGDIR_SIZE)
+	for ( ; addr < PAGE_OFFSET; addr += PMD_SIZE)
 		pmd_clear(pmd_off_k(addr));
 
 	/*
@@ -885,10 +885,12 @@ static inline void prepare_page_table(void)
 	 * memory bank, up to the end of the vmalloc region.
 	 */
 	for (addr = __phys_to_virt(end);
-	     addr < VMALLOC_END; addr += PGDIR_SIZE)
+	     addr < VMALLOC_END; addr += PMD_SIZE)
 		pmd_clear(pmd_off_k(addr));
 }
 
+#define SWAPPER_PG_DIR_SIZE	(PTRS_PER_PGD * sizeof(pgd_t))
+
 /*
  * Reserve the special regions of memory
  */
@@ -898,7 +900,7 @@ void __init arm_mm_memblock_reserve(void)
 	 * Reserve the page tables.  These are already in use,
 	 * and can only be in node 0.
 	 */
-	memblock_reserve(__pa(swapper_pg_dir), PTRS_PER_PGD * sizeof(pgd_t));
+	memblock_reserve(__pa(swapper_pg_dir), SWAPPER_PG_DIR_SIZE);
 
 #ifdef CONFIG_SA1111
 	/*
@@ -926,7 +928,7 @@ static void __init devicemaps_init(struct machine_desc *mdesc)
 	 */
 	vectors_page = early_alloc(PAGE_SIZE);
 
-	for (addr = VMALLOC_END; addr; addr += PGDIR_SIZE)
+	for (addr = VMALLOC_END; addr; addr += PMD_SIZE)
 		pmd_clear(pmd_off_k(addr));
 
 	/*
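
Naming the reservation documents its size. On classic two-level ARM, PTRS_PER_PGD is 2048 and each pgd_t wraps a pair of first-level entries (8 bytes), so the macro works out to the architectural 16KB L1 translation table:

/* Worked example for classic (non-LPAE) ARM:
 * SWAPPER_PG_DIR_SIZE = PTRS_PER_PGD * sizeof(pgd_t)
 *                     = 2048 * 8 = 16384 bytes (the 16KB L1 table) */
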
diff --git a/arch/arm/mm/proc-arm920.S b/arch/arm/mm/proc-arm920.S
index 92bd102e3982..2e6849b41f66 100644
--- a/arch/arm/mm/proc-arm920.S
+++ b/arch/arm/mm/proc-arm920.S
@@ -379,7 +379,7 @@ ENTRY(cpu_arm920_set_pte_ext)
 
 /* Suspend/resume support: taken from arch/arm/plat-s3c24xx/sleep.S */
 .globl	cpu_arm920_suspend_size
-.equ	cpu_arm920_suspend_size, 4 * 3
+.equ	cpu_arm920_suspend_size, 4 * 4
 #ifdef CONFIG_PM_SLEEP
 ENTRY(cpu_arm920_do_suspend)
 	stmfd	sp!, {r4 - r7, lr}
diff --git a/arch/arm/mm/proc-arm926.S b/arch/arm/mm/proc-arm926.S
index 2bbcf053dffd..cd8f79c3a282 100644
--- a/arch/arm/mm/proc-arm926.S
+++ b/arch/arm/mm/proc-arm926.S
@@ -394,7 +394,7 @@ ENTRY(cpu_arm926_set_pte_ext)
 
 /* Suspend/resume support: taken from arch/arm/plat-s3c24xx/sleep.S */
 .globl	cpu_arm926_suspend_size
-.equ	cpu_arm926_suspend_size, 4 * 3
+.equ	cpu_arm926_suspend_size, 4 * 4
 #ifdef CONFIG_PM_SLEEP
 ENTRY(cpu_arm926_do_suspend)
 	stmfd	sp!, {r4 - r7, lr}
diff --git a/arch/arm/mm/proc-arm946.S b/arch/arm/mm/proc-arm946.S
index f8f7ea34bfc5..683af3a182b7 100644
--- a/arch/arm/mm/proc-arm946.S
+++ b/arch/arm/mm/proc-arm946.S
@@ -410,6 +410,7 @@ __arm946_proc_info:
 	.long	0x41009460
 	.long	0xff00fff0
 	.long	0
+	.long	0
 	b	__arm946_setup
 	.long	cpu_arch_name
 	.long	cpu_elf_name
@@ -418,6 +419,6 @@ __arm946_proc_info:
 	.long	arm946_processor_functions
 	.long	0
 	.long	0
-	.long	arm940_cache_fns
+	.long	arm946_cache_fns
 	.size	__arm946_proc_info, . - __arm946_proc_info
 
diff --git a/arch/arm/mm/proc-sa1100.S b/arch/arm/mm/proc-sa1100.S
index 07219c2ae114..69e7f2ef7384 100644
--- a/arch/arm/mm/proc-sa1100.S
+++ b/arch/arm/mm/proc-sa1100.S
@@ -182,11 +182,11 @@ ENDPROC(cpu_sa1100_do_suspend)
 
 ENTRY(cpu_sa1100_do_resume)
 	ldmia	r0, {r4 - r7}			@ load cp regs
-	mov	r1, #0
-	mcr	p15, 0, r1, c8, c7, 0		@ flush I+D TLBs
-	mcr	p15, 0, r1, c7, c7, 0		@ flush I&D cache
-	mcr	p15, 0, r1, c9, c0, 0		@ invalidate RB
-	mcr	p15, 0, r1, c9, c0, 5		@ allow user space to use RB
+	mov	ip, #0
+	mcr	p15, 0, ip, c8, c7, 0		@ flush I+D TLBs
+	mcr	p15, 0, ip, c7, c7, 0		@ flush I&D cache
+	mcr	p15, 0, ip, c9, c0, 0		@ invalidate RB
+	mcr	p15, 0, ip, c9, c0, 5		@ allow user space to use RB
 
 	mcr	p15, 0, r4, c3, c0, 0		@ domain ID
 	mcr	p15, 0, r5, c2, c0, 0		@ translation table base addr
diff --git a/arch/arm/mm/proc-v6.S b/arch/arm/mm/proc-v6.S
index 219138d2f158..a923aa0fd00d 100644
--- a/arch/arm/mm/proc-v6.S
+++ b/arch/arm/mm/proc-v6.S
@@ -223,6 +223,22 @@ __v6_setup:
 	mrc	p15, 0, r0, c1, c0, 0		@ read control register
 	bic	r0, r0, r5			@ clear bits them
 	orr	r0, r0, r6			@ set them
+#ifdef CONFIG_ARM_ERRATA_364296
+	/*
+	 * Workaround for the 364296 ARM1136 r0p2 erratum (possible cache data
+	 * corruption with hit-under-miss enabled). The conditional code below
+	 * (setting the undocumented bit 31 in the auxiliary control register
+	 * and the FI bit in the control register) disables hit-under-miss
+	 * without putting the processor into full low interrupt latency mode.
+	 */
+	ldr	r6, =0x4107b362			@ id for ARM1136 r0p2
+	mrc	p15, 0, r5, c0, c0, 0		@ get processor id
+	teq	r5, r6				@ check for the faulty core
+	mrceq	p15, 0, r5, c1, c0, 1		@ load aux control reg
+	orreq	r5, r5, #(1 << 31)		@ set the undocumented bit 31
+	mcreq	p15, 0, r5, c1, c0, 1		@ write aux control reg
+	orreq	r0, r0, #(1 << 21)		@ low interrupt latency configuration
+#endif
 	mov	pc, lr				@ return to head.S:__ret
 
 	/*
diff --git a/arch/arm/mm/proc-v7.S b/arch/arm/mm/proc-v7.S
index a30e78542ccf..9591c8e9fb8c 100644
--- a/arch/arm/mm/proc-v7.S
+++ b/arch/arm/mm/proc-v7.S
@@ -66,6 +66,7 @@ ENDPROC(cpu_v7_proc_fin)
 ENTRY(cpu_v7_reset)
 	mrc	p15, 0, r1, c1, c0, 0		@ ctrl register
 	bic	r1, r1, #0x1			@ ...............m
+ THUMB(	bic	r1, r1, #1 << 30 )		@ SCTLR.TE (Thumb exceptions)
 	mcr	p15, 0, r1, c1, c0, 0		@ disable MMU
 	isb
 	mov	pc, r0
@@ -217,7 +218,7 @@ ENDPROC(cpu_v7_set_pte_ext)
 /* Suspend/resume support: derived from arch/arm/mach-s5pv210/sleep.S */
 .globl	cpu_v7_suspend_size
 .equ	cpu_v7_suspend_size, 4 * 9
-#ifdef CONFIG_PM_SLEEP
+#ifdef CONFIG_ARM_CPU_SUSPEND
 ENTRY(cpu_v7_do_suspend)
 	stmfd	sp!, {r4 - r11, lr}
 	mrc	p15, 0, r4, c13, c0, 0	@ FCSE/PID
@@ -247,13 +248,16 @@ ENTRY(cpu_v7_do_resume)
 	mcr	p15, 0, r7, c2, c0, 0	@ TTB 0
 	mcr	p15, 0, r8, c2, c0, 1	@ TTB 1
 	mcr	p15, 0, ip, c2, c0, 2	@ TTB control register
-	mcr	p15, 0, r10, c1, c0, 1	@ Auxiliary control register
+	mrc	p15, 0, r4, c1, c0, 1	@ Read Auxiliary control register
+	teq	r4, r10			@ Is it already set?
+	mcrne	p15, 0, r10, c1, c0, 1	@ No, so write it
 	mcr	p15, 0, r11, c1, c0, 2	@ Co-processor access control
 	ldr	r4, =PRRR		@ PRRR
 	ldr	r5, =NMRR		@ NMRR
 	mcr	p15, 0, r4, c10, c2, 0	@ write PRRR
 	mcr	p15, 0, r5, c10, c2, 1	@ write NMRR
 	isb
+	dsb
 	mov	r0, r9			@ control register
 	mov	r2, r7, lsr #14		@ get TTB0 base
 	mov	r2, r2, lsl #14
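
Two resume-hardening changes here: the Auxiliary Control Register is compared before being written, since on parts that boot Linux in the non-secure world ACTLR is writable only from secure code and a blind mcr can trap or be silently dropped; and a dsb joins the isb so the preceding CP15 writes have completed before the caller re-enables the MMU. The conditional-write pattern, sketched with inline assembly (helper name hypothetical):

/* C-level shape of the mrc/teq/mcrne sequence above. */
static void restore_actlr(unsigned int saved)
{
	unsigned int cur;

	asm volatile("mrc p15, 0, %0, c1, c0, 1" : "=r" (cur));
	if (cur != saved)	/* write only when needed: may be secure-only */
		asm volatile("mcr p15, 0, %0, c1, c0, 1" : : "r" (saved));
}
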
diff --git a/arch/arm/mm/proc-xsc3.S b/arch/arm/mm/proc-xsc3.S
index 28c72a2006a1..755e1bf22681 100644
--- a/arch/arm/mm/proc-xsc3.S
+++ b/arch/arm/mm/proc-xsc3.S
@@ -406,7 +406,7 @@ ENTRY(cpu_xsc3_set_pte_ext)
 	.align
 
 .globl	cpu_xsc3_suspend_size
-.equ	cpu_xsc3_suspend_size, 4 * 8
+.equ	cpu_xsc3_suspend_size, 4 * 7
 #ifdef CONFIG_PM_SLEEP
 ENTRY(cpu_xsc3_do_suspend)
 	stmfd	sp!, {r4 - r10, lr}
@@ -418,12 +418,12 @@ ENTRY(cpu_xsc3_do_suspend)
 	mrc	p15, 0, r9, c1, c0, 1	@ auxiliary control reg
 	mrc	p15, 0, r10, c1, c0, 0	@ control reg
 	bic	r4, r4, #2		@ clear frequency change bit
-	stmia	r0, {r1, r4 - r10}	@ store v:p offset + cp regs
+	stmia	r0, {r4 - r10}		@ store cp regs
 	ldmia	sp!, {r4 - r10, pc}
 ENDPROC(cpu_xsc3_do_suspend)
 
 ENTRY(cpu_xsc3_do_resume)
-	ldmia	r0, {r1, r4 - r10}	@ load v:p offset + cp regs
+	ldmia	r0, {r4 - r10}		@ load cp regs
 	mov	ip, #0
 	mcr	p15, 0, ip, c7, c7, 0	@ invalidate I & D caches, BTB
 	mcr	p15, 0, ip, c7, c10, 4	@ drain write (&fill) buffer
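
The xsc3 suspend blob shrinks from eight words to seven because the virtual:physical offset is no longer stored next to the coprocessor registers (it is handled generically rather than per-CPU). The declared size and the stmia/ldmia register lists must agree exactly, or resume reloads stale stack slots. The sizing rule, as a sketch:

/* The blob must hold exactly what the suspend handler stores: */
#define XSC3_SAVED_CP_REGS	7			/* stmia r0, {r4 - r10} */
#define CPU_XSC3_SUSPEND_SIZE	(4 * XSC3_SAVED_CP_REGS)	/* == 4 * 7 */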