Diffstat (limited to 'arch/arm/mm')
-rw-r--r--	arch/arm/mm/Kconfig			3
-rw-r--r--	arch/arm/mm/abort-ev6.S			3
-rw-r--r--	arch/arm/mm/copypage-feroceon.c		2
-rw-r--r--	arch/arm/mm/copypage-v3.c		2
-rw-r--r--	arch/arm/mm/copypage-v4mc.c		2
-rw-r--r--	arch/arm/mm/copypage-v4wb.c		2
-rw-r--r--	arch/arm/mm/copypage-v4wt.c		2
-rw-r--r--	arch/arm/mm/copypage-xsc3.c		2
-rw-r--r--	arch/arm/mm/copypage-xscale.c		2
-rw-r--r--	arch/arm/mm/mmu.c			26
10 files changed, 36 insertions(+), 10 deletions(-)
diff --git a/arch/arm/mm/Kconfig b/arch/arm/mm/Kconfig
index 64086f4f5fcc..a6230f7a24c8 100644
--- a/arch/arm/mm/Kconfig
+++ b/arch/arm/mm/Kconfig
@@ -715,7 +715,8 @@ config CACHE_FEROCEON_L2_WRITETHROUGH
 
 config CACHE_L2X0
 	bool "Enable the L2x0 outer cache controller"
-	depends on REALVIEW_EB_ARM11MP || MACH_REALVIEW_PB11MP || MACH_REALVIEW_PB1176 || REALVIEW_EB_A9MP
+	depends on REALVIEW_EB_ARM11MP || MACH_REALVIEW_PB11MP || MACH_REALVIEW_PB1176 || \
+		   REALVIEW_EB_A9MP || ARCH_MX35 || ARCH_MX31
 	default y
 	select OUTER_CACHE
 	help
diff --git a/arch/arm/mm/abort-ev6.S b/arch/arm/mm/abort-ev6.S
index 8a7f65ba14b7..94077fbd96b7 100644
--- a/arch/arm/mm/abort-ev6.S
+++ b/arch/arm/mm/abort-ev6.S
@@ -23,7 +23,8 @@ ENTRY(v6_early_abort)
 #ifdef CONFIG_CPU_32v6K
 	clrex
 #else
-	strex	r0, r1, [sp]			@ Clear the exclusive monitor
+	sub	r1, sp, #4			@ Get unused stack location
+	strex	r0, r1, [r1]			@ Clear the exclusive monitor
 #endif
 	mrc	p15, 0, r1, c5, c0, 0		@ get FSR
 	mrc	p15, 0, r0, c6, c0, 0		@ get FAR
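
The change above stops the dummy STREX from targeting the live stack slot at [sp], where a successful store would corrupt saved data, and aims it at an unused location just below the stack pointer instead. A minimal C-level sketch of the same idea, assuming an ARMv6+ build; the helper name is hypothetical and not part of the patch:

/* Hypothetical helper, for illustration only: clear the local
 * exclusive monitor with a dummy STREX aimed at a scratch word
 * whose contents we do not care about. */
static inline void clear_exclusive_monitor(void)
{
	unsigned long status, scratch;

	asm volatile("strex	%0, %2, [%1]"	/* store happens only if the monitor was set */
		     : "=&r" (status)
		     : "r" (&scratch), "r" (0)
		     : "memory");
	(void)status;	/* result of the store is irrelevant here */
}
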
diff --git a/arch/arm/mm/copypage-feroceon.c b/arch/arm/mm/copypage-feroceon.c
index c3ba6a94da0c..70997d5bee2d 100644
--- a/arch/arm/mm/copypage-feroceon.c
+++ b/arch/arm/mm/copypage-feroceon.c
@@ -13,7 +13,7 @@
 #include <linux/init.h>
 #include <linux/highmem.h>
 
-static void __attribute__((naked))
+static void __naked
 feroceon_copy_user_page(void *kto, const void *kfrom)
 {
 	asm("\
diff --git a/arch/arm/mm/copypage-v3.c b/arch/arm/mm/copypage-v3.c
index 70ed96c8af8e..de9c06854ad7 100644
--- a/arch/arm/mm/copypage-v3.c
+++ b/arch/arm/mm/copypage-v3.c
@@ -15,7 +15,7 @@
  *
  * FIXME: do we need to handle cache stuff...
  */
-static void __attribute__((naked))
+static void __naked
 v3_copy_user_page(void *kto, const void *kfrom)
 {
 	asm("\n\
diff --git a/arch/arm/mm/copypage-v4mc.c b/arch/arm/mm/copypage-v4mc.c
index 1601698b9800..7370a7142b04 100644
--- a/arch/arm/mm/copypage-v4mc.c
+++ b/arch/arm/mm/copypage-v4mc.c
@@ -44,7 +44,7 @@ static DEFINE_SPINLOCK(minicache_lock);
  * instruction. If your processor does not supply this, you have to write your
  * own copy_user_highpage that does the right thing.
  */
-static void __attribute__((naked))
+static void __naked
 mc_copy_user_page(void *from, void *to)
 {
 	asm volatile(
diff --git a/arch/arm/mm/copypage-v4wb.c b/arch/arm/mm/copypage-v4wb.c
index 3ec93dab7656..9ab098414227 100644
--- a/arch/arm/mm/copypage-v4wb.c
+++ b/arch/arm/mm/copypage-v4wb.c
@@ -22,7 +22,7 @@
  * instruction. If your processor does not supply this, you have to write your
  * own copy_user_highpage that does the right thing.
  */
-static void __attribute__((naked))
+static void __naked
 v4wb_copy_user_page(void *kto, const void *kfrom)
 {
 	asm("\
diff --git a/arch/arm/mm/copypage-v4wt.c b/arch/arm/mm/copypage-v4wt.c
index 0f1188efae45..300efafd6643 100644
--- a/arch/arm/mm/copypage-v4wt.c
+++ b/arch/arm/mm/copypage-v4wt.c
@@ -20,7 +20,7 @@
  * dirty data in the cache. However, we do have to ensure that
  * subsequent reads are up to date.
  */
-static void __attribute__((naked))
+static void __naked
 v4wt_copy_user_page(void *kto, const void *kfrom)
 {
 	asm("\
diff --git a/arch/arm/mm/copypage-xsc3.c b/arch/arm/mm/copypage-xsc3.c
index 39a994542cad..bc4525f5ab23 100644
--- a/arch/arm/mm/copypage-xsc3.c
+++ b/arch/arm/mm/copypage-xsc3.c
@@ -29,7 +29,7 @@
  * if we eventually end up using our copied page.
  *
  */
-static void __attribute__((naked))
+static void __naked
 xsc3_mc_copy_user_page(void *kto, const void *kfrom)
 {
 	asm("\
diff --git a/arch/arm/mm/copypage-xscale.c b/arch/arm/mm/copypage-xscale.c
index d18f2397ee2d..76824d3e966a 100644
--- a/arch/arm/mm/copypage-xscale.c
+++ b/arch/arm/mm/copypage-xscale.c
@@ -42,7 +42,7 @@ static DEFINE_SPINLOCK(minicache_lock);
  * Dcache aliasing issue. The writes will be forwarded to the write buffer,
  * and merged as appropriate.
  */
-static void __attribute__((naked))
+static void __naked
 mc_copy_user_page(void *from, void *to)
 {
 	/*
diff --git a/arch/arm/mm/mmu.c b/arch/arm/mm/mmu.c
index 9b36c5cb5e9f..8c6fc5a6237e 100644
--- a/arch/arm/mm/mmu.c
+++ b/arch/arm/mm/mmu.c
@@ -243,6 +243,10 @@ static struct mem_type mem_types[] = {
 		.prot_sect = PMD_TYPE_SECT,
 		.domain    = DOMAIN_KERNEL,
 	},
+	[MT_MEMORY_NONCACHED] = {
+		.prot_sect = PMD_TYPE_SECT | PMD_SECT_AP_WRITE,
+		.domain    = DOMAIN_KERNEL,
+	},
 };
 
 const struct mem_type *get_mem_type(unsigned int type)
@@ -406,9 +410,28 @@ static void __init build_mem_type_table(void)
 		kern_pgprot |= L_PTE_SHARED;
 		vecs_pgprot |= L_PTE_SHARED;
 		mem_types[MT_MEMORY].prot_sect |= PMD_SECT_S;
+		mem_types[MT_MEMORY_NONCACHED].prot_sect |= PMD_SECT_S;
 #endif
 	}
 
+	/*
+	 * Non-cacheable Normal - intended for memory areas that must
+	 * not cause dirty cache line writebacks when used
+	 */
+	if (cpu_arch >= CPU_ARCH_ARMv6) {
+		if (cpu_arch >= CPU_ARCH_ARMv7 && (cr & CR_TRE)) {
+			/* Non-cacheable Normal is XCB = 001 */
+			mem_types[MT_MEMORY_NONCACHED].prot_sect |=
+				PMD_SECT_BUFFERED;
+		} else {
+			/* For both ARMv6 and non-TEX-remapping ARMv7 */
+			mem_types[MT_MEMORY_NONCACHED].prot_sect |=
+				PMD_SECT_TEX(1);
+		}
+	} else {
+		mem_types[MT_MEMORY_NONCACHED].prot_sect |= PMD_SECT_BUFFERABLE;
+	}
+
 	for (i = 0; i < 16; i++) {
 		unsigned long v = pgprot_val(protection_map[i]);
 		protection_map[i] = __pgprot(v | user_pgprot);
@@ -693,7 +716,8 @@ static void __init sanity_check_meminfo(void)
 		 * Check whether this memory bank would entirely overlap
 		 * the vmalloc area.
 		 */
-		if (__va(bank->start) >= VMALLOC_MIN) {
+		if (__va(bank->start) >= VMALLOC_MIN ||
+		    __va(bank->start) < PAGE_OFFSET) {
 			printk(KERN_NOTICE "Ignoring RAM at %.8lx-%.8lx "
 			       "(vmalloc region overlap).\n",
 			       bank->start, bank->start + bank->size - 1);
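
For context, the MT_MEMORY_NONCACHED type added to mem_types[] above is meant for mappings that must never produce dirty cache line writebacks, such as on-chip SRAM. A hedged sketch of how a platform might request such a mapping through the static iotable path; the addresses, length and example_* names are placeholders, and the MT_MEMORY_NONCACHED definition is assumed to be exported by the rest of this series:

#include <linux/init.h>
#include <linux/kernel.h>
#include <asm/memory.h>
#include <asm/sizes.h>
#include <asm/mach/map.h>

/* Hypothetical board mapping: one SRAM window mapped as non-cacheable
 * Normal memory via the type introduced by this patch. */
static struct map_desc example_io_desc[] __initdata = {
	{
		.virtual	= 0xf4000000,			/* placeholder VA */
		.pfn		= __phys_to_pfn(0x40000000),	/* placeholder PA */
		.length		= SZ_128K,
		.type		= MT_MEMORY_NONCACHED,
	},
};

static void __init example_map_io(void)
{
	iotable_init(example_io_desc, ARRAY_SIZE(example_io_desc));
}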