Diffstat (limited to 'arch/arm/mm')
-rw-r--r--  arch/arm/mm/cache-l2x0.c      | 22
-rw-r--r--  arch/arm/mm/copypage-v4mc.c   |  9
-rw-r--r--  arch/arm/mm/copypage-v6.c     | 20
-rw-r--r--  arch/arm/mm/copypage-xscale.c |  9
-rw-r--r--  arch/arm/mm/flush.c           | 14
-rw-r--r--  arch/arm/mm/highmem.c         | 21
-rw-r--r--  arch/arm/mm/mm.h              | 26
7 files changed, 58 insertions(+), 63 deletions(-)
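
Every hunk that follows applies the same mechanical transformation: each open-coded set_pte_ext() on a TOP_PTE() slot plus its explicit kernel TLB flush collapses into the new set_top_pte() helper in mm.h, and each direct dereference of TOP_PTE() becomes get_top_pte(). A minimal before/after sketch of the pattern, with placeholder va and pfn (not taken verbatim from any one file):

	/* before: every caller walked the top pmd and flushed the TLB by hand */
	set_pte_ext(TOP_PTE(va), pfn_pte(pfn, PAGE_KERNEL), 0);
	flush_tlb_kernel_page(va);

	/* after: one helper hides both the PTE walk and the TLB flush */
	set_top_pte(va, pfn_pte(pfn, PAGE_KERNEL));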
diff --git a/arch/arm/mm/cache-l2x0.c b/arch/arm/mm/cache-l2x0.c
index b1e192ba8c24..a53fd2aaa2f4 100644
--- a/arch/arm/mm/cache-l2x0.c
+++ b/arch/arm/mm/cache-l2x0.c
@@ -30,13 +30,13 @@
 
 static void __iomem *l2x0_base;
 static DEFINE_RAW_SPINLOCK(l2x0_lock);
-static uint32_t l2x0_way_mask;	/* Bitmask of active ways */
-static uint32_t l2x0_size;
+static u32 l2x0_way_mask;	/* Bitmask of active ways */
+static u32 l2x0_size;
 
 struct l2x0_regs l2x0_saved_regs;
 
 struct l2x0_of_data {
-	void (*setup)(const struct device_node *, __u32 *, __u32 *);
+	void (*setup)(const struct device_node *, u32 *, u32 *);
 	void (*save)(void);
 	void (*resume)(void);
 };
@@ -288,7 +288,7 @@ static void l2x0_disable(void)
 	raw_spin_unlock_irqrestore(&l2x0_lock, flags);
 }
 
-static void l2x0_unlock(__u32 cache_id)
+static void l2x0_unlock(u32 cache_id)
 {
 	int lockregs;
 	int i;
@@ -307,11 +307,11 @@ static void l2x0_unlock(__u32 cache_id)
 	}
 }
 
-void __init l2x0_init(void __iomem *base, __u32 aux_val, __u32 aux_mask)
+void __init l2x0_init(void __iomem *base, u32 aux_val, u32 aux_mask)
 {
-	__u32 aux;
-	__u32 cache_id;
-	__u32 way_size = 0;
+	u32 aux;
+	u32 cache_id;
+	u32 way_size = 0;
 	int ways;
 	const char *type;
 
@@ -388,7 +388,7 @@ void __init l2x0_init(void __iomem *base, __u32 aux_val, __u32 aux_mask)
 
 #ifdef CONFIG_OF
 static void __init l2x0_of_setup(const struct device_node *np,
-				 __u32 *aux_val, __u32 *aux_mask)
+				 u32 *aux_val, u32 *aux_mask)
 {
 	u32 data[2] = { 0, 0 };
 	u32 tag = 0;
@@ -422,7 +422,7 @@ static void __init l2x0_of_setup(const struct device_node *np,
 }
 
 static void __init pl310_of_setup(const struct device_node *np,
-				  __u32 *aux_val, __u32 *aux_mask)
+				  u32 *aux_val, u32 *aux_mask)
 {
 	u32 data[3] = { 0, 0, 0 };
 	u32 tag[3] = { 0, 0, 0 };
@@ -548,7 +548,7 @@ static const struct of_device_id l2x0_ids[] __initconst = {
 	{}
 };
 
-int __init l2x0_of_init(__u32 aux_val, __u32 aux_mask)
+int __init l2x0_of_init(u32 aux_val, u32 aux_mask)
 {
 	struct device_node *np;
 	struct l2x0_of_data *data;
diff --git a/arch/arm/mm/copypage-v4mc.c b/arch/arm/mm/copypage-v4mc.c
index 7d0a8c230342..6e06180a8bc0 100644
--- a/arch/arm/mm/copypage-v4mc.c
+++ b/arch/arm/mm/copypage-v4mc.c
@@ -23,10 +23,6 @@
 
 #include "mm.h"
 
-/*
- * 0xffff8000 to 0xffffffff is reserved for any ARM architecture
- * specific hacks for copying pages efficiently.
- */
 #define minicache_pgprot __pgprot(L_PTE_PRESENT | L_PTE_YOUNG | \
 				  L_PTE_MT_MINICACHE)
 
@@ -78,10 +74,9 @@ void v4_mc_copy_user_highpage(struct page *to, struct page *from,
 
 	raw_spin_lock(&minicache_lock);
 
-	set_pte_ext(TOP_PTE(0xffff8000), pfn_pte(page_to_pfn(from), minicache_pgprot), 0);
-	flush_tlb_kernel_page(0xffff8000);
+	set_top_pte(COPYPAGE_MINICACHE, mk_pte(from, minicache_pgprot));
 
-	mc_copy_user_page((void *)0xffff8000, kto);
+	mc_copy_user_page((void *)COPYPAGE_MINICACHE, kto);
 
 	raw_spin_unlock(&minicache_lock);
 
diff --git a/arch/arm/mm/copypage-v6.c b/arch/arm/mm/copypage-v6.c
index 3d9a1552cef6..29c770463e41 100644
--- a/arch/arm/mm/copypage-v6.c
+++ b/arch/arm/mm/copypage-v6.c
@@ -24,9 +24,6 @@
 #error FIX ME
 #endif
 
-#define from_address	(0xffff8000)
-#define to_address	(0xffffc000)
-
 static DEFINE_RAW_SPINLOCK(v6_lock);
 
 /*
@@ -90,14 +87,11 @@ static void v6_copy_user_highpage_aliasing(struct page *to,
 	 */
 	raw_spin_lock(&v6_lock);
 
-	set_pte_ext(TOP_PTE(from_address) + offset, pfn_pte(page_to_pfn(from), PAGE_KERNEL), 0);
-	set_pte_ext(TOP_PTE(to_address) + offset, pfn_pte(page_to_pfn(to), PAGE_KERNEL), 0);
-
-	kfrom = from_address + (offset << PAGE_SHIFT);
-	kto = to_address + (offset << PAGE_SHIFT);
+	kfrom = COPYPAGE_V6_FROM + (offset << PAGE_SHIFT);
+	kto = COPYPAGE_V6_TO + (offset << PAGE_SHIFT);
 
-	flush_tlb_kernel_page(kfrom);
-	flush_tlb_kernel_page(kto);
+	set_top_pte(kfrom, mk_pte(from, PAGE_KERNEL));
+	set_top_pte(kto, mk_pte(to, PAGE_KERNEL));
 
 	copy_page((void *)kto, (void *)kfrom);
 
@@ -111,8 +105,7 @@ static void v6_clear_user_highpage_aliasing(struct page *page, unsigned long vaddr)
  */
 static void v6_clear_user_highpage_aliasing(struct page *page, unsigned long vaddr)
 {
-	unsigned int offset = CACHE_COLOUR(vaddr);
-	unsigned long to = to_address + (offset << PAGE_SHIFT);
+	unsigned long to = COPYPAGE_V6_TO + (CACHE_COLOUR(vaddr) << PAGE_SHIFT);
 
 	/* FIXME: not highmem safe */
 	discard_old_kernel_data(page_address(page));
@@ -123,8 +116,7 @@ static void v6_clear_user_highpage_aliasing(struct page *page, unsigned long vad
 	 */
 	raw_spin_lock(&v6_lock);
 
-	set_pte_ext(TOP_PTE(to_address) + offset, pfn_pte(page_to_pfn(page), PAGE_KERNEL), 0);
-	flush_tlb_kernel_page(to);
+	set_top_pte(to, mk_pte(page, PAGE_KERNEL));
 	clear_page((void *)to);
 
 	raw_spin_unlock(&v6_lock);
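
The offset arithmetic above keeps each kernel alias in the same cache set as the user page it shadows: CACHE_COLOUR(vaddr) extracts the page-colour bits of the user address, and shifting the colour left by PAGE_SHIFT turns it into a byte offset within the reserved window. A worked sketch, assuming 4K pages and a four-colour VIPT cache (the concrete values are hypothetical):

	unsigned long vaddr = 0x40003000;		/* user address, colour 3 of 0..3 */
	unsigned int offset = CACHE_COLOUR(vaddr);	/* 3 */
	unsigned long kfrom = COPYPAGE_V6_FROM + (offset << PAGE_SHIFT);	/* 0xffffb000 */
	unsigned long kto   = COPYPAGE_V6_TO   + (offset << PAGE_SHIFT);	/* 0xfffff000 */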
diff --git a/arch/arm/mm/copypage-xscale.c b/arch/arm/mm/copypage-xscale.c
index 610c24ced310..804eeddda97f 100644
--- a/arch/arm/mm/copypage-xscale.c
+++ b/arch/arm/mm/copypage-xscale.c
@@ -23,12 +23,6 @@
 
 #include "mm.h"
 
-/*
- * 0xffff8000 to 0xffffffff is reserved for any ARM architecture
- * specific hacks for copying pages efficiently.
- */
-#define COPYPAGE_MINICACHE	0xffff8000
-
 #define minicache_pgprot __pgprot(L_PTE_PRESENT | L_PTE_YOUNG | \
 				  L_PTE_MT_MINICACHE)
 
@@ -100,8 +94,7 @@ void xscale_mc_copy_user_highpage(struct page *to, struct page *from,
 
 	raw_spin_lock(&minicache_lock);
 
-	set_pte_ext(TOP_PTE(COPYPAGE_MINICACHE), pfn_pte(page_to_pfn(from), minicache_pgprot), 0);
-	flush_tlb_kernel_page(COPYPAGE_MINICACHE);
+	set_top_pte(COPYPAGE_MINICACHE, mk_pte(from, minicache_pgprot));
 
 	mc_copy_user_page((void *)COPYPAGE_MINICACHE, kto);
 
diff --git a/arch/arm/mm/flush.c b/arch/arm/mm/flush.c
index 1a8d4aa821be..4d0b70f035eb 100644
--- a/arch/arm/mm/flush.c
+++ b/arch/arm/mm/flush.c
@@ -23,15 +23,12 @@
 
 #ifdef CONFIG_CPU_CACHE_VIPT
 
-#define ALIAS_FLUSH_START	0xffff4000
-
 static void flush_pfn_alias(unsigned long pfn, unsigned long vaddr)
 {
-	unsigned long to = ALIAS_FLUSH_START + (CACHE_COLOUR(vaddr) << PAGE_SHIFT);
+	unsigned long to = FLUSH_ALIAS_START + (CACHE_COLOUR(vaddr) << PAGE_SHIFT);
 	const int zero = 0;
 
-	set_pte_ext(TOP_PTE(to), pfn_pte(pfn, PAGE_KERNEL), 0);
-	flush_tlb_kernel_page(to);
+	set_top_pte(to, pfn_pte(pfn, PAGE_KERNEL));
 
 	asm(	"mcrr	p15, 0, %1, %0, c14\n"
 	"	mcr	p15, 0, %2, c7, c10, 4"
@@ -42,13 +39,12 @@ static void flush_pfn_alias(unsigned long pfn, unsigned long vaddr)
 
 static void flush_icache_alias(unsigned long pfn, unsigned long vaddr, unsigned long len)
 {
-	unsigned long colour = CACHE_COLOUR(vaddr);
+	unsigned long va = FLUSH_ALIAS_START + (CACHE_COLOUR(vaddr) << PAGE_SHIFT);
 	unsigned long offset = vaddr & (PAGE_SIZE - 1);
 	unsigned long to;
 
-	set_pte_ext(TOP_PTE(ALIAS_FLUSH_START) + colour, pfn_pte(pfn, PAGE_KERNEL), 0);
-	to = ALIAS_FLUSH_START + (colour << PAGE_SHIFT) + offset;
-	flush_tlb_kernel_page(to);
+	set_top_pte(va, pfn_pte(pfn, PAGE_KERNEL));
+	to = va + offset;
 	flush_icache_range(to, to + len);
 }
 
diff --git a/arch/arm/mm/highmem.c b/arch/arm/mm/highmem.c
index 807c0573abbe..3a9e8aa19759 100644
--- a/arch/arm/mm/highmem.c
+++ b/arch/arm/mm/highmem.c
@@ -69,15 +69,14 @@ void *__kmap_atomic(struct page *page)
 	 * With debugging enabled, kunmap_atomic forces that entry to 0.
 	 * Make sure it was indeed properly unmapped.
 	 */
-	BUG_ON(!pte_none(*(TOP_PTE(vaddr))));
+	BUG_ON(!pte_none(get_top_pte(vaddr)));
 #endif
-	set_pte_ext(TOP_PTE(vaddr), mk_pte(page, kmap_prot), 0);
 	/*
 	 * When debugging is off, kunmap_atomic leaves the previous mapping
-	 * in place, so this TLB flush ensures the TLB is updated with the
-	 * new mapping.
+	 * in place, so the contained TLB flush ensures the TLB is updated
+	 * with the new mapping.
 	 */
-	local_flush_tlb_kernel_page(vaddr);
+	set_top_pte(vaddr, mk_pte(page, kmap_prot));
 
 	return (void *)vaddr;
 }
@@ -96,8 +95,7 @@ void __kunmap_atomic(void *kvaddr)
 		__cpuc_flush_dcache_area((void *)vaddr, PAGE_SIZE);
 #ifdef CONFIG_DEBUG_HIGHMEM
 		BUG_ON(vaddr != __fix_to_virt(FIX_KMAP_BEGIN + idx));
-		set_pte_ext(TOP_PTE(vaddr), __pte(0), 0);
-		local_flush_tlb_kernel_page(vaddr);
+		set_top_pte(vaddr, __pte(0));
 #else
 		(void) idx;  /* to kill a warning */
 #endif
@@ -121,10 +119,9 @@ void *kmap_atomic_pfn(unsigned long pfn)
 	idx = type + KM_TYPE_NR * smp_processor_id();
 	vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
 #ifdef CONFIG_DEBUG_HIGHMEM
-	BUG_ON(!pte_none(*(TOP_PTE(vaddr))));
+	BUG_ON(!pte_none(get_top_pte(vaddr)));
 #endif
-	set_pte_ext(TOP_PTE(vaddr), pfn_pte(pfn, kmap_prot), 0);
-	local_flush_tlb_kernel_page(vaddr);
+	set_top_pte(vaddr, pfn_pte(pfn, kmap_prot));
 
 	return (void *)vaddr;
 }
@@ -132,11 +129,9 @@ void *kmap_atomic_pfn(unsigned long pfn)
 struct page *kmap_atomic_to_page(const void *ptr)
 {
 	unsigned long vaddr = (unsigned long)ptr;
-	pte_t *pte;
 
 	if (vaddr < FIXADDR_START)
 		return virt_to_page(ptr);
 
-	pte = TOP_PTE(vaddr);
-	return pte_page(*pte);
+	return pte_page(get_top_pte(vaddr));
 }
diff --git a/arch/arm/mm/mm.h b/arch/arm/mm/mm.h
index 70f6d3ea4834..27f4a619b35d 100644
--- a/arch/arm/mm/mm.h
+++ b/arch/arm/mm/mm.h
@@ -3,7 +3,31 @@
 /* the upper-most page table pointer */
 extern pmd_t *top_pmd;
 
-#define TOP_PTE(x)	pte_offset_kernel(top_pmd, x)
+/*
+ * 0xffff8000 to 0xffffffff is reserved for any ARM architecture
+ * specific hacks for copying pages efficiently, while 0xffff4000
+ * is reserved for VIPT aliasing flushing by generic code.
+ *
+ * Note that we don't allow VIPT aliasing caches with SMP.
+ */
+#define COPYPAGE_MINICACHE	0xffff8000
+#define COPYPAGE_V6_FROM	0xffff8000
+#define COPYPAGE_V6_TO		0xffffc000
+/* PFN alias flushing, for VIPT caches */
+#define FLUSH_ALIAS_START	0xffff4000
+
+static inline void set_top_pte(unsigned long va, pte_t pte)
+{
+	pte_t *ptep = pte_offset_kernel(top_pmd, va);
+	set_pte_ext(ptep, pte, 0);
+	local_flush_tlb_kernel_page(va);
+}
+
+static inline pte_t get_top_pte(unsigned long va)
+{
+	pte_t *ptep = pte_offset_kernel(top_pmd, va);
+	return *ptep;
+}
 
 static inline pmd_t *pmd_off_k(unsigned long virt)
 {
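
Two properties of the new helpers are worth noting. set_top_pte() always performs local_flush_tlb_kernel_page(), a local-CPU flush, even where callers such as copypage-v4mc.c previously used the broadcasting flush_tlb_kernel_page(); that is safe because, per the comment above, VIPT aliasing caches are not allowed with SMP. get_top_pte() is a plain read with no locking, so callers must serialise exactly as before. A hypothetical lookup helper in the style of kmap_atomic_to_page() (page_at is an illustration, not part of the patch):

	static struct page *page_at(unsigned long va)
	{
		pte_t pte = get_top_pte(va);	/* unlocked read of the top-pmd PTE */
		return pte_none(pte) ? NULL : pte_page(pte);
	}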