Diffstat (limited to 'arch/arm/mm')
-rw-r--r--  arch/arm/mm/Kconfig        13
-rw-r--r--  arch/arm/mm/cache-l2x0.c   10
-rw-r--r--  arch/arm/mm/copypage-v6.c   9
-rw-r--r--  arch/arm/mm/dma-mapping.c   7
-rw-r--r--  arch/arm/mm/fault-armv.c    1
-rw-r--r--  arch/arm/mm/flush.c        25
-rw-r--r--  arch/arm/mm/highmem.c      87
-rw-r--r--  arch/arm/mm/init.c          1
-rw-r--r--  arch/arm/mm/mmu.c          14
-rw-r--r--  arch/arm/mm/pgd.c           1
10 files changed, 144 insertions, 24 deletions
diff --git a/arch/arm/mm/Kconfig b/arch/arm/mm/Kconfig
index c4ed9f93f646..5bd7c89a6045 100644
--- a/arch/arm/mm/Kconfig
+++ b/arch/arm/mm/Kconfig
@@ -736,6 +736,12 @@ config NEEDS_SYSCALL_FOR_CMPXCHG
 config OUTER_CACHE
 	bool
 
+config OUTER_CACHE_SYNC
+	bool
+	help
+	  The outer cache has an outer_cache_fns.sync function pointer
+	  that can be used to drain the write buffer of the outer cache.
+
 config CACHE_FEROCEON_L2
 	bool "Enable the Feroceon L2 cache controller"
 	depends on ARCH_KIRKWOOD || ARCH_MV78XX0
@@ -757,6 +763,7 @@ config CACHE_L2X0
 	  REALVIEW_EB_A9MP || ARCH_MX35 || ARCH_MX31 || MACH_REALVIEW_PBX || ARCH_NOMADIK || ARCH_OMAP4
 	default y
 	select OUTER_CACHE
+	select OUTER_CACHE_SYNC
 	help
 	  This option enables the L2x0 PrimeCell.
 
@@ -781,3 +788,9 @@ config ARM_L1_CACHE_SHIFT
 	int
 	default 6 if ARM_L1_CACHE_SHIFT_6
 	default 5
+
+config ARCH_HAS_BARRIERS
+	bool
+	help
+	  This option allows the use of custom mandatory barriers
+	  included via the mach/barriers.h file.
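
Note: taken together, the two new Kconfig symbols describe both halves of outer-cache write-buffer draining. As a minimal sketch of the consumer side (an assumption inferred from the help texts; the real outer_sync() wrapper and barrier definitions live in asm/outercache.h and asm/system.h, outside this diff):

/* Sketch only: assumed consumers of OUTER_CACHE_SYNC and ARCH_HAS_BARRIERS. */
#ifdef CONFIG_OUTER_CACHE_SYNC
static inline void outer_sync(void)
{
	if (outer_cache.sync)
		outer_cache.sync();	/* drain the outer write buffer */
}
#else
static inline void outer_sync(void) { }
#endif

#ifdef CONFIG_ARCH_HAS_BARRIERS
#include <mach/barriers.h>	/* platform supplies mb()/rmb()/wmb() */
#else
#define mb()	do { dsb(); outer_sync(); } while (0)
#endif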
diff --git a/arch/arm/mm/cache-l2x0.c b/arch/arm/mm/cache-l2x0.c
index 07334632d3e2..21ad68ba22ba 100644
--- a/arch/arm/mm/cache-l2x0.c
+++ b/arch/arm/mm/cache-l2x0.c
@@ -93,6 +93,15 @@ static inline void l2x0_flush_line(unsigned long addr)
 }
 #endif
 
+static void l2x0_cache_sync(void)
+{
+	unsigned long flags;
+
+	spin_lock_irqsave(&l2x0_lock, flags);
+	cache_sync();
+	spin_unlock_irqrestore(&l2x0_lock, flags);
+}
+
 static inline void l2x0_inv_all(void)
 {
 	unsigned long flags;
@@ -225,6 +234,7 @@ void __init l2x0_init(void __iomem *base, __u32 aux_val, __u32 aux_mask)
 	outer_cache.inv_range = l2x0_inv_range;
 	outer_cache.clean_range = l2x0_clean_range;
 	outer_cache.flush_range = l2x0_flush_range;
+	outer_cache.sync = l2x0_cache_sync;
 
 	printk(KERN_INFO "L2X0 cache controller enabled\n");
 }
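
Note: l2x0_cache_sync() takes l2x0_lock so a sync cannot interleave with the range operations issued under the same lock. The cache_sync() helper it calls is defined earlier in cache-l2x0.c and is not shown in this hunk; on an L2x0/PL310 it should amount to a posted write to the cache sync register, roughly as in this sketch (register name and polling loop assumed from the L2x0 programming model):

/* Sketch of the assumed cache_sync() behaviour; the real helper is
 * defined near the top of cache-l2x0.c, outside this diff. */
static inline void cache_sync(void)
{
	void __iomem *base = l2x0_base;

	writel(0, base + L2X0_CACHE_SYNC);	/* issue the sync operation */
	while (readl(base + L2X0_CACHE_SYNC) & 1)
		cpu_relax();			/* wait until it completes */
}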
diff --git a/arch/arm/mm/copypage-v6.c b/arch/arm/mm/copypage-v6.c
index 8bca4dea6dfa..f55fa1044f72 100644
--- a/arch/arm/mm/copypage-v6.c
+++ b/arch/arm/mm/copypage-v6.c
@@ -41,14 +41,7 @@ static void v6_copy_user_highpage_nonaliasing(struct page *to,
 	kfrom = kmap_atomic(from, KM_USER0);
 	kto = kmap_atomic(to, KM_USER1);
 	copy_page(kto, kfrom);
-#ifdef CONFIG_HIGHMEM
-	/*
-	 * kmap_atomic() doesn't set the page virtual address, and
-	 * kunmap_atomic() takes care of cache flushing already.
-	 */
-	if (page_address(to) != NULL)
-#endif
-	__cpuc_flush_dcache_area(kto, PAGE_SIZE);
+	__cpuc_flush_dcache_area(kto, PAGE_SIZE);
 	kunmap_atomic(kto, KM_USER1);
 	kunmap_atomic(kfrom, KM_USER0);
 }
diff --git a/arch/arm/mm/dma-mapping.c b/arch/arm/mm/dma-mapping.c
index 0da7eccf7749..13fa536d82e6 100644
--- a/arch/arm/mm/dma-mapping.c
+++ b/arch/arm/mm/dma-mapping.c
@@ -11,7 +11,7 @@
  */
 #include <linux/module.h>
 #include <linux/mm.h>
-#include <linux/slab.h>
+#include <linux/gfp.h>
 #include <linux/errno.h>
 #include <linux/list.h>
 #include <linux/init.h>
@@ -464,6 +464,11 @@ static void dma_cache_maint_page(struct page *page, unsigned long offset,
 			vaddr += offset;
 			op(vaddr, len, dir);
 			kunmap_high(page);
+		} else if (cache_is_vipt()) {
+			pte_t saved_pte;
+			vaddr = kmap_high_l1_vipt(page, &saved_pte);
+			op(vaddr + offset, len, dir);
+			kunmap_high_l1_vipt(page, saved_pte);
 		}
 	} else {
 		vaddr = page_address(page) + offset;
diff --git a/arch/arm/mm/fault-armv.c b/arch/arm/mm/fault-armv.c
index c9b97e9836a2..0d414c28eb2c 100644
--- a/arch/arm/mm/fault-armv.c
+++ b/arch/arm/mm/fault-armv.c
@@ -16,6 +16,7 @@
 #include <linux/vmalloc.h>
 #include <linux/init.h>
 #include <linux/pagemap.h>
+#include <linux/gfp.h>
 
 #include <asm/bugs.h>
 #include <asm/cacheflush.h>
diff --git a/arch/arm/mm/flush.c b/arch/arm/mm/flush.c
index e34f095e2090..c6844cb9b508 100644
--- a/arch/arm/mm/flush.c
+++ b/arch/arm/mm/flush.c
@@ -13,6 +13,7 @@
 
 #include <asm/cacheflush.h>
 #include <asm/cachetype.h>
+#include <asm/highmem.h>
 #include <asm/smp_plat.h>
 #include <asm/system.h>
 #include <asm/tlbflush.h>
@@ -152,21 +153,25 @@ void copy_to_user_page(struct vm_area_struct *vma, struct page *page,
 
 void __flush_dcache_page(struct address_space *mapping, struct page *page)
 {
-	void *addr = page_address(page);
-
 	/*
 	 * Writeback any data associated with the kernel mapping of this
 	 * page. This ensures that data in the physical page is mutually
 	 * coherent with the kernel's mapping.
 	 */
-#ifdef CONFIG_HIGHMEM
-	/*
-	 * kmap_atomic() doesn't set the page virtual address, and
-	 * kunmap_atomic() takes care of cache flushing already.
-	 */
-	if (addr)
-#endif
-	__cpuc_flush_dcache_area(addr, PAGE_SIZE);
+	if (!PageHighMem(page)) {
+		__cpuc_flush_dcache_area(page_address(page), PAGE_SIZE);
+	} else {
+		void *addr = kmap_high_get(page);
+		if (addr) {
+			__cpuc_flush_dcache_area(addr, PAGE_SIZE);
+			kunmap_high(page);
+		} else if (cache_is_vipt()) {
+			pte_t saved_pte;
+			addr = kmap_high_l1_vipt(page, &saved_pte);
+			__cpuc_flush_dcache_area(addr, PAGE_SIZE);
+			kunmap_high_l1_vipt(page, saved_pte);
+		}
+	}
 
 	/*
 	 * If this is a page cache page, and we have an aliasing VIPT cache,
diff --git a/arch/arm/mm/highmem.c b/arch/arm/mm/highmem.c
index 2be1ec7c1b41..77b030f5ec09 100644
--- a/arch/arm/mm/highmem.c
+++ b/arch/arm/mm/highmem.c
@@ -79,7 +79,8 @@ void kunmap_atomic(void *kvaddr, enum km_type type)
 	unsigned int idx = type + KM_TYPE_NR * smp_processor_id();
 
 	if (kvaddr >= (void *)FIXADDR_START) {
-		__cpuc_flush_dcache_area((void *)vaddr, PAGE_SIZE);
+		if (cache_is_vivt())
+			__cpuc_flush_dcache_area((void *)vaddr, PAGE_SIZE);
 #ifdef CONFIG_DEBUG_HIGHMEM
 		BUG_ON(vaddr != __fix_to_virt(FIX_KMAP_BEGIN + idx));
 		set_pte_ext(TOP_PTE(vaddr), __pte(0), 0);
@@ -124,3 +125,87 @@ struct page *kmap_atomic_to_page(const void *ptr)
 	pte = TOP_PTE(vaddr);
 	return pte_page(*pte);
 }
+
+#ifdef CONFIG_CPU_CACHE_VIPT
+
+#include <linux/percpu.h>
+
+/*
+ * The VIVT cache of a highmem page is always flushed before the page
+ * is unmapped. Hence unmapped highmem pages need no cache maintenance
+ * in that case.
+ *
+ * However unmapped pages may still be cached with a VIPT cache, and
+ * it is not possible to perform cache maintenance on them using physical
+ * addresses unfortunately. So we have no choice but to set up a temporary
+ * virtual mapping for that purpose.
+ *
+ * Yet this VIPT cache maintenance may be triggered from DMA support
+ * functions which are possibly called from interrupt context. As we don't
+ * want to keep interrupts disabled all the time when such maintenance is
+ * taking place, we therefore allow for some reentrancy by preserving and
+ * restoring the previous fixmap entry before the interrupted context is
+ * resumed. If the reentrancy depth is 0 then there is no need to restore
+ * the previous fixmap, and leaving the current one in place allows it to
+ * be reused the next time without a TLB flush (common with DMA).
+ */
+
+static DEFINE_PER_CPU(int, kmap_high_l1_vipt_depth);
+
+void *kmap_high_l1_vipt(struct page *page, pte_t *saved_pte)
+{
+	unsigned int idx, cpu = smp_processor_id();
+	int *depth = &per_cpu(kmap_high_l1_vipt_depth, cpu);
+	unsigned long vaddr, flags;
+	pte_t pte, *ptep;
+
+	idx = KM_L1_CACHE + KM_TYPE_NR * cpu;
+	vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
+	ptep = TOP_PTE(vaddr);
+	pte = mk_pte(page, kmap_prot);
+
+	if (!in_interrupt())
+		preempt_disable();
+
+	raw_local_irq_save(flags);
+	(*depth)++;
+	if (pte_val(*ptep) == pte_val(pte)) {
+		*saved_pte = pte;
+	} else {
+		*saved_pte = *ptep;
+		set_pte_ext(ptep, pte, 0);
+		local_flush_tlb_kernel_page(vaddr);
+	}
+	raw_local_irq_restore(flags);
+
+	return (void *)vaddr;
+}
+
+void kunmap_high_l1_vipt(struct page *page, pte_t saved_pte)
+{
+	unsigned int idx, cpu = smp_processor_id();
+	int *depth = &per_cpu(kmap_high_l1_vipt_depth, cpu);
+	unsigned long vaddr, flags;
+	pte_t pte, *ptep;
+
+	idx = KM_L1_CACHE + KM_TYPE_NR * cpu;
+	vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
+	ptep = TOP_PTE(vaddr);
+	pte = mk_pte(page, kmap_prot);
+
+	BUG_ON(pte_val(*ptep) != pte_val(pte));
+	BUG_ON(*depth <= 0);
+
+	raw_local_irq_save(flags);
+	(*depth)--;
+	if (*depth != 0 && pte_val(pte) != pte_val(saved_pte)) {
+		set_pte_ext(ptep, saved_pte, 0);
+		local_flush_tlb_kernel_page(vaddr);
+	}
+	raw_local_irq_restore(flags);
+
+	if (!in_interrupt())
+		preempt_enable();
+}
+
+#endif /* CONFIG_CPU_CACHE_VIPT */
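
Note: the comment block above describes the reentrancy scheme; the caller-side pattern is the one dma_cache_maint_page() and __flush_dcache_page() use in the earlier hunks. A condensed usage sketch (do_cache_op is a hypothetical stand-in for the actual maintenance call):

/* Usage sketch for the new kmap_high_l1_vipt() API; safe even if an
 * interrupt handler performs the same sequence, because the previous
 * fixmap entry is saved and restored around the nested use. */
static void maint_unmapped_highmem_page(struct page *page,
					unsigned long offset, size_t len)
{
	pte_t saved_pte;
	void *vaddr = kmap_high_l1_vipt(page, &saved_pte);

	do_cache_op(vaddr + offset, len);	/* hypothetical callback */
	kunmap_high_l1_vipt(page, saved_pte);
}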
diff --git a/arch/arm/mm/init.c b/arch/arm/mm/init.c
index 7829cb5425f5..83db12a68d56 100644
--- a/arch/arm/mm/init.c
+++ b/arch/arm/mm/init.c
@@ -17,6 +17,7 @@
 #include <linux/initrd.h>
 #include <linux/sort.h>
 #include <linux/highmem.h>
+#include <linux/gfp.h>
 
 #include <asm/mach-types.h>
 #include <asm/sections.h>
diff --git a/arch/arm/mm/mmu.c b/arch/arm/mm/mmu.c
index 9d4da6ac28eb..241c24a1c18f 100644
--- a/arch/arm/mm/mmu.c
+++ b/arch/arm/mm/mmu.c
@@ -420,6 +420,10 @@ static void __init build_mem_type_table(void)
 		user_pgprot |= L_PTE_SHARED;
 		kern_pgprot |= L_PTE_SHARED;
 		vecs_pgprot |= L_PTE_SHARED;
+		mem_types[MT_DEVICE_WC].prot_sect |= PMD_SECT_S;
+		mem_types[MT_DEVICE_WC].prot_pte |= L_PTE_SHARED;
+		mem_types[MT_DEVICE_CACHED].prot_sect |= PMD_SECT_S;
+		mem_types[MT_DEVICE_CACHED].prot_pte |= L_PTE_SHARED;
 		mem_types[MT_MEMORY].prot_sect |= PMD_SECT_S;
 		mem_types[MT_MEMORY_NONCACHED].prot_sect |= PMD_SECT_S;
 #endif
@@ -1050,10 +1054,12 @@ void setup_mm_for_reboot(char mode)
 	pgd_t *pgd;
 	int i;
 
-	if (current->mm && current->mm->pgd)
-		pgd = current->mm->pgd;
-	else
-		pgd = init_mm.pgd;
+	/*
+	 * We need to access user-mode page tables here. For kernel threads
+	 * we don't have any user-mode mappings, so we use the context that
+	 * we "borrowed".
+	 */
+	pgd = current->active_mm->pgd;
 
 	base_pmdval = PMD_SECT_AP_WRITE | PMD_SECT_AP_READ | PMD_TYPE_SECT;
 	if (cpu_architecture() <= CPU_ARCH_ARMv5TEJ && !cpu_is_xscale())
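
Note: current->mm is NULL for kernel threads, whereas current->active_mm always names the mm whose translation tables are live (the one the thread "borrowed"), so its pgd is valid in every context. A one-line illustration of that invariant (hypothetical helper, not part of this diff):

/* Hypothetical helper spelling out the invariant relied on above:
 * even when current->mm == NULL, current->active_mm points at the mm
 * whose page tables are currently loaded. */
static inline pgd_t *current_live_pgd(void)
{
	return current->active_mm->pgd;
}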
diff --git a/arch/arm/mm/pgd.c b/arch/arm/mm/pgd.c
index 2690146161ba..be5f58e153bf 100644
--- a/arch/arm/mm/pgd.c
+++ b/arch/arm/mm/pgd.c
@@ -8,6 +8,7 @@
  * published by the Free Software Foundation.
  */
 #include <linux/mm.h>
+#include <linux/gfp.h>
 #include <linux/highmem.h>
 
 #include <asm/pgalloc.h>