path: root/arch/arm/mm/fault-armv.c
Diffstat (limited to 'arch/arm/mm/fault-armv.c')
 arch/arm/mm/fault-armv.c | 11 ++++++-----
 1 file changed, 6 insertions(+), 5 deletions(-)
diff --git a/arch/arm/mm/fault-armv.c b/arch/arm/mm/fault-armv.c
index a8ec97b4752e..6f92904a81e9 100644
--- a/arch/arm/mm/fault-armv.c
+++ b/arch/arm/mm/fault-armv.c
@@ -21,7 +21,7 @@
 #include <asm/pgtable.h>
 #include <asm/tlbflush.h>
 
-static unsigned long shared_pte_mask = L_PTE_CACHEABLE;
+static unsigned long shared_pte_mask = L_PTE_MT_BUFFERABLE;
 
 /*
  * We take the easy way out of this problem - we make the
@@ -63,9 +63,10 @@ static int adjust_pte(struct vm_area_struct *vma, unsigned long address)
 	 * If this page isn't present, or is already setup to
 	 * fault (ie, is old), we can safely ignore any issues.
 	 */
-	if (ret && pte_val(entry) & shared_pte_mask) {
+	if (ret && (pte_val(entry) & L_PTE_MT_MASK) != shared_pte_mask) {
 		flush_cache_page(vma, address, pte_pfn(entry));
-		pte_val(entry) &= ~shared_pte_mask;
+		pte_val(entry) &= ~L_PTE_MT_MASK;
+		pte_val(entry) |= shared_pte_mask;
 		set_pte_at(vma->vm_mm, address, pte, entry);
 		flush_tlb_page(vma, address);
 	}
@@ -197,7 +198,7 @@ void __init check_writebuffer_bugs(void)
 		unsigned long *p1, *p2;
 		pgprot_t prot = __pgprot(L_PTE_PRESENT|L_PTE_YOUNG|
 					 L_PTE_DIRTY|L_PTE_WRITE|
-					 L_PTE_BUFFERABLE);
+					 L_PTE_MT_BUFFERABLE);
 
 		p1 = vmap(&page, 1, VM_IOREMAP, prot);
 		p2 = vmap(&page, 1, VM_IOREMAP, prot);
@@ -218,7 +219,7 @@ void __init check_writebuffer_bugs(void)
 
 	if (v) {
 		printk("failed, %s\n", reason);
-		shared_pte_mask |= L_PTE_BUFFERABLE;
+		shared_pte_mask = L_PTE_MT_UNCACHED;
 	} else {
 		printk("ok\n");
 	}
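
For reference, a minimal user-space sketch (not kernel code) of the pattern the new hunks rely on: once cacheability is a multi-bit memory-type field rather than a single L_PTE_CACHEABLE bit, the shared-mapping fixup must mask out and compare the whole field, then rewrite it, instead of testing and clearing one bit. The macro names and values below are local stand-ins assumed for illustration (a 4-bit field at bits 2..5, in the spirit of the L_PTE_MT_* definitions); only the bitwise logic is the point.

/* Standalone illustration of mask-and-compare on a multi-bit PTE field. */
#include <stdio.h>

#define PTE_MT_UNCACHED   (0x0 << 2)	/* stand-in for L_PTE_MT_UNCACHED */
#define PTE_MT_BUFFERABLE (0x1 << 2)	/* stand-in for L_PTE_MT_BUFFERABLE */
#define PTE_MT_WRITEBACK  (0x3 << 2)	/* stand-in for L_PTE_MT_WRITEBACK */
#define PTE_MT_MASK       (0xf << 2)	/* stand-in for L_PTE_MT_MASK */

static unsigned long shared_pte_mask = PTE_MT_BUFFERABLE;

/* New-style test: compare the whole memory-type field against the target. */
static int needs_adjust(unsigned long pte)
{
	return (pte & PTE_MT_MASK) != shared_pte_mask;
}

/* New-style update: clear the whole field, then install the shared type. */
static unsigned long adjust(unsigned long pte)
{
	pte &= ~PTE_MT_MASK;
	pte |= shared_pte_mask;
	return pte;
}

int main(void)
{
	unsigned long pte = PTE_MT_WRITEBACK | 0x3;	/* type bits + low flag bits */

	if (needs_adjust(pte))
		pte = adjust(pte);

	printf("adjusted pte: %#lx\n", pte);	/* type field is now BUFFERABLE */
	return 0;
}

A single-bit AND (the old test) would misclassify types whose encodings happen to share a bit with the target, which is why the field-wide comparison and rewrite are used once more than two memory types exist.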