author     Russell King <rmk@dyn-67.arm.linux.org.uk>   2008-09-06 15:04:59 -0400
committer  Russell King <rmk+kernel@arm.linux.org.uk>   2008-10-01 11:40:56 -0400
commit     bb30f36f9b71c31dc8fe3483bba4c9884fc86080 (patch)
tree       c99b583586ebec2a29be2b0173d1eb9ad07a68f9 /arch/arm/mm/fault-armv.c
parent     9cff96e5bfc8e366166bfb07610604c7604ac48c (diff)
[ARM] Introduce new PTE memory type bits
Provide L_PTE_MT_xxx definitions to describe the memory types that we
use in Linux/ARM.  These definitions are carefully picked such that:

1. their LSBs match what is required for pre-ARMv6 CPUs.

2. they all have a unique encoding, including after modification by
   build_mem_type_table() (the result being that some have more than
   one combination.)

Signed-off-by: Russell King <rmk+kernel@arm.linux.org.uk>
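As a minimal illustration of the idiom this introduces (the memory type
becomes a small multi-bit field that is compared and replaced as a whole,
rather than individual cacheable/bufferable bits being tested), here is a
self-contained sketch.  The EX_PTE_MT_* names, shift and values are
hypothetical stand-ins, not the kernel's actual definitions from
asm/pgtable.h.

#include <stdio.h>

/* Hypothetical field layout, for illustration only. */
#define EX_PTE_MT_SHIFT		2
#define EX_PTE_MT_MASK		(0x0f << EX_PTE_MT_SHIFT)
#define EX_PTE_MT_UNCACHED	(0x00 << EX_PTE_MT_SHIFT)
#define EX_PTE_MT_BUFFERABLE	(0x01 << EX_PTE_MT_SHIFT)
#define EX_PTE_MT_WRITEBACK	(0x03 << EX_PTE_MT_SHIFT)

/* Replace the memory-type field of a PTE value as a whole. */
static unsigned long ex_set_memtype(unsigned long pteval, unsigned long type)
{
	pteval &= ~EX_PTE_MT_MASK;	/* clear the whole field */
	pteval |= type;			/* install the new type  */
	return pteval;
}

int main(void)
{
	unsigned long pte = 0x1000 | EX_PTE_MT_WRITEBACK;

	/* Field compare: any type other than the wanted one is rewritten. */
	if ((pte & EX_PTE_MT_MASK) != EX_PTE_MT_BUFFERABLE)
		pte = ex_set_memtype(pte, EX_PTE_MT_BUFFERABLE);

	printf("pte = %#lx\n", pte);
	return 0;
}

The mask-and-compare form is what the adjust_pte() hunk below relies on:
any PTE whose memory-type field differs from shared_pte_mask is rewritten
to exactly that type, which a single-bit test-and-clear could not express
once more than two memory types exist.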
Diffstat (limited to 'arch/arm/mm/fault-armv.c')
-rw-r--r--  arch/arm/mm/fault-armv.c  11
1 file changed, 6 insertions, 5 deletions
diff --git a/arch/arm/mm/fault-armv.c b/arch/arm/mm/fault-armv.c
index a8ec97b4752e..6f92904a81e9 100644
--- a/arch/arm/mm/fault-armv.c
+++ b/arch/arm/mm/fault-armv.c
@@ -21,7 +21,7 @@
 #include <asm/pgtable.h>
 #include <asm/tlbflush.h>
 
-static unsigned long shared_pte_mask = L_PTE_CACHEABLE;
+static unsigned long shared_pte_mask = L_PTE_MT_BUFFERABLE;
 
 /*
  * We take the easy way out of this problem - we make the
@@ -63,9 +63,10 @@ static int adjust_pte(struct vm_area_struct *vma, unsigned long address)
 	 * If this page isn't present, or is already setup to
 	 * fault (ie, is old), we can safely ignore any issues.
 	 */
-	if (ret && pte_val(entry) & shared_pte_mask) {
+	if (ret && (pte_val(entry) & L_PTE_MT_MASK) != shared_pte_mask) {
 		flush_cache_page(vma, address, pte_pfn(entry));
-		pte_val(entry) &= ~shared_pte_mask;
+		pte_val(entry) &= ~L_PTE_MT_MASK;
+		pte_val(entry) |= shared_pte_mask;
 		set_pte_at(vma->vm_mm, address, pte, entry);
 		flush_tlb_page(vma, address);
 	}
@@ -197,7 +198,7 @@ void __init check_writebuffer_bugs(void)
 	unsigned long *p1, *p2;
 	pgprot_t prot = __pgprot(L_PTE_PRESENT|L_PTE_YOUNG|
 				 L_PTE_DIRTY|L_PTE_WRITE|
-				 L_PTE_BUFFERABLE);
+				 L_PTE_MT_BUFFERABLE);
 
 	p1 = vmap(&page, 1, VM_IOREMAP, prot);
 	p2 = vmap(&page, 1, VM_IOREMAP, prot);
@@ -218,7 +219,7 @@ void __init check_writebuffer_bugs(void)
 
 	if (v) {
 		printk("failed, %s\n", reason);
-		shared_pte_mask |= L_PTE_BUFFERABLE;
+		shared_pte_mask = L_PTE_MT_UNCACHED;
 	} else {
 		printk("ok\n");
 	}
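The last hunk is worth a note: because shared_pte_mask now holds a complete
memory type rather than an extra flag bit, the write-buffer fallback becomes
a plain assignment instead of an OR.  A small standalone sketch of that
decision, again using hypothetical EX_PTE_MT_* values rather than the
kernel's own definitions:

#include <stdio.h>

/* Hypothetical values, for illustration only. */
#define EX_PTE_MT_SHIFT		2
#define EX_PTE_MT_MASK		(0x0f << EX_PTE_MT_SHIFT)
#define EX_PTE_MT_UNCACHED	(0x00 << EX_PTE_MT_SHIFT)
#define EX_PTE_MT_BUFFERABLE	(0x01 << EX_PTE_MT_SHIFT)

/* Memory type applied when cache aliases must be resolved; bufferable
 * by default, as in the patch. */
static unsigned long ex_shared_pte_mask = EX_PTE_MT_BUFFERABLE;

/* If the write-buffer coherency test fails, fall back to fully uncached.
 * Note the plain assignment: the field is replaced wholesale, where the
 * old single-bit scheme OR'ed an extra bit into the mask. */
static void ex_writebuffer_check(int buggy)
{
	if (buggy)
		ex_shared_pte_mask = EX_PTE_MT_UNCACHED;
}

int main(void)
{
	ex_writebuffer_check(1);
	printf("shared memory type = %#lx\n", ex_shared_pte_mask);
	return 0;
}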