author		Russell King <rmk@dyn-67.arm.linux.org.uk>	2008-09-06 15:04:59 -0400
committer	Russell King <rmk+kernel@arm.linux.org.uk>	2008-10-01 11:40:56 -0400
commit		bb30f36f9b71c31dc8fe3483bba4c9884fc86080 (patch)
tree		c99b583586ebec2a29be2b0173d1eb9ad07a68f9 /arch/arm
parent		9cff96e5bfc8e366166bfb07610604c7604ac48c (diff)
[ARM] Introduce new PTE memory type bits
Provide L_PTE_MT_xxx definitions to describe the memory types that we
use in Linux/ARM.  These definitions are carefully picked such that:

1. their LSBs match what is required for pre-ARMv6 CPUs.
2. they all have a unique encoding, including after modification
   by build_mem_type_table() (the result being that some have more
   than one combination.)

Signed-off-by: Russell King <rmk+kernel@arm.linux.org.uk>
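The claims in the log are easy to check mechanically: the memory type lives in PTE bits 2-5 (L_PTE_MT_MASK is 0x0f << 2), and the two LSBs of each 4-bit XXCB code are the hardware C (cacheable) and B (bufferable) bits that pre-ARMv6 CPUs expect. The following stand-alone sketch (not part of the commit; user-space C, with the codes copied from the patch) asserts that all thirteen codes are distinct and prints each one's C/B bits:

#include <assert.h>
#include <stdio.h>

/* The thirteen memory-type codes from the patch, as 4-bit XXCB values
 * (i.e. each L_PTE_MT_xxx define shifted right by 2). */
static const struct { const char *name; unsigned int code; } mt[] = {
	{ "UNCACHED",      0x00 }, { "BUFFERABLE",  0x01 },
	{ "WRITETHROUGH",  0x02 }, { "WRITEBACK",   0x03 },
	{ "MINICACHE",     0x06 }, { "WRITEALLOC",  0x07 },
	{ "DEV_SHARED",    0x04 }, { "DEV_SHARED2", 0x05 },
	{ "DEV_NONSHARED", 0x0c }, { "DEV_IXP2000", 0x0d },
	{ "DEV_WC",        0x09 }, { "DEV_WC2",     0x08 },
	{ "DEV_CACHED",    0x0b },
};

int main(void)
{
	unsigned int seen = 0, i;

	for (i = 0; i < sizeof(mt) / sizeof(mt[0]); i++) {
		unsigned int c = mt[i].code;

		assert(!(seen & (1u << c)));	/* every encoding is unique */
		seen |= 1u << c;

		/* Bit 0 is B (PTE bit 2), bit 1 is C (PTE bit 3): exactly
		 * the bufferable/cacheable bits pre-ARMv6 CPUs require. */
		printf("L_PTE_MT_%-13s XXCB=%u%u%u%u  C=%u B=%u\n",
		       mt[i].name, (c >> 3) & 1, (c >> 2) & 1,
		       (c >> 1) & 1, c & 1, (c >> 1) & 1, c & 1);
	}
	return 0;
}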
Diffstat (limited to 'arch/arm')
-rw-r--r--	arch/arm/include/asm/pgtable.h	| 33
-rw-r--r--	arch/arm/mm/copypage-v4mc.c	|  2
-rw-r--r--	arch/arm/mm/copypage-xscale.c	|  2
-rw-r--r--	arch/arm/mm/fault-armv.c	| 11
-rw-r--r--	arch/arm/mm/mmu.c		| 62
5 files changed, 68 insertions, 42 deletions
diff --git a/arch/arm/include/asm/pgtable.h b/arch/arm/include/asm/pgtable.h
index 5c75e02b3c7c..8df2e254a3e4 100644
--- a/arch/arm/include/asm/pgtable.h
+++ b/arch/arm/include/asm/pgtable.h
@@ -164,14 +164,35 @@ extern void __pgd_error(const char *file, int line, unsigned long val);
 #define L_PTE_PRESENT		(1 << 0)
 #define L_PTE_FILE		(1 << 1)	/* only when !PRESENT */
 #define L_PTE_YOUNG		(1 << 1)
-#define L_PTE_BUFFERABLE	(1 << 2)	/* matches PTE */
-#define L_PTE_CACHEABLE	(1 << 3)	/* matches PTE */
+#define L_PTE_BUFFERABLE	(1 << 2)	/* obsolete, matches PTE */
+#define L_PTE_CACHEABLE	(1 << 3)	/* obsolete, matches PTE */
 #define L_PTE_DIRTY		(1 << 6)
 #define L_PTE_WRITE		(1 << 7)
 #define L_PTE_USER		(1 << 8)
 #define L_PTE_EXEC		(1 << 9)
 #define L_PTE_SHARED		(1 << 10)	/* shared(v6), coherent(xsc3) */
 
+/*
+ * These are the memory types, defined to be compatible with
+ * pre-ARMv6 CPUs cacheable and bufferable bits:   XXCB
+ * (note: build_mem_type_table modifies these bits
+ * to work with our existing proc-*.S setup.)
+ */
+#define L_PTE_MT_UNCACHED	(0x00 << 2)	/* 0000 */
+#define L_PTE_MT_BUFFERABLE	(0x01 << 2)	/* 0001 */
+#define L_PTE_MT_WRITETHROUGH	(0x02 << 2)	/* 0010 */
+#define L_PTE_MT_WRITEBACK	(0x03 << 2)	/* 0011 */
+#define L_PTE_MT_MINICACHE	(0x06 << 2)	/* 0110 (sa1100, xscale) */
+#define L_PTE_MT_WRITEALLOC	(0x07 << 2)	/* 0111 */
+#define L_PTE_MT_DEV_SHARED	(0x04 << 2)	/* 0100 (pre-v6) */
+#define L_PTE_MT_DEV_SHARED2	(0x05 << 2)	/* 0101 (v6) */
+#define L_PTE_MT_DEV_NONSHARED	(0x0c << 2)	/* 1100 */
+#define L_PTE_MT_DEV_IXP2000	(0x0d << 2)	/* 1101 */
+#define L_PTE_MT_DEV_WC	(0x09 << 2)	/* 1001 (pre-v6, !xsc3) */
+#define L_PTE_MT_DEV_WC2	(0x08 << 2)	/* 1000 (xsc3, v6) */
+#define L_PTE_MT_DEV_CACHED	(0x0b << 2)	/* 1011 */
+#define L_PTE_MT_MASK		(0x0f << 2)
+
 #ifndef __ASSEMBLY__
 
 /*
@@ -180,7 +201,7 @@ extern void __pgd_error(const char *file, int line, unsigned long val);
  * as well as any architecture dependent bits like global/ASID and SMP
  * shared mapping bits.
  */
-#define _L_PTE_DEFAULT	L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_CACHEABLE | L_PTE_BUFFERABLE
+#define _L_PTE_DEFAULT	L_PTE_PRESENT | L_PTE_YOUNG
 #define _L_PTE_READ	L_PTE_USER | L_PTE_EXEC
 
 extern pgprot_t pgprot_user;
@@ -286,8 +307,10 @@ static inline pte_t pte_mkspecial(pte_t pte) { return pte; }
 /*
  * Mark the prot value as uncacheable and unbufferable.
  */
-#define pgprot_noncached(prot)	__pgprot(pgprot_val(prot) & ~(L_PTE_CACHEABLE | L_PTE_BUFFERABLE))
-#define pgprot_writecombine(prot) __pgprot(pgprot_val(prot) & ~L_PTE_CACHEABLE)
+#define pgprot_noncached(prot) \
+	__pgprot((pgprot_val(prot) & ~L_PTE_MT_MASK) | L_PTE_MT_UNCACHED)
+#define pgprot_writecombine(prot) \
+	__pgprot((pgprot_val(prot) & ~L_PTE_MT_MASK) | L_PTE_MT_BUFFERABLE)
 
 #define pmd_none(pmd)		(!pmd_val(pmd))
 #define pmd_present(pmd)	(pmd_val(pmd))
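Note how the hunk above also reworks pgprot_noncached() and pgprot_writecombine(): instead of clearing individual C/B bits, they now replace the whole four-bit field. With packed codes, single-bit clearing would produce garbage, e.g. clearing the cacheable bit of L_PTE_MT_WRITEALLOC (0111) yields 0101, which is L_PTE_MT_DEV_SHARED2. A minimal user-space sketch of the mask-and-replace pattern (pteval_t here is a stand-in, not the kernel's type):

#include <assert.h>

typedef unsigned long pteval_t;		/* stand-in PTE value type */

#define L_PTE_MT_UNCACHED	(0x00 << 2)
#define L_PTE_MT_BUFFERABLE	(0x01 << 2)
#define L_PTE_MT_WRITEBACK	(0x03 << 2)
#define L_PTE_MT_MASK		(0x0f << 2)

/* The pattern used by the new pgprot_noncached()/pgprot_writecombine():
 * clear the whole memory-type field, then OR in the wanted type. */
static pteval_t set_mem_type(pteval_t prot, pteval_t type)
{
	return (prot & ~L_PTE_MT_MASK) | type;
}

int main(void)
{
	pteval_t prot = L_PTE_MT_WRITEBACK | (1UL << 7);  /* + unrelated bit */

	prot = set_mem_type(prot, L_PTE_MT_BUFFERABLE);   /* writecombine */
	assert((prot & L_PTE_MT_MASK) == L_PTE_MT_BUFFERABLE);
	assert(prot & (1UL << 7));	/* other protection bits survive */

	prot = set_mem_type(prot, L_PTE_MT_UNCACHED);     /* noncached */
	assert((prot & L_PTE_MT_MASK) == L_PTE_MT_UNCACHED);
	return 0;
}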
diff --git a/arch/arm/mm/copypage-v4mc.c b/arch/arm/mm/copypage-v4mc.c
index ded0e96d069d..8d33e2549344 100644
--- a/arch/arm/mm/copypage-v4mc.c
+++ b/arch/arm/mm/copypage-v4mc.c
@@ -28,7 +28,7 @@
  * specific hacks for copying pages efficiently.
  */
 #define minicache_pgprot __pgprot(L_PTE_PRESENT | L_PTE_YOUNG | \
-				  L_PTE_CACHEABLE)
+				  L_PTE_MT_MINICACHE)
 
 static DEFINE_SPINLOCK(minicache_lock);
 
diff --git a/arch/arm/mm/copypage-xscale.c b/arch/arm/mm/copypage-xscale.c
index 2e455f82a4d5..bad49331bbf9 100644
--- a/arch/arm/mm/copypage-xscale.c
+++ b/arch/arm/mm/copypage-xscale.c
@@ -30,7 +30,7 @@
 #define COPYPAGE_MINICACHE	0xffff8000
 
 #define minicache_pgprot __pgprot(L_PTE_PRESENT | L_PTE_YOUNG | \
-				  L_PTE_CACHEABLE)
+				  L_PTE_MT_MINICACHE)
 
 static DEFINE_SPINLOCK(minicache_lock);
 
diff --git a/arch/arm/mm/fault-armv.c b/arch/arm/mm/fault-armv.c
index a8ec97b4752e..6f92904a81e9 100644
--- a/arch/arm/mm/fault-armv.c
+++ b/arch/arm/mm/fault-armv.c
@@ -21,7 +21,7 @@
 #include <asm/pgtable.h>
 #include <asm/tlbflush.h>
 
-static unsigned long shared_pte_mask = L_PTE_CACHEABLE;
+static unsigned long shared_pte_mask = L_PTE_MT_BUFFERABLE;
 
 /*
  * We take the easy way out of this problem - we make the
@@ -63,9 +63,10 @@ static int adjust_pte(struct vm_area_struct *vma, unsigned long address)
 	 * If this page isn't present, or is already setup to
 	 * fault (ie, is old), we can safely ignore any issues.
 	 */
-	if (ret && pte_val(entry) & shared_pte_mask) {
+	if (ret && (pte_val(entry) & L_PTE_MT_MASK) != shared_pte_mask) {
 		flush_cache_page(vma, address, pte_pfn(entry));
-		pte_val(entry) &= ~shared_pte_mask;
+		pte_val(entry) &= ~L_PTE_MT_MASK;
+		pte_val(entry) |= shared_pte_mask;
 		set_pte_at(vma->vm_mm, address, pte, entry);
 		flush_tlb_page(vma, address);
 	}
@@ -197,7 +198,7 @@ void __init check_writebuffer_bugs(void)
 	unsigned long *p1, *p2;
 	pgprot_t prot = __pgprot(L_PTE_PRESENT|L_PTE_YOUNG|
 				 L_PTE_DIRTY|L_PTE_WRITE|
-				 L_PTE_BUFFERABLE);
+				 L_PTE_MT_BUFFERABLE);
 
 	p1 = vmap(&page, 1, VM_IOREMAP, prot);
 	p2 = vmap(&page, 1, VM_IOREMAP, prot);
@@ -218,7 +219,7 @@ void __init check_writebuffer_bugs(void)
 
 	if (v) {
 		printk("failed, %s\n", reason);
-		shared_pte_mask |= L_PTE_BUFFERABLE;
+		shared_pte_mask = L_PTE_MT_UNCACHED;
 	} else {
 		printk("ok\n");
 	}
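The fault-armv.c change flips the meaning of shared_pte_mask: it used to name bits to strip from a PTE, and now names the target memory type itself (downgraded from L_PTE_MT_BUFFERABLE to L_PTE_MT_UNCACHED when check_writebuffer_bugs() fails, as in the hunk above). Because types such as MINICACHE (0110) and WRITETHROUGH (0010) share the same C/B bits, a single-bit test can no longer classify a PTE, so adjust_pte() compares and rewrites the whole field. A rough stand-alone rendering of that logic (pteval_t is again a stand-in):

#include <assert.h>

typedef unsigned long pteval_t;

#define L_PTE_MT_UNCACHED	(0x00 << 2)
#define L_PTE_MT_BUFFERABLE	(0x01 << 2)
#define L_PTE_MT_WRITEBACK	(0x03 << 2)
#define L_PTE_MT_MASK		(0x0f << 2)

/* Safe type for shared writable mappings; check_writebuffer_bugs()
 * downgrades this to L_PTE_MT_UNCACHED on broken write buffers. */
static pteval_t shared_pte_mask = L_PTE_MT_BUFFERABLE;

/* New adjust_pte() predicate: fix up unless the field already holds
 * the shared-safe type. */
static int needs_fixup(pteval_t pte)
{
	return (pte & L_PTE_MT_MASK) != shared_pte_mask;
}

/* New update step: field replace instead of bit clearing. */
static pteval_t fixup(pteval_t pte)
{
	return (pte & ~L_PTE_MT_MASK) | shared_pte_mask;
}

int main(void)
{
	pteval_t pte = L_PTE_MT_WRITEBACK;

	assert(needs_fixup(pte));
	pte = fixup(pte);
	assert(!needs_fixup(pte));
	return 0;
}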
diff --git a/arch/arm/mm/mmu.c b/arch/arm/mm/mmu.c
index a713e40e1f1a..cfc0add4874e 100644
--- a/arch/arm/mm/mmu.c
+++ b/arch/arm/mm/mmu.c
@@ -68,27 +68,27 @@ static struct cachepolicy cache_policies[] __initdata = {
 		.policy		= "uncached",
 		.cr_mask	= CR_W|CR_C,
 		.pmd		= PMD_SECT_UNCACHED,
-		.pte		= 0,
+		.pte		= L_PTE_MT_UNCACHED,
 	}, {
 		.policy		= "buffered",
 		.cr_mask	= CR_C,
 		.pmd		= PMD_SECT_BUFFERED,
-		.pte		= PTE_BUFFERABLE,
+		.pte		= L_PTE_MT_BUFFERABLE,
 	}, {
 		.policy		= "writethrough",
 		.cr_mask	= 0,
 		.pmd		= PMD_SECT_WT,
-		.pte		= PTE_CACHEABLE,
+		.pte		= L_PTE_MT_WRITETHROUGH,
 	}, {
 		.policy		= "writeback",
 		.cr_mask	= 0,
 		.pmd		= PMD_SECT_WB,
-		.pte		= PTE_BUFFERABLE|PTE_CACHEABLE,
+		.pte		= L_PTE_MT_WRITEBACK,
 	}, {
 		.policy		= "writealloc",
 		.cr_mask	= 0,
 		.pmd		= PMD_SECT_WBWA,
-		.pte		= PTE_BUFFERABLE|PTE_CACHEABLE,
+		.pte		= L_PTE_MT_WRITEALLOC,
 	}
 };
 
@@ -186,35 +186,36 @@ void adjust_cr(unsigned long mask, unsigned long set)
 
 static struct mem_type mem_types[] = {
 	[MT_DEVICE] = {		  /* Strongly ordered / ARMv6 shared device */
-		.prot_pte	= PROT_PTE_DEVICE,
+		.prot_pte	= PROT_PTE_DEVICE | L_PTE_MT_DEV_SHARED |
+				  L_PTE_SHARED,
 		.prot_l1	= PMD_TYPE_TABLE,
 		.prot_sect	= PROT_SECT_DEVICE | PMD_SECT_UNCACHED,
 		.domain		= DOMAIN_IO,
 	},
 	[MT_DEVICE_NONSHARED] = { /* ARMv6 non-shared device */
-		.prot_pte	= PROT_PTE_DEVICE,
+		.prot_pte	= PROT_PTE_DEVICE | L_PTE_MT_DEV_NONSHARED,
 		.prot_pte_ext	= PTE_EXT_TEX(2),
 		.prot_l1	= PMD_TYPE_TABLE,
 		.prot_sect	= PROT_SECT_DEVICE | PMD_SECT_TEX(2),
 		.domain		= DOMAIN_IO,
 	},
 	[MT_DEVICE_CACHED] = {	  /* ioremap_cached */
-		.prot_pte	= PROT_PTE_DEVICE | L_PTE_CACHEABLE | L_PTE_BUFFERABLE,
+		.prot_pte	= PROT_PTE_DEVICE | L_PTE_MT_DEV_CACHED,
 		.prot_l1	= PMD_TYPE_TABLE,
 		.prot_sect	= PROT_SECT_DEVICE | PMD_SECT_WB,
 		.domain		= DOMAIN_IO,
 	},
 	[MT_DEVICE_IXP2000] = {	  /* IXP2400 requires XCB=101 for on-chip I/O */
-		.prot_pte	= PROT_PTE_DEVICE,
+		.prot_pte	= PROT_PTE_DEVICE | L_PTE_MT_DEV_IXP2000,
 		.prot_l1	= PMD_TYPE_TABLE,
 		.prot_sect	= PROT_SECT_DEVICE | PMD_SECT_BUFFERABLE |
 				  PMD_SECT_TEX(1),
 		.domain		= DOMAIN_IO,
 	},
 	[MT_DEVICE_WC] = {	/* ioremap_wc */
-		.prot_pte	= PROT_PTE_DEVICE,
+		.prot_pte	= PROT_PTE_DEVICE | L_PTE_MT_DEV_WC,
 		.prot_l1	= PMD_TYPE_TABLE,
-		.prot_sect	= PROT_SECT_DEVICE,
+		.prot_sect	= PROT_SECT_DEVICE | PMD_SECT_BUFFERABLE,
 		.domain		= DOMAIN_IO,
 	},
 	[MT_CACHECLEAN] = {
@@ -259,7 +260,7 @@ static void __init build_mem_type_table(void)
 {
 	struct cachepolicy *cp;
 	unsigned int cr = get_cr();
-	unsigned int user_pgprot, kern_pgprot;
+	unsigned int user_pgprot, kern_pgprot, vecs_pgprot;
 	int cpu_arch = cpu_architecture();
 	int i;
 
@@ -277,6 +278,9 @@ static void __init build_mem_type_table(void)
 		cachepolicy = CPOLICY_WRITEBACK;
 		ecc_mask = 0;
 	}
+#ifdef CONFIG_SMP
+	cachepolicy = CPOLICY_WRITEALLOC;
+#endif
 
 	/*
 	 * On non-Xscale3 ARMv5-and-older systems, use CB=01
@@ -286,10 +290,9 @@ static void __init build_mem_type_table(void)
 	 */
 	if (cpu_is_xsc3() || cpu_arch >= CPU_ARCH_ARMv6) {
 		mem_types[MT_DEVICE_WC].prot_pte_ext |= PTE_EXT_TEX(1);
+		mem_types[MT_DEVICE_WC].prot_pte &= ~L_PTE_BUFFERABLE;
 		mem_types[MT_DEVICE_WC].prot_sect |= PMD_SECT_TEX(1);
-	} else {
-		mem_types[MT_DEVICE_WC].prot_pte |= L_PTE_BUFFERABLE;
-		mem_types[MT_DEVICE_WC].prot_sect |= PMD_SECT_BUFFERABLE;
+		mem_types[MT_DEVICE_WC].prot_sect &= ~PMD_SECT_BUFFERABLE;
 	}
 
 	/*
@@ -312,7 +315,15 @@ static void __init build_mem_type_table(void)
 	}
 
 	cp = &cache_policies[cachepolicy];
-	kern_pgprot = user_pgprot = cp->pte;
+	vecs_pgprot = kern_pgprot = user_pgprot = cp->pte;
+
+#ifndef CONFIG_SMP
+	/*
+	 * Only use write-through for non-SMP systems
+	 */
+	if (cpu_arch >= CPU_ARCH_ARMv5 && cachepolicy > CPOLICY_WRITETHROUGH)
+		vecs_pgprot = cache_policies[CPOLICY_WRITETHROUGH].pte;
+#endif
 
 	/*
 	 * Enable CPU-specific coherency if supported.
@@ -349,30 +360,21 @@ static void __init build_mem_type_table(void)
 		 */
 		user_pgprot |= L_PTE_SHARED;
 		kern_pgprot |= L_PTE_SHARED;
+		vecs_pgprot |= L_PTE_SHARED;
 		mem_types[MT_MEMORY].prot_sect |= PMD_SECT_S;
 #endif
 	}
 
 	for (i = 0; i < 16; i++) {
 		unsigned long v = pgprot_val(protection_map[i]);
-		v = (v & ~(L_PTE_BUFFERABLE|L_PTE_CACHEABLE)) | user_pgprot;
-		protection_map[i] = __pgprot(v);
+		protection_map[i] = __pgprot(v | user_pgprot);
 	}
 
-	mem_types[MT_LOW_VECTORS].prot_pte |= kern_pgprot;
-	mem_types[MT_HIGH_VECTORS].prot_pte |= kern_pgprot;
+	mem_types[MT_LOW_VECTORS].prot_pte |= vecs_pgprot;
+	mem_types[MT_HIGH_VECTORS].prot_pte |= vecs_pgprot;
 
-	if (cpu_arch >= CPU_ARCH_ARMv5) {
-#ifndef CONFIG_SMP
-		/*
-		 * Only use write-through for non-SMP systems
-		 */
-		mem_types[MT_LOW_VECTORS].prot_pte &= ~L_PTE_BUFFERABLE;
-		mem_types[MT_HIGH_VECTORS].prot_pte &= ~L_PTE_BUFFERABLE;
-#endif
-	} else {
+	if (cpu_arch < CPU_ARCH_ARMv5)
 		mem_types[MT_MINICLEAN].prot_sect &= ~PMD_SECT_TEX(1);
-	}
 
 	pgprot_user   = __pgprot(L_PTE_PRESENT | L_PTE_YOUNG | user_pgprot);
 	pgprot_kernel = __pgprot(L_PTE_PRESENT | L_PTE_YOUNG |