author     Russell King <rmk@dyn-67.arm.linux.org.uk>    2008-09-06 15:04:59 -0400
committer  Russell King <rmk+kernel@arm.linux.org.uk>    2008-10-01 11:40:56 -0400
commit     bb30f36f9b71c31dc8fe3483bba4c9884fc86080 (patch)
tree       c99b583586ebec2a29be2b0173d1eb9ad07a68f9 /arch/arm/mm/mmu.c
parent     9cff96e5bfc8e366166bfb07610604c7604ac48c (diff)
[ARM] Introduce new PTE memory type bits
Provide L_PTE_MT_xxx definitions to describe the memory types that we
use in Linux/ARM. These definitions are carefully picked such that:
1. their LSBs match what is required for pre-ARMv6 CPUs.
2. they all have a unique encoding, including after modification
   by build_mem_type_table() (the result being that some memory types
   end up with more than one bit combination.)
Signed-off-by: Russell King <rmk+kernel@arm.linux.org.uk>
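To make point 1 above concrete, here is a rough sketch of the encoding scheme being described. The authoritative L_PTE_MT_xxx definitions live in the pgtable.h part of this series, not in the mmu.c diff below; the values here only illustrate how a four-bit type field at Linux PTE bits 5:2 can keep its two low bits aligned with the old bufferable/cacheable bits used by pre-ARMv6 CPUs:

	/*
	 * Illustrative sketch only -- not quoted from the patched header.
	 * The low two bits of the field line up with the old L_PTE_BUFFERABLE
	 * (bit 2) and L_PTE_CACHEABLE (bit 3) bits, so pre-ARMv6 set_pte code
	 * can keep deriving its C/B hardware bits directly, while ARMv6+ and
	 * Xscale3 translate the whole field into TEX/C/B encodings.
	 */
	#define L_PTE_MT_UNCACHED	(0x00 << 2)	/* ..00 */
	#define L_PTE_MT_BUFFERABLE	(0x01 << 2)	/* ..01 */
	#define L_PTE_MT_WRITETHROUGH	(0x02 << 2)	/* ..10 */
	#define L_PTE_MT_WRITEBACK	(0x03 << 2)	/* ..11 */
	#define L_PTE_MT_WRITEALLOC	(0x07 << 2)	/* ..11, upper bits keep it distinct */
	#define L_PTE_MT_MASK		(0x0f << 2)

The device types used in the diff (L_PTE_MT_DEV_SHARED, L_PTE_MT_DEV_NONSHARED, L_PTE_MT_DEV_CACHED, L_PTE_MT_DEV_WC, L_PTE_MT_DEV_IXP2000) follow the same pattern, each with a value that stays unique even after build_mem_type_table() adjusts the bits for a particular CPU.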
Diffstat (limited to 'arch/arm/mm/mmu.c')
-rw-r--r--  arch/arm/mm/mmu.c  |  62
1 file changed, 32 insertions(+), 30 deletions(-)
diff --git a/arch/arm/mm/mmu.c b/arch/arm/mm/mmu.c
index a713e40e1f1a..cfc0add4874e 100644
--- a/arch/arm/mm/mmu.c
+++ b/arch/arm/mm/mmu.c
@@ -68,27 +68,27 @@ static struct cachepolicy cache_policies[] __initdata = {
 		.policy = "uncached",
 		.cr_mask = CR_W|CR_C,
 		.pmd = PMD_SECT_UNCACHED,
-		.pte = 0,
+		.pte = L_PTE_MT_UNCACHED,
 	}, {
 		.policy = "buffered",
 		.cr_mask = CR_C,
 		.pmd = PMD_SECT_BUFFERED,
-		.pte = PTE_BUFFERABLE,
+		.pte = L_PTE_MT_BUFFERABLE,
 	}, {
 		.policy = "writethrough",
 		.cr_mask = 0,
 		.pmd = PMD_SECT_WT,
-		.pte = PTE_CACHEABLE,
+		.pte = L_PTE_MT_WRITETHROUGH,
 	}, {
 		.policy = "writeback",
 		.cr_mask = 0,
 		.pmd = PMD_SECT_WB,
-		.pte = PTE_BUFFERABLE|PTE_CACHEABLE,
+		.pte = L_PTE_MT_WRITEBACK,
 	}, {
 		.policy = "writealloc",
 		.cr_mask = 0,
 		.pmd = PMD_SECT_WBWA,
-		.pte = PTE_BUFFERABLE|PTE_CACHEABLE,
+		.pte = L_PTE_MT_WRITEALLOC,
 	}
 };
 
@@ -186,35 +186,36 @@ void adjust_cr(unsigned long mask, unsigned long set)
 
 static struct mem_type mem_types[] = {
 	[MT_DEVICE] = { /* Strongly ordered / ARMv6 shared device */
-		.prot_pte = PROT_PTE_DEVICE,
+		.prot_pte = PROT_PTE_DEVICE | L_PTE_MT_DEV_SHARED |
+			    L_PTE_SHARED,
 		.prot_l1 = PMD_TYPE_TABLE,
 		.prot_sect = PROT_SECT_DEVICE | PMD_SECT_UNCACHED,
 		.domain = DOMAIN_IO,
 	},
 	[MT_DEVICE_NONSHARED] = { /* ARMv6 non-shared device */
-		.prot_pte = PROT_PTE_DEVICE,
+		.prot_pte = PROT_PTE_DEVICE | L_PTE_MT_DEV_NONSHARED,
 		.prot_pte_ext = PTE_EXT_TEX(2),
 		.prot_l1 = PMD_TYPE_TABLE,
 		.prot_sect = PROT_SECT_DEVICE | PMD_SECT_TEX(2),
 		.domain = DOMAIN_IO,
 	},
 	[MT_DEVICE_CACHED] = { /* ioremap_cached */
-		.prot_pte = PROT_PTE_DEVICE | L_PTE_CACHEABLE | L_PTE_BUFFERABLE,
+		.prot_pte = PROT_PTE_DEVICE | L_PTE_MT_DEV_CACHED,
 		.prot_l1 = PMD_TYPE_TABLE,
 		.prot_sect = PROT_SECT_DEVICE | PMD_SECT_WB,
 		.domain = DOMAIN_IO,
 	},
 	[MT_DEVICE_IXP2000] = { /* IXP2400 requires XCB=101 for on-chip I/O */
-		.prot_pte = PROT_PTE_DEVICE,
+		.prot_pte = PROT_PTE_DEVICE | L_PTE_MT_DEV_IXP2000,
 		.prot_l1 = PMD_TYPE_TABLE,
 		.prot_sect = PROT_SECT_DEVICE | PMD_SECT_BUFFERABLE |
 			     PMD_SECT_TEX(1),
 		.domain = DOMAIN_IO,
 	},
 	[MT_DEVICE_WC] = { /* ioremap_wc */
-		.prot_pte = PROT_PTE_DEVICE,
+		.prot_pte = PROT_PTE_DEVICE | L_PTE_MT_DEV_WC,
 		.prot_l1 = PMD_TYPE_TABLE,
-		.prot_sect = PROT_SECT_DEVICE,
+		.prot_sect = PROT_SECT_DEVICE | PMD_SECT_BUFFERABLE,
 		.domain = DOMAIN_IO,
 	},
 	[MT_CACHECLEAN] = {
@@ -259,7 +260,7 @@ static void __init build_mem_type_table(void)
 {
 	struct cachepolicy *cp;
 	unsigned int cr = get_cr();
-	unsigned int user_pgprot, kern_pgprot;
+	unsigned int user_pgprot, kern_pgprot, vecs_pgprot;
 	int cpu_arch = cpu_architecture();
 	int i;
 
@@ -277,6 +278,9 @@ static void __init build_mem_type_table(void)
 		cachepolicy = CPOLICY_WRITEBACK;
 		ecc_mask = 0;
 	}
+#ifdef CONFIG_SMP
+	cachepolicy = CPOLICY_WRITEALLOC;
+#endif
 
 	/*
 	 * On non-Xscale3 ARMv5-and-older systems, use CB=01
@@ -286,10 +290,9 @@ static void __init build_mem_type_table(void)
 	 */
 	if (cpu_is_xsc3() || cpu_arch >= CPU_ARCH_ARMv6) {
 		mem_types[MT_DEVICE_WC].prot_pte_ext |= PTE_EXT_TEX(1);
+		mem_types[MT_DEVICE_WC].prot_pte &= ~L_PTE_BUFFERABLE;
 		mem_types[MT_DEVICE_WC].prot_sect |= PMD_SECT_TEX(1);
-	} else {
-		mem_types[MT_DEVICE_WC].prot_pte |= L_PTE_BUFFERABLE;
-		mem_types[MT_DEVICE_WC].prot_sect |= PMD_SECT_BUFFERABLE;
+		mem_types[MT_DEVICE_WC].prot_sect &= ~PMD_SECT_BUFFERABLE;
 	}
 
 	/*
@@ -312,7 +315,15 @@ static void __init build_mem_type_table(void)
 	}
 
 	cp = &cache_policies[cachepolicy];
-	kern_pgprot = user_pgprot = cp->pte;
+	vecs_pgprot = kern_pgprot = user_pgprot = cp->pte;
+
+#ifndef CONFIG_SMP
+	/*
+	 * Only use write-through for non-SMP systems
+	 */
+	if (cpu_arch >= CPU_ARCH_ARMv5 && cachepolicy > CPOLICY_WRITETHROUGH)
+		vecs_pgprot = cache_policies[CPOLICY_WRITETHROUGH].pte;
+#endif
 
 	/*
 	 * Enable CPU-specific coherency if supported.
@@ -349,30 +360,21 @@ static void __init build_mem_type_table(void)
 		 */
 		user_pgprot |= L_PTE_SHARED;
 		kern_pgprot |= L_PTE_SHARED;
+		vecs_pgprot |= L_PTE_SHARED;
 		mem_types[MT_MEMORY].prot_sect |= PMD_SECT_S;
 #endif
 	}
 
 	for (i = 0; i < 16; i++) {
 		unsigned long v = pgprot_val(protection_map[i]);
-		v = (v & ~(L_PTE_BUFFERABLE|L_PTE_CACHEABLE)) | user_pgprot;
-		protection_map[i] = __pgprot(v);
+		protection_map[i] = __pgprot(v | user_pgprot);
 	}
 
-	mem_types[MT_LOW_VECTORS].prot_pte |= kern_pgprot;
-	mem_types[MT_HIGH_VECTORS].prot_pte |= kern_pgprot;
+	mem_types[MT_LOW_VECTORS].prot_pte |= vecs_pgprot;
+	mem_types[MT_HIGH_VECTORS].prot_pte |= vecs_pgprot;
 
-	if (cpu_arch >= CPU_ARCH_ARMv5) {
-#ifndef CONFIG_SMP
-		/*
-		 * Only use write-through for non-SMP systems
-		 */
-		mem_types[MT_LOW_VECTORS].prot_pte &= ~L_PTE_BUFFERABLE;
-		mem_types[MT_HIGH_VECTORS].prot_pte &= ~L_PTE_BUFFERABLE;
-#endif
-	} else {
+	if (cpu_arch < CPU_ARCH_ARMv5)
 		mem_types[MT_MINICLEAN].prot_sect &= ~PMD_SECT_TEX(1);
-	}
 
 	pgprot_user = __pgprot(L_PTE_PRESENT | L_PTE_YOUNG | user_pgprot);
 	pgprot_kernel = __pgprot(L_PTE_PRESENT | L_PTE_YOUNG |