author		Russell King <rmk@dyn-67.arm.linux.org.uk>	2008-10-09 16:31:56 -0400
committer	Russell King <rmk+kernel@arm.linux.org.uk>	2008-10-09 16:31:56 -0400
commit		6a4690c22f5da1eb1c898b61b6a80da52fbd976f
tree		a03891a32abe0da191fb765fe669a597e07423c6	/arch/arm/mm/mmu.c
parent		90bb28b0644f7324f8bd1feb27b35146e6785ba2
parent		8ec53663d2698076468b3e1edc4e1b418bd54de3
Merge branch 'ptebits' into devel
Conflicts:

	arch/arm/Kconfig
Diffstat (limited to 'arch/arm/mm/mmu.c')
-rw-r--r--	arch/arm/mm/mmu.c	74
1 file changed, 42 insertions(+), 32 deletions(-)
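What the merged 'ptebits' work changes, in brief: the Linux-level PTE stops carrying raw hardware cache bits (PTE_CACHEABLE, PTE_BUFFERABLE) plus a separate prot_pte_ext word, and instead carries a single abstract memory-type field, L_PTE_MT_*, which the per-CPU set_pte_ext() implementation translates into hardware C/B/TEX bits. A minimal sketch of such an encoding, assuming a 4-bit index starting at bit 2 of the Linux PTE; the real definitions live in the pgtable headers at this commit, and the concrete values below are illustrative rather than authoritative:

	/*
	 * Sketch only: one abstract memory-type index packed into the
	 * Linux PTE, replacing the old independent cacheable/bufferable
	 * flags.  Shift and values are assumptions for illustration.
	 */
	#define L_PTE_MT_SHIFT		2
	#define L_PTE_MT_MASK		(0x0f << L_PTE_MT_SHIFT)

	#define L_PTE_MT_UNCACHED	(0x00 << L_PTE_MT_SHIFT)
	#define L_PTE_MT_BUFFERABLE	(0x01 << L_PTE_MT_SHIFT)
	#define L_PTE_MT_WRITETHROUGH	(0x02 << L_PTE_MT_SHIFT)
	#define L_PTE_MT_WRITEBACK	(0x03 << L_PTE_MT_SHIFT)
	#define L_PTE_MT_DEV_SHARED	(0x04 << L_PTE_MT_SHIFT)
	#define L_PTE_MT_WRITEALLOC	(0x07 << L_PTE_MT_SHIFT)
	#define L_PTE_MT_DEV_WC		(0x09 << L_PTE_MT_SHIFT)
	#define L_PTE_MT_DEV_CACHED	(0x0b << L_PTE_MT_SHIFT)
	#define L_PTE_MT_DEV_NONSHARED	(0x0c << L_PTE_MT_SHIFT)

Naming the semantics once in a single field, instead of scattering cache bits through every mem_type, is what lets the rest of this diff delete prot_pte_ext entirely.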
diff --git a/arch/arm/mm/mmu.c b/arch/arm/mm/mmu.c
index e7af83e569d7..8ba754064559 100644
--- a/arch/arm/mm/mmu.c
+++ b/arch/arm/mm/mmu.c
@@ -66,27 +66,27 @@ static struct cachepolicy cache_policies[] __initdata = {
 		.policy		= "uncached",
 		.cr_mask	= CR_W|CR_C,
 		.pmd		= PMD_SECT_UNCACHED,
-		.pte		= 0,
+		.pte		= L_PTE_MT_UNCACHED,
 	}, {
 		.policy		= "buffered",
 		.cr_mask	= CR_C,
 		.pmd		= PMD_SECT_BUFFERED,
-		.pte		= PTE_BUFFERABLE,
+		.pte		= L_PTE_MT_BUFFERABLE,
 	}, {
 		.policy		= "writethrough",
 		.cr_mask	= 0,
 		.pmd		= PMD_SECT_WT,
-		.pte		= PTE_CACHEABLE,
+		.pte		= L_PTE_MT_WRITETHROUGH,
 	}, {
 		.policy		= "writeback",
 		.cr_mask	= 0,
 		.pmd		= PMD_SECT_WB,
-		.pte		= PTE_BUFFERABLE|PTE_CACHEABLE,
+		.pte		= L_PTE_MT_WRITEBACK,
 	}, {
 		.policy		= "writealloc",
 		.cr_mask	= 0,
 		.pmd		= PMD_SECT_WBWA,
-		.pte		= PTE_BUFFERABLE|PTE_CACHEABLE,
+		.pte		= L_PTE_MT_WRITEALLOC,
 	}
 };
 
@@ -184,29 +184,28 @@ void adjust_cr(unsigned long mask, unsigned long set)
 
 static struct mem_type mem_types[] = {
 	[MT_DEVICE] = {		  /* Strongly ordered / ARMv6 shared device */
-		.prot_pte	= PROT_PTE_DEVICE,
+		.prot_pte	= PROT_PTE_DEVICE | L_PTE_MT_DEV_SHARED |
+				  L_PTE_SHARED,
 		.prot_l1	= PMD_TYPE_TABLE,
 		.prot_sect	= PROT_SECT_DEVICE | PMD_SECT_UNCACHED,
 		.domain		= DOMAIN_IO,
 	},
 	[MT_DEVICE_NONSHARED] = { /* ARMv6 non-shared device */
-		.prot_pte	= PROT_PTE_DEVICE,
-		.prot_pte_ext	= PTE_EXT_TEX(2),
+		.prot_pte	= PROT_PTE_DEVICE | L_PTE_MT_DEV_NONSHARED,
 		.prot_l1	= PMD_TYPE_TABLE,
 		.prot_sect	= PROT_SECT_DEVICE | PMD_SECT_TEX(2),
 		.domain		= DOMAIN_IO,
 	},
 	[MT_DEVICE_CACHED] = {	  /* ioremap_cached */
-		.prot_pte	= PROT_PTE_DEVICE | L_PTE_CACHEABLE | L_PTE_BUFFERABLE,
+		.prot_pte	= PROT_PTE_DEVICE | L_PTE_MT_DEV_CACHED,
 		.prot_l1	= PMD_TYPE_TABLE,
 		.prot_sect	= PROT_SECT_DEVICE | PMD_SECT_WB,
 		.domain		= DOMAIN_IO,
 	},
-	[MT_DEVICE_IXP2000] = {	  /* IXP2400 requires XCB=101 for on-chip I/O */
-		.prot_pte	= PROT_PTE_DEVICE,
+	[MT_DEVICE_WC] = {	  /* ioremap_wc */
+		.prot_pte	= PROT_PTE_DEVICE | L_PTE_MT_DEV_WC,
 		.prot_l1	= PMD_TYPE_TABLE,
-		.prot_sect	= PROT_SECT_DEVICE | PMD_SECT_BUFFERABLE |
-				  PMD_SECT_TEX(1),
+		.prot_sect	= PROT_SECT_DEVICE | PMD_SECT_BUFFERABLE,
 		.domain		= DOMAIN_IO,
 	},
 	[MT_CACHECLEAN] = {
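A side effect worth noting in this hunk: the IXP2000-specific device type is retired in favour of a generic MT_DEVICE_WC that backs ioremap_wc(). A hypothetical driver-side use; the address, size and function name are invented for the example:

	#include <linux/io.h>

	/*
	 * Hypothetical example: map a framebuffer write-combined rather
	 * than strongly ordered, so consecutive CPU stores may be merged
	 * into burst writes.  The physical address and size are made up.
	 */
	static void __iomem *map_example_fb(void)
	{
		return ioremap_wc(0x80000000, 0x100000);
	}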
@@ -251,7 +250,7 @@ static void __init build_mem_type_table(void)
 {
 	struct cachepolicy *cp;
 	unsigned int cr = get_cr();
-	unsigned int user_pgprot, kern_pgprot;
+	unsigned int user_pgprot, kern_pgprot, vecs_pgprot;
 	int cpu_arch = cpu_architecture();
 	int i;
 
@@ -269,6 +268,20 @@ static void __init build_mem_type_table(void)
 		cachepolicy = CPOLICY_WRITEBACK;
 		ecc_mask = 0;
 	}
+#ifdef CONFIG_SMP
+	cachepolicy = CPOLICY_WRITEALLOC;
+#endif
+
+	/*
+	 * On non-Xscale3 ARMv5-and-older systems, use CB=01
+	 * (Uncached/Buffered) for ioremap_wc() mappings.  On XScale3
+	 * and ARMv6+, use TEXCB=00100 mappings (Inner/Outer Uncacheable
+	 * in xsc3 parlance, Uncached Normal in ARMv6 parlance).
+	 */
+	if (cpu_is_xsc3() || cpu_arch >= CPU_ARCH_ARMv6) {
+		mem_types[MT_DEVICE_WC].prot_sect |= PMD_SECT_TEX(1);
+		mem_types[MT_DEVICE_WC].prot_sect &= ~PMD_SECT_BUFFERABLE;
+	}
 
 	/*
 	 * ARMv5 and lower, bit 4 must be set for page tables.
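Two things happen in this hunk: SMP kernels now force CPOLICY_WRITEALLOC regardless of any cachepolicy= override, and the write-combine mapping gets its encoding picked per CPU. The comment spells out the bit choice; to make it concrete, TEXCB=00100 means TEX=001, C=0, B=0, which is exactly PMD_SECT_TEX(1) set and PMD_SECT_BUFFERABLE cleared, while pre-v6, non-XScale3 parts keep the CB=01 (uncached, bufferable) value from the mem_types[] initialiser.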
@@ -290,7 +303,15 @@ static void __init build_mem_type_table(void)
 	}
 
 	cp = &cache_policies[cachepolicy];
-	kern_pgprot = user_pgprot = cp->pte;
+	vecs_pgprot = kern_pgprot = user_pgprot = cp->pte;
+
+#ifndef CONFIG_SMP
+	/*
+	 * Only use write-through for non-SMP systems
+	 */
+	if (cpu_arch >= CPU_ARCH_ARMv5 && cachepolicy > CPOLICY_WRITETHROUGH)
+		vecs_pgprot = cache_policies[CPOLICY_WRITETHROUGH].pte;
+#endif
 
 	/*
 	 * Enable CPU-specific coherency if supported.
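The new vecs_pgprot decouples the vectors page from kern_pgprot: on non-SMP ARMv5+ systems it is capped at write-through. This replaces the old trick, removed in a later hunk, of clearing L_PTE_BUFFERABLE from the vectors entries (C=1, B=0 being the write-through encoding), expressing the same intent through the cache_policies[] table instead of by poking individual bits.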
@@ -318,7 +339,6 @@ static void __init build_mem_type_table(void)
 		/*
 		 * Mark the device area as "shared device"
 		 */
-		mem_types[MT_DEVICE].prot_pte |= L_PTE_BUFFERABLE;
 		mem_types[MT_DEVICE].prot_sect |= PMD_SECT_BUFFERED;
 
 #ifdef CONFIG_SMP
@@ -327,30 +347,21 @@ static void __init build_mem_type_table(void)
 		 */
 		user_pgprot |= L_PTE_SHARED;
 		kern_pgprot |= L_PTE_SHARED;
+		vecs_pgprot |= L_PTE_SHARED;
 		mem_types[MT_MEMORY].prot_sect |= PMD_SECT_S;
 #endif
 	}
 
 	for (i = 0; i < 16; i++) {
 		unsigned long v = pgprot_val(protection_map[i]);
-		v = (v & ~(L_PTE_BUFFERABLE|L_PTE_CACHEABLE)) | user_pgprot;
-		protection_map[i] = __pgprot(v);
+		protection_map[i] = __pgprot(v | user_pgprot);
 	}
 
-	mem_types[MT_LOW_VECTORS].prot_pte |= kern_pgprot;
-	mem_types[MT_HIGH_VECTORS].prot_pte |= kern_pgprot;
+	mem_types[MT_LOW_VECTORS].prot_pte |= vecs_pgprot;
+	mem_types[MT_HIGH_VECTORS].prot_pte |= vecs_pgprot;
 
-	if (cpu_arch >= CPU_ARCH_ARMv5) {
-#ifndef CONFIG_SMP
-		/*
-		 * Only use write-through for non-SMP systems
-		 */
-		mem_types[MT_LOW_VECTORS].prot_pte &= ~L_PTE_BUFFERABLE;
-		mem_types[MT_HIGH_VECTORS].prot_pte &= ~L_PTE_BUFFERABLE;
-#endif
-	} else {
+	if (cpu_arch < CPU_ARCH_ARMv5)
 		mem_types[MT_MINICLEAN].prot_sect &= ~PMD_SECT_TEX(1);
-	}
 
 	pgprot_user   = __pgprot(L_PTE_PRESENT | L_PTE_YOUNG | user_pgprot);
 	pgprot_kernel = __pgprot(L_PTE_PRESENT | L_PTE_YOUNG |
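Also in this hunk, the protection_map[] fixup loses its masking step: previously user_pgprot only overrode the L_PTE_BUFFERABLE/L_PTE_CACHEABLE bits layered into the base entries, whereas now a plain OR suffices, which implies the base protection_map entries carry no memory-type bits of their own under the new scheme.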
@@ -398,8 +409,7 @@ static void __init alloc_init_pte(pmd_t *pmd, unsigned long addr,
 
 	pte = pte_offset_kernel(pmd, addr);
 	do {
-		set_pte_ext(pte, pfn_pte(pfn, __pgprot(type->prot_pte)),
-			    type->prot_pte_ext);
+		set_pte_ext(pte, pfn_pte(pfn, __pgprot(type->prot_pte)), 0);
 		pfn++;
 	} while (pte++, addr += PAGE_SIZE, addr != end);
 }
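The last hunk is the payoff of the re-encoding: alloc_init_pte() no longer feeds a per-mem_type prot_pte_ext word to set_pte_ext(), because the memory type now travels inside prot_pte itself, and the extension argument degenerates to a literal 0. A hedged C rendition of the decode step the per-CPU set_pte_ext() implementations (really per-processor assembly, e.g. arch/arm/mm/proc-*.S) must now perform; the helper name and exact mapping are hypothetical and build on the illustrative L_PTE_MT_* values sketched above:

	/*
	 * Hypothetical helper: rebuild the hardware C/B bits of a PTE
	 * from the abstract L_PTE_MT_* field carried in the Linux PTE.
	 * Real kernels do this in per-CPU assembly, not in C.
	 */
	static unsigned long hw_pte_from_linux_pte(unsigned long linux_pte)
	{
		unsigned long hw_pte = linux_pte & ~L_PTE_MT_MASK;

		switch (linux_pte & L_PTE_MT_MASK) {
		case L_PTE_MT_UNCACHED:			/* C=0 B=0 */
			break;
		case L_PTE_MT_BUFFERABLE:		/* C=0 B=1 */
			hw_pte |= PTE_BUFFERABLE;
			break;
		case L_PTE_MT_WRITETHROUGH:		/* C=1 B=0 */
			hw_pte |= PTE_CACHEABLE;
			break;
		default:				/* writeback etc.: C=1 B=1 */
			hw_pte |= PTE_CACHEABLE | PTE_BUFFERABLE;
			break;
		}
		return hw_pte;
	}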