author	Russell King <rmk@dyn-67.arm.linux.org.uk>	2007-05-05 15:03:35 -0400
committer	Russell King <rmk+kernel@arm.linux.org.uk>	2007-05-05 15:03:35 -0400
commit	9ef7963503abd3287943125681c2dc17879e8d4e (patch)
tree	21fc04bda77ea16d3f54c52d265f924ad954f8f9 /arch/arm/mm/mmu.c
parent	0058ca32c3004547ede575668a2be31862b92000 (diff)
[ARM] mm 8: define mem_types table L1 bit 4 to be for ARMv6
Change the memory types table to define the L1 descriptor bit 4 in
terms of the ARMv6 definition: execute never.

Signed-off-by: Russell King <rmk+kernel@arm.linux.org.uk>
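Background for the change: in an ARM level 1 section descriptor, bit 4
historically had to be set on ARMv5 and earlier cores (and had to be
clear on XScale), while ARMv6 with the CP15 XP bit redefines that same
bit as XN, execute never. The standalone sketch below models the
boot-time fixup this patch introduces. It is an illustration only:
fixup_bit4(), is_xscale and pre_v6 are hypothetical stand-ins for the
kernel's build_mem_type_table(), cpu_is_xscale() and cpu_arch checks,
and the constants are simplified versions of the definitions in
include/asm-arm/pgtable-hwdef.h.

/*
 * Standalone sketch of the bit-4 fixup; constants are simplified
 * stand-ins for the real definitions.  The key point: PMD_BIT4 and
 * PMD_SECT_XN name the same physical bit, whose meaning depends on
 * the CPU generation.
 */
#include <stddef.h>

#define PMD_TYPE_SECT	(2 << 0)
#define PMD_BIT4	(1 << 4)	/* pre-ARMv6: must be set (XScale: must be clear) */
#define PMD_SECT_XN	(1 << 4)	/* ARMv6 + CR_XP: execute never */

struct mem_type {
	unsigned int prot_sect;
	unsigned int prot_l1;
};

/*
 * With the table now written in ARMv6 terms (bit 4 == XN), older CPUs
 * are handled by translating at boot.  'is_xscale' and 'pre_v6' stand
 * in for cpu_is_xscale() and cpu_arch < CPU_ARCH_ARMv6.
 */
static void fixup_bit4(struct mem_type *types, size_t n,
		       int is_xscale, int pre_v6)
{
	size_t i;

	if (is_xscale) {
		/* XScale cores must never see bit 4 in a descriptor. */
		for (i = 0; i < n; i++) {
			types[i].prot_sect &= ~PMD_BIT4;
			types[i].prot_l1   &= ~PMD_BIT4;
		}
	} else if (pre_v6) {
		/* Other pre-v6 cores require bit 4 to be set. */
		for (i = 0; i < n; i++) {
			if (types[i].prot_l1)
				types[i].prot_l1 |= PMD_BIT4;
			if (types[i].prot_sect)
				types[i].prot_sect |= PMD_BIT4;
		}
	}
	/* ARMv6 with CR_XP: use the table as-is; bit 4 already means XN. */
}

The payoff is visible in the last hunk below: because executable
mappings (MT_MEMORY, MT_ROM) simply stop carrying bit 4 in the table,
the ARMv6 path no longer needs to clear XN from them.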
Diffstat (limited to 'arch/arm/mm/mmu.c')
-rw-r--r--	arch/arm/mm/mmu.c	44
1 file changed, 20 insertions(+), 24 deletions(-)
diff --git a/arch/arm/mm/mmu.c b/arch/arm/mm/mmu.c
index 360405515bbd..44f385a3eb3f 100644
--- a/arch/arm/mm/mmu.c
+++ b/arch/arm/mm/mmu.c
@@ -181,16 +181,16 @@ static struct mem_type mem_types[] = {
 		.prot_pte	= L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_DIRTY |
 				  L_PTE_WRITE,
 		.prot_l1	= PMD_TYPE_TABLE,
-		.prot_sect	= PMD_TYPE_SECT | PMD_BIT4 | PMD_SECT_UNCACHED |
+		.prot_sect	= PMD_TYPE_SECT | PMD_SECT_XN | PMD_SECT_UNCACHED |
 				  PMD_SECT_AP_WRITE,
 		.domain		= DOMAIN_IO,
 	},
 	[MT_CACHECLEAN] = {
-		.prot_sect	= PMD_TYPE_SECT | PMD_BIT4,
+		.prot_sect	= PMD_TYPE_SECT | PMD_SECT_XN,
 		.domain		= DOMAIN_KERNEL,
 	},
 	[MT_MINICLEAN] = {
-		.prot_sect	= PMD_TYPE_SECT | PMD_BIT4 | PMD_SECT_MINICACHE,
+		.prot_sect	= PMD_TYPE_SECT | PMD_SECT_XN | PMD_SECT_MINICACHE,
 		.domain		= DOMAIN_KERNEL,
 	},
 	[MT_LOW_VECTORS] = {
@@ -206,25 +206,25 @@ static struct mem_type mem_types[] = {
 		.domain		= DOMAIN_USER,
 	},
 	[MT_MEMORY] = {
-		.prot_sect	= PMD_TYPE_SECT | PMD_BIT4 | PMD_SECT_AP_WRITE,
+		.prot_sect	= PMD_TYPE_SECT | PMD_SECT_AP_WRITE,
 		.domain		= DOMAIN_KERNEL,
 	},
 	[MT_ROM] = {
-		.prot_sect	= PMD_TYPE_SECT | PMD_BIT4,
+		.prot_sect	= PMD_TYPE_SECT,
 		.domain		= DOMAIN_KERNEL,
 	},
 	[MT_IXP2000_DEVICE] = { /* IXP2400 requires XCB=101 for on-chip I/O */
 		.prot_pte	= L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_DIRTY |
 				  L_PTE_WRITE,
 		.prot_l1	= PMD_TYPE_TABLE,
-		.prot_sect	= PMD_TYPE_SECT | PMD_BIT4 | PMD_SECT_UNCACHED |
+		.prot_sect	= PMD_TYPE_SECT | PMD_SECT_XN | PMD_SECT_UNCACHED |
 				  PMD_SECT_AP_WRITE | PMD_SECT_BUFFERABLE |
 				  PMD_SECT_TEX(1),
 		.domain		= DOMAIN_IO,
 	},
 	[MT_NONSHARED_DEVICE] = {
 		.prot_l1	= PMD_TYPE_TABLE,
-		.prot_sect	= PMD_TYPE_SECT | PMD_BIT4 | PMD_SECT_NONSHARED_DEV |
+		.prot_sect	= PMD_TYPE_SECT | PMD_SECT_XN | PMD_SECT_NONSHARED_DEV |
 				  PMD_SECT_AP_WRITE,
 		.domain		= DOMAIN_IO,
 	}
@@ -260,20 +260,23 @@ static void __init build_mem_type_table(void)
 	}
 
 	/*
-	 * Xscale must not have PMD bit 4 set for section mappings.
+	 * ARMv5 and lower, bit 4 must be set for page tables.
+	 * (was: cache "update-able on write" bit on ARM610)
+	 * However, Xscale cores require this bit to be cleared.
 	 */
-	if (cpu_is_xscale())
-		for (i = 0; i < ARRAY_SIZE(mem_types); i++)
+	if (cpu_is_xscale()) {
+		for (i = 0; i < ARRAY_SIZE(mem_types); i++) {
 			mem_types[i].prot_sect &= ~PMD_BIT4;
-
-	/*
-	 * ARMv5 and lower, excluding Xscale, bit 4 must be set for
-	 * page tables.
-	 */
-	if (cpu_arch < CPU_ARCH_ARMv6 && !cpu_is_xscale())
-		for (i = 0; i < ARRAY_SIZE(mem_types); i++)
+			mem_types[i].prot_l1 &= ~PMD_BIT4;
+		}
+	} else if (cpu_arch < CPU_ARCH_ARMv6) {
+		for (i = 0; i < ARRAY_SIZE(mem_types); i++) {
 			if (mem_types[i].prot_l1)
 				mem_types[i].prot_l1 |= PMD_BIT4;
+			if (mem_types[i].prot_sect)
+				mem_types[i].prot_sect |= PMD_BIT4;
+		}
+	}
 
 	cp = &cache_policies[cachepolicy];
 	kern_pgprot = user_pgprot = cp->pte;
@@ -294,13 +297,6 @@ static void __init build_mem_type_table(void)
 	 */
 	if (cpu_arch >= CPU_ARCH_ARMv6 && (cr & CR_XP)) {
 		/*
-		 * bit 4 becomes XN which we must clear for the
-		 * kernel memory mapping.
-		 */
-		mem_types[MT_MEMORY].prot_sect &= ~PMD_SECT_XN;
-		mem_types[MT_ROM].prot_sect &= ~PMD_SECT_XN;
-
-		/*
 		 * Mark cache clean areas and XIP ROM read only
 		 * from SVC mode and no access from userspace.
 		 */
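As a quick sanity check of the scheme, a hedged usage sketch reusing
the stand-in struct, constants and fixup_bit4() from the example after
the commit message (illustration only, not kernel code):

#include <assert.h>

int main(void)
{
	/* Toy table in ARMv6 terms: device mapping is XN, kernel memory is not. */
	struct mem_type types[] = {
		{ .prot_sect = PMD_TYPE_SECT | PMD_SECT_XN, .prot_l1 = 1 },	/* device */
		{ .prot_sect = PMD_TYPE_SECT,               .prot_l1 = 1 },	/* memory */
	};

	fixup_bit4(types, 2, /* is_xscale = */ 0, /* pre_v6 = */ 1);

	/* Pre-v6: bit 4 ends up set everywhere, as the hardware requires. */
	assert(types[0].prot_sect & PMD_BIT4);
	assert(types[1].prot_sect & PMD_BIT4);
	return 0;
}

On an ARMv6 call (is_xscale = 0, pre_v6 = 0) the table would pass
through untouched, with bit 4 acting as XN exactly as the mem_types
table now declares.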