Diffstat (limited to 'arch/arm/mm/mmu.c')

-rw-r--r--   arch/arm/mm/mmu.c   133
1 file changed, 111 insertions(+), 22 deletions(-)
diff --git a/arch/arm/mm/mmu.c b/arch/arm/mm/mmu.c
index 580ef2de82d7..a623cb3ad012 100644
--- a/arch/arm/mm/mmu.c
+++ b/arch/arm/mm/mmu.c
@@ -22,6 +22,7 @@
 #include <asm/cputype.h>
 #include <asm/sections.h>
 #include <asm/cachetype.h>
+#include <asm/sections.h>
 #include <asm/setup.h>
 #include <asm/smp_plat.h>
 #include <asm/tlb.h>
@@ -231,12 +232,16 @@ __setup("noalign", noalign_setup);
 #endif	/* ifdef CONFIG_CPU_CP15 / else */
 
 #define PROT_PTE_DEVICE		L_PTE_PRESENT|L_PTE_YOUNG|L_PTE_DIRTY|L_PTE_XN
+#define PROT_PTE_S2_DEVICE	PROT_PTE_DEVICE
 #define PROT_SECT_DEVICE	PMD_TYPE_SECT|PMD_SECT_AP_WRITE
 
 static struct mem_type mem_types[] = {
 	[MT_DEVICE] = {		  /* Strongly ordered / ARMv6 shared device */
 		.prot_pte	= PROT_PTE_DEVICE | L_PTE_MT_DEV_SHARED |
 				  L_PTE_SHARED,
+		.prot_pte_s2	= s2_policy(PROT_PTE_S2_DEVICE) |
+				  s2_policy(L_PTE_S2_MT_DEV_SHARED) |
+				  L_PTE_SHARED,
 		.prot_l1	= PMD_TYPE_TABLE,
 		.prot_sect	= PROT_SECT_DEVICE | PMD_SECT_S,
 		.domain		= DOMAIN_IO,
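
The new .prot_pte_s2 field carries the stage-2 (hypervisor) translation attributes that KVM needs for device mappings. The s2_policy() helper is not part of this hunk; as an assumption about the surrounding file in this era, it is typically defined so that the stage-2 bits vanish on non-LPAE builds, roughly:

    /* Sketch of the assumed s2_policy() definition (not shown in this diff):
     * stage-2 attributes are an LPAE feature, so non-LPAE builds fold the
     * bits to zero and .prot_pte_s2 costs nothing there.
     */
    #ifdef CONFIG_ARM_LPAE
    #define s2_policy(policy)	policy
    #else
    #define s2_policy(policy)	0
    #endif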
@@ -287,36 +292,43 @@ static struct mem_type mem_types[] = {
 		.prot_l1   = PMD_TYPE_TABLE,
 		.domain    = DOMAIN_USER,
 	},
-	[MT_MEMORY] = {
+	[MT_MEMORY_RWX] = {
 		.prot_pte  = L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_DIRTY,
 		.prot_l1   = PMD_TYPE_TABLE,
 		.prot_sect = PMD_TYPE_SECT | PMD_SECT_AP_WRITE,
 		.domain    = DOMAIN_KERNEL,
 	},
+	[MT_MEMORY_RW] = {
+		.prot_pte  = L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_DIRTY |
+			     L_PTE_XN,
+		.prot_l1   = PMD_TYPE_TABLE,
+		.prot_sect = PMD_TYPE_SECT | PMD_SECT_AP_WRITE,
+		.domain    = DOMAIN_KERNEL,
+	},
 	[MT_ROM] = {
 		.prot_sect = PMD_TYPE_SECT,
 		.domain    = DOMAIN_KERNEL,
 	},
-	[MT_MEMORY_NONCACHED] = {
+	[MT_MEMORY_RWX_NONCACHED] = {
 		.prot_pte  = L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_DIRTY |
 			     L_PTE_MT_BUFFERABLE,
 		.prot_l1   = PMD_TYPE_TABLE,
 		.prot_sect = PMD_TYPE_SECT | PMD_SECT_AP_WRITE,
 		.domain    = DOMAIN_KERNEL,
 	},
-	[MT_MEMORY_DTCM] = {
+	[MT_MEMORY_RW_DTCM] = {
 		.prot_pte  = L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_DIRTY |
 			     L_PTE_XN,
 		.prot_l1   = PMD_TYPE_TABLE,
 		.prot_sect = PMD_TYPE_SECT | PMD_SECT_XN,
 		.domain    = DOMAIN_KERNEL,
 	},
-	[MT_MEMORY_ITCM] = {
+	[MT_MEMORY_RWX_ITCM] = {
 		.prot_pte  = L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_DIRTY,
 		.prot_l1   = PMD_TYPE_TABLE,
 		.domain    = DOMAIN_KERNEL,
 	},
-	[MT_MEMORY_SO] = {
+	[MT_MEMORY_RW_SO] = {
 		.prot_pte  = L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_DIRTY |
 			     L_PTE_MT_UNCACHED | L_PTE_XN,
 		.prot_l1   = PMD_TYPE_TABLE,
@@ -325,7 +337,8 @@ static struct mem_type mem_types[] = {
 		.domain    = DOMAIN_KERNEL,
 	},
 	[MT_MEMORY_DMA_READY] = {
-		.prot_pte  = L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_DIRTY,
+		.prot_pte  = L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_DIRTY |
+			     L_PTE_XN,
 		.prot_l1   = PMD_TYPE_TABLE,
 		.domain    = DOMAIN_KERNEL,
 	},
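
The hunks above split the old catch-all MT_MEMORY type by intended permissions: MT_MEMORY_RWX keeps the old attributes for the one region that must stay executable (the kernel image), while the RW variants gain L_PTE_XN so writable regions can no longer be executed, and MT_MEMORY_DMA_READY picks up L_PTE_XN as well. A summary derived from these hunks (the section-level PMD_SECT_XN for MT_MEMORY_RW is applied later, in build_mem_type_table(), on ARMv6+):

    memory type             execute-never bits                 typical use
    MT_MEMORY_RWX           none                               kernel text and init
    MT_MEMORY_RW            L_PTE_XN (+ PMD_SECT_XN on v6+)    remaining lowmem
    MT_MEMORY_DMA_READY     L_PTE_XN (new here)                DMA-ready lowmem
    MT_MEMORY_RW_DTCM       L_PTE_XN / PMD_SECT_XN             data TCM
    MT_MEMORY_RWX_ITCM      none                               instruction TCM
    MT_MEMORY_RW_SO         L_PTE_XN                           strongly-ordered memory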
@@ -337,6 +350,44 @@ const struct mem_type *get_mem_type(unsigned int type)
 }
 EXPORT_SYMBOL(get_mem_type);
 
+#define PTE_SET_FN(_name, pteop) \
+static int pte_set_##_name(pte_t *ptep, pgtable_t token, unsigned long addr, \
+			void *data) \
+{ \
+	pte_t pte = pteop(*ptep); \
+\
+	set_pte_ext(ptep, pte, 0); \
+	return 0; \
+} \
+
+#define SET_MEMORY_FN(_name, callback) \
+int set_memory_##_name(unsigned long addr, int numpages) \
+{ \
+	unsigned long start = addr; \
+	unsigned long size = PAGE_SIZE*numpages; \
+	unsigned end = start + size; \
+\
+	if (start < MODULES_VADDR || start >= MODULES_END) \
+		return -EINVAL;\
+\
+	if (end < MODULES_VADDR || end >= MODULES_END) \
+		return -EINVAL; \
+\
+	apply_to_page_range(&init_mm, start, size, callback, NULL); \
+	flush_tlb_kernel_range(start, end); \
+	return 0;\
+}
+
+PTE_SET_FN(ro, pte_wrprotect)
+PTE_SET_FN(rw, pte_mkwrite)
+PTE_SET_FN(x, pte_mkexec)
+PTE_SET_FN(nx, pte_mknexec)
+
+SET_MEMORY_FN(ro, pte_set_ro)
+SET_MEMORY_FN(rw, pte_set_rw)
+SET_MEMORY_FN(x, pte_set_x)
+SET_MEMORY_FN(nx, pte_set_nx)
+
 /*
  * Adjust the PMD section entries according to the CPU in use.
  */
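
After expansion, each SET_MEMORY_FN() instantiation becomes a public helper that validates the range against the module area, walks init_mm's page tables with apply_to_page_range() applying the matching pte_set_* callback to every PTE, and flushes the kernel TLB for the range. A hedged usage sketch (illustrative only; mod_region and npages are invented names, and the range must fall inside [MODULES_VADDR, MODULES_END) or the helpers return -EINVAL):

    /* Illustrative caller, e.g. a loader hardening a page-aligned
     * region it owns in module space.  Invented names, not kernel code.
     */
    unsigned long mod_region = MODULES_VADDR;
    int npages = 4;

    set_memory_ro(mod_region, npages);	/* pte_wrprotect each PTE */
    set_memory_nx(mod_region, npages);	/* pte_mknexec: set the XN bit */

    /* ... and to make the region patchable again later: */
    set_memory_rw(mod_region, npages);
    set_memory_x(mod_region, npages);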
@@ -410,6 +461,9 @@ static void __init build_mem_type_table(void)
 		mem_types[MT_DEVICE_NONSHARED].prot_sect |= PMD_SECT_XN;
 		mem_types[MT_DEVICE_CACHED].prot_sect |= PMD_SECT_XN;
 		mem_types[MT_DEVICE_WC].prot_sect |= PMD_SECT_XN;
+
+		/* Also setup NX memory mapping */
+		mem_types[MT_MEMORY_RW].prot_sect |= PMD_SECT_XN;
 	}
 	if (cpu_arch >= CPU_ARCH_ARMv7 && (cr & CR_TRE)) {
 		/*
@@ -458,7 +512,8 @@ static void __init build_mem_type_table(void)
 	cp = &cache_policies[cachepolicy];
 	vecs_pgprot = kern_pgprot = user_pgprot = cp->pte;
 	s2_pgprot = cp->pte_s2;
-	hyp_device_pgprot = s2_device_pgprot = mem_types[MT_DEVICE].prot_pte;
+	hyp_device_pgprot = mem_types[MT_DEVICE].prot_pte;
+	s2_device_pgprot = mem_types[MT_DEVICE].prot_pte_s2;
 
 	/*
 	 * ARMv6 and above have extended page tables.
@@ -487,11 +542,13 @@ static void __init build_mem_type_table(void)
 			mem_types[MT_DEVICE_WC].prot_pte |= L_PTE_SHARED;
 			mem_types[MT_DEVICE_CACHED].prot_sect |= PMD_SECT_S;
 			mem_types[MT_DEVICE_CACHED].prot_pte |= L_PTE_SHARED;
-			mem_types[MT_MEMORY].prot_sect |= PMD_SECT_S;
-			mem_types[MT_MEMORY].prot_pte |= L_PTE_SHARED;
+			mem_types[MT_MEMORY_RWX].prot_sect |= PMD_SECT_S;
+			mem_types[MT_MEMORY_RWX].prot_pte |= L_PTE_SHARED;
+			mem_types[MT_MEMORY_RW].prot_sect |= PMD_SECT_S;
+			mem_types[MT_MEMORY_RW].prot_pte |= L_PTE_SHARED;
 			mem_types[MT_MEMORY_DMA_READY].prot_pte |= L_PTE_SHARED;
-			mem_types[MT_MEMORY_NONCACHED].prot_sect |= PMD_SECT_S;
-			mem_types[MT_MEMORY_NONCACHED].prot_pte |= L_PTE_SHARED;
+			mem_types[MT_MEMORY_RWX_NONCACHED].prot_sect |= PMD_SECT_S;
+			mem_types[MT_MEMORY_RWX_NONCACHED].prot_pte |= L_PTE_SHARED;
 		}
 	}
 
@@ -502,15 +559,15 @@ static void __init build_mem_type_table(void)
 	if (cpu_arch >= CPU_ARCH_ARMv6) {
 		if (cpu_arch >= CPU_ARCH_ARMv7 && (cr & CR_TRE)) {
 			/* Non-cacheable Normal is XCB = 001 */
-			mem_types[MT_MEMORY_NONCACHED].prot_sect |=
+			mem_types[MT_MEMORY_RWX_NONCACHED].prot_sect |=
 				PMD_SECT_BUFFERED;
 		} else {
 			/* For both ARMv6 and non-TEX-remapping ARMv7 */
-			mem_types[MT_MEMORY_NONCACHED].prot_sect |=
+			mem_types[MT_MEMORY_RWX_NONCACHED].prot_sect |=
 				PMD_SECT_TEX(1);
 		}
 	} else {
-		mem_types[MT_MEMORY_NONCACHED].prot_sect |= PMD_SECT_BUFFERABLE;
+		mem_types[MT_MEMORY_RWX_NONCACHED].prot_sect |= PMD_SECT_BUFFERABLE;
 	}
 
 #ifdef CONFIG_ARM_LPAE
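
For context on the three encodings chosen above for Normal non-cacheable memory: with TEX remap enabled (ARMv7, SCTLR.TRE = 1), a section's {TEX[0], C, B} bits index the PRRR/NMRR remap registers, so XCB = 001 (just the bufferable bit, PMD_SECT_BUFFERED) selects Normal non-cacheable; without remap, the classic encoding TEX = 1, C = 0, B = 0 (PMD_SECT_TEX(1)) means the same thing directly; pre-ARMv6 CPUs have no TEX field, so plain "bufferable" is the closest available attribute:

    CPU / configuration                  encoding          prot_sect bits
    ARMv7 with TEX remap (TRE = 1)       XCB = 001         PMD_SECT_BUFFERED
    ARMv6, or ARMv7 without TEX remap    TEX=1, C=0, B=0   PMD_SECT_TEX(1)
    pre-ARMv6                            B = 1             PMD_SECT_BUFFERABLE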
@@ -543,10 +600,12 @@ static void __init build_mem_type_table(void)
 
 	mem_types[MT_LOW_VECTORS].prot_l1 |= ecc_mask;
 	mem_types[MT_HIGH_VECTORS].prot_l1 |= ecc_mask;
-	mem_types[MT_MEMORY].prot_sect |= ecc_mask | cp->pmd;
-	mem_types[MT_MEMORY].prot_pte |= kern_pgprot;
+	mem_types[MT_MEMORY_RWX].prot_sect |= ecc_mask | cp->pmd;
+	mem_types[MT_MEMORY_RWX].prot_pte |= kern_pgprot;
+	mem_types[MT_MEMORY_RW].prot_sect |= ecc_mask | cp->pmd;
+	mem_types[MT_MEMORY_RW].prot_pte |= kern_pgprot;
 	mem_types[MT_MEMORY_DMA_READY].prot_pte |= kern_pgprot;
-	mem_types[MT_MEMORY_NONCACHED].prot_sect |= ecc_mask;
+	mem_types[MT_MEMORY_RWX_NONCACHED].prot_sect |= ecc_mask;
 	mem_types[MT_ROM].prot_sect |= cp->pmd;
 
 	switch (cp->pmd) {
@@ -1296,6 +1355,8 @@ static void __init kmap_init(void)
 static void __init map_lowmem(void)
 {
 	struct memblock_region *reg;
+	unsigned long kernel_x_start = round_down(__pa(_stext), SECTION_SIZE);
+	unsigned long kernel_x_end = round_up(__pa(__init_end), SECTION_SIZE);
 
 	/* Map all the lowmem memory banks. */
 	for_each_memblock(memory, reg) {
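
The two new bounds round the kernel's executable image out to section granularity, so the window can still be mapped with first-level section entries rather than individual PTEs. A worked example with illustrative values only (real addresses depend on PHYS_OFFSET, TEXT_OFFSET, the kernel's size, and SECTION_SIZE, which is 1 MiB with classic page tables and 2 MiB under LPAE):

    /*
     * Illustrative values, not from any particular platform:
     *   __pa(_stext)     = 0x80008000   (PHYS_OFFSET 0x80000000 + TEXT_OFFSET 0x8000)
     *   __pa(__init_end) = 0x80532000
     *   SECTION_SIZE     = 0x00100000   (1 MiB, classic page tables)
     *
     *   kernel_x_start = round_down(0x80008000, 0x100000) = 0x80000000
     *   kernel_x_end   = round_up(0x80532000, 0x100000)   = 0x80600000
     */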
@@ -1308,12 +1369,40 @@ static void __init map_lowmem(void)
 		if (start >= end)
 			break;
 
-		map.pfn = __phys_to_pfn(start);
-		map.virtual = __phys_to_virt(start);
-		map.length = end - start;
-		map.type = MT_MEMORY;
+		if (end < kernel_x_start || start >= kernel_x_end) {
+			map.pfn = __phys_to_pfn(start);
+			map.virtual = __phys_to_virt(start);
+			map.length = end - start;
+			map.type = MT_MEMORY_RWX;
 
-		create_mapping(&map);
+			create_mapping(&map);
+		} else {
+			/* This better cover the entire kernel */
+			if (start < kernel_x_start) {
+				map.pfn = __phys_to_pfn(start);
+				map.virtual = __phys_to_virt(start);
+				map.length = kernel_x_start - start;
+				map.type = MT_MEMORY_RW;
+
+				create_mapping(&map);
+			}
+
+			map.pfn = __phys_to_pfn(kernel_x_start);
+			map.virtual = __phys_to_virt(kernel_x_start);
+			map.length = kernel_x_end - kernel_x_start;
+			map.type = MT_MEMORY_RWX;
+
+			create_mapping(&map);
+
+			if (kernel_x_end < end) {
+				map.pfn = __phys_to_pfn(kernel_x_end);
+				map.virtual = __phys_to_virt(kernel_x_end);
+				map.length = end - kernel_x_end;
+				map.type = MT_MEMORY_RW;
+
+				create_mapping(&map);
+			}
+		}
 	}
 }
 
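
Putting it together, a sketch of what map_lowmem() now produces for a single lowmem region, reusing the illustrative values from the note above (a memblock region with start = 0x80000000 and end = 0xa0000000):

    /*
     *   0x80000000 - 0x80600000   MT_MEMORY_RWX  (rounded kernel text + init window)
     *   0x80600000 - 0xa0000000   MT_MEMORY_RW   (mapped execute-never)
     */

Because start equals kernel_x_start in this example, the leading MT_MEMORY_RW branch is skipped. A region that does not intersect [kernel_x_start, kernel_x_end) at all takes the first branch and keeps a single MT_MEMORY_RWX mapping, matching the behaviour before this patch.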