Diffstat (limited to 'arch/arm/mm')
 -rw-r--r--  arch/arm/mm/cache-l2x0.c | 11
 -rw-r--r--  arch/arm/mm/context.c    |  3
 -rw-r--r--  arch/arm/mm/mmu.c        | 73
 -rw-r--r--  arch/arm/mm/proc-v7.S    | 19
 4 files changed, 70 insertions, 36 deletions
diff --git a/arch/arm/mm/cache-l2x0.c b/arch/arm/mm/cache-l2x0.c
index c2f37390308a..c465faca51b0 100644
--- a/arch/arm/mm/cache-l2x0.c
+++ b/arch/arm/mm/cache-l2x0.c
@@ -299,7 +299,7 @@ static void l2x0_unlock(u32 cache_id)
 	int lockregs;
 	int i;
 
-	switch (cache_id) {
+	switch (cache_id & L2X0_CACHE_ID_PART_MASK) {
 	case L2X0_CACHE_ID_PART_L310:
 		lockregs = 8;
 		break;
@@ -333,15 +333,14 @@ void __init l2x0_init(void __iomem *base, u32 aux_val, u32 aux_mask)
 	if (cache_id_part_number_from_dt)
 		cache_id = cache_id_part_number_from_dt;
 	else
-		cache_id = readl_relaxed(l2x0_base + L2X0_CACHE_ID)
-			& L2X0_CACHE_ID_PART_MASK;
+		cache_id = readl_relaxed(l2x0_base + L2X0_CACHE_ID);
 	aux = readl_relaxed(l2x0_base + L2X0_AUX_CTRL);
 
 	aux &= aux_mask;
 	aux |= aux_val;
 
 	/* Determine the number of ways */
-	switch (cache_id) {
+	switch (cache_id & L2X0_CACHE_ID_PART_MASK) {
 	case L2X0_CACHE_ID_PART_L310:
 		if (aux & (1 << 16))
 			ways = 16;
@@ -725,7 +724,6 @@ static const struct l2x0_of_data pl310_data = {
 		.flush_all = l2x0_flush_all,
 		.inv_all = l2x0_inv_all,
 		.disable = l2x0_disable,
-		.set_debug = pl310_set_debug,
 	},
 };
 
@@ -814,9 +812,8 @@ int __init l2x0_of_init(u32 aux_val, u32 aux_mask)
 		data->save();
 
 	of_init = true;
-	l2x0_init(l2x0_base, aux_val, aux_mask);
-
 	memcpy(&outer_cache, &data->outer_cache, sizeof(outer_cache));
+	l2x0_init(l2x0_base, aux_val, aux_mask);
 
 	return 0;
 }
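
The first two hunks above stop masking the part number when L2X0_CACHE_ID is read and instead apply L2X0_CACHE_ID_PART_MASK at each comparison, so the RTL-revision bits of the raw ID survive for later use. A minimal standalone sketch of the pattern; the constants are assumed to mirror arch/arm/include/asm/hardware/cache-l2x0.h and the demo ID value is made up:

    #include <stdint.h>
    #include <stdio.h>

    /* Constants assumed from cache-l2x0.h. */
    #define L2X0_CACHE_ID_PART_MASK (0xf << 6)
    #define L2X0_CACHE_ID_PART_L310 (3 << 6)
    #define L2X0_CACHE_ID_RTL_MASK  0x3f

    int main(void)
    {
            /* Made-up raw ID: L310 part number plus a nonzero RTL revision. */
            uint32_t cache_id = L2X0_CACHE_ID_PART_L310 | 0x08;

            /* Masking at the comparison rather than at the read keeps the
             * revision bits available for revision-specific handling. */
            switch (cache_id & L2X0_CACHE_ID_PART_MASK) {
            case L2X0_CACHE_ID_PART_L310:
                    printf("L310, RTL rev 0x%02x\n",
                           cache_id & L2X0_CACHE_ID_RTL_MASK);
                    break;
            default:
                    printf("unknown part\n");
            }
            return 0;
    }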
diff --git a/arch/arm/mm/context.c b/arch/arm/mm/context.c
index a5a4b2bc42ba..2ac37372ef52 100644
--- a/arch/arm/mm/context.c
+++ b/arch/arm/mm/context.c
@@ -48,7 +48,7 @@ static DEFINE_RAW_SPINLOCK(cpu_asid_lock);
 static atomic64_t asid_generation = ATOMIC64_INIT(ASID_FIRST_VERSION);
 static DECLARE_BITMAP(asid_map, NUM_USER_ASIDS);
 
-static DEFINE_PER_CPU(atomic64_t, active_asids);
+DEFINE_PER_CPU(atomic64_t, active_asids);
 static DEFINE_PER_CPU(u64, reserved_asids);
 static cpumask_t tlb_flush_pending;
 
@@ -215,6 +215,7 @@ void check_and_switch_context(struct mm_struct *mm, struct task_struct *tsk)
 	if (cpumask_test_and_clear_cpu(cpu, &tlb_flush_pending)) {
 		local_flush_bp_all();
 		local_flush_tlb_all();
+		dummy_flush_tlb_a15_erratum();
 	}
 
 	atomic64_set(&per_cpu(active_asids, cpu), asid);
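
The context.c change drops `static` from the active_asids definition so that erratum workaround code in another file can inspect it, and adds a dummy TLB operation after the rollover flush. A kernel-side sketch (not runnable standalone) of the per-CPU export pattern the first hunk enables; the header placement and helper name here are illustrative, not taken from this diff:

    #include <linux/atomic.h>
    #include <linux/percpu.h>

    /* context.c: definition, now visible to other translation units */
    DEFINE_PER_CPU(atomic64_t, active_asids);

    /* in a shared header (illustrative placement) */
    DECLARE_PER_CPU(atomic64_t, active_asids);

    /* elsewhere, e.g. erratum workaround code: read a given CPU's entry */
    static inline u64 active_asid_on(int cpu)
    {
            return atomic64_read(&per_cpu(active_asids, cpu));
    }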
diff --git a/arch/arm/mm/mmu.c b/arch/arm/mm/mmu.c
index e95a996ab78f..78978945492a 100644
--- a/arch/arm/mm/mmu.c
+++ b/arch/arm/mm/mmu.c
@@ -598,39 +598,60 @@ static void __init alloc_init_pte(pmd_t *pmd, unsigned long addr,
 	} while (pte++, addr += PAGE_SIZE, addr != end);
 }
 
-static void __init alloc_init_section(pud_t *pud, unsigned long addr,
-				      unsigned long end, phys_addr_t phys,
-				      const struct mem_type *type)
+static void __init map_init_section(pmd_t *pmd, unsigned long addr,
+			unsigned long end, phys_addr_t phys,
+			const struct mem_type *type)
 {
-	pmd_t *pmd = pmd_offset(pud, addr);
-
+#ifndef CONFIG_ARM_LPAE
 	/*
-	 * Try a section mapping - end, addr and phys must all be aligned
-	 * to a section boundary.  Note that PMDs refer to the individual
-	 * L1 entries, whereas PGDs refer to a group of L1 entries making
-	 * up one logical pointer to an L2 table.
+	 * In classic MMU format, puds and pmds are folded in to
+	 * the pgds. pmd_offset gives the PGD entry. PGDs refer to a
+	 * group of L1 entries making up one logical pointer to
+	 * an L2 table (2MB), where as PMDs refer to the individual
+	 * L1 entries (1MB). Hence increment to get the correct
+	 * offset for odd 1MB sections.
+	 * (See arch/arm/include/asm/pgtable-2level.h)
 	 */
-	if (type->prot_sect && ((addr | end | phys) & ~SECTION_MASK) == 0) {
-		pmd_t *p = pmd;
-
-#ifndef CONFIG_ARM_LPAE
-		if (addr & SECTION_SIZE)
-			pmd++;
+	if (addr & SECTION_SIZE)
+		pmd++;
 #endif
+	do {
+		*pmd = __pmd(phys | type->prot_sect);
+		phys += SECTION_SIZE;
+	} while (pmd++, addr += SECTION_SIZE, addr != end);
 
-		do {
-			*pmd = __pmd(phys | type->prot_sect);
-			phys += SECTION_SIZE;
-		} while (pmd++, addr += SECTION_SIZE, addr != end);
+	flush_pmd_entry(pmd);
+}
 
-		flush_pmd_entry(p);
-	} else {
+static void __init alloc_init_pmd(pud_t *pud, unsigned long addr,
+				  unsigned long end, phys_addr_t phys,
+				  const struct mem_type *type)
+{
+	pmd_t *pmd = pmd_offset(pud, addr);
+	unsigned long next;
+
+	do {
 		/*
-		 * No need to loop; pte's aren't interested in the
-		 * individual L1 entries.
+		 * With LPAE, we must loop over to map
+		 * all the pmds for the given range.
 		 */
-		alloc_init_pte(pmd, addr, end, __phys_to_pfn(phys), type);
-	}
+		next = pmd_addr_end(addr, end);
+
+		/*
+		 * Try a section mapping - addr, next and phys must all be
+		 * aligned to a section boundary.
+		 */
+		if (type->prot_sect &&
+				((addr | next | phys) & ~SECTION_MASK) == 0) {
+			map_init_section(pmd, addr, next, phys, type);
+		} else {
+			alloc_init_pte(pmd, addr, next,
+					__phys_to_pfn(phys), type);
+		}
+
+		phys += next - addr;
+
+	} while (pmd++, addr = next, addr != end);
 }
 
 static void __init alloc_init_pud(pgd_t *pgd, unsigned long addr,
@@ -641,7 +662,7 @@ static void __init alloc_init_pud(pgd_t *pgd, unsigned long addr,
 
 	do {
 		next = pud_addr_end(addr, end);
-		alloc_init_section(pud, addr, next, phys, type);
+		alloc_init_pmd(pud, addr, next, phys, type);
 		phys += next - addr;
 	} while (pud++, addr = next, addr != end);
 }
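
The mmu.c rewrite matters on LPAE, where a pud covers several pmd entries, so alloc_init_pmd() must walk the range with pmd_addr_end() and let each chunk independently become a section mapping or a pte table. A userspace model of that range splitting; pmd_addr_end() below mirrors the kernel macro's logic, and the sizes assume LPAE's 2MB pmd:

    #include <stdio.h>

    #define PMD_SIZE (1UL << 21)            /* 2MB pmd, as with LPAE */
    #define PMD_MASK (~(PMD_SIZE - 1))

    /* Model of the kernel's pmd_addr_end(): next pmd boundary, capped at end. */
    static unsigned long pmd_addr_end(unsigned long addr, unsigned long end)
    {
            unsigned long boundary = (addr + PMD_SIZE) & PMD_MASK;
            return (boundary - 1 < end - 1) ? boundary : end;
    }

    int main(void)
    {
            /* Map 0x00300000..0x00900000: starts on an odd 1MB boundary. */
            unsigned long addr = 0x00300000, end = 0x00900000, next;

            do {
                    next = pmd_addr_end(addr, end);
                    /* Only fully aligned 2MB chunks could use a section map. */
                    printf("pmd chunk %#lx..%#lx%s\n", addr, next,
                           ((addr | next) & (PMD_SIZE - 1)) ? "" : " (section-capable)");
                    addr = next;
            } while (addr != end);
            return 0;
    }

With an unaligned start like this, the first and last chunks fall back to alloc_init_pte() while the aligned middle chunks can take section mappings, which is exactly the case the old single-shot alloc_init_section() mishandled.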
diff --git a/arch/arm/mm/proc-v7.S b/arch/arm/mm/proc-v7.S
index 3a3c015f8d5c..f584d3f5b37c 100644
--- a/arch/arm/mm/proc-v7.S
+++ b/arch/arm/mm/proc-v7.S
@@ -420,7 +420,7 @@ __v7_pj4b_proc_info:
 __v7_ca7mp_proc_info:
 	.long	0x410fc070
 	.long	0xff0ffff0
-	__v7_proc __v7_ca7mp_setup, hwcaps = HWCAP_IDIV
+	__v7_proc __v7_ca7mp_setup
 	.size	__v7_ca7mp_proc_info, . - __v7_ca7mp_proc_info
 
 	/*
@@ -430,10 +430,25 @@ __v7_ca7mp_proc_info:
 __v7_ca15mp_proc_info:
 	.long	0x410fc0f0
 	.long	0xff0ffff0
-	__v7_proc __v7_ca15mp_setup, hwcaps = HWCAP_IDIV
+	__v7_proc __v7_ca15mp_setup
 	.size	__v7_ca15mp_proc_info, . - __v7_ca15mp_proc_info
 
 	/*
+	 * Qualcomm Inc. Krait processors.
+	 */
+	.type	__krait_proc_info, #object
+__krait_proc_info:
+	.long	0x510f0400		@ Required ID value
+	.long	0xff0ffc00		@ Mask for ID
+	/*
+	 * Some Krait processors don't indicate support for SDIV and UDIV
+	 * instructions in the ARM instruction set, even though they actually
+	 * do support them.
+	 */
+	__v7_proc __v7_setup, hwcaps = HWCAP_IDIV
+	.size	__krait_proc_info, . - __krait_proc_info
+
+	/*
 	 * Match any ARMv7 processor core.
 	 */
 	.type	__v7_proc_info, #object
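
The new Krait entry works like every proc_info record: at boot the kernel masks the CPU's MIDR with the second word and compares the result against the first. A small sketch of that match using the ID and mask from __krait_proc_info; the sample MIDR values are hypothetical:

    #include <stdint.h>
    #include <stdio.h>

    /* ID and mask from the __krait_proc_info entry above. */
    #define KRAIT_ID   0x510f0400u
    #define KRAIT_MASK 0xff0ffc00u

    static int proc_info_matches(uint32_t midr, uint32_t id, uint32_t mask)
    {
            return (midr & mask) == id;
    }

    int main(void)
    {
            /* Hypothetical MIDR values for illustration. */
            printf("%d\n", proc_info_matches(0x511f04d0, KRAIT_ID, KRAIT_MASK)); /* 1 */
            printf("%d\n", proc_info_matches(0x410fc0f0, KRAIT_ID, KRAIT_MASK)); /* 0 */
            return 0;
    }

The mask 0xff0ffc00 ignores the variant and revision fields, so any Krait revision matches one entry, and the comment explains why this entry forces HWCAP_IDIV instead of relying on the ID registers.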