Diffstat (limited to 'arch/arm/mm')
-rw-r--r--  arch/arm/mm/cache-l2x0.c  | 11
-rw-r--r--  arch/arm/mm/context.c     |  3
-rw-r--r--  arch/arm/mm/mmu.c         | 73
-rw-r--r--  arch/arm/mm/proc-arm920.S |  2
-rw-r--r--  arch/arm/mm/proc-arm926.S |  2
-rw-r--r--  arch/arm/mm/proc-mohawk.S |  2
-rw-r--r--  arch/arm/mm/proc-sa1100.S |  2
-rw-r--r--  arch/arm/mm/proc-v6.S     |  2
-rw-r--r--  arch/arm/mm/proc-v7.S     | 19
-rw-r--r--  arch/arm/mm/proc-xsc3.S   |  2
-rw-r--r--  arch/arm/mm/proc-xscale.S |  2
11 files changed, 77 insertions(+), 43 deletions(-)
diff --git a/arch/arm/mm/cache-l2x0.c b/arch/arm/mm/cache-l2x0.c
index c2f37390308a..c465faca51b0 100644
--- a/arch/arm/mm/cache-l2x0.c
+++ b/arch/arm/mm/cache-l2x0.c
@@ -299,7 +299,7 @@ static void l2x0_unlock(u32 cache_id)
 	int lockregs;
 	int i;
 
-	switch (cache_id) {
+	switch (cache_id & L2X0_CACHE_ID_PART_MASK) {
 	case L2X0_CACHE_ID_PART_L310:
 		lockregs = 8;
 		break;
@@ -333,15 +333,14 @@ void __init l2x0_init(void __iomem *base, u32 aux_val, u32 aux_mask)
 	if (cache_id_part_number_from_dt)
 		cache_id = cache_id_part_number_from_dt;
 	else
-		cache_id = readl_relaxed(l2x0_base + L2X0_CACHE_ID)
-			& L2X0_CACHE_ID_PART_MASK;
+		cache_id = readl_relaxed(l2x0_base + L2X0_CACHE_ID);
 	aux = readl_relaxed(l2x0_base + L2X0_AUX_CTRL);
 
 	aux &= aux_mask;
 	aux |= aux_val;
 
 	/* Determine the number of ways */
-	switch (cache_id) {
+	switch (cache_id & L2X0_CACHE_ID_PART_MASK) {
 	case L2X0_CACHE_ID_PART_L310:
 		if (aux & (1 << 16))
 			ways = 16;
@@ -725,7 +724,6 @@ static const struct l2x0_of_data pl310_data = {
 		.flush_all = l2x0_flush_all,
 		.inv_all = l2x0_inv_all,
 		.disable = l2x0_disable,
-		.set_debug = pl310_set_debug,
 	},
 };
 
@@ -814,9 +812,8 @@ int __init l2x0_of_init(u32 aux_val, u32 aux_mask)
 		data->save();
 
 	of_init = true;
-	l2x0_init(l2x0_base, aux_val, aux_mask);
-
 	memcpy(&outer_cache, &data->outer_cache, sizeof(outer_cache));
+	l2x0_init(l2x0_base, aux_val, aux_mask);
 
 	return 0;
 }
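
Note on the cache-l2x0.c hunks above: the raw L2X0_CACHE_ID register value is now kept unmasked and L2X0_CACHE_ID_PART_MASK is applied at each switch, so the RTL revision bits stay available to later revision checks. The following standalone C sketch (not kernel code; the register value is made up, the mask values are meant to mirror asm/hardware/cache-l2x0.h) illustrates why masking at read time would lose information:

#include <stdio.h>
#include <stdint.h>

#define L2X0_CACHE_ID_PART_MASK	(0xf << 6)
#define L2X0_CACHE_ID_PART_L310	(3 << 6)
#define L2X0_CACHE_ID_RTL_MASK	0x3f

static int lockdown_regs(uint32_t cache_id)
{
	/* Only the part number matters here; revision bits are ignored. */
	switch (cache_id & L2X0_CACHE_ID_PART_MASK) {
	case L2X0_CACHE_ID_PART_L310:
		return 8;
	default:
		return 1;
	}
}

int main(void)
{
	uint32_t cache_id = 0x410000c8;	/* hypothetical raw L2X0_CACHE_ID value */

	printf("lockdown register sets: %d\n", lockdown_regs(cache_id));
	/* The revision field is still intact because cache_id was never masked. */
	printf("rtl revision: %u\n", cache_id & L2X0_CACHE_ID_RTL_MASK);
	return 0;
}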
diff --git a/arch/arm/mm/context.c b/arch/arm/mm/context.c
index a5a4b2bc42ba..2ac37372ef52 100644
--- a/arch/arm/mm/context.c
+++ b/arch/arm/mm/context.c
@@ -48,7 +48,7 @@ static DEFINE_RAW_SPINLOCK(cpu_asid_lock);
 static atomic64_t asid_generation = ATOMIC64_INIT(ASID_FIRST_VERSION);
 static DECLARE_BITMAP(asid_map, NUM_USER_ASIDS);
 
-static DEFINE_PER_CPU(atomic64_t, active_asids);
+DEFINE_PER_CPU(atomic64_t, active_asids);
 static DEFINE_PER_CPU(u64, reserved_asids);
 static cpumask_t tlb_flush_pending;
 
@@ -215,6 +215,7 @@ void check_and_switch_context(struct mm_struct *mm, struct task_struct *tsk)
 	if (cpumask_test_and_clear_cpu(cpu, &tlb_flush_pending)) {
 		local_flush_bp_all();
 		local_flush_tlb_all();
+		dummy_flush_tlb_a15_erratum();
 	}
 
 	atomic64_set(&per_cpu(active_asids, cpu), asid);
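
Note on the context.c hunks above: active_asids loses its static qualifier so code outside this file (the Cortex-A15 erratum workaround that dummy_flush_tlb_a15_erratum() belongs to) can reference the per-CPU values. As a rough, standalone illustration of the ASID bookkeeping that variable participates in (names, widths and the generation encoding here are illustrative, not the kernel implementation):

#include <stdatomic.h>
#include <stdint.h>
#include <stdio.h>

#define ASID_BITS	8
#define ASID_MASK	((1u << ASID_BITS) - 1)
#define NR_CPUS_DEMO	4

/* Generation counter lives in the bits above the ASID field. */
static atomic_uint_fast64_t asid_generation = (1u << ASID_BITS);
static atomic_uint_fast64_t active_asids[NR_CPUS_DEMO];

/* Fast path: an mm may keep its ASID if it belongs to the current generation. */
static int asid_is_current(uint64_t mm_asid)
{
	uint64_t gen = atomic_load(&asid_generation);

	return (mm_asid & ~(uint64_t)ASID_MASK) == gen;
}

int main(void)
{
	uint64_t gen = atomic_load(&asid_generation);
	uint64_t asid = gen | 42;	/* ASID 42 in the current generation */

	atomic_store(&active_asids[0], asid);
	printf("cpu0 active asid: %llu, current generation: %d\n",
	       (unsigned long long)(atomic_load(&active_asids[0]) & ASID_MASK),
	       asid_is_current(asid));
	return 0;
}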
diff --git a/arch/arm/mm/mmu.c b/arch/arm/mm/mmu.c
index e95a996ab78f..78978945492a 100644
--- a/arch/arm/mm/mmu.c
+++ b/arch/arm/mm/mmu.c
@@ -598,39 +598,60 @@ static void __init alloc_init_pte(pmd_t *pmd, unsigned long addr,
 	} while (pte++, addr += PAGE_SIZE, addr != end);
 }
 
-static void __init alloc_init_section(pud_t *pud, unsigned long addr,
-				      unsigned long end, phys_addr_t phys,
-				      const struct mem_type *type)
+static void __init map_init_section(pmd_t *pmd, unsigned long addr,
+			unsigned long end, phys_addr_t phys,
+			const struct mem_type *type)
 {
-	pmd_t *pmd = pmd_offset(pud, addr);
-
+#ifndef CONFIG_ARM_LPAE
 	/*
-	 * Try a section mapping - end, addr and phys must all be aligned
-	 * to a section boundary.  Note that PMDs refer to the individual
-	 * L1 entries, whereas PGDs refer to a group of L1 entries making
-	 * up one logical pointer to an L2 table.
+	 * In classic MMU format, puds and pmds are folded in to
+	 * the pgds. pmd_offset gives the PGD entry. PGDs refer to a
+	 * group of L1 entries making up one logical pointer to
+	 * an L2 table (2MB), where as PMDs refer to the individual
+	 * L1 entries (1MB). Hence increment to get the correct
+	 * offset for odd 1MB sections.
+	 * (See arch/arm/include/asm/pgtable-2level.h)
 	 */
-	if (type->prot_sect && ((addr | end | phys) & ~SECTION_MASK) == 0) {
-		pmd_t *p = pmd;
-
-#ifndef CONFIG_ARM_LPAE
-		if (addr & SECTION_SIZE)
-			pmd++;
+	if (addr & SECTION_SIZE)
+		pmd++;
 #endif
+	do {
+		*pmd = __pmd(phys | type->prot_sect);
+		phys += SECTION_SIZE;
+	} while (pmd++, addr += SECTION_SIZE, addr != end);
 
-		do {
-			*pmd = __pmd(phys | type->prot_sect);
-			phys += SECTION_SIZE;
-		} while (pmd++, addr += SECTION_SIZE, addr != end);
+	flush_pmd_entry(pmd);
+}
 
-		flush_pmd_entry(p);
-	} else {
+static void __init alloc_init_pmd(pud_t *pud, unsigned long addr,
+			unsigned long end, phys_addr_t phys,
+			const struct mem_type *type)
+{
+	pmd_t *pmd = pmd_offset(pud, addr);
+	unsigned long next;
+
+	do {
 		/*
-		 * No need to loop; pte's aren't interested in the
-		 * individual L1 entries.
+		 * With LPAE, we must loop over to map
+		 * all the pmds for the given range.
 		 */
-		alloc_init_pte(pmd, addr, end, __phys_to_pfn(phys), type);
-	}
+		next = pmd_addr_end(addr, end);
+
+		/*
+		 * Try a section mapping - addr, next and phys must all be
+		 * aligned to a section boundary.
+		 */
+		if (type->prot_sect &&
+				((addr | next | phys) & ~SECTION_MASK) == 0) {
+			map_init_section(pmd, addr, next, phys, type);
+		} else {
+			alloc_init_pte(pmd, addr, next,
+					__phys_to_pfn(phys), type);
+		}
+
+		phys += next - addr;
+
+	} while (pmd++, addr = next, addr != end);
 }
 
 static void __init alloc_init_pud(pgd_t *pgd, unsigned long addr,
@@ -641,7 +662,7 @@ static void __init alloc_init_pud(pgd_t *pgd, unsigned long addr,
 
 	do {
 		next = pud_addr_end(addr, end);
-		alloc_init_section(pud, addr, next, phys, type);
+		alloc_init_pmd(pud, addr, next, phys, type);
 		phys += next - addr;
 	} while (pud++, addr = next, addr != end);
 }
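
Note on the mmu.c hunks above: alloc_init_pmd() now walks the range with pmd_addr_end() and decides per step between a section mapping and a PTE table, which matters under LPAE where a single request can span several 2MB PMDs. A standalone C sketch of that clamping loop (sizes and addresses are illustrative, not the kernel code):

#include <stdio.h>
#include <stdint.h>

#define PMD_SIZE	(2UL * 1024 * 1024)	/* 2MB PMD, as with LPAE */
#define PMD_MASK	(~(PMD_SIZE - 1))
#define SECTION_SIZE	(1UL * 1024 * 1024)
#define SECTION_MASK	(~(SECTION_SIZE - 1))

/* Same idea as pmd_addr_end(): next PMD boundary, clamped to 'end'. */
static uintptr_t pmd_addr_end(uintptr_t addr, uintptr_t end)
{
	uintptr_t boundary = (addr + PMD_SIZE) & PMD_MASK;

	return boundary < end ? boundary : end;
}

int main(void)
{
	uintptr_t addr = 0x40080000, end = 0x40500000, phys = 0x80080000;

	do {
		uintptr_t next = pmd_addr_end(addr, end);
		int section_ok = ((addr | next | phys) & ~SECTION_MASK) == 0;

		/* One decision per PMD-sized step, exactly like alloc_init_pmd(). */
		printf("map %#lx-%#lx -> %#lx (%s)\n",
		       (unsigned long)addr, (unsigned long)next,
		       (unsigned long)phys,
		       section_ok ? "section mapping" : "pte mapping");

		phys += next - addr;
		addr = next;
	} while (addr != end);

	return 0;
}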
diff --git a/arch/arm/mm/proc-arm920.S b/arch/arm/mm/proc-arm920.S
index 2c3b9421ab5e..2556cf1c2da1 100644
--- a/arch/arm/mm/proc-arm920.S
+++ b/arch/arm/mm/proc-arm920.S
@@ -387,7 +387,7 @@ ENTRY(cpu_arm920_set_pte_ext)
 /* Suspend/resume support: taken from arch/arm/plat-s3c24xx/sleep.S */
 .globl	cpu_arm920_suspend_size
 .equ	cpu_arm920_suspend_size, 4 * 3
-#ifdef CONFIG_PM_SLEEP
+#ifdef CONFIG_ARM_CPU_SUSPEND
 ENTRY(cpu_arm920_do_suspend)
 	stmfd	sp!, {r4 - r6, lr}
 	mrc	p15, 0, r4, c13, c0, 0	@ PID
diff --git a/arch/arm/mm/proc-arm926.S b/arch/arm/mm/proc-arm926.S
index f1803f7e2972..344c8a548cc0 100644
--- a/arch/arm/mm/proc-arm926.S
+++ b/arch/arm/mm/proc-arm926.S
@@ -402,7 +402,7 @@ ENTRY(cpu_arm926_set_pte_ext)
 /* Suspend/resume support: taken from arch/arm/plat-s3c24xx/sleep.S */
 .globl	cpu_arm926_suspend_size
 .equ	cpu_arm926_suspend_size, 4 * 3
-#ifdef CONFIG_PM_SLEEP
+#ifdef CONFIG_ARM_CPU_SUSPEND
 ENTRY(cpu_arm926_do_suspend)
 	stmfd	sp!, {r4 - r6, lr}
 	mrc	p15, 0, r4, c13, c0, 0	@ PID
diff --git a/arch/arm/mm/proc-mohawk.S b/arch/arm/mm/proc-mohawk.S
index 82f9cdc751d6..0b60dd3d742a 100644
--- a/arch/arm/mm/proc-mohawk.S
+++ b/arch/arm/mm/proc-mohawk.S
@@ -350,7 +350,7 @@ ENTRY(cpu_mohawk_set_pte_ext)
 
 .globl	cpu_mohawk_suspend_size
 .equ	cpu_mohawk_suspend_size, 4 * 6
-#ifdef CONFIG_PM_SLEEP
+#ifdef CONFIG_ARM_CPU_SUSPEND
 ENTRY(cpu_mohawk_do_suspend)
 	stmfd	sp!, {r4 - r9, lr}
 	mrc	p14, 0, r4, c6, c0, 0	@ clock configuration, for turbo mode
diff --git a/arch/arm/mm/proc-sa1100.S b/arch/arm/mm/proc-sa1100.S
index 3aa0da11fd84..d92dfd081429 100644
--- a/arch/arm/mm/proc-sa1100.S
+++ b/arch/arm/mm/proc-sa1100.S
@@ -172,7 +172,7 @@ ENTRY(cpu_sa1100_set_pte_ext)
 
 .globl	cpu_sa1100_suspend_size
 .equ	cpu_sa1100_suspend_size, 4 * 3
-#ifdef CONFIG_PM_SLEEP
+#ifdef CONFIG_ARM_CPU_SUSPEND
 ENTRY(cpu_sa1100_do_suspend)
 	stmfd	sp!, {r4 - r6, lr}
 	mrc	p15, 0, r4, c3, c0, 0	@ domain ID
diff --git a/arch/arm/mm/proc-v6.S b/arch/arm/mm/proc-v6.S
index bcaaa8de9325..5c07ee4fe3eb 100644
--- a/arch/arm/mm/proc-v6.S
+++ b/arch/arm/mm/proc-v6.S
@@ -138,7 +138,7 @@ ENTRY(cpu_v6_set_pte_ext)
 /* Suspend/resume support: taken from arch/arm/mach-s3c64xx/sleep.S */
 .globl	cpu_v6_suspend_size
 .equ	cpu_v6_suspend_size, 4 * 6
-#ifdef CONFIG_PM_SLEEP
+#ifdef CONFIG_ARM_CPU_SUSPEND
 ENTRY(cpu_v6_do_suspend)
 	stmfd	sp!, {r4 - r9, lr}
 	mrc	p15, 0, r4, c13, c0, 0	@ FCSE/PID
diff --git a/arch/arm/mm/proc-v7.S b/arch/arm/mm/proc-v7.S
index 3a3c015f8d5c..f584d3f5b37c 100644
--- a/arch/arm/mm/proc-v7.S
+++ b/arch/arm/mm/proc-v7.S
@@ -420,7 +420,7 @@ __v7_pj4b_proc_info:
 __v7_ca7mp_proc_info:
 	.long	0x410fc070
 	.long	0xff0ffff0
-	__v7_proc __v7_ca7mp_setup, hwcaps = HWCAP_IDIV
+	__v7_proc __v7_ca7mp_setup
 	.size	__v7_ca7mp_proc_info, . - __v7_ca7mp_proc_info
 
 	/*
@@ -430,10 +430,25 @@ __v7_ca7mp_proc_info:
 __v7_ca15mp_proc_info:
 	.long	0x410fc0f0
 	.long	0xff0ffff0
-	__v7_proc __v7_ca15mp_setup, hwcaps = HWCAP_IDIV
+	__v7_proc __v7_ca15mp_setup
 	.size	__v7_ca15mp_proc_info, . - __v7_ca15mp_proc_info
 
 	/*
+	 * Qualcomm Inc. Krait processors.
+	 */
+	.type	__krait_proc_info, #object
+__krait_proc_info:
+	.long	0x510f0400		@ Required ID value
+	.long	0xff0ffc00		@ Mask for ID
+	/*
+	 * Some Krait processors don't indicate support for SDIV and UDIV
+	 * instructions in the ARM instruction set, even though they actually
+	 * do support them.
+	 */
+	__v7_proc __v7_setup, hwcaps = HWCAP_IDIV
+	.size	__krait_proc_info, . - __krait_proc_info
+
+	/*
 	 * Match any ARMv7 processor core.
 	 */
 	.type	__v7_proc_info, #object
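
Note on the proc-v7.S hunks above: HWCAP_IDIV is dropped from the generic Cortex-A7/A15 entries (the divide hwcaps are evidently probed elsewhere for those cores) and is forced only for Krait, which supports SDIV/UDIV without advertising them in its ID registers. A userspace-side sketch of consuming that hwcap via the auxiliary vector (the bit values are meant to mirror arch/arm/include/uapi/asm/hwcap.h; treat them as illustrative):

#include <stdio.h>
#include <sys/auxv.h>

#define HWCAP_IDIVA	(1 << 17)	/* SDIV/UDIV in the ARM instruction set */
#define HWCAP_IDIVT	(1 << 18)	/* SDIV/UDIV in Thumb */

int main(void)
{
	unsigned long hwcap = getauxval(AT_HWCAP);

	/* Only use the divide instructions if the kernel says they exist. */
	if (hwcap & HWCAP_IDIVA)
		puts("kernel reports ARM-mode integer divide");
	else
		puts("no ARM-mode integer divide advertised");

	return 0;
}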
diff --git a/arch/arm/mm/proc-xsc3.S b/arch/arm/mm/proc-xsc3.S
index eb93d6487f35..e8efd83b6f25 100644
--- a/arch/arm/mm/proc-xsc3.S
+++ b/arch/arm/mm/proc-xsc3.S
@@ -413,7 +413,7 @@ ENTRY(cpu_xsc3_set_pte_ext)
 
 .globl	cpu_xsc3_suspend_size
 .equ	cpu_xsc3_suspend_size, 4 * 6
-#ifdef CONFIG_PM_SLEEP
+#ifdef CONFIG_ARM_CPU_SUSPEND
 ENTRY(cpu_xsc3_do_suspend)
 	stmfd	sp!, {r4 - r9, lr}
 	mrc	p14, 0, r4, c6, c0, 0	@ clock configuration, for turbo mode
diff --git a/arch/arm/mm/proc-xscale.S b/arch/arm/mm/proc-xscale.S
index 25510361aa18..e766f889bfd6 100644
--- a/arch/arm/mm/proc-xscale.S
+++ b/arch/arm/mm/proc-xscale.S
@@ -528,7 +528,7 @@ ENTRY(cpu_xscale_set_pte_ext)
 
 .globl	cpu_xscale_suspend_size
 .equ	cpu_xscale_suspend_size, 4 * 6
-#ifdef CONFIG_PM_SLEEP
+#ifdef CONFIG_ARM_CPU_SUSPEND
 ENTRY(cpu_xscale_do_suspend)
 	stmfd	sp!, {r4 - r9, lr}
 	mrc	p14, 0, r4, c6, c0, 0	@ clock configuration, for turbo mode