 arch/arm/include/asm/kmap_types.h |  1
 arch/arm/mm/cache-feroceon-l2.c   | 54
 2 files changed, 38 insertions, 17 deletions
diff --git a/arch/arm/include/asm/kmap_types.h b/arch/arm/include/asm/kmap_types.h
index 45def13ee17a..d16ec97ec9a9 100644
--- a/arch/arm/include/asm/kmap_types.h
+++ b/arch/arm/include/asm/kmap_types.h
@@ -18,6 +18,7 @@ enum km_type {
 	KM_IRQ1,
 	KM_SOFTIRQ0,
 	KM_SOFTIRQ1,
+	KM_L2_CACHE,
 	KM_TYPE_NR
 };
 
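The new KM_L2_CACHE entry gives the L2 maintenance code its own atomic-kmap slot in the fixmap area, separate from the existing KM_IRQ*/KM_SOFTIRQ* users, and each CPU gets a distinct window because the slot index is offset by KM_TYPE_NR per CPU. A minimal sketch of that index-to-address mapping, using FIX_KMAP_BEGIN and __fix_to_virt() as in the patch below; the helper name km_slot_to_vaddr() is invented here for illustration and is not part of the kernel:

#include <asm/fixmap.h>
#include <asm/kmap_types.h>

/* Illustration only: the fixmap virtual address a km_type slot maps to. */
static unsigned long km_slot_to_vaddr(enum km_type type, int cpu)
{
	/* one window per (slot, CPU) pair, so CPUs never stomp on each other */
	unsigned long idx = type + KM_TYPE_NR * cpu;

	return __fix_to_virt(FIX_KMAP_BEGIN + idx);
}
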
diff --git a/arch/arm/mm/cache-feroceon-l2.c b/arch/arm/mm/cache-feroceon-l2.c
index 80cd207cbaea..d6dd83826f8a 100644
--- a/arch/arm/mm/cache-feroceon-l2.c
+++ b/arch/arm/mm/cache-feroceon-l2.c
@@ -14,8 +14,12 @@
 
 #include <linux/init.h>
 #include <asm/cacheflush.h>
+#include <asm/kmap_types.h>
+#include <asm/fixmap.h>
+#include <asm/pgtable.h>
+#include <asm/tlbflush.h>
 #include <plat/cache-feroceon-l2.h>
-
+#include "mm.h"
 
 /*
  * Low-level cache maintenance operations.
@@ -34,14 +38,36 @@
  * The range operations require two successive cp15 writes, in
  * between which we don't want to be preempted.
  */
+
+static inline unsigned long l2_start_va(unsigned long paddr)
+{
+#ifdef CONFIG_HIGHMEM
+	/*
+	 * Let's do our own fixmap stuff in a minimal way here.
+	 * Because range ops can't be done on physical addresses,
+	 * we simply install a virtual mapping for it only for the
+	 * TLB lookup to occur, hence no need to flush the untouched
+	 * memory mapping. This is protected with the disabling of
+	 * interrupts by the caller.
+	 */
+	unsigned long idx = KM_L2_CACHE + KM_TYPE_NR * smp_processor_id();
+	unsigned long vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
+	set_pte_ext(TOP_PTE(vaddr), pfn_pte(paddr >> PAGE_SHIFT, PAGE_KERNEL), 0);
+	local_flush_tlb_kernel_page(vaddr);
+	return vaddr + (paddr & ~PAGE_MASK);
+#else
+	return __phys_to_virt(paddr);
+#endif
+}
+
 static inline void l2_clean_pa(unsigned long addr)
 {
 	__asm__("mcr p15, 1, %0, c15, c9, 3" : : "r" (addr));
 }
 
-static inline void l2_clean_mva_range(unsigned long start, unsigned long end)
+static inline void l2_clean_pa_range(unsigned long start, unsigned long end)
 {
-	unsigned long flags;
+	unsigned long va_start, va_end, flags;
 
 	/*
 	 * Make sure 'start' and 'end' reference the same page, as
@@ -51,17 +77,14 @@ static inline void l2_clean_mva_range(unsigned long start, unsigned long end)
 	BUG_ON((start ^ end) >> PAGE_SHIFT);
 
 	raw_local_irq_save(flags);
+	va_start = l2_start_va(start);
+	va_end = va_start + (end - start);
 	__asm__("mcr p15, 1, %0, c15, c9, 4\n\t"
 		"mcr p15, 1, %1, c15, c9, 5"
-		: : "r" (start), "r" (end));
+		: : "r" (va_start), "r" (va_end));
 	raw_local_irq_restore(flags);
 }
 
-static inline void l2_clean_pa_range(unsigned long start, unsigned long end)
-{
-	l2_clean_mva_range(__phys_to_virt(start), __phys_to_virt(end));
-}
-
 static inline void l2_clean_inv_pa(unsigned long addr)
 {
 	__asm__("mcr p15, 1, %0, c15, c10, 3" : : "r" (addr));
@@ -72,9 +95,9 @@ static inline void l2_inv_pa(unsigned long addr)
 	__asm__("mcr p15, 1, %0, c15, c11, 3" : : "r" (addr));
 }
 
-static inline void l2_inv_mva_range(unsigned long start, unsigned long end)
+static inline void l2_inv_pa_range(unsigned long start, unsigned long end)
 {
-	unsigned long flags;
+	unsigned long va_start, va_end, flags;
 
 	/*
 	 * Make sure 'start' and 'end' reference the same page, as
@@ -84,17 +107,14 @@ static inline void l2_inv_mva_range(unsigned long start, unsigned long end)
 	BUG_ON((start ^ end) >> PAGE_SHIFT);
 
 	raw_local_irq_save(flags);
+	va_start = l2_start_va(start);
+	va_end = va_start + (end - start);
 	__asm__("mcr p15, 1, %0, c15, c11, 4\n\t"
 		"mcr p15, 1, %1, c15, c11, 5"
-		: : "r" (start), "r" (end));
+		: : "r" (va_start), "r" (va_end));
 	raw_local_irq_restore(flags);
 }
 
-static inline void l2_inv_pa_range(unsigned long start, unsigned long end)
-{
-	l2_inv_mva_range(__phys_to_virt(start), __phys_to_virt(end));
-}
-
 
 /*
  * Linux primitives.
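
With the old *_mva_range() wrappers folded away, callers now pass physical addresses directly to l2_clean_pa_range() and l2_inv_pa_range(). On CONFIG_HIGHMEM kernels, l2_start_va() installs a temporary per-CPU kernel mapping purely so the two cp15 range writes have a virtual address the TLB can resolve; the mapping is never torn down, since each new use simply rewrites the PTE and flushes that single TLB entry, all under the caller's disabled interrupts. A minimal usage sketch, assuming a 32-byte Feroceon L2 line (the L2_LINE_SIZE name is invented here); because of the BUG_ON(), a single call must not cross a page boundary, so larger buffers have to be walked one page at a time:

#define L2_LINE_SIZE	32	/* assumed Feroceon L2 cache line size */

/*
 * Illustration only: clean one page worth of physically addressed
 * memory through the reworked helper.  'paddr' is assumed to be page
 * aligned, and the end address is that of the last cache line in the
 * page, keeping start and end on the same page as required by the
 * BUG_ON() in l2_clean_pa_range().
 */
static void l2_clean_one_page(unsigned long paddr)
{
	l2_clean_pa_range(paddr, paddr + PAGE_SIZE - L2_LINE_SIZE);
}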
