 arch/arm/include/asm/highmem.h  |  3
 arch/arm/mm/cache-feroceon-l2.c | 37
 arch/arm/mm/cache-xsc3l2.c      | 57
 arch/arm/mm/dma-mapping.c       |  7
 arch/arm/mm/flush.c             |  7
 arch/arm/mm/highmem.c           | 87
 6 files changed, 48 insertions(+), 150 deletions(-)
diff --git a/arch/arm/include/asm/highmem.h b/arch/arm/include/asm/highmem.h
index 1fc684e70ab..7080e2c8fa6 100644
--- a/arch/arm/include/asm/highmem.h
+++ b/arch/arm/include/asm/highmem.h
@@ -25,9 +25,6 @@ extern void *kmap_high(struct page *page);
 extern void *kmap_high_get(struct page *page);
 extern void kunmap_high(struct page *page);
 
-extern void *kmap_high_l1_vipt(struct page *page, pte_t *saved_pte);
-extern void kunmap_high_l1_vipt(struct page *page, pte_t saved_pte);
-
 /*
  * The following functions are already defined by <linux/highmem.h>
  * when CONFIG_HIGHMEM is not set.
diff --git a/arch/arm/mm/cache-feroceon-l2.c b/arch/arm/mm/cache-feroceon-l2.c
index 6e77c042d8e..e0b0e7a4ec6 100644
--- a/arch/arm/mm/cache-feroceon-l2.c
+++ b/arch/arm/mm/cache-feroceon-l2.c
@@ -13,13 +13,9 @@
  */
 
 #include <linux/init.h>
+#include <linux/highmem.h>
 #include <asm/cacheflush.h>
-#include <asm/kmap_types.h>
-#include <asm/fixmap.h>
-#include <asm/pgtable.h>
-#include <asm/tlbflush.h>
 #include <plat/cache-feroceon-l2.h>
-#include "mm.h"
 
 /*
  * Low-level cache maintenance operations.
@@ -39,27 +35,30 @@
  * between which we don't want to be preempted.
  */
 
-static inline unsigned long l2_start_va(unsigned long paddr)
+static inline unsigned long l2_get_va(unsigned long paddr)
 {
 #ifdef CONFIG_HIGHMEM
        /*
-        * Let's do our own fixmap stuff in a minimal way here.
         * Because range ops can't be done on physical addresses,
         * we simply install a virtual mapping for it only for the
         * TLB lookup to occur, hence no need to flush the untouched
-        * memory mapping. This is protected with the disabling of
-        * interrupts by the caller.
+        * memory mapping afterwards (note: a cache flush may happen
+        * in some circumstances depending on the path taken in kunmap_atomic).
         */
-       unsigned long idx = KM_L2_CACHE + KM_TYPE_NR * smp_processor_id();
-       unsigned long vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
-       set_pte_ext(TOP_PTE(vaddr), pfn_pte(paddr >> PAGE_SHIFT, PAGE_KERNEL), 0);
-       local_flush_tlb_kernel_page(vaddr);
-       return vaddr + (paddr & ~PAGE_MASK);
+       void *vaddr = kmap_atomic_pfn(paddr >> PAGE_SHIFT);
+       return (unsigned long)vaddr + (paddr & ~PAGE_MASK);
 #else
        return __phys_to_virt(paddr);
 #endif
 }
 
+static inline void l2_put_va(unsigned long vaddr)
+{
+#ifdef CONFIG_HIGHMEM
+       kunmap_atomic((void *)vaddr);
+#endif
+}
+
 static inline void l2_clean_pa(unsigned long addr)
 {
        __asm__("mcr p15, 1, %0, c15, c9, 3" : : "r" (addr));
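For context, the new helpers simply bracket the operation with the generic atomic kmap API. Below is a minimal sketch of that bracket under CONFIG_HIGHMEM; do_range_op() is a hypothetical stand-in for the coprocessor maintenance op, not a function from the patch:

#include <linux/highmem.h>

/* Sketch: map a physical address, operate on it virtually, unmap.
 * kmap_atomic_pfn() disables preemption and installs a per-CPU
 * mapping for the page frame; kunmap_atomic() tears it down. */
static void range_op_on_pa(unsigned long paddr, size_t len)
{
       void *page_va = kmap_atomic_pfn(paddr >> PAGE_SHIFT);
       unsigned long va = (unsigned long)page_va + (paddr & ~PAGE_MASK);

       do_range_op(va, va + len);      /* hypothetical maintenance op */

       kunmap_atomic(page_va);
}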
@@ -76,13 +75,14 @@ static inline void l2_clean_pa_range(unsigned long start, unsigned long end)
         */
        BUG_ON((start ^ end) >> PAGE_SHIFT);
 
-       raw_local_irq_save(flags);
-       va_start = l2_start_va(start);
+       va_start = l2_get_va(start);
        va_end = va_start + (end - start);
+       raw_local_irq_save(flags);
        __asm__("mcr p15, 1, %0, c15, c9, 4\n\t"
                "mcr p15, 1, %1, c15, c9, 5"
                : : "r" (va_start), "r" (va_end));
        raw_local_irq_restore(flags);
+       l2_put_va(va_start);
 }
 
 static inline void l2_clean_inv_pa(unsigned long addr)
@@ -106,13 +106,14 @@ static inline void l2_inv_pa_range(unsigned long start, unsigned long end)
         */
        BUG_ON((start ^ end) >> PAGE_SHIFT);
 
-       raw_local_irq_save(flags);
-       va_start = l2_start_va(start);
+       va_start = l2_get_va(start);
        va_end = va_start + (end - start);
+       raw_local_irq_save(flags);
        __asm__("mcr p15, 1, %0, c15, c11, 4\n\t"
                "mcr p15, 1, %1, c15, c11, 5"
                : : "r" (va_start), "r" (va_end));
        raw_local_irq_restore(flags);
+       l2_put_va(va_start);
 }
 
 static inline void l2_inv_all(void)
diff --git a/arch/arm/mm/cache-xsc3l2.c b/arch/arm/mm/cache-xsc3l2.c
index c3154928bcc..5a32020471e 100644
--- a/arch/arm/mm/cache-xsc3l2.c
+++ b/arch/arm/mm/cache-xsc3l2.c
@@ -17,14 +17,10 @@
  * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
  */
 #include <linux/init.h>
+#include <linux/highmem.h>
 #include <asm/system.h>
 #include <asm/cputype.h>
 #include <asm/cacheflush.h>
-#include <asm/kmap_types.h>
-#include <asm/fixmap.h>
-#include <asm/pgtable.h>
-#include <asm/tlbflush.h>
-#include "mm.h"
 
 #define CR_L2  (1 << 26)
 
@@ -71,16 +67,15 @@ static inline void xsc3_l2_inv_all(void)
        dsb();
 }
 
+static inline void l2_unmap_va(unsigned long va)
+{
 #ifdef CONFIG_HIGHMEM
-#define l2_map_save_flags(x)           raw_local_save_flags(x)
-#define l2_map_restore_flags(x)        raw_local_irq_restore(x)
-#else
-#define l2_map_save_flags(x)           ((x) = 0)
-#define l2_map_restore_flags(x)        ((void)(x))
+       if (va != -1)
+               kunmap_atomic((void *)va);
 #endif
+}
 
-static inline unsigned long l2_map_va(unsigned long pa, unsigned long prev_va,
-                                     unsigned long flags)
+static inline unsigned long l2_map_va(unsigned long pa, unsigned long prev_va)
 {
 #ifdef CONFIG_HIGHMEM
        unsigned long va = prev_va & PAGE_MASK;
@@ -89,17 +84,10 @@ static inline unsigned long l2_map_va(unsigned long pa, unsigned long prev_va,
                /*
                 * Switching to a new page.  Because cache ops are
                 * using virtual addresses only, we must put a mapping
-                * in place for it.  We also enable interrupts for a
-                * short while and disable them again to protect this
-                * mapping.
+                * in place for it.
                 */
-               unsigned long idx;
-               raw_local_irq_restore(flags);
-               idx = KM_L2_CACHE + KM_TYPE_NR * smp_processor_id();
-               va = __fix_to_virt(FIX_KMAP_BEGIN + idx);
-               raw_local_irq_restore(flags | PSR_I_BIT);
-               set_pte_ext(TOP_PTE(va), pfn_pte(pa >> PAGE_SHIFT, PAGE_KERNEL), 0);
-               local_flush_tlb_kernel_page(va);
+               l2_unmap_va(prev_va);
+               va = (unsigned long)kmap_atomic_pfn(pa >> PAGE_SHIFT);
        }
        return va + (pa_offset >> (32 - PAGE_SHIFT));
 #else
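The hunk above shows only part of l2_map_va(). Reconstructed from its context, the prev_va caching works roughly as sketched below; the pa_offset definition (pa left-shifted so that only the offset-in-page bits survive) is assumed from lines not shown in this hunk, so treat this as an illustration rather than the verbatim function:

#include <linux/highmem.h>

/* Sketch: keep the previous atomic kmap alive across calls and remap
 * only when 'pa' has moved on to a new page.  Assumes monotonically
 * increasing addresses, as in the range loops below. */
static inline unsigned long l2_map_va_sketch(unsigned long pa,
                                             unsigned long prev_va)
{
       unsigned long va = prev_va & PAGE_MASK;
       unsigned long pa_offset = pa << (32 - PAGE_SHIFT);

       if (unlikely(pa_offset < (prev_va << (32 - PAGE_SHIFT)))) {
               /* crossed into a new page: release the old mapping
                * (l2_unmap_va() skips the initial va == -1 case)
                * and install a fresh one */
               l2_unmap_va(prev_va);
               va = (unsigned long)kmap_atomic_pfn(pa >> PAGE_SHIFT);
       }
       return va + (pa_offset >> (32 - PAGE_SHIFT));
}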
@@ -109,7 +97,7 @@ static inline unsigned long l2_map_va(unsigned long pa, unsigned long prev_va,
 
 static void xsc3_l2_inv_range(unsigned long start, unsigned long end)
 {
-       unsigned long vaddr, flags;
+       unsigned long vaddr;
 
        if (start == 0 && end == -1ul) {
                xsc3_l2_inv_all();
@@ -117,13 +105,12 @@ static void xsc3_l2_inv_range(unsigned long start, unsigned long end)
        }
 
        vaddr = -1;  /* to force the first mapping */
-       l2_map_save_flags(flags);
 
        /*
         * Clean and invalidate partial first cache line.
         */
        if (start & (CACHE_LINE_SIZE - 1)) {
-               vaddr = l2_map_va(start & ~(CACHE_LINE_SIZE - 1), vaddr, flags);
+               vaddr = l2_map_va(start & ~(CACHE_LINE_SIZE - 1), vaddr);
                xsc3_l2_clean_mva(vaddr);
                xsc3_l2_inv_mva(vaddr);
                start = (start | (CACHE_LINE_SIZE - 1)) + 1;
@@ -133,7 +120,7 @@ static void xsc3_l2_inv_range(unsigned long start, unsigned long end)
         * Invalidate all full cache lines between 'start' and 'end'.
         */
        while (start < (end & ~(CACHE_LINE_SIZE - 1))) {
-               vaddr = l2_map_va(start, vaddr, flags);
+               vaddr = l2_map_va(start, vaddr);
                xsc3_l2_inv_mva(vaddr);
                start += CACHE_LINE_SIZE;
        }
@@ -142,31 +129,30 @@ static void xsc3_l2_inv_range(unsigned long start, unsigned long end)
         * Clean and invalidate partial last cache line.
         */
        if (start < end) {
-               vaddr = l2_map_va(start, vaddr, flags);
+               vaddr = l2_map_va(start, vaddr);
                xsc3_l2_clean_mva(vaddr);
                xsc3_l2_inv_mva(vaddr);
        }
 
-       l2_map_restore_flags(flags);
+       l2_unmap_va(vaddr);
 
        dsb();
 }
 
 static void xsc3_l2_clean_range(unsigned long start, unsigned long end)
 {
-       unsigned long vaddr, flags;
+       unsigned long vaddr;
 
        vaddr = -1;  /* to force the first mapping */
-       l2_map_save_flags(flags);
 
        start &= ~(CACHE_LINE_SIZE - 1);
        while (start < end) {
-               vaddr = l2_map_va(start, vaddr, flags);
+               vaddr = l2_map_va(start, vaddr);
                xsc3_l2_clean_mva(vaddr);
                start += CACHE_LINE_SIZE;
        }
 
-       l2_map_restore_flags(flags);
+       l2_unmap_va(vaddr);
 
        dsb();
 }
@@ -193,7 +179,7 @@ static inline void xsc3_l2_flush_all(void)
 
 static void xsc3_l2_flush_range(unsigned long start, unsigned long end)
 {
-       unsigned long vaddr, flags;
+       unsigned long vaddr;
 
        if (start == 0 && end == -1ul) {
                xsc3_l2_flush_all();
@@ -201,17 +187,16 @@ static void xsc3_l2_flush_range(unsigned long start, unsigned long end)
        }
 
        vaddr = -1;  /* to force the first mapping */
-       l2_map_save_flags(flags);
 
        start &= ~(CACHE_LINE_SIZE - 1);
        while (start < end) {
-               vaddr = l2_map_va(start, vaddr, flags);
+               vaddr = l2_map_va(start, vaddr);
                xsc3_l2_clean_mva(vaddr);
                xsc3_l2_inv_mva(vaddr);
                start += CACHE_LINE_SIZE;
        }
 
-       l2_map_restore_flags(flags);
+       l2_unmap_va(vaddr);
 
        dsb();
 }
diff --git a/arch/arm/mm/dma-mapping.c b/arch/arm/mm/dma-mapping.c
index ac6a36142fc..809f1bf9fa2 100644
--- a/arch/arm/mm/dma-mapping.c
+++ b/arch/arm/mm/dma-mapping.c
@@ -17,6 +17,7 @@
 #include <linux/init.h>
 #include <linux/device.h>
 #include <linux/dma-mapping.h>
+#include <linux/highmem.h>
 
 #include <asm/memory.h>
 #include <asm/highmem.h>
@@ -480,10 +481,10 @@ static void dma_cache_maint_page(struct page *page, unsigned long offset,
                        op(vaddr, len, dir);
                        kunmap_high(page);
                } else if (cache_is_vipt()) {
-                       pte_t saved_pte;
-                       vaddr = kmap_high_l1_vipt(page, &saved_pte);
+                       /* unmapped pages might still be cached */
+                       vaddr = kmap_atomic(page);
                        op(vaddr + offset, len, dir);
-                       kunmap_high_l1_vipt(page, saved_pte);
+                       kunmap_atomic(vaddr);
                }
        } else {
                vaddr = page_address(page) + offset;
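The same substitution appears at both call sites of the removed API: a plain kmap_atomic()/kunmap_atomic() pair around the maintenance op. A condensed sketch of the VIPT branch follows; maint_unmapped_page() and the simplified cache_op signature are illustrative only, standing in for dma_cache_maint_page() and the dmac_* routine selected by its caller:

#include <linux/highmem.h>

static void maint_unmapped_page(struct page *page, unsigned long offset,
                                size_t len,
                                void (*cache_op)(const void *, size_t))
{
       /* The page may have no permanent kernel mapping, yet a VIPT
        * cache can still hold lines for it, so map it briefly. */
       void *vaddr = kmap_atomic(page);
       cache_op(vaddr + offset, len);
       kunmap_atomic(vaddr);
}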
diff --git a/arch/arm/mm/flush.c b/arch/arm/mm/flush.c
index 391ffae7509..c29f2839f1d 100644
--- a/arch/arm/mm/flush.c
+++ b/arch/arm/mm/flush.c
@@ -10,6 +10,7 @@
 #include <linux/module.h>
 #include <linux/mm.h>
 #include <linux/pagemap.h>
+#include <linux/highmem.h>
 
 #include <asm/cacheflush.h>
 #include <asm/cachetype.h>
@@ -180,10 +181,10 @@ void __flush_dcache_page(struct address_space *mapping, struct page *page)
                        __cpuc_flush_dcache_area(addr, PAGE_SIZE);
                        kunmap_high(page);
                } else if (cache_is_vipt()) {
-                       pte_t saved_pte;
-                       addr = kmap_high_l1_vipt(page, &saved_pte);
+                       /* unmapped pages might still be cached */
+                       addr = kmap_atomic(page);
                        __cpuc_flush_dcache_area(addr, PAGE_SIZE);
-                       kunmap_high_l1_vipt(page, saved_pte);
+                       kunmap_atomic(addr);
                }
        }
 
diff --git a/arch/arm/mm/highmem.c b/arch/arm/mm/highmem.c
index c435fd9e1da..807c0573abb 100644
--- a/arch/arm/mm/highmem.c
+++ b/arch/arm/mm/highmem.c
@@ -140,90 +140,3 @@ struct page *kmap_atomic_to_page(const void *ptr)
        pte = TOP_PTE(vaddr);
        return pte_page(*pte);
 }
-
-#ifdef CONFIG_CPU_CACHE_VIPT
-
-#include <linux/percpu.h>
-
-/*
- * The VIVT cache of a highmem page is always flushed before the page
- * is unmapped. Hence unmapped highmem pages need no cache maintenance
- * in that case.
- *
- * However unmapped pages may still be cached with a VIPT cache, and
- * it is not possible to perform cache maintenance on them using physical
- * addresses unfortunately. So we have no choice but to set up a temporary
- * virtual mapping for that purpose.
- *
- * Yet this VIPT cache maintenance may be triggered from DMA support
- * functions which are possibly called from interrupt context. As we don't
- * want to keep interrupts disabled all the time when such maintenance is
- * taking place, we therefore allow for some reentrancy by preserving and
- * restoring the previous fixmap entry before the interrupted context is
- * resumed. If the reentrancy depth is 0 then there is no need to restore
- * the previous fixmap, and leaving the current one in place allows it to
- * be reused the next time without a TLB flush (common with DMA).
- */
-
-static DEFINE_PER_CPU(int, kmap_high_l1_vipt_depth);
-
-void *kmap_high_l1_vipt(struct page *page, pte_t *saved_pte)
-{
-       unsigned int idx, cpu;
-       int *depth;
-       unsigned long vaddr, flags;
-       pte_t pte, *ptep;
-
-       if (!in_interrupt())
-               preempt_disable();
-
-       cpu = smp_processor_id();
-       depth = &per_cpu(kmap_high_l1_vipt_depth, cpu);
-
-       idx = KM_L1_CACHE + KM_TYPE_NR * cpu;
-       vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
-       ptep = TOP_PTE(vaddr);
-       pte = mk_pte(page, kmap_prot);
-
-       raw_local_irq_save(flags);
-       (*depth)++;
-       if (pte_val(*ptep) == pte_val(pte)) {
-               *saved_pte = pte;
-       } else {
-               *saved_pte = *ptep;
-               set_pte_ext(ptep, pte, 0);
-               local_flush_tlb_kernel_page(vaddr);
-       }
-       raw_local_irq_restore(flags);
-
-       return (void *)vaddr;
-}
-
-void kunmap_high_l1_vipt(struct page *page, pte_t saved_pte)
-{
-       unsigned int idx, cpu = smp_processor_id();
-       int *depth = &per_cpu(kmap_high_l1_vipt_depth, cpu);
-       unsigned long vaddr, flags;
-       pte_t pte, *ptep;
-
-       idx = KM_L1_CACHE + KM_TYPE_NR * cpu;
-       vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
-       ptep = TOP_PTE(vaddr);
-       pte = mk_pte(page, kmap_prot);
-
-       BUG_ON(pte_val(*ptep) != pte_val(pte));
-       BUG_ON(*depth <= 0);
-
-       raw_local_irq_save(flags);
-       (*depth)--;
-       if (*depth != 0 && pte_val(pte) != pte_val(saved_pte)) {
-               set_pte_ext(ptep, saved_pte, 0);
-               local_flush_tlb_kernel_page(vaddr);
-       }
-       raw_local_irq_restore(flags);
-
-       if (!in_interrupt())
-               preempt_enable();
-}
-
-#endif /* CONFIG_CPU_CACHE_VIPT */
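This wholesale removal is presumably enabled by the core mm's stack-based kmap_atomic() rework (an assumption inferred from the kmap_atomic() substitutions above): nested atomic mappings, including ones taken from interrupt context, are now handled by the generic code, so the per-CPU depth counter and saved-PTE dance deleted here become redundant. A sketch of the nesting the core API absorbs; nesting_example() is illustrative only:

#include <linux/highmem.h>

/* Sketch: atomic kmaps nest like a per-CPU stack, so a mapping taken
 * in interrupt context while the interrupted task holds one of its
 * own needs no explicit save/restore of the fixmap PTE. */
static void nesting_example(struct page *outer, struct page *inner)
{
       void *a = kmap_atomic(outer);   /* slot pushed */
       void *b = kmap_atomic(inner);   /* nested slot pushed */
       /* ... use b, then a ... */
       kunmap_atomic(b);               /* pop in reverse order */
       kunmap_atomic(a);
}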
