Diffstat (limited to 'arch/arm/mm/highmem.c')
-rw-r--r--  arch/arm/mm/highmem.c  |  87
1 files changed, 86 insertions, 1 deletions
diff --git a/arch/arm/mm/highmem.c b/arch/arm/mm/highmem.c
index 2be1ec7c1b41..77b030f5ec09 100644
--- a/arch/arm/mm/highmem.c
+++ b/arch/arm/mm/highmem.c
@@ -79,7 +79,8 @@ void kunmap_atomic(void *kvaddr, enum km_type type)
 	unsigned int idx = type + KM_TYPE_NR * smp_processor_id();
 
 	if (kvaddr >= (void *)FIXADDR_START) {
-		__cpuc_flush_dcache_area((void *)vaddr, PAGE_SIZE);
+		if (cache_is_vivt())
+			__cpuc_flush_dcache_area((void *)vaddr, PAGE_SIZE);
 #ifdef CONFIG_DEBUG_HIGHMEM
 		BUG_ON(vaddr != __fix_to_virt(FIX_KMAP_BEGIN + idx));
 		set_pte_ext(TOP_PTE(vaddr), __pte(0), 0);
@@ -124,3 +125,87 @@ struct page *kmap_atomic_to_page(const void *ptr)
 	pte = TOP_PTE(vaddr);
 	return pte_page(*pte);
 }
+
+#ifdef CONFIG_CPU_CACHE_VIPT
+
+#include <linux/percpu.h>
+
+/*
+ * The VIVT cache of a highmem page is always flushed before the page
+ * is unmapped. Hence unmapped highmem pages need no cache maintenance
+ * in that case.
+ *
+ * However unmapped pages may still be cached with a VIPT cache, and
+ * it is not possible to perform cache maintenance on them using physical
+ * addresses unfortunately. So we have no choice but to set up a temporary
+ * virtual mapping for that purpose.
+ *
+ * Yet this VIPT cache maintenance may be triggered from DMA support
+ * functions which are possibly called from interrupt context. As we don't
+ * want to keep interrupts disabled all the time when such maintenance is
+ * taking place, we therefore allow for some reentrancy by preserving and
+ * restoring the previous fixmap entry before the interrupted context is
+ * resumed. If the reentrancy depth is 0 then there is no need to restore
+ * the previous fixmap, and leaving the current one in place allows it to
+ * be reused the next time without a TLB flush (common with DMA).
+ */
+
+static DEFINE_PER_CPU(int, kmap_high_l1_vipt_depth);
+
+void *kmap_high_l1_vipt(struct page *page, pte_t *saved_pte)
+{
+	unsigned int idx, cpu = smp_processor_id();
+	int *depth = &per_cpu(kmap_high_l1_vipt_depth, cpu);
+	unsigned long vaddr, flags;
+	pte_t pte, *ptep;
+
+	idx = KM_L1_CACHE + KM_TYPE_NR * cpu;
+	vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
+	ptep = TOP_PTE(vaddr);
+	pte = mk_pte(page, kmap_prot);
+
+	if (!in_interrupt())
+		preempt_disable();
+
+	raw_local_irq_save(flags);
+	(*depth)++;
+	if (pte_val(*ptep) == pte_val(pte)) {
+		*saved_pte = pte;
+	} else {
+		*saved_pte = *ptep;
+		set_pte_ext(ptep, pte, 0);
+		local_flush_tlb_kernel_page(vaddr);
+	}
+	raw_local_irq_restore(flags);
+
+	return (void *)vaddr;
+}
+
+void kunmap_high_l1_vipt(struct page *page, pte_t saved_pte)
+{
+	unsigned int idx, cpu = smp_processor_id();
+	int *depth = &per_cpu(kmap_high_l1_vipt_depth, cpu);
+	unsigned long vaddr, flags;
+	pte_t pte, *ptep;
+
+	idx = KM_L1_CACHE + KM_TYPE_NR * cpu;
+	vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
+	ptep = TOP_PTE(vaddr);
+	pte = mk_pte(page, kmap_prot);
+
+	BUG_ON(pte_val(*ptep) != pte_val(pte));
+	BUG_ON(*depth <= 0);
+
+	raw_local_irq_save(flags);
+	(*depth)--;
+	if (*depth != 0 && pte_val(pte) != pte_val(saved_pte)) {
+		set_pte_ext(ptep, saved_pte, 0);
+		local_flush_tlb_kernel_page(vaddr);
+	}
+	raw_local_irq_restore(flags);
+
+	if (!in_interrupt())
+		preempt_enable();
+}
+
+#endif /* CONFIG_CPU_CACHE_VIPT */
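
For orientation, below is a minimal sketch of how a caller might use the new kmap_high_l1_vipt()/kunmap_high_l1_vipt() pair. It is not part of this patch: the function name example_flush_highmem_page() is hypothetical, the real call sites (the flush_dcache_page()/DMA cache maintenance paths elsewhere in arch/arm/mm/) are not shown in this file, and the sketch assumes the declarations added to <asm/highmem.h> by the same series plus cache_is_vipt() from <asm/cachetype.h>.

/*
 * Hypothetical caller sketch -- not part of this patch.
 */
#include <linux/highmem.h>
#include <asm/cachetype.h>
#include <asm/cacheflush.h>

static void example_flush_highmem_page(struct page *page)
{
	if (!PageHighMem(page)) {
		/* Lowmem is permanently mapped: flush through its linear address. */
		__cpuc_flush_dcache_area(page_address(page), PAGE_SIZE);
		return;
	}

	if (cache_is_vipt()) {
		pte_t saved_pte;
		void *vaddr;

		/*
		 * Borrow the per-CPU KM_L1_CACHE fixmap slot.  The previous
		 * PTE comes back in saved_pte so that, if this code itself
		 * interrupted another user of the slot, the interrupted
		 * mapping can be restored on unmap.
		 */
		vaddr = kmap_high_l1_vipt(page, &saved_pte);
		__cpuc_flush_dcache_area(vaddr, PAGE_SIZE);
		kunmap_high_l1_vipt(page, saved_pte);
	}
	/*
	 * With a VIVT cache nothing is needed here: the page was already
	 * flushed when it was last unmapped (see the comment in the patch).
	 */
}

Nesting behaves as the comment in the patch describes: if an interrupt performs the same maintenance on another page between the kmap and kunmap, the per-CPU depth counter and saved_pte ensure the interrupted fixmap entry is restored, while at depth 0 the last mapping is deliberately left in place so a repeat on the same page avoids a TLB flush.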
