Diffstat (limited to 'arch/tile/mm/highmem.c')

 -rw-r--r--  arch/tile/mm/highmem.c | 328
 1 file changed, 328 insertions, 0 deletions
diff --git a/arch/tile/mm/highmem.c b/arch/tile/mm/highmem.c
new file mode 100644
index 000000000000..ff1cdff5114d
--- /dev/null
+++ b/arch/tile/mm/highmem.c
@@ -0,0 +1,328 @@
/*
 * Copyright 2010 Tilera Corporation. All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation, version 2.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
 * NON INFRINGEMENT.  See the GNU General Public License for
 * more details.
 */

#include <linux/highmem.h>
#include <linux/module.h>
#include <linux/pagemap.h>
#include <asm/homecache.h>

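/*
 * Walk the kernel page tables (pgd -> pud -> pmd -> pte) in init_mm to
 * find the PTE that maps a given kmap/fixmap virtual address.  Those
 * tables are populated at boot, so the walk cannot fail.
 */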
#define kmap_get_pte(vaddr) \
	pte_offset_kernel(pmd_offset(pud_offset(pgd_offset_k(vaddr), (vaddr)),\
		(vaddr)), (vaddr))

void *kmap(struct page *page)
{
	void *kva;
	unsigned long flags;
	pte_t *ptep;

	might_sleep();
	if (!PageHighMem(page))
		return page_address(page);
	kva = kmap_high(page);

	/*
	 * Rewrite the PTE under the lock.  This ensures that the page
	 * is not currently migrating.
	 */
	ptep = kmap_get_pte((unsigned long)kva);
	flags = homecache_kpte_lock();
	set_pte_at(&init_mm, kva, ptep, mk_pte(page, page_to_kpgprot(page)));
	homecache_kpte_unlock(flags);

	return kva;
}
EXPORT_SYMBOL(kmap);
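
/*
 * Usage sketch (hypothetical caller): kmap() may sleep, so it is only
 * valid in process context, and each call must be balanced by kunmap()
 * on the same page:
 *
 *	char *p = kmap(page);
 *	memset(p, 0, PAGE_SIZE);
 *	kunmap(page);
 */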

void kunmap(struct page *page)
{
	if (in_interrupt())
		BUG();
	if (!PageHighMem(page))
		return;
	kunmap_high(page);
}
EXPORT_SYMBOL(kunmap);

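/*
 * Sanity-check (under CONFIG_DEBUG_HIGHMEM) that the km_type slot a
 * caller picked matches its context, e.g. that the KM_IRQ* and
 * KM_SOFTIRQ* slots are only used with interrupts suitably disabled.
 * To limit log spam, at most ten warnings are ever issued.
 */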
static void debug_kmap_atomic_prot(enum km_type type)
{
#ifdef CONFIG_DEBUG_HIGHMEM
	static unsigned warn_count = 10;

	if (unlikely(warn_count == 0))
		return;

	if (unlikely(in_interrupt())) {
		if (in_irq()) {
			if (type != KM_IRQ0 && type != KM_IRQ1 &&
			    type != KM_BIO_SRC_IRQ &&
			    /* type != KM_BIO_DST_IRQ && */
			    type != KM_BOUNCE_READ) {
				WARN_ON(1);
				warn_count--;
			}
		} else if (!irqs_disabled()) {	/* softirq */
			if (type != KM_IRQ0 && type != KM_IRQ1 &&
			    type != KM_SOFTIRQ0 && type != KM_SOFTIRQ1 &&
			    type != KM_SKB_SUNRPC_DATA &&
			    type != KM_SKB_DATA_SOFTIRQ &&
			    type != KM_BOUNCE_READ) {
				WARN_ON(1);
				warn_count--;
			}
		}
	}

	if (type == KM_IRQ0 || type == KM_IRQ1 || type == KM_BOUNCE_READ ||
	    type == KM_BIO_SRC_IRQ /* || type == KM_BIO_DST_IRQ */) {
		if (!irqs_disabled()) {
			WARN_ON(1);
			warn_count--;
		}
	} else if (type == KM_SOFTIRQ0 || type == KM_SOFTIRQ1) {
		if (irq_count() == 0 && !irqs_disabled()) {
			WARN_ON(1);
			warn_count--;
		}
	}
#endif
}

/*
 * Describe a single atomic mapping of a page on a given cpu at a
 * given address, and allow it to be linked into a list.
 */
struct atomic_mapped_page {
	struct list_head list;
	struct page *page;
	int cpu;
	unsigned long va;
};

static spinlock_t amp_lock = __SPIN_LOCK_UNLOCKED(&amp_lock);
static struct list_head amp_list = LIST_HEAD_INIT(amp_list);

/*
 * Combining this structure with a per-cpu declaration lets us give
 * each cpu an atomic_mapped_page structure per type.
 */
struct kmap_amps {
	struct atomic_mapped_page per_type[KM_TYPE_NR];
};
static DEFINE_PER_CPU(struct kmap_amps, amps);
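
/*
 * Each cpu thus owns KM_TYPE_NR atomic-kmap slots; the fixmap index
 * computed below in kmap_atomic_prot() is "type + KM_TYPE_NR * cpu",
 * so the per_type[] entry here always describes the matching fixmap
 * slot for this cpu.
 */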

/*
 * Add a page and va, on this cpu, to the list of kmap_atomic pages,
 * and write the new pte to memory.  Writing the new PTE under the
 * lock guarantees that it is either on the list before migration starts
 * (if we won the race), or set_pte() sets the migrating bit in the PTE
 * (if we lost the race).  And doing it under the lock guarantees
 * that when kmap_atomic_fix_one_kpte() comes along, it finds a valid
 * PTE in memory, iff the mapping is still on the amp_list.
 *
 * Finally, doing it under the lock lets us safely examine the page
 * to see if it is immutable or not, for the generic kmap_atomic() case.
 * If we examine it earlier we are exposed to a race where it looks
 * writable earlier, but becomes immutable before we write the PTE.
 */
static void kmap_atomic_register(struct page *page, enum km_type type,
				 unsigned long va, pte_t *ptep, pte_t pteval)
{
	unsigned long flags;
	struct atomic_mapped_page *amp;

	flags = homecache_kpte_lock();
	spin_lock(&amp_lock);

	/* With interrupts disabled, now fill in the per-cpu info. */
	amp = &__get_cpu_var(amps).per_type[type];
	amp->page = page;
	amp->cpu = smp_processor_id();
	amp->va = va;

	/* For generic kmap_atomic(), choose the PTE writability now. */
	if (!pte_read(pteval))
		pteval = mk_pte(page, page_to_kpgprot(page));

	list_add(&amp->list, &amp_list);
	set_pte(ptep, pteval);
	arch_flush_lazy_mmu_mode();

	spin_unlock(&amp_lock);
	homecache_kpte_unlock(flags);
}

/*
 * Remove a page and va, on this cpu, from the list of kmap_atomic pages.
 * Linear-time search, but we count on the lists being short.
 * We don't need to adjust the PTE under the lock (as opposed to the
 * kmap_atomic_register() case), since we're just unconditionally
 * zeroing the PTE after it's off the list.
 */
static void kmap_atomic_unregister(struct page *page, unsigned long va)
{
	unsigned long flags;
	struct atomic_mapped_page *amp;
	int cpu = smp_processor_id();
	spin_lock_irqsave(&amp_lock, flags);
	list_for_each_entry(amp, &amp_list, list) {
		if (amp->page == page && amp->cpu == cpu && amp->va == va)
			break;
	}
	BUG_ON(&amp->list == &amp_list);
	list_del(&amp->list);
	spin_unlock_irqrestore(&amp_lock, flags);
}

/* Helper routine for kmap_atomic_fix_kpte(), below. */
static void kmap_atomic_fix_one_kpte(struct atomic_mapped_page *amp,
				     int finished)
{
	pte_t *ptep = kmap_get_pte(amp->va);
	if (!finished) {
		set_pte(ptep, pte_mkmigrate(*ptep));
		flush_remote(0, 0, NULL, amp->va, PAGE_SIZE, PAGE_SIZE,
			     cpumask_of(amp->cpu), NULL, 0);
	} else {
		/*
		 * Rewrite a default kernel PTE for this page.
		 * We rely on the fact that set_pte() writes the
		 * present+migrating bits last.
		 */
		pte_t pte = mk_pte(amp->page, page_to_kpgprot(amp->page));
		set_pte(ptep, pte);
	}
}

/*
 * This routine is a helper function for homecache_fix_kpte(); see
 * its comments for more information on the "finished" argument here.
 *
 * Note that we hold the lock while doing the remote flushes, which
 * will stall any unrelated cpus trying to do kmap_atomic operations.
 * We could just update the PTEs under the lock, and save away copies
 * of the structs (or just the va+cpu), then flush them after we
 * release the lock, but it seems easier just to do it all under the lock.
 */
void kmap_atomic_fix_kpte(struct page *page, int finished)
{
	struct atomic_mapped_page *amp;
	unsigned long flags;
	spin_lock_irqsave(&amp_lock, flags);
	list_for_each_entry(amp, &amp_list, list) {
		if (amp->page == page)
			kmap_atomic_fix_one_kpte(amp, finished);
	}
	spin_unlock_irqrestore(&amp_lock, flags);
}
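
/*
 * In other words, migration of a highmem page proceeds in two phases:
 * first each cpu's kmap PTE for the page is marked migrating and its
 * TLB entry flushed (finished == 0), then once the page has moved, a
 * default kernel PTE is written back so that any still-active atomic
 * kmaps of the page keep working (finished != 0).
 */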

/*
 * kmap_atomic/kunmap_atomic is significantly faster than kmap/kunmap
 * because the kmap code must perform a global TLB invalidation when
 * the kmap pool wraps.
 *
 * Note that they may be slower than on x86 (etc.) because unlike on
 * those platforms, we do have to take a global lock to map and unmap
 * pages on Tile (see above).
 *
 * When holding an atomic kmap it is not legal to sleep, so atomic
 * kmaps are appropriate for short, tight code paths only.
 */
void *kmap_atomic_prot(struct page *page, enum km_type type, pgprot_t prot)
{
	enum fixed_addresses idx;
	unsigned long vaddr;
	pte_t *pte;

	/* even !CONFIG_PREEMPT needs this, for in_atomic in do_page_fault */
	pagefault_disable();

	/* Avoid icache flushes by disallowing atomic executable mappings. */
	BUG_ON(pte_exec(prot));

	if (!PageHighMem(page))
		return page_address(page);

	debug_kmap_atomic_prot(type);

	idx = type + KM_TYPE_NR*smp_processor_id();
	vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
	pte = kmap_get_pte(vaddr);
	BUG_ON(!pte_none(*pte));

	/* Register that this page is mapped atomically on this cpu. */
	kmap_atomic_register(page, type, vaddr, pte, mk_pte(page, prot));

	return (void *)vaddr;
}
EXPORT_SYMBOL(kmap_atomic_prot);

void *kmap_atomic(struct page *page, enum km_type type)
{
	/* PAGE_NONE is a magic value that tells us to check immutability. */
	return kmap_atomic_prot(page, type, PAGE_NONE);
}
EXPORT_SYMBOL(kmap_atomic);
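
/*
 * Usage sketch (hypothetical caller): atomic kmaps are usable from any
 * context, including interrupts, but must not sleep between map and
 * unmap, and the same km_type slot must be passed to both calls:
 *
 *	void *p = kmap_atomic(page, KM_USER0);
 *	memcpy(buf, p, PAGE_SIZE);
 *	kunmap_atomic(p, KM_USER0);
 */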

void kunmap_atomic(void *kvaddr, enum km_type type)
{
	unsigned long vaddr = (unsigned long) kvaddr & PAGE_MASK;
	enum fixed_addresses idx = type + KM_TYPE_NR*smp_processor_id();

	/*
	 * Force other mappings to Oops if they try to access this pte without
	 * first remapping it.  Keeping stale mappings around is a bad idea.
	 */
	if (vaddr == __fix_to_virt(FIX_KMAP_BEGIN+idx)) {
		pte_t *pte = kmap_get_pte(vaddr);
		pte_t pteval = *pte;
		BUG_ON(!pte_present(pteval) && !pte_migrating(pteval));
		kmap_atomic_unregister(pte_page(pteval), vaddr);
		kpte_clear_flush(pte, vaddr);
	} else {
		/* Must be a lowmem page */
		BUG_ON(vaddr < PAGE_OFFSET);
		BUG_ON(vaddr >= (unsigned long)high_memory);
	}

	arch_flush_lazy_mmu_mode();
	pagefault_enable();
}
EXPORT_SYMBOL(kunmap_atomic);

/*
 * This API is supposed to allow us to map memory without a "struct page".
 * Currently we don't support this, though this may change in the future.
 */
void *kmap_atomic_pfn(unsigned long pfn, enum km_type type)
{
	return kmap_atomic(pfn_to_page(pfn), type);
}
void *kmap_atomic_prot_pfn(unsigned long pfn, enum km_type type, pgprot_t prot)
{
	return kmap_atomic_prot(pfn_to_page(pfn), type, prot);
}

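/*
 * Translate a kernel virtual address back to its struct page: lowmem
 * addresses map directly via virt_to_page(), while fixmap (kmap_atomic)
 * addresses are resolved by reading the PTE that currently maps them.
 */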
struct page *kmap_atomic_to_page(void *ptr)
{
	pte_t *pte;
	unsigned long vaddr = (unsigned long)ptr;

	if (vaddr < FIXADDR_START)
		return virt_to_page(ptr);

	pte = kmap_get_pte(vaddr);
	return pte_page(*pte);
}
