Diffstat (limited to 'arch/tile/mm/highmem.c')
-rw-r--r--  arch/tile/mm/highmem.c | 86
1 file changed, 24 insertions(+), 62 deletions(-)
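Read as a whole, the patch below appears to convert this file from the old km_type-based atomic kmap interface to the stack-based one: callers stop passing an explicit slot, and the per-cpu slot index is pushed and popped internally instead. Reconstructed from the hunks that follow, the externally visible prototypes change roughly like this (a summary for orientation, not code taken from the patch itself):

/* before */
void *kmap_atomic_prot(struct page *page, enum km_type type, pgprot_t prot);
void *kmap_atomic(struct page *page, enum km_type type);
void kunmap_atomic_notypecheck(void *kvaddr, enum km_type type);
void *kmap_atomic_pfn(unsigned long pfn, enum km_type type);
void *kmap_atomic_prot_pfn(unsigned long pfn, enum km_type type, pgprot_t prot);

/* after */
void *kmap_atomic_prot(struct page *page, pgprot_t prot);
void *__kmap_atomic(struct page *page);
void __kunmap_atomic(void *kvaddr);
void *kmap_atomic_pfn(unsigned long pfn);
void *kmap_atomic_prot_pfn(unsigned long pfn, pgprot_t prot);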
diff --git a/arch/tile/mm/highmem.c b/arch/tile/mm/highmem.c
index 12ab137e7d4f..abb57331cf6e 100644
--- a/arch/tile/mm/highmem.c
+++ b/arch/tile/mm/highmem.c
@@ -56,50 +56,6 @@ void kunmap(struct page *page)
 }
 EXPORT_SYMBOL(kunmap);
 
-static void debug_kmap_atomic_prot(enum km_type type)
-{
-#ifdef CONFIG_DEBUG_HIGHMEM
-	static unsigned warn_count = 10;
-
-	if (unlikely(warn_count == 0))
-		return;
-
-	if (unlikely(in_interrupt())) {
-		if (in_irq()) {
-			if (type != KM_IRQ0 && type != KM_IRQ1 &&
-			    type != KM_BIO_SRC_IRQ &&
-			    /* type != KM_BIO_DST_IRQ && */
-			    type != KM_BOUNCE_READ) {
-				WARN_ON(1);
-				warn_count--;
-			}
-		} else if (!irqs_disabled()) {	/* softirq */
-			if (type != KM_IRQ0 && type != KM_IRQ1 &&
-			    type != KM_SOFTIRQ0 && type != KM_SOFTIRQ1 &&
-			    type != KM_SKB_SUNRPC_DATA &&
-			    type != KM_SKB_DATA_SOFTIRQ &&
-			    type != KM_BOUNCE_READ) {
-				WARN_ON(1);
-				warn_count--;
-			}
-		}
-	}
-
-	if (type == KM_IRQ0 || type == KM_IRQ1 || type == KM_BOUNCE_READ ||
-	    type == KM_BIO_SRC_IRQ /* || type == KM_BIO_DST_IRQ */) {
-		if (!irqs_disabled()) {
-			WARN_ON(1);
-			warn_count--;
-		}
-	} else if (type == KM_SOFTIRQ0 || type == KM_SOFTIRQ1) {
-		if (irq_count() == 0 && !irqs_disabled()) {
-			WARN_ON(1);
-			warn_count--;
-		}
-	}
-#endif
-}
-
 /*
  * Describe a single atomic mapping of a page on a given cpu at a
  * given address, and allow it to be linked into a list.
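The hunk above drops debug_kmap_atomic_prot() entirely. Its only job was to warn when a caller picked a km_type slot that did not match its context (hard IRQ, softirq, and so on); once slots are handed out from a per-cpu stack rather than chosen by the caller, that whole class of bug disappears and a single sanity check at push time suffices. A minimal sketch of such a push-side check, assuming a per-cpu counter; the example_* names are illustrative and are not the generic linux/highmem.h implementation:

#include <linux/percpu.h>
#include <linux/hardirq.h>
#include <linux/bug.h>
#include <asm/kmap_types.h>

static DEFINE_PER_CPU(int, example_kmap_idx);

static inline int example_kmap_atomic_idx_push(void)
{
	/* Take the next free fixmap slot for this cpu. */
	int idx = __this_cpu_inc_return(example_kmap_idx) - 1;

#ifdef CONFIG_DEBUG_HIGHMEM
	/* Mapping from hard-IRQ context requires interrupts to stay disabled. */
	WARN_ON_ONCE(in_irq() && !irqs_disabled());
	/* Nesting deeper than the slot count means a kunmap_atomic() is missing. */
	BUG_ON(idx >= KM_TYPE_NR);
#endif
	return idx;
}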
@@ -240,10 +196,10 @@ void kmap_atomic_fix_kpte(struct page *page, int finished)
  * When holding an atomic kmap is is not legal to sleep, so atomic
  * kmaps are appropriate for short, tight code paths only.
  */
-void *kmap_atomic_prot(struct page *page, enum km_type type, pgprot_t prot)
+void *kmap_atomic_prot(struct page *page, pgprot_t prot)
 {
-	enum fixed_addresses idx;
 	unsigned long vaddr;
+	int idx, type;
 	pte_t *pte;
 
 	/* even !CONFIG_PREEMPT needs this, for in_atomic in do_page_fault */
@@ -255,8 +211,7 @@ void *kmap_atomic_prot(struct page *page, enum km_type type, pgprot_t prot)
 	if (!PageHighMem(page))
 		return page_address(page);
 
-	debug_kmap_atomic_prot(type);
-
+	type = kmap_atomic_idx_push();
 	idx = type + KM_TYPE_NR*smp_processor_id();
 	vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
 	pte = kmap_get_pte(vaddr);
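With kmap_atomic_idx_push() supplying the slot, the caller-visible contract becomes purely stack-like: atomic kmaps nest, and they must be released in reverse order because unmapping only ever pops the most recent slot. A hedged illustration of a caller after the conversion; copy_two_pages() is a made-up example, not code from this patch:

#include <linux/highmem.h>
#include <linux/string.h>

/* Copy one highmem page to another using two nested atomic kmaps. */
static void copy_two_pages(struct page *dst, struct page *src)
{
	char *vdst = kmap_atomic(dst);	/* pushes one per-cpu slot */
	char *vsrc = kmap_atomic(src);	/* pushes a second slot on top */

	memcpy(vdst, vsrc, PAGE_SIZE);

	kunmap_atomic(vsrc);		/* unmap in LIFO order */
	kunmap_atomic(vdst);
}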
@@ -269,28 +224,35 @@ void *kmap_atomic_prot(struct page *page, enum km_type type, pgprot_t prot)
 }
 EXPORT_SYMBOL(kmap_atomic_prot);
 
-void *kmap_atomic(struct page *page, enum km_type type)
+void *__kmap_atomic(struct page *page)
 {
 	/* PAGE_NONE is a magic value that tells us to check immutability. */
 	return kmap_atomic_prot(page, type, PAGE_NONE);
 }
-EXPORT_SYMBOL(kmap_atomic);
+EXPORT_SYMBOL(__kmap_atomic);
 
-void kunmap_atomic_notypecheck(void *kvaddr, enum km_type type)
+void __kunmap_atomic(void *kvaddr)
 {
 	unsigned long vaddr = (unsigned long) kvaddr & PAGE_MASK;
-	enum fixed_addresses idx = type + KM_TYPE_NR*smp_processor_id();
 
-	/*
-	 * Force other mappings to Oops if they try to access this pte without
-	 * first remapping it. Keeping stale mappings around is a bad idea.
-	 */
-	if (vaddr == __fix_to_virt(FIX_KMAP_BEGIN+idx)) {
+	if (vaddr >= __fix_to_virt(FIX_KMAP_END) &&
+	    vaddr <= __fix_to_virt(FIX_KMAP_BEGIN)) {
 		pte_t *pte = kmap_get_pte(vaddr);
 		pte_t pteval = *pte;
+		int idx, type;
+
+		type = kmap_atomic_idx();
+		idx = type + KM_TYPE_NR*smp_processor_id();
+
+		/*
+		 * Force other mappings to Oops if they try to access this pte
+		 * without first remapping it. Keeping stale mappings around
+		 * is a bad idea.
+		 */
 		BUG_ON(!pte_present(pteval) && !pte_migrating(pteval));
 		kmap_atomic_unregister(pte_page(pteval), vaddr);
 		kpte_clear_flush(pte, vaddr);
+		kmap_atomic_idx_pop();
 	} else {
 		/* Must be a lowmem page */
 		BUG_ON(vaddr < PAGE_OFFSET);
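On the unmap side, __kunmap_atomic() above no longer trusts a caller-supplied type: it recovers the active slot with kmap_atomic_idx(), releases it with kmap_atomic_idx_pop(), and recognizes atomic mappings by address range (FIX_KMAP_END up to FIX_KMAP_BEGIN) rather than recomputing a single expected fixmap address. The pop side pairing with the earlier push sketch would be roughly as follows (again using hypothetical example_* names, not the generic helpers):

static inline int example_kmap_atomic_idx(void)
{
	/* Index of the most recently pushed (still mapped) slot. */
	return __this_cpu_read(example_kmap_idx) - 1;
}

static inline void example_kmap_atomic_idx_pop(void)
{
	/* Release the top-of-stack slot once its PTE has been cleared. */
	__this_cpu_dec(example_kmap_idx);
}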
@@ -300,19 +262,19 @@ void kunmap_atomic_notypecheck(void *kvaddr, enum km_type type)
 	arch_flush_lazy_mmu_mode();
 	pagefault_enable();
 }
-EXPORT_SYMBOL(kunmap_atomic_notypecheck);
+EXPORT_SYMBOL(__kunmap_atomic);
 
 /*
  * This API is supposed to allow us to map memory without a "struct page".
  * Currently we don't support this, though this may change in the future.
  */
-void *kmap_atomic_pfn(unsigned long pfn, enum km_type type)
+void *kmap_atomic_pfn(unsigned long pfn)
 {
-	return kmap_atomic(pfn_to_page(pfn), type);
+	return kmap_atomic(pfn_to_page(pfn));
 }
-void *kmap_atomic_prot_pfn(unsigned long pfn, enum km_type type, pgprot_t prot)
+void *kmap_atomic_prot_pfn(unsigned long pfn, pgprot_t prot)
 {
-	return kmap_atomic_prot(pfn_to_page(pfn), type, prot);
+	return kmap_atomic_prot(pfn_to_page(pfn), prot);
 }
 
 struct page *kmap_atomic_to_page(void *ptr)