author		Peter Zijlstra <a.p.zijlstra@chello.nl>	2010-10-26 17:21:51 -0400
committer	Linus Torvalds <torvalds@linux-foundation.org>	2010-10-26 19:52:08 -0400
commit		3e4d3af501cccdc8a8cca41bdbe57d54ad7e7e73 (patch)
tree		2ce507f7ec7275563653e52f18606aba4f99b7f1 /arch/tile/mm
parent		61ecdb801ef2cd28e32442383106d7837d76deac (diff)
mm: stack based kmap_atomic()
Keep the current interface but ignore the KM_type and use a stack based
approach.

The advantage is that we get rid of crappy code like:

	#define __KM_PTE			\
		(in_nmi() ? KM_NMI_PTE : 	\
		 in_irq() ? KM_IRQ_PTE :	\
		 KM_PTE0)

and in general can stop worrying about what context we're in and what kmap
slots might be appropriate for that.

The downside is that FRV kmap_atomic() gets more expensive.

For now we use a CPP trick suggested by Andrew:

	#define kmap_atomic(page, args...) __kmap_atomic(page)

to avoid having to touch all kmap_atomic() users in a single patch.

[ not compiled on:
  - mn10300: the arch doesn't actually build with highmem
    to begin with ]

[akpm@linux-foundation.org: coding-style fixes]
[akpm@linux-foundation.org: fix up drivers/gpu/drm/i915/intel_overlay.c]
Acked-by: Rik van Riel <riel@redhat.com>
Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
Acked-by: Chris Metcalf <cmetcalf@tilera.com>
Cc: David Howells <dhowells@redhat.com>
Cc: Hugh Dickins <hughd@google.com>
Cc: Ingo Molnar <mingo@elte.hu>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: "H. Peter Anvin" <hpa@zytor.com>
Cc: Steven Rostedt <rostedt@goodmis.org>
Cc: Russell King <rmk@arm.linux.org.uk>
Cc: Ralf Baechle <ralf@linux-mips.org>
Cc: David Miller <davem@davemloft.net>
Cc: Paul Mackerras <paulus@samba.org>
Cc: Benjamin Herrenschmidt <benh@kernel.crashing.org>
Cc: Dave Airlie <airlied@linux.ie>
Cc: Li Zefan <lizf@cn.fujitsu.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
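For reference, the kmap_atomic_idx_push()/kmap_atomic_idx_pop() helpers that
the converted code below relies on amount to a per-cpu stack counter. A
minimal sketch of the idea, not the literal implementation (the real helpers
live in include/linux/highmem.h and carry extra CONFIG_DEBUG_HIGHMEM sanity
checks; the per-cpu variable name here is illustrative):

	/*
	 * One slot counter per cpu.  kmap_atomic() runs with pagefaults
	 * (and thus preemption) disabled, so the counter cannot change
	 * under us and plain increments/decrements are safe.
	 */
	DECLARE_PER_CPU(int, __kmap_atomic_idx);

	static inline int kmap_atomic_idx_push(void)
	{
		/* Claim the next free fixmap slot on this cpu. */
		int idx = __get_cpu_var(__kmap_atomic_idx)++;

		BUG_ON(idx >= KM_TYPE_NR);	/* nested too deeply */
		return idx;
	}

	static inline int kmap_atomic_idx_pop(void)
	{
		/* Release the most recently claimed slot (LIFO). */
		int idx = --__get_cpu_var(__kmap_atomic_idx);

		BUG_ON(idx < 0);		/* unbalanced pop */
		return idx;
	}

Unlike the named KM_* slots, the stack approach does assume that nested
atomic kmaps are released in the reverse order they were set up.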
Diffstat (limited to 'arch/tile/mm')
-rw-r--r--	arch/tile/mm/highmem.c	85
1 file changed, 23 insertions, 62 deletions
diff --git a/arch/tile/mm/highmem.c b/arch/tile/mm/highmem.c
index 12ab137e7d4f..8ef6595e162c 100644
--- a/arch/tile/mm/highmem.c
+++ b/arch/tile/mm/highmem.c
@@ -56,50 +56,6 @@ void kunmap(struct page *page)
 }
 EXPORT_SYMBOL(kunmap);
 
-static void debug_kmap_atomic_prot(enum km_type type)
-{
-#ifdef CONFIG_DEBUG_HIGHMEM
-	static unsigned warn_count = 10;
-
-	if (unlikely(warn_count == 0))
-		return;
-
-	if (unlikely(in_interrupt())) {
-		if (in_irq()) {
-			if (type != KM_IRQ0 && type != KM_IRQ1 &&
-			    type != KM_BIO_SRC_IRQ &&
-			    /* type != KM_BIO_DST_IRQ && */
-			    type != KM_BOUNCE_READ) {
-				WARN_ON(1);
-				warn_count--;
-			}
-		} else if (!irqs_disabled()) {	/* softirq */
-			if (type != KM_IRQ0 && type != KM_IRQ1 &&
-			    type != KM_SOFTIRQ0 && type != KM_SOFTIRQ1 &&
-			    type != KM_SKB_SUNRPC_DATA &&
-			    type != KM_SKB_DATA_SOFTIRQ &&
-			    type != KM_BOUNCE_READ) {
-				WARN_ON(1);
-				warn_count--;
-			}
-		}
-	}
-
-	if (type == KM_IRQ0 || type == KM_IRQ1 || type == KM_BOUNCE_READ ||
-	    type == KM_BIO_SRC_IRQ /* || type == KM_BIO_DST_IRQ */) {
-		if (!irqs_disabled()) {
-			WARN_ON(1);
-			warn_count--;
-		}
-	} else if (type == KM_SOFTIRQ0 || type == KM_SOFTIRQ1) {
-		if (irq_count() == 0 && !irqs_disabled()) {
-			WARN_ON(1);
-			warn_count--;
-		}
-	}
-#endif
-}
-
 /*
  * Describe a single atomic mapping of a page on a given cpu at a
  * given address, and allow it to be linked into a list.
@@ -240,10 +196,10 @@ void kmap_atomic_fix_kpte(struct page *page, int finished)
  * When holding an atomic kmap it is not legal to sleep, so atomic
  * kmaps are appropriate for short, tight code paths only.
  */
-void *kmap_atomic_prot(struct page *page, enum km_type type, pgprot_t prot)
+void *kmap_atomic_prot(struct page *page, pgprot_t prot)
 {
-	enum fixed_addresses idx;
 	unsigned long vaddr;
+	int idx, type;
 	pte_t *pte;
 
 	/* even !CONFIG_PREEMPT needs this, for in_atomic in do_page_fault */
@@ -255,8 +211,7 @@ void *kmap_atomic_prot(struct page *page, enum km_type type, pgprot_t prot)
 	if (!PageHighMem(page))
 		return page_address(page);
 
-	debug_kmap_atomic_prot(type);
-
+	type = kmap_atomic_idx_push();
 	idx = type + KM_TYPE_NR*smp_processor_id();
 	vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
 	pte = kmap_get_pte(vaddr);
@@ -269,25 +224,31 @@ void *kmap_atomic_prot(struct page *page, enum km_type type, pgprot_t prot)
 }
 EXPORT_SYMBOL(kmap_atomic_prot);
 
-void *kmap_atomic(struct page *page, enum km_type type)
+void *__kmap_atomic(struct page *page)
 {
 	/* PAGE_NONE is a magic value that tells us to check immutability. */
-	return kmap_atomic_prot(page, type, PAGE_NONE);
+	return kmap_atomic_prot(page, PAGE_NONE);
 }
-EXPORT_SYMBOL(kmap_atomic);
+EXPORT_SYMBOL(__kmap_atomic);
 
-void kunmap_atomic_notypecheck(void *kvaddr, enum km_type type)
+void __kunmap_atomic(void *kvaddr)
 {
 	unsigned long vaddr = (unsigned long) kvaddr & PAGE_MASK;
-	enum fixed_addresses idx = type + KM_TYPE_NR*smp_processor_id();
 
-	/*
-	 * Force other mappings to Oops if they try to access this pte without
-	 * first remapping it. Keeping stale mappings around is a bad idea.
-	 */
-	if (vaddr == __fix_to_virt(FIX_KMAP_BEGIN+idx)) {
+	if (vaddr >= __fix_to_virt(FIX_KMAP_END) &&
+	    vaddr <= __fix_to_virt(FIX_KMAP_BEGIN)) {
 		pte_t *pte = kmap_get_pte(vaddr);
 		pte_t pteval = *pte;
+		int idx, type;
+
+		type = kmap_atomic_idx_pop();
+		idx = type + KM_TYPE_NR*smp_processor_id();
+
+		/*
+		 * Force other mappings to Oops if they try to access this pte
+		 * without first remapping it. Keeping stale mappings around
+		 * is a bad idea.
+		 */
 		BUG_ON(!pte_present(pteval) && !pte_migrating(pteval));
 		kmap_atomic_unregister(pte_page(pteval), vaddr);
 		kpte_clear_flush(pte, vaddr);
@@ -300,19 +261,19 @@ void kunmap_atomic_notypecheck(void *kvaddr, enum km_type type)
 	arch_flush_lazy_mmu_mode();
 	pagefault_enable();
 }
-EXPORT_SYMBOL(kunmap_atomic_notypecheck);
+EXPORT_SYMBOL(__kunmap_atomic);
 
 /*
  * This API is supposed to allow us to map memory without a "struct page".
  * Currently we don't support this, though this may change in the future.
  */
-void *kmap_atomic_pfn(unsigned long pfn, enum km_type type)
+void *kmap_atomic_pfn(unsigned long pfn)
 {
-	return kmap_atomic(pfn_to_page(pfn), type);
+	return kmap_atomic(pfn_to_page(pfn));
 }
-void *kmap_atomic_prot_pfn(unsigned long pfn, enum km_type type, pgprot_t prot)
+void *kmap_atomic_prot_pfn(unsigned long pfn, pgprot_t prot)
 {
-	return kmap_atomic_prot(pfn_to_page(pfn), type, prot);
+	return kmap_atomic_prot(pfn_to_page(pfn), prot);
 }
 
 struct page *kmap_atomic_to_page(void *ptr)
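For callers, the args-eating kmap_atomic() macro quoted in the commit message
means existing two-argument call sites keep compiling while the slot argument
is simply ignored; a kunmap_atomic() wrapper in the same style is assumed for
the unmap side. A hypothetical caller before and after the conversion (buf
and len are illustrative):

	/* old style: the caller picked a KM_* slot for its context */
	void *vaddr = kmap_atomic(page, KM_USER0);
	memcpy(buf, vaddr, len);
	kunmap_atomic(vaddr, KM_USER0);

	/* new style: the slot is tracked on the per-cpu stack instead */
	void *vaddr = kmap_atomic(page);
	memcpy(buf, vaddr, len);
	kunmap_atomic(vaddr);

The design point is that correctness no longer depends on each caller naming
the right slot for its execution context; the one remaining rule is that
nested mappings are released in reverse order of acquisition.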