author		Peter Zijlstra <a.p.zijlstra@chello.nl>	2010-10-26 17:21:51 -0400
committer	Linus Torvalds <torvalds@linux-foundation.org>	2010-10-26 19:52:08 -0400
commit		3e4d3af501cccdc8a8cca41bdbe57d54ad7e7e73 (patch)
tree		2ce507f7ec7275563653e52f18606aba4f99b7f1 /arch/mips/mm
parent		61ecdb801ef2cd28e32442383106d7837d76deac (diff)
mm: stack based kmap_atomic()
Keep the current interface but ignore the KM_type and use a stack based
approach.

The advantage is that we get rid of crappy code like:

	#define __KM_PTE			\
		(in_nmi() ? KM_NMI_PTE :	\
		 in_irq() ? KM_IRQ_PTE :	\
		 KM_PTE0)

and in general can stop worrying about what context we're in and what kmap
slots might be appropriate for that.

The downside is that FRV kmap_atomic() gets more expensive.

For now we use a CPP trick suggested by Andrew:

	#define kmap_atomic(page, args...) __kmap_atomic(page)

to avoid having to touch all kmap_atomic() users in a single patch.

[ not compiled on:
  - mn10300: the arch doesn't actually build with highmem
    to begin with ]

[akpm@linux-foundation.org: coding-style fixes]
[akpm@linux-foundation.org: fix up drivers/gpu/drm/i915/intel_overlay.c]
Acked-by: Rik van Riel <riel@redhat.com>
Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
Acked-by: Chris Metcalf <cmetcalf@tilera.com>
Cc: David Howells <dhowells@redhat.com>
Cc: Hugh Dickins <hughd@google.com>
Cc: Ingo Molnar <mingo@elte.hu>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: "H. Peter Anvin" <hpa@zytor.com>
Cc: Steven Rostedt <rostedt@goodmis.org>
Cc: Russell King <rmk@arm.linux.org.uk>
Cc: Ralf Baechle <ralf@linux-mips.org>
Cc: David Miller <davem@davemloft.net>
Cc: Paul Mackerras <paulus@samba.org>
Cc: Benjamin Herrenschmidt <benh@kernel.crashing.org>
Cc: Dave Airlie <airlied@linux.ie>
Cc: Li Zefan <lizf@cn.fujitsu.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
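For reference, the stack discipline behind the kmap_atomic_idx_push() and
kmap_atomic_idx_pop() calls in the diff below amounts to a per-CPU stack of
fixmap slot indices. A minimal sketch, assuming a per-CPU counter named
__kmap_atomic_idx; the real helpers live elsewhere in this series (in
include/linux/highmem.h) and may differ in detail:

	DECLARE_PER_CPU(int, __kmap_atomic_idx);

	static inline int kmap_atomic_idx_push(void)
	{
		/*
		 * Preemption is already off here (the call sites do
		 * pagefault_disable() first), so bumping the per-CPU
		 * counter is safe.
		 */
		int idx = __get_cpu_var(__kmap_atomic_idx)++;

		BUG_ON(idx >= KM_TYPE_NR);	/* nested too deeply */
		return idx;
	}

	static inline int kmap_atomic_idx_pop(void)
	{
		/* LIFO: release the most recently claimed slot */
		int idx = --__get_cpu_var(__kmap_atomic_idx);

		BUG_ON(idx < 0);		/* unbalanced kunmap_atomic() */
		return idx;
	}

Each nested kmap_atomic() simply claims the next slot up and each
kunmap_atomic() releases the top one, which is why callers no longer need
to know what context they run in.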
Diffstat (limited to 'arch/mips/mm')
-rw-r--r--  arch/mips/mm/highmem.c | 50 +++++++++++++++++++++++++++++++-----------------------
1 file changed, 27 insertions(+), 23 deletions(-)
diff --git a/arch/mips/mm/highmem.c b/arch/mips/mm/highmem.c
index 6a2b1bf9ef11..1e69b1fb4b85 100644
--- a/arch/mips/mm/highmem.c
+++ b/arch/mips/mm/highmem.c
@@ -9,7 +9,7 @@ static pte_t *kmap_pte;
 
 unsigned long highstart_pfn, highend_pfn;
 
-void *__kmap(struct page *page)
+void *kmap(struct page *page)
 {
 	void *addr;
 
@@ -21,16 +21,16 @@ void *__kmap(struct page *page)
 
 	return addr;
 }
-EXPORT_SYMBOL(__kmap);
+EXPORT_SYMBOL(kmap);
 
-void __kunmap(struct page *page)
+void kunmap(struct page *page)
 {
 	BUG_ON(in_interrupt());
 	if (!PageHighMem(page))
 		return;
 	kunmap_high(page);
 }
-EXPORT_SYMBOL(__kunmap);
+EXPORT_SYMBOL(kunmap);
 
 /*
  * kmap_atomic/kunmap_atomic is significantly faster than kmap/kunmap because
@@ -41,17 +41,17 @@ EXPORT_SYMBOL(__kunmap);
  * kmaps are appropriate for short, tight code paths only.
  */
 
-void *__kmap_atomic(struct page *page, enum km_type type)
+void *__kmap_atomic(struct page *page)
 {
-	enum fixed_addresses idx;
 	unsigned long vaddr;
+	int idx, type;
 
 	/* even !CONFIG_PREEMPT needs this, for in_atomic in do_page_fault */
 	pagefault_disable();
 	if (!PageHighMem(page))
 		return page_address(page);
 
-	debug_kmap_atomic(type);
+	type = kmap_atomic_idx_push();
 	idx = type + KM_TYPE_NR*smp_processor_id();
 	vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
 #ifdef CONFIG_DEBUG_HIGHMEM
@@ -64,43 +64,47 @@ void *__kmap_atomic(struct page *page, enum km_type type)
 }
 EXPORT_SYMBOL(__kmap_atomic);
 
-void __kunmap_atomic_notypecheck(void *kvaddr, enum km_type type)
+void __kunmap_atomic(void *kvaddr)
 {
-#ifdef CONFIG_DEBUG_HIGHMEM
 	unsigned long vaddr = (unsigned long) kvaddr & PAGE_MASK;
-	enum fixed_addresses idx = type + KM_TYPE_NR*smp_processor_id();
+	int type;
 
 	if (vaddr < FIXADDR_START) { // FIXME
 		pagefault_enable();
 		return;
 	}
 
-	BUG_ON(vaddr != __fix_to_virt(FIX_KMAP_BEGIN + idx));
+	type = kmap_atomic_idx_pop();
+#ifdef CONFIG_DEBUG_HIGHMEM
+	{
+		int idx = type + KM_TYPE_NR * smp_processor_id();
 
-	/*
-	 * force other mappings to Oops if they'll try to access
-	 * this pte without first remap it
-	 */
-	pte_clear(&init_mm, vaddr, kmap_pte-idx);
-	local_flush_tlb_one(vaddr);
-#endif
+		BUG_ON(vaddr != __fix_to_virt(FIX_KMAP_BEGIN + idx));
 
+		/*
+		 * force other mappings to Oops if they'll try to access
+		 * this pte without first remap it
+		 */
+		pte_clear(&init_mm, vaddr, kmap_pte-idx);
+		local_flush_tlb_one(vaddr);
+	}
+#endif
 	pagefault_enable();
 }
-EXPORT_SYMBOL(__kunmap_atomic_notypecheck);
+EXPORT_SYMBOL(__kunmap_atomic);
 
 /*
  * This is the same as kmap_atomic() but can map memory that doesn't
  * have a struct page associated with it.
  */
-void *kmap_atomic_pfn(unsigned long pfn, enum km_type type)
+void *kmap_atomic_pfn(unsigned long pfn)
 {
-	enum fixed_addresses idx;
 	unsigned long vaddr;
+	int idx, type;
 
 	pagefault_disable();
 
-	debug_kmap_atomic(type);
+	type = kmap_atomic_idx_push();
 	idx = type + KM_TYPE_NR*smp_processor_id();
 	vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
 	set_pte(kmap_pte-idx, pfn_pte(pfn, PAGE_KERNEL));
@@ -109,7 +113,7 @@ void *kmap_atomic_pfn(unsigned long pfn, enum km_type type)
 	return (void*) vaddr;
 }
 
-struct page *__kmap_atomic_to_page(void *ptr)
+struct page *kmap_atomic_to_page(void *ptr)
 {
 	unsigned long idx, vaddr = (unsigned long)ptr;
 	pte_t *pte;
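With the enum km_type argument gone, callers no longer pick a KM_* slot by
hand; nesting (process context interrupted by an IRQ, for example) just
works, because each level pushes and pops its own per-CPU slot. A hedged
usage sketch (the helper below is hypothetical, not part of this patch),
relying on the kmap_atomic(page, args...) forwarding macro from the
changelog and its kunmap_atomic() counterpart:

	/* copy a fragment of a (possibly highmem) page; safe in any context */
	static void copy_from_page(void *dst, struct page *page,
				   unsigned int offset, unsigned int len)
	{
		void *vaddr = kmap_atomic(page);	/* pushes a slot */

		memcpy(dst, vaddr + offset, len);
		kunmap_atomic(vaddr);			/* pops it again */
	}

The one rule the stack imposes is LIFO ordering: on a given CPU, atomic
mappings must be undone in the reverse order they were taken.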