author      Benjamin Herrenschmidt <benh@kernel.crashing.org>    2009-06-18 15:25:00 -0400
committer   Benjamin Herrenschmidt <benh@kernel.crashing.org>    2009-06-26 00:37:25 -0400
commit      850f6ac316cf84bba63fdb775c897834eccbfaa3 (patch)
tree        1a14fa9a1ee611f9ab280b945c8ed95e4b9210eb    /arch/powerpc/include/asm/highmem.h
parent      85355bb272db31a3f2dd99d547eef794805e1319 (diff)
powerpc/mm: Make k(un)map_atomic out of line
Those functions are way too big to be inline. Besides, kmap_atomic()
wants to call debug_kmap_atomic(), which isn't exported to modules
and therefore causes module link failures.
Signed-off-by: Benjamin Herrenschmidt <benh@kernel.crashing.org>
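For context, a minimal sketch of the out-of-line arrangement this commit creates: the bodies removed from highmem.h (shown in the diff below) are assumed to move essentially unchanged into a separate compilation unit and to be exported, so that modules using kmap_atomic() link against real kernel symbols instead of pulling in debug_kmap_atomic() themselves. The file name arch/powerpc/mm/highmem.c is an assumption here, since this page's diffstat is limited to the header.

    /* Sketch only -- assumed arch/powerpc/mm/highmem.c, not part of the
     * diffstat shown below; bodies taken from the header removals.
     */
    #include <linux/module.h>
    #include <linux/highmem.h>

    void *kmap_atomic_prot(struct page *page, enum km_type type, pgprot_t prot)
    {
            unsigned int idx;
            unsigned long vaddr;

            /* even !CONFIG_PREEMPT needs this, for in_atomic in do_page_fault */
            pagefault_disable();
            if (!PageHighMem(page))
                    return page_address(page);

            /* debug_kmap_atomic() now runs in built-in code only, so it no
             * longer needs to be exported to modules */
            debug_kmap_atomic(type);
            idx = type + KM_TYPE_NR * smp_processor_id();
            vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
            __set_pte_at(&init_mm, vaddr, kmap_pte - idx, mk_pte(page, prot), 1);
            local_flush_tlb_page(NULL, vaddr);

            return (void *)vaddr;
    }
    EXPORT_SYMBOL(kmap_atomic_prot);

    void kunmap_atomic(void *kvaddr, enum km_type type)
    {
            /* the CONFIG_DEBUG_HIGHMEM pte_clear()/TLB-flush checks from the
             * old header version would sit here; only the unconditional part
             * is shown in this sketch */
            pagefault_enable();
    }
    EXPORT_SYMBOL(kunmap_atomic);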
Diffstat (limited to 'arch/powerpc/include/asm/highmem.h')
-rw-r--r--    arch/powerpc/include/asm/highmem.h    57
1 file changed, 4 insertions(+), 53 deletions(-)
diff --git a/arch/powerpc/include/asm/highmem.h b/arch/powerpc/include/asm/highmem.h
index 684a73f4324f..a74c4ee6c020 100644
--- a/arch/powerpc/include/asm/highmem.h
+++ b/arch/powerpc/include/asm/highmem.h
@@ -22,9 +22,7 @@

 #ifdef __KERNEL__

-#include <linux/init.h>
 #include <linux/interrupt.h>
-#include <linux/highmem.h>
 #include <asm/kmap_types.h>
 #include <asm/tlbflush.h>
 #include <asm/page.h>
@@ -62,6 +60,9 @@ extern pte_t *pkmap_page_table;

 extern void *kmap_high(struct page *page);
 extern void kunmap_high(struct page *page);
+extern void *kmap_atomic_prot(struct page *page, enum km_type type,
+			      pgprot_t prot);
+extern void kunmap_atomic(void *kvaddr, enum km_type type);

 static inline void *kmap(struct page *page)
 {
@@ -79,62 +80,11 @@ static inline void kunmap(struct page *page)
 	kunmap_high(page);
 }

-/*
- * The use of kmap_atomic/kunmap_atomic is discouraged - kmap/kunmap
- * gives a more generic (and caching) interface. But kmap_atomic can
- * be used in IRQ contexts, so in some (very limited) cases we need
- * it.
- */
-static inline void *kmap_atomic_prot(struct page *page, enum km_type type, pgprot_t prot)
-{
-	unsigned int idx;
-	unsigned long vaddr;
-
-	/* even !CONFIG_PREEMPT needs this, for in_atomic in do_page_fault */
-	pagefault_disable();
-	if (!PageHighMem(page))
-		return page_address(page);
-
-	debug_kmap_atomic(type);
-	idx = type + KM_TYPE_NR*smp_processor_id();
-	vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
-#ifdef CONFIG_DEBUG_HIGHMEM
-	BUG_ON(!pte_none(*(kmap_pte-idx)));
-#endif
-	__set_pte_at(&init_mm, vaddr, kmap_pte-idx, mk_pte(page, prot), 1);
-	local_flush_tlb_page(NULL, vaddr);
-
-	return (void*) vaddr;
-}
-
 static inline void *kmap_atomic(struct page *page, enum km_type type)
 {
 	return kmap_atomic_prot(page, type, kmap_prot);
 }

-static inline void kunmap_atomic(void *kvaddr, enum km_type type)
-{
-#ifdef CONFIG_DEBUG_HIGHMEM
-	unsigned long vaddr = (unsigned long) kvaddr & PAGE_MASK;
-	enum fixed_addresses idx = type + KM_TYPE_NR*smp_processor_id();
-
-	if (vaddr < __fix_to_virt(FIX_KMAP_END)) {
-		pagefault_enable();
-		return;
-	}
-
-	BUG_ON(vaddr != __fix_to_virt(FIX_KMAP_BEGIN + idx));
-
-	/*
-	 * force other mappings to Oops if they'll try to access
-	 * this pte without first remap it
-	 */
-	pte_clear(&init_mm, vaddr, kmap_pte-idx);
-	local_flush_tlb_page(NULL, vaddr);
-#endif
-	pagefault_enable();
-}
-
 static inline struct page *kmap_atomic_to_page(void *ptr)
 {
 	unsigned long idx, vaddr = (unsigned long) ptr;
@@ -148,6 +98,7 @@ static inline struct page *kmap_atomic_to_page(void *ptr)
 	return pte_page(*pte);
 }

+
 #define flush_cache_kmaps() flush_cache_all()

 #endif /* __KERNEL__ */
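For illustration, a hypothetical module-side caller (the helper name and the KM_USER0 slot are chosen only for this example): kmap_atomic() remains an inline wrapper in the header, but it now expands to calls to the exported kmap_atomic_prot() and kunmap_atomic(), so the module never references debug_kmap_atomic() directly and links cleanly.

    #include <linux/highmem.h>
    #include <linux/string.h>

    /* Hypothetical helper: copy the start of a (possibly highmem) page. */
    static void copy_from_page(struct page *page, void *dst, size_t len)
    {
            /* maps the page into a per-CPU fixmap slot; disables pagefaults */
            void *src = kmap_atomic(page, KM_USER0);

            memcpy(dst, src, len);

            /* tears down the mapping and re-enables pagefaults */
            kunmap_atomic(src, KM_USER0);
    }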