Diffstat (limited to 'mm'):
 mm/highmem.c | 65
 1 file changed, 57 insertions(+), 8 deletions(-)
diff --git a/mm/highmem.c b/mm/highmem.c
index b36b83b920ff..910198037bf5 100644
--- a/mm/highmem.c
+++ b/mm/highmem.c
@@ -67,6 +67,25 @@ pte_t * pkmap_page_table;
 
 static DECLARE_WAIT_QUEUE_HEAD(pkmap_map_wait);
 
+/*
+ * Most architectures have no use for kmap_high_get(), so let's abstract
+ * the disabling of IRQ out of the locking in that case to save on a
+ * potential useless overhead.
+ */
+#ifdef ARCH_NEEDS_KMAP_HIGH_GET
+#define lock_kmap()             spin_lock_irq(&kmap_lock)
+#define unlock_kmap()           spin_unlock_irq(&kmap_lock)
+#define lock_kmap_any(flags)    spin_lock_irqsave(&kmap_lock, flags)
+#define unlock_kmap_any(flags)  spin_unlock_irqrestore(&kmap_lock, flags)
+#else
+#define lock_kmap()             spin_lock(&kmap_lock)
+#define unlock_kmap()           spin_unlock(&kmap_lock)
+#define lock_kmap_any(flags)    \
+                do { spin_lock(&kmap_lock); (void)(flags); } while (0)
+#define unlock_kmap_any(flags)  \
+                do { spin_unlock(&kmap_lock); (void)(flags); } while (0)
+#endif
+
 static void flush_all_zero_pkmaps(void)
 {
         int i;
@@ -113,9 +132,9 @@ static void flush_all_zero_pkmaps(void)
  */
 void kmap_flush_unused(void)
 {
-        spin_lock(&kmap_lock);
+        lock_kmap();
         flush_all_zero_pkmaps();
-        spin_unlock(&kmap_lock);
+        unlock_kmap();
 }
 
 static inline unsigned long map_new_virtual(struct page *page)
@@ -145,10 +164,10 @@ start:
 
                 __set_current_state(TASK_UNINTERRUPTIBLE);
                 add_wait_queue(&pkmap_map_wait, &wait);
-                spin_unlock(&kmap_lock);
+                unlock_kmap();
                 schedule();
                 remove_wait_queue(&pkmap_map_wait, &wait);
-                spin_lock(&kmap_lock);
+                lock_kmap();
 
                 /* Somebody else might have mapped it while we slept */
                 if (page_address(page))
@@ -184,29 +203,59 @@ void *kmap_high(struct page *page)
          * For highmem pages, we can't trust "virtual" until
          * after we have the lock.
          */
-        spin_lock(&kmap_lock);
+        lock_kmap();
         vaddr = (unsigned long)page_address(page);
         if (!vaddr)
                 vaddr = map_new_virtual(page);
         pkmap_count[PKMAP_NR(vaddr)]++;
         BUG_ON(pkmap_count[PKMAP_NR(vaddr)] < 2);
-        spin_unlock(&kmap_lock);
+        unlock_kmap();
         return (void*) vaddr;
 }
 
 EXPORT_SYMBOL(kmap_high);
 
+#ifdef ARCH_NEEDS_KMAP_HIGH_GET
+/**
+ * kmap_high_get - pin a highmem page into memory
+ * @page: &struct page to pin
+ *
+ * Returns the page's current virtual memory address, or NULL if no mapping
+ * exists. When and only when a non null address is returned then a
+ * matching call to kunmap_high() is necessary.
+ *
+ * This can be called from any context.
+ */
+void *kmap_high_get(struct page *page)
+{
+        unsigned long vaddr, flags;
+
+        lock_kmap_any(flags);
+        vaddr = (unsigned long)page_address(page);
+        if (vaddr) {
+                BUG_ON(pkmap_count[PKMAP_NR(vaddr)] < 1);
+                pkmap_count[PKMAP_NR(vaddr)]++;
+        }
+        unlock_kmap_any(flags);
+        return (void*) vaddr;
+}
+#endif
+
 /**
  * kunmap_high - map a highmem page into memory
  * @page: &struct page to unmap
+ *
+ * If ARCH_NEEDS_KMAP_HIGH_GET is not defined then this may be called
+ * only from user context.
  */
 void kunmap_high(struct page *page)
 {
         unsigned long vaddr;
         unsigned long nr;
+        unsigned long flags;
         int need_wakeup;
 
-        spin_lock(&kmap_lock);
+        lock_kmap_any(flags);
         vaddr = (unsigned long)page_address(page);
         BUG_ON(!vaddr);
         nr = PKMAP_NR(vaddr);
@@ -232,7 +281,7 @@ void kunmap_high(struct page *page)
                  */
                 need_wakeup = waitqueue_active(&pkmap_map_wait);
         }
-        spin_unlock(&kmap_lock);
+        unlock_kmap_any(flags);
 
         /* do wake-up, if needed, race-free outside of the spin lock */
         if (need_wakeup)
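
A note on the locking abstraction introduced at the top of the file: in the !ARCH_NEEDS_KMAP_HIGH_GET case, lock_kmap_any()/unlock_kmap_any() wrap the plain spin lock in a do { ... } while (0) block and cast the unused flags argument to void, so both configurations keep a single call signature without "unused variable" warnings. A minimal stand-alone illustration of that idiom (plain user-space C; every name here is invented for the example and is not part of the patch):

#include <stdio.h>

/* Uncomment to mimic ARCH_NEEDS_KMAP_HIGH_GET being defined. */
/* #define DEMO_NEEDS_FLAGS */

#ifdef DEMO_NEEDS_FLAGS
/* "irqsave" flavour: the flags argument is genuinely used. */
#define demo_lock_any(flags)    do { (flags) = 1UL; puts("lock, flags saved"); } while (0)
#else
/* plain flavour: same signature, flags deliberately ignored. */
#define demo_lock_any(flags)    do { puts("lock, no flags"); (void)(flags); } while (0)
#endif

int main(void)
{
        unsigned long flags;

        demo_lock_any(flags);   /* expands to one self-contained statement */
        return 0;
}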
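The kerneldoc for kmap_high_get() above pins down the calling convention: the function only returns an address for a page that already has a pkmap entry, it may be called from any context, and every non-NULL return must be balanced by a call to kunmap_high(). A caller-side sketch under those rules (the helper below is hypothetical and assumes an architecture that defines ARCH_NEEDS_KMAP_HIGH_GET and exposes the kmap_high_get() prototype through its asm/highmem.h):

#include <linux/highmem.h>
#include <linux/mm.h>
#include <linux/string.h>

/* Hypothetical helper: clear a highmem page, but only if somebody already
 * has it kmapped; kmap_high_get() pins that existing mapping for us. */
static void clear_if_already_mapped(struct page *page)
{
        void *vaddr = kmap_high_get(page);      /* safe from any context */

        if (!vaddr)
                return;                 /* no mapping exists, nothing to pin */

        memset(vaddr, 0, PAGE_SIZE);    /* the mapping stays valid while pinned */

        kunmap_high(page);              /* required for every non-NULL return */
}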