 mm/highmem.c | 86
 1 file changed, 75 insertions(+), 11 deletions(-)
diff --git a/mm/highmem.c b/mm/highmem.c
index b32b70cdaed6..123bcd3ed4f2 100644
--- a/mm/highmem.c
+++ b/mm/highmem.c
@@ -44,6 +44,66 @@ DEFINE_PER_CPU(int, __kmap_atomic_idx);
  */
 #ifdef CONFIG_HIGHMEM
 
+/*
+ * Architecture with aliasing data cache may define the following family of
+ * helper functions in its asm/highmem.h to control cache color of virtual
+ * addresses where physical memory pages are mapped by kmap.
+ */
+#ifndef get_pkmap_color
+
+/*
+ * Determine color of virtual address where the page should be mapped.
+ */
+static inline unsigned int get_pkmap_color(struct page *page)
+{
+	return 0;
+}
+#define get_pkmap_color get_pkmap_color
+
+/*
+ * Get next index for mapping inside PKMAP region for page with given color.
+ */
+static inline unsigned int get_next_pkmap_nr(unsigned int color)
+{
+	static unsigned int last_pkmap_nr;
+
+	last_pkmap_nr = (last_pkmap_nr + 1) & LAST_PKMAP_MASK;
+	return last_pkmap_nr;
+}
+
+/*
+ * Determine if page index inside PKMAP region (pkmap_nr) of given color
+ * has wrapped around PKMAP region end. When this happens an attempt to
+ * flush all unused PKMAP slots is made.
+ */
+static inline int no_more_pkmaps(unsigned int pkmap_nr, unsigned int color)
+{
+	return pkmap_nr == 0;
+}
+
+/*
+ * Get the number of PKMAP entries of the given color. If no free slot is
+ * found after checking that many entries, kmap will sleep waiting for
+ * someone to call kunmap and free PKMAP slot.
+ */
+static inline int get_pkmap_entries_count(unsigned int color)
+{
+	return LAST_PKMAP;
+}
+
+/*
+ * Get head of a wait queue for PKMAP entries of the given color.
+ * Wait queues for different mapping colors should be independent to avoid
+ * unnecessary wakeups caused by freeing of slots of other colors.
+ */
+static inline wait_queue_head_t *get_pkmap_wait_queue_head(unsigned int color)
+{
+	static DECLARE_WAIT_QUEUE_HEAD(pkmap_map_wait);
+
+	return &pkmap_map_wait;
+}
+#endif
+
 unsigned long totalhigh_pages __read_mostly;
 EXPORT_SYMBOL(totalhigh_pages);
 
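To make the hook family concrete, here is a minimal sketch of how an architecture with an aliasing data cache might override these helpers in its asm/highmem.h. This is not part of the patch: DCACHE_N_COLORS, dcache_page_color(), last_pkmap_nr_arr and pkmap_map_wait_arr are hypothetical names invented for illustration, assuming the number of colors is a power of two that divides LAST_PKMAP.

/*
 * Hypothetical asm/highmem.h override for a data cache that aliases
 * across DCACHE_N_COLORS page-sized colors (illustration only).
 */
#define get_pkmap_color get_pkmap_color

/* The color is fixed by the page's physical address. */
static inline unsigned int get_pkmap_color(struct page *page)
{
	return dcache_page_color(page_to_phys(page));
}

/* One rotating cursor per color, defined in the arch's mm code. */
extern unsigned int last_pkmap_nr_arr[DCACHE_N_COLORS];

/*
 * Step the cursor by DCACHE_N_COLORS so every probed slot keeps the
 * same color; the constant offset into each group selects that color.
 */
static inline unsigned int get_next_pkmap_nr(unsigned int color)
{
	last_pkmap_nr_arr[color] =
		(last_pkmap_nr_arr[color] + DCACHE_N_COLORS) & LAST_PKMAP_MASK;
	return last_pkmap_nr_arr[color] + color;
}

/* The per-color cursor has wrapped once it is back in the first group. */
static inline int no_more_pkmaps(unsigned int pkmap_nr, unsigned int color)
{
	return pkmap_nr < DCACHE_N_COLORS;
}

/* Only every DCACHE_N_COLORS-th PKMAP slot matches a given color. */
static inline int get_pkmap_entries_count(unsigned int color)
{
	return LAST_PKMAP / DCACHE_N_COLORS;
}

/*
 * Independent wait queue per color, so freeing a slot of one color
 * does not wake sleepers waiting for another.
 */
extern wait_queue_head_t pkmap_map_wait_arr[DCACHE_N_COLORS];

static inline wait_queue_head_t *get_pkmap_wait_queue_head(unsigned int color)
{
	return &pkmap_map_wait_arr[color];
}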
@@ -68,13 +128,10 @@ unsigned int nr_free_highpages (void)
 }
 
 static int pkmap_count[LAST_PKMAP];
-static unsigned int last_pkmap_nr;
 static __cacheline_aligned_in_smp DEFINE_SPINLOCK(kmap_lock);
 
 pte_t * pkmap_page_table;
 
-static DECLARE_WAIT_QUEUE_HEAD(pkmap_map_wait);
-
 /*
  * Most architectures have no use for kmap_high_get(), so let's abstract
  * the disabling of IRQ out of the locking in that case to save on a
@@ -161,15 +218,17 @@ static inline unsigned long map_new_virtual(struct page *page)
 {
 	unsigned long vaddr;
 	int count;
+	unsigned int last_pkmap_nr;
+	unsigned int color = get_pkmap_color(page);
 
 start:
-	count = LAST_PKMAP;
+	count = get_pkmap_entries_count(color);
 	/* Find an empty entry */
 	for (;;) {
-		last_pkmap_nr = (last_pkmap_nr + 1) & LAST_PKMAP_MASK;
-		if (!last_pkmap_nr) {
+		last_pkmap_nr = get_next_pkmap_nr(color);
+		if (no_more_pkmaps(last_pkmap_nr, color)) {
 			flush_all_zero_pkmaps();
-			count = LAST_PKMAP;
+			count = get_pkmap_entries_count(color);
 		}
 		if (!pkmap_count[last_pkmap_nr])
 			break;	/* Found a usable entry */
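A quick worked example of the rewritten search loop (numbers invented for illustration): with the default helpers nothing changes, since get_pkmap_entries_count() still returns LAST_PKMAP and get_next_pkmap_nr() reproduces the old "(last_pkmap_nr + 1) & LAST_PKMAP_MASK" step. Under the hypothetical 4-color override sketched above with LAST_PKMAP = 512, a page of color 2 would probe only the 128 slots 2, 6, 10, ..., get_pkmap_entries_count() would report 128 so the sleep threshold matches, and flush_all_zero_pkmaps() would run each time that per-color cursor wraps.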
@@ -181,12 +240,14 @@ start:
 	 */
 	{
 		DECLARE_WAITQUEUE(wait, current);
+		wait_queue_head_t *pkmap_map_wait =
+			get_pkmap_wait_queue_head(color);
 
 		__set_current_state(TASK_UNINTERRUPTIBLE);
-		add_wait_queue(&pkmap_map_wait, &wait);
+		add_wait_queue(pkmap_map_wait, &wait);
 		unlock_kmap();
 		schedule();
-		remove_wait_queue(&pkmap_map_wait, &wait);
+		remove_wait_queue(pkmap_map_wait, &wait);
 		lock_kmap();
 
 		/* Somebody else might have mapped it while we slept */
@@ -274,6 +335,8 @@ void kunmap_high(struct page *page)
 	unsigned long nr;
 	unsigned long flags;
 	int need_wakeup;
+	unsigned int color = get_pkmap_color(page);
+	wait_queue_head_t *pkmap_map_wait;
 
 	lock_kmap_any(flags);
 	vaddr = (unsigned long)page_address(page);
@@ -299,13 +362,14 @@ void kunmap_high(struct page *page)
 		 * no need for the wait-queue-head's lock.  Simply
 		 * test if the queue is empty.
 		 */
-		need_wakeup = waitqueue_active(&pkmap_map_wait);
+		pkmap_map_wait = get_pkmap_wait_queue_head(color);
+		need_wakeup = waitqueue_active(pkmap_map_wait);
 	}
 	unlock_kmap_any(flags);
 
 	/* do wake-up, if needed, race-free outside of the spin lock */
 	if (need_wakeup)
-		wake_up(&pkmap_map_wait);
+		wake_up(pkmap_map_wait);
 }
 
 EXPORT_SYMBOL(kunmap_high);
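For completeness, the hypothetical per-color sketch above would also need its cursors and wait queues defined and initialized once in the architecture's mm setup, along these lines (again an illustration, not part of this patch):

/* Hypothetical companion definitions in arch/.../mm/highmem.c. */
unsigned int last_pkmap_nr_arr[DCACHE_N_COLORS];
wait_queue_head_t pkmap_map_wait_arr[DCACHE_N_COLORS];

static void __init pkmap_waitqueues_init(void)
{
	int i;

	for (i = 0; i < DCACHE_N_COLORS; i++)
		init_waitqueue_head(&pkmap_map_wait_arr[i]);
}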