author	Vlastimil Babka <vbabka@suse.cz>	2014-01-23 18:52:50 -0500
committer	Linus Torvalds <torvalds@linux-foundation.org>	2014-01-23 19:36:50 -0500
commit	01cc2e58697e34c6ee9a40fb6cebc18bf5a1923f (patch)
tree	f1f0bf6c95568cb85771c58c6a9f7dc6168d2879 /mm/mlock.c
parent	f0b791a34cb3cffd2bbc3ca4365c9b719fa2c9f3 (diff)
mm: munlock: fix potential race with THP page split
Since commit ff6a6da60b89 ("mm: accelerate munlock() treatment of THP
pages") munlock skips the tail pages of a munlocked THP page. There is
some attempt to prevent bad consequences of racing with a THP page split,
but code inspection indicates two problems that may lead to a non-fatal,
yet wrong outcome.
First, __split_huge_page_refcount() copies flags, including PageMlocked,
from the head page to the tail pages. Clearing PageMlocked in
munlock_vma_page() in the middle of this operation can leave some of the
tail pages with the PageMlocked flag set. As the head page still
appears to be a THP page until all tail pages are processed,
munlock_vma_page() may think it has munlocked the whole THP page and skip
all the former tail pages. Before ff6a6da60, those pages would have been
cleared in further iterations of munlock_vma_pages_range(), but NR_MLOCK
would still become undercounted (related to the next point).
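A minimal sketch of this interleaving (a hypothetical timeline, with
arguments and unrelated locking omitted; "k" is an arbitrary point at
which the split loop has progressed) could look as follows:

	/*
	 * CPU A: munlock_vma_page(head)   CPU B: __split_huge_page_refcount(head)
	 *
	 *                                  copies flags (incl. PageMlocked)
	 *                                    from head to tails 0..k
	 * TestClearPageMlocked(head)
	 * hpage_nr_pages(head)
	 *   -> still HPAGE_PMD_NR, so all
	 *      former tail pages are skipped
	 *                                  copies flags from head (now
	 *                                    !PageMlocked) to tails k+1..
	 *
	 * Result: tails 0..k keep PageMlocked but are never munlocked.
	 */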
Second, the NR_MLOCK accounting is based on a call to hpage_nr_pages()
after PageMlocked has been cleared. The accounting can also become
inconsistent due to a race with __split_huge_page_refcount():

- undercount, when HPAGE_PMD_NR is subtracted but some tail pages are
left with PageMlocked set and are counted again (only possible before
ff6a6da60)

- overcount, when hpage_nr_pages() sees a normal page (the split has
already finished), but the parallel split has meanwhile cleared
PageMlocked from additional tail pages
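For reference, the pre-patch window looks like this (abridged from the
munlock_vma_page() code removed in the diff below; the added annotations
restate the two cases above):

	if (TestClearPageMlocked(page)) {
		/*
		 * A parallel __split_huge_page_refcount() can run between
		 * the flag clear above and the call below: hpage_nr_pages()
		 * may then return 1 although a whole THP page was munlocked
		 * (NR_MLOCK overcount), or HPAGE_PMD_NR although some tail
		 * pages keep PageMlocked and will be decremented again later
		 * (NR_MLOCK undercount).
		 */
		nr_pages = hpage_nr_pages(page);
		mod_zone_page_state(page_zone(page), NR_MLOCK, -nr_pages);
		...
	}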
This patch prevents both problems by extending the scope of lru_lock in
munlock_vma_page(). This is convenient because:

- __split_huge_page_refcount() takes lru_lock for its whole operation

- munlock_vma_page() typically takes lru_lock anyway, for page isolation

As munlock_vma_page() thus becomes a second function where page isolation
is done with lru_lock already held, factor this out into a new
__munlock_isolate_lru_page() function and clean up the code around it
(see the condensed sketch below).
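Condensed from the munlock_vma_page() hunk below, the resulting flow
keeps the flag clear, the accounting and the isolation attempt inside a
single lru_lock critical section:

	spin_lock_irq(&zone->lru_lock);
	nr_pages = hpage_nr_pages(page);	/* now stable vs. THP split */
	if (!TestClearPageMlocked(page))
		goto unlock_out;
	__mod_zone_page_state(zone, NR_MLOCK, -nr_pages);
	if (__munlock_isolate_lru_page(page, true)) {
		spin_unlock_irq(&zone->lru_lock);
		__munlock_isolated_page(page);
		goto out;
	}
	__munlock_isolation_failed(page);
unlock_out:
	spin_unlock_irq(&zone->lru_lock);
out:
	return nr_pages - 1;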
[akpm@linux-foundation.org: avoid a coding-style ugliness]
Signed-off-by: Vlastimil Babka <vbabka@suse.cz>
Cc: Sasha Levin <sasha.levin@oracle.com>
Cc: Michel Lespinasse <walken@google.com>
Cc: Andrea Arcangeli <aarcange@redhat.com>
Cc: Rik van Riel <riel@redhat.com>
Cc: Mel Gorman <mgorman@suse.de>
Cc: Hugh Dickins <hughd@google.com>
Cc: <stable@vger.kernel.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Diffstat (limited to 'mm/mlock.c')
-rw-r--r--	mm/mlock.c	104
1 file changed, 60 insertions(+), 44 deletions(-)
diff --git a/mm/mlock.c b/mm/mlock.c
index 10819ed4df3e..b30adbe62034 100644
--- a/mm/mlock.c
+++ b/mm/mlock.c
@@ -91,6 +91,26 @@ void mlock_vma_page(struct page *page)
 }
 
 /*
+ * Isolate a page from LRU with optional get_page() pin.
+ * Assumes lru_lock already held and page already pinned.
+ */
+static bool __munlock_isolate_lru_page(struct page *page, bool getpage)
+{
+	if (PageLRU(page)) {
+		struct lruvec *lruvec;
+
+		lruvec = mem_cgroup_page_lruvec(page, page_zone(page));
+		if (getpage)
+			get_page(page);
+		ClearPageLRU(page);
+		del_page_from_lru_list(page, lruvec, page_lru(page));
+		return true;
+	}
+
+	return false;
+}
+
+/*
  * Finish munlock after successful page isolation
  *
  * Page must be locked. This is a wrapper for try_to_munlock()
@@ -126,9 +146,9 @@ static void __munlock_isolated_page(struct page *page)
 static void __munlock_isolation_failed(struct page *page)
 {
 	if (PageUnevictable(page))
-		count_vm_event(UNEVICTABLE_PGSTRANDED);
+		__count_vm_event(UNEVICTABLE_PGSTRANDED);
 	else
-		count_vm_event(UNEVICTABLE_PGMUNLOCKED);
+		__count_vm_event(UNEVICTABLE_PGMUNLOCKED);
 }
 
 /**
@@ -152,28 +172,34 @@ static void __munlock_isolation_failed(struct page *page)
 unsigned int munlock_vma_page(struct page *page)
 {
 	unsigned int nr_pages;
+	struct zone *zone = page_zone(page);
 
 	BUG_ON(!PageLocked(page));
 
-	if (TestClearPageMlocked(page)) {
-		nr_pages = hpage_nr_pages(page);
-		mod_zone_page_state(page_zone(page), NR_MLOCK, -nr_pages);
-		if (!isolate_lru_page(page))
-			__munlock_isolated_page(page);
-		else
-			__munlock_isolation_failed(page);
-	} else {
-		nr_pages = hpage_nr_pages(page);
-	}
-
 	/*
-	 * Regardless of the original PageMlocked flag, we determine nr_pages
-	 * after touching the flag. This leaves a possible race with a THP page
-	 * split, such that a whole THP page was munlocked, but nr_pages == 1.
-	 * Returning a smaller mask due to that is OK, the worst that can
-	 * happen is subsequent useless scanning of the former tail pages.
-	 * The NR_MLOCK accounting can however become broken.
+	 * Serialize with any parallel __split_huge_page_refcount() which
+	 * might otherwise copy PageMlocked to part of the tail pages before
+	 * we clear it in the head page. It also stabilizes hpage_nr_pages().
 	 */
+	spin_lock_irq(&zone->lru_lock);
+
+	nr_pages = hpage_nr_pages(page);
+	if (!TestClearPageMlocked(page))
+		goto unlock_out;
+
+	__mod_zone_page_state(zone, NR_MLOCK, -nr_pages);
+
+	if (__munlock_isolate_lru_page(page, true)) {
+		spin_unlock_irq(&zone->lru_lock);
+		__munlock_isolated_page(page);
+		goto out;
+	}
+	__munlock_isolation_failed(page);
+
+unlock_out:
+	spin_unlock_irq(&zone->lru_lock);
+
+out:
 	return nr_pages - 1;
 }
 
@@ -310,34 +336,24 @@ static void __munlock_pagevec(struct pagevec *pvec, struct zone *zone)
 		struct page *page = pvec->pages[i];
 
 		if (TestClearPageMlocked(page)) {
-			struct lruvec *lruvec;
-			int lru;
-
-			if (PageLRU(page)) {
-				lruvec = mem_cgroup_page_lruvec(page, zone);
-				lru = page_lru(page);
-				/*
-				 * We already have pin from follow_page_mask()
-				 * so we can spare the get_page() here.
-				 */
-				ClearPageLRU(page);
-				del_page_from_lru_list(page, lruvec, lru);
-			} else {
-				__munlock_isolation_failed(page);
-				goto skip_munlock;
-			}
-
-		} else {
-skip_munlock:
 			/*
-			 * We won't be munlocking this page in the next phase
-			 * but we still need to release the follow_page_mask()
-			 * pin. We cannot do it under lru_lock however. If it's
-			 * the last pin, __page_cache_release would deadlock.
+			 * We already have pin from follow_page_mask()
+			 * so we can spare the get_page() here.
 			 */
-			pagevec_add(&pvec_putback, pvec->pages[i]);
-			pvec->pages[i] = NULL;
+			if (__munlock_isolate_lru_page(page, false))
+				continue;
+			else
+				__munlock_isolation_failed(page);
 		}
+
+		/*
+		 * We won't be munlocking this page in the next phase
+		 * but we still need to release the follow_page_mask()
+		 * pin. We cannot do it under lru_lock however. If it's
+		 * the last pin, __page_cache_release() would deadlock.
+		 */
+		pagevec_add(&pvec_putback, pvec->pages[i]);
+		pvec->pages[i] = NULL;
 	}
 	delta_munlocked = -nr + pagevec_count(&pvec_putback);
 	__mod_zone_page_state(zone, NR_MLOCK, delta_munlocked);