author		Vlastimil Babka <vbabka@suse.cz>	2014-01-02 15:58:43 -0500
committer	Linus Torvalds <torvalds@linux-foundation.org>	2014-01-02 17:40:30 -0500
commit		c424be1cbbf852e46acc84d73162af3066cd2c86 (patch)
tree		2a91d172fb743ac5c799db29027c8c2640423203 /mm/mlock.c
parent		9a0bb2966efbf30a71c128c3af63307d8b5f5fc0 (diff)
mm: munlock: fix a bug where THP tail page is encountered
Since commit ff6a6da60b89 ("mm: accelerate munlock() treatment of THP
pages") munlock skips tail pages of a munlocked THP page. However, when
the head page already has PageMlocked unset, it will not skip the tail
pages.
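For context, the skip is driven by the value munlock_vma_page() returns to
its caller. Below is a condensed sketch of the munlock_vma_pages_range()
loop as of commit 7225522bb429, simplified for illustration rather than
quoted verbatim (the pagevec batching of normal pages is elided):

	while (start < end) {
		unsigned int page_mask = 0, page_increm;
		/* follow_page_mask() also reports the THP size via page_mask */
		struct page *page = follow_page_mask(vma, start,
					FOLL_GET | FOLL_DUMP, &page_mask);

		if (page && !IS_ERR(page)) {
			if (PageTransHuge(page)) {	/* asserts !PageTail */
				lock_page(page);
				/* pre-fix: returns 0 if PageMlocked was clear */
				page_mask = munlock_vma_page(page);
				unlock_page(page);
				put_page(page);
			} else {
				/* ... non-THP pages are batched via pagevec ... */
			}
		}
		page_increm = 1 + (~(start >> PAGE_SHIFT) & page_mask);
		start += page_increm * PAGE_SIZE;
		cond_resched();
	}

With PageMlocked already clear on the head page, page_mask came back as 0,
the loop advanced by a single page, and the next iteration handed the first
tail page to PageTransHuge().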
Commit 7225522bb429 ("mm: munlock: batch non-THP page isolation and
munlock+putback using pagevec") added a PageTransHuge() check which
contains VM_BUG_ON(PageTail(page)). Sasha Levin found this triggered by
trinity on the first tail page of a THP page whose PageMlocked flag was
unset.
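For reference, PageTransHuge() in kernels of that era was defined along
these lines (paraphrased from include/linux/huge_mm.h; the exact location
may differ by version):

	static inline int PageTransHuge(struct page *page)
	{
		VM_BUG_ON(PageTail(page));
		return PageHead(page);
	}

so on a CONFIG_DEBUG_VM build, merely asking whether a tail page is huge
trips the assertion.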
This patch fixes the issue by skipping the tail pages also in the case when
the PageMlocked flag is unset. There is still a possibility of a race with a
THP page split between clearing PageMlocked and determining how many pages
to skip. The race might result in former tail pages not being skipped, which
is, however, no longer a bug: by the time they are visited, the split has
already cleared their PageTail flags.
This race also affects the correctness of the NR_MLOCK accounting, which is
to be fixed in a separate patch.
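An illustrative interleaving of the remaining race, not taken from the
patch itself:

	/*
	 * CPU A: munlock_vma_page(head)     CPU B: THP split
	 * -----------------------------     ----------------
	 * TestClearPageMlocked(head)
	 *                                   split_huge_page(head)
	 *                                   (tails lose PageTail,
	 *                                    head stops being THP)
	 * nr_pages = hpage_nr_pages(head)
	 *   == 1 instead of HPAGE_PMD_NR
	 * return nr_pages - 1 == 0
	 */

The caller then revisits the former tail pages one by one, which is wasteful
but safe, since the split has already cleared their PageTail flags; only the
NR_MLOCK counter is left skewed.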
Signed-off-by: Vlastimil Babka <vbabka@suse.cz>
Reported-by: Sasha Levin <sasha.levin@oracle.com>
Cc: Michel Lespinasse <walken@google.com>
Cc: Andrea Arcangeli <aarcange@redhat.com>
Cc: Rik van Riel <riel@redhat.com>
Cc: Mel Gorman <mgorman@suse.de>
Cc: Hugh Dickins <hughd@google.com>
Cc: Bob Liu <bob.liu@oracle.com>
Cc: <stable@vger.kernel.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Diffstat (limited to 'mm/mlock.c')
-rw-r--r--	mm/mlock.c	29
1 file changed, 22 insertions(+), 7 deletions(-)
diff --git a/mm/mlock.c b/mm/mlock.c
index d480cd6fc475..c59c420fd6e1 100644
--- a/mm/mlock.c
+++ b/mm/mlock.c
@@ -133,7 +133,10 @@ static void __munlock_isolation_failed(struct page *page)
 
 /**
  * munlock_vma_page - munlock a vma page
- * @page - page to be unlocked
+ * @page - page to be unlocked, either a normal page or THP page head
+ *
+ * returns the size of the page as a page mask (0 for normal page,
+ * HPAGE_PMD_NR - 1 for THP head page)
  *
  * called from munlock()/munmap() path with page supposedly on the LRU.
  * When we munlock a page, because the vma where we found the page is being
@@ -148,21 +151,30 @@ static void __munlock_isolation_failed(struct page *page)
  */
 unsigned int munlock_vma_page(struct page *page)
 {
-	unsigned int page_mask = 0;
+	unsigned int nr_pages;
 
 	BUG_ON(!PageLocked(page));
 
 	if (TestClearPageMlocked(page)) {
-		unsigned int nr_pages = hpage_nr_pages(page);
+		nr_pages = hpage_nr_pages(page);
 		mod_zone_page_state(page_zone(page), NR_MLOCK, -nr_pages);
-		page_mask = nr_pages - 1;
 		if (!isolate_lru_page(page))
 			__munlock_isolated_page(page);
 		else
 			__munlock_isolation_failed(page);
+	} else {
+		nr_pages = hpage_nr_pages(page);
 	}
 
-	return page_mask;
+	/*
+	 * Regardless of the original PageMlocked flag, we determine nr_pages
+	 * after touching the flag. This leaves a possible race with a THP page
+	 * split, such that a whole THP page was munlocked, but nr_pages == 1.
+	 * Returning a smaller mask due to that is OK, the worst that can
+	 * happen is subsequent useless scanning of the former tail pages.
+	 * The NR_MLOCK accounting can however become broken.
+	 */
+	return nr_pages - 1;
 }
 
 /**
@@ -440,7 +452,8 @@ void munlock_vma_pages_range(struct vm_area_struct *vma,
 
 	while (start < end) {
 		struct page *page = NULL;
-		unsigned int page_mask, page_increm;
+		unsigned int page_mask;
+		unsigned long page_increm;
 		struct pagevec pvec;
 		struct zone *zone;
 		int zoneid;
@@ -490,7 +503,9 @@ void munlock_vma_pages_range(struct vm_area_struct *vma,
 				goto next;
 			}
 		}
-		page_increm = 1 + (~(start >> PAGE_SHIFT) & page_mask);
+		/* It's a bug to munlock in the middle of a THP page */
+		VM_BUG_ON((start >> PAGE_SHIFT) & page_mask);
+		page_increm = 1 + page_mask;
 		start += page_increm * PAGE_SIZE;
 next:
 		cond_resched();
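As a worked example of the new advance logic, assuming the common x86_64
configuration where a THP covers 512 base pages (HPAGE_PMD_NR == 512):

	page_mask   = munlock_vma_page(page);	/* THP head: 512 - 1 = 511 */
	page_increm = 1 + page_mask;		/* 512 pages */
	start      += page_increm * PAGE_SIZE;	/* jump the whole 2 MB THP */
	/* normal page: page_mask == 0, so start advances by one PAGE_SIZE */

Since the new VM_BUG_ON() guarantees that start is aligned to the THP
boundary whenever page_mask is nonzero, the old
'~(start >> PAGE_SHIFT) & page_mask' correction for a mid-THP start is no
longer needed.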