 mm/mlock.c | 29 ++++++++++++++++++++++++++-------
 1 file changed, 22 insertions(+), 7 deletions(-)
diff --git a/mm/mlock.c b/mm/mlock.c
index d480cd6fc475..c59c420fd6e1 100644
--- a/mm/mlock.c
+++ b/mm/mlock.c
@@ -133,7 +133,10 @@ static void __munlock_isolation_failed(struct page *page)
 
 /**
  * munlock_vma_page - munlock a vma page
- * @page - page to be unlocked
+ * @page - page to be unlocked, either a normal page or THP page head
+ *
+ * returns the size of the page as a page mask (0 for normal page,
+ * HPAGE_PMD_NR - 1 for THP head page)
  *
  * called from munlock()/munmap() path with page supposedly on the LRU.
  * When we munlock a page, because the vma where we found the page is being
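
Note on the new return convention: on x86_64 with 4KB base pages and 2MB transparent hugepages (assumed for this example; HPAGE_PMD_NR is architecture dependent), the mask is 0 for a normal page and 511 for a THP head, and the caller advances by 1 + mask pages. A minimal userspace sketch of that arithmetic, using illustrative stand-ins rather than kernel API:

#include <stdio.h>

#define HPAGE_PMD_NR 512	/* assumed: x86_64, 2MB THP over 4KB pages */

/* Stand-in for munlock_vma_page()'s documented return value:
 * 0 for a normal page, HPAGE_PMD_NR - 1 for a THP head page. */
static unsigned int page_mask_of(int thp_head)
{
	return thp_head ? HPAGE_PMD_NR - 1 : 0;
}

int main(void)
{
	/* The caller advances by (1 + mask) pages past the munlocked page. */
	printf("normal page: mask=%u, advance=%u page(s)\n",
	       page_mask_of(0), 1 + page_mask_of(0));	/* 0, 1 */
	printf("THP head:    mask=%u, advance=%u page(s)\n",
	       page_mask_of(1), 1 + page_mask_of(1));	/* 511, 512 */
	return 0;
}
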
@@ -148,21 +151,30 @@ static void __munlock_isolation_failed(struct page *page)
  */
 unsigned int munlock_vma_page(struct page *page)
 {
-	unsigned int page_mask = 0;
+	unsigned int nr_pages;
 
 	BUG_ON(!PageLocked(page));
 
 	if (TestClearPageMlocked(page)) {
-		unsigned int nr_pages = hpage_nr_pages(page);
+		nr_pages = hpage_nr_pages(page);
 		mod_zone_page_state(page_zone(page), NR_MLOCK, -nr_pages);
-		page_mask = nr_pages - 1;
 		if (!isolate_lru_page(page))
 			__munlock_isolated_page(page);
 		else
 			__munlock_isolation_failed(page);
+	} else {
+		nr_pages = hpage_nr_pages(page);
 	}
 
-	return page_mask;
+	/*
+	 * Regardless of the original PageMlocked flag, we determine nr_pages
+	 * after touching the flag. This leaves a possible race with a THP page
+	 * split, such that a whole THP page was munlocked, but nr_pages == 1.
+	 * Returning a smaller mask due to that is OK, the worst that can
+	 * happen is subsequent useless scanning of the former tail pages.
+	 * The NR_MLOCK accounting can however become broken.
+	 */
+	return nr_pages - 1;
 }
 
 /**
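
The new block comment describes a narrow window: nr_pages is sampled from hpage_nr_pages() only after TestClearPageMlocked(), so a THP split racing in between can leave nr_pages == 1 even though an entire huge page was just munlocked. A toy model of that interleaving (purely illustrative, not kernel code):

#include <stdio.h>

#define HPAGE_PMD_NR 512	/* assumed: x86_64, 2MB THP over 4KB pages */

/*
 * Toy model of the race window described in the comment above: a THP
 * split landing between TestClearPageMlocked() and hpage_nr_pages().
 */
static unsigned int munlock_mask(int split_races_in)
{
	/* TestClearPageMlocked() just succeeded for the whole THP page... */
	unsigned int nr_pages = HPAGE_PMD_NR;

	/* ...but a concurrent split may turn it into a base page before
	 * nr_pages is sampled. */
	if (split_races_in)
		nr_pages = 1;

	return nr_pages - 1;
}

int main(void)
{
	printf("no race:  mask=%u\n", munlock_mask(0));	/* 511 */
	/* Mask 0 is harmless for the caller (it merely rescans the former
	 * tail pages), but NR_MLOCK accounting can be left broken. */
	printf("race hit: mask=%u\n", munlock_mask(1));	/* 0 */
	return 0;
}
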
@@ -440,7 +452,8 @@ void munlock_vma_pages_range(struct vm_area_struct *vma,
 
 	while (start < end) {
 		struct page *page = NULL;
-		unsigned int page_mask, page_increm;
+		unsigned int page_mask;
+		unsigned long page_increm;
 		struct pagevec pvec;
 		struct zone *zone;
 		int zoneid;
@@ -490,7 +503,9 @@ void munlock_vma_pages_range(struct vm_area_struct *vma,
 				goto next;
 			}
 		}
-		page_increm = 1 + (~(start >> PAGE_SHIFT) & page_mask);
+		/* It's a bug to munlock in the middle of a THP page */
+		VM_BUG_ON((start >> PAGE_SHIFT) & page_mask);
+		page_increm = 1 + page_mask;
 		start += page_increm * PAGE_SIZE;
 next:
 		cond_resched();
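
The dropped expression 1 + (~(start >> PAGE_SHIFT) & page_mask) tolerated a start in the middle of a THP page by advancing only to the next huge-page boundary; the new VM_BUG_ON asserts that this case cannot occur, which lets the advance collapse to 1 + page_mask. A worked comparison under assumed x86_64 constants:

#include <stdio.h>

#define PAGE_SHIFT 12			/* assumed: x86_64, 4KB pages */
#define PAGE_SIZE  (1UL << PAGE_SHIFT)

int main(void)
{
	unsigned int page_mask = 511;	/* mask for a 2MB THP head */
	unsigned long start;

	/* Aligned start: both formulas advance by a full huge page. */
	start = 0x200000;
	printf("aligned: old=%lu new=%u\n",
	       1 + (~(start >> PAGE_SHIFT) & page_mask),	/* 512 */
	       1 + page_mask);					/* 512 */

	/* Mid-THP start: the old formula advanced only to the next
	 * huge-page boundary; the new VM_BUG_ON rules this case out. */
	start = 0x200000 + 100 * PAGE_SIZE;
	printf("mid-THP: old=%lu\n",
	       1 + (~(start >> PAGE_SHIFT) & page_mask));	/* 412 */
	return 0;
}
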