author		Kirill A. Shutemov <kirill.shutemov@linux.intel.com>	2016-03-15 17:57:30 -0400
committer	Linus Torvalds <torvalds@linux-foundation.org>		2016-03-15 19:55:16 -0400
commit		8df651c7059e7980f08430d4ebbd134b046657ee (patch)
tree		e5a53184414736b25e4436ff30aa28360573d2bb /mm
parent		88193f7ce6657ec4197b1f26b73b37197373b8e6 (diff)
thp: cleanup split_huge_page()
After one of the bugfixes to freeze_page(), we no longer have frozen pages in
the rmap, so the mapcount of every subpage of a frozen THP is zero, and we
have an assert for that.

Let's drop the code which deals with a non-zero mapcount of subpages.
Signed-off-by: Kirill A. Shutemov <kirill.shutemov@linux.intel.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
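
To make the accounting concrete, here is a minimal userspace sketch of the
arithmetic this patch removes. It is an assumption-laden illustration, not
kernel code: struct fake_page and its fields are stand-ins, though the -1
bias on _mapcount mirrors the kernel's page_mapcount() convention. For a
frozen subpage (zero mappings), the old formula "mapcount + 1" and the new
atomic_inc() hand the tail the same single reference, and the old
atomic_sub() on the head degenerates to subtracting zero.

#include <assert.h>
#include <stdatomic.h>
#include <stdio.h>

/* Illustrative stand-in for struct page; not the kernel layout. */
struct fake_page {
	atomic_int _mapcount;	/* biased: -1 means "no mappings" */
	atomic_int _count;	/* reference count */
};

int main(void)
{
	/* A subpage of a frozen THP: no mappings, no references yet. */
	struct fake_page tail = { ._mapcount = -1, ._count = 0 };

	/* Old code: recover the true mapcount from the biased field,
	 * give the tail "mapcount + 1" references, and later subtract
	 * the summed tail mapcounts from the head's _count. */
	int mapcount = atomic_load(&tail._mapcount) + 1;

	/* New code: freeze_page() guarantees a zero mapcount, so
	 * assert that and take exactly one reference for the tail. */
	assert(atomic_load(&tail._mapcount) == -1);
	atomic_fetch_add(&tail._count, 1);

	/* With mapcount == 0 the two versions agree: one reference. */
	printf("old formula adds %d ref(s); new code adds 1\n",
	       mapcount + 1);
	return 0;
}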
Diffstat (limited to 'mm')
-rw-r--r--	mm/huge_memory.c	20
1 file changed, 7 insertions(+), 13 deletions(-)
diff --git a/mm/huge_memory.c b/mm/huge_memory.c
index e10a4fee88d2..1ea21e203a70 100644
--- a/mm/huge_memory.c
+++ b/mm/huge_memory.c
@@ -3220,28 +3220,26 @@ static void unfreeze_page(struct anon_vma *anon_vma, struct page *page)
 	}
 }
 
-static int __split_huge_page_tail(struct page *head, int tail,
+static void __split_huge_page_tail(struct page *head, int tail,
 		struct lruvec *lruvec, struct list_head *list)
 {
-	int mapcount;
 	struct page *page_tail = head + tail;
 
-	mapcount = atomic_read(&page_tail->_mapcount) + 1;
+	VM_BUG_ON_PAGE(atomic_read(&page_tail->_mapcount) != -1, page_tail);
 	VM_BUG_ON_PAGE(atomic_read(&page_tail->_count) != 0, page_tail);
 
 	/*
 	 * tail_page->_count is zero and not changing from under us. But
 	 * get_page_unless_zero() may be running from under us on the
-	 * tail_page. If we used atomic_set() below instead of atomic_add(), we
+	 * tail_page. If we used atomic_set() below instead of atomic_inc(), we
 	 * would then run atomic_set() concurrently with
 	 * get_page_unless_zero(), and atomic_set() is implemented in C not
 	 * using locked ops. spin_unlock on x86 sometime uses locked ops
 	 * because of PPro errata 66, 92, so unless somebody can guarantee
 	 * atomic_set() here would be safe on all archs (and not only on x86),
-	 * it's safer to use atomic_add().
+	 * it's safer to use atomic_inc().
 	 */
-	atomic_add(mapcount + 1, &page_tail->_count);
-
+	atomic_inc(&page_tail->_count);
 
 	page_tail->flags &= ~PAGE_FLAGS_CHECK_AT_PREP;
 	page_tail->flags |= (head->flags &
@@ -3275,8 +3273,6 @@ static int __split_huge_page_tail(struct page *head, int tail,
 	page_tail->index = head->index + tail;
 	page_cpupid_xchg_last(page_tail, page_cpupid_last(head));
 	lru_add_page_tail(head, page_tail, lruvec, list);
-
-	return mapcount;
 }
 
 static void __split_huge_page(struct page *page, struct list_head *list)
@@ -3284,7 +3280,7 @@ static void __split_huge_page(struct page *page, struct list_head *list)
 	struct page *head = compound_head(page);
 	struct zone *zone = page_zone(head);
 	struct lruvec *lruvec;
-	int i, tail_mapcount;
+	int i;
 
 	/* prevent PageLRU to go away from under us, and freeze lru stats */
 	spin_lock_irq(&zone->lru_lock);
@@ -3293,10 +3289,8 @@ static void __split_huge_page(struct page *page, struct list_head *list)
 	/* complete memcg works before add pages to LRU */
 	mem_cgroup_split_huge_fixup(head);
 
-	tail_mapcount = 0;
 	for (i = HPAGE_PMD_NR - 1; i >= 1; i--)
-		tail_mapcount += __split_huge_page_tail(head, i, lruvec, list);
-	atomic_sub(tail_mapcount, &head->_count);
+		__split_huge_page_tail(head, i, lruvec, list);
 
 	ClearPageCompound(head);
 	spin_unlock_irq(&zone->lru_lock);
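
Why atomic_inc() rather than atomic_set(&page_tail->_count, 1)? The comment
kept in the diff cites old x86 (PPro) errata, but the general shape of the
concern is easy to model in userspace. The sketch below is an analogue under
stated assumptions, not the kernel implementation: get_ref_unless_zero() and
publish_first_ref() are hypothetical names, with the former mimicking
get_page_unless_zero()'s compare-and-swap loop.

#include <stdatomic.h>
#include <stdbool.h>

/* Userspace analogue of get_page_unless_zero(): atomically take a
 * reference, but only if at least one already exists. */
static bool get_ref_unless_zero(atomic_int *count)
{
	int old = atomic_load(count);

	/* On failure, compare_exchange reloads *count into old. */
	while (old != 0)
		if (atomic_compare_exchange_weak(count, &old, old + 1))
			return true;	/* reference taken */
	return false;			/* count was zero */
}

/* Splitter side: publish the tail page's first reference.  An atomic
 * read-modify-write (the analogue of atomic_inc()) is a locked op and
 * serializes with the cmpxchg loop above on every architecture.  A
 * plain store of 1 (the analogue of atomic_set()) is not, and as the
 * diff's comment notes, it cannot be guaranteed safe against
 * concurrent locked ops on all archs, so the kernel keeps the
 * increment. */
static void publish_first_ref(atomic_int *count)
{
	atomic_fetch_add(count, 1);
}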