author		Linus Torvalds <torvalds@linux-foundation.org>	2016-06-04 13:51:29 -0400
committer	Linus Torvalds <torvalds@linux-foundation.org>	2016-06-04 13:51:29 -0400
commit		d46d0256cd030f196185078a4a8863563425b624 (patch)
tree		f72264009d1e979bd5f24b1b3e5fc70172446db9 /mm/page_alloc.c
parent		8c52b6dcdd1994ad0d2672e43c8d975d5c8195c3 (diff)
parent		e46e7b77c9096eb2f4d6bcb9ca0b64c9338465ee (diff)
Merge branch 'akpm' (patches from Andrew)
Merge various fixes from Andrew Morton:
"10 fixes"
* emailed patches from Andrew Morton <akpm@linux-foundation.org>:
mm, page_alloc: recalculate the preferred zoneref if the context can ignore memory policies
mm, page_alloc: reset zonelist iterator after resetting fair zone allocation policy
mm, oom_reaper: do not use siglock in try_oom_reaper()
mm, page_alloc: prevent infinite loop in buffered_rmqueue()
checkpatch: reduce git commit description style false positives
mm/z3fold.c: avoid modifying HEADLESS page and minor cleanup
memcg: add RCU locking around css_for_each_descendant_pre() in memcg_offline_kmem()
mm: check the return value of lookup_page_ext for all call sites
kdump: fix dmesg gdbmacro to work with record based printk
mm: fix overflow in vm_map_ram()
Diffstat (limited to 'mm/page_alloc.c')
-rw-r--r--	mm/page_alloc.c	39
1 file changed, 28 insertions(+), 11 deletions(-)
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index f8f3bfc435ee..6903b695ebae 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -656,6 +656,9 @@ static inline void set_page_guard(struct zone *zone, struct page *page,
 		return;
 
 	page_ext = lookup_page_ext(page);
+	if (unlikely(!page_ext))
+		return;
+
 	__set_bit(PAGE_EXT_DEBUG_GUARD, &page_ext->flags);
 
 	INIT_LIST_HEAD(&page->lru);
@@ -673,6 +676,9 @@ static inline void clear_page_guard(struct zone *zone, struct page *page,
 		return;
 
 	page_ext = lookup_page_ext(page);
+	if (unlikely(!page_ext))
+		return;
+
 	__clear_bit(PAGE_EXT_DEBUG_GUARD, &page_ext->flags);
 
 	set_page_private(page, 0);
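The two guard helpers previously dereferenced the result of lookup_page_ext() unconditionally; since the page_ext backing store can fail to be allocated, every caller must bail out on NULL (this is the "check the return value of lookup_page_ext for all call sites" fix from the series). Below is a minimal userspace sketch of the same defensive pattern; the names ext, ext_table, lookup_ext and set_guard are hypothetical stand-ins for the kernel structures, not kernel API:

#include <stdio.h>
#include <stdlib.h>

/* Hypothetical stand-in for struct page_ext and its flags word. */
struct ext { unsigned long flags; };

#define EXT_SLOTS 8
static struct ext *ext_table[EXT_SLOTS];	/* some slots may be NULL */

/* Model of lookup_page_ext(): may legitimately return NULL. */
static struct ext *lookup_ext(unsigned int idx)
{
	return idx < EXT_SLOTS ? ext_table[idx] : NULL;
}

/* The hardened pattern: check before dereferencing. */
static void set_guard(unsigned int idx)
{
	struct ext *e = lookup_ext(idx);

	if (!e)		/* mirrors: if (unlikely(!page_ext)) return; */
		return;
	e->flags |= 1UL;
}

int main(void)
{
	ext_table[0] = calloc(1, sizeof(struct ext));
	set_guard(0);	/* sets the flag */
	set_guard(3);	/* slot is NULL: silently skipped, no crash */
	printf("slot 0 flags: %lu\n", ext_table[0]->flags);
	return 0;
}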
@@ -2609,11 +2615,12 @@ struct page *buffered_rmqueue(struct zone *preferred_zone,
 				page = list_last_entry(list, struct page, lru);
 			else
 				page = list_first_entry(list, struct page, lru);
-		} while (page && check_new_pcp(page));
 
-		__dec_zone_state(zone, NR_ALLOC_BATCH);
-		list_del(&page->lru);
-		pcp->count--;
+			__dec_zone_state(zone, NR_ALLOC_BATCH);
+			list_del(&page->lru);
+			pcp->count--;
+
+		} while (check_new_pcp(page));
 	} else {
 		/*
 		 * We most definitely don't want callers attempting to
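This is the "prevent infinite loop in buffered_rmqueue()" fix. In the old code, if check_new_pcp() flagged the freshly picked page as bad, the loop re-read the head of the same per-cpu list; because list_del() only ran after the loop, it picked the very same page again, forever. Moving the bookkeeping, including the list removal, inside the loop guarantees forward progress. A userspace model of the fixed loop shape, with hypothetical take_first_good/is_bad helpers replacing the kernel's list and check primitives:

#include <stdio.h>

struct page { int bad; struct page *next; };

/* Hypothetical stand-in for check_new_pcp(): nonzero means "bad page". */
static int is_bad(struct page *p) { return p->bad; }

/*
 * Fixed shape of the buffered_rmqueue() loop: the page is unlinked
 * *inside* the loop, so a bad page cannot be picked twice.
 */
static struct page *take_first_good(struct page **list)
{
	struct page *page;

	do {
		page = *list;
		if (!page)
			return NULL;
		*list = page->next;	/* mirrors list_del(&page->lru) */
	} while (is_bad(page));		/* the bad page is already unlinked */

	return page;
}

int main(void)
{
	struct page p2 = { 0, NULL }, p1 = { 1, &p2 };	/* p1 is bad */
	struct page *list = &p1;
	struct page *got = take_first_good(&list);

	printf("skipped bad page, got good one: %d\n", got == &p2);
	return 0;
}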
@@ -3023,6 +3030,7 @@ reset_fair:
 		apply_fair = false;
 		fair_skipped = false;
 		reset_alloc_batches(ac->preferred_zoneref->zone);
+		z = ac->preferred_zoneref;
 		goto zonelist_scan;
 	}
 
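This one-liner is the "reset zonelist iterator after resetting fair zone allocation policy" fix: the goto rescans the zonelist after the fair batches are reset, but the iterator cursor z still pointed at wherever the previous pass stopped, so the earlier zones were silently skipped on the second pass. Rewinding the cursor to the preferred zoneref before restarting fixes that. A userspace model of the same pattern, with a plain array index standing in for the zoneref cursor:

#include <stdio.h>

#define NZONES 3

/*
 * Model of the fair-zone rescan: a first pass skips zones whose
 * allocation batch is exhausted; if nothing is found, fairness is
 * dropped and the list is scanned again.  'cursor' plays the role of
 * the zoneref iterator 'z'; 'rewind' toggles the fix.
 */
static int scan(const int *batch, int rewind)
{
	int cursor = 0;
	int apply_fair = 1;

restart:
	for (; cursor < NZONES; cursor++) {
		if (apply_fair && batch[cursor] <= 0)
			continue;	/* fair policy: skip exhausted zone */
		return cursor;
	}

	if (apply_fair) {
		apply_fair = 0;
		if (rewind)
			cursor = 0;	/* the fix: z = ac->preferred_zoneref */
		goto restart;		/* without it, the rescan sees nothing */
	}
	return -1;
}

int main(void)
{
	int batch[NZONES] = { 0, 0, 0 };	/* all batches exhausted */

	printf("without rewind: %d\n", scan(batch, 0));	/* -1: bogus failure */
	printf("with rewind:    %d\n", scan(batch, 1));	/*  0: zone found */
	return 0;
}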
@@ -3596,6 +3604,17 @@ retry:
 	 */
 	alloc_flags = gfp_to_alloc_flags(gfp_mask);
 
+	/*
+	 * Reset the zonelist iterators if memory policies can be ignored.
+	 * These allocations are high priority and system rather than user
+	 * orientated.
+	 */
+	if ((alloc_flags & ALLOC_NO_WATERMARKS) || !(alloc_flags & ALLOC_CPUSET)) {
+		ac->zonelist = node_zonelist(numa_node_id(), gfp_mask);
+		ac->preferred_zoneref = first_zones_zonelist(ac->zonelist,
+					ac->high_zoneidx, ac->nodemask);
+	}
+
 	/* This is the last chance, in general, before the goto nopage. */
 	page = get_page_from_freelist(gfp_mask, order,
 					alloc_flags & ~ALLOC_NO_WATERMARKS, ac);
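The zonelist reset now happens once, up front, whenever the allocation may ignore memory policies: either watermarks are ignored outright (ALLOC_NO_WATERMARKS set) or cpuset enforcement is off (ALLOC_CPUSET clear). Crucially, preferred_zoneref is recomputed along with the zonelist, so the statistics zone and the iterator starting point stay consistent. A self-contained sketch of just the flag test; the bit values here are made up for illustration, the kernel defines its own:

#include <stdio.h>

/* Illustrative bit values only; not the kernel's definitions. */
#define ALLOC_NO_WATERMARKS	0x04
#define ALLOC_CPUSET		0x40

/* True when the allocation context may ignore memory policies. */
static int ignores_mempolicies(unsigned int alloc_flags)
{
	return (alloc_flags & ALLOC_NO_WATERMARKS) ||
	       !(alloc_flags & ALLOC_CPUSET);
}

int main(void)
{
	/* High-priority allocation ignoring watermarks: reset applies. */
	printf("%d\n", ignores_mempolicies(ALLOC_NO_WATERMARKS | ALLOC_CPUSET));
	/* Normal cpuset-constrained allocation: no reset. */
	printf("%d\n", ignores_mempolicies(ALLOC_CPUSET));
	return 0;
}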
@@ -3604,12 +3623,6 @@ retry:
 
 	/* Allocate without watermarks if the context allows */
 	if (alloc_flags & ALLOC_NO_WATERMARKS) {
-		/*
-		 * Ignore mempolicies if ALLOC_NO_WATERMARKS on the grounds
-		 * the allocation is high priority and these type of
-		 * allocations are system rather than user orientated
-		 */
-		ac->zonelist = node_zonelist(numa_node_id(), gfp_mask);
		page = get_page_from_freelist(gfp_mask, order,
 						ALLOC_NO_WATERMARKS, ac);
 		if (page)
@@ -3808,7 +3821,11 @@ retry_cpuset:
 	/* Dirty zone balancing only done in the fast path */
 	ac.spread_dirty_pages = (gfp_mask & __GFP_WRITE);
 
-	/* The preferred zone is used for statistics later */
+	/*
+	 * The preferred zone is used for statistics but crucially it is
+	 * also used as the starting point for the zonelist iterator. It
+	 * may get reset for allocations that ignore memory policies.
+	 */
 	ac.preferred_zoneref = first_zones_zonelist(ac.zonelist,
 					ac.high_zoneidx, ac.nodemask);
 	if (!ac.preferred_zoneref) {
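As the expanded comment stresses, preferred_zoneref does double duty: it is where zone statistics are accounted and where every zonelist walk begins, which is exactly why the slow path above recomputes it when it swaps in the policy-free zonelist. A compact userspace model of that dual role; first_usable() is a hypothetical stand-in for first_zones_zonelist(), and the struct ctx fields loosely mirror the alloc_context:

#include <stdio.h>

#define NZONES 4

struct ctx {
	const int *zonelist;	/* usable[] flags per zone */
	int preferred;		/* stats zone AND scan starting point */
};

/* Stand-in for first_zones_zonelist(): first zone allowed by policy. */
static int first_usable(const int *zonelist)
{
	for (int i = 0; i < NZONES; i++)
		if (zonelist[i])
			return i;
	return -1;
}

int main(void)
{
	int policy_list[NZONES]   = { 0, 1, 1, 0 };	/* policy-constrained */
	int nopolicy_list[NZONES] = { 1, 1, 1, 1 };	/* policies ignored */
	struct ctx ac = { policy_list, first_usable(policy_list) };

	printf("fast path starts at zone %d\n", ac.preferred);

	/* Slow path swaps the zonelist; the cursor must be recomputed too. */
	ac.zonelist = nopolicy_list;
	ac.preferred = first_usable(ac.zonelist);
	printf("slow path starts at zone %d\n", ac.preferred);
	return 0;
}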