Diffstat (limited to 'mm/page_alloc.c')
 -rw-r--r--  mm/page_alloc.c  |  91
 1 file changed, 52 insertions(+), 39 deletions(-)
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index 644fb75f6f24..dd36da6ffef5 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -3577,6 +3577,14 @@ retry_cpuset:
 	no_progress_loops = 0;
 	compact_priority = DEF_COMPACT_PRIORITY;
 	cpuset_mems_cookie = read_mems_allowed_begin();
+
+	/*
+	 * The fast path uses conservative alloc_flags to succeed only until
+	 * kswapd needs to be woken up, and to avoid the cost of setting up
+	 * alloc_flags precisely. So we do that now.
+	 */
+	alloc_flags = gfp_to_alloc_flags(gfp_mask);
+
 	/*
 	 * We need to recalculate the starting point for the zonelist iterator
 	 * because we might have used different nodemask in the fast path, or
@@ -3588,14 +3596,6 @@ retry_cpuset:
 	if (!ac->preferred_zoneref->zone)
 		goto nopage;
 
-
-	/*
-	 * The fast path uses conservative alloc_flags to succeed only until
-	 * kswapd needs to be woken up, and to avoid the cost of setting up
-	 * alloc_flags precisely. So we do that now.
-	 */
-	alloc_flags = gfp_to_alloc_flags(gfp_mask);
-
 	if (gfp_mask & __GFP_KSWAPD_RECLAIM)
 		wake_all_kswapds(order, ac);
 
@@ -3672,35 +3672,21 @@ retry:
 		goto got_pg;
 
 	/* Caller is not willing to reclaim, we can't balance anything */
-	if (!can_direct_reclaim) {
-		/*
-		 * All existing users of the __GFP_NOFAIL are blockable, so warn
-		 * of any new users that actually allow this type of allocation
-		 * to fail.
-		 */
-		WARN_ON_ONCE(gfp_mask & __GFP_NOFAIL);
+	if (!can_direct_reclaim)
 		goto nopage;
-	}
 
-	/* Avoid recursion of direct reclaim */
-	if (current->flags & PF_MEMALLOC) {
-		/*
-		 * __GFP_NOFAIL request from this context is rather bizarre
-		 * because we cannot reclaim anything and only can loop waiting
-		 * for somebody to do a work for us.
-		 */
-		if (WARN_ON_ONCE(gfp_mask & __GFP_NOFAIL)) {
-			cond_resched();
-			goto retry;
-		}
-		goto nopage;
+	/* Make sure we know about allocations which stall for too long */
+	if (time_after(jiffies, alloc_start + stall_timeout)) {
+		warn_alloc(gfp_mask, ac->nodemask,
+			"page allocation stalls for %ums, order:%u",
+			jiffies_to_msecs(jiffies-alloc_start), order);
+		stall_timeout += 10 * HZ;
 	}
 
-	/* Avoid allocations with no watermarks from looping endlessly */
-	if (test_thread_flag(TIF_MEMDIE) && !(gfp_mask & __GFP_NOFAIL))
+	/* Avoid recursion of direct reclaim */
+	if (current->flags & PF_MEMALLOC)
 		goto nopage;
 
-
 	/* Try direct reclaim and then allocating */
 	page = __alloc_pages_direct_reclaim(gfp_mask, order, alloc_flags, ac,
 							&did_some_progress);
@@ -3724,14 +3710,6 @@ retry:
 	if (order > PAGE_ALLOC_COSTLY_ORDER && !(gfp_mask & __GFP_REPEAT))
 		goto nopage;
 
-	/* Make sure we know about allocations which stall for too long */
-	if (time_after(jiffies, alloc_start + stall_timeout)) {
-		warn_alloc(gfp_mask, ac->nodemask,
-			"page allocation stalls for %ums, order:%u",
-			jiffies_to_msecs(jiffies-alloc_start), order);
-		stall_timeout += 10 * HZ;
-	}
-
 	if (should_reclaim_retry(gfp_mask, order, ac, alloc_flags,
 				 did_some_progress > 0, &no_progress_loops))
 		goto retry;
@@ -3760,6 +3738,10 @@ retry:
 	if (page)
 		goto got_pg;
 
+	/* Avoid allocations with no watermarks from looping endlessly */
+	if (test_thread_flag(TIF_MEMDIE))
+		goto nopage;
+
 	/* Retry as long as the OOM killer is making progress */
 	if (did_some_progress) {
 		no_progress_loops = 0;
@@ -3777,6 +3759,37 @@ nopage:
 	if (read_mems_allowed_retry(cpuset_mems_cookie))
 		goto retry_cpuset;
 
+	/*
+	 * Make sure that __GFP_NOFAIL request doesn't leak out and make sure
+	 * we always retry
+	 */
+	if (gfp_mask & __GFP_NOFAIL) {
+		/*
+		 * All existing users of the __GFP_NOFAIL are blockable, so warn
+		 * of any new users that actually require GFP_NOWAIT
+		 */
+		if (WARN_ON_ONCE(!can_direct_reclaim))
+			goto fail;
+
+		/*
+		 * PF_MEMALLOC request from this context is rather bizarre
+		 * because we cannot reclaim anything and only can loop waiting
+		 * for somebody to do a work for us
+		 */
+		WARN_ON_ONCE(current->flags & PF_MEMALLOC);
+
+		/*
+		 * non failing costly orders are a hard requirement which we
+		 * are not prepared for much so let's warn about these users
+		 * so that we can identify them and convert them to something
+		 * else.
+		 */
+		WARN_ON_ONCE(order > PAGE_ALLOC_COSTLY_ORDER);
+
+		cond_resched();
+		goto retry;
+	}
+fail:
 	warn_alloc(gfp_mask, ac->nodemask,
 			"page allocation failure: order:%u", order);
 got_pg:
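
Net effect, for readers scanning the hunks in isolation: alloc_flags is now set up once, right after the cpuset cookie is taken; the stall warning moves ahead of the direct-reclaim attempt so it fires on every pass through the retry loop; the TIF_MEMDIE check moves after the OOM-killer attempt and loses its __GFP_NOFAIL exemption; and all __GFP_NOFAIL policy is consolidated under the nopage: label behind a new fail: target. Below is a condensed sketch of the patched slowpath, distilled from the '+' and context lines above; "..." marks elided code, and __alloc_pages_may_oom() is the surrounding function's OOM step, not visible in these hunks.

	/*
	 * Condensed __alloc_pages_slowpath() after this patch (sketch only;
	 * "..." elides code the diff does not touch).
	 */
retry_cpuset:
	cpuset_mems_cookie = read_mems_allowed_begin();

	/* moved up: alloc_flags is ready before the first failure exit */
	alloc_flags = gfp_to_alloc_flags(gfp_mask);
	...
retry:
	...
	if (!can_direct_reclaim)
		goto nopage;		/* __GFP_NOFAIL warning now at nopage: */

	/* moved before direct reclaim: stalls are reported on every pass */
	if (time_after(jiffies, alloc_start + stall_timeout)) {
		warn_alloc(...);
		stall_timeout += 10 * HZ;
	}

	if (current->flags & PF_MEMALLOC)
		goto nopage;
	...
	page = __alloc_pages_may_oom(gfp_mask, order, ac, &did_some_progress);
	if (page)
		goto got_pg;

	/* moved after the OOM attempt; no __GFP_NOFAIL exemption anymore */
	if (test_thread_flag(TIF_MEMDIE))
		goto nopage;
	...
nopage:
	...
	/* all __GFP_NOFAIL policy now lives in one place */
	if (gfp_mask & __GFP_NOFAIL) {
		if (WARN_ON_ONCE(!can_direct_reclaim))
			goto fail;	/* GFP_NOWAIT | __GFP_NOFAIL is a bug */
		WARN_ON_ONCE(current->flags & PF_MEMALLOC);
		WARN_ON_ONCE(order > PAGE_ALLOC_COSTLY_ORDER);
		cond_resched();
		goto retry;		/* never fail a __GFP_NOFAIL request */
	}
fail:
	warn_alloc(gfp_mask, ac->nodemask,
			"page allocation failure: order:%u", order);
got_pg:
	return page;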
