Diffstat (limited to 'mm/page_alloc.c')
-rw-r--r--	mm/page_alloc.c	21 ++++++++++++++++-----
1 file changed, 16 insertions(+), 5 deletions(-)
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index caa92689aac9..d052abbe3063 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -882,7 +882,7 @@ retry_reserve:
  */
 static int rmqueue_bulk(struct zone *zone, unsigned int order,
 			unsigned long count, struct list_head *list,
-			int migratetype)
+			int migratetype, int cold)
 {
 	int i;
 
@@ -901,7 +901,10 @@ static int rmqueue_bulk(struct zone *zone, unsigned int order,
 		 * merge IO requests if the physical pages are ordered
 		 * properly.
 		 */
-		list_add(&page->lru, list);
+		if (likely(cold == 0))
+			list_add(&page->lru, list);
+		else
+			list_add_tail(&page->lru, list);
 		set_page_private(page, migratetype);
 		list = &page->lru;
 	}
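
The two hunks above teach rmqueue_bulk() about cache temperature: hot pages are pushed onto the head of the per-cpu list with list_add() so they are handed out again while their cache lines are still warm, while cold requests get pages queued at the tail with list_add_tail() so they drain last. Below is a minimal userspace sketch of that head/tail discipline; the list helpers mirror the kernel's, but fake_page and the scenario in main() are illustrative assumptions, not code from this patch.

/*
 * Minimal userspace sketch, not kernel code: hot entries go to the head
 * of the list so they are reused while still cache-warm, cold entries go
 * to the tail so they are handed out last.
 */
#include <stddef.h>
#include <stdio.h>

struct list_head { struct list_head *next, *prev; };

static void INIT_LIST_HEAD(struct list_head *h) { h->next = h->prev = h; }

static void __list_add(struct list_head *new,
		       struct list_head *prev, struct list_head *next)
{
	next->prev = new;
	new->next = next;
	new->prev = prev;
	prev->next = new;
}

/* Head insertion, as used for hot pages. */
static void list_add(struct list_head *new, struct list_head *head)
{
	__list_add(new, head, head->next);
}

/* Tail insertion, as used for cold pages. */
static void list_add_tail(struct list_head *new, struct list_head *head)
{
	__list_add(new, head->prev, head);
}

struct fake_page { int id; struct list_head lru; };

int main(void)
{
	struct fake_page pages[4] = { {0}, {1}, {2}, {3} };
	struct list_head pcp_list;
	int cold = 1;	/* pretend the caller asked for cache-cold pages */

	INIT_LIST_HEAD(&pcp_list);
	for (int i = 0; i < 4; i++) {
		if (cold == 0)
			list_add(&pages[i].lru, &pcp_list);	 /* hot: head */
		else
			list_add_tail(&pages[i].lru, &pcp_list); /* cold: tail */
	}

	/* Walking from the head: a cold fill prints 0 1 2 3; a hot fill in
	 * this sketch would print 3 2 1 0 (most recently freed first). */
	for (struct list_head *p = pcp_list.next; p != &pcp_list; p = p->next) {
		struct fake_page *fp = (struct fake_page *)
			((char *)p - offsetof(struct fake_page, lru));
		printf("%d ", fp->id);
	}
	printf("\n");
	return 0;
}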
@@ -1119,7 +1122,8 @@ again:
 		local_irq_save(flags);
 		if (!pcp->count) {
 			pcp->count = rmqueue_bulk(zone, 0,
-					pcp->batch, &pcp->list, migratetype);
+					pcp->batch, &pcp->list,
+					migratetype, cold);
 			if (unlikely(!pcp->count))
 				goto failed;
 		}
@@ -1138,7 +1142,8 @@ again:
 		/* Allocate more to the pcp list if necessary */
 		if (unlikely(&page->lru == &pcp->list)) {
 			pcp->count += rmqueue_bulk(zone, 0,
-					pcp->batch, &pcp->list, migratetype);
+					pcp->batch, &pcp->list,
+					migratetype, cold);
 			page = list_entry(pcp->list.next, struct page, lru);
 		}
 
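
The two hunks above are plumbing: both per-cpu refill paths in buffered_rmqueue() now forward the caller's cold flag into rmqueue_bulk(). In this era of page_alloc.c that flag comes from the gfp flags on the caller's side, roughly as below (quoted from memory of the surrounding code, so treat it as an assumption rather than part of this diff):

	/* Assumed caller-side derivation, not shown in this diff: callers that
	 * want cache-cold pages pass __GFP_COLD, and buffered_rmqueue() turns
	 * that into the 0/1 cold flag handed to rmqueue_bulk(). */
	int cold = !!(gfp_flags & __GFP_COLD);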
@@ -1740,8 +1745,10 @@ __alloc_pages_slowpath(gfp_t gfp_mask, unsigned int order,
 	 * be using allocators in order of preference for an area that is
 	 * too large.
 	 */
-	if (WARN_ON_ONCE(order >= MAX_ORDER))
+	if (order >= MAX_ORDER) {
+		WARN_ON_ONCE(!(gfp_mask & __GFP_NOWARN));
 		return NULL;
+	}
 
 	/*
 	 * GFP_THISNODE (meaning __GFP_THISNODE, __GFP_NORETRY and
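
The hunk above relaxes the sanity check for impossible allocation orders: the allocation still fails, but the one-time warning is suppressed when the caller explicitly asked for silence with __GFP_NOWARN. An illustrative caller that benefits (an assumption, not taken from this patch) is one probing for an over-sized contiguous buffer and falling back quietly:

#include <linux/gfp.h>

/* Hypothetical probe, for illustration only: ask for a buffer that is
 * deliberately too large.  Before this hunk, WARN_ON_ONCE() fired even
 * though __GFP_NOWARN was set; after it, the request simply returns NULL
 * and the caller can fall back to a smaller allocation. */
static struct page *probe_huge_buffer(void)
{
	return alloc_pages(GFP_KERNEL | __GFP_NOWARN | __GFP_NORETRY,
			   MAX_ORDER);	/* order >= MAX_ORDER, so NULL */
}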
@@ -1789,6 +1796,10 @@ rebalance:
 	if (p->flags & PF_MEMALLOC)
 		goto nopage;
 
+	/* Avoid allocations with no watermarks from looping endlessly */
+	if (test_thread_flag(TIF_MEMDIE) && !(gfp_mask & __GFP_NOFAIL))
+		goto nopage;
+
 	/* Try direct reclaim and then allocating */
 	page = __alloc_pages_direct_reclaim(gfp_mask, order,
 					zonelist, high_zoneidx,
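
The final hunk stops an OOM-killed task from spinning in the slow path: a task with TIF_MEMDIE may allocate without watermarks, so once even the reserves are empty it could otherwise loop in reclaim forever. It now jumps to nopage unless the caller insisted on __GFP_NOFAIL. A standalone sketch of that decision (assumed names, plain userspace C rather than kernel code):

#include <stdbool.h>
#include <stdio.h>

/* bail_to_nopage() is an assumed name; it restates the condition the new
 * check adds to __alloc_pages_slowpath(). */
static bool bail_to_nopage(bool tif_memdie, bool gfp_nofail)
{
	return tif_memdie && !gfp_nofail;
}

int main(void)
{
	/* Print the guard's truth table. */
	for (int memdie = 0; memdie <= 1; memdie++)
		for (int nofail = 0; nofail <= 1; nofail++)
			printf("TIF_MEMDIE=%d __GFP_NOFAIL=%d -> %s\n",
			       memdie, nofail,
			       bail_to_nopage(memdie, nofail) ?
					"goto nopage" : "keep trying");
	return 0;
}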
