author	Russell King <rmk@dyn-67.arm.linux.org.uk>	2009-09-12 07:02:26 -0400
committer	Russell King <rmk+kernel@arm.linux.org.uk>	2009-09-12 07:02:26 -0400
commit	ddd559b13f6d2fe3ad68c4b3f5235fd3c2eae4e3 (patch)
tree	d827bca3fc825a0ac33efbcd493713be40fcc812 /mm/page_alloc.c
parent	cf7a2b4fb6a9b86779930a0a123b0df41aa9208f (diff)
parent	f17a1f06d2fa93f4825be572622eb02c4894db4e (diff)
Merge branch 'devel-stable' into devel
Conflicts:
	MAINTAINERS
	arch/arm/mm/fault.c
Diffstat (limited to 'mm/page_alloc.c')
-rw-r--r--	mm/page_alloc.c	35
1 file changed, 19 insertions(+), 16 deletions(-)
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index a35eeab2724c..d052abbe3063 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -882,7 +882,7 @@ retry_reserve:
  */
 static int rmqueue_bulk(struct zone *zone, unsigned int order,
 			unsigned long count, struct list_head *list,
-			int migratetype)
+			int migratetype, int cold)
 {
 	int i;
 
@@ -901,7 +901,10 @@ static int rmqueue_bulk(struct zone *zone, unsigned int order,
 		 * merge IO requests if the physical pages are ordered
 		 * properly.
 		 */
-		list_add(&page->lru, list);
+		if (likely(cold == 0))
+			list_add(&page->lru, list);
+		else
+			list_add_tail(&page->lru, list);
 		set_page_private(page, migratetype);
 		list = &page->lru;
 	}
@@ -1119,7 +1122,8 @@ again:
 		local_irq_save(flags);
 		if (!pcp->count) {
 			pcp->count = rmqueue_bulk(zone, 0,
-					pcp->batch, &pcp->list, migratetype);
+					pcp->batch, &pcp->list,
+					migratetype, cold);
 			if (unlikely(!pcp->count))
 				goto failed;
 		}
@@ -1138,7 +1142,8 @@ again:
 		/* Allocate more to the pcp list if necessary */
 		if (unlikely(&page->lru == &pcp->list)) {
 			pcp->count += rmqueue_bulk(zone, 0,
-					pcp->batch, &pcp->list, migratetype);
+					pcp->batch, &pcp->list,
+					migratetype, cold);
 			page = list_entry(pcp->list.next, struct page, lru);
 		}
 
@@ -1740,8 +1745,10 @@ __alloc_pages_slowpath(gfp_t gfp_mask, unsigned int order,
 	 * be using allocators in order of preference for an area that is
 	 * too large.
 	 */
-	if (WARN_ON_ONCE(order >= MAX_ORDER))
+	if (order >= MAX_ORDER) {
+		WARN_ON_ONCE(!(gfp_mask & __GFP_NOWARN));
 		return NULL;
+	}
 
 	/*
 	 * GFP_THISNODE (meaning __GFP_THISNODE, __GFP_NORETRY and
@@ -1789,6 +1796,10 @@ rebalance:
 	if (p->flags & PF_MEMALLOC)
 		goto nopage;
 
+	/* Avoid allocations with no watermarks from looping endlessly */
+	if (test_thread_flag(TIF_MEMDIE) && !(gfp_mask & __GFP_NOFAIL))
+		goto nopage;
+
 	/* Try direct reclaim and then allocating */
 	page = __alloc_pages_direct_reclaim(gfp_mask, order,
 					zonelist, high_zoneidx,
@@ -4745,8 +4756,10 @@ void *__init alloc_large_system_hash(const char *tablename,
 			 * some pages at the end of hash table which
 			 * alloc_pages_exact() automatically does
 			 */
-			if (get_order(size) < MAX_ORDER)
+			if (get_order(size) < MAX_ORDER) {
 				table = alloc_pages_exact(size, GFP_ATOMIC);
+				kmemleak_alloc(table, size, 1, GFP_ATOMIC);
+			}
 		}
 	} while (!table && size > PAGE_SIZE && --log2qty);
 
@@ -4764,16 +4777,6 @@ void *__init alloc_large_system_hash(const char *tablename,
 	if (_hash_mask)
 		*_hash_mask = (1 << log2qty) - 1;
 
-	/*
-	 * If hashdist is set, the table allocation is done with __vmalloc()
-	 * which invokes the kmemleak_alloc() callback. This function may also
-	 * be called before the slab and kmemleak are initialised when
-	 * kmemleak simply buffers the request to be executed later
-	 * (GFP_ATOMIC flag ignored in this case).
-	 */
-	if (!hashdist)
-		kmemleak_alloc(table, size, 1, GFP_ATOMIC);
-
 	return table;
 }
 
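Note on the rmqueue_bulk() hunks above: the patch makes the per-cpu page list
double-ended. Hot pages keep going to the head via list_add(), while cold pages
(cold != 0) are appended with list_add_tail(), so successive cold refills
preserve ascending PFN order at the cold end of the list; the comment retained
in the hunk explains why that matters for IO request merging. Below is a
minimal user-space sketch of that insertion discipline. The list helpers mimic
the semantics of the kernel's <linux/list.h>, and struct fake_page, queue_page()
and the pfn values are illustrative stand-ins, not kernel code.

#include <stdio.h>
#include <stddef.h>

/* Minimal stand-in for the kernel's circular doubly-linked list. */
struct list_head {
	struct list_head *next, *prev;
};

static void init_list_head(struct list_head *h)
{
	h->next = h;
	h->prev = h;
}

static void __list_insert(struct list_head *entry,
			  struct list_head *prev, struct list_head *next)
{
	next->prev = entry;
	entry->next = next;
	entry->prev = prev;
	prev->next = entry;
}

/* Mimics list_add(): insert right after the list head. */
static void list_add_head(struct list_head *entry, struct list_head *head)
{
	__list_insert(entry, head, head->next);
}

/* Mimics list_add_tail(): insert right before the list head. */
static void list_add_tail_(struct list_head *entry, struct list_head *head)
{
	__list_insert(entry, head->prev, head);
}

/* Illustrative stand-in for struct page; pfn is the physical frame number. */
struct fake_page {
	unsigned long pfn;
	struct list_head lru;
};

/*
 * The placement decision from the patched rmqueue_bulk(): hot pages are
 * pushed on the head, cold pages appended to the tail so the cold end
 * of the list keeps ascending PFN order across refills.
 */
static void queue_page(struct fake_page *page, struct list_head *list, int cold)
{
	if (cold == 0)
		list_add_head(&page->lru, list);
	else
		list_add_tail_(&page->lru, list);
}

int main(void)
{
	struct list_head pcp_list;
	struct fake_page pages[] = { {10}, {11}, {12}, {13} };
	struct list_head *pos;

	init_list_head(&pcp_list);
	queue_page(&pages[0], &pcp_list, 1);	/* cold refill, ascending pfn */
	queue_page(&pages[1], &pcp_list, 1);
	queue_page(&pages[2], &pcp_list, 0);	/* hot frees land at the head */
	queue_page(&pages[3], &pcp_list, 0);

	/* Head-to-tail walk: hottest first, cold pages still in PFN order. */
	for (pos = pcp_list.next; pos != &pcp_list; pos = pos->next) {
		struct fake_page *pg = (struct fake_page *)
			((char *)pos - offsetof(struct fake_page, lru));
		printf("pfn %lu\n", pg->pfn);
	}
	return 0;	/* prints: pfn 13, 12, 10, 11 */
}

Compiled as plain C, this prints 13 and 12 (the most recently freed hot pages)
from the head, then 10 and 11 still in PFN order at the tail. The sketch only
demonstrates the insertion discipline; it deliberately leaves out the zone
locking, migratetype bookkeeping, and buffered_rmqueue() plumbing that the
real patch threads the cold flag through.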