Diffstat (limited to 'mm/page_alloc.c')
-rw-r--r--  mm/page_alloc.c | 41
1 file changed, 22 insertions(+), 19 deletions(-)
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index e0f2cdf9d8b1..d052abbe3063 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -882,7 +882,7 @@ retry_reserve:
  */
 static int rmqueue_bulk(struct zone *zone, unsigned int order,
                        unsigned long count, struct list_head *list,
-                       int migratetype)
+                       int migratetype, int cold)
 {
        int i;

@@ -901,7 +901,10 @@ static int rmqueue_bulk(struct zone *zone, unsigned int order,
                 * merge IO requests if the physical pages are ordered
                 * properly.
                 */
-               list_add(&page->lru, list);
+               if (likely(cold == 0))
+                       list_add(&page->lru, list);
+               else
+                       list_add_tail(&page->lru, list);
                set_page_private(page, migratetype);
                list = &page->lru;
        }
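
Note: the new cold parameter decides where bulk-allocated pages land on the per-cpu free list: hot pages stay at the head so the next allocation reuses a cache-warm page, while cold ones (requested with __GFP_COLD) go to the tail. The consumer side in buffered_rmqueue() already walks the list from the matching end; roughly, paraphrased from the same file in this kernel version:

    /* Hot requests scan from the head, cold requests from the tail, so
     * pages queued at the tail are handed out last to hot callers but
     * first to cold ones. */
    if (cold) {
            list_for_each_entry_reverse(page, &pcp->list, lru)
                    if (page_private(page) == migratetype)
                            break;
    } else {
            list_for_each_entry(page, &pcp->list, lru)
                    if (page_private(page) == migratetype)
                            break;
    }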
@@ -1119,7 +1122,8 @@ again:
                local_irq_save(flags);
                if (!pcp->count) {
                        pcp->count = rmqueue_bulk(zone, 0,
-                                       pcp->batch, &pcp->list, migratetype);
+                                       pcp->batch, &pcp->list,
+                                       migratetype, cold);
                        if (unlikely(!pcp->count))
                                goto failed;
                }
@@ -1138,7 +1142,8 @@ again:
                /* Allocate more to the pcp list if necessary */
                if (unlikely(&page->lru == &pcp->list)) {
                        pcp->count += rmqueue_bulk(zone, 0,
-                                       pcp->batch, &pcp->list, migratetype);
+                                       pcp->batch, &pcp->list,
+                                       migratetype, cold);
                        page = list_entry(pcp->list.next, struct page, lru);
                }

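
Note: both call sites above are in buffered_rmqueue(), which derives cold from the caller's GFP flags. A caller that will not touch the page with the CPU soon (for example, a buffer destined for device DMA) can ask for a cache-cold page; a hypothetical caller:

    int cold = !!(gfp_flags & __GFP_COLD);  /* as computed in buffered_rmqueue() */

    /* Hypothetical caller requesting a cache-cold page: */
    struct page *page = alloc_page(GFP_KERNEL | __GFP_COLD);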
@@ -1666,7 +1671,7 @@ __alloc_pages_high_priority(gfp_t gfp_mask, unsigned int order,
                        preferred_zone, migratetype);

                if (!page && gfp_mask & __GFP_NOFAIL)
-                       congestion_wait(WRITE, HZ/50);
+                       congestion_wait(BLK_RW_ASYNC, HZ/50);
        } while (!page && (gfp_mask & __GFP_NOFAIL));

        return page;
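
Note: congestion_wait()'s first argument changed in this release from a READ/WRITE data direction to a sync/async queue selector; passing BLK_RW_ASYNC preserves the old behaviour of waiting for write-back (async) congestion to ease. The timeout is in jiffies, and HZ jiffies make one second:

    /* Sleep until async write-back congestion clears or 20ms elapses,
     * whichever comes first: HZ/50 jiffies == (1/50)s == 20ms. */
    congestion_wait(BLK_RW_ASYNC, HZ/50);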
@@ -1740,8 +1745,10 @@ __alloc_pages_slowpath(gfp_t gfp_mask, unsigned int order,
         * be using allocators in order of preference for an area that is
         * too large.
         */
-       if (WARN_ON_ONCE(order >= MAX_ORDER))
+       if (order >= MAX_ORDER) {
+               WARN_ON_ONCE(!(gfp_mask & __GFP_NOWARN));
                return NULL;
+       }

        /*
         * GFP_THISNODE (meaning __GFP_THISNODE, __GFP_NORETRY and
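
Note: the rewritten check still refuses order >= MAX_ORDER requests, but now only warns when the caller did not pass __GFP_NOWARN. That lets callers deliberately probe for large contiguous runs without tripping the one-time warning; a hypothetical probing loop:

    /* Hypothetical: try progressively smaller orders. __GFP_NOWARN keeps
     * the out-of-range first attempt quiet; alloc_pages() simply fails. */
    struct page *page = NULL;
    int order;

    for (order = MAX_ORDER; order >= 0 && !page; order--)
            page = alloc_pages(GFP_KERNEL | __GFP_NOWARN | __GFP_NORETRY, order);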
@@ -1789,6 +1796,10 @@ rebalance:
        if (p->flags & PF_MEMALLOC)
                goto nopage;

+       /* Avoid allocations with no watermarks from looping endlessly */
+       if (test_thread_flag(TIF_MEMDIE) && !(gfp_mask & __GFP_NOFAIL))
+               goto nopage;
+
        /* Try direct reclaim and then allocating */
        page = __alloc_pages_direct_reclaim(gfp_mask, order,
                                        zonelist, high_zoneidx,
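
Note: TIF_MEMDIE marks a task the OOM killer has chosen to die. Such a task already allocates with no watermarks, so if memory still cannot be found, looping through reclaim cannot help; unless the caller insisted with __GFP_NOFAIL, it is better to fail the allocation and let the task exit and release its memory. For context, paraphrased from mm/oom_kill.c of this era, where the flag is set on the victim:

    /* The OOM killer marks its victim and kills it; TIF_MEMDIE then
     * grants watermark-free allocations so the task can exit quickly. */
    set_tsk_thread_flag(p, TIF_MEMDIE);
    force_sig(SIGKILL, p);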
@@ -1831,7 +1842,7 @@ rebalance:
        pages_reclaimed += did_some_progress;
        if (should_alloc_retry(gfp_mask, order, pages_reclaimed)) {
                /* Wait for some write requests to complete then retry */
-               congestion_wait(WRITE, HZ/50);
+               congestion_wait(BLK_RW_ASYNC, HZ/50);
                goto rebalance;
        }

@@ -1983,7 +1994,7 @@ void *alloc_pages_exact(size_t size, gfp_t gfp_mask)
        unsigned long alloc_end = addr + (PAGE_SIZE << order);
        unsigned long used = addr + PAGE_ALIGN(size);

-       split_page(virt_to_page(addr), order);
+       split_page(virt_to_page((void *)addr), order);
        while (used < alloc_end) {
                free_page(used);
                used += PAGE_SIZE;
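
Note: addr is an unsigned long while virt_to_page() expects a pointer on several architectures, so the cast only silences a type-mismatch warning; behaviour is unchanged. For reference, a hypothetical use of alloc_pages_exact(), which this function implements by splitting a high-order allocation and freeing the unused tail pages:

    /* Hypothetical: get exactly 12KB instead of rounding up to the next
     * power-of-two allocation (16KB, order 2). */
    void *buf = alloc_pages_exact(12 * 1024, GFP_KERNEL);

    if (buf) {
            /* ... use the buffer ... */
            free_pages_exact(buf, 12 * 1024);
    }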
@@ -4745,8 +4756,10 @@ void *__init alloc_large_system_hash(const char *tablename,
                         * some pages at the end of hash table which
                         * alloc_pages_exact() automatically does
                         */
-                       if (get_order(size) < MAX_ORDER)
+                       if (get_order(size) < MAX_ORDER) {
                                table = alloc_pages_exact(size, GFP_ATOMIC);
+                               kmemleak_alloc(table, size, 1, GFP_ATOMIC);
+                       }
                }
        } while (!table && size > PAGE_SIZE && --log2qty);

@@ -4764,16 +4777,6 @@ void *__init alloc_large_system_hash(const char *tablename,
        if (_hash_mask)
                *_hash_mask = (1 << log2qty) - 1;

-       /*
-        * If hashdist is set, the table allocation is done with __vmalloc()
-        * which invokes the kmemleak_alloc() callback. This function may also
-        * be called before the slab and kmemleak are initialised when
-        * kmemleak simply buffers the request to be executed later
-        * (GFP_ATOMIC flag ignored in this case).
-        */
-       if (!hashdist)
-               kmemleak_alloc(table, size, 1, GFP_ATOMIC);
-
        return table;
 }

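
Note: the two kmemleak hunks above are one logical change. The hashdist/__vmalloc() path already reports its allocation through kmemleak's vmalloc hook, but alloc_pages_exact() bypasses the slab allocator and is invisible to kmemleak, so the table is now registered explicitly at the allocation site rather than conditionally after the loop. The general pattern for any allocation kmemleak cannot see, as a sketch:

    /* min_count == 1: report the object as a leak if kmemleak's scan
     * finds no remaining references to it. */
    table = alloc_pages_exact(size, GFP_ATOMIC);
    kmemleak_alloc(table, size, 1, GFP_ATOMIC);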