author    Linus Torvalds <torvalds@linux-foundation.org>  2013-11-13 01:45:43 -0500
committer Linus Torvalds <torvalds@linux-foundation.org>  2013-11-13 01:45:43 -0500
commit    5cbb3d216e2041700231bcfc383ee5f8b7fc8b74 (patch)
tree      a738fa82dbcefa9bd283c08bc67f38827be63937 /mm/page_alloc.c
parent    9bc9ccd7db1c9f043f75380b5a5b94912046a60e (diff)
parent    4e9b45a19241354daec281d7a785739829b52359 (diff)
Merge branch 'akpm' (patches from Andrew Morton)
Merge first patch-bomb from Andrew Morton:
 "Quite a lot of other stuff is banked up awaiting further
  next->mainline merging, but this batch contains:

  - Lots of random misc patches
  - OCFS2
  - Most of MM
  - backlight updates
  - lib/ updates
  - printk updates
  - checkpatch updates
  - epoll tweaking
  - rtc updates
  - hfs
  - hfsplus
  - documentation
  - procfs
  - update gcov to gcc-4.7 format
  - IPC"

* emailed patches from Andrew Morton <akpm@linux-foundation.org>: (269 commits)
  ipc, msg: fix message length check for negative values
  ipc/util.c: remove unnecessary work pending test
  devpts: plug the memory leak in kill_sb
  ./Makefile: export initial ramdisk compression config option
  init/Kconfig: add option to disable kernel compression
  drivers: w1: make w1_slave::flags long to avoid memory corruption
  drivers/w1/masters/ds1wm.c: use dev_get_platdata()
  drivers/memstick/core/ms_block.c: fix unreachable state in h_msb_read_page()
  drivers/memstick/core/mspro_block.c: fix attributes array allocation
  drivers/pps/clients/pps-gpio.c: remove redundant of_match_ptr
  kernel/panic.c: reduce 1 byte usage for print tainted buffer
  gcov: reuse kbasename helper
  kernel/gcov/fs.c: use pr_warn()
  kernel/module.c: use pr_foo()
  gcov: compile specific gcov implementation based on gcc version
  gcov: add support for gcc 4.7 gcov format
  gcov: move gcov structs definitions to a gcc version specific file
  kernel/taskstats.c: return -ENOMEM when alloc memory fails in add_del_listener()
  kernel/taskstats.c: add nla_nest_cancel() for failure processing between nla_nest_start() and nla_nest_end()
  kernel/sysctl_binary.c: use scnprintf() instead of snprintf()
  ...
Diffstat (limited to 'mm/page_alloc.c')
-rw-r--r--  mm/page_alloc.c  34
1 file changed, 13 insertions(+), 21 deletions(-)
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index 73d812f16dde..580a5f075ed0 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -234,8 +234,8 @@ int page_group_by_mobility_disabled __read_mostly;
 
 void set_pageblock_migratetype(struct page *page, int migratetype)
 {
-
-	if (unlikely(page_group_by_mobility_disabled))
+	if (unlikely(page_group_by_mobility_disabled &&
+		     migratetype < MIGRATE_PCPTYPES))
 		migratetype = MIGRATE_UNMOVABLE;
 
 	set_pageblock_flags_group(page, (unsigned long)migratetype,
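The tightened check relies on how the migratetype values are ordered: everything below MIGRATE_PCPTYPES is an ordinary type that may safely be collapsed to MIGRATE_UNMOVABLE when grouping by mobility is off, while MIGRATE_RESERVE, MIGRATE_CMA and MIGRATE_ISOLATE pageblocks must keep their type regardless. A minimal sketch of that ordering and the clamp follows; the real enum in include/linux/mmzone.h guards MIGRATE_CMA and MIGRATE_ISOLATE behind config options, so this is an illustration, not the kernel source:

enum {
	MIGRATE_UNMOVABLE,
	MIGRATE_RECLAIMABLE,
	MIGRATE_MOVABLE,
	MIGRATE_PCPTYPES,	/* count of types on the per-cpu lists */
	MIGRATE_RESERVE = MIGRATE_PCPTYPES,
	MIGRATE_CMA,		/* CONFIG_CMA only in the real tree */
	MIGRATE_ISOLATE,	/* CONFIG_MEMORY_ISOLATION only */
};

/* Clamp only the ordinary types; CMA and isolated blocks keep theirs. */
static int clamped_migratetype(int migratetype, int grouping_disabled)
{
	if (grouping_disabled && migratetype < MIGRATE_PCPTYPES)
		return MIGRATE_UNMOVABLE;
	return migratetype;
}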
@@ -1027,6 +1027,10 @@ static int try_to_steal_freepages(struct zone *zone, struct page *page,
 {
 	int current_order = page_order(page);
 
+	/*
+	 * When borrowing from MIGRATE_CMA, we need to release the excess
+	 * buddy pages to CMA itself.
+	 */
 	if (is_migrate_cma(fallback_type))
 		return fallback_type;
 
@@ -1091,21 +1095,11 @@ __rmqueue_fallback(struct zone *zone, int order, int start_migratetype)
 		list_del(&page->lru);
 		rmv_page_order(page);
 
-		/*
-		 * Borrow the excess buddy pages as well, irrespective
-		 * of whether we stole freepages, or took ownership of
-		 * the pageblock or not.
-		 *
-		 * Exception: When borrowing from MIGRATE_CMA, release
-		 * the excess buddy pages to CMA itself.
-		 */
 		expand(zone, page, order, current_order, area,
-		       is_migrate_cma(migratetype)
-		     ? migratetype : start_migratetype);
+			  new_type);
 
-		trace_mm_page_alloc_extfrag(page, order,
-			current_order, start_migratetype, migratetype,
-			new_type == start_migratetype);
+		trace_mm_page_alloc_extfrag(page, order, current_order,
+			start_migratetype, migratetype, new_type);
 
 		return page;
 	}
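Taken together, these two hunks move the CMA special case out of __rmqueue_fallback(): try_to_steal_freepages() now makes the whole ownership decision and returns the migratetype the borrowed pages should carry, and the caller passes that new_type straight to expand() and the tracepoint. A hedged sketch of the decision, reusing the enum from the earlier sketch; it is simplified from mm/page_alloc.c of this period, and omits the pageblock-conversion side effects (change_pageblock_range(), move_freepages_block()) and the page_group_by_mobility_disabled case:

/* Returns the migratetype the borrowed pages should end up with. */
static int steal_decision(int current_order, int pageblock_order,
			  int start_type, int fallback_type)
{
	/* Borrowing from CMA: release the excess buddies to CMA itself. */
	if (fallback_type == MIGRATE_CMA)
		return fallback_type;

	/* Large chunks, or reclaimable allocations, are worth claiming;
	 * the real code also converts the pageblock's type here. */
	if (current_order >= pageblock_order / 2 ||
	    start_type == MIGRATE_RECLAIMABLE)
		return start_type;

	return fallback_type;
}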
@@ -1711,7 +1705,7 @@ bool zone_watermark_ok_safe(struct zone *z, int order, unsigned long mark,
  * comments in mmzone.h.  Reduces cache footprint of zonelist scans
  * that have to skip over a lot of full or unallowed zones.
  *
- * If the zonelist cache is present in the passed in zonelist, then
+ * If the zonelist cache is present in the passed zonelist, then
  * returns a pointer to the allowed node mask (either the current
  * tasks mems_allowed, or node_states[N_MEMORY].)
  *
@@ -2593,7 +2587,7 @@ rebalance:
 	 * running out of options and have to consider going OOM
 	 */
 	if (!did_some_progress) {
-		if ((gfp_mask & __GFP_FS) && !(gfp_mask & __GFP_NORETRY)) {
+		if (oom_gfp_allowed(gfp_mask)) {
 			if (oom_killer_disabled)
 				goto nopage;
 			/* Coredumps can quickly deplete all memory reserves */
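oom_gfp_allowed() is a helper introduced elsewhere in this same series (in include/linux/oom.h) so that the OOM eligibility test is spelled out once. Assuming it is a direct wrapper over the open-coded condition it replaces here, it amounts to:

static inline bool oom_gfp_allowed(gfp_t gfp_mask)
{
	/* Only allocations that may do filesystem I/O and have not asked
	 * to fail fast are allowed to fall back to the OOM killer. */
	return (gfp_mask & __GFP_FS) && !(gfp_mask & __GFP_NORETRY);
}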
@@ -3881,8 +3875,6 @@ static inline unsigned long wait_table_bits(unsigned long size)
 	return ffz(~size);
 }
 
-#define LONG_ALIGN(x) (((x)+(sizeof(long))-1)&~((sizeof(long))-1))
-
 /*
  * Check if a pageblock contains reserved pages
  */
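The deleted LONG_ALIGN macro had no remaining users. For the record, it implemented the usual power-of-two round-up: adding sizeof(long) - 1 and masking off the low bits rounds x up to the next multiple of sizeof(long). A tiny check of the arithmetic, assuming a 64-bit build where sizeof(long) == 8:

#include <assert.h>

#define LONG_ALIGN(x) (((x) + (sizeof(long)) - 1) & ~((sizeof(long)) - 1))

int main(void)
{
	assert(LONG_ALIGN(13) == 16);	/* (13 + 7) & ~7 == 20 & ~7 */
	assert(LONG_ALIGN(16) == 16);	/* already aligned: unchanged */
	return 0;
}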
@@ -4266,7 +4258,7 @@ static __meminit void zone_pcp_init(struct zone *zone)
 	 */
 	zone->pageset = &boot_pageset;
 
-	if (zone->present_pages)
+	if (populated_zone(zone))
 		printk(KERN_DEBUG "  %s zone: %lu pages, LIFO batch:%u\n",
 			zone->name, zone->present_pages,
 					 zone_batchsize(zone));
@@ -5160,7 +5152,7 @@ static void check_for_memory(pg_data_t *pgdat, int nid)
 
 	for (zone_type = 0; zone_type <= ZONE_MOVABLE - 1; zone_type++) {
 		struct zone *zone = &pgdat->node_zones[zone_type];
-		if (zone->present_pages) {
+		if (populated_zone(zone)) {
 			node_set_state(nid, N_HIGH_MEMORY);
 			if (N_NORMAL_MEMORY != N_HIGH_MEMORY &&
 			    zone_type <= ZONE_NORMAL)
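The last two hunks replace open-coded zone->present_pages tests with populated_zone(). In the include/linux/mmzone.h of this period the helper reads the same field, so behaviour is unchanged and only the intent becomes explicit:

static inline int populated_zone(struct zone *zone)
{
	return (!!zone->present_pages);
}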