about summary refs log tree commit diff stats
path: root/mm/page_alloc.c
diff options
context:
space:
mode:
Diffstat (limited to 'mm/page_alloc.c')
-rw-r--r--mm/page_alloc.c38
1 file changed, 28 insertions(+), 10 deletions(-)
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index c73dbbc1cd8f..b1061b1962f8 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -43,7 +43,9 @@
43 * initializer cleaner 43 * initializer cleaner
44 */ 44 */
45nodemask_t node_online_map = { { [0] = 1UL } }; 45nodemask_t node_online_map = { { [0] = 1UL } };
46EXPORT_SYMBOL(node_online_map);
46nodemask_t node_possible_map = NODE_MASK_ALL; 47nodemask_t node_possible_map = NODE_MASK_ALL;
48EXPORT_SYMBOL(node_possible_map);
47struct pglist_data *pgdat_list; 49struct pglist_data *pgdat_list;
48unsigned long totalram_pages; 50unsigned long totalram_pages;
49unsigned long totalhigh_pages; 51unsigned long totalhigh_pages;
@@ -799,14 +801,18 @@ __alloc_pages(unsigned int __nocast gfp_mask, unsigned int order,
799 } 801 }
800 802
801 /* This allocation should allow future memory freeing. */ 803 /* This allocation should allow future memory freeing. */
802 if (((p->flags & PF_MEMALLOC) || unlikely(test_thread_flag(TIF_MEMDIE))) && !in_interrupt()) { 804
803 /* go through the zonelist yet again, ignoring mins */ 805 if (((p->flags & PF_MEMALLOC) || unlikely(test_thread_flag(TIF_MEMDIE)))
804 for (i = 0; (z = zones[i]) != NULL; i++) { 806 && !in_interrupt()) {
805 if (!cpuset_zone_allowed(z)) 807 if (!(gfp_mask & __GFP_NOMEMALLOC)) {
806 continue; 808 /* go through the zonelist yet again, ignoring mins */
807 page = buffered_rmqueue(z, order, gfp_mask); 809 for (i = 0; (z = zones[i]) != NULL; i++) {
808 if (page) 810 if (!cpuset_zone_allowed(z))
809 goto got_pg; 811 continue;
812 page = buffered_rmqueue(z, order, gfp_mask);
813 if (page)
814 goto got_pg;
815 }
810 } 816 }
811 goto nopage; 817 goto nopage;
812 } 818 }
@@ -1351,8 +1357,7 @@ static int __init build_zonelists_node(pg_data_t *pgdat, struct zonelist *zoneli
1351#define MAX_NODE_LOAD (num_online_nodes()) 1357#define MAX_NODE_LOAD (num_online_nodes())
1352static int __initdata node_load[MAX_NUMNODES]; 1358static int __initdata node_load[MAX_NUMNODES];
1353/** 1359/**
1354 * find_next_best_node - find the next node that should appear in a given 1360 * find_next_best_node - find the next node that should appear in a given node's fallback list
1355 * node's fallback list
1356 * @node: node whose fallback list we're appending 1361 * @node: node whose fallback list we're appending
1357 * @used_node_mask: nodemask_t of already used nodes 1362 * @used_node_mask: nodemask_t of already used nodes
1358 * 1363 *
@@ -1671,6 +1676,18 @@ static void __init free_area_init_core(struct pglist_data *pgdat,
1671 if (batch < 1) 1676 if (batch < 1)
1672 batch = 1; 1677 batch = 1;
1673 1678
1679 /*
1680 * Clamp the batch to a 2^n - 1 value. Having a power
1681 * of 2 value was found to be more likely to have
1682 * suboptimal cache aliasing properties in some cases.
1683 *
1684 * For example if 2 tasks are alternately allocating
1685 * batches of pages, one task can end up with a lot
1686 * of pages of one half of the possible page colors
1687 * and the other with pages of the other colors.
1688 */
1689 batch = (1 << fls(batch + batch/2)) - 1;
1690
1674 for (cpu = 0; cpu < NR_CPUS; cpu++) { 1691 for (cpu = 0; cpu < NR_CPUS; cpu++) {
1675 struct per_cpu_pages *pcp; 1692 struct per_cpu_pages *pcp;
1676 1693
@@ -1881,6 +1898,7 @@ static char *vmstat_text[] = {
1881 "allocstall", 1898 "allocstall",
1882 1899
1883 "pgrotated", 1900 "pgrotated",
1901 "nr_bounce",
1884}; 1902};
1885 1903
1886static void *vmstat_start(struct seq_file *m, loff_t *pos) 1904static void *vmstat_start(struct seq_file *m, loff_t *pos)