Diffstat (limited to 'mm/page_alloc.c')
-rw-r--r--  mm/page_alloc.c  36
1 file changed, 26 insertions, 10 deletions
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index c73dbbc1cd8f..fc1b1064c505 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -799,14 +799,18 @@ __alloc_pages(unsigned int __nocast gfp_mask, unsigned int order,
 	}
 
 	/* This allocation should allow future memory freeing. */
-	if (((p->flags & PF_MEMALLOC) || unlikely(test_thread_flag(TIF_MEMDIE))) && !in_interrupt()) {
-		/* go through the zonelist yet again, ignoring mins */
-		for (i = 0; (z = zones[i]) != NULL; i++) {
-			if (!cpuset_zone_allowed(z))
-				continue;
-			page = buffered_rmqueue(z, order, gfp_mask);
-			if (page)
-				goto got_pg;
+
+	if (((p->flags & PF_MEMALLOC) || unlikely(test_thread_flag(TIF_MEMDIE)))
+			&& !in_interrupt()) {
+		if (!(gfp_mask & __GFP_NOMEMALLOC)) {
+			/* go through the zonelist yet again, ignoring mins */
+			for (i = 0; (z = zones[i]) != NULL; i++) {
+				if (!cpuset_zone_allowed(z))
+					continue;
+				page = buffered_rmqueue(z, order, gfp_mask);
+				if (page)
+					goto got_pg;
+			}
 		}
 		goto nopage;
 	}
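The net effect of the added test is that an allocation carrying __GFP_NOMEMALLOC never dips into the emergency reserves, even when the calling task has PF_MEMALLOC or TIF_MEMDIE set; it falls through to the nopage label instead. A hypothetical caller sketch (pool_refill_page and its flag handling are illustrative, not taken from this patch; the likely real users are mempool-style allocators that keep their own reserves):

#include <linux/gfp.h>
#include <linux/mm.h>

/*
 * Hypothetical caller sketch (not part of this patch): a subsystem
 * that keeps its own emergency reserve, mempool-style, and therefore
 * must not drain the allocator's PF_MEMALLOC reserves.
 */
static struct page *pool_refill_page(unsigned int __nocast gfp_mask,
				     unsigned int order)
{
	/*
	 * With __GFP_NOMEMALLOC set, the hunk above skips the
	 * "ignore the watermarks" retry even for a PF_MEMALLOC task
	 * and falls through to nopage, leaving the reserves intact.
	 */
	return alloc_pages(gfp_mask | __GFP_NOMEMALLOC, order);
}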
@@ -1351,8 +1355,7 @@ static int __init build_zonelists_node(pg_data_t *pgdat, struct zonelist *zoneli
 #define MAX_NODE_LOAD (num_online_nodes())
 static int __initdata node_load[MAX_NUMNODES];
 /**
- * find_next_best_node - find the next node that should appear in a given
- * node's fallback list
+ * find_next_best_node - find the next node that should appear in a given node's fallback list
  * @node: node whose fallback list we're appending
  * @used_node_mask: nodemask_t of already used nodes
  *
@@ -1671,6 +1674,18 @@ static void __init free_area_init_core(struct pglist_data *pgdat,
 	if (batch < 1)
 		batch = 1;
 
+	/*
+	 * Clamp the batch to a 2^n - 1 value. Having a power
+	 * of 2 value was found to be more likely to have
+	 * suboptimal cache aliasing properties in some cases.
+	 *
+	 * For example if 2 tasks are alternately allocating
+	 * batches of pages, one task can end up with a lot
+	 * of pages of one half of the possible page colors
+	 * and the other with pages of the other colors.
+	 */
+	batch = (1 << fls(batch + batch/2)) - 1;
+
 	for (cpu = 0; cpu < NR_CPUS; cpu++) {
 		struct per_cpu_pages *pcp;
 
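The added clamp rounds the raw per-cpu batch size to a value of the form 2^n - 1 at or above the original; the batch/2 term biases the result one bracket higher when batch is already close to the next power of two. A minimal userspace sketch of the arithmetic (fls() is reimplemented here with a GCC builtin as a stand-in for the kernel helper):

#include <stdio.h>

/* Stand-in for the kernel's fls(): 1-based index of the most
 * significant set bit, 0 when the argument is 0. */
static int fls(unsigned int x)
{
	return x ? 32 - __builtin_clz(x) : 0;
}

int main(void)
{
	/* Print how raw batch sizes map onto 2^n - 1 values. */
	for (unsigned int batch = 1; batch <= 64; batch *= 2) {
		unsigned int clamped = (1 << fls(batch + batch / 2)) - 1;
		printf("batch %2u -> %3u\n", batch, clamped);
	}
	return 0;
}

Every result is one less than a power of two (1, 3, 7, 15, 31, 63, 127), which is the property the comment above is after.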
@@ -1881,6 +1896,7 @@ static char *vmstat_text[] = {
 	"allocstall",
 
 	"pgrotated",
+	"nr_bounce",
 };
 
 static void *vmstat_start(struct seq_file *m, loff_t *pos)
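In the 2.6-era /proc/vmstat code, each vmstat_text entry is printed next to the counter at the same offset in the per-cpu page statistics, so a new name like "nr_bounce" is only meaningful if a matching counter is appended in the same position elsewhere in this series. A small userspace sketch of that index pairing (the array excerpt and the counter values are illustrative, not taken from the kernel):

#include <stdio.h>

/* Illustrative excerpt: names are paired with counters purely by
 * index, which is why new entries must be appended in the same
 * order as the counters they describe. */
static const char *vmstat_text[] = {
	/* ... preceding entries elided ... */
	"pgrotated",
	"nr_bounce",	/* the entry added by this hunk */
};

int main(void)
{
	unsigned long counters[] = { 12, 0 };	/* made-up values */

	for (unsigned int i = 0; i < sizeof(counters) / sizeof(counters[0]); i++)
		printf("%s %lu\n", vmstat_text[i], counters[i]);
	return 0;
}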