about summary refs log tree commit diff stats
path: root/mm/page_alloc.c
diff options
context:
space:
mode:
Diffstat (limited to 'mm/page_alloc.c')
-rw-r--r--  mm/page_alloc.c  44
1 file changed, 24 insertions(+), 20 deletions(-)
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index dde04ff4be31..208812b25597 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -56,6 +56,7 @@ long nr_swap_pages;
56int percpu_pagelist_fraction; 56int percpu_pagelist_fraction;
57 57
58static void fastcall free_hot_cold_page(struct page *page, int cold); 58static void fastcall free_hot_cold_page(struct page *page, int cold);
59static void __free_pages_ok(struct page *page, unsigned int order);
59 60
60/* 61/*
61 * results with 256, 32 in the lowmem_reserve sysctl: 62 * results with 256, 32 in the lowmem_reserve sysctl:
@@ -169,20 +170,23 @@ static void bad_page(struct page *page)
169 * All pages have PG_compound set. All pages have their ->private pointing at 170 * All pages have PG_compound set. All pages have their ->private pointing at
170 * the head page (even the head page has this). 171 * the head page (even the head page has this).
171 * 172 *
172 * The first tail page's ->mapping, if non-zero, holds the address of the 173 * The first tail page's ->lru.next holds the address of the compound page's
173 * compound page's put_page() function. 174 * put_page() function. Its ->lru.prev holds the order of allocation.
174 * 175 * This usage means that zero-order pages may not be compound.
175 * The order of the allocation is stored in the first tail page's ->index
176 * This is only for debug at present. This usage means that zero-order pages
177 * may not be compound.
178 */ 176 */
177
178static void free_compound_page(struct page *page)
179{
180 __free_pages_ok(page, (unsigned long)page[1].lru.prev);
181}
182
179static void prep_compound_page(struct page *page, unsigned long order) 183static void prep_compound_page(struct page *page, unsigned long order)
180{ 184{
181 int i; 185 int i;
182 int nr_pages = 1 << order; 186 int nr_pages = 1 << order;
183 187
184 page[1].mapping = NULL; 188 page[1].lru.next = (void *)free_compound_page; /* set dtor */
185 page[1].index = order; 189 page[1].lru.prev = (void *)order;
186 for (i = 0; i < nr_pages; i++) { 190 for (i = 0; i < nr_pages; i++) {
187 struct page *p = page + i; 191 struct page *p = page + i;
188 192
@@ -196,7 +200,7 @@ static void destroy_compound_page(struct page *page, unsigned long order)
196 int i; 200 int i;
197 int nr_pages = 1 << order; 201 int nr_pages = 1 << order;
198 202
199 if (unlikely(page[1].index != order)) 203 if (unlikely((unsigned long)page[1].lru.prev != order))
200 bad_page(page); 204 bad_page(page);
201 205
202 for (i = 0; i < nr_pages; i++) { 206 for (i = 0; i < nr_pages; i++) {
@@ -1537,29 +1541,29 @@ static int __initdata node_load[MAX_NUMNODES];
1537 */ 1541 */
1538static int __init find_next_best_node(int node, nodemask_t *used_node_mask) 1542static int __init find_next_best_node(int node, nodemask_t *used_node_mask)
1539{ 1543{
1540 int i, n, val; 1544 int n, val;
1541 int min_val = INT_MAX; 1545 int min_val = INT_MAX;
1542 int best_node = -1; 1546 int best_node = -1;
1543 1547
1544 for_each_online_node(i) { 1548 /* Use the local node if we haven't already */
1545 cpumask_t tmp; 1549 if (!node_isset(node, *used_node_mask)) {
1550 node_set(node, *used_node_mask);
1551 return node;
1552 }
1546 1553
1547 /* Start from local node */ 1554 for_each_online_node(n) {
1548 n = (node+i) % num_online_nodes(); 1555 cpumask_t tmp;
1549 1556
1550 /* Don't want a node to appear more than once */ 1557 /* Don't want a node to appear more than once */
1551 if (node_isset(n, *used_node_mask)) 1558 if (node_isset(n, *used_node_mask))
1552 continue; 1559 continue;
1553 1560
1554 /* Use the local node if we haven't already */
1555 if (!node_isset(node, *used_node_mask)) {
1556 best_node = node;
1557 break;
1558 }
1559
1560 /* Use the distance array to find the distance */ 1561 /* Use the distance array to find the distance */
1561 val = node_distance(node, n); 1562 val = node_distance(node, n);
1562 1563
1564 /* Penalize nodes under us ("prefer the next node") */
1565 val += (n < node);
1566
1563 /* Give preference to headless and unused nodes */ 1567 /* Give preference to headless and unused nodes */
1564 tmp = node_to_cpumask(n); 1568 tmp = node_to_cpumask(n);
1565 if (!cpus_empty(tmp)) 1569 if (!cpus_empty(tmp))