author     Lee Schermerhorn <lee.schermerhorn@hp.com>   2009-12-14 20:58:32 -0500
committer  Linus Torvalds <torvalds@linux-foundation.org>   2009-12-15 11:53:13 -0500
commit     9b5e5d0fdc91b73bba8cf5e0fbe3521a953e4e4d (patch)
tree       58b7ca4a13b71de2132ae669fc138eb85c5ab3c8 /mm/hugetlb.c
parent     267b4c281b4a43c8f3d965c791d3a7fd62448733 (diff)
hugetlb: use only nodes with memory for huge pages
Register per node hstate sysfs attributes only for nodes with memory.
Globally replace "all online nodes" with "all nodes with memory" in
mm/hugetlb.c. Suggested by David Rientjes.
A subsequent patch will handle adding/removing of per node hstate sysfs
attributes when nodes transition to/from memoryless state via memory
hotplug.
NOTE: this patch has not been tested with memoryless nodes.
Signed-off-by: Lee Schermerhorn <lee.schermerhorn@hp.com>
Reviewed-by: Andi Kleen <andi@firstfloor.org>
Cc: KAMEZAWA Hiroyuki <kamezawa.hiroyu@jp.fujitsu.com>
Cc: Mel Gorman <mel@csn.ul.ie>
Cc: Randy Dunlap <randy.dunlap@oracle.com>
Cc: Nishanth Aravamudan <nacc@us.ibm.com>
Acked-by: David Rientjes <rientjes@google.com>
Cc: Adam Litke <agl@us.ibm.com>
Cc: Andy Whitcroft <apw@canonical.com>
Cc: Eric Whitney <eric.whitney@hp.com>
Cc: Christoph Lameter <cl@linux-foundation.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
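For context, the per-node hstate attributes this series registers live under /sys/devices/system/node/nodeN/hugepages/, and with this patch only nodes that have memory get them. The following is a minimal userspace sketch (illustrative only, not part of the commit) that checks which node directories expose that subdirectory; on a machine with a memoryless node, that node should report no hstate attributes once this patch is applied.

/*
 * Illustrative only: list which NUMA nodes expose the per-node hstate
 * attributes registered by this series.  After this patch, only nodes
 * with memory should have /sys/devices/system/node/nodeN/hugepages/.
 */
#include <ctype.h>
#include <dirent.h>
#include <stdio.h>
#include <string.h>
#include <sys/stat.h>

int main(void)
{
	const char *base = "/sys/devices/system/node";
	DIR *d = opendir(base);
	struct dirent *de;

	if (!d) {
		perror(base);
		return 1;
	}
	while ((de = readdir(d)) != NULL) {
		char path[512];
		struct stat st;

		/* only the node0, node1, ... directories */
		if (strncmp(de->d_name, "node", 4) != 0 ||
		    !isdigit((unsigned char)de->d_name[4]))
			continue;
		snprintf(path, sizeof(path), "%s/%s/hugepages", base, de->d_name);
		printf("%-8s %s\n", de->d_name,
		       stat(path, &st) == 0 ? "has per-node hstate attributes"
					    : "no hstate attributes (memoryless?)");
	}
	closedir(d);
	return 0;
}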
Diffstat (limited to 'mm/hugetlb.c')
-rw-r--r--   mm/hugetlb.c   35
1 file changed, 18 insertions(+), 17 deletions(-)
diff --git a/mm/hugetlb.c b/mm/hugetlb.c
index 544f7bcb615e..b4a263512cb7 100644
--- a/mm/hugetlb.c
+++ b/mm/hugetlb.c
@@ -942,14 +942,14 @@ static void return_unused_surplus_pages(struct hstate *h,
 
 	/*
 	 * We want to release as many surplus pages as possible, spread
-	 * evenly across all nodes. Iterate across all nodes until we
-	 * can no longer free unreserved surplus pages. This occurs when
-	 * the nodes with surplus pages have no free pages.
-	 * free_pool_huge_page() will balance the the frees across the
-	 * on-line nodes for us and will handle the hstate accounting.
+	 * evenly across all nodes with memory. Iterate across these nodes
+	 * until we can no longer free unreserved surplus pages. This occurs
+	 * when the nodes with surplus pages have no free pages.
+	 * free_pool_huge_page() will balance the the freed pages across the
+	 * on-line nodes with memory and will handle the hstate accounting.
 	 */
 	while (nr_pages--) {
-		if (!free_pool_huge_page(h, &node_online_map, 1))
+		if (!free_pool_huge_page(h, &node_states[N_HIGH_MEMORY], 1))
 			break;
 	}
 }
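The hunk above relies on free_pool_huge_page() interleaving frees across the nodes in the mask it is passed. A standalone sketch of that round-robin policy follows (simplified: a plain bitmask and an array of counters stand in for nodemask_t and the real per-node pools; this is not the kernel's implementation).

/*
 * Illustrative round-robin release of surplus pages across a node mask:
 * one page per node per pass, stopping once no node in the mask has a
 * free page left.
 */
#include <stdio.h>

#define NR_NODES 4

static int next_node_in_mask(int prev, unsigned mask)
{
	int nid = prev;

	do {
		nid = (nid + 1) % NR_NODES;
	} while (!(mask & (1u << nid)));
	return nid;
}

int main(void)
{
	/* hypothetical free-page counts; node 2 is memoryless (not in mask) */
	int free_pages[NR_NODES] = { 3, 1, 0, 2 };
	unsigned nodes_with_memory = 0xB;	/* nodes 0, 1 and 3 */
	int nr_pages = 10;			/* surplus pages we want to drop */
	int nid = NR_NODES - 1;

	while (nr_pages--) {
		int start, freed = 0;

		/* advance first, then try each node in the mask once */
		nid = next_node_in_mask(nid, nodes_with_memory);
		start = nid;
		do {
			if (free_pages[nid] > 0) {
				free_pages[nid]--;
				printf("freed one page on node %d\n", nid);
				freed = 1;
				break;
			}
			nid = next_node_in_mask(nid, nodes_with_memory);
		} while (nid != start);

		if (!freed)
			break;	/* no node in the mask can free a page */
	}
	return 0;
}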
@@ -1053,14 +1053,14 @@ static struct page *alloc_huge_page(struct vm_area_struct *vma,
 int __weak alloc_bootmem_huge_page(struct hstate *h)
 {
 	struct huge_bootmem_page *m;
-	int nr_nodes = nodes_weight(node_online_map);
+	int nr_nodes = nodes_weight(node_states[N_HIGH_MEMORY]);
 
 	while (nr_nodes) {
 		void *addr;
 
 		addr = __alloc_bootmem_node_nopanic(
 				NODE_DATA(hstate_next_node_to_alloc(h,
-						&node_online_map)),
+					&node_states[N_HIGH_MEMORY])),
 				huge_page_size(h), huge_page_size(h), 0);
 
 		if (addr) {
@@ -1115,7 +1115,8 @@ static void __init hugetlb_hstate_alloc_pages(struct hstate *h)
 		if (h->order >= MAX_ORDER) {
 			if (!alloc_bootmem_huge_page(h))
 				break;
-		} else if (!alloc_fresh_huge_page(h, &node_online_map))
+		} else if (!alloc_fresh_huge_page(h,
+					 &node_states[N_HIGH_MEMORY]))
 			break;
 	}
 	h->max_huge_pages = i;
@@ -1388,7 +1389,7 @@ static ssize_t nr_hugepages_store_common(bool obey_mempolicy,
 
 	h->max_huge_pages = set_max_huge_pages(h, count, nodes_allowed);
 
-	if (nodes_allowed != &node_online_map)
+	if (nodes_allowed != &node_states[N_HIGH_MEMORY])
 		NODEMASK_FREE(nodes_allowed);
 
 	return len;
@@ -1610,7 +1611,7 @@ void hugetlb_unregister_node(struct node *node)
 	struct node_hstate *nhs = &node_hstates[node->sysdev.id];
 
 	if (!nhs->hugepages_kobj)
-		return;
+		return;		/* no hstate attributes */
 
 	for_each_hstate(h)
 		if (nhs->hstate_kobjs[h - hstates]) {
@@ -1675,15 +1676,15 @@ void hugetlb_register_node(struct node *node)
 }
 
 /*
- * hugetlb init time: register hstate attributes for all registered
- * node sysdevs. All on-line nodes should have registered their
- * associated sysdev by the time the hugetlb module initializes.
+ * hugetlb init time: register hstate attributes for all registered node
+ * sysdevs of nodes that have memory. All on-line nodes should have
+ * registered their associated sysdev by this time.
  */
 static void hugetlb_register_all_nodes(void)
 {
 	int nid;
 
-	for (nid = 0; nid < nr_node_ids; nid++) {
+	for_each_node_state(nid, N_HIGH_MEMORY) {
 		struct node *node = &node_devices[nid];
 		if (node->sysdev.id == nid)
 			hugetlb_register_node(node);
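The registration loop above now walks only nodes in the N_HIGH_MEMORY state instead of every node ID. The difference can be inspected from userspace with a small sketch like the one below; it assumes the standard sysfs node-state files, where `has_memory` exists only on later kernels and kernels of this era expose `has_normal_memory`/`has_high_memory` instead, so missing files are simply reported as absent.

/*
 * Illustrative only: dump the "online" node list next to the
 * "nodes with memory" lists exposed by sysfs.
 */
#include <stdio.h>

static void print_mask(const char *name)
{
	char path[128], buf[256];
	FILE *f;

	snprintf(path, sizeof(path), "/sys/devices/system/node/%s", name);
	f = fopen(path, "r");
	if (!f) {
		printf("%-18s <not present on this kernel>\n", name);
		return;
	}
	if (fgets(buf, sizeof(buf), f))
		printf("%-18s %s", name, buf);
	fclose(f);
}

int main(void)
{
	print_mask("online");
	print_mask("has_memory");		/* later kernels */
	print_mask("has_normal_memory");	/* kernels of this era */
	return 0;
}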
@@ -1777,8 +1778,8 @@ void __init hugetlb_add_hstate(unsigned order)
 	h->free_huge_pages = 0;
 	for (i = 0; i < MAX_NUMNODES; ++i)
 		INIT_LIST_HEAD(&h->hugepage_freelists[i]);
-	h->next_nid_to_alloc = first_node(node_online_map);
-	h->next_nid_to_free = first_node(node_online_map);
+	h->next_nid_to_alloc = first_node(node_states[N_HIGH_MEMORY]);
+	h->next_nid_to_free = first_node(node_states[N_HIGH_MEMORY]);
 	snprintf(h->name, HSTATE_NAME_LEN, "hugepages-%lukB",
 					huge_page_size(h)/1024);
 