author	Linus Torvalds <torvalds@linux-foundation.org>	2012-12-13 16:11:15 -0500
committer	Linus Torvalds <torvalds@linux-foundation.org>	2012-12-13 16:11:15 -0500
commit	f6e858a00af788bab0fd4c0b7f5cd788000edc18 (patch)
tree	f9403ca3671be9821dbf83e726e61dbe75fbca6b	/mm/hugetlb.c
parent	193c0d682525987db59ac3a24531a77e4947aa95 (diff)
parent	98870901cce098bbe94d90d2c41d8d1fa8d94392 (diff)
Merge branch 'akpm' (Andrew's patch-bomb)
Merge misc VM changes from Andrew Morton:
 "The rest of most-of-MM.  The other MM bits await a slab merge.

  This patch includes the addition of a huge zero_page.  Not a
  performance boost, but it can save large amounts of physical memory in
  some situations.

  Also a bunch of Fujitsu engineers are working on memory hotplug.
  Which, as it turns out, was badly broken.  About half of their patches
  are included here; the remainder are 3.8 material."

However, this merge disables CONFIG_MOVABLE_NODE, which was totally
broken.  We don't add new features with "default y", nor do we add
Kconfig questions that are incomprehensible to most people without any
help text.  Does the feature even make sense without compaction or
memory hotplug?

* akpm: (54 commits)
  mm/bootmem.c: remove unused wrapper function reserve_bootmem_generic()
  mm/memory.c: remove unused code from do_wp_page()
  asm-generic, mm: pgtable: consolidate zero page helpers
  mm/hugetlb.c: fix warning on freeing hwpoisoned hugepage
  hwpoison, hugetlbfs: fix RSS-counter warning
  hwpoison, hugetlbfs: fix "bad pmd" warning in unmapping hwpoisoned hugepage
  mm: protect against concurrent vma expansion
  memcg: do not check for mm in __mem_cgroup_count_vm_event
  tmpfs: support SEEK_DATA and SEEK_HOLE (reprise)
  mm: provide more accurate estimation of pages occupied by memmap
  fs/buffer.c: remove redundant initialization in alloc_page_buffers()
  fs/buffer.c: do not inline exported function
  writeback: fix a typo in comment
  mm: introduce new field "managed_pages" to struct zone
  mm, oom: remove statically defined arch functions of same name
  mm, oom: remove redundant sleep in pagefault oom handler
  mm, oom: cleanup pagefault oom handler
  memory_hotplug: allow online/offline memory to result movable node
  numa: add CONFIG_MOVABLE_NODE for movable-dedicated node
  mm, memcg: avoid unnecessary function call when memcg is disabled
  ...
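Nearly all of the mm/hugetlb.c hunks below are a mechanical substitution of the new N_MEMORY node state for N_HIGH_MEMORY: N_MEMORY marks every node that has memory of any kind (normal, high, or movable), which is what the movable-node work needs. As a rough illustration of how such a nodemask is consumed (walk_memory_nodes() is a hypothetical demo, not part of this commit; the iterator itself is the stock one from <linux/nodemask.h>):

	#include <linux/nodemask.h>
	#include <linux/printk.h>

	/* Hypothetical demo: visit every node that currently has memory.
	 * N_MEMORY is the nodemask the hunks below switch to, and the
	 * loop shape matches the hugetlb_register_all_nodes() hunk.
	 */
	static void __init walk_memory_nodes(void)
	{
		int nid;

		for_each_node_state(nid, N_MEMORY)
			pr_info("node %d is populated\n", nid);
	}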
Diffstat (limited to 'mm/hugetlb.c')
-rw-r--r--	mm/hugetlb.c	36
1 file changed, 22 insertions(+), 14 deletions(-)
diff --git a/mm/hugetlb.c b/mm/hugetlb.c
index 34f372ad89d0..88e7293b96bd 100644
--- a/mm/hugetlb.c
+++ b/mm/hugetlb.c
@@ -1057,7 +1057,7 @@ static void return_unused_surplus_pages(struct hstate *h,
 	 * on-line nodes with memory and will handle the hstate accounting.
 	 */
 	while (nr_pages--) {
-		if (!free_pool_huge_page(h, &node_states[N_HIGH_MEMORY], 1))
+		if (!free_pool_huge_page(h, &node_states[N_MEMORY], 1))
 			break;
 	}
 }
@@ -1180,14 +1180,14 @@ static struct page *alloc_huge_page(struct vm_area_struct *vma,
 int __weak alloc_bootmem_huge_page(struct hstate *h)
 {
 	struct huge_bootmem_page *m;
-	int nr_nodes = nodes_weight(node_states[N_HIGH_MEMORY]);
+	int nr_nodes = nodes_weight(node_states[N_MEMORY]);
 
 	while (nr_nodes) {
 		void *addr;
 
 		addr = __alloc_bootmem_node_nopanic(
 				NODE_DATA(hstate_next_node_to_alloc(h,
-						&node_states[N_HIGH_MEMORY])),
+						&node_states[N_MEMORY])),
 				huge_page_size(h), huge_page_size(h), 0);
 
 		if (addr) {
@@ -1259,7 +1259,7 @@ static void __init hugetlb_hstate_alloc_pages(struct hstate *h)
 			if (!alloc_bootmem_huge_page(h))
 				break;
 		} else if (!alloc_fresh_huge_page(h,
-					 &node_states[N_HIGH_MEMORY]))
+					 &node_states[N_MEMORY]))
 			break;
 	}
 	h->max_huge_pages = i;
@@ -1527,7 +1527,7 @@ static ssize_t nr_hugepages_store_common(bool obey_mempolicy,
 		if (!(obey_mempolicy &&
 				init_nodemask_of_mempolicy(nodes_allowed))) {
 			NODEMASK_FREE(nodes_allowed);
-			nodes_allowed = &node_states[N_HIGH_MEMORY];
+			nodes_allowed = &node_states[N_MEMORY];
 		}
 	} else if (nodes_allowed) {
 		/*
@@ -1537,11 +1537,11 @@ static ssize_t nr_hugepages_store_common(bool obey_mempolicy,
 		count += h->nr_huge_pages - h->nr_huge_pages_node[nid];
 		init_nodemask_of_node(nodes_allowed, nid);
 	} else
-		nodes_allowed = &node_states[N_HIGH_MEMORY];
+		nodes_allowed = &node_states[N_MEMORY];
 
 	h->max_huge_pages = set_max_huge_pages(h, count, nodes_allowed);
 
-	if (nodes_allowed != &node_states[N_HIGH_MEMORY])
+	if (nodes_allowed != &node_states[N_MEMORY])
 		NODEMASK_FREE(nodes_allowed);
 
 	return len;
@@ -1844,7 +1844,7 @@ static void hugetlb_register_all_nodes(void)
 {
 	int nid;
 
-	for_each_node_state(nid, N_HIGH_MEMORY) {
+	for_each_node_state(nid, N_MEMORY) {
 		struct node *node = node_devices[nid];
 		if (node->dev.id == nid)
 			hugetlb_register_node(node);
@@ -1939,8 +1939,8 @@ void __init hugetlb_add_hstate(unsigned order)
 	for (i = 0; i < MAX_NUMNODES; ++i)
 		INIT_LIST_HEAD(&h->hugepage_freelists[i]);
 	INIT_LIST_HEAD(&h->hugepage_activelist);
-	h->next_nid_to_alloc = first_node(node_states[N_HIGH_MEMORY]);
-	h->next_nid_to_free = first_node(node_states[N_HIGH_MEMORY]);
+	h->next_nid_to_alloc = first_node(node_states[N_MEMORY]);
+	h->next_nid_to_free = first_node(node_states[N_MEMORY]);
 	snprintf(h->name, HSTATE_NAME_LEN, "hugepages-%lukB",
 					huge_page_size(h)/1024);
 	/*
@@ -2035,11 +2035,11 @@ static int hugetlb_sysctl_handler_common(bool obey_mempolicy,
 		if (!(obey_mempolicy &&
 			       init_nodemask_of_mempolicy(nodes_allowed))) {
 			NODEMASK_FREE(nodes_allowed);
-			nodes_allowed = &node_states[N_HIGH_MEMORY];
+			nodes_allowed = &node_states[N_MEMORY];
 		}
 		h->max_huge_pages = set_max_huge_pages(h, tmp, nodes_allowed);
 
-		if (nodes_allowed != &node_states[N_HIGH_MEMORY])
+		if (nodes_allowed != &node_states[N_MEMORY])
 			NODEMASK_FREE(nodes_allowed);
 	}
 out:
@@ -2386,8 +2386,10 @@ again:
 		/*
 		 * HWPoisoned hugepage is already unmapped and dropped reference
 		 */
-		if (unlikely(is_hugetlb_entry_hwpoisoned(pte)))
+		if (unlikely(is_hugetlb_entry_hwpoisoned(pte))) {
+			pte_clear(mm, address, ptep);
 			continue;
+		}
 
 		page = pte_page(pte);
 		/*
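For reference, the guard in the hunk above tests for a hwpoison swap entry left in the page table after memory_failure() unmapped the hugepage; without the added pte_clear(), the stale entry later trips a "bad pmd" warning during page-table teardown. A simplified sketch of such a check, assuming the helpers from <linux/swapops.h> (the real function in this file is is_hugetlb_entry_hwpoisoned()):

	#include <linux/swapops.h>

	/* Simplified sketch: a hwpoisoned hugepage leaves a non-present,
	 * non-none entry whose swap type encodes the poison marker.
	 */
	static bool entry_is_hwpoisoned(pte_t pte)
	{
		if (huge_pte_none(pte) || pte_present(pte))
			return false;
		return is_hwpoison_entry(pte_to_swp_entry(pte));
	}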
@@ -3170,7 +3172,13 @@ int dequeue_hwpoisoned_huge_page(struct page *hpage)
 
 	spin_lock(&hugetlb_lock);
 	if (is_hugepage_on_freelist(hpage)) {
-		list_del(&hpage->lru);
+		/*
+		 * Hwpoisoned hugepage isn't linked to activelist or freelist,
+		 * but dangling hpage->lru can trigger list-debug warnings
+		 * (this happens when we call unpoison_memory() on it),
+		 * so let it point to itself with list_del_init().
+		 */
+		list_del_init(&hpage->lru);
 		set_page_refcounted(hpage);
 		h->free_huge_pages--;
 		h->free_huge_pages_node[nid]--;
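The comment added in the last hunk relies on a detail of the kernel list helpers: list_del() poisons the detached entry's next/prev pointers (LIST_POISON1/LIST_POISON2), so a later list_add() of that entry, as unpoison_memory() may do, triggers list-debug warnings; list_del_init() instead leaves the entry pointing at itself, which is safe to link again. A minimal sketch (detach_for_reuse() is a hypothetical name):

	#include <linux/list.h>

	/* Hypothetical helper: detach an entry that may be re-added
	 * later.  list_del_init() leaves entry->next == entry->prev ==
	 * entry, whereas plain list_del() would leave poisoned pointers
	 * that trip CONFIG_DEBUG_LIST on the next list_add().
	 */
	static inline void detach_for_reuse(struct list_head *entry)
	{
		list_del_init(entry);
	}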