Diffstat (limited to 'mm/hugetlb.c')
 mm/hugetlb.c | 52 +++++++++++++++++++++++++++++++++-------------------
 1 file changed, 33 insertions(+), 19 deletions(-)
diff --git a/mm/hugetlb.c b/mm/hugetlb.c
index 59a0059b39e2..e5318c7793ae 100644
--- a/mm/hugetlb.c
+++ b/mm/hugetlb.c
@@ -1,6 +1,6 @@
 /*
  * Generic hugetlb support.
- * (C) William Irwin, April 2004
+ * (C) Nadia Yvette Chambers, April 2004
  */
 #include <linux/list.h>
 #include <linux/init.h>
@@ -1057,7 +1057,7 @@ static void return_unused_surplus_pages(struct hstate *h,
 	 * on-line nodes with memory and will handle the hstate accounting.
 	 */
 	while (nr_pages--) {
-		if (!free_pool_huge_page(h, &node_states[N_HIGH_MEMORY], 1))
+		if (!free_pool_huge_page(h, &node_states[N_MEMORY], 1))
 			break;
 	}
 }
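
The substitution that repeats throughout this patch swaps node_states[N_HIGH_MEMORY] for node_states[N_MEMORY]: N_MEMORY tracks every node that has memory of any kind, including nodes whose memory sits entirely in ZONE_MOVABLE, so hugetlb no longer skips such nodes. A minimal sketch of how these masks are consumed (illustrative, not part of the patch):

    /*
     * node_states[] entries are ordinary nodemask_t bitmaps, so the
     * conversion only changes which bitmap gets walked.
     */
    int nid;

    for_each_node_state(nid, N_MEMORY)
            pr_info("hugetlb can use node %d\n", nid);
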
@@ -1180,14 +1180,14 @@ static struct page *alloc_huge_page(struct vm_area_struct *vma,
 int __weak alloc_bootmem_huge_page(struct hstate *h)
 {
 	struct huge_bootmem_page *m;
-	int nr_nodes = nodes_weight(node_states[N_HIGH_MEMORY]);
+	int nr_nodes = nodes_weight(node_states[N_MEMORY]);
 
 	while (nr_nodes) {
 		void *addr;
 
 		addr = __alloc_bootmem_node_nopanic(
 				NODE_DATA(hstate_next_node_to_alloc(h,
-						&node_states[N_HIGH_MEMORY])),
+						&node_states[N_MEMORY])),
 				huge_page_size(h), huge_page_size(h), 0);
 
 		if (addr) {
@@ -1259,7 +1259,7 @@ static void __init hugetlb_hstate_alloc_pages(struct hstate *h)
 			if (!alloc_bootmem_huge_page(h))
 				break;
 		} else if (!alloc_fresh_huge_page(h,
-					 &node_states[N_HIGH_MEMORY]))
+					 &node_states[N_MEMORY]))
 			break;
 	}
 	h->max_huge_pages = i;
@@ -1527,7 +1527,7 @@ static ssize_t nr_hugepages_store_common(bool obey_mempolicy,
 		if (!(obey_mempolicy &&
 				init_nodemask_of_mempolicy(nodes_allowed))) {
 			NODEMASK_FREE(nodes_allowed);
-			nodes_allowed = &node_states[N_HIGH_MEMORY];
+			nodes_allowed = &node_states[N_MEMORY];
 		}
 	} else if (nodes_allowed) {
 		/*
@@ -1537,11 +1537,11 @@ static ssize_t nr_hugepages_store_common(bool obey_mempolicy,
 		count += h->nr_huge_pages - h->nr_huge_pages_node[nid];
 		init_nodemask_of_node(nodes_allowed, nid);
 	} else
-		nodes_allowed = &node_states[N_HIGH_MEMORY];
+		nodes_allowed = &node_states[N_MEMORY];
 
 	h->max_huge_pages = set_max_huge_pages(h, count, nodes_allowed);
 
-	if (nodes_allowed != &node_states[N_HIGH_MEMORY])
+	if (nodes_allowed != &node_states[N_MEMORY])
 		NODEMASK_FREE(nodes_allowed);
 
 	return len;
@@ -1800,7 +1800,7 @@ static void hugetlb_unregister_all_nodes(void)
 	 * remove hstate attributes from any nodes that have them.
 	 */
 	for (nid = 0; nid < nr_node_ids; nid++)
-		hugetlb_unregister_node(&node_devices[nid]);
+		hugetlb_unregister_node(node_devices[nid]);
 }
 
 /*
@@ -1844,8 +1844,8 @@ static void hugetlb_register_all_nodes(void)
 {
 	int nid;
 
-	for_each_node_state(nid, N_HIGH_MEMORY) {
-		struct node *node = &node_devices[nid];
+	for_each_node_state(nid, N_MEMORY) {
+		struct node *node = node_devices[nid];
 		if (node->dev.id == nid)
 			hugetlb_register_node(node);
 	}
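
The node_devices changes here and in hugetlb_unregister_all_nodes() above track a companion change in drivers/base/node.c (an assumption from context: node_devices[] appears to have become an array of struct node pointers allocated per node, rather than a static array of struct node), so callers pass node_devices[nid] directly instead of &node_devices[nid]. A hedged sketch of the resulting usage:

    /*
     * Assumed shape after the companion change: entries are pointers
     * and may be NULL until the corresponding node is registered.
     */
    struct node *node = node_devices[nid];

    if (node && node->dev.id == nid)
            hugetlb_register_node(node);
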
@@ -1939,8 +1939,8 @@ void __init hugetlb_add_hstate(unsigned order)
 	for (i = 0; i < MAX_NUMNODES; ++i)
 		INIT_LIST_HEAD(&h->hugepage_freelists[i]);
 	INIT_LIST_HEAD(&h->hugepage_activelist);
-	h->next_nid_to_alloc = first_node(node_states[N_HIGH_MEMORY]);
-	h->next_nid_to_free = first_node(node_states[N_HIGH_MEMORY]);
+	h->next_nid_to_alloc = first_node(node_states[N_MEMORY]);
+	h->next_nid_to_free = first_node(node_states[N_MEMORY]);
 	snprintf(h->name, HSTATE_NAME_LEN, "hugepages-%lukB",
 					huge_page_size(h)/1024);
 	/*
@@ -2035,11 +2035,11 @@ static int hugetlb_sysctl_handler_common(bool obey_mempolicy,
 		if (!(obey_mempolicy &&
 				init_nodemask_of_mempolicy(nodes_allowed))) {
 			NODEMASK_FREE(nodes_allowed);
-			nodes_allowed = &node_states[N_HIGH_MEMORY];
+			nodes_allowed = &node_states[N_MEMORY];
 		}
 		h->max_huge_pages = set_max_huge_pages(h, tmp, nodes_allowed);
 
-		if (nodes_allowed != &node_states[N_HIGH_MEMORY])
+		if (nodes_allowed != &node_states[N_MEMORY])
 			NODEMASK_FREE(nodes_allowed);
 	}
 out:
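
Both the sysfs store path above (nr_hugepages_store_common) and this sysctl handler share one ownership pattern: nodes_allowed either aliases the static node_states[N_MEMORY] mask or owns a heap-allocated nodemask, and only the latter may be freed. A condensed sketch of the pattern, assuming the NODEMASK_ALLOC call that precedes these hunks in mm/hugetlb.c:

    NODEMASK_ALLOC(nodemask_t, nodes_allowed, GFP_KERNEL | __GFP_NORETRY);

    if (!(obey_mempolicy && init_nodemask_of_mempolicy(nodes_allowed))) {
            NODEMASK_FREE(nodes_allowed);
            nodes_allowed = &node_states[N_MEMORY];  /* static; never freed */
    }

    h->max_huge_pages = set_max_huge_pages(h, count, nodes_allowed);

    if (nodes_allowed != &node_states[N_MEMORY])     /* only free the copy */
            NODEMASK_FREE(nodes_allowed);
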
@@ -2386,8 +2386,10 @@ again:
 		/*
 		 * HWPoisoned hugepage is already unmapped and dropped reference
 		 */
-		if (unlikely(is_hugetlb_entry_hwpoisoned(pte)))
+		if (unlikely(is_hugetlb_entry_hwpoisoned(pte))) {
+			pte_clear(mm, address, ptep);
 			continue;
+		}
 
 		page = pte_page(pte);
 		/*
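
Context for the pte_clear() addition: is_hugetlb_entry_hwpoisoned() matches the special hwpoison swap entry left behind when a hugepage is poisoned. The page itself was already unmapped and its reference dropped, so skipping the entry without clearing it leaves a stale PTE behind that can trip accounting warnings when the process exits. For reference, a simplified form of the predicate as it appears elsewhere in mm/hugetlb.c:

    static int is_hugetlb_entry_hwpoisoned(pte_t pte)
    {
            swp_entry_t swp;

            /* a hwpoisoned entry is neither empty nor present */
            if (huge_pte_none(pte) || pte_present(pte))
                    return 0;
            swp = pte_to_swp_entry(pte);
            return non_swap_entry(swp) && is_hwpoison_entry(swp);
    }
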
@@ -3014,7 +3016,7 @@ same_page:
 	return i ? i : -EFAULT;
 }
 
-void hugetlb_change_protection(struct vm_area_struct *vma,
+unsigned long hugetlb_change_protection(struct vm_area_struct *vma,
 		unsigned long address, unsigned long end, pgprot_t newprot)
 {
 	struct mm_struct *mm = vma->vm_mm;
@@ -3022,6 +3024,7 @@ void hugetlb_change_protection(struct vm_area_struct *vma,
 	pte_t *ptep;
 	pte_t pte;
 	struct hstate *h = hstate_vma(vma);
+	unsigned long pages = 0;
 
 	BUG_ON(address >= end);
 	flush_cache_range(vma, address, end);
@@ -3032,12 +3035,15 @@ void hugetlb_change_protection(struct vm_area_struct *vma,
 		ptep = huge_pte_offset(mm, address);
 		if (!ptep)
 			continue;
-		if (huge_pmd_unshare(mm, &address, ptep))
+		if (huge_pmd_unshare(mm, &address, ptep)) {
+			pages++;
 			continue;
+		}
 		if (!huge_pte_none(huge_ptep_get(ptep))) {
 			pte = huge_ptep_get_and_clear(mm, address, ptep);
 			pte = pte_mkhuge(pte_modify(pte, newprot));
 			set_huge_pte_at(mm, address, ptep, pte);
+			pages++;
 		}
 	}
 	spin_unlock(&mm->page_table_lock);
@@ -3049,6 +3055,8 @@ void hugetlb_change_protection(struct vm_area_struct *vma,
 	 */
 	flush_tlb_range(vma, start, end);
 	mutex_unlock(&vma->vm_file->f_mapping->i_mmap_mutex);
+
+	return pages << h->order;
 }
 
 int hugetlb_reserve_pages(struct inode *inode,
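
The signature change gives hugetlb_change_protection() a return value: the number of base pages whose protection was effectively changed. Each hugepage spans 2^order base pages, so pages << h->order converts the hugepage count into base-page units; an unshared PMD counts too, since unsharing changes what the range maps. A hedged sketch of how a caller on the mprotect path might consume this (names mirror mm/mprotect.c but are written from memory, not quoted from this series):

    /* hypothetical caller: fold hugetlb pages into mprotect accounting */
    unsigned long pages;

    if (is_vm_hugetlb_page(vma))
            pages = hugetlb_change_protection(vma, start, end, newprot);
    else
            pages = change_protection_range(vma, start, end, newprot,
                                            dirty_accountable);
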
@@ -3170,7 +3178,13 @@ int dequeue_hwpoisoned_huge_page(struct page *hpage)
 
 	spin_lock(&hugetlb_lock);
 	if (is_hugepage_on_freelist(hpage)) {
-		list_del(&hpage->lru);
+		/*
+		 * Hwpoisoned hugepage isn't linked to activelist or freelist,
+		 * but dangling hpage->lru can trigger list-debug warnings
+		 * (this happens when we call unpoison_memory() on it),
+		 * so let it point to itself with list_del_init().
+		 */
+		list_del_init(&hpage->lru);
 		set_page_refcounted(hpage);
 		h->free_huge_pages--;
 		h->free_huge_pages_node[nid]--;
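
Why list_del_init() rather than list_del(): list_del() poisons the entry's link pointers, so any later list operation on the same page (as unpoison_memory() can trigger) fires CONFIG_DEBUG_LIST warnings, whereas list_del_init() leaves the entry as a valid, self-pointing empty list. Simplified contrast of the two helpers from include/linux/list.h:

    static inline void list_del(struct list_head *entry)
    {
            __list_del(entry->prev, entry->next);
            entry->next = LIST_POISON1;     /* later list ops warn or oops */
            entry->prev = LIST_POISON2;
    }

    static inline void list_del_init(struct list_head *entry)
    {
            __list_del_entry(entry);
            INIT_LIST_HEAD(entry);          /* entry now points to itself */
    }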