aboutsummaryrefslogtreecommitdiffstats
path: root/mm/hugetlb.c
diff options
context:
space:
mode:
Diffstat (limited to 'mm/hugetlb.c')
-rw-r--r--mm/hugetlb.c33
1 files changed, 33 insertions, 0 deletions
diff --git a/mm/hugetlb.c b/mm/hugetlb.c
index 36db012b38dd..eb7180db3033 100644
--- a/mm/hugetlb.c
+++ b/mm/hugetlb.c
@@ -140,6 +140,8 @@ static struct page *alloc_huge_page(struct vm_area_struct *vma,
140 return page; 140 return page;
141 141
142fail: 142fail:
143 if (vma->vm_flags & VM_MAYSHARE)
144 resv_huge_pages++;
143 spin_unlock(&hugetlb_lock); 145 spin_unlock(&hugetlb_lock);
144 return NULL; 146 return NULL;
145} 147}
@@ -172,6 +174,17 @@ static int __init hugetlb_setup(char *s)
172} 174}
173__setup("hugepages=", hugetlb_setup); 175__setup("hugepages=", hugetlb_setup);
174 176
177static unsigned int cpuset_mems_nr(unsigned int *array)
178{
179 int node;
180 unsigned int nr = 0;
181
182 for_each_node_mask(node, cpuset_current_mems_allowed)
183 nr += array[node];
184
185 return nr;
186}
187
175#ifdef CONFIG_SYSCTL 188#ifdef CONFIG_SYSCTL
176static void update_and_free_page(struct page *page) 189static void update_and_free_page(struct page *page)
177{ 190{
@@ -817,6 +830,26 @@ int hugetlb_reserve_pages(struct inode *inode, long from, long to)
817 chg = region_chg(&inode->i_mapping->private_list, from, to); 830 chg = region_chg(&inode->i_mapping->private_list, from, to);
818 if (chg < 0) 831 if (chg < 0)
819 return chg; 832 return chg;
833 /*
834 * When cpuset is configured, it breaks the strict hugetlb page
835 * reservation as the accounting is done on a global variable. Such
836 * reservation is completely rubbish in the presence of cpuset because
837 * the reservation is not checked against page availability for the
838 * current cpuset. Applications can still potentially be OOM'ed by the
839 * kernel due to a lack of free hugetlb pages in the cpuset the task is in.
840 * Attempting to enforce strict accounting with cpusets is almost
841 * impossible (or too ugly) because cpusets are so fluid that a
842 * task or memory node can be dynamically moved between cpusets.
843 *
844 * The change of semantics for shared hugetlb mapping with cpuset is
845 * undesirable. However, in order to preserve some of the semantics,
846 * we fall back to check against current free page availability as
847 * a best attempt and hopefully to minimize the impact of changing
848 * semantics that cpuset has.
849 */
850 if (chg > cpuset_mems_nr(free_huge_pages_node))
851 return -ENOMEM;
852
820 ret = hugetlb_acct_memory(chg); 853 ret = hugetlb_acct_memory(chg);
821 if (ret < 0) 854 if (ret < 0)
822 return ret; 855 return ret;