-rw-r--r--	mm/hugetlb.c	82
1 file changed, 41 insertions, 41 deletions
diff --git a/mm/hugetlb.c b/mm/hugetlb.c
index 2c5c9ee4220d..a4dbba8965f3 100644
--- a/mm/hugetlb.c
+++ b/mm/hugetlb.c
@@ -716,6 +716,47 @@ unsigned long hugetlb_total_pages(void)
 	return nr_huge_pages * (HPAGE_SIZE / PAGE_SIZE);
 }
 
+static int hugetlb_acct_memory(long delta)
+{
+	int ret = -ENOMEM;
+
+	spin_lock(&hugetlb_lock);
+	/*
+	 * When cpuset is configured, it breaks the strict hugetlb page
+	 * reservation as the accounting is done on a global variable. Such
+	 * reservation is completely rubbish in the presence of cpuset because
+	 * the reservation is not checked against page availability for the
+	 * current cpuset. Application can still potentially OOM'ed by kernel
+	 * with lack of free htlb page in cpuset that the task is in.
+	 * Attempt to enforce strict accounting with cpuset is almost
+	 * impossible (or too ugly) because cpuset is too fluid that
+	 * task or memory node can be dynamically moved between cpusets.
+	 *
+	 * The change of semantics for shared hugetlb mapping with cpuset is
+	 * undesirable. However, in order to preserve some of the semantics,
+	 * we fall back to check against current free page availability as
+	 * a best attempt and hopefully to minimize the impact of changing
+	 * semantics that cpuset has.
+	 */
+	if (delta > 0) {
+		if (gather_surplus_pages(delta) < 0)
+			goto out;
+
+		if (delta > cpuset_mems_nr(free_huge_pages_node)) {
+			return_unused_surplus_pages(delta);
+			goto out;
+		}
+	}
+
+	ret = 0;
+	if (delta < 0)
+		return_unused_surplus_pages((unsigned long) -delta);
+
+out:
+	spin_unlock(&hugetlb_lock);
+	return ret;
+}
+
 /*
  * We cannot handle pagefaults against hugetlb pages at all. They cause
  * handle_mm_fault() to try to instantiate regular-sized pages in the
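
The hunk above is a pure code move: hugetlb_acct_memory() is relocated earlier in mm/hugetlb.c unchanged, and the hunk below deletes the original copy. To make the accounting flow easier to follow outside the kernel, here is a minimal standalone C model of it. Everything in it (free_pages_per_node, allowed_node, the model_* helpers, the fixed pool sizes) is a hypothetical stand-in for the hugetlb_lock-protected kernel state, not a kernel API; it only mirrors the grow-then-check-then-roll-back shape of the function:

#include <stdio.h>
#include <errno.h>

#define NODES 2

static long free_pages_per_node[NODES] = { 1, 0 }; /* free huge pages per node */
static int allowed_node[NODES] = { 1, 0 };         /* "cpuset": only node 0 allowed */
static long surplus_pages;                          /* pages grown to back a reservation */

/* Stand-in for gather_surplus_pages(): pretend the pool can always grow. */
static int model_gather_surplus(long delta)
{
	surplus_pages += delta;
	return 0;
}

/* Stand-in for return_unused_surplus_pages(): shrink the pool again. */
static void model_return_surplus(long delta)
{
	surplus_pages -= delta;
}

/* Stand-in for cpuset_mems_nr(free_huge_pages_node): sum the per-node
 * free counter over only the nodes this task's "cpuset" allows. */
static long model_cpuset_mems_nr(void)
{
	long total = 0;
	for (int n = 0; n < NODES; n++)
		if (allowed_node[n])
			total += free_pages_per_node[n];
	return total;
}

/* Same shape as hugetlb_acct_memory(): grow the pool first, then do the
 * best-effort cpuset availability check and roll back on failure. */
static int model_acct_memory(long delta)
{
	int ret = -ENOMEM;

	if (delta > 0) {
		if (model_gather_surplus(delta) < 0)
			goto out;
		if (delta > model_cpuset_mems_nr()) {
			model_return_surplus(delta);
			goto out;
		}
	}
	ret = 0;
	if (delta < 0)
		model_return_surplus(-delta);
out:
	return ret;
}

int main(void)
{
	printf("reserve 1 page : %d\n", model_acct_memory(1));  /* fits in node 0: 0 */
	printf("reserve 3 pages: %d\n", model_acct_memory(3));  /* over cpuset: -ENOMEM */
	printf("release 1 page : %d\n", model_acct_memory(-1)); /* negative delta: 0 */
	return 0;
}

Running it shows the three interesting cases: a request the allowed nodes can cover succeeds, an oversized request is rolled back with -ENOMEM even though the global pool grew, and a negative delta simply returns pages.
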
@@ -1248,47 +1289,6 @@ static long region_truncate(struct list_head *head, long end)
 	return chg;
 }
 
-static int hugetlb_acct_memory(long delta)
-{
-	int ret = -ENOMEM;
-
-	spin_lock(&hugetlb_lock);
-	/*
-	 * When cpuset is configured, it breaks the strict hugetlb page
-	 * reservation as the accounting is done on a global variable. Such
-	 * reservation is completely rubbish in the presence of cpuset because
-	 * the reservation is not checked against page availability for the
-	 * current cpuset. Application can still potentially OOM'ed by kernel
-	 * with lack of free htlb page in cpuset that the task is in.
-	 * Attempt to enforce strict accounting with cpuset is almost
-	 * impossible (or too ugly) because cpuset is too fluid that
-	 * task or memory node can be dynamically moved between cpusets.
-	 *
-	 * The change of semantics for shared hugetlb mapping with cpuset is
-	 * undesirable. However, in order to preserve some of the semantics,
-	 * we fall back to check against current free page availability as
-	 * a best attempt and hopefully to minimize the impact of changing
-	 * semantics that cpuset has.
-	 */
-	if (delta > 0) {
-		if (gather_surplus_pages(delta) < 0)
-			goto out;
-
-		if (delta > cpuset_mems_nr(free_huge_pages_node)) {
-			return_unused_surplus_pages(delta);
-			goto out;
-		}
-	}
-
-	ret = 0;
-	if (delta < 0)
-		return_unused_surplus_pages((unsigned long) -delta);
-
-out:
-	spin_unlock(&hugetlb_lock);
-	return ret;
-}
-
 int hugetlb_reserve_pages(struct inode *inode, long from, long to)
 {
 	long ret, chg;
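
The diff ends at the top of hugetlb_reserve_pages(), whose body is truncated here. For orientation only, the following self-contained sketch models how such a caller plausibly combines per-file range bookkeeping (the region_chg()/region_add() pattern visible in the surrounding code, alongside region_truncate() in the hunk header) with hugetlb_acct_memory(): compute how many pages in [from, to) still need a reservation, account them globally, and commit the range only if accounting succeeds. reserved[], MAX_PAGES, pool, and the *_model helpers are hypothetical; this is not the kernel implementation:

#include <stdio.h>

#define MAX_PAGES 64

static int reserved[MAX_PAGES]; /* 1 if this page index is already reserved */
static long pool = 8;           /* fake global pool of huge pages */

/* Model of region_chg(): count pages in [from, to) needing a new reservation. */
static long region_chg_model(long from, long to)
{
	long chg = 0;
	for (long i = from; i < to; i++)
		if (!reserved[i])
			chg++;
	return chg;
}

/* Model of region_add(): commit the range once accounting has succeeded. */
static void region_add_model(long from, long to)
{
	for (long i = from; i < to; i++)
		reserved[i] = 1;
}

/* Stand-in for hugetlb_acct_memory(): charge the fake global pool. */
static int acct_memory_model(long delta)
{
	if (delta > pool)
		return -1;
	pool -= delta;
	return 0;
}

/* Caller shape: charge only the not-yet-reserved pages, commit on success. */
static int reserve_pages_model(long from, long to)
{
	long chg = region_chg_model(from, to);

	if (acct_memory_model(chg) < 0)
		return -1; /* nothing committed on failure */
	region_add_model(from, to);
	return 0;
}

int main(void)
{
	printf("reserve [0,4): %d\n", reserve_pages_model(0, 4));
	printf("reserve [2,6): %d (only 2 new pages charged)\n",
	       reserve_pages_model(2, 6));
	return 0;
}

The overlapping second call illustrates why the charge is computed from the range bookkeeping rather than from the raw range length: pages 2 and 3 are already reserved, so only two new pages are accounted against the pool.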