author	Adam Litke <agl@us.ibm.com>	2008-03-04 17:29:38 -0500
committer	Linus Torvalds <torvalds@woody.linux-foundation.org>	2008-03-04 19:35:18 -0500
commit	ac09b3a15154af5f081fed509c6c3662e79de785 (patch)
tree	753170e02fafcadbd525d93b01105a4995d51461 /mm
parent	a07e6ab41be179cf1ed728a4f41368435508b550 (diff)
hugetlb: close a difficult to trigger reservation race
A hugetlb reservation may be inadequately backed in the event of racing
allocations and frees when utilizing surplus huge pages.  Consider the
following series of events in processes A and B:

 A) Allocates some surplus pages to satisfy a reservation
 B) Frees some huge pages
 A) A notices the extra free pages and drops hugetlb_lock to free some of
    its surplus pages back to the buddy allocator.
 B) Allocates some huge pages
 A) Reacquires hugetlb_lock and returns from gather_surplus_pages()

Avoid this by committing the reservation after pages have been allocated
but before dropping the lock to free excess pages.  For parity, release
the reservation in return_unused_surplus_pages().

This patch also corrects the cpuset_mems_nr() error path in
hugetlb_acct_memory().  If the cpuset check fails, uncommit the
reservation, but also be sure to return any surplus huge pages that may
have been allocated to back the failed reservation.

Thanks to Andy Whitcroft for discovering this.

Signed-off-by: Adam Litke <agl@us.ibm.com>
Cc: Mel Gorman <mel@csn.ul.ie>
Cc: Andy Whitcroft <apw@shadowen.org>
Cc: Dave Hansen <haveblue@us.ibm.com>
Cc: William Lee Irwin III <wli@holomorphy.com>
Cc: David Gibson <david@gibson.dropbear.id.au>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
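For illustration, the locking pattern the fix relies on can be sketched in
plain user-space C.  This is only a minimal sketch under simplified
assumptions; reserve_pages, pool_lock, free_pages and reserved_pages are
hypothetical stand-ins for gather_surplus_pages(), hugetlb_lock,
free_huge_pages and resv_huge_pages, and it is not kernel code.

	/*
	 * Minimal sketch of the race fix -- user-space C with hypothetical
	 * names, not kernel code.
	 */
	#include <pthread.h>
	#include <stdio.h>

	static pthread_mutex_t pool_lock = PTHREAD_MUTEX_INITIALIZER;
	static long free_pages;        /* pages currently sitting in the pool */
	static long reserved_pages;    /* pages already promised to mappings  */

	/* Reserve 'delta' pages, growing the pool with surplus pages if needed. */
	static int reserve_pages(long delta)
	{
		long needed;

		pthread_mutex_lock(&pool_lock);
		needed = (reserved_pages + delta) - free_pages;
		if (needed <= 0) {
			/* Enough free pages already: commit and return. */
			reserved_pages += delta;
			pthread_mutex_unlock(&pool_lock);
			return 0;
		}

		/* ... allocate at least 'needed' surplus pages (may over-allocate) ... */
		free_pages += needed;

		/*
		 * Commit the reservation while still holding the lock.  If the
		 * lock were dropped first (to hand excess pages back to the
		 * allocator), another thread could observe the new free pages
		 * as unreserved and claim them -- the race this patch closes.
		 */
		reserved_pages += delta;
		pthread_mutex_unlock(&pool_lock);

		/* ... slow path: return any excess pages to the allocator ... */
		return 0;
	}

	int main(void)
	{
		reserve_pages(3);
		reserve_pages(2);
		printf("free=%ld reserved=%ld\n", free_pages, reserved_pages);
		return 0;
	}

The key point mirrors the patch: the reservation is committed while the
lock is still held, so a concurrent caller that sees the freshly added
pages also sees them as reserved.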
Diffstat (limited to 'mm')
-rw-r--r--	mm/hugetlb.c | 17 +++++++++++++----
1 file changed, 13 insertions(+), 4 deletions(-)
diff --git a/mm/hugetlb.c b/mm/hugetlb.c
index 89e6286a7f57..20e04c64468d 100644
--- a/mm/hugetlb.c
+++ b/mm/hugetlb.c
@@ -296,8 +296,10 @@ static int gather_surplus_pages(int delta)
 	int needed, allocated;
 
 	needed = (resv_huge_pages + delta) - free_huge_pages;
-	if (needed <= 0)
+	if (needed <= 0) {
+		resv_huge_pages += delta;
 		return 0;
+	}
 
 	allocated = 0;
 	INIT_LIST_HEAD(&surplus_list);
@@ -335,9 +337,12 @@ retry:
 	 * The surplus_list now contains _at_least_ the number of extra pages
 	 * needed to accomodate the reservation.  Add the appropriate number
 	 * of pages to the hugetlb pool and free the extras back to the buddy
-	 * allocator.
+	 * allocator.  Commit the entire reservation here to prevent another
+	 * process from stealing the pages as they are added to the pool but
+	 * before they are reserved.
 	 */
 	needed += allocated;
+	resv_huge_pages += delta;
 	ret = 0;
 free:
 	list_for_each_entry_safe(page, tmp, &surplus_list, lru) {
@@ -371,6 +376,9 @@ static void return_unused_surplus_pages(unsigned long unused_resv_pages)
 	struct page *page;
 	unsigned long nr_pages;
 
+	/* Uncommit the reservation */
+	resv_huge_pages -= unused_resv_pages;
+
 	nr_pages = min(unused_resv_pages, surplus_huge_pages);
 
 	while (nr_pages) {
@@ -1205,12 +1213,13 @@ static int hugetlb_acct_memory(long delta)
 		if (gather_surplus_pages(delta) < 0)
 			goto out;
 
-		if (delta > cpuset_mems_nr(free_huge_pages_node))
+		if (delta > cpuset_mems_nr(free_huge_pages_node)) {
+			return_unused_surplus_pages(delta);
 			goto out;
+		}
 	}
 
 	ret = 0;
-	resv_huge_pages += delta;
 	if (delta < 0)
 		return_unused_surplus_pages((unsigned long) -delta);
 
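For context, here is a minimal user-space sketch of how the corrected
accounting path behaves after this patch.  All names below
(account_reservation, grow_pool_and_commit, shrink_pool_and_uncommit,
node_limit) are hypothetical stand-ins for hugetlb_acct_memory(),
gather_surplus_pages(), return_unused_surplus_pages() and
cpuset_mems_nr(); it models the control flow only, not the kernel
implementation.

	/* Hypothetical, simplified model of the corrected error path; every
	 * name below is a stand-in, not kernel API. */
	#include <errno.h>
	#include <stdio.h>

	static long free_pages, reserved_pages, node_limit = 4;

	static int grow_pool_and_commit(long delta)        /* ~ gather_surplus_pages() */
	{
		free_pages += delta;       /* pretend allocation always succeeds  */
		reserved_pages += delta;   /* reservation committed here, as in the patch */
		return 0;
	}

	static void shrink_pool_and_uncommit(long delta)   /* ~ return_unused_surplus_pages() */
	{
		reserved_pages -= delta;   /* uncommit the reservation            */
		free_pages -= delta;       /* give the surplus pages back         */
	}

	static int account_reservation(long delta)         /* ~ hugetlb_acct_memory() */
	{
		int ret = -ENOMEM;

		if (delta > 0) {
			if (grow_pool_and_commit(delta) < 0)
				goto out;
			/* Cpuset-style check: the reservation is already
			 * committed, so on failure it must be backed out and
			 * the surplus pages returned. */
			if (delta > node_limit) {
				shrink_pool_and_uncommit(delta);
				goto out;
			}
		}
		ret = 0;
		if (delta < 0)
			shrink_pool_and_uncommit(-delta);
	out:
		return ret;
	}

	int main(void)
	{
		printf("small reservation: %d (reserved=%ld)\n",
		       account_reservation(2), reserved_pages);
		printf("oversized reservation: %d (reserved=%ld)\n",
		       account_reservation(8), reserved_pages);
		return 0;
	}

The sketch shows why return_unused_surplus_pages(delta) is the right call
on the failure path: the reservation was committed inside
gather_surplus_pages(), so the failing check must both uncommit it and
hand back any surplus pages allocated to back it.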