author     Mike Kravetz <mike.kravetz@oracle.com>  2015-06-24 19:57:55 -0400
committer  Linus Torvalds <torvalds@linux-foundation.org>  2015-06-24 20:49:44 -0400
commit     cf3ad20bfeadda693e408d85684790714fc29b08 (patch)
tree       e0a4c133a20bebccdf53f809b3846c58021fd69f
parent     1dd308a7b49d4bdbc17bfa570675ecc8cf7bedb3 (diff)
mm/hugetlb: compute/return the number of regions added by region_add()
Modify region_add() to keep track of regions (pages) added to the reserve
map and return this value.  The return value can be compared to the return
value of region_chg() to determine if the map was modified between calls.
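
For example (hypothetical offsets, not from the patch): if the reserve map
already holds [0, 2) and [4, 6), calling region_add() for [0, 6) deletes the
overlapping region [4, 6) (add -= 2), then extends [0, 2) to [0, 6)
(add += 4), returning 2, the count of the previously uncovered pages [2, 4).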
Make vma_commit_reservation() also pass along the return value of
region_add().  In the normal case, we want vma_commit_reservation() to
return the same value as the preceding call to vma_needs_reservation().
Create a common __vma_reservation_common() routine to help keep the
special case return values in sync.
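
A hedged illustration of the pattern this enables: a hypothetical caller
sketch (not part of this patch; the actual fixup logic lands in later
changes), using only functions introduced or touched here:

	static long reservation_race_sketch(struct hstate *h,
				struct vm_area_struct *vma, unsigned long addr)
	{
		long chg, rc;

		chg = vma_needs_reservation(h, vma, addr);  /* region_chg() */
		if (chg < 0)
			return chg;

		/* ... allocate and instantiate the huge page here; this can
		 * sleep, so the reserve map may change in the meantime ... */

		rc = vma_commit_reservation(h, vma, addr);  /* region_add() */
		if (rc != chg) {
			/* The reserve map was modified between the calls; the
			 * caller must notice the difference and adjust the
			 * global reservation and subpool usage counts. */
		}
		return 0;
	}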
Signed-off-by: Mike Kravetz <mike.kravetz@oracle.com>
Cc: Naoya Horiguchi <n-horiguchi@ah.jp.nec.com>
Cc: Davidlohr Bueso <dave@stgolabs.net>
Cc: David Rientjes <rientjes@google.com>
Cc: Luiz Capitulino <lcapitulino@redhat.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
-rw-r--r--  mm/hugetlb.c  72
1 file changed, 48 insertions(+), 24 deletions(-)
diff --git a/mm/hugetlb.c b/mm/hugetlb.c
index 4a1d7021efaf..cd3fc4194733 100644
--- a/mm/hugetlb.c
+++ b/mm/hugetlb.c
@@ -245,11 +245,15 @@ struct file_region {
  * expanded, because region_add is only called after region_chg
  * with the same range. If a new file_region structure must
  * be allocated, it is done in region_chg.
+ *
+ * Return the number of new huge pages added to the map. This
+ * number is greater than or equal to zero.
  */
 static long region_add(struct resv_map *resv, long f, long t)
 {
 	struct list_head *head = &resv->regions;
 	struct file_region *rg, *nrg, *trg;
+	long add = 0;
 
 	spin_lock(&resv->lock);
 	/* Locate the region we are either in or before. */
@@ -275,14 +279,24 @@ static long region_add(struct resv_map *resv, long f, long t)
 		if (rg->to > t)
 			t = rg->to;
 		if (rg != nrg) {
+			/* Decrement return value by the deleted range.
+			 * Another range will span this area so that by
+			 * end of routine add will be >= zero
+			 */
+			add -= (rg->to - rg->from);
 			list_del(&rg->link);
 			kfree(rg);
 		}
 	}
+
+	add += (nrg->from - f);		/* Added to beginning of region */
 	nrg->from = f;
+	add += t - nrg->to;		/* Added to end of region */
 	nrg->to = t;
+
 	spin_unlock(&resv->lock);
-	return 0;
+	VM_BUG_ON(add < 0);
+	return add;
 }
 
 /*
@@ -1470,46 +1484,56 @@ static void return_unused_surplus_pages(struct hstate *h,
 }
 
 /*
- * Determine if the huge page at addr within the vma has an associated
- * reservation. Where it does not we will need to logically increase
- * reservation and actually increase subpool usage before an allocation
- * can occur. Where any new reservation would be required the
- * reservation change is prepared, but not committed. Once the page
- * has been allocated from the subpool and instantiated the change should
- * be committed via vma_commit_reservation. No action is required on
- * failure.
+ * vma_needs_reservation and vma_commit_reservation are used by the huge
+ * page allocation routines to manage reservations.
+ *
+ * vma_needs_reservation is called to determine if the huge page at addr
+ * within the vma has an associated reservation. If a reservation is
+ * needed, the value 1 is returned. The caller is then responsible for
+ * managing the global reservation and subpool usage counts. After
+ * the huge page has been allocated, vma_commit_reservation is called
+ * to add the page to the reservation map.
+ *
+ * In the normal case, vma_commit_reservation returns the same value
+ * as the preceding vma_needs_reservation call. The only time this
+ * is not the case is if a reserve map was changed between calls. It
+ * is the responsibility of the caller to notice the difference and
+ * take appropriate action.
  */
-static long vma_needs_reservation(struct hstate *h,
-			struct vm_area_struct *vma, unsigned long addr)
+static long __vma_reservation_common(struct hstate *h,
+				struct vm_area_struct *vma, unsigned long addr,
+				bool commit)
 {
 	struct resv_map *resv;
 	pgoff_t idx;
-	long chg;
+	long ret;
 
 	resv = vma_resv_map(vma);
 	if (!resv)
 		return 1;
 
 	idx = vma_hugecache_offset(h, vma, addr);
-	chg = region_chg(resv, idx, idx + 1);
+	if (commit)
+		ret = region_add(resv, idx, idx + 1);
+	else
+		ret = region_chg(resv, idx, idx + 1);
 
 	if (vma->vm_flags & VM_MAYSHARE)
-		return chg;
+		return ret;
 	else
-		return chg < 0 ? chg : 0;
+		return ret < 0 ? ret : 0;
 }
-static void vma_commit_reservation(struct hstate *h,
+
+static long vma_needs_reservation(struct hstate *h,
 			struct vm_area_struct *vma, unsigned long addr)
 {
-	struct resv_map *resv;
-	pgoff_t idx;
-
-	resv = vma_resv_map(vma);
-	if (!resv)
-		return;
+	return __vma_reservation_common(h, vma, addr, false);
+}
 
-	idx = vma_hugecache_offset(h, vma, addr);
-	region_add(resv, idx, idx + 1);
+static long vma_commit_reservation(struct hstate *h,
+			struct vm_area_struct *vma, unsigned long addr)
+{
+	return __vma_reservation_common(h, vma, addr, true);
 }
 
 static struct page *alloc_huge_page(struct vm_area_struct *vma,
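
For readers who want to experiment with the accounting outside the kernel,
here is a minimal, self-contained user-space sketch. It is an assumption-laden
stand-in, not kernel code: it replaces the kernel list API and resv->lock with
a bare singly linked list, and it allocates the node that region_chg() would
have preallocated.

/*
 * Sketch of the accounting region_add() now performs: merge [f, t)
 * into a sorted list of disjoint [from, to) regions and return how
 * many new pages the merge added.
 */
#include <assert.h>
#include <stdio.h>
#include <stdlib.h>

struct region {
	long from, to;
	struct region *next;	/* sorted by 'from', non-overlapping */
};

static long region_add_sketch(struct region **head, long f, long t)
{
	struct region **pp = head, *nrg, *rg;
	long add = 0;

	/* Locate the first region ending at or after f. */
	while (*pp && (*pp)->to < f)
		pp = &(*pp)->next;
	nrg = *pp;

	if (!nrg || nrg->from > t) {
		/* Range sits in a gap: insert a fresh region.  (The
		 * kernel preallocates this node in region_chg().) */
		nrg = malloc(sizeof(*nrg));
		nrg->from = f;
		nrg->to = t;
		nrg->next = *pp;
		*pp = nrg;
		return t - f;
	}

	/* Round our edges out to the region if it extends past them. */
	if (nrg->from < f)
		f = nrg->from;
	if (nrg->to > t)
		t = nrg->to;

	/* Consume any later regions that [f, t) now overlaps. */
	while (nrg->next && nrg->next->from <= t) {
		rg = nrg->next;
		if (rg->to > t)
			t = rg->to;
		add -= (rg->to - rg->from);	/* deleted range */
		nrg->next = rg->next;
		free(rg);
	}

	add += (nrg->from - f);		/* added before the region */
	nrg->from = f;
	add += t - nrg->to;		/* added after the region */
	nrg->to = t;
	assert(add >= 0);		/* mirrors the new VM_BUG_ON() */
	return add;
}

static struct region *mkregion(long from, long to, struct region *next)
{
	struct region *rg = malloc(sizeof(*rg));

	rg->from = from;
	rg->to = to;
	rg->next = next;
	return rg;
}

int main(void)
{
	/* Existing map: [0, 2) [4, 6).  Merging [0, 6) covers the two
	 * previously unreserved pages [2, 4), so this prints 2. */
	struct region *head = mkregion(0, 2, mkregion(4, 6, NULL));

	printf("added %ld pages\n", region_add_sketch(&head, 0, 6));
	return 0;
}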