author     Naoya Horiguchi <n-horiguchi@ah.jp.nec.com>  2010-09-07 21:19:34 -0400
committer  Andi Kleen <ak@linux.intel.com>              2010-10-08 03:32:44 -0400
commit     0ebabb416f585ace711769057422af4bbc9d1110 (patch)
tree       d23d66033fdaefec9c2b4051f108b948c5389d0e /mm
parent     bf50bab2b34483316162443587b8467952e07730 (diff)
hugetlb: redefine hugepage copy functions
This patch modifies the hugepage copy functions so that they take only
the destination and source hugepages as arguments, for later use.
The old ones are renamed from copy_{gigantic,huge}_page() to
copy_user_{gigantic,huge}_page().
This naming convention is consistent with the relationship between
copy_highpage() and copy_user_highpage().
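
For quick reference, the two interfaces that result from this patch
(prototypes as they appear in the diff below; the addr/vma variant
remains static to mm/hugetlb.c):

/*
 * Userspace-facing copy: takes the faulting address and VMA so that
 * copy_user_highpage() can perform any architecture-specific cache
 * maintenance for user mappings.
 */
static void copy_user_huge_page(struct page *dst, struct page *src,
			unsigned long addr, struct vm_area_struct *vma);

/*
 * Context-free copy: only the destination and source hugepages,
 * usable where no fault address or VMA is available.
 */
void copy_huge_page(struct page *dst, struct page *src);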
ChangeLog since v4:
- add blank line between local declaration and code
- remove unnecessary might_sleep()
ChangeLog since v2:
- change copy_huge_page() from a macro to an inline dummy function
to avoid a compile warning when !CONFIG_HUGETLB_PAGE.
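
The header side of that changelog item is not visible in the
mm-limited diff below; as a hedged sketch, the usual pattern for such
a dummy in include/linux/hugetlb.h would look like this (the exact
hunk is an assumption, not shown here):

#ifdef CONFIG_HUGETLB_PAGE
void copy_huge_page(struct page *dst, struct page *src);
#else
/*
 * Inline dummy instead of a macro, so a call site still type-checks
 * without warning when hugetlb support is compiled out.
 */
static inline void copy_huge_page(struct page *dst, struct page *src)
{
}
#endif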
Signed-off-by: Naoya Horiguchi <n-horiguchi@ah.jp.nec.com>
Acked-by: Mel Gorman <mel@csn.ul.ie>
Reviewed-by: Christoph Lameter <cl@linux.com>
Signed-off-by: Andi Kleen <ak@linux.intel.com>
Diffstat (limited to 'mm')
-rw-r--r--  mm/hugetlb.c | 45 ++++++++++++++++++++++++++++++++++++++++++++-----
1 file changed, 40 insertions(+), 5 deletions(-)
diff --git a/mm/hugetlb.c b/mm/hugetlb.c
index 83fa0c3b6e2b..a73dbdcb89eb 100644
--- a/mm/hugetlb.c
+++ b/mm/hugetlb.c
@@ -423,14 +423,14 @@ static void clear_huge_page(struct page *page,
 	}
 }
 
-static void copy_gigantic_page(struct page *dst, struct page *src,
+static void copy_user_gigantic_page(struct page *dst, struct page *src,
 			unsigned long addr, struct vm_area_struct *vma)
 {
 	int i;
 	struct hstate *h = hstate_vma(vma);
 	struct page *dst_base = dst;
 	struct page *src_base = src;
-	might_sleep();
+
 	for (i = 0; i < pages_per_huge_page(h); ) {
 		cond_resched();
 		copy_user_highpage(dst, src, addr + i*PAGE_SIZE, vma);
@@ -440,14 +440,15 @@ static void copy_gigantic_page(struct page *dst, struct page *src,
 		src = mem_map_next(src, src_base, i);
 	}
 }
-static void copy_huge_page(struct page *dst, struct page *src,
+
+static void copy_user_huge_page(struct page *dst, struct page *src,
 			unsigned long addr, struct vm_area_struct *vma)
 {
 	int i;
 	struct hstate *h = hstate_vma(vma);
 
 	if (unlikely(pages_per_huge_page(h) > MAX_ORDER_NR_PAGES)) {
-		copy_gigantic_page(dst, src, addr, vma);
+		copy_user_gigantic_page(dst, src, addr, vma);
 		return;
 	}
 
@@ -458,6 +459,40 @@ static void copy_huge_page(struct page *dst, struct page *src,
 	}
 }
 
+static void copy_gigantic_page(struct page *dst, struct page *src)
+{
+	int i;
+	struct hstate *h = page_hstate(src);
+	struct page *dst_base = dst;
+	struct page *src_base = src;
+
+	for (i = 0; i < pages_per_huge_page(h); ) {
+		cond_resched();
+		copy_highpage(dst, src);
+
+		i++;
+		dst = mem_map_next(dst, dst_base, i);
+		src = mem_map_next(src, src_base, i);
+	}
+}
+
+void copy_huge_page(struct page *dst, struct page *src)
+{
+	int i;
+	struct hstate *h = page_hstate(src);
+
+	if (unlikely(pages_per_huge_page(h) > MAX_ORDER_NR_PAGES)) {
+		copy_gigantic_page(dst, src);
+		return;
+	}
+
+	might_sleep();
+	for (i = 0; i < pages_per_huge_page(h); i++) {
+		cond_resched();
+		copy_highpage(dst + i, src + i);
+	}
+}
+
 static void enqueue_huge_page(struct hstate *h, struct page *page)
 {
 	int nid = page_to_nid(page);
@@ -2412,7 +2447,7 @@ retry_avoidcopy:
 	if (unlikely(anon_vma_prepare(vma)))
 		return VM_FAULT_OOM;
 
-	copy_huge_page(new_page, old_page, address, vma);
+	copy_user_huge_page(new_page, old_page, address, vma);
 	__SetPageUptodate(new_page);
 
 	/*
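
As a hedged illustration of the "later use" the changelog anticipates:
a caller with no fault context can now copy a hugepage given nothing
but the two pages. The helper below is a hypothetical sketch, not part
of this patch:

/*
 * Hypothetical caller sketch: a context-free copy path (for example a
 * migration-style helper) that has no fault address or VMA to pass.
 * sketch_copy_no_vma() is an illustrative name only.
 */
static void sketch_copy_no_vma(struct page *newpage, struct page *page)
{
	if (PageHuge(page))
		copy_huge_page(newpage, page);	/* new dst/src-only interface */
	else
		copy_highpage(newpage, page);
}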