Diffstat (limited to 'mm/hugetlb.c')
-rw-r--r--   mm/hugetlb.c   45
1 file changed, 40 insertions, 5 deletions
diff --git a/mm/hugetlb.c b/mm/hugetlb.c
index 83fa0c3b6e2b..a73dbdcb89eb 100644
--- a/mm/hugetlb.c
+++ b/mm/hugetlb.c
@@ -423,14 +423,14 @@ static void clear_huge_page(struct page *page,
 	}
 }
 
-static void copy_gigantic_page(struct page *dst, struct page *src,
+static void copy_user_gigantic_page(struct page *dst, struct page *src,
 			   unsigned long addr, struct vm_area_struct *vma)
 {
 	int i;
 	struct hstate *h = hstate_vma(vma);
 	struct page *dst_base = dst;
 	struct page *src_base = src;
-	might_sleep();
+
 	for (i = 0; i < pages_per_huge_page(h); ) {
 		cond_resched();
 		copy_user_highpage(dst, src, addr + i*PAGE_SIZE, vma);
@@ -440,14 +440,15 @@ static void copy_gigantic_page(struct page *dst, struct page *src,
 		src = mem_map_next(src, src_base, i);
 	}
 }
-static void copy_huge_page(struct page *dst, struct page *src,
+
+static void copy_user_huge_page(struct page *dst, struct page *src,
 			   unsigned long addr, struct vm_area_struct *vma)
 {
 	int i;
 	struct hstate *h = hstate_vma(vma);
 
 	if (unlikely(pages_per_huge_page(h) > MAX_ORDER_NR_PAGES)) {
-		copy_gigantic_page(dst, src, addr, vma);
+		copy_user_gigantic_page(dst, src, addr, vma);
 		return;
 	}
 
@@ -458,6 +459,40 @@ static void copy_huge_page(struct page *dst, struct page *src,
 	}
 }
 
+static void copy_gigantic_page(struct page *dst, struct page *src)
+{
+	int i;
+	struct hstate *h = page_hstate(src);
+	struct page *dst_base = dst;
+	struct page *src_base = src;
+
+	for (i = 0; i < pages_per_huge_page(h); ) {
+		cond_resched();
+		copy_highpage(dst, src);
+
+		i++;
+		dst = mem_map_next(dst, dst_base, i);
+		src = mem_map_next(src, src_base, i);
+	}
+}
+
+void copy_huge_page(struct page *dst, struct page *src)
+{
+	int i;
+	struct hstate *h = page_hstate(src);
+
+	if (unlikely(pages_per_huge_page(h) > MAX_ORDER_NR_PAGES)) {
+		copy_gigantic_page(dst, src);
+		return;
+	}
+
+	might_sleep();
+	for (i = 0; i < pages_per_huge_page(h); i++) {
+		cond_resched();
+		copy_highpage(dst + i, src + i);
+	}
+}
+
 static void enqueue_huge_page(struct hstate *h, struct page *page)
 {
 	int nid = page_to_nid(page);
@@ -2412,7 +2447,7 @@ retry_avoidcopy:
 	if (unlikely(anon_vma_prepare(vma)))
 		return VM_FAULT_OOM;
 
-	copy_huge_page(new_page, old_page, address, vma);
+	copy_user_huge_page(new_page, old_page, address, vma);
 	__SetPageUptodate(new_page);
 
 	/*
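
Summary of the change above: the old per-VMA copy helpers are renamed to copy_user_huge_page()/copy_user_gigantic_page() and keep serving the COW path, while the newly added copy_huge_page()/copy_gigantic_page() copy a hugepage purely by struct page, with no VMA or user virtual address required. The sketch below is a minimal userspace model of that chunked-copy pattern, not kernel code: PAGE_SIZE, the MAX_ORDER_NR_PAGES threshold, the 2MB hugepage in main() and the use of memcpy() are assumptions standing in for the kernel's copy_highpage()/mem_map_next()/cond_resched() machinery.

/*
 * Userspace model only: memcpy() stands in for copy_highpage(), and each
 * loop iteration is where the kernel calls cond_resched().  PAGE_SIZE and
 * MAX_ORDER_NR_PAGES are assumed values for illustration.
 */
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

#define PAGE_SIZE		4096UL
#define MAX_ORDER_NR_PAGES	2048UL	/* assumed "gigantic" threshold */

/*
 * Gigantic path: in the kernel the tail struct pages may not be contiguous,
 * so the real code walks them with mem_map_next(); in this flat model both
 * paths reduce to a per-base-page copy.
 */
static void copy_gigantic_page_model(char *dst, const char *src,
				     unsigned long nr_pages)
{
	unsigned long i;

	for (i = 0; i < nr_pages; i++)
		memcpy(dst + i * PAGE_SIZE, src + i * PAGE_SIZE, PAGE_SIZE);
}

static void copy_huge_page_model(char *dst, const char *src,
				 unsigned long nr_pages)
{
	unsigned long i;

	/* Check the gigantic threshold first, as the kernel helper does. */
	if (nr_pages > MAX_ORDER_NR_PAGES) {
		copy_gigantic_page_model(dst, src, nr_pages);
		return;
	}

	for (i = 0; i < nr_pages; i++)
		memcpy(dst + i * PAGE_SIZE, src + i * PAGE_SIZE, PAGE_SIZE);
}

int main(void)
{
	unsigned long nr_pages = 512;	/* one 2MB hugepage as 4KB chunks (assumed sizes) */
	char *src = malloc(nr_pages * PAGE_SIZE);
	char *dst = malloc(nr_pages * PAGE_SIZE);

	if (!src || !dst)
		return 1;

	memset(src, 0xa5, nr_pages * PAGE_SIZE);
	copy_huge_page_model(dst, src, nr_pages);
	printf("copies match: %d\n",
	       memcmp(dst, src, nr_pages * PAGE_SIZE) == 0);

	free(src);
	free(dst);
	return 0;
}

The point of the model is only the order of operations the patch establishes: test the gigantic threshold first, then copy one base page per iteration so the loop can stay preemptible; the in-tree code naturally uses page_hstate(), copy_highpage() and mem_map_next() instead of the stand-ins above.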