diff options
author | Christoph Lameter <clameter@engr.sgi.com> | 2006-01-06 03:10:49 -0500 |
---|---|---|
committer | Linus Torvalds <torvalds@g5.osdl.org> | 2006-01-06 11:33:23 -0500 |
commit | 6bda666a03f063968833760c5bb5c13062ab9291 (patch) | |
tree | 8ecc0b672c059aa296f80935cda33f3e59970832 | |
parent | 21abb1478a87e26f5fa71dbcb7cf4264272c2248 (diff) |
[PATCH] hugepages: fold find_or_alloc_huge_page() into hugetlb_no_page()
The number of parameters for find_or_alloc_huge_page() increases significantly after
policy support is added to huge pages. Simplify the code by folding
find_or_alloc_huge_page() into hugetlb_no_page().
Adam Litke objected to this piece in an earlier patch but I think this is a
good simplification. Diffstat shows that we can get rid of almost half of the
lines of find_or_alloc_huge_page(). If we can find no consensus then let's simply
drop this patch.
Signed-off-by: Christoph Lameter <clameter@sgi.com>
Cc: Andi Kleen <ak@muc.de>
Acked-by: William Lee Irwin III <wli@holomorphy.com>
Cc: Adam Litke <agl@us.ibm.com>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
-rw-r--r-- | mm/hugetlb.c | 66 |
1 files changed, 24 insertions, 42 deletions
diff --git a/mm/hugetlb.c b/mm/hugetlb.c index eb405565949d..f4c43d7980ba 100644 --- a/mm/hugetlb.c +++ b/mm/hugetlb.c | |||
@@ -368,43 +368,6 @@ void unmap_hugepage_range(struct vm_area_struct *vma, unsigned long start, | |||
368 | flush_tlb_range(vma, start, end); | 368 | flush_tlb_range(vma, start, end); |
369 | } | 369 | } |
370 | 370 | ||
371 | static struct page *find_or_alloc_huge_page(struct vm_area_struct *vma, | ||
372 | unsigned long addr, struct address_space *mapping, | ||
373 | unsigned long idx, int shared) | ||
374 | { | ||
375 | struct page *page; | ||
376 | int err; | ||
377 | |||
378 | retry: | ||
379 | page = find_lock_page(mapping, idx); | ||
380 | if (page) | ||
381 | goto out; | ||
382 | |||
383 | if (hugetlb_get_quota(mapping)) | ||
384 | goto out; | ||
385 | page = alloc_huge_page(vma, addr); | ||
386 | if (!page) { | ||
387 | hugetlb_put_quota(mapping); | ||
388 | goto out; | ||
389 | } | ||
390 | |||
391 | if (shared) { | ||
392 | err = add_to_page_cache(page, mapping, idx, GFP_KERNEL); | ||
393 | if (err) { | ||
394 | put_page(page); | ||
395 | hugetlb_put_quota(mapping); | ||
396 | if (err == -EEXIST) | ||
397 | goto retry; | ||
398 | page = NULL; | ||
399 | } | ||
400 | } else { | ||
401 | /* Caller expects a locked page */ | ||
402 | lock_page(page); | ||
403 | } | ||
404 | out: | ||
405 | return page; | ||
406 | } | ||
407 | |||
408 | static int hugetlb_cow(struct mm_struct *mm, struct vm_area_struct *vma, | 371 | static int hugetlb_cow(struct mm_struct *mm, struct vm_area_struct *vma, |
409 | unsigned long address, pte_t *ptep, pte_t pte) | 372 | unsigned long address, pte_t *ptep, pte_t pte) |
410 | { | 373 | { |
@@ -471,12 +434,31 @@ int hugetlb_no_page(struct mm_struct *mm, struct vm_area_struct *vma, | |||
471 | * Use page lock to guard against racing truncation | 434 | * Use page lock to guard against racing truncation |
472 | * before we get page_table_lock. | 435 | * before we get page_table_lock. |
473 | */ | 436 | */ |
474 | page = find_or_alloc_huge_page(vma, address, mapping, idx, | 437 | retry: |
475 | vma->vm_flags & VM_SHARED); | 438 | page = find_lock_page(mapping, idx); |
476 | if (!page) | 439 | if (!page) { |
477 | goto out; | 440 | if (hugetlb_get_quota(mapping)) |
441 | goto out; | ||
442 | page = alloc_huge_page(vma, address); | ||
443 | if (!page) { | ||
444 | hugetlb_put_quota(mapping); | ||
445 | goto out; | ||
446 | } | ||
478 | 447 | ||
479 | BUG_ON(!PageLocked(page)); | 448 | if (vma->vm_flags & VM_SHARED) { |
449 | int err; | ||
450 | |||
451 | err = add_to_page_cache(page, mapping, idx, GFP_KERNEL); | ||
452 | if (err) { | ||
453 | put_page(page); | ||
454 | hugetlb_put_quota(mapping); | ||
455 | if (err == -EEXIST) | ||
456 | goto retry; | ||
457 | goto out; | ||
458 | } | ||
459 | } else | ||
460 | lock_page(page); | ||
461 | } | ||
480 | 462 | ||
481 | spin_lock(&mm->page_table_lock); | 463 | spin_lock(&mm->page_table_lock); |
482 | size = i_size_read(mapping->host) >> HPAGE_SHIFT; | 464 | size = i_size_read(mapping->host) >> HPAGE_SHIFT; |