path: root/mm/hugetlb.c
author     Mike Kravetz <mike.kravetz@oracle.com>        2015-09-08 18:01:50 -0400
committer  Linus Torvalds <torvalds@linux-foundation.org> 2015-09-08 18:35:28 -0400
commit     ab76ad540a50191308e5bb6b5e2d9e26c78616d3 (patch)
tree       2d3f6ffea05caf95cf96807b382ee9323a2b1435 /mm/hugetlb.c
parent     d85f69b0b533ec6d7ac8c21db958c44c6d957c90 (diff)
hugetlbfs: New huge_add_to_page_cache helper routine
Currently, there is only a single place where hugetlbfs pages are added to the page cache. The new fallocate code will be adding a second one, so break the functionality out into its own helper.

Signed-off-by: Dave Hansen <dave.hansen@linux.intel.com>
Signed-off-by: Mike Kravetz <mike.kravetz@oracle.com>
Reviewed-by: Naoya Horiguchi <n-horiguchi@ah.jp.nec.com>
Acked-by: Hillf Danton <hillf.zj@alibaba-inc.com>
Cc: David Rientjes <rientjes@google.com>
Cc: Hugh Dickins <hughd@google.com>
Cc: Davidlohr Bueso <dave@stgolabs.net>
Cc: Aneesh Kumar <aneesh.kumar@linux.vnet.ibm.com>
Cc: Christoph Hellwig <hch@infradead.org>
Cc: Michal Hocko <mhocko@suse.cz>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Diffstat (limited to 'mm/hugetlb.c')
-rw-r--r--   mm/hugetlb.c   27
1 file changed, 18 insertions, 9 deletions
diff --git a/mm/hugetlb.c b/mm/hugetlb.c
index 114ad6ce7030..d45eacc5653e 100644
--- a/mm/hugetlb.c
+++ b/mm/hugetlb.c
@@ -3375,6 +3375,23 @@ static bool hugetlbfs_pagecache_present(struct hstate *h,
 	return page != NULL;
 }
 
+int huge_add_to_page_cache(struct page *page, struct address_space *mapping,
+			   pgoff_t idx)
+{
+	struct inode *inode = mapping->host;
+	struct hstate *h = hstate_inode(inode);
+	int err = add_to_page_cache(page, mapping, idx, GFP_KERNEL);
+
+	if (err)
+		return err;
+	ClearPagePrivate(page);
+
+	spin_lock(&inode->i_lock);
+	inode->i_blocks += blocks_per_huge_page(h);
+	spin_unlock(&inode->i_lock);
+	return 0;
+}
+
 static int hugetlb_no_page(struct mm_struct *mm, struct vm_area_struct *vma,
 			   struct address_space *mapping, pgoff_t idx,
 			   unsigned long address, pte_t *ptep, unsigned int flags)
@@ -3422,21 +3439,13 @@ retry:
 		set_page_huge_active(page);
 
 		if (vma->vm_flags & VM_MAYSHARE) {
-			int err;
-			struct inode *inode = mapping->host;
-
-			err = add_to_page_cache(page, mapping, idx, GFP_KERNEL);
+			int err = huge_add_to_page_cache(page, mapping, idx);
 			if (err) {
 				put_page(page);
 				if (err == -EEXIST)
 					goto retry;
 				goto out;
 			}
-			ClearPagePrivate(page);
-
-			spin_lock(&inode->i_lock);
-			inode->i_blocks += blocks_per_huge_page(h);
-			spin_unlock(&inode->i_lock);
 		} else {
 			lock_page(page);
 			if (unlikely(anon_vma_prepare(vma))) {
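
For readers who want to see the helper from the caller's side, here is a minimal sketch of how a second user, such as the fallocate support the commit message anticipates, might call huge_add_to_page_cache(). The function name, headers, and unlock/put pattern below are assumptions made for illustration; they are not part of this commit.

/*
 * Illustrative sketch only; assumes huge_add_to_page_cache() is declared in
 * <linux/hugetlb.h> and that the caller holds a reference on a freshly
 * allocated huge page that is not yet in the page cache.
 */
#include <linux/mm.h>
#include <linux/pagemap.h>
#include <linux/hugetlb.h>

static int example_install_huge_page(struct page *page,
				     struct address_space *mapping,
				     pgoff_t idx)
{
	/*
	 * The helper wraps the add_to_page_cache() call, ClearPagePrivate(),
	 * and the inode i_blocks accounting that hugetlb_no_page() used to
	 * open-code.
	 */
	int err = huge_add_to_page_cache(page, mapping, idx);

	if (err) {
		/* Drop our reference; -EEXIST means another thread raced us. */
		put_page(page);
		return err;
	}

	/*
	 * On success the page cache holds its own reference and the page was
	 * locked by add_to_page_cache(), so unlock it and release ours.
	 */
	unlock_page(page);
	put_page(page);
	return 0;
}

The point of the refactor is that hugetlb_no_page() and any future caller share the same page-cache insertion and i_blocks accounting instead of duplicating it.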