summary | refs | log | tree | commit | diff | stats
path: root/fs/hugetlbfs
diff options
context:
space:
mode:
author: Mike Kravetz <mike.kravetz@oracle.com> 2015-09-08 18:01:38 -0400
committer: Linus Torvalds <torvalds@linux-foundation.org> 2015-09-08 18:35:28 -0400
commit: 1bfad99ab42569807d0ca1698449cae5e8c0334a (patch)
tree: 50676e321bcd807c229f85e6a272df3865480515 /fs/hugetlbfs
parent: c672c7f29f2fdb73e1f72911bf499675c81fcdbb (diff)
hugetlbfs: hugetlb_vmtruncate_list() needs to take a range to delete
fallocate hole punch will want to unmap a specific range of pages. Modify the existing hugetlb_vmtruncate_list() routine to take a start/end range. If end is 0, this indicates all pages after start should be unmapped. This is the same as the existing truncate functionality. Modify existing callers to add 0 as end of range. Since the routine will be used in hole punch as well as truncate operations, it is more appropriately renamed to hugetlb_vmdelete_list(). Signed-off-by: Mike Kravetz <mike.kravetz@oracle.com> Reviewed-by: Naoya Horiguchi <n-horiguchi@ah.jp.nec.com> Acked-by: Hillf Danton <hillf.zj@alibaba-inc.com> Cc: Dave Hansen <dave.hansen@linux.intel.com> Cc: David Rientjes <rientjes@google.com> Cc: Hugh Dickins <hughd@google.com> Cc: Davidlohr Bueso <dave@stgolabs.net> Cc: Aneesh Kumar <aneesh.kumar@linux.vnet.ibm.com> Cc: Christoph Hellwig <hch@infradead.org> Cc: Michal Hocko <mhocko@suse.cz> Signed-off-by: Andrew Morton <akpm@linux-foundation.org> Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Diffstat (limited to 'fs/hugetlbfs')
-rw-r--r--  fs/hugetlbfs/inode.c  25
1 file changed, 18 insertions(+), 7 deletions(-)
diff --git a/fs/hugetlbfs/inode.c b/fs/hugetlbfs/inode.c
index 973c24ce59ad..b1e197d38abb 100644
--- a/fs/hugetlbfs/inode.c
+++ b/fs/hugetlbfs/inode.c
@@ -349,11 +349,15 @@ static void hugetlbfs_evict_inode(struct inode *inode)
349} 349}
350 350
351static inline void 351static inline void
352hugetlb_vmtruncate_list(struct rb_root *root, pgoff_t pgoff) 352hugetlb_vmdelete_list(struct rb_root *root, pgoff_t start, pgoff_t end)
353{ 353{
354 struct vm_area_struct *vma; 354 struct vm_area_struct *vma;
355 355
356 vma_interval_tree_foreach(vma, root, pgoff, ULONG_MAX) { 356 /*
357 * end == 0 indicates that the entire range after
358 * start should be unmapped.
359 */
360 vma_interval_tree_foreach(vma, root, start, end ? end : ULONG_MAX) {
357 unsigned long v_offset; 361 unsigned long v_offset;
358 362
359 /* 363 /*
@@ -362,13 +366,20 @@ hugetlb_vmtruncate_list(struct rb_root *root, pgoff_t pgoff)
362 * which overlap the truncated area starting at pgoff, 366 * which overlap the truncated area starting at pgoff,
363 * and no vma on a 32-bit arch can span beyond the 4GB. 367 * and no vma on a 32-bit arch can span beyond the 4GB.
364 */ 368 */
365 if (vma->vm_pgoff < pgoff) 369 if (vma->vm_pgoff < start)
366 v_offset = (pgoff - vma->vm_pgoff) << PAGE_SHIFT; 370 v_offset = (start - vma->vm_pgoff) << PAGE_SHIFT;
367 else 371 else
368 v_offset = 0; 372 v_offset = 0;
369 373
370 unmap_hugepage_range(vma, vma->vm_start + v_offset, 374 if (end) {
371 vma->vm_end, NULL); 375 end = ((end - start) << PAGE_SHIFT) +
376 vma->vm_start + v_offset;
377 if (end > vma->vm_end)
378 end = vma->vm_end;
379 } else
380 end = vma->vm_end;
381
382 unmap_hugepage_range(vma, vma->vm_start + v_offset, end, NULL);
372 } 383 }
373} 384}
374 385
@@ -384,7 +395,7 @@ static int hugetlb_vmtruncate(struct inode *inode, loff_t offset)
384 i_size_write(inode, offset); 395 i_size_write(inode, offset);
385 i_mmap_lock_write(mapping); 396 i_mmap_lock_write(mapping);
386 if (!RB_EMPTY_ROOT(&mapping->i_mmap)) 397 if (!RB_EMPTY_ROOT(&mapping->i_mmap))
387 hugetlb_vmtruncate_list(&mapping->i_mmap, pgoff); 398 hugetlb_vmdelete_list(&mapping->i_mmap, pgoff, 0);
388 i_mmap_unlock_write(mapping); 399 i_mmap_unlock_write(mapping);
389 truncate_hugepages(inode, offset); 400 truncate_hugepages(inode, offset);
390 return 0; 401 return 0;