author     Hugh Dickins <hughd@google.com>            2012-05-29 18:06:40 -0400
committer  Linus Torvalds <torvalds@linux-foundation.org>  2012-05-29 19:22:22 -0400
commit     83e4fa9c16e4af7122e31be3eca5d57881d236fe (patch)
tree       594bc4152a58314a3f5068aefaa28783d1f2021a /mm/shmem.c
parent     ec9516fbc5fa814014991e1ae7f8860127122105 (diff)
tmpfs: support fallocate FALLOC_FL_PUNCH_HOLE
tmpfs has supported hole-punching since 2.6.16, via
madvise(,,MADV_REMOVE).

But nowadays fallocate(,FALLOC_FL_PUNCH_HOLE|FALLOC_FL_KEEP_SIZE,,) is
the agreed way to punch holes.

So add shmem_fallocate() to support that, and tweak
shmem_truncate_range() to support partial pages at both the beginning
and end of range (never needed for madvise, which demands rounded addr
and rounds up length).

Based-on-patch-by: Cong Wang <amwang@redhat.com>
Signed-off-by: Hugh Dickins <hughd@google.com>
Cc: Christoph Hellwig <hch@infradead.org>
Cc: Cong Wang <amwang@redhat.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
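For illustration, a minimal userspace sketch of the interface this patch
implements (not part of the patch; the path /dev/shm/punch-demo and the
unaligned offset/length are arbitrary, chosen so the request exercises the
new partial-page handling at both ends of the range):

#define _GNU_SOURCE		/* exposes fallocate() and FALLOC_FL_* */
#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>

int main(void)
{
	const char *path = "/dev/shm/punch-demo";	/* assumes tmpfs here */
	char buf[8192];
	int fd = open(path, O_RDWR | O_CREAT | O_TRUNC, 0600);

	if (fd < 0) {
		perror("open");
		return 1;
	}
	memset(buf, 'x', sizeof(buf));
	if (write(fd, buf, sizeof(buf)) != (ssize_t)sizeof(buf)) {
		perror("write");
		return 1;
	}
	/*
	 * Punch a hole that is deliberately not page-aligned (offset 1000,
	 * length 5000): the patched shmem_truncate_range() zeroes the
	 * partial first and last pages and drops any whole pages between.
	 * FALLOC_FL_KEEP_SIZE is mandatory with FALLOC_FL_PUNCH_HOLE, so
	 * i_size is unchanged.
	 */
	if (fallocate(fd, FALLOC_FL_PUNCH_HOLE | FALLOC_FL_KEEP_SIZE,
		      1000, 5000) < 0) {
		perror("fallocate");	/* EOPNOTSUPP without this patch */
		return 1;
	}
	close(fd);
	unlink(path);
	return 0;
}

Before this patch, tmpfs has no .fallocate method, so the call above fails
with EOPNOTSUPP and madvise(,,MADV_REMOVE) on a mapping of the file was the
only way to punch the hole.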
Diffstat (limited to 'mm/shmem.c')
-rw-r--r--	mm/shmem.c	68
1 file changed, 57 insertions(+), 11 deletions(-)
diff --git a/mm/shmem.c b/mm/shmem.c
index 45c26476f0fc..7e54ff1c63e1 100644
--- a/mm/shmem.c
+++ b/mm/shmem.c
@@ -53,6 +53,7 @@ static struct vfsmount *shm_mnt;
 #include <linux/blkdev.h>
 #include <linux/pagevec.h>
 #include <linux/percpu_counter.h>
+#include <linux/falloc.h>
 #include <linux/splice.h>
 #include <linux/security.h>
 #include <linux/swapops.h>
@@ -432,21 +433,23 @@ void shmem_truncate_range(struct inode *inode, loff_t lstart, loff_t lend)
 	struct address_space *mapping = inode->i_mapping;
 	struct shmem_inode_info *info = SHMEM_I(inode);
 	pgoff_t start = (lstart + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT;
-	unsigned partial = lstart & (PAGE_CACHE_SIZE - 1);
-	pgoff_t end = (lend >> PAGE_CACHE_SHIFT);
+	pgoff_t end = (lend + 1) >> PAGE_CACHE_SHIFT;
+	unsigned int partial_start = lstart & (PAGE_CACHE_SIZE - 1);
+	unsigned int partial_end = (lend + 1) & (PAGE_CACHE_SIZE - 1);
 	struct pagevec pvec;
 	pgoff_t indices[PAGEVEC_SIZE];
 	long nr_swaps_freed = 0;
 	pgoff_t index;
 	int i;
 
-	BUG_ON((lend & (PAGE_CACHE_SIZE - 1)) != (PAGE_CACHE_SIZE - 1));
+	if (lend == -1)
+		end = -1;	/* unsigned, so actually very big */
 
 	pagevec_init(&pvec, 0);
 	index = start;
-	while (index <= end) {
+	while (index < end) {
 		pvec.nr = shmem_find_get_pages_and_swap(mapping, index,
-			min(end - index, (pgoff_t)PAGEVEC_SIZE - 1) + 1,
+				min(end - index, (pgoff_t)PAGEVEC_SIZE),
 							pvec.pages, indices);
 		if (!pvec.nr)
 			break;
@@ -455,7 +458,7 @@ void shmem_truncate_range(struct inode *inode, loff_t lstart, loff_t lend)
 			struct page *page = pvec.pages[i];
 
 			index = indices[i];
-			if (index > end)
+			if (index >= end)
 				break;
 
 			if (radix_tree_exceptional_entry(page)) {
@@ -479,22 +482,39 @@ void shmem_truncate_range(struct inode *inode, loff_t lstart, loff_t lend)
 		index++;
 	}
 
-	if (partial) {
+	if (partial_start) {
 		struct page *page = NULL;
 		shmem_getpage(inode, start - 1, &page, SGP_READ, NULL);
 		if (page) {
-			zero_user_segment(page, partial, PAGE_CACHE_SIZE);
+			unsigned int top = PAGE_CACHE_SIZE;
+			if (start > end) {
+				top = partial_end;
+				partial_end = 0;
+			}
+			zero_user_segment(page, partial_start, top);
+			set_page_dirty(page);
+			unlock_page(page);
+			page_cache_release(page);
+		}
+	}
+	if (partial_end) {
+		struct page *page = NULL;
+		shmem_getpage(inode, end, &page, SGP_READ, NULL);
+		if (page) {
+			zero_user_segment(page, 0, partial_end);
 			set_page_dirty(page);
 			unlock_page(page);
 			page_cache_release(page);
 		}
 	}
+	if (start >= end)
+		return;
 
 	index = start;
 	for ( ; ; ) {
 		cond_resched();
 		pvec.nr = shmem_find_get_pages_and_swap(mapping, index,
-			min(end - index, (pgoff_t)PAGEVEC_SIZE - 1) + 1,
+				min(end - index, (pgoff_t)PAGEVEC_SIZE),
 							pvec.pages, indices);
 		if (!pvec.nr) {
 			if (index == start)
@@ -502,7 +522,7 @@ void shmem_truncate_range(struct inode *inode, loff_t lstart, loff_t lend)
 			index = start;
 			continue;
 		}
-		if (index == start && indices[0] > end) {
+		if (index == start && indices[0] >= end) {
 			shmem_deswap_pagevec(&pvec);
 			pagevec_release(&pvec);
 			break;
@@ -512,7 +532,7 @@ void shmem_truncate_range(struct inode *inode, loff_t lstart, loff_t lend)
 			struct page *page = pvec.pages[i];
 
 			index = indices[i];
-			if (index > end)
+			if (index >= end)
 				break;
 
 			if (radix_tree_exceptional_entry(page)) {
@@ -1578,6 +1598,31 @@ static ssize_t shmem_file_splice_read(struct file *in, loff_t *ppos,
 	return error;
 }
 
+static long shmem_fallocate(struct file *file, int mode, loff_t offset,
+							 loff_t len)
+{
+	struct inode *inode = file->f_path.dentry->d_inode;
+	int error = -EOPNOTSUPP;
+
+	mutex_lock(&inode->i_mutex);
+
+	if (mode & FALLOC_FL_PUNCH_HOLE) {
+		struct address_space *mapping = file->f_mapping;
+		loff_t unmap_start = round_up(offset, PAGE_SIZE);
+		loff_t unmap_end = round_down(offset + len, PAGE_SIZE) - 1;
+
+		if ((u64)unmap_end > (u64)unmap_start)
+			unmap_mapping_range(mapping, unmap_start,
+					    1 + unmap_end - unmap_start, 0);
+		shmem_truncate_range(inode, offset, offset + len - 1);
+		/* No need to unmap again: hole-punching leaves COWed pages */
+		error = 0;
+	}
+
+	mutex_unlock(&inode->i_mutex);
+	return error;
+}
+
 static int shmem_statfs(struct dentry *dentry, struct kstatfs *buf)
 {
 	struct shmem_sb_info *sbinfo = SHMEM_SB(dentry->d_sb);
@@ -2490,6 +2535,7 @@ static const struct file_operations shmem_file_operations = {
 	.fsync		= noop_fsync,
 	.splice_read	= shmem_file_splice_read,
 	.splice_write	= generic_file_splice_write,
+	.fallocate	= shmem_fallocate,
 #endif
 };
 
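As a quick sanity check on the rounding in shmem_fallocate() above, here is
a standalone userspace sketch (assuming 4096-byte pages; round_up() and
round_down() are local stand-ins for the kernel macros). Only the whole
pages strictly inside the hole are passed to unmap_mapping_range(); partial
pages at either end are left to shmem_truncate_range(), which zeroes them:

#include <stdio.h>

#define PAGE_SIZE 4096ULL
/* local stand-ins for the kernel's round_up()/round_down() */
#define round_up(x, y)   ((((x) + (y) - 1) / (y)) * (y))
#define round_down(x, y) (((x) / (y)) * (y))

static void show(unsigned long long offset, unsigned long long len)
{
	unsigned long long unmap_start = round_up(offset, PAGE_SIZE);
	unsigned long long unmap_end = round_down(offset + len, PAGE_SIZE) - 1;

	if (unmap_end > unmap_start)	/* same guard as shmem_fallocate() */
		printf("punch [%llu, %llu): unmap whole pages, bytes [%llu, %llu]\n",
		       offset, offset + len, unmap_start, unmap_end);
	else
		printf("punch [%llu, %llu): no whole page inside, unmap skipped\n",
		       offset, offset + len);
}

int main(void)
{
	show(1000, 9000);	/* page 1 lies wholly inside: unmap [4096, 8191] */
	show(1000, 5000);	/* both ends partial, no whole page: skipped */
	return 0;
}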