Diffstat (limited to 'mm')
-rw-r--r--  mm/madvise.c | 35
-rw-r--r--  mm/memory.c  | 25
-rw-r--r--  mm/shmem.c   | 32
3 files changed, 83 insertions(+), 9 deletions(-)
diff --git a/mm/madvise.c b/mm/madvise.c
index 2b7cf0400a21..ae0ae3ea299a 100644
--- a/mm/madvise.c
+++ b/mm/madvise.c
@@ -140,6 +140,36 @@ static long madvise_dontneed(struct vm_area_struct * vma,
 	return 0;
 }
 
+/*
+ * Application wants to free up the pages and associated backing store.
+ * This is effectively punching a hole into the middle of a file.
+ *
+ * NOTE: Currently, only shmfs/tmpfs is supported for this operation.
+ * Other filesystems return -ENOSYS.
+ */
+static long madvise_remove(struct vm_area_struct *vma,
+				unsigned long start, unsigned long end)
+{
+	struct address_space *mapping;
+	loff_t offset, endoff;
+
+	if (vma->vm_flags & (VM_LOCKED|VM_NONLINEAR|VM_HUGETLB))
+		return -EINVAL;
+
+	if (!vma->vm_file || !vma->vm_file->f_mapping
+		|| !vma->vm_file->f_mapping->host) {
+		return -EINVAL;
+	}
+
+	mapping = vma->vm_file->f_mapping;
+
+	offset = (loff_t)(start - vma->vm_start)
+			+ ((loff_t)vma->vm_pgoff << PAGE_SHIFT);
+	endoff = (loff_t)(end - vma->vm_start - 1)
+			+ ((loff_t)vma->vm_pgoff << PAGE_SHIFT);
+	return vmtruncate_range(mapping->host, offset, endoff);
+}
+
 static long
 madvise_vma(struct vm_area_struct *vma, struct vm_area_struct **prev,
 		unsigned long start, unsigned long end, int behavior)
@@ -152,6 +182,9 @@ madvise_vma(struct vm_area_struct *vma, struct vm_area_struct **prev,
 	case MADV_RANDOM:
 		error = madvise_behavior(vma, prev, start, end, behavior);
 		break;
+	case MADV_REMOVE:
+		error = madvise_remove(vma, start, end);
+		break;
 
 	case MADV_WILLNEED:
 		error = madvise_willneed(vma, prev, start, end);
@@ -190,6 +223,8 @@ madvise_vma(struct vm_area_struct *vma, struct vm_area_struct **prev,
  *		some pages ahead.
  *  MADV_DONTNEED - the application is finished with the given range,
  *		so the kernel can free resources associated with it.
+ *  MADV_REMOVE - the application wants to free up the given range of
+ *		pages and associated backing store.
  *
  * return values:
  *  zero	- success
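[Not part of the patch] For reference, a minimal userspace sketch of how the new advice is meant to be used, assuming a tmpfs-backed file and headers that define MADV_REMOVE; the file name and range are made up for illustration. The punched range reads back as zeroes afterwards, and on a filesystem without a truncate_range operation the call fails with ENOSYS, matching the note above.

/* Illustrative only: punch a hole in a tmpfs file via madvise(MADV_REMOVE). */
#include <stdio.h>
#include <string.h>
#include <fcntl.h>
#include <unistd.h>
#include <sys/mman.h>

int main(void)
{
	long page = sysconf(_SC_PAGESIZE);
	size_t len = 4 * page;
	char *p;
	int fd = open("/dev/shm/madv-remove-demo", O_RDWR | O_CREAT, 0600);

	if (fd < 0 || ftruncate(fd, len) < 0) {
		perror("open/ftruncate");
		return 1;
	}
	p = mmap(NULL, len, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
	if (p == MAP_FAILED) {
		perror("mmap");
		return 1;
	}
	memset(p, 'x', len);	/* instantiate the backing pages */

	/* Free pages 1 and 2 of the mapping along with their backing store. */
	if (madvise(p + page, 2 * page, MADV_REMOVE) != 0)
		perror("madvise(MADV_REMOVE)");	/* ENOSYS if fs lacks truncate_range */

	munmap(p, len);
	close(fd);
	return 0;
}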
diff --git a/mm/memory.c b/mm/memory.c
index d8dde07a3656..e249088908c4 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -1770,9 +1770,32 @@ out_big:
 out_busy:
 	return -ETXTBSY;
 }
-
 EXPORT_SYMBOL(vmtruncate);
 
+int vmtruncate_range(struct inode *inode, loff_t offset, loff_t end)
+{
+	struct address_space *mapping = inode->i_mapping;
+
+	/*
+	 * If the underlying filesystem is not going to provide
+	 * a way to truncate a range of blocks (punch a hole) -
+	 * we should return failure right now.
+	 */
+	if (!inode->i_op || !inode->i_op->truncate_range)
+		return -ENOSYS;
+
+	down(&inode->i_sem);
+	down_write(&inode->i_alloc_sem);
+	unmap_mapping_range(mapping, offset, (end - offset), 1);
+	truncate_inode_pages_range(mapping, offset, end);
+	inode->i_op->truncate_range(inode, offset, end);
+	up_write(&inode->i_alloc_sem);
+	up(&inode->i_sem);
+
+	return 0;
+}
+EXPORT_SYMBOL(vmtruncate_range);
+
 /*
  * Primitive swap readahead code. We simply read an aligned block of
  * (1 << page_cluster) entries in the swap area. This method is chosen
diff --git a/mm/shmem.c b/mm/shmem.c
index d9fc277940da..65c148efa2ed 100644
--- a/mm/shmem.c
+++ b/mm/shmem.c
@@ -457,7 +457,7 @@ static void shmem_free_pages(struct list_head *next)
 	} while (next);
 }
 
-static void shmem_truncate(struct inode *inode)
+static void shmem_truncate_range(struct inode *inode, loff_t start, loff_t end)
 {
 	struct shmem_inode_info *info = SHMEM_I(inode);
 	unsigned long idx;
@@ -475,18 +475,27 @@ static void shmem_truncate(struct inode *inode)
 	long nr_swaps_freed = 0;
 	int offset;
 	int freed;
+	int punch_hole = 0;
 
 	inode->i_ctime = inode->i_mtime = CURRENT_TIME;
-	idx = (inode->i_size + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT;
+	idx = (start + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT;
 	if (idx >= info->next_index)
 		return;
 
 	spin_lock(&info->lock);
 	info->flags |= SHMEM_TRUNCATE;
-	limit = info->next_index;
-	info->next_index = idx;
+	if (likely(end == (loff_t) -1)) {
+		limit = info->next_index;
+		info->next_index = idx;
+	} else {
+		limit = (end + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT;
+		if (limit > info->next_index)
+			limit = info->next_index;
+		punch_hole = 1;
+	}
+
 	topdir = info->i_indirect;
-	if (topdir && idx <= SHMEM_NR_DIRECT) {
+	if (topdir && idx <= SHMEM_NR_DIRECT && !punch_hole) {
 		info->i_indirect = NULL;
 		nr_pages_to_free++;
 		list_add(&topdir->lru, &pages_to_free);
@@ -573,11 +582,12 @@ static void shmem_truncate(struct inode *inode)
 				set_page_private(subdir, page_private(subdir) - freed);
 				if (offset)
 					spin_unlock(&info->lock);
-				BUG_ON(page_private(subdir) > offset);
+				if (!punch_hole)
+					BUG_ON(page_private(subdir) > offset);
 			}
 			if (offset)
 				offset = 0;
-			else if (subdir) {
+			else if (subdir && !page_private(subdir)) {
 				dir[diroff] = NULL;
 				nr_pages_to_free++;
 				list_add(&subdir->lru, &pages_to_free);
@@ -594,7 +604,7 @@ done2:
 		 * Also, though shmem_getpage checks i_size before adding to
 		 * cache, no recheck after: so fix the narrow window there too.
 		 */
-		truncate_inode_pages(inode->i_mapping, inode->i_size);
+		truncate_inode_pages_range(inode->i_mapping, start, end);
 	}
 
 	spin_lock(&info->lock);
@@ -614,6 +624,11 @@ done2:
 	}
 }
 
+static void shmem_truncate(struct inode *inode)
+{
+	shmem_truncate_range(inode, inode->i_size, (loff_t)-1);
+}
+
 static int shmem_notify_change(struct dentry *dentry, struct iattr *attr)
 {
 	struct inode *inode = dentry->d_inode;
@@ -2083,6 +2098,7 @@ static struct file_operations shmem_file_operations = {
 static struct inode_operations shmem_inode_operations = {
 	.truncate	= shmem_truncate,
 	.setattr	= shmem_notify_change,
+	.truncate_range	= shmem_truncate_range,
 };
 
 static struct inode_operations shmem_dir_inode_operations = {