author		Hugh Dickins <hughd@google.com>	2011-08-03 19:21:21 -0400
committer	Linus Torvalds <torvalds@linux-foundation.org>	2011-08-03 20:25:23 -0400
commit		bda97eab0cc9c6385b9f26abdda6459f630f4513 (patch)
tree		bfa418b90c5889a1cd33836fd8f0a2f0232e3dac /mm
parent		41ffe5d5ceef7f7ff2ff18e320d88ca6d629efaf (diff)
tmpfs: copy truncate_inode_pages_range
Bring truncate.c's code for truncate_inode_pages_range() inline into
shmem_truncate_range(), replacing its first call (there's a followup
call below, but leave that one, it will disappear next).

Don't play with it yet, apart from leaving out the cleancache flush,
and (importantly) the nrpages == 0 skip, and moving shmem_setattr()'s
partial page preparation into its partial page handling.

Signed-off-by: Hugh Dickins <hughd@google.com>
Acked-by: Rik van Riel <riel@redhat.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
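For orientation, the code being copied in is a two-pass scan over the page cache:
a first, best-effort pass that trylocks each page and skips anything already locked
or under writeback, then a second pass that restarts from the top, sleeping on locks
and writeback, until the range is truly empty. The following is a condensed sketch of
that structure only, not the patch text: the function name is made up, and the memcg
accounting, the WARN_ONs, and the partial-page zeroing present in the real diff below
are deliberately left out.

/*
 * Condensed sketch -- two-pass truncation of the page-cache range
 * [start, end] of @mapping, as in the hunk below.
 */
static void shmem_truncate_sketch(struct address_space *mapping,
				  pgoff_t start, pgoff_t end)
{
	struct pagevec pvec;
	pgoff_t index;
	int i;

	pagevec_init(&pvec, 0);

	/* Pass 1: best effort -- trylock pages, skip anything busy. */
	index = start;
	while (index <= end && pagevec_lookup(&pvec, mapping, index,
			min(end - index, (pgoff_t)PAGEVEC_SIZE - 1) + 1)) {
		for (i = 0; i < pagevec_count(&pvec); i++) {
			struct page *page = pvec.pages[i];

			index = page->index;
			if (index > end)
				break;
			if (!trylock_page(page))
				continue;	/* someone else holds the lock */
			if (PageWriteback(page)) {
				unlock_page(page);
				continue;	/* don't wait in this pass */
			}
			truncate_inode_page(mapping, page);
			unlock_page(page);
		}
		pagevec_release(&pvec);
		cond_resched();
		index++;
	}

	/* Pass 2: definitive -- sleep on locks and writeback, rescan until empty. */
	index = start;
	for (;;) {
		cond_resched();
		if (!pagevec_lookup(&pvec, mapping, index,
				min(end - index, (pgoff_t)PAGEVEC_SIZE - 1) + 1)) {
			if (index == start)
				break;		/* range is clean */
			index = start;		/* rescan for stragglers */
			continue;
		}
		if (index == start && pvec.pages[0]->index > end) {
			pagevec_release(&pvec);	/* only pages beyond the range remain */
			break;
		}
		for (i = 0; i < pagevec_count(&pvec); i++) {
			struct page *page = pvec.pages[i];

			index = page->index;
			if (index > end)
				break;
			lock_page(page);
			wait_on_page_writeback(page);
			truncate_inode_page(mapping, page);
			unlock_page(page);
		}
		pagevec_release(&pvec);
		index++;
	}
}

The real shmem_truncate_range() in the diff below additionally zeroes the tail of a
partial first page via shmem_getpage() and zero_user_segment() between the two passes.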
Diffstat (limited to 'mm')
-rw-r--r--	mm/shmem.c	99
1 file changed, 79 insertions(+), 20 deletions(-)
diff --git a/mm/shmem.c b/mm/shmem.c
index 24e95ac1605..e101c211ed1 100644
--- a/mm/shmem.c
+++ b/mm/shmem.c
@@ -50,6 +50,7 @@ static struct vfsmount *shm_mnt;
 #include <linux/shmem_fs.h>
 #include <linux/writeback.h>
 #include <linux/blkdev.h>
+#include <linux/pagevec.h>
 #include <linux/percpu_counter.h>
 #include <linux/splice.h>
 #include <linux/security.h>
@@ -242,11 +243,88 @@ void shmem_truncate_range(struct inode *inode, loff_t lstart, loff_t lend)
 	struct address_space *mapping = inode->i_mapping;
 	struct shmem_inode_info *info = SHMEM_I(inode);
 	pgoff_t start = (lstart + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT;
+	unsigned partial = lstart & (PAGE_CACHE_SIZE - 1);
 	pgoff_t end = (lend >> PAGE_CACHE_SHIFT);
+	struct pagevec pvec;
 	pgoff_t index;
 	swp_entry_t swap;
+	int i;
+
+	BUG_ON((lend & (PAGE_CACHE_SIZE - 1)) != (PAGE_CACHE_SIZE - 1));
+
+	pagevec_init(&pvec, 0);
+	index = start;
+	while (index <= end && pagevec_lookup(&pvec, mapping, index,
+			min(end - index, (pgoff_t)PAGEVEC_SIZE - 1) + 1)) {
+		mem_cgroup_uncharge_start();
+		for (i = 0; i < pagevec_count(&pvec); i++) {
+			struct page *page = pvec.pages[i];
+
+			/* We rely upon deletion not changing page->index */
+			index = page->index;
+			if (index > end)
+				break;
+
+			if (!trylock_page(page))
+				continue;
+			WARN_ON(page->index != index);
+			if (PageWriteback(page)) {
+				unlock_page(page);
+				continue;
+			}
+			truncate_inode_page(mapping, page);
+			unlock_page(page);
+		}
+		pagevec_release(&pvec);
+		mem_cgroup_uncharge_end();
+		cond_resched();
+		index++;
+	}
+
+	if (partial) {
+		struct page *page = NULL;
+		shmem_getpage(inode, start - 1, &page, SGP_READ, NULL);
+		if (page) {
+			zero_user_segment(page, partial, PAGE_CACHE_SIZE);
+			set_page_dirty(page);
+			unlock_page(page);
+			page_cache_release(page);
+		}
+	}
+
+	index = start;
+	for ( ; ; ) {
+		cond_resched();
+		if (!pagevec_lookup(&pvec, mapping, index,
+				min(end - index, (pgoff_t)PAGEVEC_SIZE - 1) + 1)) {
+			if (index == start)
+				break;
+			index = start;
+			continue;
+		}
+		if (index == start && pvec.pages[0]->index > end) {
+			pagevec_release(&pvec);
+			break;
+		}
+		mem_cgroup_uncharge_start();
+		for (i = 0; i < pagevec_count(&pvec); i++) {
+			struct page *page = pvec.pages[i];
 
-	truncate_inode_pages_range(mapping, lstart, lend);
+			/* We rely upon deletion not changing page->index */
+			index = page->index;
+			if (index > end)
+				break;
+
+			lock_page(page);
+			WARN_ON(page->index != index);
+			wait_on_page_writeback(page);
+			truncate_inode_page(mapping, page);
+			unlock_page(page);
+		}
+		pagevec_release(&pvec);
+		mem_cgroup_uncharge_end();
+		index++;
+	}
 
 	if (end > SHMEM_NR_DIRECT)
 		end = SHMEM_NR_DIRECT;
@@ -289,24 +367,7 @@ static int shmem_setattr(struct dentry *dentry, struct iattr *attr)
 	if (S_ISREG(inode->i_mode) && (attr->ia_valid & ATTR_SIZE)) {
 		loff_t oldsize = inode->i_size;
 		loff_t newsize = attr->ia_size;
-		struct page *page = NULL;
 
-		if (newsize < oldsize) {
-			/*
-			 * If truncating down to a partial page, then
-			 * if that page is already allocated, hold it
-			 * in memory until the truncation is over, so
-			 * truncate_partial_page cannot miss it were
-			 * it assigned to swap.
-			 */
-			if (newsize & (PAGE_CACHE_SIZE-1)) {
-				(void) shmem_getpage(inode,
-						newsize >> PAGE_CACHE_SHIFT,
-						&page, SGP_READ, NULL);
-				if (page)
-					unlock_page(page);
-			}
-		}
 		if (newsize != oldsize) {
 			i_size_write(inode, newsize);
 			inode->i_ctime = inode->i_mtime = CURRENT_TIME;
@@ -318,8 +379,6 @@ static int shmem_setattr(struct dentry *dentry, struct iattr *attr)
 			/* unmap again to remove racily COWed private pages */
 			unmap_mapping_range(inode->i_mapping, holebegin, 0, 1);
 		}
-		if (page)
-			page_cache_release(page);
 	}
 
 	setattr_copy(inode, attr);