author | npiggin@suse.de <npiggin@suse.de> | 2009-08-20 12:35:05 -0400 |
---|---|---|
committer | al <al@dizzy.pdmi.ras.ru> | 2009-09-24 08:41:47 -0400 |
commit | 25d9e2d15286281ec834b829a4aaf8969011f1cd (patch) | |
tree | e4329a481ca197afae30f04335e023c7d04f7d67 /mm/memory.c | |
parent | eca6f534e61919b28fb21aafbd1c2983deae75be (diff) | |
truncate: new helpers
Introduce new truncate helpers truncate_pagecache and inode_newsize_ok.
vmtruncate is also consolidated from mm/memory.c and mm/nommu.c into
mm/truncate.c.
Reviewed-by: Christoph Hellwig <hch@lst.de>
Signed-off-by: Nick Piggin <npiggin@suse.de>
Signed-off-by: Al Viro <viro@zeniv.linux.org.uk>
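
The new helpers themselves live outside mm/memory.c, so they do not appear in the hunk below. As orientation, here is a minimal sketch of how the consolidated vmtruncate() can be expressed on top of them; the helper signatures truncate_pagecache(inode, old, new) and inode_newsize_ok(inode, offset) are assumptions based on the commit message, not code quoted from this diff, and the fragment assumes the usual kernel headers.

```c
/*
 * Illustrative sketch only -- not quoted from this patch.  Shows how the
 * open-coded logic removed from mm/memory.c (see the diff below) maps onto
 * the two new helpers named in the commit message.
 */
int vmtruncate(struct inode *inode, loff_t offset)
{
	loff_t oldsize;
	int error;

	/* assumed helper: RLIMIT_FSIZE / s_maxbytes / swapfile checks */
	error = inode_newsize_ok(inode, offset);
	if (error)
		return error;

	oldsize = inode->i_size;
	i_size_write(inode, offset);

	/* assumed helper: unmap, truncate_inode_pages(), unmap again */
	truncate_pagecache(inode, oldsize, offset);

	/* filesystems still get their ->truncate callback, as before */
	if (inode->i_op->truncate)
		inode->i_op->truncate(inode);

	return 0;
}
```

The double unmap_mapping_range() pass explained in the comment removed below is exactly what truncate_pagecache() is expected to carry over unchanged.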
Diffstat (limited to 'mm/memory.c')
-rw-r--r-- | mm/memory.c | 62 |
1 file changed, 3 insertions, 59 deletions
```diff
diff --git a/mm/memory.c b/mm/memory.c
index b1443ac07c00..ebcd3decac89 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -297,7 +297,8 @@ void free_pgtables(struct mmu_gather *tlb, struct vm_area_struct *vma,
 		unsigned long addr = vma->vm_start;
 
 		/*
-		 * Hide vma from rmap and vmtruncate before freeing pgtables
+		 * Hide vma from rmap and truncate_pagecache before freeing
+		 * pgtables
 		 */
 		anon_vma_unlink(vma);
 		unlink_file_vma(vma);
@@ -2407,7 +2408,7 @@ restart:
  * @mapping: the address space containing mmaps to be unmapped.
  * @holebegin: byte in first page to unmap, relative to the start of
  * the underlying file. This will be rounded down to a PAGE_SIZE
- * boundary. Note that this is different from vmtruncate(), which
+ * boundary. Note that this is different from truncate_pagecache(), which
  * must keep the partial page. In contrast, we must get rid of
  * partial pages.
  * @holelen: size of prospective hole in bytes. This will be rounded
@@ -2458,63 +2459,6 @@ void unmap_mapping_range(struct address_space *mapping,
 }
 EXPORT_SYMBOL(unmap_mapping_range);
 
-/**
- * vmtruncate - unmap mappings "freed" by truncate() syscall
- * @inode: inode of the file used
- * @offset: file offset to start truncating
- *
- * NOTE! We have to be ready to update the memory sharing
- * between the file and the memory map for a potential last
- * incomplete page. Ugly, but necessary.
- */
-int vmtruncate(struct inode * inode, loff_t offset)
-{
-	if (inode->i_size < offset) {
-		unsigned long limit;
-
-		limit = current->signal->rlim[RLIMIT_FSIZE].rlim_cur;
-		if (limit != RLIM_INFINITY && offset > limit)
-			goto out_sig;
-		if (offset > inode->i_sb->s_maxbytes)
-			goto out_big;
-		i_size_write(inode, offset);
-	} else {
-		struct address_space *mapping = inode->i_mapping;
-
-		/*
-		 * truncation of in-use swapfiles is disallowed - it would
-		 * cause subsequent swapout to scribble on the now-freed
-		 * blocks.
-		 */
-		if (IS_SWAPFILE(inode))
-			return -ETXTBSY;
-		i_size_write(inode, offset);
-
-		/*
-		 * unmap_mapping_range is called twice, first simply for
-		 * efficiency so that truncate_inode_pages does fewer
-		 * single-page unmaps. However after this first call, and
-		 * before truncate_inode_pages finishes, it is possible for
-		 * private pages to be COWed, which remain after
-		 * truncate_inode_pages finishes, hence the second
-		 * unmap_mapping_range call must be made for correctness.
-		 */
-		unmap_mapping_range(mapping, offset + PAGE_SIZE - 1, 0, 1);
-		truncate_inode_pages(mapping, offset);
-		unmap_mapping_range(mapping, offset + PAGE_SIZE - 1, 0, 1);
-	}
-
-	if (inode->i_op->truncate)
-		inode->i_op->truncate(inode);
-	return 0;
-
-out_sig:
-	send_sig(SIGXFSZ, current, 0);
-out_big:
-	return -EFBIG;
-}
-EXPORT_SYMBOL(vmtruncate);
-
 int vmtruncate_range(struct inode *inode, loff_t offset, loff_t end)
 {
 	struct address_space *mapping = inode->i_mapping;
```
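
The size-limit and swapfile checks that the removed vmtruncate() carried inline are the natural content of the new inode_newsize_ok() helper. A sketch under that assumption, keeping the error conventions visible in the removed code (SIGXFSZ and -EFBIG for an over-limit grow, -ETXTBSY for an in-use swapfile); this is an illustration built from the code deleted above, not the helper as merged:

```c
/*
 * Illustrative sketch, assuming inode_newsize_ok() simply centralises the
 * checks the removed vmtruncate() above performed inline.
 */
int inode_newsize_ok(const struct inode *inode, loff_t offset)
{
	if (inode->i_size < offset) {
		/* growing the file: honour RLIMIT_FSIZE and s_maxbytes */
		unsigned long limit = current->signal->rlim[RLIMIT_FSIZE].rlim_cur;

		if (limit != RLIM_INFINITY && offset > limit)
			goto out_sig;
		if (offset > inode->i_sb->s_maxbytes)
			goto out_big;
	} else {
		/*
		 * Shrinking: truncation of in-use swapfiles is disallowed,
		 * since later swapout would scribble on the now-freed blocks.
		 */
		if (IS_SWAPFILE(inode))
			return -ETXTBSY;
	}
	return 0;

out_sig:
	send_sig(SIGXFSZ, current, 0);
out_big:
	return -EFBIG;
}
```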