author    Christoph Hellwig <hch@lst.de>      2011-01-14 07:07:43 -0500
committer Al Viro <viro@zeniv.linux.org.uk>   2011-01-17 02:25:31 -0500
commit    2fe17c1075836b66678ed2a305fd09b6773883aa (patch)
tree      eb5287be8138686682eef9622872cfc7657e0664 /fs
parent    64c23e86873ee410554d6d1c76b60da47025e96f (diff)
fallocate should be a file operation
Currently all filesystems except XFS implement fallocate asynchronously, while XFS forces a transaction commit. Both of these are suboptimal: in the case of O_SYNC I/O we really want the allocation on disk, especially for the !KEEP_SIZE case where we actually grow the file with user-visible zeroes. On the other hand, always committing the transaction is a bad idea for fast-path uses of fallocate, such as those in recent Samba versions. Given that block allocation is a data-plane operation anyway, change it from an inode operation to a file operation, so that we have the file structure available and can check for O_SYNC.

This also includes moving the code around for a few of the filesystems, and removing the now-unneeded S_ISDIR checks, given that we only wire up fallocate for regular files.

Signed-off-by: Christoph Hellwig <hch@lst.de>
Signed-off-by: Al Viro <viro@zeniv.linux.org.uk>
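For orientation, the interface change boils down to moving the hook between the two operations tables; a minimal sketch of the before/after prototypes (the include/linux/fs.h hunk itself lies outside the fs/-limited diffstat below, so this is illustrative only):

	/* before: dispatched through struct inode_operations */
	long (*fallocate)(struct inode *inode, int mode, loff_t offset, loff_t len);

	/* after: dispatched through struct file_operations, with the open file available */
	long (*fallocate)(struct file *file, int mode, loff_t offset, loff_t len);

Having the struct file is what makes a per-open check such as O_SYNC possible at all; an inode alone does not carry the open flags.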
Diffstat (limited to 'fs')
 fs/btrfs/file.c             | 113
 fs/btrfs/inode.c            | 111
 fs/ext4/ext4.h              |   2
 fs/ext4/extents.c           |   9
 fs/ext4/file.c              |   2
 fs/gfs2/file.c              | 258
 fs/gfs2/ops_inode.c         | 258
 fs/ocfs2/file.c             |   8
 fs/open.c                   |   4
 fs/xfs/linux-2.6/xfs_file.c |  56
 fs/xfs/linux-2.6/xfs_iops.c |  60
 11 files changed, 437 insertions(+), 444 deletions(-)
diff --git a/fs/btrfs/file.c b/fs/btrfs/file.c
index 66836d85763b..a9e0a4eaf3d9 100644
--- a/fs/btrfs/file.c
+++ b/fs/btrfs/file.c
@@ -24,6 +24,7 @@
 #include <linux/string.h>
 #include <linux/backing-dev.h>
 #include <linux/mpage.h>
+#include <linux/falloc.h>
 #include <linux/swap.h>
 #include <linux/writeback.h>
 #include <linux/statfs.h>
@@ -1237,6 +1238,117 @@ static int btrfs_file_mmap(struct file *filp, struct vm_area_struct *vma)
 	return 0;
 }
 
+static long btrfs_fallocate(struct file *file, int mode,
+			    loff_t offset, loff_t len)
+{
+	struct inode *inode = file->f_path.dentry->d_inode;
+	struct extent_state *cached_state = NULL;
+	u64 cur_offset;
+	u64 last_byte;
+	u64 alloc_start;
+	u64 alloc_end;
+	u64 alloc_hint = 0;
+	u64 locked_end;
+	u64 mask = BTRFS_I(inode)->root->sectorsize - 1;
+	struct extent_map *em;
+	int ret;
+
+	alloc_start = offset & ~mask;
+	alloc_end = (offset + len + mask) & ~mask;
+
+	/* We only support the FALLOC_FL_KEEP_SIZE mode */
+	if (mode & ~FALLOC_FL_KEEP_SIZE)
+		return -EOPNOTSUPP;
+
+	/*
+	 * wait for ordered IO before we have any locks. We'll loop again
+	 * below with the locks held.
+	 */
+	btrfs_wait_ordered_range(inode, alloc_start, alloc_end - alloc_start);
+
+	mutex_lock(&inode->i_mutex);
+	ret = inode_newsize_ok(inode, alloc_end);
+	if (ret)
+		goto out;
+
+	if (alloc_start > inode->i_size) {
+		ret = btrfs_cont_expand(inode, alloc_start);
+		if (ret)
+			goto out;
+	}
+
+	ret = btrfs_check_data_free_space(inode, alloc_end - alloc_start);
+	if (ret)
+		goto out;
+
+	locked_end = alloc_end - 1;
+	while (1) {
+		struct btrfs_ordered_extent *ordered;
+
+		/* the extent lock is ordered inside the running
+		 * transaction
+		 */
+		lock_extent_bits(&BTRFS_I(inode)->io_tree, alloc_start,
+				 locked_end, 0, &cached_state, GFP_NOFS);
+		ordered = btrfs_lookup_first_ordered_extent(inode,
+							    alloc_end - 1);
+		if (ordered &&
+		    ordered->file_offset + ordered->len > alloc_start &&
+		    ordered->file_offset < alloc_end) {
+			btrfs_put_ordered_extent(ordered);
+			unlock_extent_cached(&BTRFS_I(inode)->io_tree,
+					     alloc_start, locked_end,
+					     &cached_state, GFP_NOFS);
+			/*
+			 * we can't wait on the range with the transaction
+			 * running or with the extent lock held
+			 */
+			btrfs_wait_ordered_range(inode, alloc_start,
+						 alloc_end - alloc_start);
+		} else {
+			if (ordered)
+				btrfs_put_ordered_extent(ordered);
+			break;
+		}
+	}
+
+	cur_offset = alloc_start;
+	while (1) {
+		em = btrfs_get_extent(inode, NULL, 0, cur_offset,
+				      alloc_end - cur_offset, 0);
+		BUG_ON(IS_ERR(em) || !em);
+		last_byte = min(extent_map_end(em), alloc_end);
+		last_byte = (last_byte + mask) & ~mask;
+		if (em->block_start == EXTENT_MAP_HOLE ||
+		    (cur_offset >= inode->i_size &&
+		     !test_bit(EXTENT_FLAG_PREALLOC, &em->flags))) {
+			ret = btrfs_prealloc_file_range(inode, mode, cur_offset,
+							last_byte - cur_offset,
+							1 << inode->i_blkbits,
+							offset + len,
+							&alloc_hint);
+			if (ret < 0) {
+				free_extent_map(em);
+				break;
+			}
+		}
+		free_extent_map(em);
+
+		cur_offset = last_byte;
+		if (cur_offset >= alloc_end) {
+			ret = 0;
+			break;
+		}
+	}
+	unlock_extent_cached(&BTRFS_I(inode)->io_tree, alloc_start, locked_end,
+			     &cached_state, GFP_NOFS);
+
+	btrfs_free_reserved_data_space(inode, alloc_end - alloc_start);
+out:
+	mutex_unlock(&inode->i_mutex);
+	return ret;
+}
+
 const struct file_operations btrfs_file_operations = {
 	.llseek = generic_file_llseek,
 	.read = do_sync_read,
@@ -1248,6 +1360,7 @@ const struct file_operations btrfs_file_operations = {
 	.open = generic_file_open,
 	.release = btrfs_release_file,
 	.fsync = btrfs_sync_file,
+	.fallocate = btrfs_fallocate,
 	.unlocked_ioctl = btrfs_ioctl,
 #ifdef CONFIG_COMPAT
 	.compat_ioctl = btrfs_ioctl,
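As a usage note, the hook wired up above is what the fallocate(2) system call ends up invoking; a minimal userspace sketch exercising btrfs's KEEP_SIZE-only support might look like this (illustrative only; it assumes glibc's fallocate() wrapper, and error handling is trimmed):

	#define _GNU_SOURCE
	#include <fcntl.h>
	#include <stdio.h>
	#include <linux/falloc.h>

	int main(void)
	{
		int fd = open("testfile", O_WRONLY | O_CREAT, 0644);

		/* preallocate 1 MiB of blocks without changing i_size */
		if (fallocate(fd, FALLOC_FL_KEEP_SIZE, 0, 1 << 20) != 0)
			perror("fallocate");
		return 0;
	}

Passing mode 0 instead would also extend i_size, which is the !KEEP_SIZE case the commit message refers to.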
diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c
index 64daf2acd0d5..902afbf50811 100644
--- a/fs/btrfs/inode.c
+++ b/fs/btrfs/inode.c
@@ -7098,116 +7098,6 @@ int btrfs_prealloc_file_range_trans(struct inode *inode,
 					min_size, actual_len, alloc_hint, trans);
 }
 
7101static long btrfs_fallocate(struct inode *inode, int mode,
7102 loff_t offset, loff_t len)
7103{
7104 struct extent_state *cached_state = NULL;
7105 u64 cur_offset;
7106 u64 last_byte;
7107 u64 alloc_start;
7108 u64 alloc_end;
7109 u64 alloc_hint = 0;
7110 u64 locked_end;
7111 u64 mask = BTRFS_I(inode)->root->sectorsize - 1;
7112 struct extent_map *em;
7113 int ret;
7114
7115 alloc_start = offset & ~mask;
7116 alloc_end = (offset + len + mask) & ~mask;
7117
7118 /* We only support the FALLOC_FL_KEEP_SIZE mode */
7119 if (mode & ~FALLOC_FL_KEEP_SIZE)
7120 return -EOPNOTSUPP;
7121
7122 /*
7123 * wait for ordered IO before we have any locks. We'll loop again
7124 * below with the locks held.
7125 */
7126 btrfs_wait_ordered_range(inode, alloc_start, alloc_end - alloc_start);
7127
7128 mutex_lock(&inode->i_mutex);
7129 ret = inode_newsize_ok(inode, alloc_end);
7130 if (ret)
7131 goto out;
7132
7133 if (alloc_start > inode->i_size) {
7134 ret = btrfs_cont_expand(inode, alloc_start);
7135 if (ret)
7136 goto out;
7137 }
7138
7139 ret = btrfs_check_data_free_space(inode, alloc_end - alloc_start);
7140 if (ret)
7141 goto out;
7142
7143 locked_end = alloc_end - 1;
7144 while (1) {
7145 struct btrfs_ordered_extent *ordered;
7146
7147 /* the extent lock is ordered inside the running
7148 * transaction
7149 */
7150 lock_extent_bits(&BTRFS_I(inode)->io_tree, alloc_start,
7151 locked_end, 0, &cached_state, GFP_NOFS);
7152 ordered = btrfs_lookup_first_ordered_extent(inode,
7153 alloc_end - 1);
7154 if (ordered &&
7155 ordered->file_offset + ordered->len > alloc_start &&
7156 ordered->file_offset < alloc_end) {
7157 btrfs_put_ordered_extent(ordered);
7158 unlock_extent_cached(&BTRFS_I(inode)->io_tree,
7159 alloc_start, locked_end,
7160 &cached_state, GFP_NOFS);
7161 /*
7162 * we can't wait on the range with the transaction
7163 * running or with the extent lock held
7164 */
7165 btrfs_wait_ordered_range(inode, alloc_start,
7166 alloc_end - alloc_start);
7167 } else {
7168 if (ordered)
7169 btrfs_put_ordered_extent(ordered);
7170 break;
7171 }
7172 }
7173
7174 cur_offset = alloc_start;
7175 while (1) {
7176 em = btrfs_get_extent(inode, NULL, 0, cur_offset,
7177 alloc_end - cur_offset, 0);
7178 BUG_ON(IS_ERR(em) || !em);
7179 last_byte = min(extent_map_end(em), alloc_end);
7180 last_byte = (last_byte + mask) & ~mask;
7181 if (em->block_start == EXTENT_MAP_HOLE ||
7182 (cur_offset >= inode->i_size &&
7183 !test_bit(EXTENT_FLAG_PREALLOC, &em->flags))) {
7184 ret = btrfs_prealloc_file_range(inode, mode, cur_offset,
7185 last_byte - cur_offset,
7186 1 << inode->i_blkbits,
7187 offset + len,
7188 &alloc_hint);
7189 if (ret < 0) {
7190 free_extent_map(em);
7191 break;
7192 }
7193 }
7194 free_extent_map(em);
7195
7196 cur_offset = last_byte;
7197 if (cur_offset >= alloc_end) {
7198 ret = 0;
7199 break;
7200 }
7201 }
7202 unlock_extent_cached(&BTRFS_I(inode)->io_tree, alloc_start, locked_end,
7203 &cached_state, GFP_NOFS);
7204
7205 btrfs_free_reserved_data_space(inode, alloc_end - alloc_start);
7206out:
7207 mutex_unlock(&inode->i_mutex);
7208 return ret;
7209}
7210
 static int btrfs_set_page_dirty(struct page *page)
 {
 	return __set_page_dirty_nobuffers(page);
@@ -7310,7 +7200,6 @@ static const struct inode_operations btrfs_file_inode_operations = {
 	.listxattr = btrfs_listxattr,
 	.removexattr = btrfs_removexattr,
 	.permission = btrfs_permission,
-	.fallocate = btrfs_fallocate,
 	.fiemap = btrfs_fiemap,
 };
 static const struct inode_operations btrfs_special_inode_operations = {
diff --git a/fs/ext4/ext4.h b/fs/ext4/ext4.h
index 1de65f572033..0c8d97b56f34 100644
--- a/fs/ext4/ext4.h
+++ b/fs/ext4/ext4.h
@@ -2065,7 +2065,7 @@ extern int ext4_ext_map_blocks(handle_t *handle, struct inode *inode,
 extern void ext4_ext_truncate(struct inode *);
 extern void ext4_ext_init(struct super_block *);
 extern void ext4_ext_release(struct super_block *);
-extern long ext4_fallocate(struct inode *inode, int mode, loff_t offset,
+extern long ext4_fallocate(struct file *file, int mode, loff_t offset,
 			  loff_t len);
 extern int ext4_convert_unwritten_extents(struct inode *inode, loff_t offset,
 			  ssize_t len);
diff --git a/fs/ext4/extents.c b/fs/ext4/extents.c
index 4bdd160854eb..63a75810b7c3 100644
--- a/fs/ext4/extents.c
+++ b/fs/ext4/extents.c
@@ -3627,14 +3627,15 @@ static void ext4_falloc_update_inode(struct inode *inode,
 }
 
 /*
- * preallocate space for a file. This implements ext4's fallocate inode
+ * preallocate space for a file. This implements ext4's fallocate file
  * operation, which gets called from sys_fallocate system call.
  * For block-mapped files, posix_fallocate should fall back to the method
  * of writing zeroes to the required new blocks (the same behavior which is
  * expected for file systems which do not support fallocate() system call).
  */
-long ext4_fallocate(struct inode *inode, int mode, loff_t offset, loff_t len)
+long ext4_fallocate(struct file *file, int mode, loff_t offset, loff_t len)
 {
+	struct inode *inode = file->f_path.dentry->d_inode;
 	handle_t *handle;
 	loff_t new_size;
 	unsigned int max_blocks;
@@ -3655,10 +3656,6 @@ long ext4_fallocate(struct inode *inode, int mode, loff_t offset, loff_t len)
 	if (!(ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS)))
 		return -EOPNOTSUPP;
 
-	/* preallocation to directories is currently not supported */
-	if (S_ISDIR(inode->i_mode))
-		return -ENODEV;
-
 	map.m_lblk = offset >> blkbits;
 	/*
 	 * We can't just convert len to max_blocks because
diff --git a/fs/ext4/file.c b/fs/ext4/file.c
index bb003dc9ffff..2e8322c8aa88 100644
--- a/fs/ext4/file.c
+++ b/fs/ext4/file.c
@@ -210,6 +210,7 @@ const struct file_operations ext4_file_operations = {
 	.fsync = ext4_sync_file,
 	.splice_read = generic_file_splice_read,
 	.splice_write = generic_file_splice_write,
+	.fallocate = ext4_fallocate,
 };
 
 const struct inode_operations ext4_file_inode_operations = {
@@ -223,7 +224,6 @@ const struct inode_operations ext4_file_inode_operations = {
 	.removexattr = generic_removexattr,
 #endif
 	.check_acl = ext4_check_acl,
-	.fallocate = ext4_fallocate,
 	.fiemap = ext4_fiemap,
 };
 
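The comment block in the extents.c hunk above mentions the userspace side of this: when a filesystem returns EOPNOTSUPP, glibc's posix_fallocate() emulates preallocation by writing zeroes. A hedged sketch of the portable call (the helper name is made up for illustration):

	#include <fcntl.h>
	#include <stdio.h>

	/* Portable preallocation; glibc falls back to writing zeroes when
	 * the filesystem has no fallocate support. */
	static int prealloc_portable(int fd, off_t len)
	{
		int err = posix_fallocate(fd, 0, len);	/* returns an errno value, does not set errno */

		if (err)
			fprintf(stderr, "posix_fallocate: %d\n", err);
		return err;
	}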
diff --git a/fs/gfs2/file.c b/fs/gfs2/file.c
index fca6689e12e6..7cfdcb913363 100644
--- a/fs/gfs2/file.c
+++ b/fs/gfs2/file.c
@@ -19,6 +19,8 @@
 #include <linux/fs.h>
 #include <linux/gfs2_ondisk.h>
 #include <linux/ext2_fs.h>
+#include <linux/falloc.h>
+#include <linux/swap.h>
 #include <linux/crc32.h>
 #include <linux/writeback.h>
 #include <asm/uaccess.h>
@@ -610,6 +612,260 @@ static ssize_t gfs2_file_aio_write(struct kiocb *iocb, const struct iovec *iov,
 	return generic_file_aio_write(iocb, iov, nr_segs, pos);
 }
 
615static void empty_write_end(struct page *page, unsigned from,
616 unsigned to)
617{
618 struct gfs2_inode *ip = GFS2_I(page->mapping->host);
619
620 page_zero_new_buffers(page, from, to);
621 flush_dcache_page(page);
622 mark_page_accessed(page);
623
624 if (!gfs2_is_writeback(ip))
625 gfs2_page_add_databufs(ip, page, from, to);
626
627 block_commit_write(page, from, to);
628}
629
630static int write_empty_blocks(struct page *page, unsigned from, unsigned to)
631{
632 unsigned start, end, next;
633 struct buffer_head *bh, *head;
634 int error;
635
636 if (!page_has_buffers(page)) {
637 error = __block_write_begin(page, from, to - from, gfs2_block_map);
638 if (unlikely(error))
639 return error;
640
641 empty_write_end(page, from, to);
642 return 0;
643 }
644
645 bh = head = page_buffers(page);
646 next = end = 0;
647 while (next < from) {
648 next += bh->b_size;
649 bh = bh->b_this_page;
650 }
651 start = next;
652 do {
653 next += bh->b_size;
654 if (buffer_mapped(bh)) {
655 if (end) {
656 error = __block_write_begin(page, start, end - start,
657 gfs2_block_map);
658 if (unlikely(error))
659 return error;
660 empty_write_end(page, start, end);
661 end = 0;
662 }
663 start = next;
664 }
665 else
666 end = next;
667 bh = bh->b_this_page;
668 } while (next < to);
669
670 if (end) {
671 error = __block_write_begin(page, start, end - start, gfs2_block_map);
672 if (unlikely(error))
673 return error;
674 empty_write_end(page, start, end);
675 }
676
677 return 0;
678}
679
680static int fallocate_chunk(struct inode *inode, loff_t offset, loff_t len,
681 int mode)
682{
683 struct gfs2_inode *ip = GFS2_I(inode);
684 struct buffer_head *dibh;
685 int error;
686 u64 start = offset >> PAGE_CACHE_SHIFT;
687 unsigned int start_offset = offset & ~PAGE_CACHE_MASK;
688 u64 end = (offset + len - 1) >> PAGE_CACHE_SHIFT;
689 pgoff_t curr;
690 struct page *page;
691 unsigned int end_offset = (offset + len) & ~PAGE_CACHE_MASK;
692 unsigned int from, to;
693
694 if (!end_offset)
695 end_offset = PAGE_CACHE_SIZE;
696
697 error = gfs2_meta_inode_buffer(ip, &dibh);
698 if (unlikely(error))
699 goto out;
700
701 gfs2_trans_add_bh(ip->i_gl, dibh, 1);
702
703 if (gfs2_is_stuffed(ip)) {
704 error = gfs2_unstuff_dinode(ip, NULL);
705 if (unlikely(error))
706 goto out;
707 }
708
709 curr = start;
710 offset = start << PAGE_CACHE_SHIFT;
711 from = start_offset;
712 to = PAGE_CACHE_SIZE;
713 while (curr <= end) {
714 page = grab_cache_page_write_begin(inode->i_mapping, curr,
715 AOP_FLAG_NOFS);
716 if (unlikely(!page)) {
717 error = -ENOMEM;
718 goto out;
719 }
720
721 if (curr == end)
722 to = end_offset;
723 error = write_empty_blocks(page, from, to);
724 if (!error && offset + to > inode->i_size &&
725 !(mode & FALLOC_FL_KEEP_SIZE)) {
726 i_size_write(inode, offset + to);
727 }
728 unlock_page(page);
729 page_cache_release(page);
730 if (error)
731 goto out;
732 curr++;
733 offset += PAGE_CACHE_SIZE;
734 from = 0;
735 }
736
737 gfs2_dinode_out(ip, dibh->b_data);
738 mark_inode_dirty(inode);
739
740 brelse(dibh);
741
742out:
743 return error;
744}
745
746static void calc_max_reserv(struct gfs2_inode *ip, loff_t max, loff_t *len,
747 unsigned int *data_blocks, unsigned int *ind_blocks)
748{
749 const struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
750 unsigned int max_blocks = ip->i_alloc->al_rgd->rd_free_clone;
751 unsigned int tmp, max_data = max_blocks - 3 * (sdp->sd_max_height - 1);
752
753 for (tmp = max_data; tmp > sdp->sd_diptrs;) {
754 tmp = DIV_ROUND_UP(tmp, sdp->sd_inptrs);
755 max_data -= tmp;
756 }
757 /* This calculation isn't the exact reverse of gfs2_write_calc_reserve,
758 so it might end up with fewer data blocks */
759 if (max_data <= *data_blocks)
760 return;
761 *data_blocks = max_data;
762 *ind_blocks = max_blocks - max_data;
763 *len = ((loff_t)max_data - 3) << sdp->sd_sb.sb_bsize_shift;
764 if (*len > max) {
765 *len = max;
766 gfs2_write_calc_reserv(ip, max, data_blocks, ind_blocks);
767 }
768}
769
770static long gfs2_fallocate(struct file *file, int mode, loff_t offset,
771 loff_t len)
772{
773 struct inode *inode = file->f_path.dentry->d_inode;
774 struct gfs2_sbd *sdp = GFS2_SB(inode);
775 struct gfs2_inode *ip = GFS2_I(inode);
776 unsigned int data_blocks = 0, ind_blocks = 0, rblocks;
777 loff_t bytes, max_bytes;
778 struct gfs2_alloc *al;
779 int error;
780 loff_t next = (offset + len - 1) >> sdp->sd_sb.sb_bsize_shift;
781 next = (next + 1) << sdp->sd_sb.sb_bsize_shift;
782
783 /* We only support the FALLOC_FL_KEEP_SIZE mode */
784 if (mode & ~FALLOC_FL_KEEP_SIZE)
785 return -EOPNOTSUPP;
786
787 offset = (offset >> sdp->sd_sb.sb_bsize_shift) <<
788 sdp->sd_sb.sb_bsize_shift;
789
790 len = next - offset;
791 bytes = sdp->sd_max_rg_data * sdp->sd_sb.sb_bsize / 2;
792 if (!bytes)
793 bytes = UINT_MAX;
794
795 gfs2_holder_init(ip->i_gl, LM_ST_EXCLUSIVE, 0, &ip->i_gh);
796 error = gfs2_glock_nq(&ip->i_gh);
797 if (unlikely(error))
798 goto out_uninit;
799
800 if (!gfs2_write_alloc_required(ip, offset, len))
801 goto out_unlock;
802
803 while (len > 0) {
804 if (len < bytes)
805 bytes = len;
806 al = gfs2_alloc_get(ip);
807 if (!al) {
808 error = -ENOMEM;
809 goto out_unlock;
810 }
811
812 error = gfs2_quota_lock_check(ip);
813 if (error)
814 goto out_alloc_put;
815
816retry:
817 gfs2_write_calc_reserv(ip, bytes, &data_blocks, &ind_blocks);
818
819 al->al_requested = data_blocks + ind_blocks;
820 error = gfs2_inplace_reserve(ip);
821 if (error) {
822 if (error == -ENOSPC && bytes > sdp->sd_sb.sb_bsize) {
823 bytes >>= 1;
824 goto retry;
825 }
826 goto out_qunlock;
827 }
828 max_bytes = bytes;
829 calc_max_reserv(ip, len, &max_bytes, &data_blocks, &ind_blocks);
830 al->al_requested = data_blocks + ind_blocks;
831
832 rblocks = RES_DINODE + ind_blocks + RES_STATFS + RES_QUOTA +
833 RES_RG_HDR + gfs2_rg_blocks(al);
834 if (gfs2_is_jdata(ip))
835 rblocks += data_blocks ? data_blocks : 1;
836
837 error = gfs2_trans_begin(sdp, rblocks,
838 PAGE_CACHE_SIZE/sdp->sd_sb.sb_bsize);
839 if (error)
840 goto out_trans_fail;
841
842 error = fallocate_chunk(inode, offset, max_bytes, mode);
843 gfs2_trans_end(sdp);
844
845 if (error)
846 goto out_trans_fail;
847
848 len -= max_bytes;
849 offset += max_bytes;
850 gfs2_inplace_release(ip);
851 gfs2_quota_unlock(ip);
852 gfs2_alloc_put(ip);
853 }
854 goto out_unlock;
855
856out_trans_fail:
857 gfs2_inplace_release(ip);
858out_qunlock:
859 gfs2_quota_unlock(ip);
860out_alloc_put:
861 gfs2_alloc_put(ip);
862out_unlock:
863 gfs2_glock_dq(&ip->i_gh);
864out_uninit:
865 gfs2_holder_uninit(&ip->i_gh);
866 return error;
867}
868
 #ifdef CONFIG_GFS2_FS_LOCKING_DLM
 
 /**
@@ -765,6 +1021,7 @@ const struct file_operations gfs2_file_fops = {
 	.splice_read = generic_file_splice_read,
 	.splice_write = generic_file_splice_write,
 	.setlease = gfs2_setlease,
+	.fallocate = gfs2_fallocate,
 };
 
 const struct file_operations gfs2_dir_fops = {
@@ -794,6 +1051,7 @@ const struct file_operations gfs2_file_fops_nolock = {
 	.splice_read = generic_file_splice_read,
 	.splice_write = generic_file_splice_write,
 	.setlease = generic_setlease,
+	.fallocate = gfs2_fallocate,
 };
 
 const struct file_operations gfs2_dir_fops_nolock = {
diff --git a/fs/gfs2/ops_inode.c b/fs/gfs2/ops_inode.c
index c09528c07f3d..d8b26ac2e20b 100644
--- a/fs/gfs2/ops_inode.c
+++ b/fs/gfs2/ops_inode.c
@@ -18,8 +18,6 @@
 #include <linux/gfs2_ondisk.h>
 #include <linux/crc32.h>
 #include <linux/fiemap.h>
-#include <linux/swap.h>
-#include <linux/falloc.h>
 #include <asm/uaccess.h>
 
 #include "gfs2.h"
@@ -1257,261 +1255,6 @@ static int gfs2_removexattr(struct dentry *dentry, const char *name)
 	return ret;
 }
 
1260static void empty_write_end(struct page *page, unsigned from,
1261 unsigned to)
1262{
1263 struct gfs2_inode *ip = GFS2_I(page->mapping->host);
1264
1265 page_zero_new_buffers(page, from, to);
1266 flush_dcache_page(page);
1267 mark_page_accessed(page);
1268
1269 if (!gfs2_is_writeback(ip))
1270 gfs2_page_add_databufs(ip, page, from, to);
1271
1272 block_commit_write(page, from, to);
1273}
1274
1275
1276static int write_empty_blocks(struct page *page, unsigned from, unsigned to)
1277{
1278 unsigned start, end, next;
1279 struct buffer_head *bh, *head;
1280 int error;
1281
1282 if (!page_has_buffers(page)) {
1283 error = __block_write_begin(page, from, to - from, gfs2_block_map);
1284 if (unlikely(error))
1285 return error;
1286
1287 empty_write_end(page, from, to);
1288 return 0;
1289 }
1290
1291 bh = head = page_buffers(page);
1292 next = end = 0;
1293 while (next < from) {
1294 next += bh->b_size;
1295 bh = bh->b_this_page;
1296 }
1297 start = next;
1298 do {
1299 next += bh->b_size;
1300 if (buffer_mapped(bh)) {
1301 if (end) {
1302 error = __block_write_begin(page, start, end - start,
1303 gfs2_block_map);
1304 if (unlikely(error))
1305 return error;
1306 empty_write_end(page, start, end);
1307 end = 0;
1308 }
1309 start = next;
1310 }
1311 else
1312 end = next;
1313 bh = bh->b_this_page;
1314 } while (next < to);
1315
1316 if (end) {
1317 error = __block_write_begin(page, start, end - start, gfs2_block_map);
1318 if (unlikely(error))
1319 return error;
1320 empty_write_end(page, start, end);
1321 }
1322
1323 return 0;
1324}
1325
1326static int fallocate_chunk(struct inode *inode, loff_t offset, loff_t len,
1327 int mode)
1328{
1329 struct gfs2_inode *ip = GFS2_I(inode);
1330 struct buffer_head *dibh;
1331 int error;
1332 u64 start = offset >> PAGE_CACHE_SHIFT;
1333 unsigned int start_offset = offset & ~PAGE_CACHE_MASK;
1334 u64 end = (offset + len - 1) >> PAGE_CACHE_SHIFT;
1335 pgoff_t curr;
1336 struct page *page;
1337 unsigned int end_offset = (offset + len) & ~PAGE_CACHE_MASK;
1338 unsigned int from, to;
1339
1340 if (!end_offset)
1341 end_offset = PAGE_CACHE_SIZE;
1342
1343 error = gfs2_meta_inode_buffer(ip, &dibh);
1344 if (unlikely(error))
1345 goto out;
1346
1347 gfs2_trans_add_bh(ip->i_gl, dibh, 1);
1348
1349 if (gfs2_is_stuffed(ip)) {
1350 error = gfs2_unstuff_dinode(ip, NULL);
1351 if (unlikely(error))
1352 goto out;
1353 }
1354
1355 curr = start;
1356 offset = start << PAGE_CACHE_SHIFT;
1357 from = start_offset;
1358 to = PAGE_CACHE_SIZE;
1359 while (curr <= end) {
1360 page = grab_cache_page_write_begin(inode->i_mapping, curr,
1361 AOP_FLAG_NOFS);
1362 if (unlikely(!page)) {
1363 error = -ENOMEM;
1364 goto out;
1365 }
1366
1367 if (curr == end)
1368 to = end_offset;
1369 error = write_empty_blocks(page, from, to);
1370 if (!error && offset + to > inode->i_size &&
1371 !(mode & FALLOC_FL_KEEP_SIZE)) {
1372 i_size_write(inode, offset + to);
1373 }
1374 unlock_page(page);
1375 page_cache_release(page);
1376 if (error)
1377 goto out;
1378 curr++;
1379 offset += PAGE_CACHE_SIZE;
1380 from = 0;
1381 }
1382
1383 gfs2_dinode_out(ip, dibh->b_data);
1384 mark_inode_dirty(inode);
1385
1386 brelse(dibh);
1387
1388out:
1389 return error;
1390}
1391
1392static void calc_max_reserv(struct gfs2_inode *ip, loff_t max, loff_t *len,
1393 unsigned int *data_blocks, unsigned int *ind_blocks)
1394{
1395 const struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
1396 unsigned int max_blocks = ip->i_alloc->al_rgd->rd_free_clone;
1397 unsigned int tmp, max_data = max_blocks - 3 * (sdp->sd_max_height - 1);
1398
1399 for (tmp = max_data; tmp > sdp->sd_diptrs;) {
1400 tmp = DIV_ROUND_UP(tmp, sdp->sd_inptrs);
1401 max_data -= tmp;
1402 }
1403 /* This calculation isn't the exact reverse of gfs2_write_calc_reserve,
1404 so it might end up with fewer data blocks */
1405 if (max_data <= *data_blocks)
1406 return;
1407 *data_blocks = max_data;
1408 *ind_blocks = max_blocks - max_data;
1409 *len = ((loff_t)max_data - 3) << sdp->sd_sb.sb_bsize_shift;
1410 if (*len > max) {
1411 *len = max;
1412 gfs2_write_calc_reserv(ip, max, data_blocks, ind_blocks);
1413 }
1414}
1415
1416static long gfs2_fallocate(struct inode *inode, int mode, loff_t offset,
1417 loff_t len)
1418{
1419 struct gfs2_sbd *sdp = GFS2_SB(inode);
1420 struct gfs2_inode *ip = GFS2_I(inode);
1421 unsigned int data_blocks = 0, ind_blocks = 0, rblocks;
1422 loff_t bytes, max_bytes;
1423 struct gfs2_alloc *al;
1424 int error;
1425 loff_t next = (offset + len - 1) >> sdp->sd_sb.sb_bsize_shift;
1426 next = (next + 1) << sdp->sd_sb.sb_bsize_shift;
1427
1428 /* We only support the FALLOC_FL_KEEP_SIZE mode */
1429 if (mode & ~FALLOC_FL_KEEP_SIZE)
1430 return -EOPNOTSUPP;
1431
1432 offset = (offset >> sdp->sd_sb.sb_bsize_shift) <<
1433 sdp->sd_sb.sb_bsize_shift;
1434
1435 len = next - offset;
1436 bytes = sdp->sd_max_rg_data * sdp->sd_sb.sb_bsize / 2;
1437 if (!bytes)
1438 bytes = UINT_MAX;
1439
1440 gfs2_holder_init(ip->i_gl, LM_ST_EXCLUSIVE, 0, &ip->i_gh);
1441 error = gfs2_glock_nq(&ip->i_gh);
1442 if (unlikely(error))
1443 goto out_uninit;
1444
1445 if (!gfs2_write_alloc_required(ip, offset, len))
1446 goto out_unlock;
1447
1448 while (len > 0) {
1449 if (len < bytes)
1450 bytes = len;
1451 al = gfs2_alloc_get(ip);
1452 if (!al) {
1453 error = -ENOMEM;
1454 goto out_unlock;
1455 }
1456
1457 error = gfs2_quota_lock_check(ip);
1458 if (error)
1459 goto out_alloc_put;
1460
1461retry:
1462 gfs2_write_calc_reserv(ip, bytes, &data_blocks, &ind_blocks);
1463
1464 al->al_requested = data_blocks + ind_blocks;
1465 error = gfs2_inplace_reserve(ip);
1466 if (error) {
1467 if (error == -ENOSPC && bytes > sdp->sd_sb.sb_bsize) {
1468 bytes >>= 1;
1469 goto retry;
1470 }
1471 goto out_qunlock;
1472 }
1473 max_bytes = bytes;
1474 calc_max_reserv(ip, len, &max_bytes, &data_blocks, &ind_blocks);
1475 al->al_requested = data_blocks + ind_blocks;
1476
1477 rblocks = RES_DINODE + ind_blocks + RES_STATFS + RES_QUOTA +
1478 RES_RG_HDR + gfs2_rg_blocks(al);
1479 if (gfs2_is_jdata(ip))
1480 rblocks += data_blocks ? data_blocks : 1;
1481
1482 error = gfs2_trans_begin(sdp, rblocks,
1483 PAGE_CACHE_SIZE/sdp->sd_sb.sb_bsize);
1484 if (error)
1485 goto out_trans_fail;
1486
1487 error = fallocate_chunk(inode, offset, max_bytes, mode);
1488 gfs2_trans_end(sdp);
1489
1490 if (error)
1491 goto out_trans_fail;
1492
1493 len -= max_bytes;
1494 offset += max_bytes;
1495 gfs2_inplace_release(ip);
1496 gfs2_quota_unlock(ip);
1497 gfs2_alloc_put(ip);
1498 }
1499 goto out_unlock;
1500
1501out_trans_fail:
1502 gfs2_inplace_release(ip);
1503out_qunlock:
1504 gfs2_quota_unlock(ip);
1505out_alloc_put:
1506 gfs2_alloc_put(ip);
1507out_unlock:
1508 gfs2_glock_dq(&ip->i_gh);
1509out_uninit:
1510 gfs2_holder_uninit(&ip->i_gh);
1511 return error;
1512}
1513
1514
 static int gfs2_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo,
 		       u64 start, u64 len)
 {
@@ -1562,7 +1305,6 @@ const struct inode_operations gfs2_file_iops = {
 	.getxattr = gfs2_getxattr,
 	.listxattr = gfs2_listxattr,
 	.removexattr = gfs2_removexattr,
-	.fallocate = gfs2_fallocate,
 	.fiemap = gfs2_fiemap,
 };
 
diff --git a/fs/ocfs2/file.c b/fs/ocfs2/file.c
index cf254ce8c941..a6651956482e 100644
--- a/fs/ocfs2/file.c
+++ b/fs/ocfs2/file.c
@@ -1989,9 +1989,10 @@ int ocfs2_change_file_space(struct file *file, unsigned int cmd,
 	return __ocfs2_change_file_space(file, inode, file->f_pos, cmd, sr, 0);
 }
 
-static long ocfs2_fallocate(struct inode *inode, int mode, loff_t offset,
+static long ocfs2_fallocate(struct file *file, int mode, loff_t offset,
 			    loff_t len)
 {
+	struct inode *inode = file->f_path.dentry->d_inode;
 	struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
 	struct ocfs2_space_resv sr;
 	int change_size = 1;
@@ -2002,9 +2003,6 @@ static long ocfs2_fallocate(struct inode *inode, int mode, loff_t offset,
 	if (!ocfs2_writes_unwritten_extents(osb))
 		return -EOPNOTSUPP;
 
-	if (S_ISDIR(inode->i_mode))
-		return -ENODEV;
-
 	if (mode & FALLOC_FL_KEEP_SIZE)
 		change_size = 0;
 
@@ -2612,7 +2610,6 @@ const struct inode_operations ocfs2_file_iops = {
 	.getxattr = generic_getxattr,
 	.listxattr = ocfs2_listxattr,
 	.removexattr = generic_removexattr,
-	.fallocate = ocfs2_fallocate,
 	.fiemap = ocfs2_fiemap,
 };
 
@@ -2644,6 +2641,7 @@ const struct file_operations ocfs2_fops = {
 	.flock = ocfs2_flock,
 	.splice_read = ocfs2_file_splice_read,
 	.splice_write = ocfs2_file_splice_write,
+	.fallocate = ocfs2_fallocate,
 };
 
 const struct file_operations ocfs2_dops = {
diff --git a/fs/open.c b/fs/open.c
index 5b6ef7e2859e..e52389e1f05b 100644
--- a/fs/open.c
+++ b/fs/open.c
@@ -255,10 +255,10 @@ int do_fallocate(struct file *file, int mode, loff_t offset, loff_t len)
 	if (((offset + len) > inode->i_sb->s_maxbytes) || ((offset + len) < 0))
 		return -EFBIG;
 
-	if (!inode->i_op->fallocate)
+	if (!file->f_op->fallocate)
 		return -EOPNOTSUPP;
 
-	return inode->i_op->fallocate(inode, mode, offset, len);
+	return file->f_op->fallocate(file, mode, offset, len);
 }
 
 SYSCALL_DEFINE(fallocate)(int fd, int mode, loff_t offset, loff_t len)
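With do_fallocate() now handing the struct file down, an implementation can finally see the open flags. None of the filesystems converted in this patch adds such a check yet; a hypothetical sketch of what the O_SYNC rationale in the commit message enables (example_prealloc_blocks() and example_sync_metadata() are made-up helpers, not kernel APIs):

	static long example_fallocate(struct file *file, int mode,
				      loff_t offset, loff_t len)
	{
		struct inode *inode = file->f_path.dentry->d_inode;
		long error;

		error = example_prealloc_blocks(inode, mode, offset, len);
		if (error)
			return error;

		/* the point of the conversion: per-open flags are visible here */
		if (file->f_flags & O_SYNC)
			error = example_sync_metadata(inode);
		return error;
	}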
diff --git a/fs/xfs/linux-2.6/xfs_file.c b/fs/xfs/linux-2.6/xfs_file.c
index ef51eb43e137..a55c1b46b219 100644
--- a/fs/xfs/linux-2.6/xfs_file.c
+++ b/fs/xfs/linux-2.6/xfs_file.c
@@ -37,6 +37,7 @@
 #include "xfs_trace.h"
 
 #include <linux/dcache.h>
+#include <linux/falloc.h>
 
 static const struct vm_operations_struct xfs_file_vm_ops;
 
@@ -882,6 +883,60 @@ out_unlock:
 	return ret;
 }
 
+STATIC long
+xfs_file_fallocate(
+	struct file *file,
+	int mode,
+	loff_t offset,
+	loff_t len)
+{
+	struct inode *inode = file->f_path.dentry->d_inode;
+	long error;
+	loff_t new_size = 0;
+	xfs_flock64_t bf;
+	xfs_inode_t *ip = XFS_I(inode);
+	int cmd = XFS_IOC_RESVSP;
+
+	if (mode & ~(FALLOC_FL_KEEP_SIZE | FALLOC_FL_PUNCH_HOLE))
+		return -EOPNOTSUPP;
+
+	bf.l_whence = 0;
+	bf.l_start = offset;
+	bf.l_len = len;
+
+	xfs_ilock(ip, XFS_IOLOCK_EXCL);
+
+	if (mode & FALLOC_FL_PUNCH_HOLE)
+		cmd = XFS_IOC_UNRESVSP;
+
+	/* check the new inode size is valid before allocating */
+	if (!(mode & FALLOC_FL_KEEP_SIZE) &&
+	    offset + len > i_size_read(inode)) {
+		new_size = offset + len;
+		error = inode_newsize_ok(inode, new_size);
+		if (error)
+			goto out_unlock;
+	}
+
+	error = -xfs_change_file_space(ip, cmd, &bf, 0, XFS_ATTR_NOLOCK);
+	if (error)
+		goto out_unlock;
+
+	/* Change file size if needed */
+	if (new_size) {
+		struct iattr iattr;
+
+		iattr.ia_valid = ATTR_SIZE;
+		iattr.ia_size = new_size;
+		error = -xfs_setattr(ip, &iattr, XFS_ATTR_NOLOCK);
+	}
+
+out_unlock:
+	xfs_iunlock(ip, XFS_IOLOCK_EXCL);
+	return error;
+}
+
+
 STATIC int
 xfs_file_open(
 	struct inode *inode,
@@ -1000,6 +1055,7 @@ const struct file_operations xfs_file_operations = {
 	.open = xfs_file_open,
 	.release = xfs_file_release,
 	.fsync = xfs_file_fsync,
+	.fallocate = xfs_file_fallocate,
 };
 
 const struct file_operations xfs_dir_file_operations = {
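Unlike the other filesystems touched here, the XFS implementation above also accepts FALLOC_FL_PUNCH_HOLE. A minimal userspace sketch of hole punching (illustrative; note that later kernels require combining the flag with FALLOC_FL_KEEP_SIZE, which this kernel tolerates as well):

	#define _GNU_SOURCE
	#include <fcntl.h>
	#include <stdio.h>
	#include <linux/falloc.h>

	/* deallocate 64 KiB starting at offset 1 MiB, leaving i_size untouched */
	static void punch_hole(int fd)
	{
		if (fallocate(fd, FALLOC_FL_PUNCH_HOLE | FALLOC_FL_KEEP_SIZE,
			      1 << 20, 64 << 10) != 0)
			perror("fallocate(FALLOC_FL_PUNCH_HOLE)");
	}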
diff --git a/fs/xfs/linux-2.6/xfs_iops.c b/fs/xfs/linux-2.6/xfs_iops.c
index a4ecc2188a09..bd5727852fd6 100644
--- a/fs/xfs/linux-2.6/xfs_iops.c
+++ b/fs/xfs/linux-2.6/xfs_iops.c
@@ -46,7 +46,6 @@
 #include <linux/namei.h>
 #include <linux/posix_acl.h>
 #include <linux/security.h>
-#include <linux/falloc.h>
 #include <linux/fiemap.h>
 #include <linux/slab.h>
 
@@ -505,64 +504,6 @@ xfs_vn_setattr(
 	return -xfs_setattr(XFS_I(dentry->d_inode), iattr, 0);
 }
 
508STATIC long
509xfs_vn_fallocate(
510 struct inode *inode,
511 int mode,
512 loff_t offset,
513 loff_t len)
514{
515 long error;
516 loff_t new_size = 0;
517 xfs_flock64_t bf;
518 xfs_inode_t *ip = XFS_I(inode);
519 int cmd = XFS_IOC_RESVSP;
520
521 if (mode & ~(FALLOC_FL_KEEP_SIZE | FALLOC_FL_PUNCH_HOLE))
522 return -EOPNOTSUPP;
523
524 /* preallocation on directories not yet supported */
525 error = -ENODEV;
526 if (S_ISDIR(inode->i_mode))
527 goto out_error;
528
529 bf.l_whence = 0;
530 bf.l_start = offset;
531 bf.l_len = len;
532
533 xfs_ilock(ip, XFS_IOLOCK_EXCL);
534
535 if (mode & FALLOC_FL_PUNCH_HOLE)
536 cmd = XFS_IOC_UNRESVSP;
537
538 /* check the new inode size is valid before allocating */
539 if (!(mode & FALLOC_FL_KEEP_SIZE) &&
540 offset + len > i_size_read(inode)) {
541 new_size = offset + len;
542 error = inode_newsize_ok(inode, new_size);
543 if (error)
544 goto out_unlock;
545 }
546
547 error = -xfs_change_file_space(ip, cmd, &bf, 0, XFS_ATTR_NOLOCK);
548 if (error)
549 goto out_unlock;
550
551 /* Change file size if needed */
552 if (new_size) {
553 struct iattr iattr;
554
555 iattr.ia_valid = ATTR_SIZE;
556 iattr.ia_size = new_size;
557 error = -xfs_setattr(ip, &iattr, XFS_ATTR_NOLOCK);
558 }
559
560out_unlock:
561 xfs_iunlock(ip, XFS_IOLOCK_EXCL);
562out_error:
563 return error;
564}
565
 #define XFS_FIEMAP_FLAGS (FIEMAP_FLAG_SYNC|FIEMAP_FLAG_XATTR)
 
 /*
@@ -656,7 +597,6 @@ static const struct inode_operations xfs_inode_operations = {
 	.getxattr = generic_getxattr,
 	.removexattr = generic_removexattr,
 	.listxattr = xfs_vn_listxattr,
-	.fallocate = xfs_vn_fallocate,
 	.fiemap = xfs_vn_fiemap,
 };
 