path: root/fs
Diffstat (limited to 'fs')
-rw-r--r--  fs/9p/vfs_addr.c                |  10
-rw-r--r--  fs/binfmt_misc.c                |   2
-rw-r--r--  fs/block_dev.c                  |   6
-rw-r--r--  fs/btrfs/super.c                |   2
-rw-r--r--  fs/crypto/keyinfo.c             |   5
-rw-r--r--  fs/direct-io.c                  |  42
-rw-r--r--  fs/ecryptfs/ecryptfs_kernel.h   |  24
-rw-r--r--  fs/ecryptfs/keystore.c          |   9
-rw-r--r--  fs/exec.c                       |   1
-rw-r--r--  fs/ext4/super.c                 |   4
-rw-r--r--  fs/f2fs/f2fs.h                  |   2
-rw-r--r--  fs/f2fs/segment.c               |   6
-rw-r--r--  fs/f2fs/super.c                 |   2
-rw-r--r--  fs/fscache/object-list.c        |   7
-rw-r--r--  fs/fuse/inode.c                 |   2
-rw-r--r--  fs/iomap.c                      |  41
-rw-r--r--  fs/mpage.c                      |  14
-rw-r--r--  fs/namespace.c                  |   3
-rw-r--r--  fs/nfsd/nfs4proc.c              |   9
-rw-r--r--  fs/quota/dquot.c                |  27
-rw-r--r--  fs/xfs/libxfs/xfs_alloc.c       |   8
-rw-r--r--  fs/xfs/libxfs/xfs_bmap.c        |  15
-rw-r--r--  fs/xfs/libxfs/xfs_bmap.h        |   1
-rw-r--r--  fs/xfs/libxfs/xfs_ialloc.c      |   4
-rw-r--r--  fs/xfs/libxfs/xfs_log_format.h  |  27
-rw-r--r--  fs/xfs/xfs_acl.c                |  22
-rw-r--r--  fs/xfs/xfs_aops.c               |  47
-rw-r--r--  fs/xfs/xfs_attr_inactive.c      |   2
-rw-r--r--  fs/xfs/xfs_bmap_util.c          |   2
-rw-r--r--  fs/xfs/xfs_bmap_util.h          |  13
-rw-r--r--  fs/xfs/xfs_file.c               |   4
-rw-r--r--  fs/xfs/xfs_fsmap.c              |  58
-rw-r--r--  fs/xfs/xfs_inode_item.c         |  79
-rw-r--r--  fs/xfs/xfs_log.c                |   2
-rw-r--r--  fs/xfs/xfs_mount.c              |   2
-rw-r--r--  fs/xfs/xfs_ondisk.h             |   2
-rw-r--r--  fs/xfs/xfs_super.c              |   2
37 files changed, 329 insertions, 179 deletions
diff --git a/fs/9p/vfs_addr.c b/fs/9p/vfs_addr.c
index adaf6f6dd858..e1cbdfdb7c68 100644
--- a/fs/9p/vfs_addr.c
+++ b/fs/9p/vfs_addr.c
@@ -310,9 +310,13 @@ static int v9fs_write_end(struct file *filp, struct address_space *mapping,
 
 	p9_debug(P9_DEBUG_VFS, "filp %p, mapping %p\n", filp, mapping);
 
-	if (unlikely(copied < len && !PageUptodate(page))) {
-		copied = 0;
-		goto out;
+	if (!PageUptodate(page)) {
+		if (unlikely(copied < len)) {
+			copied = 0;
+			goto out;
+		} else if (len == PAGE_SIZE) {
+			SetPageUptodate(page);
+		}
 	}
 	/*
 	 * No need to use i_size_read() here, the i_size
diff --git a/fs/binfmt_misc.c b/fs/binfmt_misc.c
index 2a46762def31..a7c5a9861bef 100644
--- a/fs/binfmt_misc.c
+++ b/fs/binfmt_misc.c
@@ -596,7 +596,7 @@ static void bm_evict_inode(struct inode *inode)
 {
 	Node *e = inode->i_private;
 
-	if (e->flags & MISC_FMT_OPEN_FILE)
+	if (e && e->flags & MISC_FMT_OPEN_FILE)
 		filp_close(e->interp_file, NULL);
 
 	clear_inode(inode);
diff --git a/fs/block_dev.c b/fs/block_dev.c
index 93d088ffc05c..789f55e851ae 100644
--- a/fs/block_dev.c
+++ b/fs/block_dev.c
@@ -716,10 +716,12 @@ int bdev_write_page(struct block_device *bdev, sector_t sector,
 
 	set_page_writeback(page);
 	result = ops->rw_page(bdev, sector + get_start_sect(bdev), page, true);
-	if (result)
+	if (result) {
 		end_page_writeback(page);
-	else
+	} else {
+		clean_page_buffers(page);
 		unlock_page(page);
+	}
 	blk_queue_exit(bdev->bd_queue);
 	return result;
 }
diff --git a/fs/btrfs/super.c b/fs/btrfs/super.c
index 35a128acfbd1..161694b66038 100644
--- a/fs/btrfs/super.c
+++ b/fs/btrfs/super.c
@@ -1135,7 +1135,7 @@ static int btrfs_fill_super(struct super_block *sb,
 #ifdef CONFIG_BTRFS_FS_POSIX_ACL
 	sb->s_flags |= MS_POSIXACL;
 #endif
-	sb->s_flags |= MS_I_VERSION;
+	sb->s_flags |= SB_I_VERSION;
 	sb->s_iflags |= SB_I_CGROUPWB;
 
 	err = super_setup_bdi(sb);
diff --git a/fs/crypto/keyinfo.c b/fs/crypto/keyinfo.c
index 018c588c7ac3..8e704d12a1cf 100644
--- a/fs/crypto/keyinfo.c
+++ b/fs/crypto/keyinfo.c
@@ -109,6 +109,11 @@ static int validate_user_key(struct fscrypt_info *crypt_info,
 		goto out;
 	}
 	ukp = user_key_payload_locked(keyring_key);
+	if (!ukp) {
+		/* key was revoked before we acquired its semaphore */
+		res = -EKEYREVOKED;
+		goto out;
+	}
 	if (ukp->datalen != sizeof(struct fscrypt_key)) {
 		res = -EINVAL;
 		goto out;
diff --git a/fs/direct-io.c b/fs/direct-io.c
index 62cf812ed0e5..b53e66d9abd7 100644
--- a/fs/direct-io.c
+++ b/fs/direct-io.c
@@ -45,6 +45,12 @@
 #define DIO_PAGES	64
 
 /*
+ * Flags for dio_complete()
+ */
+#define DIO_COMPLETE_ASYNC		0x01	/* This is async IO */
+#define DIO_COMPLETE_INVALIDATE	0x02	/* Can invalidate pages */
+
+/*
  * This code generally works in units of "dio_blocks". A dio_block is
  * somewhere between the hard sector size and the filesystem block size. it
  * is determined on a per-invocation basis. When talking to the filesystem
@@ -225,7 +231,7 @@ static inline struct page *dio_get_page(struct dio *dio,
  * filesystems can use it to hold additional state between get_block calls and
  * dio_complete.
  */
-static ssize_t dio_complete(struct dio *dio, ssize_t ret, bool is_async)
+static ssize_t dio_complete(struct dio *dio, ssize_t ret, unsigned int flags)
 {
 	loff_t offset = dio->iocb->ki_pos;
 	ssize_t transferred = 0;
@@ -259,14 +265,27 @@ static ssize_t dio_complete(struct dio *dio, ssize_t ret, bool is_async)
 	if (ret == 0)
 		ret = transferred;
 
+	if (dio->end_io) {
+		// XXX: ki_pos??
+		err = dio->end_io(dio->iocb, offset, ret, dio->private);
+		if (err)
+			ret = err;
+	}
+
 	/*
 	 * Try again to invalidate clean pages which might have been cached by
 	 * non-direct readahead, or faulted in by get_user_pages() if the source
 	 * of the write was an mmap'ed region of the file we're writing. Either
 	 * one is a pretty crazy thing to do, so we don't support it 100%. If
 	 * this invalidation fails, tough, the write still worked...
+	 *
+	 * And this page cache invalidation has to be after dio->end_io(), as
+	 * some filesystems convert unwritten extents to real allocations in
+	 * end_io() when necessary, otherwise a racing buffer read would cache
+	 * zeros from unwritten extents.
 	 */
-	if (ret > 0 && dio->op == REQ_OP_WRITE &&
+	if (flags & DIO_COMPLETE_INVALIDATE &&
+	    ret > 0 && dio->op == REQ_OP_WRITE &&
 	    dio->inode->i_mapping->nrpages) {
 		err = invalidate_inode_pages2_range(dio->inode->i_mapping,
 					offset >> PAGE_SHIFT,
@@ -274,18 +293,10 @@ static ssize_t dio_complete(struct dio *dio, ssize_t ret, bool is_async)
 		WARN_ON_ONCE(err);
 	}
 
-	if (dio->end_io) {
-
-		// XXX: ki_pos??
-		err = dio->end_io(dio->iocb, offset, ret, dio->private);
-		if (err)
-			ret = err;
-	}
-
 	if (!(dio->flags & DIO_SKIP_DIO_COUNT))
 		inode_dio_end(dio->inode);
 
-	if (is_async) {
+	if (flags & DIO_COMPLETE_ASYNC) {
 		/*
 		 * generic_write_sync expects ki_pos to have been updated
 		 * already, but the submission path only does this for
@@ -306,7 +317,7 @@ static void dio_aio_complete_work(struct work_struct *work)
 {
 	struct dio *dio = container_of(work, struct dio, complete_work);
 
-	dio_complete(dio, 0, true);
+	dio_complete(dio, 0, DIO_COMPLETE_ASYNC | DIO_COMPLETE_INVALIDATE);
 }
 
 static blk_status_t dio_bio_complete(struct dio *dio, struct bio *bio);
@@ -348,7 +359,7 @@ static void dio_bio_end_aio(struct bio *bio)
 			queue_work(dio->inode->i_sb->s_dio_done_wq,
 				   &dio->complete_work);
 		} else {
-			dio_complete(dio, 0, true);
+			dio_complete(dio, 0, DIO_COMPLETE_ASYNC);
 		}
 	}
 }
@@ -866,7 +877,8 @@ out:
 	 */
 	if (sdio->boundary) {
 		ret = dio_send_cur_page(dio, sdio, map_bh);
-		dio_bio_submit(dio, sdio);
+		if (sdio->bio)
+			dio_bio_submit(dio, sdio);
 		put_page(sdio->cur_page);
 		sdio->cur_page = NULL;
 	}
@@ -1359,7 +1371,7 @@ do_blockdev_direct_IO(struct kiocb *iocb, struct inode *inode,
 	dio_await_completion(dio);
 
 	if (drop_refcount(dio) == 0) {
-		retval = dio_complete(dio, retval, false);
+		retval = dio_complete(dio, retval, DIO_COMPLETE_INVALIDATE);
 	} else
 		BUG_ON(retval != -EIOCBQUEUED);
 
diff --git a/fs/ecryptfs/ecryptfs_kernel.h b/fs/ecryptfs/ecryptfs_kernel.h
index 9c351bf757b2..3fbc0ff79699 100644
--- a/fs/ecryptfs/ecryptfs_kernel.h
+++ b/fs/ecryptfs/ecryptfs_kernel.h
@@ -84,11 +84,16 @@ struct ecryptfs_page_crypt_context {
 static inline struct ecryptfs_auth_tok *
 ecryptfs_get_encrypted_key_payload_data(struct key *key)
 {
-	if (key->type == &key_type_encrypted)
-		return (struct ecryptfs_auth_tok *)
-			(&((struct encrypted_key_payload *)key->payload.data[0])->payload_data);
-	else
+	struct encrypted_key_payload *payload;
+
+	if (key->type != &key_type_encrypted)
 		return NULL;
+
+	payload = key->payload.data[0];
+	if (!payload)
+		return ERR_PTR(-EKEYREVOKED);
+
+	return (struct ecryptfs_auth_tok *)payload->payload_data;
 }
 
 static inline struct key *ecryptfs_get_encrypted_key(char *sig)
@@ -114,12 +119,17 @@ static inline struct ecryptfs_auth_tok *
 ecryptfs_get_key_payload_data(struct key *key)
 {
 	struct ecryptfs_auth_tok *auth_tok;
+	struct user_key_payload *ukp;
 
 	auth_tok = ecryptfs_get_encrypted_key_payload_data(key);
-	if (!auth_tok)
-		return (struct ecryptfs_auth_tok *)user_key_payload_locked(key)->data;
-	else
+	if (auth_tok)
 		return auth_tok;
+
+	ukp = user_key_payload_locked(key);
+	if (!ukp)
+		return ERR_PTR(-EKEYREVOKED);
+
+	return (struct ecryptfs_auth_tok *)ukp->data;
 }
 
 #define ECRYPTFS_MAX_KEYSET_SIZE 1024
diff --git a/fs/ecryptfs/keystore.c b/fs/ecryptfs/keystore.c
index 3cf1546dca82..fa218cd64f74 100644
--- a/fs/ecryptfs/keystore.c
+++ b/fs/ecryptfs/keystore.c
@@ -459,7 +459,8 @@ out:
  * @auth_tok_key: key containing the authentication token
  * @auth_tok: authentication token
  *
- * Returns zero on valid auth tok; -EINVAL otherwise
+ * Returns zero on valid auth tok; -EINVAL if the payload is invalid; or
+ * -EKEYREVOKED if the key was revoked before we acquired its semaphore.
  */
 static int
 ecryptfs_verify_auth_tok_from_key(struct key *auth_tok_key,
@@ -468,6 +469,12 @@ ecryptfs_verify_auth_tok_from_key(struct key *auth_tok_key,
 	int rc = 0;
 
 	(*auth_tok) = ecryptfs_get_key_payload_data(auth_tok_key);
+	if (IS_ERR(*auth_tok)) {
+		rc = PTR_ERR(*auth_tok);
+		*auth_tok = NULL;
+		goto out;
+	}
+
 	if (ecryptfs_verify_version((*auth_tok)->version)) {
 		printk(KERN_ERR "Data structure version mismatch. Userspace "
 		       "tools must match eCryptfs kernel module with major "
diff --git a/fs/exec.c b/fs/exec.c
index 5470d3c1892a..3e14ba25f678 100644
--- a/fs/exec.c
+++ b/fs/exec.c
@@ -1802,6 +1802,7 @@ static int do_execveat_common(int fd, struct filename *filename,
 	/* execve succeeded */
 	current->fs->in_exec = 0;
 	current->in_execve = 0;
+	membarrier_execve(current);
 	acct_update_integrals(current);
 	task_numa_free(current);
 	free_bprm(bprm);
diff --git a/fs/ext4/super.c b/fs/ext4/super.c
index b104096fce9e..b0915b734a38 100644
--- a/fs/ext4/super.c
+++ b/fs/ext4/super.c
@@ -1677,7 +1677,7 @@ static int handle_mount_opt(struct super_block *sb, char *opt, int token,
 		sbi->s_mount_flags |= EXT4_MF_FS_ABORTED;
 		return 1;
 	case Opt_i_version:
-		sb->s_flags |= MS_I_VERSION;
+		sb->s_flags |= SB_I_VERSION;
 		return 1;
 	case Opt_lazytime:
 		sb->s_flags |= MS_LAZYTIME;
@@ -2060,7 +2060,7 @@ static int _ext4_show_options(struct seq_file *seq, struct super_block *sb,
 		SEQ_OPTS_PRINT("min_batch_time=%u", sbi->s_min_batch_time);
 	if (nodefs || sbi->s_max_batch_time != EXT4_DEF_MAX_BATCH_TIME)
 		SEQ_OPTS_PRINT("max_batch_time=%u", sbi->s_max_batch_time);
-	if (sb->s_flags & MS_I_VERSION)
+	if (sb->s_flags & SB_I_VERSION)
 		SEQ_OPTS_PUTS("i_version");
 	if (nodefs || sbi->s_stripe)
 		SEQ_OPTS_PRINT("stripe=%lu", sbi->s_stripe);
diff --git a/fs/f2fs/f2fs.h b/fs/f2fs/f2fs.h
index 9a7c90386947..4b4a72f392be 100644
--- a/fs/f2fs/f2fs.h
+++ b/fs/f2fs/f2fs.h
@@ -2525,7 +2525,7 @@ void invalidate_blocks(struct f2fs_sb_info *sbi, block_t addr);
 bool is_checkpointed_data(struct f2fs_sb_info *sbi, block_t blkaddr);
 void refresh_sit_entry(struct f2fs_sb_info *sbi, block_t old, block_t new);
 void stop_discard_thread(struct f2fs_sb_info *sbi);
-void f2fs_wait_discard_bios(struct f2fs_sb_info *sbi);
+void f2fs_wait_discard_bios(struct f2fs_sb_info *sbi, bool umount);
 void clear_prefree_segments(struct f2fs_sb_info *sbi, struct cp_control *cpc);
 void release_discard_addrs(struct f2fs_sb_info *sbi);
 int npages_for_summary_flush(struct f2fs_sb_info *sbi, bool for_ra);
diff --git a/fs/f2fs/segment.c b/fs/f2fs/segment.c
index 621b9b3d320b..c695ff462ee6 100644
--- a/fs/f2fs/segment.c
+++ b/fs/f2fs/segment.c
@@ -1210,11 +1210,11 @@ void stop_discard_thread(struct f2fs_sb_info *sbi)
 }
 
 /* This comes from f2fs_put_super and f2fs_trim_fs */
-void f2fs_wait_discard_bios(struct f2fs_sb_info *sbi)
+void f2fs_wait_discard_bios(struct f2fs_sb_info *sbi, bool umount)
 {
 	__issue_discard_cmd(sbi, false);
 	__drop_discard_cmd(sbi);
-	__wait_discard_cmd(sbi, false);
+	__wait_discard_cmd(sbi, !umount);
 }
 
 static void mark_discard_range_all(struct f2fs_sb_info *sbi)
@@ -2244,7 +2244,7 @@ int f2fs_trim_fs(struct f2fs_sb_info *sbi, struct fstrim_range *range)
 	}
 	/* It's time to issue all the filed discards */
 	mark_discard_range_all(sbi);
-	f2fs_wait_discard_bios(sbi);
+	f2fs_wait_discard_bios(sbi, false);
 out:
 	range->len = F2FS_BLK_TO_BYTES(cpc.trimmed);
 	return err;
diff --git a/fs/f2fs/super.c b/fs/f2fs/super.c
index 89f61eb3d167..933c3d529e65 100644
--- a/fs/f2fs/super.c
+++ b/fs/f2fs/super.c
@@ -801,7 +801,7 @@ static void f2fs_put_super(struct super_block *sb)
 	}
 
 	/* be sure to wait for any on-going discard commands */
-	f2fs_wait_discard_bios(sbi);
+	f2fs_wait_discard_bios(sbi, true);
 
 	if (f2fs_discard_en(sbi) && !sbi->discard_blks) {
 		struct cp_control cpc = {
diff --git a/fs/fscache/object-list.c b/fs/fscache/object-list.c
index b5ab06fabc60..0438d4cd91ef 100644
--- a/fs/fscache/object-list.c
+++ b/fs/fscache/object-list.c
@@ -331,6 +331,13 @@ static void fscache_objlist_config(struct fscache_objlist_data *data)
 	rcu_read_lock();
 
 	confkey = user_key_payload_rcu(key);
+	if (!confkey) {
+		/* key was revoked */
+		rcu_read_unlock();
+		key_put(key);
+		goto no_config;
+	}
+
 	buf = confkey->data;
 
 	for (len = confkey->datalen - 1; len >= 0; len--) {
diff --git a/fs/fuse/inode.c b/fs/fuse/inode.c
index 65c88379a3a1..94a745acaef8 100644
--- a/fs/fuse/inode.c
+++ b/fs/fuse/inode.c
@@ -1059,7 +1059,7 @@ static int fuse_fill_super(struct super_block *sb, void *data, int silent)
 	if (sb->s_flags & MS_MANDLOCK)
 		goto err;
 
-	sb->s_flags &= ~(MS_NOSEC | MS_I_VERSION);
+	sb->s_flags &= ~(MS_NOSEC | SB_I_VERSION);
 
 	if (!parse_fuse_opt(data, &d, is_bdev))
 		goto err;
diff --git a/fs/iomap.c b/fs/iomap.c
index be61cf742b5e..d4801f8dd4fd 100644
--- a/fs/iomap.c
+++ b/fs/iomap.c
@@ -714,23 +714,9 @@ static ssize_t iomap_dio_complete(struct iomap_dio *dio)
 {
 	struct kiocb *iocb = dio->iocb;
 	struct inode *inode = file_inode(iocb->ki_filp);
+	loff_t offset = iocb->ki_pos;
 	ssize_t ret;
 
-	/*
-	 * Try again to invalidate clean pages which might have been cached by
-	 * non-direct readahead, or faulted in by get_user_pages() if the source
-	 * of the write was an mmap'ed region of the file we're writing. Either
-	 * one is a pretty crazy thing to do, so we don't support it 100%. If
-	 * this invalidation fails, tough, the write still worked...
-	 */
-	if (!dio->error &&
-	    (dio->flags & IOMAP_DIO_WRITE) && inode->i_mapping->nrpages) {
-		ret = invalidate_inode_pages2_range(inode->i_mapping,
-				iocb->ki_pos >> PAGE_SHIFT,
-				(iocb->ki_pos + dio->size - 1) >> PAGE_SHIFT);
-		WARN_ON_ONCE(ret);
-	}
-
 	if (dio->end_io) {
 		ret = dio->end_io(iocb,
 				dio->error ? dio->error : dio->size,
@@ -742,12 +728,33 @@ static ssize_t iomap_dio_complete(struct iomap_dio *dio)
 	if (likely(!ret)) {
 		ret = dio->size;
 		/* check for short read */
-		if (iocb->ki_pos + ret > dio->i_size &&
+		if (offset + ret > dio->i_size &&
 		    !(dio->flags & IOMAP_DIO_WRITE))
-			ret = dio->i_size - iocb->ki_pos;
+			ret = dio->i_size - offset;
 		iocb->ki_pos += ret;
 	}
 
+	/*
+	 * Try again to invalidate clean pages which might have been cached by
+	 * non-direct readahead, or faulted in by get_user_pages() if the source
+	 * of the write was an mmap'ed region of the file we're writing. Either
+	 * one is a pretty crazy thing to do, so we don't support it 100%. If
+	 * this invalidation fails, tough, the write still worked...
+	 *
+	 * And this page cache invalidation has to be after dio->end_io(), as
+	 * some filesystems convert unwritten extents to real allocations in
+	 * end_io() when necessary, otherwise a racing buffer read would cache
+	 * zeros from unwritten extents.
+	 */
+	if (!dio->error &&
+	    (dio->flags & IOMAP_DIO_WRITE) && inode->i_mapping->nrpages) {
+		int err;
+		err = invalidate_inode_pages2_range(inode->i_mapping,
+				offset >> PAGE_SHIFT,
+				(offset + dio->size - 1) >> PAGE_SHIFT);
+		WARN_ON_ONCE(err);
+	}
+
 	inode_dio_end(file_inode(iocb->ki_filp));
 	kfree(dio);
 
diff --git a/fs/mpage.c b/fs/mpage.c
index 37bb77c1302c..c991faec70b9 100644
--- a/fs/mpage.c
+++ b/fs/mpage.c
@@ -468,6 +468,16 @@ static void clean_buffers(struct page *page, unsigned first_unmapped)
 		try_to_free_buffers(page);
 }
 
+/*
+ * For situations where we want to clean all buffers attached to a page.
+ * We don't need to calculate how many buffers are attached to the page,
+ * we just need to specify a number larger than the maximum number of buffers.
+ */
+void clean_page_buffers(struct page *page)
+{
+	clean_buffers(page, ~0U);
+}
+
 static int __mpage_writepage(struct page *page, struct writeback_control *wbc,
 		      void *data)
 {
@@ -605,10 +615,8 @@ alloc_new:
 	if (bio == NULL) {
 		if (first_unmapped == blocks_per_page) {
 			if (!bdev_write_page(bdev, blocks[0] << (blkbits - 9),
-								page, wbc)) {
-				clean_buffers(page, first_unmapped);
+								page, wbc))
 				goto out;
-			}
 		}
 		bio = mpage_alloc(bdev, blocks[0] << (blkbits - 9),
 				BIO_MAX_PAGES, GFP_NOFS|__GFP_HIGH);
diff --git a/fs/namespace.c b/fs/namespace.c
index 3b601f115b6c..d18deb4c410b 100644
--- a/fs/namespace.c
+++ b/fs/namespace.c
@@ -2825,7 +2825,8 @@ long do_mount(const char *dev_name, const char __user *dir_name,
 			   SB_MANDLOCK |
 			   SB_DIRSYNC |
 			   SB_SILENT |
-			   SB_POSIXACL);
+			   SB_POSIXACL |
+			   SB_I_VERSION);
 
 	if (flags & MS_REMOUNT)
 		retval = do_remount(&path, flags, sb_flags, mnt_flags,
diff --git a/fs/nfsd/nfs4proc.c b/fs/nfsd/nfs4proc.c
index 3c69db7d4905..8487486ec496 100644
--- a/fs/nfsd/nfs4proc.c
+++ b/fs/nfsd/nfs4proc.c
@@ -927,6 +927,13 @@ nfsd4_secinfo_release(union nfsd4_op_u *u)
 	exp_put(u->secinfo.si_exp);
 }
 
+static void
+nfsd4_secinfo_no_name_release(union nfsd4_op_u *u)
+{
+	if (u->secinfo_no_name.sin_exp)
+		exp_put(u->secinfo_no_name.sin_exp);
+}
+
 static __be32
 nfsd4_setattr(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
 	      union nfsd4_op_u *u)
@@ -2375,7 +2382,7 @@ static const struct nfsd4_operation nfsd4_ops[] = {
 	},
 	[OP_SECINFO_NO_NAME] = {
 		.op_func = nfsd4_secinfo_no_name,
-		.op_release = nfsd4_secinfo_release,
+		.op_release = nfsd4_secinfo_no_name_release,
 		.op_flags = OP_HANDLES_WRONGSEC,
 		.op_name = "OP_SECINFO_NO_NAME",
 		.op_rsize_bop = nfsd4_secinfo_rsize,
diff --git a/fs/quota/dquot.c b/fs/quota/dquot.c
index 50b0556a124f..52ad15192e72 100644
--- a/fs/quota/dquot.c
+++ b/fs/quota/dquot.c
@@ -1297,21 +1297,18 @@ static int dquot_add_space(struct dquot *dquot, qsize_t space,
 	spin_lock(&dquot->dq_dqb_lock);
 	if (!sb_has_quota_limits_enabled(sb, dquot->dq_id.type) ||
 	    test_bit(DQ_FAKE_B, &dquot->dq_flags))
-		goto add;
+		goto finish;
 
 	tspace = dquot->dq_dqb.dqb_curspace + dquot->dq_dqb.dqb_rsvspace
 		+ space + rsv_space;
 
-	if (flags & DQUOT_SPACE_NOFAIL)
-		goto add;
-
 	if (dquot->dq_dqb.dqb_bhardlimit &&
 	    tspace > dquot->dq_dqb.dqb_bhardlimit &&
 	    !ignore_hardlimit(dquot)) {
 		if (flags & DQUOT_SPACE_WARN)
 			prepare_warning(warn, dquot, QUOTA_NL_BHARDWARN);
 		ret = -EDQUOT;
-		goto out;
+		goto finish;
 	}
 
 	if (dquot->dq_dqb.dqb_bsoftlimit &&
@@ -1322,7 +1319,7 @@ static int dquot_add_space(struct dquot *dquot, qsize_t space,
 		if (flags & DQUOT_SPACE_WARN)
 			prepare_warning(warn, dquot, QUOTA_NL_BSOFTLONGWARN);
 		ret = -EDQUOT;
-		goto out;
+		goto finish;
 	}
 
 	if (dquot->dq_dqb.dqb_bsoftlimit &&
@@ -1338,13 +1335,21 @@ static int dquot_add_space(struct dquot *dquot, qsize_t space,
 			 * be always printed
 			 */
 			ret = -EDQUOT;
-			goto out;
+			goto finish;
 		}
 	}
-add:
-	dquot->dq_dqb.dqb_rsvspace += rsv_space;
-	dquot->dq_dqb.dqb_curspace += space;
-out:
+finish:
+	/*
+	 * We have to be careful and go through warning generation & grace time
+	 * setting even if DQUOT_SPACE_NOFAIL is set. That's why we check it
+	 * only here...
+	 */
+	if (flags & DQUOT_SPACE_NOFAIL)
+		ret = 0;
+	if (!ret) {
+		dquot->dq_dqb.dqb_rsvspace += rsv_space;
+		dquot->dq_dqb.dqb_curspace += space;
+	}
 	spin_unlock(&dquot->dq_dqb_lock);
 	return ret;
 }
diff --git a/fs/xfs/libxfs/xfs_alloc.c b/fs/xfs/libxfs/xfs_alloc.c
index 744dcaec34cc..f965ce832bc0 100644
--- a/fs/xfs/libxfs/xfs_alloc.c
+++ b/fs/xfs/libxfs/xfs_alloc.c
@@ -1584,6 +1584,10 @@ xfs_alloc_ag_vextent_small(
 
 			bp = xfs_btree_get_bufs(args->mp, args->tp,
 				args->agno, fbno, 0);
+			if (!bp) {
+				error = -EFSCORRUPTED;
+				goto error0;
+			}
 			xfs_trans_binval(args->tp, bp);
 		}
 		args->len = 1;
@@ -2141,6 +2145,10 @@ xfs_alloc_fix_freelist(
 		if (error)
 			goto out_agbp_relse;
 		bp = xfs_btree_get_bufs(mp, tp, args->agno, bno, 0);
+		if (!bp) {
+			error = -EFSCORRUPTED;
+			goto out_agbp_relse;
+		}
 		xfs_trans_binval(tp, bp);
 	}
 
diff --git a/fs/xfs/libxfs/xfs_bmap.c b/fs/xfs/libxfs/xfs_bmap.c
index 044a363119be..89263797cf32 100644
--- a/fs/xfs/libxfs/xfs_bmap.c
+++ b/fs/xfs/libxfs/xfs_bmap.c
@@ -1477,14 +1477,14 @@ xfs_bmap_isaeof(
 	int		is_empty;
 	int		error;
 
-	bma->aeof = 0;
+	bma->aeof = false;
 	error = xfs_bmap_last_extent(NULL, bma->ip, whichfork, &rec,
 				     &is_empty);
 	if (error)
 		return error;
 
 	if (is_empty) {
-		bma->aeof = 1;
+		bma->aeof = true;
 		return 0;
 	}
 
@@ -3852,6 +3852,17 @@ xfs_trim_extent(
 	}
 }
 
+/* trim extent to within eof */
+void
+xfs_trim_extent_eof(
+	struct xfs_bmbt_irec	*irec,
+	struct xfs_inode	*ip)
+
+{
+	xfs_trim_extent(irec, 0, XFS_B_TO_FSB(ip->i_mount,
+					      i_size_read(VFS_I(ip))));
+}
+
 /*
  * Trim the returned map to the required bounds
  */
diff --git a/fs/xfs/libxfs/xfs_bmap.h b/fs/xfs/libxfs/xfs_bmap.h
index 851982a5dfbc..502e0d8fb4ff 100644
--- a/fs/xfs/libxfs/xfs_bmap.h
+++ b/fs/xfs/libxfs/xfs_bmap.h
@@ -208,6 +208,7 @@ void xfs_bmap_trace_exlist(struct xfs_inode *ip, xfs_extnum_t cnt,
 
 void	xfs_trim_extent(struct xfs_bmbt_irec *irec, xfs_fileoff_t bno,
 		xfs_filblks_t len);
+void	xfs_trim_extent_eof(struct xfs_bmbt_irec *, struct xfs_inode *);
 int	xfs_bmap_add_attrfork(struct xfs_inode *ip, int size, int rsvd);
 void	xfs_bmap_local_to_extents_empty(struct xfs_inode *ip, int whichfork);
 void	xfs_bmap_add_free(struct xfs_mount *mp, struct xfs_defer_ops *dfops,
diff --git a/fs/xfs/libxfs/xfs_ialloc.c b/fs/xfs/libxfs/xfs_ialloc.c
index 988bb3f31446..dfd643909f85 100644
--- a/fs/xfs/libxfs/xfs_ialloc.c
+++ b/fs/xfs/libxfs/xfs_ialloc.c
@@ -1962,7 +1962,7 @@ xfs_difree_inobt(
1962 if (!(mp->m_flags & XFS_MOUNT_IKEEP) && 1962 if (!(mp->m_flags & XFS_MOUNT_IKEEP) &&
1963 rec.ir_free == XFS_INOBT_ALL_FREE && 1963 rec.ir_free == XFS_INOBT_ALL_FREE &&
1964 mp->m_sb.sb_inopblock <= XFS_INODES_PER_CHUNK) { 1964 mp->m_sb.sb_inopblock <= XFS_INODES_PER_CHUNK) {
1965 xic->deleted = 1; 1965 xic->deleted = true;
1966 xic->first_ino = XFS_AGINO_TO_INO(mp, agno, rec.ir_startino); 1966 xic->first_ino = XFS_AGINO_TO_INO(mp, agno, rec.ir_startino);
1967 xic->alloc = xfs_inobt_irec_to_allocmask(&rec); 1967 xic->alloc = xfs_inobt_irec_to_allocmask(&rec);
1968 1968
@@ -1989,7 +1989,7 @@ xfs_difree_inobt(
 
 		xfs_difree_inode_chunk(mp, agno, &rec, dfops);
 	} else {
-		xic->deleted = 0;
+		xic->deleted = false;
 
 		error = xfs_inobt_update(cur, &rec);
 		if (error) {
diff --git a/fs/xfs/libxfs/xfs_log_format.h b/fs/xfs/libxfs/xfs_log_format.h
index 8372e9bcd7b6..71de185735e0 100644
--- a/fs/xfs/libxfs/xfs_log_format.h
+++ b/fs/xfs/libxfs/xfs_log_format.h
@@ -270,6 +270,7 @@ typedef struct xfs_inode_log_format {
 	uint32_t		ilf_fields;	/* flags for fields logged */
 	uint16_t		ilf_asize;	/* size of attr d/ext/root */
 	uint16_t		ilf_dsize;	/* size of data/ext/root */
+	uint32_t		ilf_pad;	/* pad for 64 bit boundary */
 	uint64_t		ilf_ino;	/* inode number */
 	union {
 		uint32_t	ilfu_rdev;	/* rdev value for dev inode*/
@@ -280,29 +281,17 @@ typedef struct xfs_inode_log_format {
 	int32_t			ilf_boffset;	/* off of inode in buffer */
 } xfs_inode_log_format_t;
 
-typedef struct xfs_inode_log_format_32 {
-	uint16_t		ilf_type;	/* inode log item type */
-	uint16_t		ilf_size;	/* size of this item */
-	uint32_t		ilf_fields;	/* flags for fields logged */
-	uint16_t		ilf_asize;	/* size of attr d/ext/root */
-	uint16_t		ilf_dsize;	/* size of data/ext/root */
-	uint64_t		ilf_ino;	/* inode number */
-	union {
-		uint32_t	ilfu_rdev;	/* rdev value for dev inode*/
-		uuid_t		ilfu_uuid;	/* mount point value */
-	} ilf_u;
-	int64_t			ilf_blkno;	/* blkno of inode buffer */
-	int32_t			ilf_len;	/* len of inode buffer */
-	int32_t			ilf_boffset;	/* off of inode in buffer */
-} __attribute__((packed)) xfs_inode_log_format_32_t;
-
-typedef struct xfs_inode_log_format_64 {
+/*
+ * Old 32 bit systems will log in this format without the 64 bit
+ * alignment padding. Recovery will detect this and convert it to the
+ * correct format.
+ */
+struct xfs_inode_log_format_32 {
 	uint16_t		ilf_type;	/* inode log item type */
 	uint16_t		ilf_size;	/* size of this item */
 	uint32_t		ilf_fields;	/* flags for fields logged */
 	uint16_t		ilf_asize;	/* size of attr d/ext/root */
 	uint16_t		ilf_dsize;	/* size of data/ext/root */
-	uint32_t		ilf_pad;	/* pad for 64 bit boundary */
 	uint64_t		ilf_ino;	/* inode number */
 	union {
 		uint32_t	ilfu_rdev;	/* rdev value for dev inode*/
@@ -311,7 +300,7 @@ typedef struct xfs_inode_log_format_64 {
 	int64_t			ilf_blkno;	/* blkno of inode buffer */
 	int32_t			ilf_len;	/* len of inode buffer */
 	int32_t			ilf_boffset;	/* off of inode in buffer */
-} xfs_inode_log_format_64_t;
+} __attribute__((packed));
 
 
 /*
diff --git a/fs/xfs/xfs_acl.c b/fs/xfs/xfs_acl.c
index 7034e17535de..3354140de07e 100644
--- a/fs/xfs/xfs_acl.c
+++ b/fs/xfs/xfs_acl.c
@@ -247,6 +247,8 @@ xfs_set_mode(struct inode *inode, umode_t mode)
 int
 xfs_set_acl(struct inode *inode, struct posix_acl *acl, int type)
 {
+	umode_t mode;
+	bool set_mode = false;
 	int error = 0;
 
 	if (!acl)
@@ -257,16 +259,24 @@ xfs_set_acl(struct inode *inode, struct posix_acl *acl, int type)
 		return error;
 
 	if (type == ACL_TYPE_ACCESS) {
-		umode_t mode;
-
 		error = posix_acl_update_mode(inode, &mode, &acl);
 		if (error)
 			return error;
-		error = xfs_set_mode(inode, mode);
-		if (error)
-			return error;
+		set_mode = true;
 	}
 
  set_acl:
-	return __xfs_set_acl(inode, acl, type);
+	error = __xfs_set_acl(inode, acl, type);
+	if (error)
+		return error;
+
+	/*
+	 * We set the mode after successfully updating the ACL xattr because the
+	 * xattr update can fail at ENOSPC and we don't want to change the mode
+	 * if the ACL update hasn't been applied.
+	 */
+	if (set_mode)
+		error = xfs_set_mode(inode, mode);
+
+	return error;
 }
diff --git a/fs/xfs/xfs_aops.c b/fs/xfs/xfs_aops.c
index f18e5932aec4..a3eeaba156c5 100644
--- a/fs/xfs/xfs_aops.c
+++ b/fs/xfs/xfs_aops.c
@@ -446,6 +446,19 @@ xfs_imap_valid(
 {
 	offset >>= inode->i_blkbits;
 
+	/*
+	 * We have to make sure the cached mapping is within EOF to protect
+	 * against eofblocks trimming on file release leaving us with a stale
+	 * mapping. Otherwise, a page for a subsequent file extending buffered
+	 * write could get picked up by this writeback cycle and written to the
+	 * wrong blocks.
+	 *
+	 * Note that what we really want here is a generic mapping invalidation
+	 * mechanism to protect us from arbitrary extent modifying contexts, not
+	 * just eofblocks.
+	 */
+	xfs_trim_extent_eof(imap, XFS_I(inode));
+
 	return offset >= imap->br_startoff &&
 		offset < imap->br_startoff + imap->br_blockcount;
 }
@@ -735,6 +748,14 @@ xfs_vm_invalidatepage(
 {
 	trace_xfs_invalidatepage(page->mapping->host, page, offset,
 				 length);
+
+	/*
+	 * If we are invalidating the entire page, clear the dirty state from it
+	 * so that we can check for attempts to release dirty cached pages in
+	 * xfs_vm_releasepage().
+	 */
+	if (offset == 0 && length >= PAGE_SIZE)
+		cancel_dirty_page(page);
 	block_invalidatepage(page, offset, length);
 }
 
@@ -1190,25 +1211,27 @@ xfs_vm_releasepage(
 	 * mm accommodates an old ext3 case where clean pages might not have had
 	 * the dirty bit cleared. Thus, it can send actual dirty pages to
 	 * ->releasepage() via shrink_active_list(). Conversely,
-	 * block_invalidatepage() can send pages that are still marked dirty
-	 * but otherwise have invalidated buffers.
+	 * block_invalidatepage() can send pages that are still marked dirty but
+	 * otherwise have invalidated buffers.
 	 *
 	 * We want to release the latter to avoid unnecessary buildup of the
-	 * LRU, skip the former and warn if we've left any lingering
-	 * delalloc/unwritten buffers on clean pages. Skip pages with delalloc
-	 * or unwritten buffers and warn if the page is not dirty. Otherwise
-	 * try to release the buffers.
+	 * LRU, so xfs_vm_invalidatepage() clears the page dirty flag on pages
+	 * that are entirely invalidated and need to be released. Hence the
+	 * only time we should get dirty pages here is through
+	 * shrink_active_list() and so we can simply skip those now.
+	 *
+	 * warn if we've left any lingering delalloc/unwritten buffers on clean
+	 * or invalidated pages we are about to release.
 	 */
+	if (PageDirty(page))
+		return 0;
+
 	xfs_count_page_state(page, &delalloc, &unwritten);
 
-	if (delalloc) {
-		WARN_ON_ONCE(!PageDirty(page));
+	if (WARN_ON_ONCE(delalloc))
 		return 0;
-	}
-	if (unwritten) {
-		WARN_ON_ONCE(!PageDirty(page));
+	if (WARN_ON_ONCE(unwritten))
 		return 0;
-	}
 
 	return try_to_free_buffers(page);
 }
diff --git a/fs/xfs/xfs_attr_inactive.c b/fs/xfs/xfs_attr_inactive.c
index ebd66b19fbfc..e3a950ed35a8 100644
--- a/fs/xfs/xfs_attr_inactive.c
+++ b/fs/xfs/xfs_attr_inactive.c
@@ -302,6 +302,8 @@ xfs_attr3_node_inactive(
302 &bp, XFS_ATTR_FORK); 302 &bp, XFS_ATTR_FORK);
303 if (error) 303 if (error)
304 return error; 304 return error;
305 node = bp->b_addr;
306 btree = dp->d_ops->node_tree_p(node);
305 child_fsb = be32_to_cpu(btree[i + 1].before); 307 child_fsb = be32_to_cpu(btree[i + 1].before);
306 xfs_trans_brelse(*trans, bp); 308 xfs_trans_brelse(*trans, bp);
307 } 309 }
diff --git a/fs/xfs/xfs_bmap_util.c b/fs/xfs/xfs_bmap_util.c
index e9db7fc95b70..6503cfa44262 100644
--- a/fs/xfs/xfs_bmap_util.c
+++ b/fs/xfs/xfs_bmap_util.c
@@ -84,6 +84,7 @@ xfs_zero_extent(
84 GFP_NOFS, 0); 84 GFP_NOFS, 0);
85} 85}
86 86
87#ifdef CONFIG_XFS_RT
87int 88int
88xfs_bmap_rtalloc( 89xfs_bmap_rtalloc(
89 struct xfs_bmalloca *ap) /* bmap alloc argument struct */ 90 struct xfs_bmalloca *ap) /* bmap alloc argument struct */
@@ -190,6 +191,7 @@ xfs_bmap_rtalloc(
 	}
 	return 0;
 }
+#endif /* CONFIG_XFS_RT */
 
 /*
  * Check if the endoff is outside the last extent. If so the caller will grow
diff --git a/fs/xfs/xfs_bmap_util.h b/fs/xfs/xfs_bmap_util.h
index 0eaa81dc49be..7d330b3c77c3 100644
--- a/fs/xfs/xfs_bmap_util.h
+++ b/fs/xfs/xfs_bmap_util.h
@@ -28,7 +28,20 @@ struct xfs_mount;
28struct xfs_trans; 28struct xfs_trans;
29struct xfs_bmalloca; 29struct xfs_bmalloca;
30 30
31#ifdef CONFIG_XFS_RT
31int xfs_bmap_rtalloc(struct xfs_bmalloca *ap); 32int xfs_bmap_rtalloc(struct xfs_bmalloca *ap);
33#else /* !CONFIG_XFS_RT */
34/*
35 * Attempts to allocate RT extents when RT is disable indicates corruption and
36 * should trigger a shutdown.
37 */
38static inline int
39xfs_bmap_rtalloc(struct xfs_bmalloca *ap)
40{
41 return -EFSCORRUPTED;
42}
43#endif /* CONFIG_XFS_RT */
44
32int xfs_bmap_eof(struct xfs_inode *ip, xfs_fileoff_t endoff, 45int xfs_bmap_eof(struct xfs_inode *ip, xfs_fileoff_t endoff,
33 int whichfork, int *eof); 46 int whichfork, int *eof);
34int xfs_bmap_punch_delalloc_range(struct xfs_inode *ip, 47int xfs_bmap_punch_delalloc_range(struct xfs_inode *ip,
diff --git a/fs/xfs/xfs_file.c b/fs/xfs/xfs_file.c
index 309e26c9dddb..56d0e526870c 100644
--- a/fs/xfs/xfs_file.c
+++ b/fs/xfs/xfs_file.c
@@ -764,7 +764,7 @@ xfs_file_fallocate(
764 enum xfs_prealloc_flags flags = 0; 764 enum xfs_prealloc_flags flags = 0;
765 uint iolock = XFS_IOLOCK_EXCL; 765 uint iolock = XFS_IOLOCK_EXCL;
766 loff_t new_size = 0; 766 loff_t new_size = 0;
767 bool do_file_insert = 0; 767 bool do_file_insert = false;
768 768
769 if (!S_ISREG(inode->i_mode)) 769 if (!S_ISREG(inode->i_mode))
770 return -EINVAL; 770 return -EINVAL;
@@ -825,7 +825,7 @@ xfs_file_fallocate(
 			error = -EINVAL;
 			goto out_unlock;
 		}
-		do_file_insert = 1;
+		do_file_insert = true;
 	} else {
 		flags |= XFS_PREALLOC_SET;
 
diff --git a/fs/xfs/xfs_fsmap.c b/fs/xfs/xfs_fsmap.c
index 814ed729881d..43cfc07996a4 100644
--- a/fs/xfs/xfs_fsmap.c
+++ b/fs/xfs/xfs_fsmap.c
@@ -367,29 +367,6 @@ xfs_getfsmap_datadev_helper(
 	return xfs_getfsmap_helper(cur->bc_tp, info, rec, rec_daddr);
 }
 
-/* Transform a rtbitmap "record" into a fsmap */
-STATIC int
-xfs_getfsmap_rtdev_rtbitmap_helper(
-	struct xfs_trans		*tp,
-	struct xfs_rtalloc_rec		*rec,
-	void				*priv)
-{
-	struct xfs_mount		*mp = tp->t_mountp;
-	struct xfs_getfsmap_info	*info = priv;
-	struct xfs_rmap_irec		irec;
-	xfs_daddr_t			rec_daddr;
-
-	rec_daddr = XFS_FSB_TO_BB(mp, rec->ar_startblock);
-
-	irec.rm_startblock = rec->ar_startblock;
-	irec.rm_blockcount = rec->ar_blockcount;
-	irec.rm_owner = XFS_RMAP_OWN_NULL;	/* "free" */
-	irec.rm_offset = 0;
-	irec.rm_flags = 0;
-
-	return xfs_getfsmap_helper(tp, info, &irec, rec_daddr);
-}
-
 /* Transform a bnobt irec into a fsmap */
 STATIC int
 xfs_getfsmap_datadev_bnobt_helper(
@@ -475,6 +452,30 @@ xfs_getfsmap_logdev(
 	return xfs_getfsmap_helper(tp, info, &rmap, 0);
 }
 
+#ifdef CONFIG_XFS_RT
+/* Transform a rtbitmap "record" into a fsmap */
+STATIC int
+xfs_getfsmap_rtdev_rtbitmap_helper(
+	struct xfs_trans		*tp,
+	struct xfs_rtalloc_rec		*rec,
+	void				*priv)
+{
+	struct xfs_mount		*mp = tp->t_mountp;
+	struct xfs_getfsmap_info	*info = priv;
+	struct xfs_rmap_irec		irec;
+	xfs_daddr_t			rec_daddr;
+
+	rec_daddr = XFS_FSB_TO_BB(mp, rec->ar_startblock);
+
+	irec.rm_startblock = rec->ar_startblock;
+	irec.rm_blockcount = rec->ar_blockcount;
+	irec.rm_owner = XFS_RMAP_OWN_NULL;	/* "free" */
+	irec.rm_offset = 0;
+	irec.rm_flags = 0;
+
+	return xfs_getfsmap_helper(tp, info, &irec, rec_daddr);
+}
+
 /* Execute a getfsmap query against the realtime device. */
 STATIC int
 __xfs_getfsmap_rtdev(
@@ -561,6 +562,7 @@ xfs_getfsmap_rtdev_rtbitmap(
 	return __xfs_getfsmap_rtdev(tp, keys, xfs_getfsmap_rtdev_rtbitmap_query,
 			info);
 }
+#endif /* CONFIG_XFS_RT */
 
 /* Execute a getfsmap query against the regular data device. */
 STATIC int
@@ -795,7 +797,15 @@ xfs_getfsmap_check_keys(
 	return false;
 }
 
+/*
+ * There are only two devices if we didn't configure RT devices at build time.
+ */
+#ifdef CONFIG_XFS_RT
 #define XFS_GETFSMAP_DEVS	3
+#else
+#define XFS_GETFSMAP_DEVS	2
+#endif /* CONFIG_XFS_RT */
+
 /*
  * Get filesystem's extents as described in head, and format for
  * output. Calls formatter to fill the user's buffer until all
@@ -853,10 +863,12 @@ xfs_getfsmap(
 		handlers[1].dev = new_encode_dev(mp->m_logdev_targp->bt_dev);
 		handlers[1].fn = xfs_getfsmap_logdev;
 	}
+#ifdef CONFIG_XFS_RT
 	if (mp->m_rtdev_targp) {
 		handlers[2].dev = new_encode_dev(mp->m_rtdev_targp->bt_dev);
 		handlers[2].fn = xfs_getfsmap_rtdev_rtbitmap;
 	}
+#endif /* CONFIG_XFS_RT */
 
 	xfs_sort(handlers, XFS_GETFSMAP_DEVS, sizeof(struct xfs_getfsmap_dev),
 			xfs_getfsmap_dev_compare);
diff --git a/fs/xfs/xfs_inode_item.c b/fs/xfs/xfs_inode_item.c
index a705f34b58fa..9bbc2d7cc8cb 100644
--- a/fs/xfs/xfs_inode_item.c
+++ b/fs/xfs/xfs_inode_item.c
@@ -364,6 +364,9 @@ xfs_inode_to_log_dinode(
 	to->di_dmstate = from->di_dmstate;
 	to->di_flags = from->di_flags;
 
+	/* log a dummy value to ensure log structure is fully initialised */
+	to->di_next_unlinked = NULLAGINO;
+
 	if (from->di_version == 3) {
 		to->di_changecount = inode->i_version;
 		to->di_crtime.t_sec = from->di_crtime.t_sec;
@@ -404,6 +407,11 @@ xfs_inode_item_format_core(
  * the second with the on-disk inode structure, and a possible third and/or
  * fourth with the inode data/extents/b-tree root and inode attributes
  * data/extents/b-tree root.
+ *
+ * Note: Always use the 64 bit inode log format structure so we don't
+ * leave an uninitialised hole in the format item on 64 bit systems. Log
+ * recovery on 32 bit systems handles this just fine, so there's no reason
+ * for not using an initialising the properly padded structure all the time.
  */
 STATIC void
 xfs_inode_item_format(
@@ -412,8 +420,8 @@ xfs_inode_item_format(
 {
 	struct xfs_inode_log_item *iip = INODE_ITEM(lip);
 	struct xfs_inode	*ip = iip->ili_inode;
-	struct xfs_inode_log_format *ilf;
 	struct xfs_log_iovec	*vecp = NULL;
+	struct xfs_inode_log_format *ilf;
 
 	ASSERT(ip->i_d.di_version > 1);
 
@@ -425,7 +433,17 @@ xfs_inode_item_format(
 	ilf->ilf_boffset = ip->i_imap.im_boffset;
 	ilf->ilf_fields = XFS_ILOG_CORE;
 	ilf->ilf_size = 2; /* format + core */
-	xlog_finish_iovec(lv, vecp, sizeof(struct xfs_inode_log_format));
+
+	/*
+	 * make sure we don't leak uninitialised data into the log in the case
+	 * when we don't log every field in the inode.
+	 */
+	ilf->ilf_dsize = 0;
+	ilf->ilf_asize = 0;
+	ilf->ilf_pad = 0;
+	uuid_copy(&ilf->ilf_u.ilfu_uuid, &uuid_null);
+
+	xlog_finish_iovec(lv, vecp, sizeof(*ilf));
 
 	xfs_inode_item_format_core(ip, lv, &vecp);
 	xfs_inode_item_format_data_fork(iip, ilf, lv, &vecp);
@@ -855,44 +873,29 @@ xfs_istale_done(
 }
 
 /*
- * convert an xfs_inode_log_format struct from either 32 or 64 bit versions
- * (which can have different field alignments) to the native version
+ * convert an xfs_inode_log_format struct from the old 32 bit version
+ * (which can have different field alignments) to the native 64 bit version
  */
 int
 xfs_inode_item_format_convert(
-	xfs_log_iovec_t		*buf,
-	xfs_inode_log_format_t	*in_f)
+	struct xfs_log_iovec		*buf,
+	struct xfs_inode_log_format	*in_f)
 {
-	if (buf->i_len == sizeof(xfs_inode_log_format_32_t)) {
-		xfs_inode_log_format_32_t *in_f32 = buf->i_addr;
-
-		in_f->ilf_type = in_f32->ilf_type;
-		in_f->ilf_size = in_f32->ilf_size;
-		in_f->ilf_fields = in_f32->ilf_fields;
-		in_f->ilf_asize = in_f32->ilf_asize;
-		in_f->ilf_dsize = in_f32->ilf_dsize;
-		in_f->ilf_ino = in_f32->ilf_ino;
-		/* copy biggest field of ilf_u */
-		uuid_copy(&in_f->ilf_u.ilfu_uuid, &in_f32->ilf_u.ilfu_uuid);
-		in_f->ilf_blkno = in_f32->ilf_blkno;
-		in_f->ilf_len = in_f32->ilf_len;
-		in_f->ilf_boffset = in_f32->ilf_boffset;
-		return 0;
-	} else if (buf->i_len == sizeof(xfs_inode_log_format_64_t)){
-		xfs_inode_log_format_64_t *in_f64 = buf->i_addr;
-
-		in_f->ilf_type = in_f64->ilf_type;
-		in_f->ilf_size = in_f64->ilf_size;
-		in_f->ilf_fields = in_f64->ilf_fields;
-		in_f->ilf_asize = in_f64->ilf_asize;
-		in_f->ilf_dsize = in_f64->ilf_dsize;
-		in_f->ilf_ino = in_f64->ilf_ino;
-		/* copy biggest field of ilf_u */
-		uuid_copy(&in_f->ilf_u.ilfu_uuid, &in_f64->ilf_u.ilfu_uuid);
-		in_f->ilf_blkno = in_f64->ilf_blkno;
-		in_f->ilf_len = in_f64->ilf_len;
-		in_f->ilf_boffset = in_f64->ilf_boffset;
-		return 0;
-	}
-	return -EFSCORRUPTED;
+	struct xfs_inode_log_format_32	*in_f32 = buf->i_addr;
+
+	if (buf->i_len != sizeof(*in_f32))
+		return -EFSCORRUPTED;
+
+	in_f->ilf_type = in_f32->ilf_type;
+	in_f->ilf_size = in_f32->ilf_size;
+	in_f->ilf_fields = in_f32->ilf_fields;
+	in_f->ilf_asize = in_f32->ilf_asize;
+	in_f->ilf_dsize = in_f32->ilf_dsize;
+	in_f->ilf_ino = in_f32->ilf_ino;
+	/* copy biggest field of ilf_u */
+	uuid_copy(&in_f->ilf_u.ilfu_uuid, &in_f32->ilf_u.ilfu_uuid);
+	in_f->ilf_blkno = in_f32->ilf_blkno;
+	in_f->ilf_len = in_f32->ilf_len;
+	in_f->ilf_boffset = in_f32->ilf_boffset;
+	return 0;
 }
diff --git a/fs/xfs/xfs_log.c b/fs/xfs/xfs_log.c
index c5107c7bc4bf..dc95a49d62e7 100644
--- a/fs/xfs/xfs_log.c
+++ b/fs/xfs/xfs_log.c
@@ -2515,7 +2515,7 @@ next_lv:
2515 if (lv) 2515 if (lv)
2516 vecp = lv->lv_iovecp; 2516 vecp = lv->lv_iovecp;
2517 } 2517 }
2518 if (record_cnt == 0 && ordered == false) { 2518 if (record_cnt == 0 && !ordered) {
2519 if (!lv) 2519 if (!lv)
2520 return 0; 2520 return 0;
2521 break; 2521 break;
diff --git a/fs/xfs/xfs_mount.c b/fs/xfs/xfs_mount.c
index ea7d4b4e50d0..e9727d0a541a 100644
--- a/fs/xfs/xfs_mount.c
+++ b/fs/xfs/xfs_mount.c
@@ -704,7 +704,7 @@ xfs_mountfs(
704 xfs_set_maxicount(mp); 704 xfs_set_maxicount(mp);
705 705
706 /* enable fail_at_unmount as default */ 706 /* enable fail_at_unmount as default */
707 mp->m_fail_unmount = 1; 707 mp->m_fail_unmount = true;
708 708
709 error = xfs_sysfs_init(&mp->m_kobj, &xfs_mp_ktype, NULL, mp->m_fsname); 709 error = xfs_sysfs_init(&mp->m_kobj, &xfs_mp_ktype, NULL, mp->m_fsname);
710 if (error) 710 if (error)
diff --git a/fs/xfs/xfs_ondisk.h b/fs/xfs/xfs_ondisk.h
index 0c381d71b242..0492436a053f 100644
--- a/fs/xfs/xfs_ondisk.h
+++ b/fs/xfs/xfs_ondisk.h
@@ -134,7 +134,7 @@ xfs_check_ondisk_structs(void)
134 XFS_CHECK_STRUCT_SIZE(struct xfs_icreate_log, 28); 134 XFS_CHECK_STRUCT_SIZE(struct xfs_icreate_log, 28);
135 XFS_CHECK_STRUCT_SIZE(struct xfs_ictimestamp, 8); 135 XFS_CHECK_STRUCT_SIZE(struct xfs_ictimestamp, 8);
136 XFS_CHECK_STRUCT_SIZE(struct xfs_inode_log_format_32, 52); 136 XFS_CHECK_STRUCT_SIZE(struct xfs_inode_log_format_32, 52);
137 XFS_CHECK_STRUCT_SIZE(struct xfs_inode_log_format_64, 56); 137 XFS_CHECK_STRUCT_SIZE(struct xfs_inode_log_format, 56);
138 XFS_CHECK_STRUCT_SIZE(struct xfs_qoff_logformat, 20); 138 XFS_CHECK_STRUCT_SIZE(struct xfs_qoff_logformat, 20);
139 XFS_CHECK_STRUCT_SIZE(struct xfs_trans_header, 16); 139 XFS_CHECK_STRUCT_SIZE(struct xfs_trans_header, 16);
140} 140}
diff --git a/fs/xfs/xfs_super.c b/fs/xfs/xfs_super.c
index 584cf2d573ba..f663022353c0 100644
--- a/fs/xfs/xfs_super.c
+++ b/fs/xfs/xfs_super.c
@@ -1637,7 +1637,7 @@ xfs_fs_fill_super(
1637 1637
1638 /* version 5 superblocks support inode version counters. */ 1638 /* version 5 superblocks support inode version counters. */
1639 if (XFS_SB_VERSION_NUM(&mp->m_sb) == XFS_SB_VERSION_5) 1639 if (XFS_SB_VERSION_NUM(&mp->m_sb) == XFS_SB_VERSION_5)
1640 sb->s_flags |= MS_I_VERSION; 1640 sb->s_flags |= SB_I_VERSION;
1641 1641
1642 if (mp->m_flags & XFS_MOUNT_DAX) { 1642 if (mp->m_flags & XFS_MOUNT_DAX) {
1643 xfs_warn(mp, 1643 xfs_warn(mp,