author    Dan Williams <dan.j.williams@intel.com>	2016-01-10 10:53:55 -0500
committer Dan Williams <dan.j.williams@intel.com>	2016-01-10 10:53:55 -0500
commit    8b63b6bfc1a551acf154061699028c7032d7890c (patch)
tree      16882e9bc9e35eacb870a6d8a71617e579c4ffdc /fs
parent    e07ecd76d4db7bda1e9495395b2110a3fe28845a (diff)
parent    55f5560d8c18fe33fc169f8d244a9247dcac7612 (diff)
Merge branch 'for-4.5/block-dax' into for-4.5/libnvdimm
Diffstat (limited to 'fs')
-rw-r--r--	fs/9p/vfs_inode.c            |   4
-rw-r--r--	fs/block_dev.c               | 131
-rw-r--r--	fs/cifs/inode.c              |   6
-rw-r--r--	fs/direct-io.c               |   1
-rw-r--r--	fs/exofs/inode.c             |   5
-rw-r--r--	fs/ext4/crypto.c             |   2
-rw-r--r--	fs/ext4/ext4.h               |  51
-rw-r--r--	fs/ext4/symlink.c            |   2
-rw-r--r--	fs/ext4/sysfs.c              |   2
-rw-r--r--	fs/fuse/cuse.c               |   2
-rw-r--r--	fs/fuse/file.c               |   2
-rw-r--r--	fs/jbd2/transaction.c        |  12
-rw-r--r--	fs/nfs/callback_xdr.c        |   7
-rw-r--r--	fs/nfs/inode.c               |   6
-rw-r--r--	fs/nfs/internal.h            |   2
-rw-r--r--	fs/nfs/objlayout/objio_osd.c |   5
-rw-r--r--	fs/nfs/pagelist.c            |   2
-rw-r--r--	fs/nfs/pnfs.c                |   4
-rw-r--r--	fs/ocfs2/namei.c             |   4
19 files changed, 190 insertions, 60 deletions
diff --git a/fs/9p/vfs_inode.c b/fs/9p/vfs_inode.c
index 699941e90667..511078586fa1 100644
--- a/fs/9p/vfs_inode.c
+++ b/fs/9p/vfs_inode.c
@@ -451,9 +451,9 @@ void v9fs_evict_inode(struct inode *inode)
 {
 	struct v9fs_inode *v9inode = V9FS_I(inode);
 
-	truncate_inode_pages_final(inode->i_mapping);
+	truncate_inode_pages_final(&inode->i_data);
 	clear_inode(inode);
-	filemap_fdatawrite(inode->i_mapping);
+	filemap_fdatawrite(&inode->i_data);
 
 	v9fs_cache_inode_put_cookie(inode);
 	/* clunk the fid stashed in writeback_fid */
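
The fix above matters because inode->i_mapping can be redirected away from the inode's own page cache (a device node, for instance, points it at the backing device's pages), whereas &inode->i_data is always the address_space embedded in this inode; eviction should flush and truncate the inode's own pages. A minimal illustrative snippet, not part of the patch:

	/* Illustration only: the two address_space handles an eviction path
	 * could pick from.  i_data is embedded in the inode itself; i_mapping
	 * normally points at it but may be redirected elsewhere. */
	struct address_space *own_pages = &inode->i_data;    /* always this inode's pages */
	struct address_space *effective = inode->i_mapping;   /* possibly another inode's */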
diff --git a/fs/block_dev.c b/fs/block_dev.c
index c25639e907bd..5c0b2cba870e 100644
--- a/fs/block_dev.c
+++ b/fs/block_dev.c
@@ -156,11 +156,16 @@ blkdev_get_block(struct inode *inode, sector_t iblock,
 	return 0;
 }
 
+static struct inode *bdev_file_inode(struct file *file)
+{
+	return file->f_mapping->host;
+}
+
 static ssize_t
 blkdev_direct_IO(struct kiocb *iocb, struct iov_iter *iter, loff_t offset)
 {
 	struct file *file = iocb->ki_filp;
-	struct inode *inode = file->f_mapping->host;
+	struct inode *inode = bdev_file_inode(file);
 
 	if (IS_DAX(inode))
 		return dax_do_io(iocb, inode, iter, offset, blkdev_get_block,
@@ -338,7 +343,7 @@ static int blkdev_write_end(struct file *file, struct address_space *mapping,
  */
 static loff_t block_llseek(struct file *file, loff_t offset, int whence)
 {
-	struct inode *bd_inode = file->f_mapping->host;
+	struct inode *bd_inode = bdev_file_inode(file);
 	loff_t retval;
 
 	mutex_lock(&bd_inode->i_mutex);
@@ -349,7 +354,7 @@ static loff_t block_llseek(struct file *file, loff_t offset, int whence)
 
 int blkdev_fsync(struct file *filp, loff_t start, loff_t end, int datasync)
 {
-	struct inode *bd_inode = filp->f_mapping->host;
+	struct inode *bd_inode = bdev_file_inode(filp);
 	struct block_device *bdev = I_BDEV(bd_inode);
 	int error;
 
@@ -1230,8 +1235,11 @@ static int __blkdev_get(struct block_device *bdev, fmode_t mode, int for_part)
 		}
 	}
 
-	if (!ret)
+	if (!ret) {
 		bd_set_size(bdev,(loff_t)get_capacity(disk)<<9);
+		if (!blkdev_dax_capable(bdev))
+			bdev->bd_inode->i_flags &= ~S_DAX;
+	}
 
 	/*
 	 * If the device is invalidated, rescan partition
@@ -1245,6 +1253,7 @@ static int __blkdev_get(struct block_device *bdev, fmode_t mode, int for_part)
 			else if (ret == -ENOMEDIUM)
 				invalidate_partitions(disk, bdev);
 		}
+
 		if (ret)
 			goto out_clear;
 	} else {
@@ -1265,12 +1274,7 @@ static int __blkdev_get(struct block_device *bdev, fmode_t mode, int for_part)
 				goto out_clear;
 			}
 			bd_set_size(bdev, (loff_t)bdev->bd_part->nr_sects << 9);
-			/*
-			 * If the partition is not aligned on a page
-			 * boundary, we can't do dax I/O to it.
-			 */
-			if ((bdev->bd_part->start_sect % (PAGE_SIZE / 512)) ||
-			    (bdev->bd_part->nr_sects % (PAGE_SIZE / 512)))
+			if (!blkdev_dax_capable(bdev))
 				bdev->bd_inode->i_flags &= ~S_DAX;
 		}
 	} else {
@@ -1523,11 +1527,14 @@ static void __blkdev_put(struct block_device *bdev, fmode_t mode, int for_part)
 		WARN_ON_ONCE(bdev->bd_holders);
 		sync_blockdev(bdev);
 		kill_bdev(bdev);
+
+		bdev_write_inode(bdev);
 		/*
-		 * ->release can cause the queue to disappear, so flush all
-		 * dirty data before.
+		 * Detaching bdev inode from its wb in __destroy_inode()
+		 * is too late: the queue which embeds its bdi (along with
+		 * root wb) can be gone as soon as we put_disk() below.
 		 */
-		bdev_write_inode(bdev);
+		inode_detach_wb(bdev->bd_inode);
 	}
 	if (bdev->bd_contains == bdev) {
 		if (disk->fops->release)
@@ -1602,14 +1609,14 @@ EXPORT_SYMBOL(blkdev_put);
 
 static int blkdev_close(struct inode * inode, struct file * filp)
 {
-	struct block_device *bdev = I_BDEV(filp->f_mapping->host);
+	struct block_device *bdev = I_BDEV(bdev_file_inode(filp));
 	blkdev_put(bdev, filp->f_mode);
 	return 0;
 }
 
 static long block_ioctl(struct file *file, unsigned cmd, unsigned long arg)
 {
-	struct block_device *bdev = I_BDEV(file->f_mapping->host);
+	struct block_device *bdev = I_BDEV(bdev_file_inode(file));
 	fmode_t mode = file->f_mode;
 
 	/*
@@ -1634,7 +1641,7 @@ static long block_ioctl(struct file *file, unsigned cmd, unsigned long arg)
 ssize_t blkdev_write_iter(struct kiocb *iocb, struct iov_iter *from)
 {
 	struct file *file = iocb->ki_filp;
-	struct inode *bd_inode = file->f_mapping->host;
+	struct inode *bd_inode = bdev_file_inode(file);
 	loff_t size = i_size_read(bd_inode);
 	struct blk_plug plug;
 	ssize_t ret;
@@ -1666,7 +1673,7 @@ EXPORT_SYMBOL_GPL(blkdev_write_iter);
 ssize_t blkdev_read_iter(struct kiocb *iocb, struct iov_iter *to)
 {
 	struct file *file = iocb->ki_filp;
-	struct inode *bd_inode = file->f_mapping->host;
+	struct inode *bd_inode = bdev_file_inode(file);
 	loff_t size = i_size_read(bd_inode);
 	loff_t pos = iocb->ki_pos;
 
@@ -1705,13 +1712,101 @@ static const struct address_space_operations def_blk_aops = {
 	.is_dirty_writeback = buffer_check_dirty_writeback,
 };
 
+#ifdef CONFIG_FS_DAX
+/*
+ * In the raw block case we do not need to contend with truncation nor
+ * unwritten file extents.  Without those concerns there is no need for
+ * additional locking beyond the mmap_sem context that these routines
+ * are already executing under.
+ *
+ * Note, there is no protection if the block device is dynamically
+ * resized (partition grow/shrink) during a fault.  A stable block device
+ * size is already not enforced in the blkdev_direct_IO path.
+ *
+ * For DAX, it is the responsibility of the block device driver to
+ * ensure the whole-disk device size is stable while requests are in
+ * flight.
+ *
+ * Finally, unlike the filemap_page_mkwrite() case there is no
+ * filesystem superblock to sync against freezing.  We still include a
+ * pfn_mkwrite callback for dax drivers to receive write fault
+ * notifications.
+ */
+static int blkdev_dax_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
+{
+	return __dax_fault(vma, vmf, blkdev_get_block, NULL);
+}
+
+static int blkdev_dax_pmd_fault(struct vm_area_struct *vma, unsigned long addr,
+		pmd_t *pmd, unsigned int flags)
+{
+	return __dax_pmd_fault(vma, addr, pmd, flags, blkdev_get_block, NULL);
+}
+
+static void blkdev_vm_open(struct vm_area_struct *vma)
+{
+	struct inode *bd_inode = bdev_file_inode(vma->vm_file);
+	struct block_device *bdev = I_BDEV(bd_inode);
+
+	mutex_lock(&bd_inode->i_mutex);
+	bdev->bd_map_count++;
+	mutex_unlock(&bd_inode->i_mutex);
+}
+
+static void blkdev_vm_close(struct vm_area_struct *vma)
+{
+	struct inode *bd_inode = bdev_file_inode(vma->vm_file);
+	struct block_device *bdev = I_BDEV(bd_inode);
+
+	mutex_lock(&bd_inode->i_mutex);
+	bdev->bd_map_count--;
+	mutex_unlock(&bd_inode->i_mutex);
+}
+
+static const struct vm_operations_struct blkdev_dax_vm_ops = {
+	.open = blkdev_vm_open,
+	.close = blkdev_vm_close,
+	.fault = blkdev_dax_fault,
+	.pmd_fault = blkdev_dax_pmd_fault,
+	.pfn_mkwrite = blkdev_dax_fault,
+};
+
+static const struct vm_operations_struct blkdev_default_vm_ops = {
+	.open = blkdev_vm_open,
+	.close = blkdev_vm_close,
+	.fault = filemap_fault,
+	.map_pages = filemap_map_pages,
+};
+
+static int blkdev_mmap(struct file *file, struct vm_area_struct *vma)
+{
+	struct inode *bd_inode = bdev_file_inode(file);
+	struct block_device *bdev = I_BDEV(bd_inode);
+
+	file_accessed(file);
+	mutex_lock(&bd_inode->i_mutex);
+	bdev->bd_map_count++;
+	if (IS_DAX(bd_inode)) {
+		vma->vm_ops = &blkdev_dax_vm_ops;
+		vma->vm_flags |= VM_MIXEDMAP | VM_HUGEPAGE;
+	} else {
+		vma->vm_ops = &blkdev_default_vm_ops;
+	}
+	mutex_unlock(&bd_inode->i_mutex);
+
+	return 0;
+}
+#else
+#define blkdev_mmap generic_file_mmap
+#endif
+
 const struct file_operations def_blk_fops = {
 	.open = blkdev_open,
 	.release = blkdev_close,
 	.llseek = block_llseek,
 	.read_iter = blkdev_read_iter,
 	.write_iter = blkdev_write_iter,
-	.mmap = generic_file_mmap,
+	.mmap = blkdev_mmap,
 	.fsync = blkdev_fsync,
 	.unlocked_ioctl = block_ioctl,
 #ifdef CONFIG_COMPAT
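
Two things to note about the fs/block_dev.c hunks above: the open-coded partition-alignment test removed from __blkdev_get() is folded into a single blkdev_dax_capable() check, and the new blkdev_mmap() path counts live mappings in bd_map_count under i_mutex before choosing DAX or page-cache vm_ops. The helper itself is not part of this diff; the sketch below is only a guess at its shape, reconstructed from the alignment test the patch deletes, so treat the details as assumptions rather than the real implementation:

	/* Hypothetical sketch of a DAX capability check, inferred from the
	 * removed open-coded test; the real blkdev_dax_capable() may differ. */
	static bool blkdev_dax_capable_sketch(struct block_device *bdev)
	{
		struct hd_struct *part = bdev->bd_part;

		if (!IS_ENABLED(CONFIG_FS_DAX))
			return false;
		/* DAX mappings are page-granular, so a partition must start and
		 * end on a page boundary (PAGE_SIZE / 512 sectors per page). */
		if (part && ((part->start_sect % (PAGE_SIZE / 512)) ||
			     (part->nr_sects % (PAGE_SIZE / 512))))
			return false;
		return true;
	}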
diff --git a/fs/cifs/inode.c b/fs/cifs/inode.c
index 6b66dd5d1540..a329f5ba35aa 100644
--- a/fs/cifs/inode.c
+++ b/fs/cifs/inode.c
@@ -1831,11 +1831,11 @@ cifs_invalidate_mapping(struct inode *inode)
  * @word: long word containing the bit lock
  */
 static int
-cifs_wait_bit_killable(struct wait_bit_key *key)
+cifs_wait_bit_killable(struct wait_bit_key *key, int mode)
 {
-	if (fatal_signal_pending(current))
-		return -ERESTARTSYS;
 	freezable_schedule_unsafe();
+	if (signal_pending_state(mode, current))
+		return -ERESTARTSYS;
 	return 0;
 }
 
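
The cifs_wait_bit_killable() change above (the same conversion is applied to nfs_wait_bit_killable() and pnfs_layoutget_retry_bit_wait() further down) moves to a wait-bit action callback that receives the sleep mode, so the signal check can use signal_pending_state() against the state the task actually slept in rather than testing only fatal signals before sleeping. A hedged usage sketch; the flags word and bit number are placeholders, not taken from this patch:

	/* Illustration only: driving such a callback through the generic
	 * wait-on-bit helper with an explicit sleep state. */
	unsigned long flags = 0;
	#define EXAMPLE_BIT 0
	int ret = out_of_line_wait_on_bit(&flags, EXAMPLE_BIT,
					  nfs_wait_bit_killable, TASK_KILLABLE);
	if (ret == -ERESTARTSYS)
		;	/* interrupted by a signal permitted in TASK_KILLABLE */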
diff --git a/fs/direct-io.c b/fs/direct-io.c
index 1c75a3a07f8f..602e8441bc0f 100644
--- a/fs/direct-io.c
+++ b/fs/direct-io.c
@@ -1175,6 +1175,7 @@ do_blockdev_direct_IO(struct kiocb *iocb, struct inode *inode,
 		if (dio->flags & DIO_LOCKING)
 			mutex_unlock(&inode->i_mutex);
 		kmem_cache_free(dio_cache, dio);
+		retval = 0;
 		goto out;
 	}
 
diff --git a/fs/exofs/inode.c b/fs/exofs/inode.c
index 73c64daa0f55..60f03b78914e 100644
--- a/fs/exofs/inode.c
+++ b/fs/exofs/inode.c
@@ -592,10 +592,7 @@ static struct page *__r4w_get_page(void *priv, u64 offset, bool *uptodate)
 		}
 		unlock_page(page);
 	}
-	if (PageDirty(page) || PageWriteback(page))
-		*uptodate = true;
-	else
-		*uptodate = PageUptodate(page);
+	*uptodate = PageUptodate(page);
 	EXOFS_DBGMSG2("index=0x%lx uptodate=%d\n", index, *uptodate);
 	return page;
 	} else {
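
The simplification above (repeated for the objio_osd copy of __r4w_get_page() later in this diff) relies on the invariant that a page cannot be dirty or under writeback without also being uptodate, so returning PageUptodate() alone is expected to be equivalent to the removed special-casing. Expressed as an illustrative assertion, not code from the patch:

	/* Illustration only: dirty/writeback pages should already be uptodate. */
	if (PageDirty(page) || PageWriteback(page))
		WARN_ON_ONCE(!PageUptodate(page));
	*uptodate = PageUptodate(page);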
diff --git a/fs/ext4/crypto.c b/fs/ext4/crypto.c
index af06830bfc00..1a0835073663 100644
--- a/fs/ext4/crypto.c
+++ b/fs/ext4/crypto.c
@@ -389,7 +389,7 @@ int ext4_encrypted_zeroout(struct inode *inode, struct ext4_extent *ex)
 	struct ext4_crypto_ctx *ctx;
 	struct page *ciphertext_page = NULL;
 	struct bio *bio;
-	ext4_lblk_t lblk = ex->ee_block;
+	ext4_lblk_t lblk = le32_to_cpu(ex->ee_block);
 	ext4_fsblk_t pblk = ext4_ext_pblock(ex);
 	unsigned int len = ext4_ext_get_actual_len(ex);
 	int ret, err = 0;
diff --git a/fs/ext4/ext4.h b/fs/ext4/ext4.h
index 750063f7a50c..cc7ca4e87144 100644
--- a/fs/ext4/ext4.h
+++ b/fs/ext4/ext4.h
@@ -26,6 +26,7 @@
 #include <linux/seqlock.h>
 #include <linux/mutex.h>
 #include <linux/timer.h>
+#include <linux/version.h>
 #include <linux/wait.h>
 #include <linux/blockgroup_lock.h>
 #include <linux/percpu_counter.h>
@@ -727,19 +728,55 @@ struct move_extent {
 	    <= (EXT4_GOOD_OLD_INODE_SIZE +			\
 		(einode)->i_extra_isize))			\
 
+/*
+ * We use an encoding that preserves the times for extra epoch "00":
+ *
+ * extra  msb of                         adjust for signed
+ * epoch  32-bit                         32-bit tv_sec to
+ * bits   time    decoded 64-bit tv_sec  64-bit tv_sec      valid time range
+ * 0 0    1    -0x80000000..-0x00000001  0x000000000 1901-12-13..1969-12-31
+ * 0 0    0    0x000000000..0x07fffffff  0x000000000 1970-01-01..2038-01-19
+ * 0 1    1    0x080000000..0x0ffffffff  0x100000000 2038-01-19..2106-02-07
+ * 0 1    0    0x100000000..0x17fffffff  0x100000000 2106-02-07..2174-02-25
+ * 1 0    1    0x180000000..0x1ffffffff  0x200000000 2174-02-25..2242-03-16
+ * 1 0    0    0x200000000..0x27fffffff  0x200000000 2242-03-16..2310-04-04
+ * 1 1    1    0x280000000..0x2ffffffff  0x300000000 2310-04-04..2378-04-22
+ * 1 1    0    0x300000000..0x37fffffff  0x300000000 2378-04-22..2446-05-10
+ *
+ * Note that previous versions of the kernel on 64-bit systems would
+ * incorrectly use extra epoch bits 1,1 for dates between 1901 and
+ * 1970.  e2fsck will correct this, assuming that it is run on the
+ * affected filesystem before 2242.
+ */
+
 static inline __le32 ext4_encode_extra_time(struct timespec *time)
 {
-	return cpu_to_le32((sizeof(time->tv_sec) > 4 ?
-			   (time->tv_sec >> 32) & EXT4_EPOCH_MASK : 0) |
-			   ((time->tv_nsec << EXT4_EPOCH_BITS) & EXT4_NSEC_MASK));
+	u32 extra = sizeof(time->tv_sec) > 4 ?
+		((time->tv_sec - (s32)time->tv_sec) >> 32) & EXT4_EPOCH_MASK : 0;
+	return cpu_to_le32(extra | (time->tv_nsec << EXT4_EPOCH_BITS));
 }
 
 static inline void ext4_decode_extra_time(struct timespec *time, __le32 extra)
 {
-	if (sizeof(time->tv_sec) > 4)
-		time->tv_sec |= (__u64)(le32_to_cpu(extra) & EXT4_EPOCH_MASK)
-				<< 32;
-	time->tv_nsec = (le32_to_cpu(extra) & EXT4_NSEC_MASK) >> EXT4_EPOCH_BITS;
+	if (unlikely(sizeof(time->tv_sec) > 4 &&
+			(extra & cpu_to_le32(EXT4_EPOCH_MASK)))) {
+#if LINUX_VERSION_CODE < KERNEL_VERSION(4,20,0)
+		/* Handle legacy encoding of pre-1970 dates with epoch
+		 * bits 1,1.  We assume that by kernel version 4.20,
+		 * everyone will have run fsck over the affected
+		 * filesystems to correct the problem.  (This
+		 * backwards compatibility may be removed before this
+		 * time, at the discretion of the ext4 developers.)
+		 */
+		u64 extra_bits = le32_to_cpu(extra) & EXT4_EPOCH_MASK;
+		if (extra_bits == 3 && ((time->tv_sec) & 0x80000000) != 0)
+			extra_bits = 0;
+		time->tv_sec += extra_bits << 32;
+#else
+		time->tv_sec += (u64)(le32_to_cpu(extra) & EXT4_EPOCH_MASK) << 32;
+#endif
+	}
+	time->tv_nsec = (le32_to_cpu(extra) & EXT4_NSEC_MASK) >> EXT4_EPOCH_BITS;
 }
 
 #define EXT4_INODE_SET_XTIME(xtime, inode, raw_inode)			\
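
A worked example of the encoding documented in the comment block above, using the first second past the signed 32-bit rollover (2038-01-19 03:14:08 UTC): the low 32 bits are stored unchanged and decode to a negative value, and extra epoch bits of 1 add 2^32 back. The snippet below is a standalone userspace illustration that open-codes EXT4_EPOCH_MASK as 0x3; it is not kernel code from this patch:

	#include <stdint.h>
	#include <stdio.h>

	int main(void)
	{
		int64_t tv_sec = 0x80000000LL;        /* 2038-01-19 03:14:08 UTC */
		uint32_t on_disk = (uint32_t)tv_sec;  /* low 32 bits, as stored */
		/* encode: epoch bits are bits 33:32 of (tv_sec - (s32)tv_sec) */
		uint32_t epoch = (uint32_t)(((uint64_t)(tv_sec - (int32_t)tv_sec)) >> 32) & 0x3;
		/* decode: sign-extend the stored seconds, then add epoch << 32 */
		int64_t decoded = (int64_t)(int32_t)on_disk + ((int64_t)epoch << 32);

		printf("epoch=%u decoded=%lld\n", epoch, (long long)decoded);
		return 0;                             /* prints: epoch=1 decoded=2147483648 */
	}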
diff --git a/fs/ext4/symlink.c b/fs/ext4/symlink.c
index abe2401ce405..e8e7af62ac95 100644
--- a/fs/ext4/symlink.c
+++ b/fs/ext4/symlink.c
@@ -52,7 +52,7 @@ static const char *ext4_encrypted_follow_link(struct dentry *dentry, void **cook
 	/* Symlink is encrypted */
 	sd = (struct ext4_encrypted_symlink_data *)caddr;
 	cstr.name = sd->encrypted_path;
-	cstr.len = le32_to_cpu(sd->len);
+	cstr.len = le16_to_cpu(sd->len);
 	if ((cstr.len +
 	     sizeof(struct ext4_encrypted_symlink_data) - 1) >
 	    max_size) {
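
Both ext4 endianness fixes in this merge (ee_block in crypto.c above and sd->len here) correct the conversion helper to match the width and byte order of an on-disk little-endian field; in the symlink case the old le32_to_cpu() also read past the 16-bit length. An illustrative layout sketch follows (field names follow the diff, the exact struct shape is an assumption):

	/* Illustration only: len is a 16-bit little-endian length, so
	 * le16_to_cpu() matches it, while le32_to_cpu() would also fold in
	 * bytes of the ciphertext that follows. */
	struct encrypted_symlink_data_sketch {
		__le16 len;               /* length of the encrypted path */
		char   encrypted_path[1]; /* ciphertext bytes follow */
	} __attribute__((packed));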
diff --git a/fs/ext4/sysfs.c b/fs/ext4/sysfs.c
index 1b57c72f4a00..1420a3c614af 100644
--- a/fs/ext4/sysfs.c
+++ b/fs/ext4/sysfs.c
@@ -358,7 +358,7 @@ static int name##_open(struct inode *inode, struct file *file) \
 	return single_open(file, ext4_seq_##name##_show, PDE_DATA(inode)); \
 } \
 \
-const struct file_operations ext4_seq_##name##_fops = { \
+static const struct file_operations ext4_seq_##name##_fops = { \
 	.owner = THIS_MODULE, \
 	.open = name##_open, \
 	.read = seq_read, \
diff --git a/fs/fuse/cuse.c b/fs/fuse/cuse.c
index eae2c11268bc..8e3ee1936c7e 100644
--- a/fs/fuse/cuse.c
+++ b/fs/fuse/cuse.c
@@ -549,6 +549,8 @@ static int cuse_channel_release(struct inode *inode, struct file *file)
 		unregister_chrdev_region(cc->cdev->dev, 1);
 		cdev_del(cc->cdev);
 	}
+	/* Base reference is now owned by "fud" */
+	fuse_conn_put(&cc->fc);
 
 	rc = fuse_dev_release(inode, file);	/* puts the base reference */
 
diff --git a/fs/fuse/file.c b/fs/fuse/file.c
index e0faf8f2c868..570ca4053c80 100644
--- a/fs/fuse/file.c
+++ b/fs/fuse/file.c
@@ -1049,6 +1049,7 @@ static ssize_t fuse_fill_write_pages(struct fuse_req *req,
 		tmp = iov_iter_copy_from_user_atomic(page, ii, offset, bytes);
 		flush_dcache_page(page);
 
+		iov_iter_advance(ii, tmp);
 		if (!tmp) {
 			unlock_page(page);
 			page_cache_release(page);
@@ -1061,7 +1062,6 @@ static ssize_t fuse_fill_write_pages(struct fuse_req *req,
 		req->page_descs[req->num_pages].length = tmp;
 		req->num_pages++;
 
-		iov_iter_advance(ii, tmp);
 		count += tmp;
 		pos += tmp;
 		offset += tmp;
diff --git a/fs/jbd2/transaction.c b/fs/jbd2/transaction.c
index 89463eee6791..ca181e81c765 100644
--- a/fs/jbd2/transaction.c
+++ b/fs/jbd2/transaction.c
@@ -1009,7 +1009,8 @@ out:
 }
 
 /* Fast check whether buffer is already attached to the required transaction */
-static bool jbd2_write_access_granted(handle_t *handle, struct buffer_head *bh)
+static bool jbd2_write_access_granted(handle_t *handle, struct buffer_head *bh,
+				      bool undo)
 {
 	struct journal_head *jh;
 	bool ret = false;
@@ -1036,6 +1037,9 @@ static bool jbd2_write_access_granted(handle_t *handle, struct buffer_head *bh)
 	jh = READ_ONCE(bh->b_private);
 	if (!jh)
 		goto out;
+	/* For undo access buffer must have data copied */
+	if (undo && !jh->b_committed_data)
+		goto out;
 	if (jh->b_transaction != handle->h_transaction &&
 	    jh->b_next_transaction != handle->h_transaction)
 		goto out;
@@ -1073,7 +1077,7 @@ int jbd2_journal_get_write_access(handle_t *handle, struct buffer_head *bh)
 	struct journal_head *jh;
 	int rc;
 
-	if (jbd2_write_access_granted(handle, bh))
+	if (jbd2_write_access_granted(handle, bh, false))
 		return 0;
 
 	jh = jbd2_journal_add_journal_head(bh);
@@ -1210,7 +1214,7 @@ int jbd2_journal_get_undo_access(handle_t *handle, struct buffer_head *bh)
 	char *committed_data = NULL;
 
 	JBUFFER_TRACE(jh, "entry");
-	if (jbd2_write_access_granted(handle, bh))
+	if (jbd2_write_access_granted(handle, bh, true))
 		return 0;
 
 	jh = jbd2_journal_add_journal_head(bh);
@@ -2152,6 +2156,7 @@ static int journal_unmap_buffer(journal_t *journal, struct buffer_head *bh,
 
 		if (!buffer_dirty(bh)) {
 			/* bdflush has written it.  We can drop it now */
+			__jbd2_journal_remove_checkpoint(jh);
 			goto zap_buffer;
 		}
 
@@ -2181,6 +2186,7 @@ static int journal_unmap_buffer(journal_t *journal, struct buffer_head *bh,
 			/* The orphan record's transaction has
 			 * committed.  We can cleanse this buffer */
 			clear_buffer_jbddirty(bh);
+			__jbd2_journal_remove_checkpoint(jh);
 			goto zap_buffer;
 		}
 	}
diff --git a/fs/nfs/callback_xdr.c b/fs/nfs/callback_xdr.c
index beac58b0e09c..646cdac73488 100644
--- a/fs/nfs/callback_xdr.c
+++ b/fs/nfs/callback_xdr.c
@@ -78,8 +78,7 @@ static __be32 *read_buf(struct xdr_stream *xdr, int nbytes)
 
 	p = xdr_inline_decode(xdr, nbytes);
 	if (unlikely(p == NULL))
-		printk(KERN_WARNING "NFS: NFSv4 callback reply buffer overflowed "
-				"or truncated request.\n");
+		printk(KERN_WARNING "NFS: NFSv4 callback reply buffer overflowed!\n");
 	return p;
 }
 
@@ -890,7 +889,6 @@ static __be32 nfs4_callback_compound(struct svc_rqst *rqstp, void *argp, void *r
 	struct cb_compound_hdr_arg hdr_arg = { 0 };
 	struct cb_compound_hdr_res hdr_res = { NULL };
 	struct xdr_stream xdr_in, xdr_out;
-	struct xdr_buf *rq_arg = &rqstp->rq_arg;
 	__be32 *p, status;
 	struct cb_process_state cps = {
 		.drc_status = 0,
@@ -902,8 +900,7 @@ static __be32 nfs4_callback_compound(struct svc_rqst *rqstp, void *argp, void *r
 
 	dprintk("%s: start\n", __func__);
 
-	rq_arg->len = rq_arg->head[0].iov_len + rq_arg->page_len;
-	xdr_init_decode(&xdr_in, rq_arg, rq_arg->head[0].iov_base);
+	xdr_init_decode(&xdr_in, &rqstp->rq_arg, rqstp->rq_arg.head[0].iov_base);
 
 	p = (__be32*)((char *)rqstp->rq_res.head[0].iov_base + rqstp->rq_res.head[0].iov_len);
 	xdr_init_encode(&xdr_out, &rqstp->rq_res, p);
diff --git a/fs/nfs/inode.c b/fs/nfs/inode.c
index 31b0a52223a7..c7e8b87da5b2 100644
--- a/fs/nfs/inode.c
+++ b/fs/nfs/inode.c
@@ -75,11 +75,11 @@ nfs_fattr_to_ino_t(struct nfs_fattr *fattr)
  * nfs_wait_bit_killable - helper for functions that are sleeping on bit locks
  * @word: long word containing the bit lock
  */
-int nfs_wait_bit_killable(struct wait_bit_key *key)
+int nfs_wait_bit_killable(struct wait_bit_key *key, int mode)
 {
-	if (fatal_signal_pending(current))
-		return -ERESTARTSYS;
 	freezable_schedule_unsafe();
+	if (signal_pending_state(mode, current))
+		return -ERESTARTSYS;
 	return 0;
 }
 EXPORT_SYMBOL_GPL(nfs_wait_bit_killable);
diff --git a/fs/nfs/internal.h b/fs/nfs/internal.h
index 56cfde26fb9c..9dea85f7f918 100644
--- a/fs/nfs/internal.h
+++ b/fs/nfs/internal.h
@@ -379,7 +379,7 @@ extern int nfs_drop_inode(struct inode *);
 extern void nfs_clear_inode(struct inode *);
 extern void nfs_evict_inode(struct inode *);
 void nfs_zap_acl_cache(struct inode *inode);
-extern int nfs_wait_bit_killable(struct wait_bit_key *key);
+extern int nfs_wait_bit_killable(struct wait_bit_key *key, int mode);
 
 /* super.c */
 extern const struct super_operations nfs_sops;
diff --git a/fs/nfs/objlayout/objio_osd.c b/fs/nfs/objlayout/objio_osd.c
index 5c0c6b58157f..9aebffb40505 100644
--- a/fs/nfs/objlayout/objio_osd.c
+++ b/fs/nfs/objlayout/objio_osd.c
@@ -476,10 +476,7 @@ static struct page *__r4w_get_page(void *priv, u64 offset, bool *uptodate)
 		}
 		unlock_page(page);
 	}
-	if (PageDirty(page) || PageWriteback(page))
-		*uptodate = true;
-	else
-		*uptodate = PageUptodate(page);
+	*uptodate = PageUptodate(page);
 	dprintk("%s: index=0x%lx uptodate=%d\n", __func__, index, *uptodate);
 	return page;
 }
diff --git a/fs/nfs/pagelist.c b/fs/nfs/pagelist.c
index fe3ddd20ff89..452a011ba0d8 100644
--- a/fs/nfs/pagelist.c
+++ b/fs/nfs/pagelist.c
@@ -129,7 +129,7 @@ __nfs_iocounter_wait(struct nfs_io_counter *c)
 		set_bit(NFS_IO_INPROGRESS, &c->flags);
 		if (atomic_read(&c->io_count) == 0)
 			break;
-		ret = nfs_wait_bit_killable(&q.key);
+		ret = nfs_wait_bit_killable(&q.key, TASK_KILLABLE);
 	} while (atomic_read(&c->io_count) != 0 && !ret);
 	finish_wait(wq, &q.wait);
 	return ret;
diff --git a/fs/nfs/pnfs.c b/fs/nfs/pnfs.c
index 5a8ae2125b50..bec0384499f7 100644
--- a/fs/nfs/pnfs.c
+++ b/fs/nfs/pnfs.c
@@ -1466,11 +1466,11 @@ static bool pnfs_within_mdsthreshold(struct nfs_open_context *ctx,
 }
 
 /* stop waiting if someone clears NFS_LAYOUT_RETRY_LAYOUTGET bit. */
-static int pnfs_layoutget_retry_bit_wait(struct wait_bit_key *key)
+static int pnfs_layoutget_retry_bit_wait(struct wait_bit_key *key, int mode)
 {
 	if (!test_bit(NFS_LAYOUT_RETRY_LAYOUTGET, key->flags))
 		return 1;
-	return nfs_wait_bit_killable(key);
+	return nfs_wait_bit_killable(key, mode);
 }
 
 static bool pnfs_prepare_to_retry_layoutget(struct pnfs_layout_hdr *lo)
diff --git a/fs/ocfs2/namei.c b/fs/ocfs2/namei.c
index a03f6f433075..3123408da935 100644
--- a/fs/ocfs2/namei.c
+++ b/fs/ocfs2/namei.c
@@ -367,13 +367,11 @@ static int ocfs2_mknod(struct inode *dir,
 		goto leave;
 	}
 
-	status = posix_acl_create(dir, &mode, &default_acl, &acl);
+	status = posix_acl_create(dir, &inode->i_mode, &default_acl, &acl);
 	if (status) {
 		mlog_errno(status);
 		goto leave;
 	}
-	/* update inode->i_mode after mask with "umask". */
-	inode->i_mode = mode;
 
 	handle = ocfs2_start_trans(osb, ocfs2_mknod_credits(osb->sb,
 							    S_ISDIR(mode),
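
The ocfs2_mknod() change above works because posix_acl_create() takes a umode_t pointer and rewrites the mode in place, applying either the permission bits derived from a default ACL or the umask, so pointing it straight at inode->i_mode makes the removed manual copy-back redundant. A sketch of the calling pattern, with error handling elided and names as in the diff:

	/* Illustration only: posix_acl_create() may clear permission bits in
	 * *mode, so passing &inode->i_mode updates the inode directly. */
	struct posix_acl *default_acl, *acl;
	int status = posix_acl_create(dir, &inode->i_mode, &default_acl, &acl);
	if (status)
		goto leave;	/* error label as in the surrounding function */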