author		Al Viro <viro@zeniv.linux.org.uk>	2014-03-05 13:50:45 -0500
committer	Al Viro <viro@zeniv.linux.org.uk>	2014-05-06 17:32:47 -0400
commit		886a39115005ced8b15ab067c9c2a8d546b40a5e (patch)
tree		cb72af0480369f31a449aa7a8451d407315c2bf3 /fs
parent		26978b8b4d83c46f4310b253db70fa9e65149e7c (diff)
new primitive: iov_iter_alignment()
returns the value aligned as badly as the worst remaining segment in iov_iter is. Use instead of open-coded equivalents.

Signed-off-by: Al Viro <viro@zeniv.linux.org.uk>
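For context, a minimal userspace sketch of the idea behind iov_iter_alignment(): OR together the base address and length of every remaining segment, so a single test against blocksize_mask catches the worst-aligned one. The helper iovec_alignment() and the flat iovec array below are illustration-only stand-ins for the kernel's iov_iter walk; this is not the kernel implementation.

/* Userspace sketch only -- iovec_alignment() is a hypothetical stand-in
 * for the kernel's iov_iter_alignment(), which walks an iov_iter rather
 * than a plain iovec array. */
#include <stdio.h>
#include <sys/uio.h>

static unsigned long iovec_alignment(const struct iovec *iov, int nr_segs)
{
	unsigned long res = 0;
	int seg;

	/* OR in every segment's base address and length; any misaligned
	 * segment leaves a low bit set in the result. */
	for (seg = 0; seg < nr_segs; seg++)
		res |= (unsigned long)iov[seg].iov_base | iov[seg].iov_len;
	return res;
}

int main(void)
{
	static char buf[8192] __attribute__((aligned(4096)));
	struct iovec iov[2] = {
		{ .iov_base = buf,        .iov_len = 4096 },
		{ .iov_base = buf + 4096, .iov_len = 512  },	/* short tail */
	};
	unsigned long blocksize_mask = 4096 - 1;	/* 4K logical blocks */

	/* Mirrors the new check in do_blockdev_direct_IO(): one mask test
	 * replaces the old per-segment loop. */
	if (iovec_alignment(iov, 2) & blocksize_mask)
		printf("misaligned for 4K blocks\n");	/* this case: 512-byte tail */
	else
		printf("aligned for 4K blocks\n");
	return 0;
}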
Diffstat (limited to 'fs')
-rw-r--r--	fs/direct-io.c	27
1 file changed, 5 insertions(+), 22 deletions(-)
diff --git a/fs/direct-io.c b/fs/direct-io.c
index 1c677899b989..adfa1fb33456 100644
--- a/fs/direct-io.c
+++ b/fs/direct-io.c
@@ -1112,19 +1112,18 @@ do_blockdev_direct_IO(int rw, struct kiocb *iocb, struct inode *inode,
 	dio_submit_t submit_io,	int flags)
 {
 	int seg;
-	size_t size;
-	unsigned long addr;
 	unsigned i_blkbits = ACCESS_ONCE(inode->i_blkbits);
 	unsigned blkbits = i_blkbits;
 	unsigned blocksize_mask = (1 << blkbits) - 1;
 	ssize_t retval = -EINVAL;
-	loff_t end = offset;
+	loff_t end = offset + iov_iter_count(iter);
 	struct dio *dio;
 	struct dio_submit sdio = { 0, };
 	unsigned long user_addr;
 	size_t bytes;
 	struct buffer_head map_bh = { 0, };
 	struct blk_plug plug;
+	unsigned long align = offset | iov_iter_alignment(iter);
 
 	if (rw & WRITE)
 		rw = WRITE_ODIRECT;
@@ -1134,32 +1133,16 @@ do_blockdev_direct_IO(int rw, struct kiocb *iocb, struct inode *inode,
 	 * the early prefetch in the caller enough time.
 	 */
 
-	if (offset & blocksize_mask) {
+	if (align & blocksize_mask) {
 		if (bdev)
 			blkbits = blksize_bits(bdev_logical_block_size(bdev));
 		blocksize_mask = (1 << blkbits) - 1;
-		if (offset & blocksize_mask)
+		if (align & blocksize_mask)
 			goto out;
 	}
 
-	/* Check the memory alignment.  Blocks cannot straddle pages */
-	for (seg = 0; seg < iter->nr_segs; seg++) {
-		addr = (unsigned long)iter->iov[seg].iov_base;
-		size = iter->iov[seg].iov_len;
-		end += size;
-		if (unlikely((addr & blocksize_mask) ||
-			     (size & blocksize_mask))) {
-			if (bdev)
-				blkbits = blksize_bits(
-					 bdev_logical_block_size(bdev));
-			blocksize_mask = (1 << blkbits) - 1;
-			if ((addr & blocksize_mask) || (size & blocksize_mask))
-				goto out;
-		}
-	}
-
 	/* watch out for a 0 len io from a tricksy fs */
-	if (rw == READ && end == offset)
+	if (rw == READ && !iov_iter_count(iter))
 		return 0;
 
 	dio = kmem_cache_alloc(dio_cache, GFP_KERNEL);