diff options
author | Trond Myklebust <trond.myklebust@primarydata.com> | 2017-07-17 19:50:23 -0400 |
---|---|---|
committer | Trond Myklebust <trond.myklebust@primarydata.com> | 2017-08-15 11:54:47 -0400 |
commit | f6032f216fca8a1fa7f43a652f26cdf633183745 (patch) | |
tree | ee273d6a71b7f9defb0060d8c057f3b765da47b0 /fs/nfs/write.c | |
parent | b66aaa8dfeda7b5c7df513cf3b36e1290fa84055 (diff) |
NFS: Teach nfs_try_to_update_request() to deal with request page_groups
Simplify the code, and avoid some flushes to disk.
Signed-off-by: Trond Myklebust <trond.myklebust@primarydata.com>
Diffstat (limited to 'fs/nfs/write.c')
 fs/nfs/write.c | 60 ++++++++++++--------------------------------------
 1 file changed, 20 insertions(+), 40 deletions(-)
diff --git a/fs/nfs/write.c b/fs/nfs/write.c
index ee981353d4aa..0b4d1ef168e0 100644
--- a/fs/nfs/write.c
+++ b/fs/nfs/write.c
@@ -1107,39 +1107,19 @@ static struct nfs_page *nfs_try_to_update_request(struct inode *inode, | |||
1107 | 1107 | ||
1108 | end = offset + bytes; | 1108 | end = offset + bytes; |
1109 | 1109 | ||
1110 | for (;;) { | 1110 | req = nfs_lock_and_join_requests(page); |
1111 | if (!(PagePrivate(page) || PageSwapCache(page))) | 1111 | if (IS_ERR_OR_NULL(req)) |
1112 | return NULL; | 1112 | return req; |
1113 | spin_lock(&inode->i_lock); | ||
1114 | req = nfs_page_find_head_request_locked(NFS_I(inode), page); | ||
1115 | if (req == NULL) | ||
1116 | goto out_unlock; | ||
1117 | |||
1118 | /* should be handled by nfs_flush_incompatible */ | ||
1119 | WARN_ON_ONCE(req->wb_head != req); | ||
1120 | WARN_ON_ONCE(req->wb_this_page != req); | ||
1121 | |||
1122 | rqend = req->wb_offset + req->wb_bytes; | ||
1123 | /* | ||
1124 | * Tell the caller to flush out the request if | ||
1125 | * the offsets are non-contiguous. | ||
1126 | * Note: nfs_flush_incompatible() will already | ||
1127 | * have flushed out requests having wrong owners. | ||
1128 | */ | ||
1129 | if (offset > rqend | ||
1130 | || end < req->wb_offset) | ||
1131 | goto out_flushme; | ||
1132 | |||
1133 | if (nfs_lock_request(req)) | ||
1134 | break; | ||
1135 | 1113 | ||
1136 | /* The request is locked, so wait and then retry */ | 1114 | rqend = req->wb_offset + req->wb_bytes; |
1137 | spin_unlock(&inode->i_lock); | 1115 | /* |
1138 | error = nfs_wait_on_request(req); | 1116 | * Tell the caller to flush out the request if |
1139 | nfs_release_request(req); | 1117 | * the offsets are non-contiguous. |
1140 | if (error != 0) | 1118 | * Note: nfs_flush_incompatible() will already |
1141 | goto out_err; | 1119 | * have flushed out requests having wrong owners. |
1142 | } | 1120 | */ |
1121 | if (offset > rqend || end < req->wb_offset) | ||
1122 | goto out_flushme; | ||
1143 | 1123 | ||
1144 | /* Okay, the request matches. Update the region */ | 1124 | /* Okay, the request matches. Update the region */ |
1145 | if (offset < req->wb_offset) { | 1125 | if (offset < req->wb_offset) { |
@@ -1150,17 +1130,17 @@ static struct nfs_page *nfs_try_to_update_request(struct inode *inode, | |||
1150 | req->wb_bytes = end - req->wb_offset; | 1130 | req->wb_bytes = end - req->wb_offset; |
1151 | else | 1131 | else |
1152 | req->wb_bytes = rqend - req->wb_offset; | 1132 | req->wb_bytes = rqend - req->wb_offset; |
1153 | out_unlock: | ||
1154 | if (req) | ||
1155 | nfs_clear_request_commit(req); | ||
1156 | spin_unlock(&inode->i_lock); | ||
1157 | return req; | 1133 | return req; |
1158 | out_flushme: | 1134 | out_flushme: |
1159 | spin_unlock(&inode->i_lock); | 1135 | /* |
1160 | nfs_release_request(req); | 1136 | * Note: we mark the request dirty here because |
1137 | * nfs_lock_and_join_requests() cannot preserve | ||
1138 | * commit flags, so we have to replay the write. | ||
1139 | */ | ||
1140 | nfs_mark_request_dirty(req); | ||
1141 | nfs_unlock_and_release_request(req); | ||
1161 | error = nfs_wb_page(inode, page); | 1142 | error = nfs_wb_page(inode, page); |
1162 | out_err: | 1143 | return (error < 0) ? ERR_PTR(error) : NULL; |
1163 | return ERR_PTR(error); | ||
1164 | } | 1144 | } |
1165 | 1145 | ||
1166 | /* | 1146 | /* |