author    Trond Myklebust <Trond.Myklebust@netapp.com>  2008-01-22 17:13:07 -0500
committer Trond Myklebust <Trond.Myklebust@netapp.com>  2008-01-30 02:05:24 -0500
commit    acee478afc6ff7e1b8852d9a4dca1ff36021414d (patch)
tree      94aa75336122b4914af0cfa91eaf1329b2c519d3 /fs
parent    8b1f9ee56e21e505a3d5d3e33f823006d1abdbaf (diff)
NFS: Clean up the write request locking.
Ensure that we set/clear NFS_PAGE_TAG_LOCKED when the nfs_page is hashed.

Signed-off-by: Trond Myklebust <Trond.Myklebust@netapp.com>
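The rule the patch settles on, in plain terms: the request lock is always taken, but the NFS_PAGE_TAG_LOCKED radix-tree tag is only set (and later cleared) when the request is hashed, i.e. when req->wb_page is non-NULL. Below is a minimal, self-contained userspace sketch of that pattern; the types and function names are simplified stand-ins for illustration only and are not the kernel's nfs_page API.

/* Simplified stand-ins: the kernel tracks the lock via PG_BUSY on
 * struct nfs_page and the tag via a per-inode radix tree keyed by
 * wb_index; here both collapse into booleans. */
#include <stdbool.h>
#include <stdio.h>

struct request {
	bool locked;     /* models nfs_lock_request_dontget() */
	bool has_page;   /* models req->wb_page != NULL (hashed) */
	bool tag_locked; /* models NFS_PAGE_TAG_LOCKED */
};

/* Take the lock; tag the request as locked only when it is hashed. */
static bool set_page_tag_locked(struct request *req)
{
	if (req->locked)
		return false;   /* somebody else holds it */
	req->locked = true;
	if (req->has_page)
		req->tag_locked = true;
	return true;
}

/* Clear the tag (only if hashed), then drop the lock. */
static void clear_page_tag_locked(struct request *req)
{
	if (req->has_page)
		req->tag_locked = false;
	req->locked = false;
}

int main(void)
{
	struct request req = { .has_page = true };

	if (set_page_tag_locked(&req))
		printf("locked, tagged=%d\n", req.tag_locked);
	clear_page_tag_locked(&req);
	printf("unlocked, tagged=%d\n", req.tag_locked);
	return 0;
}

In the kernel itself the radix-tree tag update is done under the inode's i_lock, as the hunks below show.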
Diffstat (limited to 'fs')
-rw-r--r--	fs/nfs/pagelist.c	13
-rw-r--r--	fs/nfs/write.c	16
2 files changed, 15 insertions(+), 14 deletions(-)
diff --git a/fs/nfs/pagelist.c b/fs/nfs/pagelist.c
index 345bb9b4765b..3b3dbb94393d 100644
--- a/fs/nfs/pagelist.c
+++ b/fs/nfs/pagelist.c
@@ -111,13 +111,14 @@ void nfs_unlock_request(struct nfs_page *req)
  * nfs_set_page_tag_locked - Tag a request as locked
  * @req:
  */
-static int nfs_set_page_tag_locked(struct nfs_page *req)
+int nfs_set_page_tag_locked(struct nfs_page *req)
 {
 	struct nfs_inode *nfsi = NFS_I(req->wb_context->path.dentry->d_inode);
 
-	if (!nfs_lock_request(req))
+	if (!nfs_lock_request_dontget(req))
 		return 0;
-	radix_tree_tag_set(&nfsi->nfs_page_tree, req->wb_index, NFS_PAGE_TAG_LOCKED);
+	if (req->wb_page != NULL)
+		radix_tree_tag_set(&nfsi->nfs_page_tree, req->wb_index, NFS_PAGE_TAG_LOCKED);
 	return 1;
 }
 
@@ -132,9 +133,10 @@ void nfs_clear_page_tag_locked(struct nfs_page *req)
 	if (req->wb_page != NULL) {
 		spin_lock(&inode->i_lock);
 		radix_tree_tag_clear(&nfsi->nfs_page_tree, req->wb_index, NFS_PAGE_TAG_LOCKED);
+		nfs_unlock_request(req);
 		spin_unlock(&inode->i_lock);
-	}
-	nfs_unlock_request(req);
+	} else
+		nfs_unlock_request(req);
 }
 
 /**
@@ -421,6 +423,7 @@ int nfs_scan_list(struct nfs_inode *nfsi,
 			goto out;
 		idx_start = req->wb_index + 1;
 		if (nfs_set_page_tag_locked(req)) {
+			kref_get(&req->wb_kref);
 			nfs_list_remove_request(req);
 			radix_tree_tag_clear(&nfsi->nfs_page_tree,
 					req->wb_index, tag);
diff --git a/fs/nfs/write.c b/fs/nfs/write.c
index 51cc1bd6a116..092e79c6d962 100644
--- a/fs/nfs/write.c
+++ b/fs/nfs/write.c
@@ -196,7 +196,7 @@ static int nfs_writepage_setup(struct nfs_open_context *ctx, struct page *page,
 	}
 	/* Update file length */
 	nfs_grow_file(page, offset, count);
-	nfs_unlock_request(req);
+	nfs_clear_page_tag_locked(req);
 	return 0;
 }
 
@@ -252,7 +252,6 @@ static int nfs_page_async_flush(struct nfs_pageio_descriptor *pgio,
 				struct page *page)
 {
 	struct inode *inode = page->mapping->host;
-	struct nfs_inode *nfsi = NFS_I(inode);
 	struct nfs_page *req;
 	int ret;
 
@@ -263,10 +262,10 @@ static int nfs_page_async_flush(struct nfs_pageio_descriptor *pgio,
 			spin_unlock(&inode->i_lock);
 			return 0;
 		}
-		if (nfs_lock_request_dontget(req))
+		if (nfs_set_page_tag_locked(req))
 			break;
 		/* Note: If we hold the page lock, as is the case in nfs_writepage,
-		 * then the call to nfs_lock_request_dontget() will always
+		 * then the call to nfs_set_page_tag_locked() will always
 		 * succeed provided that someone hasn't already marked the
 		 * request as dirty (in which case we don't care).
 		 */
@@ -280,7 +279,7 @@ static int nfs_page_async_flush(struct nfs_pageio_descriptor *pgio,
 	if (test_bit(PG_NEED_COMMIT, &req->wb_flags)) {
 		/* This request is marked for commit */
 		spin_unlock(&inode->i_lock);
-		nfs_unlock_request(req);
+		nfs_clear_page_tag_locked(req);
 		nfs_pageio_complete(pgio);
 		return 0;
 	}
@@ -288,8 +287,6 @@ static int nfs_page_async_flush(struct nfs_pageio_descriptor *pgio,
 		spin_unlock(&inode->i_lock);
 		BUG();
 	}
-	radix_tree_tag_set(&nfsi->nfs_page_tree, req->wb_index,
-			NFS_PAGE_TAG_LOCKED);
 	spin_unlock(&inode->i_lock);
 	nfs_pageio_add_request(pgio, req);
 	return 0;
@@ -381,6 +378,7 @@ static int nfs_inode_add_request(struct inode *inode, struct nfs_page *req)
 	set_page_private(req->wb_page, (unsigned long)req);
 	nfsi->npages++;
 	kref_get(&req->wb_kref);
+	radix_tree_tag_set(&nfsi->nfs_page_tree, req->wb_index, NFS_PAGE_TAG_LOCKED);
 	return 0;
 }
 
@@ -596,7 +594,7 @@ static struct nfs_page * nfs_update_request(struct nfs_open_context* ctx,
 		spin_lock(&inode->i_lock);
 		req = nfs_page_find_request_locked(page);
 		if (req) {
-			if (!nfs_lock_request_dontget(req)) {
+			if (!nfs_set_page_tag_locked(req)) {
 				int error;
 
 				spin_unlock(&inode->i_lock);
@@ -646,7 +644,7 @@ static struct nfs_page * nfs_update_request(struct nfs_open_context* ctx,
 	    || req->wb_page != page
 	    || !nfs_dirty_request(req)
 	    || offset > rqend || end < req->wb_offset) {
-		nfs_unlock_request(req);
+		nfs_clear_page_tag_locked(req);
 		return ERR_PTR(-EBUSY);
 	}
 