author	Trond Myklebust <Trond.Myklebust@netapp.com>	2007-07-02 09:57:54 -0400
committer	Trond Myklebust <Trond.Myklebust@netapp.com>	2007-07-10 23:40:38 -0400
commit	587142f85f796cf0b823dd3080e815f02ff6b952
tree	891e9389d09916ff2c307dc5ada1c65303660c8d /fs
parent	4e56e082dd89266d320ccfbc7bd0102186a765ac
NFS: Replace NFS_I(inode)->req_lock with inode->i_lock
There is no justification for keeping a special spinlock for the exclusive use of the NFS writeback code.

Signed-off-by: Trond Myklebust <Trond.Myklebust@netapp.com>
Diffstat (limited to 'fs')
 -rw-r--r--  fs/nfs/inode.c    |  1
 -rw-r--r--  fs/nfs/pagelist.c | 11
 -rw-r--r--  fs/nfs/write.c    | 84
 3 files changed, 46 insertions(+), 50 deletions(-)
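The conversion is mechanical: every spin_lock()/spin_unlock() of nfsi->req_lock becomes a lock/unlock of the embedded VFS inode's i_lock, the req_lock field and its spin_lock_init() call go away, and the one call site without a struct inode pointer (nfs_scan_list()) reaches the same lock through &nfsi->vfs_inode.i_lock. A minimal sketch of the pattern, illustrative only (example_clear_tag is a hypothetical helper, not a function from this patch):

#include <linux/fs.h>
#include <linux/nfs_fs.h>
#include <linux/spinlock.h>

/* Hypothetical helper showing the locking pattern after this patch. */
static void example_clear_tag(struct inode *inode)
{
	/* Before this patch: spin_lock(&NFS_I(inode)->req_lock); */
	spin_lock(&inode->i_lock);
	/* ... manipulate NFS_I(inode)->nfs_page_tree under the lock ... */
	spin_unlock(&inode->i_lock);
	/* After: no per-nfs_inode spinlock to initialize or maintain. */
}

Since struct nfs_inode embeds its struct inode as vfs_inode, both spellings name the same lock; the trade-off is that writeback now shares i_lock with other VFS users of the inode instead of holding a dedicated lock.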
diff --git a/fs/nfs/inode.c b/fs/nfs/inode.c
index 01fc8ab0c562..9d5124166d20 100644
--- a/fs/nfs/inode.c
+++ b/fs/nfs/inode.c
@@ -1154,7 +1154,6 @@ static void init_once(void * foo, struct kmem_cache * cachep, unsigned long flag
 	struct nfs_inode *nfsi = (struct nfs_inode *) foo;
 
 	inode_init_once(&nfsi->vfs_inode);
-	spin_lock_init(&nfsi->req_lock);
 	INIT_LIST_HEAD(&nfsi->open_files);
 	INIT_LIST_HEAD(&nfsi->access_cache_entry_lru);
 	INIT_LIST_HEAD(&nfsi->access_cache_inode_lru);
diff --git a/fs/nfs/pagelist.c b/fs/nfs/pagelist.c
index 8d2642f24b8f..f56dae5216f4 100644
--- a/fs/nfs/pagelist.c
+++ b/fs/nfs/pagelist.c
@@ -126,12 +126,13 @@ static int nfs_set_page_tag_locked(struct nfs_page *req)
  */
 void nfs_clear_page_tag_locked(struct nfs_page *req)
 {
-	struct nfs_inode *nfsi = NFS_I(req->wb_context->path.dentry->d_inode);
+	struct inode *inode = req->wb_context->path.dentry->d_inode;
+	struct nfs_inode *nfsi = NFS_I(inode);
 
 	if (req->wb_page != NULL) {
-		spin_lock(&nfsi->req_lock);
+		spin_lock(&inode->i_lock);
 		radix_tree_tag_clear(&nfsi->nfs_page_tree, req->wb_index, NFS_PAGE_TAG_LOCKED);
-		spin_unlock(&nfsi->req_lock);
+		spin_unlock(&inode->i_lock);
 	}
 	nfs_unlock_request(req);
 }
@@ -390,7 +391,7 @@ void nfs_pageio_cond_complete(struct nfs_pageio_descriptor *desc, pgoff_t index)
  * If the number of requests is set to 0, the entire address_space
  * starting at index idx_start, is scanned.
  * The requests are *not* checked to ensure that they form a contiguous set.
- * You must be holding the inode's req_lock when calling this function
+ * You must be holding the inode's i_lock when calling this function
  */
 int nfs_scan_list(struct nfs_inode *nfsi,
 		struct list_head *dst, pgoff_t idx_start,
@@ -430,7 +431,7 @@ int nfs_scan_list(struct nfs_inode *nfsi,
 			}
 		}
 		/* for latency reduction */
-		cond_resched_lock(&nfsi->req_lock);
+		cond_resched_lock(&nfsi->vfs_inode.i_lock);
 	}
 out:
 	return res;
diff --git a/fs/nfs/write.c b/fs/nfs/write.c
index 9ef9ec746bfb..73ac992ece85 100644
--- a/fs/nfs/write.c
+++ b/fs/nfs/write.c
@@ -124,12 +124,12 @@ static struct nfs_page *nfs_page_find_request_locked(struct page *page)
 
 static struct nfs_page *nfs_page_find_request(struct page *page)
 {
+	struct inode *inode = page->mapping->host;
 	struct nfs_page *req = NULL;
-	spinlock_t *req_lock = &NFS_I(page->mapping->host)->req_lock;
 
-	spin_lock(req_lock);
+	spin_lock(&inode->i_lock);
 	req = nfs_page_find_request_locked(page);
-	spin_unlock(req_lock);
+	spin_unlock(&inode->i_lock);
 	return req;
 }
 
@@ -251,16 +251,16 @@ static void nfs_end_page_writeback(struct page *page)
 static int nfs_page_async_flush(struct nfs_pageio_descriptor *pgio,
 				struct page *page)
 {
+	struct inode *inode = page->mapping->host;
+	struct nfs_inode *nfsi = NFS_I(inode);
 	struct nfs_page *req;
-	struct nfs_inode *nfsi = NFS_I(page->mapping->host);
-	spinlock_t *req_lock = &nfsi->req_lock;
 	int ret;
 
-	spin_lock(req_lock);
+	spin_lock(&inode->i_lock);
 	for(;;) {
 		req = nfs_page_find_request_locked(page);
 		if (req == NULL) {
-			spin_unlock(req_lock);
+			spin_unlock(&inode->i_lock);
 			return 1;
 		}
 		if (nfs_lock_request_dontget(req))
@@ -270,28 +270,28 @@ static int nfs_page_async_flush(struct nfs_pageio_descriptor *pgio,
 		 * succeed provided that someone hasn't already marked the
 		 * request as dirty (in which case we don't care).
 		 */
-		spin_unlock(req_lock);
+		spin_unlock(&inode->i_lock);
 		ret = nfs_wait_on_request(req);
 		nfs_release_request(req);
 		if (ret != 0)
 			return ret;
-		spin_lock(req_lock);
+		spin_lock(&inode->i_lock);
 	}
 	if (test_bit(PG_NEED_COMMIT, &req->wb_flags)) {
 		/* This request is marked for commit */
-		spin_unlock(req_lock);
+		spin_unlock(&inode->i_lock);
 		nfs_unlock_request(req);
 		nfs_pageio_complete(pgio);
 		return 1;
 	}
 	if (nfs_set_page_writeback(page) != 0) {
-		spin_unlock(req_lock);
+		spin_unlock(&inode->i_lock);
 		BUG();
 	}
 	radix_tree_tag_set(&nfsi->nfs_page_tree, req->wb_index,
 			NFS_PAGE_TAG_LOCKED);
 	ret = test_bit(PG_NEED_FLUSH, &req->wb_flags);
-	spin_unlock(req_lock);
+	spin_unlock(&inode->i_lock);
 	nfs_pageio_add_request(pgio, req);
 	return ret;
 }
@@ -412,7 +412,7 @@ static void nfs_inode_remove_request(struct nfs_page *req)
 
 	BUG_ON (!NFS_WBACK_BUSY(req));
 
-	spin_lock(&nfsi->req_lock);
+	spin_lock(&inode->i_lock);
 	set_page_private(req->wb_page, 0);
 	ClearPagePrivate(req->wb_page);
 	radix_tree_delete(&nfsi->nfs_page_tree, req->wb_index);
@@ -420,11 +420,11 @@ static void nfs_inode_remove_request(struct nfs_page *req)
 		__set_page_dirty_nobuffers(req->wb_page);
 	nfsi->npages--;
 	if (!nfsi->npages) {
-		spin_unlock(&nfsi->req_lock);
+		spin_unlock(&inode->i_lock);
 		nfs_end_data_update(inode);
 		iput(inode);
 	} else
-		spin_unlock(&nfsi->req_lock);
+		spin_unlock(&inode->i_lock);
 	nfs_clear_request(req);
 	nfs_release_request(req);
 }
@@ -458,13 +458,13 @@ nfs_mark_request_commit(struct nfs_page *req)
 	struct inode *inode = req->wb_context->path.dentry->d_inode;
 	struct nfs_inode *nfsi = NFS_I(inode);
 
-	spin_lock(&nfsi->req_lock);
+	spin_lock(&inode->i_lock);
 	nfsi->ncommit++;
 	set_bit(PG_NEED_COMMIT, &(req)->wb_flags);
 	radix_tree_tag_set(&nfsi->nfs_page_tree,
 			req->wb_index,
 			NFS_PAGE_TAG_COMMIT);
-	spin_unlock(&nfsi->req_lock);
+	spin_unlock(&inode->i_lock);
 	inc_zone_page_state(req->wb_page, NR_UNSTABLE_NFS);
 	__mark_inode_dirty(inode, I_DIRTY_DATASYNC);
 }
@@ -534,10 +534,10 @@ static int nfs_wait_on_requests_locked(struct inode *inode, pgoff_t idx_start, u
 		BUG_ON(!NFS_WBACK_BUSY(req));
 
 		kref_get(&req->wb_kref);
-		spin_unlock(&nfsi->req_lock);
+		spin_unlock(&inode->i_lock);
 		error = nfs_wait_on_request(req);
 		nfs_release_request(req);
-		spin_lock(&nfsi->req_lock);
+		spin_lock(&inode->i_lock);
 		if (error < 0)
 			return error;
 		res++;
@@ -602,7 +602,6 @@ static struct nfs_page * nfs_update_request(struct nfs_open_context* ctx,
 {
 	struct address_space *mapping = page->mapping;
 	struct inode *inode = mapping->host;
-	struct nfs_inode *nfsi = NFS_I(inode);
 	struct nfs_page *req, *new = NULL;
 	pgoff_t rqend, end;
 
@@ -612,13 +611,13 @@ static struct nfs_page * nfs_update_request(struct nfs_open_context* ctx,
 	/* Loop over all inode entries and see if we find
 	 * A request for the page we wish to update
 	 */
-	spin_lock(&nfsi->req_lock);
+	spin_lock(&inode->i_lock);
 	req = nfs_page_find_request_locked(page);
 	if (req) {
 		if (!nfs_lock_request_dontget(req)) {
 			int error;
 
-			spin_unlock(&nfsi->req_lock);
+			spin_unlock(&inode->i_lock);
 			error = nfs_wait_on_request(req);
 			nfs_release_request(req);
 			if (error < 0) {
@@ -628,7 +627,7 @@ static struct nfs_page * nfs_update_request(struct nfs_open_context* ctx,
 			}
 			continue;
 		}
-		spin_unlock(&nfsi->req_lock);
+		spin_unlock(&inode->i_lock);
 		if (new)
 			nfs_release_request(new);
 		break;
@@ -639,14 +638,14 @@ static struct nfs_page * nfs_update_request(struct nfs_open_context* ctx,
 		nfs_lock_request_dontget(new);
 		error = nfs_inode_add_request(inode, new);
 		if (error) {
-			spin_unlock(&nfsi->req_lock);
+			spin_unlock(&inode->i_lock);
 			nfs_unlock_request(new);
 			return ERR_PTR(error);
 		}
-		spin_unlock(&nfsi->req_lock);
+		spin_unlock(&inode->i_lock);
 		return new;
 	}
-	spin_unlock(&nfsi->req_lock);
+	spin_unlock(&inode->i_lock);
 
 	new = nfs_create_request(ctx, inode, page, offset, bytes);
 	if (IS_ERR(new))
@@ -974,9 +973,9 @@ static void nfs_writeback_done_partial(struct rpc_task *task, void *calldata)
 	}
 
 	if (nfs_write_need_commit(data)) {
-		spinlock_t *req_lock = &NFS_I(page->mapping->host)->req_lock;
+		struct inode *inode = page->mapping->host;
 
-		spin_lock(req_lock);
+		spin_lock(&inode->i_lock);
 		if (test_bit(PG_NEED_RESCHED, &req->wb_flags)) {
 			/* Do nothing we need to resend the writes */
 		} else if (!test_and_set_bit(PG_NEED_COMMIT, &req->wb_flags)) {
@@ -987,7 +986,7 @@ static void nfs_writeback_done_partial(struct rpc_task *task, void *calldata)
 			clear_bit(PG_NEED_COMMIT, &req->wb_flags);
 			dprintk(" server reboot detected\n");
 		}
-		spin_unlock(req_lock);
+		spin_unlock(&inode->i_lock);
 	} else
 		dprintk(" OK\n");
 
@@ -1277,13 +1276,12 @@ static const struct rpc_call_ops nfs_commit_ops = {
 
 int nfs_commit_inode(struct inode *inode, int how)
 {
-	struct nfs_inode *nfsi = NFS_I(inode);
 	LIST_HEAD(head);
 	int res;
 
-	spin_lock(&nfsi->req_lock);
+	spin_lock(&inode->i_lock);
 	res = nfs_scan_commit(inode, &head, 0, 0);
-	spin_unlock(&nfsi->req_lock);
+	spin_unlock(&inode->i_lock);
 	if (res) {
 		int error = nfs_commit_list(inode, &head, how);
 		if (error < 0)
@@ -1301,7 +1299,6 @@ static inline int nfs_commit_list(struct inode *inode, struct list_head *head, i
 long nfs_sync_mapping_wait(struct address_space *mapping, struct writeback_control *wbc, int how)
 {
 	struct inode *inode = mapping->host;
-	struct nfs_inode *nfsi = NFS_I(inode);
 	pgoff_t idx_start, idx_end;
 	unsigned int npages = 0;
 	LIST_HEAD(head);
@@ -1323,7 +1320,7 @@ long nfs_sync_mapping_wait(struct address_space *mapping, struct writeback_contr
 		}
 	}
 	how &= ~FLUSH_NOCOMMIT;
-	spin_lock(&nfsi->req_lock);
+	spin_lock(&inode->i_lock);
 	do {
 		ret = nfs_wait_on_requests_locked(inode, idx_start, npages);
 		if (ret != 0)
@@ -1334,18 +1331,19 @@ long nfs_sync_mapping_wait(struct address_space *mapping, struct writeback_contr
 		if (pages == 0)
 			break;
 		if (how & FLUSH_INVALIDATE) {
-			spin_unlock(&nfsi->req_lock);
+			spin_unlock(&inode->i_lock);
 			nfs_cancel_commit_list(&head);
 			ret = pages;
-			spin_lock(&nfsi->req_lock);
+			spin_lock(&inode->i_lock);
 			continue;
 		}
 		pages += nfs_scan_commit(inode, &head, 0, 0);
-		spin_unlock(&nfsi->req_lock);
+		spin_unlock(&inode->i_lock);
 		ret = nfs_commit_list(inode, &head, how);
-		spin_lock(&nfsi->req_lock);
+		spin_lock(&inode->i_lock);
+
 	} while (ret >= 0);
-	spin_unlock(&nfsi->req_lock);
+	spin_unlock(&inode->i_lock);
 	return ret;
 }
 
@@ -1439,7 +1437,6 @@ int nfs_set_page_dirty(struct page *page)
 {
 	struct address_space *mapping = page->mapping;
 	struct inode *inode;
-	spinlock_t *req_lock;
 	struct nfs_page *req;
 	int ret;
 
@@ -1448,18 +1445,17 @@ int nfs_set_page_dirty(struct page *page)
 	inode = mapping->host;
 	if (!inode)
 		goto out_raced;
-	req_lock = &NFS_I(inode)->req_lock;
-	spin_lock(req_lock);
+	spin_lock(&inode->i_lock);
 	req = nfs_page_find_request_locked(page);
 	if (req != NULL) {
 		/* Mark any existing write requests for flushing */
 		ret = !test_and_set_bit(PG_NEED_FLUSH, &req->wb_flags);
-		spin_unlock(req_lock);
+		spin_unlock(&inode->i_lock);
 		nfs_release_request(req);
 		return ret;
 	}
 	ret = __set_page_dirty_nobuffers(page);
-	spin_unlock(req_lock);
+	spin_unlock(&inode->i_lock);
 	return ret;
 out_raced:
 	return !TestSetPageDirty(page);