author     Trond Myklebust <Trond.Myklebust@netapp.com>	2005-06-22 13:16:30 -0400
committer  Trond Myklebust <Trond.Myklebust@netapp.com>	2005-06-22 16:07:39 -0400
commit     c6a556b88adfacd2af90be84357c8165d716c27d (patch)
tree       b63d477180052d3d1edabffb51f3fdfe4f699866 /fs
parent     ab0a3dbedc51037f3d2e22ef67717a987b3d15e2 (diff)
[PATCH] NFS: Make searching and waiting on busy writeback requests more efficient.

Basically copies the VFS's method for tracking writebacks and applies it
to the struct nfs_page.

Signed-off-by: Trond Myklebust <Trond.Myklebust@netapp.com>
Diffstat (limited to 'fs')
-rw-r--r--  fs/nfs/pagelist.c  | 29
-rw-r--r--  fs/nfs/read.c      |  3
-rw-r--r--  fs/nfs/write.c     | 19
3 files changed, 37 insertions(+), 14 deletions(-)
diff --git a/fs/nfs/pagelist.c b/fs/nfs/pagelist.c
index 80777f99a58..356a33bb38a 100644
--- a/fs/nfs/pagelist.c
+++ b/fs/nfs/pagelist.c
@@ -112,6 +112,33 @@ void nfs_unlock_request(struct nfs_page *req)
 }
 
 /**
+ * nfs_set_page_writeback_locked - Lock a request for writeback
+ * @req:
+ */
+int nfs_set_page_writeback_locked(struct nfs_page *req)
+{
+	struct nfs_inode *nfsi = NFS_I(req->wb_context->dentry->d_inode);
+
+	if (!nfs_lock_request(req))
+		return 0;
+	radix_tree_tag_set(&nfsi->nfs_page_tree, req->wb_index, NFS_PAGE_TAG_WRITEBACK);
+	return 1;
+}
+
+/**
+ * nfs_clear_page_writeback - Unlock request and wake up sleepers
+ */
+void nfs_clear_page_writeback(struct nfs_page *req)
+{
+	struct nfs_inode *nfsi = NFS_I(req->wb_context->dentry->d_inode);
+
+	spin_lock(&nfsi->req_lock);
+	radix_tree_tag_clear(&nfsi->nfs_page_tree, req->wb_index, NFS_PAGE_TAG_WRITEBACK);
+	spin_unlock(&nfsi->req_lock);
+	nfs_unlock_request(req);
+}
+
+/**
  * nfs_clear_request - Free up all resources allocated to the request
  * @req:
  *
@@ -301,7 +328,7 @@ nfs_scan_list(struct list_head *head, struct list_head *dst,
 		if (req->wb_index > idx_end)
 			break;
 
-		if (!nfs_lock_request(req))
+		if (!nfs_set_page_writeback_locked(req))
 			continue;
 		nfs_list_remove_request(req);
 		nfs_list_add_request(req, dst);
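
A note for readers following the patch: the two helpers added above are meant to bracket a request's time under writeback. nfs_set_page_writeback_locked() takes the per-request lock and, on success, tags the request's slot in the per-inode radix tree with NFS_PAGE_TAG_WRITEBACK; nfs_clear_page_writeback() clears that tag under nfsi->req_lock and then unlocks the request, waking any sleepers. The following is a minimal user-space sketch of that life cycle; the demo_* names and the flat struct are hypothetical stand-ins for the kernel objects, not kernel code.

/* build with: cc -std=c99 -o demo demo.c  (illustration only) */
#include <stdbool.h>
#include <stdio.h>

/* Stand-in for struct nfs_page plus its slot in the per-inode radix tree. */
struct demo_req {
	unsigned long index;	/* cf. req->wb_index */
	bool locked;		/* cf. the busy lock taken by nfs_lock_request() */
	bool tag_writeback;	/* cf. NFS_PAGE_TAG_WRITEBACK on the tree slot */
};

/* cf. nfs_set_page_writeback_locked(): lock the request, then tag it. */
static int demo_set_writeback_locked(struct demo_req *req)
{
	if (req->locked)
		return 0;		/* already busy; the caller just skips it */
	req->locked = true;
	req->tag_writeback = true;
	return 1;
}

/* cf. nfs_clear_page_writeback(): clear the tag, then unlock and wake waiters. */
static void demo_clear_writeback(struct demo_req *req)
{
	req->tag_writeback = false;	/* done under nfsi->req_lock in the kernel */
	req->locked = false;
}

int main(void)
{
	struct demo_req req = { .index = 42 };

	if (demo_set_writeback_locked(&req))
		printf("request %lu tagged for writeback\n", req.index);

	/* ... the actual write I/O would happen here ... */

	demo_clear_writeback(&req);
	printf("request %lu untagged; waiters may proceed\n", req.index);
	return 0;
}
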
diff --git a/fs/nfs/read.c b/fs/nfs/read.c
index a0042fb5863..6f866b8aa2d 100644
--- a/fs/nfs/read.c
+++ b/fs/nfs/read.c
@@ -173,7 +173,6 @@ static int nfs_readpage_async(struct nfs_open_context *ctx, struct inode *inode,
 	if (len < PAGE_CACHE_SIZE)
 		memclear_highpage_flush(page, len, PAGE_CACHE_SIZE - len);
 
-	nfs_lock_request(new);
 	nfs_list_add_request(new, &one_request);
 	nfs_pagein_one(&one_request, inode);
 	return 0;
@@ -185,7 +184,6 @@ static void nfs_readpage_release(struct nfs_page *req)
 
 	nfs_clear_request(req);
 	nfs_release_request(req);
-	nfs_unlock_request(req);
 
 	dprintk("NFS: read done (%s/%Ld %d@%Ld)\n",
 			req->wb_context->dentry->d_inode->i_sb->s_id,
@@ -553,7 +551,6 @@ readpage_async_filler(void *data, struct page *page)
 	}
 	if (len < PAGE_CACHE_SIZE)
 		memclear_highpage_flush(page, len, PAGE_CACHE_SIZE - len);
-	nfs_lock_request(new);
 	nfs_list_add_request(new, desc->head);
 	return 0;
 }
diff --git a/fs/nfs/write.c b/fs/nfs/write.c
index 79b621a545b..58a39b0486a 100644
--- a/fs/nfs/write.c
+++ b/fs/nfs/write.c
@@ -503,13 +503,12 @@ nfs_wait_on_requests(struct inode *inode, unsigned long idx_start, unsigned int
 
 	spin_lock(&nfsi->req_lock);
 	next = idx_start;
-	while (radix_tree_gang_lookup(&nfsi->nfs_page_tree, (void **)&req, next, 1)) {
+	while (radix_tree_gang_lookup_tag(&nfsi->nfs_page_tree, (void **)&req, next, 1, NFS_PAGE_TAG_WRITEBACK)) {
 		if (req->wb_index > idx_end)
 			break;
 
 		next = req->wb_index + 1;
-		if (!NFS_WBACK_BUSY(req))
-			continue;
+		BUG_ON(!NFS_WBACK_BUSY(req));
 
 		atomic_inc(&req->wb_count);
 		spin_unlock(&nfsi->req_lock);
@@ -821,7 +820,7 @@ out:
 #else
 	nfs_inode_remove_request(req);
 #endif
-	nfs_unlock_request(req);
+	nfs_clear_page_writeback(req);
 }
 
 static inline int flush_task_priority(int how)
@@ -952,7 +951,7 @@ out_bad:
 		nfs_writedata_free(data);
 	}
 	nfs_mark_request_dirty(req);
-	nfs_unlock_request(req);
+	nfs_clear_page_writeback(req);
 	return -ENOMEM;
 }
 
@@ -1002,7 +1001,7 @@ static int nfs_flush_one(struct list_head *head, struct inode *inode, int how)
 		struct nfs_page *req = nfs_list_entry(head->next);
 		nfs_list_remove_request(req);
 		nfs_mark_request_dirty(req);
-		nfs_unlock_request(req);
+		nfs_clear_page_writeback(req);
 	}
 	return -ENOMEM;
 }
@@ -1029,7 +1028,7 @@ nfs_flush_list(struct list_head *head, int wpages, int how)
 		req = nfs_list_entry(head->next);
 		nfs_list_remove_request(req);
 		nfs_mark_request_dirty(req);
-		nfs_unlock_request(req);
+		nfs_clear_page_writeback(req);
 	}
 	return error;
 }
@@ -1121,7 +1120,7 @@ static void nfs_writeback_done_full(struct nfs_write_data *data, int status)
 		nfs_inode_remove_request(req);
 #endif
 	next:
-		nfs_unlock_request(req);
+		nfs_clear_page_writeback(req);
 	}
 }
 
@@ -1278,7 +1277,7 @@ nfs_commit_list(struct list_head *head, int how)
 		req = nfs_list_entry(head->next);
 		nfs_list_remove_request(req);
 		nfs_mark_request_commit(req);
-		nfs_unlock_request(req);
+		nfs_clear_page_writeback(req);
 	}
 	return -ENOMEM;
 }
@@ -1324,7 +1323,7 @@ nfs_commit_done(struct rpc_task *task)
 		dprintk(" mismatch\n");
 		nfs_mark_request_dirty(req);
 	next:
-		nfs_unlock_request(req);
+		nfs_clear_page_writeback(req);
 		res++;
 	}
 	sub_page_state(nr_unstable,res);
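
The efficiency win shows up in the nfs_wait_on_requests() hunk above: instead of walking every outstanding request with radix_tree_gang_lookup() and skipping the ones that are not busy, the waiter now asks the tree only for slots tagged NFS_PAGE_TAG_WRITEBACK via radix_tree_gang_lookup_tag(). Below is a rough user-space model of that difference; the arrays and names are hypothetical stand-ins (a flat array in place of the radix tree), and the counters only illustrate how many requests each approach has to look at.

/* build with: cc -std=c99 -o scan scan.c  (illustration only) */
#include <stdbool.h>
#include <stdio.h>

#define NREQ 1024

static bool busy[NREQ];		/* stand-in for NFS_WBACK_BUSY(req) */
static bool tagged[NREQ];	/* stand-in for NFS_PAGE_TAG_WRITEBACK */

int main(void)
{
	/* Suppose only a handful of requests are actually under writeback. */
	for (int i = 0; i < NREQ; i += 256) {
		busy[i] = true;
		tagged[i] = true;
	}

	/* Old scheme: the plain gang lookup returns every request, so the
	 * waiter has to test each one and skip past the idle ones. */
	int touched_old = 0;
	for (int i = 0; i < NREQ; i++) {
		touched_old++;
		if (!busy[i])
			continue;
		/* ...would wait for writeback on request i here... */
	}

	/* New scheme: the tagged gang lookup hands back only tagged slots.
	 * (This model still scans the array; the counter reflects what the
	 * tagged lookup would actually return to the caller.) */
	int touched_new = 0;
	for (int i = 0; i < NREQ; i++) {
		if (!tagged[i])
			continue;
		touched_new++;
		/* ...would wait for writeback on request i here... */
	}

	printf("untagged lookup examined %d requests to find %d\n", touched_old, touched_new);
	printf("tagged lookup returned just %d requests\n", touched_new);
	return 0;
}
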