author	Steven Whitehouse <swhiteho@redhat.com>	2006-12-15 16:49:51 -0500
committer	Steven Whitehouse <swhiteho@redhat.com>	2007-02-05 13:36:12 -0500
commit	e1d5b18ae92d0bbfe66dc2b4bab65006d32c5f7d
tree	d74ca0e101e6146ecf00b4c495bde40f45d7e43e
parent	c7b3383437ff41781964d1bf7f40ff8d7dd5bc47
[GFS2] Fail over to readpage for stuffed files
This is partially derived from a patch written by Russell Cattelan. It
fixes a bug where there is a race between readpages and truncate by
ignoring readpages for stuffed files. This is ok because a stuffed file
will never be more than one block (minus sizeof(struct gfs2_dinode)) in
size, and block size is always less than page size, so we do not lose
anything efficiency-wise by not doing readahead for stuffed files. They
will have already been "read ahead" by the action of reading the inode
in, in the first place.

This is the remaining part of the fix for Red Hat bugzilla #218966
which had not yet made it upstream.

Signed-off-by: Steven Whitehouse <swhiteho@redhat.com>
Cc: Russell Cattelan <cattelan@redhat.com>
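To make the size argument concrete, here is a small userspace sketch, not part of the patch: the 4 KiB block size and 232-byte dinode header below are assumptions chosen for illustration, standing in for the real sb_bsize and sizeof(struct gfs2_dinode). Because a stuffed file lives entirely inside its dinode block, and the block size never exceeds the page size, the whole file always fits in the single page that stuffed_readpage() fills, so readahead has nothing to add.

#include <stdio.h>

/* Assumed sizes for illustration only: a 4 KiB filesystem block and a
 * 232-byte on-disk dinode header. GFS2 requires block size <= page size.
 */
#define BLOCK_SIZE  4096u  /* assumed filesystem block size */
#define DINODE_SIZE  232u  /* assumed on-disk dinode header size */
#define PAGE_BYTES  4096u  /* assumed page size */

int main(void)
{
	/* A stuffed file's data is inline in its dinode block, so its
	 * size is bounded by one block minus the dinode header.
	 */
	unsigned max_stuffed = BLOCK_SIZE - DINODE_SIZE;

	printf("max stuffed file size: %u bytes\n", max_stuffed);
	printf("always fits in one page: %s\n",
	       max_stuffed < PAGE_BYTES ? "yes" : "no");
	return 0;
}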
-rw-r--r--	fs/gfs2/ops_address.c	28
1 file changed, 3 insertions(+), 25 deletions(-)
diff --git a/fs/gfs2/ops_address.c b/fs/gfs2/ops_address.c
index 0118aa439c1d..37bfeb961eb3 100644
--- a/fs/gfs2/ops_address.c
+++ b/fs/gfs2/ops_address.c
@@ -256,7 +256,7 @@ out_unlock:
  * the page lock and the glock) and return having done no I/O. Its
  * obviously not something we'd want to do on too regular a basis.
  * Any I/O we ignore at this time will be done via readpage later.
- * 2. We have to handle stuffed files here too.
+ * 2. We don't handle stuffed files here we let readpage do the honours.
  * 3. mpage_readpages() does most of the heavy lifting in the common case.
  * 4. gfs2_get_block() is relied upon to set BH_Boundary in the right places.
  * 5. We use LM_FLAG_TRY_1CB here, effectively we then have lock-ahead as
@@ -269,8 +269,7 @@ static int gfs2_readpages(struct file *file, struct address_space *mapping,
 	struct gfs2_inode *ip = GFS2_I(inode);
 	struct gfs2_sbd *sdp = GFS2_SB(inode);
 	struct gfs2_holder gh;
-	unsigned page_idx;
-	int ret;
+	int ret = 0;
 	int do_unlock = 0;
 
 	if (likely(file != &gfs2_internal_file_sentinel)) {
@@ -289,29 +288,8 @@ static int gfs2_readpages(struct file *file, struct address_space *mapping,
 		goto out_unlock;
 	}
 skip_lock:
-	if (gfs2_is_stuffed(ip)) {
-		struct pagevec lru_pvec;
-		pagevec_init(&lru_pvec, 0);
-		for (page_idx = 0; page_idx < nr_pages; page_idx++) {
-			struct page *page = list_entry(pages->prev, struct page, lru);
-			prefetchw(&page->flags);
-			list_del(&page->lru);
-			if (!add_to_page_cache(page, mapping,
-					       page->index, GFP_KERNEL)) {
-				ret = stuffed_readpage(ip, page);
-				unlock_page(page);
-				if (!pagevec_add(&lru_pvec, page))
-					__pagevec_lru_add(&lru_pvec);
-			} else {
-				page_cache_release(page);
-			}
-		}
-		pagevec_lru_add(&lru_pvec);
-		ret = 0;
-	} else {
-		/* What we really want to do .... */
-		ret = mpage_readpages(mapping, pages, nr_pages, gfs2_get_block);
-	}
+	if (!gfs2_is_stuffed(ip))
+		ret = mpage_readpages(mapping, pages, nr_pages, gfs2_get_block);
 
 	if (do_unlock) {
 		gfs2_glock_dq_m(1, &gh);
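For readers skimming the hunk above, here is a toy userspace model of the control flow the patch leaves behind; every type and name in it is a stand-in for illustration, not kernel API. Stuffed inodes now do no I/O in readpages at all and rely on a later ->readpage call, while ordinary inodes still go through mpage_readpages().

#include <stdbool.h>
#include <stdio.h>

/* Toy model only: mimics the shape of the patched gfs2_readpages(). */
struct toy_inode {
	bool stuffed;       /* data lives inline in the dinode block */
	unsigned nr_pages;  /* pages the readahead window asked for */
};

static int toy_readpages(struct toy_inode *ip)
{
	if (!ip->stuffed) {
		/* Common case: real readahead via mpage_readpages(). */
		printf("reading ahead %u pages\n", ip->nr_pages);
		return 0;
	}
	/* Stuffed: ignore the request; a later readpage call copies the
	 * inline data (at most one block, already in memory) into the page.
	 */
	printf("stuffed inode: deferring to readpage, no readahead\n");
	return 0;
}

int main(void)
{
	struct toy_inode regular = { .stuffed = false, .nr_pages = 16 };
	struct toy_inode tiny    = { .stuffed = true,  .nr_pages = 1 };

	toy_readpages(&regular);
	toy_readpages(&tiny);
	return 0;
}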