Diffstat (limited to 'fs/gfs2/ops_address.c')
-rw-r--r--  fs/gfs2/ops_address.c  28
1 file changed, 3 insertions(+), 25 deletions(-)
diff --git a/fs/gfs2/ops_address.c b/fs/gfs2/ops_address.c
index 0118aa439c1d..37bfeb961eb3 100644
--- a/fs/gfs2/ops_address.c
+++ b/fs/gfs2/ops_address.c
@@ -256,7 +256,7 @@ out_unlock:
  * the page lock and the glock) and return having done no I/O. Its
  * obviously not something we'd want to do on too regular a basis.
  * Any I/O we ignore at this time will be done via readpage later.
- * 2. We have to handle stuffed files here too.
+ * 2. We don't handle stuffed files here we let readpage do the honours.
  * 3. mpage_readpages() does most of the heavy lifting in the common case.
  * 4. gfs2_get_block() is relied upon to set BH_Boundary in the right places.
  * 5. We use LM_FLAG_TRY_1CB here, effectively we then have lock-ahead as
@@ -269,8 +269,7 @@ static int gfs2_readpages(struct file *file, struct address_space *mapping,
 	struct gfs2_inode *ip = GFS2_I(inode);
 	struct gfs2_sbd *sdp = GFS2_SB(inode);
 	struct gfs2_holder gh;
-	unsigned page_idx;
-	int ret;
+	int ret = 0;
 	int do_unlock = 0;
 
 	if (likely(file != &gfs2_internal_file_sentinel)) {
@@ -289,29 +288,8 @@ static int gfs2_readpages(struct file *file, struct address_space *mapping,
 		goto out_unlock;
 	}
 skip_lock:
-	if (gfs2_is_stuffed(ip)) {
-		struct pagevec lru_pvec;
-		pagevec_init(&lru_pvec, 0);
-		for (page_idx = 0; page_idx < nr_pages; page_idx++) {
-			struct page *page = list_entry(pages->prev, struct page, lru);
-			prefetchw(&page->flags);
-			list_del(&page->lru);
-			if (!add_to_page_cache(page, mapping,
-					       page->index, GFP_KERNEL)) {
-				ret = stuffed_readpage(ip, page);
-				unlock_page(page);
-				if (!pagevec_add(&lru_pvec, page))
-					__pagevec_lru_add(&lru_pvec);
-			} else {
-				page_cache_release(page);
-			}
-		}
-		pagevec_lru_add(&lru_pvec);
-		ret = 0;
-	} else {
-		/* What we really want to do .... */
+	if (!gfs2_is_stuffed(ip))
 		ret = mpage_readpages(mapping, pages, nr_pages, gfs2_get_block);
-	}
 
 	if (do_unlock) {
 		gfs2_glock_dq_m(1, &gh);
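
For reference, here is a non-authoritative sketch of how the read path of gfs2_readpages() reads once this patch is applied, assembled only from the context and added lines in the hunks above. The glock acquisition between the sentinel check and skip_lock, and the cleanup after the dequeue, are elided; the inode/mapping plumbing and the return path are assumptions, not taken from this diff.

static int gfs2_readpages(struct file *file, struct address_space *mapping,
			  struct list_head *pages, unsigned nr_pages)
{
	struct inode *inode = mapping->host;	/* assumed; not shown in the hunks above */
	struct gfs2_inode *ip = GFS2_I(inode);
	struct gfs2_sbd *sdp = GFS2_SB(inode);	/* used in the elided parts of the function */
	struct gfs2_holder gh;
	int ret = 0;
	int do_unlock = 0;

	if (likely(file != &gfs2_internal_file_sentinel)) {
		/* ... try-lock glock acquisition elided; the pre-existing code
		 * jumps to skip_lock or out_unlock from here ... */
	}
skip_lock:
	/* Stuffed (in-inode) files are simply skipped; ->readpage picks them up later. */
	if (!gfs2_is_stuffed(ip))
		ret = mpage_readpages(mapping, pages, nr_pages, gfs2_get_block);

	if (do_unlock) {
		gfs2_glock_dq_m(1, &gh);
		/* ... holder teardown and remaining error handling elided ... */
	}
	return ret;	/* assumed; the real exit path is not shown in this diff */
}

The effect of the patch is visible here: the per-page loop that manually added stuffed-file pages to the page cache is gone, so the only work done by readpages is the common mpage_readpages() case, and stuffed files fall back to readpage.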