aboutsummaryrefslogtreecommitdiffstats
path: root/fs/gfs2
diff options
context:
space:
mode:
authorSteven Whitehouse <swhiteho@redhat.com>2006-07-10 15:47:01 -0400
committerSteven Whitehouse <swhiteho@redhat.com>2006-07-10 15:47:01 -0400
commitffeb874b2b893aea7d10b0b088e06a7b1ded2a3e (patch)
treee670758c0ed4347fde74411603cf252978c966f6 /fs/gfs2
parentdc3e130a08996e2b56381365a5ac7bb1ce2a9f47 (diff)
[GFS2] Bug fix to gfs2_readpages()
This fixes a bug where we were sometimes releasing a page incorrectly when reading a stuffed file. This fixes the bug that Kevin reported when using Xen.

Cc: Kevin Anderson <kanderso@redhat.com>
Signed-off-by: Steven Whitehouse <swhiteho@redhat.com>
Diffstat (limited to 'fs/gfs2')
-rw-r--r--fs/gfs2/ops_address.c10
1 file changed, 5 insertions, 5 deletions
diff --git a/fs/gfs2/ops_address.c b/fs/gfs2/ops_address.c
index 27ce30148e69..2c4ec5cf21ff 100644
--- a/fs/gfs2/ops_address.c
+++ b/fs/gfs2/ops_address.c
@@ -249,8 +249,6 @@ out_unlock:
 		goto out;
 }
 
-#define list_to_page(head) (list_entry((head)->prev, struct page, lru))
-
 /**
  * gfs2_readpages - Read a bunch of pages at once
  *
@@ -290,7 +288,8 @@ static int gfs2_readpages(struct file *file, struct address_space *mapping,
 	struct pagevec lru_pvec;
 	pagevec_init(&lru_pvec, 0);
 	for (page_idx = 0; page_idx < nr_pages; page_idx++) {
-		struct page *page = list_to_page(pages);
+		struct page *page = list_entry(pages->prev, struct page, lru);
+		prefetchw(&page->flags);
 		list_del(&page->lru);
 		if (!add_to_page_cache(page, mapping,
 				       page->index, GFP_KERNEL)) {
@@ -298,8 +297,9 @@ static int gfs2_readpages(struct file *file, struct address_space *mapping,
 			unlock_page(page);
 			if (!pagevec_add(&lru_pvec, page))
 				__pagevec_lru_add(&lru_pvec);
+		} else {
+			page_cache_release(page);
 		}
-		page_cache_release(page);
 	}
 	pagevec_lru_add(&lru_pvec);
 	ret = 0;
@@ -321,7 +321,7 @@ out_noerror:
 out_unlock:
 	/* unlock all pages, we can't do any I/O right now */
 	for (page_idx = 0; page_idx < nr_pages; page_idx++) {
-		struct page *page = list_to_page(pages);
+		struct page *page = list_entry(pages->prev, struct page, lru);
 		list_del(&page->lru);
 		unlock_page(page);
 		page_cache_release(page);