author     Adrian Hunter <ext-adrian.hunter@nokia.com>      2008-09-12 03:34:51 -0400
committer  Artem Bityutskiy <Artem.Bityutskiy@nokia.com>    2008-09-30 04:12:59 -0400
commit     5c0013c16bd2ee08ffef1a1365622556a57218f5 (patch)
tree       65148d84c2c9a7d5f30c6e4c53db743ba795a30b /fs/ubifs
parent     46773be497a05010a2873e9ad96d739fb352c1e4 (diff)
UBIFS: fix bulk-read handling uptodate pages
Bulk-read skips pages that are already uptodate, but this was putting its array index out of step and causing subsequent pages to be treated as holes.

Signed-off-by: Adrian Hunter <ext-adrian.hunter@nokia.com>
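To make the index skew concrete, here is a small userspace sketch, not kernel code: node_block[] stands in for the block keys in bu->zbranch[], nn for the cursor into that array, and the block numbers are invented for the example. It contrasts the old "!=" test, which mistakes a leftover node from a skipped uptodate page for a hole and never advances nn, with the new three-way comparison from the diff below, which advances nn past such nodes and retries the same block.

/*
 * Illustration only (assumed names and values, not the kernel code).
 * Blocks 0-2 belong to a page that was already uptodate and was skipped,
 * but their data nodes are still in the bulk-read array, so the cursor
 * starts behind when the next page (blocks 3-5) is populated.
 */
#include <stdio.h>

#define NODE_CNT 6

int main(void)
{
	int node_block[NODE_CNT] = { 0, 1, 2, 3, 4, 5 };
	int nn, page_block;

	/* Old logic: "!=" lumps a leftover node for a skipped page together
	 * with a genuinely missing node, so nn never advances and every
	 * remaining block is reported as a hole. */
	nn = 0;
	for (page_block = 3; page_block <= 5; page_block++) {
		if (nn >= NODE_CNT || node_block[nn] != page_block)
			printf("old: block %d treated as hole (nn=%d)\n",
			       page_block, nn);
		else
			nn += 1;
	}

	/* New logic: a node whose key is below the current block belongs to
	 * a skipped page, so advance nn and retry the same block; only a
	 * truly missing node is a hole. */
	nn = 0;
	page_block = 3;
	while (page_block <= 5) {
		if (nn >= NODE_CNT) {
			printf("new: block %d is a hole\n", page_block);
		} else if (node_block[nn] == page_block) {
			printf("new: block %d filled from node %d\n",
			       page_block, nn);
			nn += 1;
		} else if (node_block[nn] < page_block) {
			nn += 1;
			continue;
		} else {
			printf("new: block %d is a hole\n", page_block);
		}
		page_block += 1;
	}
	return 0;
}

Run as is, the first loop reports blocks 3-5 as holes even though their data nodes are present, while the second fills all three; that is the behaviour the hunks below restore in populate_page().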
Diffstat (limited to 'fs/ubifs')
 -rw-r--r--  fs/ubifs/file.c | 16 +++++++++++-----
 1 file changed, 11 insertions(+), 5 deletions(-)
diff --git a/fs/ubifs/file.c b/fs/ubifs/file.c
index 2f20a49ba34e..51cf511d44d9 100644
--- a/fs/ubifs/file.c
+++ b/fs/ubifs/file.c
@@ -595,7 +595,7 @@ out:
 static int populate_page(struct ubifs_info *c, struct page *page,
			  struct bu_info *bu, int *n)
 {
-	int i = 0, nn = *n, offs = bu->zbranch[0].offs, hole = 1, read = 0;
+	int i = 0, nn = *n, offs = bu->zbranch[0].offs, hole = 0, read = 0;
 	struct inode *inode = page->mapping->host;
 	loff_t i_size = i_size_read(inode);
 	unsigned int page_block;
@@ -609,6 +609,7 @@ static int populate_page(struct ubifs_info *c, struct page *page,
 
 	end_index = (i_size - 1) >> PAGE_CACHE_SHIFT;
 	if (!i_size || page->index > end_index) {
+		hole = 1;
 		memset(addr, 0, PAGE_CACHE_SIZE);
 		goto out_hole;
 	}
@@ -617,10 +618,10 @@ static int populate_page(struct ubifs_info *c, struct page *page,
 	while (1) {
 		int err, len, out_len, dlen;
 
-		if (nn >= bu->cnt ||
-		    key_block(c, &bu->zbranch[nn].key) != page_block)
+		if (nn >= bu->cnt) {
+			hole = 1;
 			memset(addr, 0, UBIFS_BLOCK_SIZE);
-		else {
+		} else if (key_block(c, &bu->zbranch[nn].key) == page_block) {
 			struct ubifs_data_node *dn;
 
 			dn = bu->buf + (bu->zbranch[nn].offs - offs);
@@ -643,8 +644,13 @@ static int populate_page(struct ubifs_info *c, struct page *page,
 			memset(addr + len, 0, UBIFS_BLOCK_SIZE - len);
 
 			nn += 1;
-			hole = 0;
 			read = (i << UBIFS_BLOCK_SHIFT) + len;
+		} else if (key_block(c, &bu->zbranch[nn].key) < page_block) {
+			nn += 1;
+			continue;
+		} else {
+			hole = 1;
+			memset(addr, 0, UBIFS_BLOCK_SIZE);
 		}
 		if (++i >= UBIFS_BLOCKS_PER_PAGE)
 			break;