Diffstat (limited to 'fs/ubifs/file.c')
-rw-r--r--  fs/ubifs/file.c  248
1 file changed, 248 insertions(+), 0 deletions(-)
diff --git a/fs/ubifs/file.c b/fs/ubifs/file.c
index 3d698e2022b1..cdcfe95cbfb4 100644
--- a/fs/ubifs/file.c
+++ b/fs/ubifs/file.c
@@ -577,8 +577,256 @@ out:
	return copied;
}

/**
 * populate_page - copy data nodes into a page for bulk-read.
 * @c: UBIFS file-system description object
 * @page: page
 * @bu: bulk-read information
 * @n: next zbranch slot
 *
 * This function returns %0 on success and a negative error code on failure.
 */
static int populate_page(struct ubifs_info *c, struct page *page,
			 struct bu_info *bu, int *n)
{
	int i = 0, nn = *n, offs = bu->zbranch[0].offs, hole = 1, read = 0;
	struct inode *inode = page->mapping->host;
	loff_t i_size = i_size_read(inode);
	unsigned int page_block;
	void *addr, *zaddr;
	pgoff_t end_index;

	dbg_gen("ino %lu, pg %lu, i_size %lld, flags %#lx",
		inode->i_ino, page->index, i_size, page->flags);

	addr = zaddr = kmap(page);

	end_index = (i_size - 1) >> PAGE_CACHE_SHIFT;
	if (!i_size || page->index > end_index) {
		memset(addr, 0, PAGE_CACHE_SIZE);
		goto out_hole;
	}

	page_block = page->index << UBIFS_BLOCKS_PER_PAGE_SHIFT;
	/* Fill the page block by block, zero-filling blocks that are holes */
	while (1) {
		int err, len, out_len, dlen;

		if (nn >= bu->cnt ||
		    key_block(c, &bu->zbranch[nn].key) != page_block)
			memset(addr, 0, UBIFS_BLOCK_SIZE);
		else {
			struct ubifs_data_node *dn;

			dn = bu->buf + (bu->zbranch[nn].offs - offs);

			ubifs_assert(dn->ch.sqnum >
				     ubifs_inode(inode)->creat_sqnum);

			len = le32_to_cpu(dn->size);
			if (len <= 0 || len > UBIFS_BLOCK_SIZE)
				goto out_err;

			dlen = le32_to_cpu(dn->ch.len) - UBIFS_DATA_NODE_SZ;
			out_len = UBIFS_BLOCK_SIZE;
			err = ubifs_decompress(&dn->data, dlen, addr, &out_len,
					       le16_to_cpu(dn->compr_type));
			if (err || len != out_len)
				goto out_err;

			if (len < UBIFS_BLOCK_SIZE)
				memset(addr + len, 0, UBIFS_BLOCK_SIZE - len);

			nn += 1;
			hole = 0;
			read = (i << UBIFS_BLOCK_SHIFT) + len;
		}
		if (++i >= UBIFS_BLOCKS_PER_PAGE)
			break;
		addr += UBIFS_BLOCK_SIZE;
		page_block += 1;
	}

	if (end_index == page->index) {
		/* Zero the tail of the last page, beyond i_size */
		int len = i_size & (PAGE_CACHE_SIZE - 1);

		if (len && len < read)
			memset(zaddr + len, 0, read - len);
	}

out_hole:
	if (hole) {
		SetPageChecked(page);
		dbg_gen("hole");
	}

	SetPageUptodate(page);
	ClearPageError(page);
	flush_dcache_page(page);
	kunmap(page);
	*n = nn;
	return 0;

out_err:
	ClearPageUptodate(page);
	SetPageError(page);
	flush_dcache_page(page);
	kunmap(page);
	ubifs_err("bad data node (block %u, inode %lu)",
		  page_block, inode->i_ino);
	return -EINVAL;
}
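
The index arithmetic in populate_page() is easy to sanity-check outside the
kernel. Below is a minimal userspace sketch of the same calculations, with
illustrative stand-ins for UBIFS_BLOCK_SHIFT, PAGE_CACHE_SHIFT and
UBIFS_BLOCKS_PER_PAGE_SHIFT (4 KiB blocks, 64 KiB pages); it is a model of
the math, not part of this commit:

#include <stdio.h>

#define BLOCK_SHIFT		12	/* stand-in for UBIFS_BLOCK_SHIFT */
#define PG_SHIFT		16	/* stand-in for PAGE_CACHE_SHIFT */
#define PG_SIZE			(1UL << PG_SHIFT)
#define BLOCKS_PER_PAGE_SHIFT	(PG_SHIFT - BLOCK_SHIFT)

int main(void)
{
	unsigned long long i_size = 70000;	/* example file size in bytes */
	unsigned long index = 1;		/* second page of the file */

	/* First file block covered by this page */
	unsigned long page_block = index << BLOCKS_PER_PAGE_SHIFT;
	/* Index of the last page that contains data */
	unsigned long end_index = (i_size - 1) >> PG_SHIFT;
	/* Valid bytes in the last page; anything read past this is zeroed */
	unsigned long tail = i_size & (PG_SIZE - 1);

	printf("page_block=%lu end_index=%lu tail=%lu\n",
	       page_block, end_index, tail);	/* prints 16, 1, 4464 */
	return 0;
}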

/**
 * ubifs_do_bulk_read - do bulk-read.
 * @c: UBIFS file-system description object
 * @page1: first page
 *
 * This function returns %1 if the bulk-read is done, otherwise %0 is returned.
 */
static int ubifs_do_bulk_read(struct ubifs_info *c, struct page *page1)
{
	pgoff_t offset = page1->index, end_index;
	struct address_space *mapping = page1->mapping;
	struct inode *inode = mapping->host;
	struct ubifs_inode *ui = ubifs_inode(inode);
	struct bu_info *bu;
	int err, page_idx, page_cnt, ret = 0, n = 0;
	loff_t isize;

	bu = kmalloc(sizeof(struct bu_info), GFP_NOFS);
	if (!bu)
		return 0;

	bu->buf_len = c->bulk_read_buf_size;
	bu->buf = kmalloc(bu->buf_len, GFP_NOFS);
	if (!bu->buf)
		goto out_free;

	data_key_init(c, &bu->key, inode->i_ino,
		      offset << UBIFS_BLOCKS_PER_PAGE_SHIFT);

	err = ubifs_tnc_get_bu_keys(c, bu);
	if (err)
		goto out_warn;

	if (bu->eof) {
		/* Turn off bulk-read at the end of the file */
		ui->read_in_a_row = 1;
		ui->bulk_read = 0;
	}

	page_cnt = bu->blk_cnt >> UBIFS_BLOCKS_PER_PAGE_SHIFT;
	if (!page_cnt) {
		/*
		 * This happens when there are multiple blocks per page and the
		 * blocks for the first page we are looking for are not
		 * together. If all the pages were like this, bulk-read would
		 * reduce performance, so we turn it off for a while.
		 */
		ui->read_in_a_row = 0;
		ui->bulk_read = 0;
		goto out_free;
	}

	if (bu->cnt) {
		err = ubifs_tnc_bulk_read(c, bu);
		if (err)
			goto out_warn;
	}

	err = populate_page(c, page1, bu, &n);
	if (err)
		goto out_warn;

	unlock_page(page1);
	ret = 1;

	isize = i_size_read(inode);
	if (isize == 0)
		goto out_free;
	end_index = ((isize - 1) >> PAGE_CACHE_SHIFT);

	for (page_idx = 1; page_idx < page_cnt; page_idx++) {
		pgoff_t page_offset = offset + page_idx;
		struct page *page;

		if (page_offset > end_index)
			break;
		page = find_or_create_page(mapping, page_offset,
					   GFP_NOFS | __GFP_COLD);
		if (!page)
			break;
		if (!PageUptodate(page))
			err = populate_page(c, page, bu, &n);
		unlock_page(page);
		page_cache_release(page);
		if (err)
			break;
	}

	ui->last_page_read = offset + page_idx - 1;

out_free:
	kfree(bu->buf);
	kfree(bu);
	return ret;

out_warn:
	ubifs_warn("ignoring error %d and skipping bulk-read", err);
	goto out_free;
}
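
The page_cnt calculation above decides whether the bulk-read is worthwhile:
with multiple blocks per page, it only proceeds if the TNC lookup found at
least one full page worth of contiguous blocks. A hypothetical userspace
model of that check (assuming 16 blocks per page, i.e. 64 KiB pages with
4 KiB blocks):

#include <stdio.h>

#define BLOCKS_PER_PAGE_SHIFT	4	/* assumed: 64 KiB pages, 4 KiB blocks */

int main(void)
{
	int blk_cnt;

	for (blk_cnt = 8; blk_cnt <= 40; blk_cnt += 8) {
		/* Same shift as in ubifs_do_bulk_read() */
		int page_cnt = blk_cnt >> BLOCKS_PER_PAGE_SHIFT;

		/*
		 * page_cnt == 0 means even the first page's blocks were not
		 * together, so the heuristic switches bulk-read off.
		 */
		printf("blk_cnt=%2d -> page_cnt=%d%s\n", blk_cnt, page_cnt,
		       page_cnt ? "" : " (bulk-read switched off)");
	}
	return 0;
}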

/**
 * ubifs_bulk_read - determine whether to bulk-read and, if so, do it.
 * @page: page from which to start bulk-read.
 *
 * Some flash media are capable of reading sequentially at faster rates. UBIFS
 * bulk-read facility is designed to take advantage of that, by reading in one
 * go consecutive data nodes that are also located consecutively in the same
 * LEB. This function returns %1 if a bulk-read is done and %0 otherwise.
 */
static int ubifs_bulk_read(struct page *page)
{
	struct inode *inode = page->mapping->host;
	struct ubifs_info *c = inode->i_sb->s_fs_info;
	struct ubifs_inode *ui = ubifs_inode(inode);
	pgoff_t index = page->index, last_page_read = ui->last_page_read;
	int ret = 0;

	ui->last_page_read = index;

	if (!c->bulk_read)
		return 0;
	/*
	 * Bulk-read is protected by ui_mutex, but it is an optimization, so
	 * don't bother if we cannot lock the mutex.
	 */
	if (!mutex_trylock(&ui->ui_mutex))
		return 0;
	if (index != last_page_read + 1) {
		/* Turn off bulk-read if we stop reading sequentially */
		ui->read_in_a_row = 1;
		if (ui->bulk_read)
			ui->bulk_read = 0;
		goto out_unlock;
	}
	if (!ui->bulk_read) {
		ui->read_in_a_row += 1;
		if (ui->read_in_a_row < 3)
			goto out_unlock;
		/* Three reads in a row, so switch on bulk-read */
		ui->bulk_read = 1;
	}
	ret = ubifs_do_bulk_read(c, page);
out_unlock:
	mutex_unlock(&ui->ui_mutex);
	return ret;
}
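
The trylock-and-counter heuristic above is self-contained enough to model in
userspace. The sketch below (hypothetical; struct heur and read_page() are
illustrative, not kernel code) replays the same transitions: three sequential
page reads switch bulk-read on, and the first seek switches it off again:

#include <stdio.h>

struct heur {
	unsigned long last_page_read;
	int read_in_a_row;
	int bulk_read;
};

static void read_page(struct heur *h, unsigned long index)
{
	int sequential = (index == h->last_page_read + 1);

	h->last_page_read = index;
	if (!sequential) {
		/* Seek detected: reset the counter, switch bulk-read off */
		h->read_in_a_row = 1;
		h->bulk_read = 0;
	} else if (!h->bulk_read && ++h->read_in_a_row >= 3) {
		/* Three reads in a row: switch bulk-read on */
		h->bulk_read = 1;
	}
	printf("page %2lu: read_in_a_row=%d bulk_read=%d\n",
	       index, h->read_in_a_row, h->bulk_read);
}

int main(void)
{
	struct heur h = { (unsigned long)-1, 0, 0 };
	unsigned long pages[] = { 0, 1, 2, 3, 10, 11, 12 };
	unsigned long i;

	for (i = 0; i < sizeof(pages) / sizeof(pages[0]); i++)
		read_page(&h, pages[i]);
	return 0;
}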

static int ubifs_readpage(struct file *file, struct page *page)
{
	if (ubifs_bulk_read(page))
		return 0;
	do_readpage(page);
	unlock_page(page);
	return 0;