Diffstat (limited to 'fs')

 fs/ubifs/file.c  | 248
 fs/ubifs/key.h   |  22
 fs/ubifs/super.c |  31
 fs/ubifs/tnc.c   | 283
 fs/ubifs/ubifs.h |  45
 5 files changed, 626 insertions(+), 3 deletions(-)
diff --git a/fs/ubifs/file.c b/fs/ubifs/file.c
index 3d698e2022b1..cdcfe95cbfb4 100644
--- a/fs/ubifs/file.c
+++ b/fs/ubifs/file.c
@@ -577,8 +577,256 @@ out:
 	return copied;
 }
 
+/**
+ * populate_page - copy data nodes into a page for bulk-read.
+ * @c: UBIFS file-system description object
+ * @page: page
+ * @bu: bulk-read information
+ * @n: next zbranch slot
+ *
+ * This function returns %0 on success and a negative error code on failure.
+ */
+static int populate_page(struct ubifs_info *c, struct page *page,
+			 struct bu_info *bu, int *n)
+{
+	int i = 0, nn = *n, offs = bu->zbranch[0].offs, hole = 1, read = 0;
+	struct inode *inode = page->mapping->host;
+	loff_t i_size = i_size_read(inode);
+	unsigned int page_block;
+	void *addr, *zaddr;
+	pgoff_t end_index;
+
+	dbg_gen("ino %lu, pg %lu, i_size %lld, flags %#lx",
+		inode->i_ino, page->index, i_size, page->flags);
+
+	addr = zaddr = kmap(page);
+
+	end_index = (i_size + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT;
+	if (!i_size || page->index > end_index) {
+		memset(addr, 0, PAGE_CACHE_SIZE);
+		goto out_hole;
+	}
+
+	page_block = page->index << UBIFS_BLOCKS_PER_PAGE_SHIFT;
+	while (1) {
+		int err, len, out_len, dlen;
+
+		if (nn >= bu->cnt ||
+		    key_block(c, &bu->zbranch[nn].key) != page_block)
+			memset(addr, 0, UBIFS_BLOCK_SIZE);
+		else {
+			struct ubifs_data_node *dn;
+
+			dn = bu->buf + (bu->zbranch[nn].offs - offs);
+
+			ubifs_assert(dn->ch.sqnum >
+				     ubifs_inode(inode)->creat_sqnum);
+
+			len = le32_to_cpu(dn->size);
+			if (len <= 0 || len > UBIFS_BLOCK_SIZE)
+				goto out_err;
+
+			dlen = le32_to_cpu(dn->ch.len) - UBIFS_DATA_NODE_SZ;
+			out_len = UBIFS_BLOCK_SIZE;
+			err = ubifs_decompress(&dn->data, dlen, addr, &out_len,
+					       le16_to_cpu(dn->compr_type));
+			if (err || len != out_len)
+				goto out_err;
+
+			if (len < UBIFS_BLOCK_SIZE)
+				memset(addr + len, 0, UBIFS_BLOCK_SIZE - len);
+
+			nn += 1;
+			hole = 0;
+			read = (i << UBIFS_BLOCK_SHIFT) + len;
+		}
+		if (++i >= UBIFS_BLOCKS_PER_PAGE)
+			break;
+		addr += UBIFS_BLOCK_SIZE;
+		page_block += 1;
+	}
+
+	if (end_index == page->index) {
+		int len = i_size & (PAGE_CACHE_SIZE - 1);
+
+		if (len < read)
+			memset(zaddr + len, 0, read - len);
+	}
+
+out_hole:
+	if (hole) {
+		SetPageChecked(page);
+		dbg_gen("hole");
+	}
+
+	SetPageUptodate(page);
+	ClearPageError(page);
+	flush_dcache_page(page);
+	kunmap(page);
+	*n = nn;
+	return 0;
+
+out_err:
+	ClearPageUptodate(page);
+	SetPageError(page);
+	flush_dcache_page(page);
+	kunmap(page);
+	ubifs_err("bad data node (block %u, inode %lu)",
+		  page_block, inode->i_ino);
+	return -EINVAL;
+}
+
+/**
+ * ubifs_do_bulk_read - do bulk-read.
+ * @c: UBIFS file-system description object
+ * @page1: first page
+ *
+ * This function returns %1 if the bulk-read is done and %0 otherwise.
+ */
+static int ubifs_do_bulk_read(struct ubifs_info *c, struct page *page1)
+{
+	pgoff_t offset = page1->index, end_index;
+	struct address_space *mapping = page1->mapping;
+	struct inode *inode = mapping->host;
+	struct ubifs_inode *ui = ubifs_inode(inode);
+	struct bu_info *bu;
+	int err, page_idx, page_cnt, ret = 0, n = 0;
+	loff_t isize;
+
+	bu = kmalloc(sizeof(struct bu_info), GFP_NOFS);
+	if (!bu)
+		return 0;
+
+	bu->buf_len = c->bulk_read_buf_size;
+	bu->buf = kmalloc(bu->buf_len, GFP_NOFS);
+	if (!bu->buf)
+		goto out_free;
+
+	data_key_init(c, &bu->key, inode->i_ino,
+		      offset << UBIFS_BLOCKS_PER_PAGE_SHIFT);
+
+	err = ubifs_tnc_get_bu_keys(c, bu);
+	if (err)
+		goto out_warn;
+
+	if (bu->eof) {
+		/* Turn off bulk-read at the end of the file */
+		ui->read_in_a_row = 1;
+		ui->bulk_read = 0;
+	}
+
+	page_cnt = bu->blk_cnt >> UBIFS_BLOCKS_PER_PAGE_SHIFT;
+	if (!page_cnt) {
+		/*
+		 * This happens when there are multiple blocks per page and the
+		 * blocks for the first page we are looking for are not
+		 * together. If all the pages were like this, bulk-read would
+		 * reduce performance, so we turn it off for a while.
+		 */
+		ui->read_in_a_row = 0;
+		ui->bulk_read = 0;
+		goto out_free;
+	}
+
+	if (bu->cnt) {
+		err = ubifs_tnc_bulk_read(c, bu);
+		if (err)
+			goto out_warn;
+	}
+
+	err = populate_page(c, page1, bu, &n);
+	if (err)
+		goto out_warn;
+
+	unlock_page(page1);
+	ret = 1;
+
+	isize = i_size_read(inode);
+	if (isize == 0)
+		goto out_free;
+	end_index = ((isize - 1) >> PAGE_CACHE_SHIFT);
+
+	for (page_idx = 1; page_idx < page_cnt; page_idx++) {
+		pgoff_t page_offset = offset + page_idx;
+		struct page *page;
+
+		if (page_offset > end_index)
+			break;
+		page = find_or_create_page(mapping, page_offset,
+					   GFP_NOFS | __GFP_COLD);
+		if (!page)
+			break;
+		if (!PageUptodate(page))
+			err = populate_page(c, page, bu, &n);
+		unlock_page(page);
+		page_cache_release(page);
+		if (err)
+			break;
+	}
+
+	ui->last_page_read = offset + page_idx - 1;
+
+out_free:
+	kfree(bu->buf);
+	kfree(bu);
+	return ret;
+
+out_warn:
+	ubifs_warn("ignoring error %d and skipping bulk-read", err);
+	goto out_free;
+}
+
+/**
+ * ubifs_bulk_read - determine whether to bulk-read and, if so, do it.
+ * @page: page from which to start bulk-read.
+ *
+ * Some flash media are capable of reading sequentially at faster rates. The
+ * UBIFS bulk-read facility is designed to take advantage of that, by reading
+ * in one go consecutive data nodes that are also located consecutively in the
+ * same LEB. This function returns %1 if a bulk-read is done and %0 otherwise.
+ */
+static int ubifs_bulk_read(struct page *page)
+{
+	struct inode *inode = page->mapping->host;
+	struct ubifs_info *c = inode->i_sb->s_fs_info;
+	struct ubifs_inode *ui = ubifs_inode(inode);
+	pgoff_t index = page->index, last_page_read = ui->last_page_read;
+	int ret = 0;
+
+	ui->last_page_read = index;
+
+	if (!c->bulk_read)
+		return 0;
+	/*
+	 * Bulk-read is protected by ui_mutex, but it is an optimization, so
+	 * don't bother if we cannot lock the mutex.
+	 */
+	if (!mutex_trylock(&ui->ui_mutex))
+		return 0;
+	if (index != last_page_read + 1) {
+		/* Turn off bulk-read if we stop reading sequentially */
+		ui->read_in_a_row = 1;
+		if (ui->bulk_read)
+			ui->bulk_read = 0;
+		goto out_unlock;
+	}
+	if (!ui->bulk_read) {
+		ui->read_in_a_row += 1;
+		if (ui->read_in_a_row < 3)
+			goto out_unlock;
+		/* Three reads in a row, so switch on bulk-read */
+		ui->bulk_read = 1;
+	}
+	ret = ubifs_do_bulk_read(c, page);
+out_unlock:
+	mutex_unlock(&ui->ui_mutex);
+	return ret;
+}
+
 static int ubifs_readpage(struct file *file, struct page *page)
 {
+	if (ubifs_bulk_read(page))
+		return 0;
 	do_readpage(page);
 	unlock_page(page);
 	return 0;
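
The enable/disable heuristic above is compact but easy to misread, so here is a minimal user-space model of the same sequential-read detector. The struct and function names are hypothetical stand-ins for the @last_page_read, @read_in_a_row and @bulk_read inode fields; locking and the global c->bulk_read switch are omitted.

#include <stdio.h>

struct reader_state {
	unsigned long last_page_read; /* index of the last page read */
	unsigned long read_in_a_row;  /* consecutive sequential reads seen */
	int bulk_read;                /* bulk-read currently enabled? */
};

/* Mirror of the decision ubifs_bulk_read() makes for each page index */
static int want_bulk_read(struct reader_state *s, unsigned long index)
{
	unsigned long last = s->last_page_read;

	s->last_page_read = index;
	if (index != last + 1) {
		/* Sequence broken: reset the counter and switch off */
		s->read_in_a_row = 1;
		s->bulk_read = 0;
		return 0;
	}
	if (!s->bulk_read) {
		s->read_in_a_row += 1;
		if (s->read_in_a_row < 3)
			return 0;
		/* Three reads in a row: switch on bulk-read */
		s->bulk_read = 1;
	}
	return 1;
}

int main(void)
{
	struct reader_state s = { 0, 0, 0 };
	unsigned long pages[] = { 1, 2, 3, 4, 9, 10, 11, 12 };

	for (int i = 0; i < 8; i++)
		printf("page %lu -> bulk-read %s\n", pages[i],
		       want_bulk_read(&s, pages[i]) ? "yes" : "no");
	return 0;
}

Running it shows bulk-read switching on at page 3, staying on through page 4, dropping out when the jump to page 9 breaks the sequence, and re-arming by page 11.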
diff --git a/fs/ubifs/key.h b/fs/ubifs/key.h
index 8f7476007549..9ee65086f627 100644
--- a/fs/ubifs/key.h
+++ b/fs/ubifs/key.h
@@ -484,7 +484,7 @@ static inline void key_copy(const struct ubifs_info *c,
  * @key2: the second key to compare
  *
  * This function compares 2 keys and returns %-1 if @key1 is less than
- * @key2, 0 if the keys are equivalent and %1 if @key1 is greater than @key2.
+ * @key2, %0 if the keys are equivalent and %1 if @key1 is greater than @key2.
  */
 static inline int keys_cmp(const struct ubifs_info *c,
 			   const union ubifs_key *key1,
@@ -503,6 +503,26 @@ static inline int keys_cmp(const struct ubifs_info *c,
 }
 
 /**
+ * keys_eq - determine if keys are equivalent.
+ * @c: UBIFS file-system description object
+ * @key1: the first key to compare
+ * @key2: the second key to compare
+ *
+ * This function compares 2 keys and returns %1 if @key1 is equal to @key2 and
+ * %0 if not.
+ */
+static inline int keys_eq(const struct ubifs_info *c,
+			  const union ubifs_key *key1,
+			  const union ubifs_key *key2)
+{
+	if (key1->u32[0] != key2->u32[0])
+		return 0;
+	if (key1->u32[1] != key2->u32[1])
+		return 0;
+	return 1;
+}
+
+/**
  * is_hash_key - is a key vulnerable to hash collisions.
  * @c: UBIFS file-system description object
  * @key: key
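
keys_eq() pays off because a simple-format UBIFS key occupies exactly two 32-bit words, so equality can be answered with two word compares; the helper avoids the branchier three-way keys_cmp() when callers such as validate_data_node() only need yes/no. A standalone sketch of the idea, with a hypothetical demo_key type in place of union ubifs_key:

#include <stdint.h>

/* Two 32-bit words, like a simple-format UBIFS key */
union demo_key {
	uint32_t u32[2];
	uint64_t u64[1];
};

/* Word-wise equality: cheaper than a -1/0/+1 comparison when only
 * a yes/no answer is needed */
static int demo_keys_eq(const union demo_key *k1, const union demo_key *k2)
{
	return k1->u32[0] == k2->u32[0] && k1->u32[1] == k2->u32[1];
}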
diff --git a/fs/ubifs/super.c b/fs/ubifs/super.c
index d87b0cf5f661..b1c57e8ee855 100644
--- a/fs/ubifs/super.c
+++ b/fs/ubifs/super.c
@@ -401,6 +401,11 @@ static int ubifs_show_options(struct seq_file *s, struct vfsmount *mnt)
 	else if (c->mount_opts.unmount_mode == 1)
 		seq_printf(s, ",norm_unmount");
 
+	if (c->mount_opts.bulk_read == 2)
+		seq_printf(s, ",bulk_read");
+	else if (c->mount_opts.bulk_read == 1)
+		seq_printf(s, ",no_bulk_read");
+
 	return 0;
 }
 
@@ -538,6 +543,18 @@ static int init_constants_early(struct ubifs_info *c)
 	 * calculations when reporting free space.
 	 */
 	c->leb_overhead = c->leb_size % UBIFS_MAX_DATA_NODE_SZ;
+	/* Buffer size for bulk-reads */
+	c->bulk_read_buf_size = UBIFS_MAX_BULK_READ * UBIFS_MAX_DATA_NODE_SZ;
+	if (c->bulk_read_buf_size > c->leb_size)
+		c->bulk_read_buf_size = c->leb_size;
+	if (c->bulk_read_buf_size > 128 * 1024) {
+		/* Check if we can kmalloc more than 128KiB */
+		void *try = kmalloc(c->bulk_read_buf_size, GFP_KERNEL);
+
+		kfree(try);
+		if (!try)
+			c->bulk_read_buf_size = 128 * 1024;
+	}
 	return 0;
 }
 
@@ -840,17 +857,23 @@ static int check_volume_empty(struct ubifs_info *c)
  *
  * Opt_fast_unmount: do not run a journal commit before un-mounting
  * Opt_norm_unmount: run a journal commit before un-mounting
+ * Opt_bulk_read: enable bulk-reads
+ * Opt_no_bulk_read: disable bulk-reads
  * Opt_err: just end of array marker
  */
 enum {
 	Opt_fast_unmount,
 	Opt_norm_unmount,
+	Opt_bulk_read,
+	Opt_no_bulk_read,
 	Opt_err,
 };
 
 static match_table_t tokens = {
 	{Opt_fast_unmount, "fast_unmount"},
 	{Opt_norm_unmount, "norm_unmount"},
+	{Opt_bulk_read, "bulk_read"},
+	{Opt_no_bulk_read, "no_bulk_read"},
 	{Opt_err, NULL},
 };
 
@@ -888,6 +911,14 @@ static int ubifs_parse_options(struct ubifs_info *c, char *options,
 			c->mount_opts.unmount_mode = 1;
 			c->fast_unmount = 0;
 			break;
+		case Opt_bulk_read:
+			c->mount_opts.bulk_read = 2;
+			c->bulk_read = 1;
+			break;
+		case Opt_no_bulk_read:
+			c->mount_opts.bulk_read = 1;
+			c->bulk_read = 0;
+			break;
 		default:
 			ubifs_err("unrecognized mount option \"%s\" "
 				  "or missing value", p);
diff --git a/fs/ubifs/tnc.c b/fs/ubifs/tnc.c
index ba13c92fdf6a..d279012d8dd5 100644
--- a/fs/ubifs/tnc.c
+++ b/fs/ubifs/tnc.c
@@ -1492,6 +1492,289 @@ out:
 }
 
 /**
+ * ubifs_tnc_get_bu_keys - lookup keys for bulk-read.
+ * @c: UBIFS file-system description object
+ * @bu: bulk-read parameters and results
+ *
+ * This function looks up consecutive data node keys for the same inode that
+ * reside consecutively in the same LEB.
+ */
+int ubifs_tnc_get_bu_keys(struct ubifs_info *c, struct bu_info *bu)
+{
+	int n, err = 0, lnum = -1, uninitialized_var(offs);
+	int uninitialized_var(len);
+	unsigned int block = key_block(c, &bu->key);
+	struct ubifs_znode *znode;
+
+	bu->cnt = 0;
+	bu->blk_cnt = 0;
+	bu->eof = 0;
+
+	mutex_lock(&c->tnc_mutex);
+	/* Find first key */
+	err = ubifs_lookup_level0(c, &bu->key, &znode, &n);
+	if (err < 0)
+		goto out;
+	if (err) {
+		/* Key found */
+		len = znode->zbranch[n].len;
+		/* The buffer must be big enough for at least 1 node */
+		if (len > bu->buf_len) {
+			err = -EINVAL;
+			goto out;
+		}
+		/* Add this key */
+		bu->zbranch[bu->cnt++] = znode->zbranch[n];
+		bu->blk_cnt += 1;
+		lnum = znode->zbranch[n].lnum;
+		offs = ALIGN(znode->zbranch[n].offs + len, 8);
+	}
+	while (1) {
+		struct ubifs_zbranch *zbr;
+		union ubifs_key *key;
+		unsigned int next_block;
+
+		/* Find next key */
+		err = tnc_next(c, &znode, &n);
+		if (err)
+			goto out;
+		zbr = &znode->zbranch[n];
+		key = &zbr->key;
+		/* See if there is another data key for this file */
+		if (key_inum(c, key) != key_inum(c, &bu->key) ||
+		    key_type(c, key) != UBIFS_DATA_KEY) {
+			err = -ENOENT;
+			goto out;
+		}
+		if (lnum < 0) {
+			/* First key found */
+			lnum = zbr->lnum;
+			offs = ALIGN(zbr->offs + zbr->len, 8);
+			len = zbr->len;
+			if (len > bu->buf_len) {
+				err = -EINVAL;
+				goto out;
+			}
+		} else {
+			/*
+			 * The data nodes must be in consecutive positions in
+			 * the same LEB.
+			 */
+			if (zbr->lnum != lnum || zbr->offs != offs)
+				goto out;
+			offs += ALIGN(zbr->len, 8);
+			len = ALIGN(len, 8) + zbr->len;
+			/* Must not exceed buffer length */
+			if (len > bu->buf_len)
+				goto out;
+		}
+		/* Allow for holes */
+		next_block = key_block(c, key);
+		bu->blk_cnt += (next_block - block - 1);
+		if (bu->blk_cnt >= UBIFS_MAX_BULK_READ)
+			goto out;
+		block = next_block;
+		/* Add this key */
+		bu->zbranch[bu->cnt++] = *zbr;
+		bu->blk_cnt += 1;
+		/* See if we have room for more */
+		if (bu->cnt >= UBIFS_MAX_BULK_READ)
+			goto out;
+		if (bu->blk_cnt >= UBIFS_MAX_BULK_READ)
+			goto out;
+	}
+out:
+	if (err == -ENOENT) {
+		bu->eof = 1;
+		err = 0;
+	}
+	bu->gc_seq = c->gc_seq;
+	mutex_unlock(&c->tnc_mutex);
+	if (err)
+		return err;
+	/*
+	 * An enormous hole could cause bulk-read to encompass too many
+	 * page cache pages, so limit the number here.
+	 */
+	if (bu->blk_cnt >= UBIFS_MAX_BULK_READ)
+		bu->blk_cnt = UBIFS_MAX_BULK_READ;
+	/*
+	 * Ensure that bulk-read covers a whole number of page cache
+	 * pages.
+	 */
+	if (UBIFS_BLOCKS_PER_PAGE == 1 ||
+	    !(bu->blk_cnt & (UBIFS_BLOCKS_PER_PAGE - 1)))
+		return 0;
+	if (bu->eof) {
+		/* At the end of file we can round up */
+		bu->blk_cnt += UBIFS_BLOCKS_PER_PAGE - 1;
+		return 0;
+	}
+	/* Exclude data nodes that do not make up a whole page cache page */
+	block = key_block(c, &bu->key) + bu->blk_cnt;
+	block &= ~(UBIFS_BLOCKS_PER_PAGE - 1);
+	while (bu->cnt) {
+		if (key_block(c, &bu->zbranch[bu->cnt - 1].key) < block)
+			break;
+		bu->cnt -= 1;
+	}
+	return 0;
+}
+
+/**
+ * read_wbuf - bulk-read from a LEB with a wbuf.
+ * @wbuf: wbuf that may overlap the read
+ * @buf: buffer into which to read
+ * @len: read length
+ * @lnum: LEB number from which to read
+ * @offs: offset from which to read
+ *
+ * This function returns %0 on success or a negative error code on failure.
+ */
+static int read_wbuf(struct ubifs_wbuf *wbuf, void *buf, int len, int lnum,
+		     int offs)
+{
+	const struct ubifs_info *c = wbuf->c;
+	int rlen, overlap;
+
+	dbg_io("LEB %d:%d, length %d", lnum, offs, len);
+	ubifs_assert(wbuf && lnum >= 0 && lnum < c->leb_cnt && offs >= 0);
+	ubifs_assert(!(offs & 7) && offs < c->leb_size);
+	ubifs_assert(offs + len <= c->leb_size);
+
+	spin_lock(&wbuf->lock);
+	overlap = (lnum == wbuf->lnum && offs + len > wbuf->offs);
+	if (!overlap) {
+		/* We may safely unlock the write-buffer and read the data */
+		spin_unlock(&wbuf->lock);
+		return ubi_read(c->ubi, lnum, buf, offs, len);
+	}
+
+	/* Don't read under wbuf */
+	rlen = wbuf->offs - offs;
+	if (rlen < 0)
+		rlen = 0;
+
+	/* Copy the rest from the write-buffer */
+	memcpy(buf + rlen, wbuf->buf + offs + rlen - wbuf->offs, len - rlen);
+	spin_unlock(&wbuf->lock);
+
+	if (rlen > 0)
+		/* Read everything that goes before write-buffer */
+		return ubi_read(c->ubi, lnum, buf, offs, rlen);
+
+	return 0;
+}
+
+/**
+ * validate_data_node - validate a data node for bulk-read.
+ * @c: UBIFS file-system description object
+ * @buf: buffer containing data node to validate
+ * @zbr: zbranch of data node to validate
+ *
+ * This function returns %0 on success or a negative error code on failure.
+ */
+static int validate_data_node(struct ubifs_info *c, void *buf,
+			      struct ubifs_zbranch *zbr)
+{
+	union ubifs_key key1;
+	struct ubifs_ch *ch = buf;
+	int err, len;
+
+	if (ch->node_type != UBIFS_DATA_NODE) {
+		ubifs_err("bad node type (%d but expected %d)",
+			  ch->node_type, UBIFS_DATA_NODE);
+		goto out_err;
+	}
+
+	err = ubifs_check_node(c, buf, zbr->lnum, zbr->offs, 0);
+	if (err) {
+		ubifs_err("expected node type %d", UBIFS_DATA_NODE);
+		goto out;
+	}
+
+	len = le32_to_cpu(ch->len);
+	if (len != zbr->len) {
+		ubifs_err("bad node length %d, expected %d", len, zbr->len);
+		goto out_err;
+	}
+
+	/* Make sure the key of the read node is correct */
+	key_read(c, buf + UBIFS_KEY_OFFSET, &key1);
+	if (!keys_eq(c, &zbr->key, &key1)) {
+		ubifs_err("bad key in node at LEB %d:%d",
+			  zbr->lnum, zbr->offs);
+		dbg_tnc("looked for key %s found node's key %s",
+			DBGKEY(&zbr->key), DBGKEY1(&key1));
+		goto out_err;
+	}
+
+	return 0;
+
+out_err:
+	err = -EINVAL;
+out:
+	ubifs_err("bad node at LEB %d:%d", zbr->lnum, zbr->offs);
+	dbg_dump_node(c, buf);
+	dbg_dump_stack();
+	return err;
+}
+
+/**
+ * ubifs_tnc_bulk_read - read a number of data nodes in one go.
+ * @c: UBIFS file-system description object
+ * @bu: bulk-read parameters and results
+ *
+ * This function reads and validates the data nodes that were identified by
+ * the 'ubifs_tnc_get_bu_keys()' function. It returns %0 on success, %-EAGAIN
+ * to indicate a race with GC, or another negative error code on failure.
+ */
+int ubifs_tnc_bulk_read(struct ubifs_info *c, struct bu_info *bu)
+{
+	int lnum = bu->zbranch[0].lnum, offs = bu->zbranch[0].offs, len, err, i;
+	struct ubifs_wbuf *wbuf;
+	void *buf;
+
+	len = bu->zbranch[bu->cnt - 1].offs;
+	len += bu->zbranch[bu->cnt - 1].len - offs;
+	if (len > bu->buf_len) {
+		ubifs_err("buffer too small %d vs %d", bu->buf_len, len);
+		return -EINVAL;
+	}
+
+	/* Do the read */
+	wbuf = ubifs_get_wbuf(c, lnum);
+	if (wbuf)
+		err = read_wbuf(wbuf, bu->buf, len, lnum, offs);
+	else
+		err = ubi_read(c->ubi, lnum, bu->buf, offs, len);
+
+	/* Check for a race with GC */
+	if (maybe_leb_gced(c, lnum, bu->gc_seq))
+		return -EAGAIN;
+
+	if (err && err != -EBADMSG) {
+		ubifs_err("failed to read from LEB %d:%d, error %d",
+			  lnum, offs, err);
+		dbg_dump_stack();
+		dbg_tnc("key %s", DBGKEY(&bu->key));
+		return err;
+	}
+
+	/* Validate the nodes read */
+	buf = bu->buf;
+	for (i = 0; i < bu->cnt; i++) {
+		err = validate_data_node(c, buf, &bu->zbranch[i]);
+		if (err)
+			return err;
+		buf = buf + ALIGN(bu->zbranch[i].len, 8);
+	}
+
+	return 0;
+}
+
+/**
  * do_lookup_nm - look up a "hashed" node.
  * @c: UBIFS file-system description object
  * @key: node key to lookup
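
The overlap handling in read_wbuf() splits one logical read into a flash part and an in-memory part. Below is a standalone model of that split, with hypothetical names and bounds checks elided, so the pointer arithmetic can be followed outside the kernel:

#include <string.h>

struct demo_wbuf {
	int lnum;      /* LEB the write-buffer belongs to */
	int offs;      /* LEB offset the buffered data starts at */
	char data[512];
};

/* Fill dst[0..len) for a read at (lnum, offs); flash_read is a stand-in
 * for ubi_read() */
static int demo_read(const struct demo_wbuf *w,
		     int (*flash_read)(int lnum, void *dst, int offs, int len),
		     int lnum, void *dst, int offs, int len)
{
	int rlen;

	if (lnum != w->lnum || offs + len <= w->offs)
		return flash_read(lnum, dst, offs, len); /* no overlap */

	rlen = w->offs - offs;  /* bytes that live below the write-buffer */
	if (rlen < 0)
		rlen = 0;
	/* The overlapping tail comes straight from the write-buffer */
	memcpy((char *)dst + rlen, w->data + (offs + rlen - w->offs),
	       len - rlen);
	/* Whatever precedes the write-buffer still comes from flash */
	return rlen ? flash_read(lnum, dst, offs, rlen) : 0;
}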
diff --git a/fs/ubifs/ubifs.h b/fs/ubifs/ubifs.h
index ce8654928aad..8513239ea8a0 100644
--- a/fs/ubifs/ubifs.h
+++ b/fs/ubifs/ubifs.h
@@ -142,6 +142,9 @@
 /* Maximum expected tree height for use by bottom_up_buf */
 #define BOTTOM_UP_HEIGHT 64
 
+/* Maximum number of data nodes to bulk-read */
+#define UBIFS_MAX_BULK_READ 32
+
 /*
  * Lockdep classes for UBIFS inode @ui_mutex.
  */
@@ -329,8 +332,8 @@ struct ubifs_gced_idx_leb {
  * @dirty: non-zero if the inode is dirty
  * @xattr: non-zero if this is an extended attribute inode
  * @ui_mutex: serializes inode write-back with the rest of VFS operations,
- *            serializes "clean <-> dirty" state changes, protects @dirty,
- *            @ui_size, and @xattr_size
+ *            serializes "clean <-> dirty" state changes, serializes bulk-read,
+ *            protects @dirty, @ui_size, and @xattr_size
  * @ui_lock: protects @synced_i_size
  * @synced_i_size: synchronized size of inode, i.e. the value of inode size
  *                 currently stored on the flash; used only for regular file
@@ -338,6 +341,9 @@ struct ubifs_gced_idx_leb {
  * @ui_size: inode size used by UBIFS when writing to flash
  * @flags: inode flags (@UBIFS_COMPR_FL, etc)
  * @compr_type: default compression type used for this inode
+ * @last_page_read: page number of last page read (for bulk read)
+ * @read_in_a_row: number of consecutive pages read in a row (for bulk read)
+ * @bulk_read: indicates whether bulk-read should be used
  * @data_len: length of the data attached to the inode
  * @data: inode's data
  *
@@ -385,6 +391,9 @@ struct ubifs_gced_idx_leb {
 	loff_t ui_size;
 	int flags;
 	int compr_type;
+	pgoff_t last_page_read;
+	pgoff_t read_in_a_row;
+	int bulk_read;
 	int data_len;
 	void *data;
 };
@@ -744,6 +753,28 @@ struct ubifs_znode {
 };
 
 /**
+ * struct bu_info - bulk-read information
+ * @key: first data node key
+ * @zbranch: zbranches of data nodes to bulk read
+ * @buf: buffer to read into
+ * @buf_len: buffer length
+ * @gc_seq: GC sequence number to detect races with GC
+ * @cnt: number of data nodes for bulk read
+ * @blk_cnt: number of data blocks including holes
+ * @eof: end of file reached
+ */
+struct bu_info {
+	union ubifs_key key;
+	struct ubifs_zbranch zbranch[UBIFS_MAX_BULK_READ];
+	void *buf;
+	int buf_len;
+	int gc_seq;
+	int cnt;
+	int blk_cnt;
+	int eof;
+};
+
+/**
  * struct ubifs_node_range - node length range description data structure.
  * @len: fixed node length
  * @min_len: minimum possible node length
@@ -862,9 +893,11 @@ struct ubifs_orphan {
 /**
  * struct ubifs_mount_opts - UBIFS-specific mount options information.
  * @unmount_mode: selected unmount mode (%0 default, %1 normal, %2 fast)
+ * @bulk_read: selected bulk-read mode (%0 default, %1 disabled, %2 enabled)
  */
 struct ubifs_mount_opts {
 	unsigned int unmount_mode:2;
+	unsigned int bulk_read:2;
 };
 
 /**
@@ -965,6 +998,9 @@ struct ubifs_mount_opts
  * @old_leb_cnt: count of logical eraseblocks before re-size
  * @ro_media: the underlying UBI volume is read-only
  *
+ * @bulk_read: enable bulk-reads
+ * @bulk_read_buf_size: buffer size for bulk-reads
+ *
  * @dirty_pg_cnt: number of dirty pages (not used)
  * @dirty_zn_cnt: number of dirty znodes
  * @clean_zn_cnt: number of clean znodes
@@ -1205,6 +1241,9 @@ struct ubifs_info {
 	int old_leb_cnt;
 	int ro_media;
 
+	int bulk_read;
+	int bulk_read_buf_size;
+
 	atomic_long_t dirty_pg_cnt;
 	atomic_long_t dirty_zn_cnt;
 	atomic_long_t clean_zn_cnt;
@@ -1490,6 +1529,8 @@ void destroy_old_idx(struct ubifs_info *c);
 int is_idx_node_in_tnc(struct ubifs_info *c, union ubifs_key *key, int level,
 		       int lnum, int offs);
 int insert_old_idx_znode(struct ubifs_info *c, struct ubifs_znode *znode);
+int ubifs_tnc_get_bu_keys(struct ubifs_info *c, struct bu_info *bu);
+int ubifs_tnc_bulk_read(struct ubifs_info *c, struct bu_info *bu);
 
 /* tnc_misc.c */
 struct ubifs_znode *ubifs_tnc_levelorder_next(struct ubifs_znode *zr,
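
One non-obvious piece of ubifs_tnc_get_bu_keys() above is the trimming of @blk_cnt and @cnt so a bulk-read always covers whole page cache pages. The rounding relies on UBIFS_BLOCKS_PER_PAGE being a power of two; here is a tiny sketch with an assumed 16-blocks-per-page layout (e.g. 64 KiB pages over 4 KiB blocks):

#include <stdio.h>

#define DEMO_BLOCKS_PER_PAGE 16 /* must be a power of two */

int main(void)
{
	int blk_cnt = 37;
	/* Same mask trick as in ubifs_tnc_get_bu_keys(): drop the
	 * blocks of the trailing, partially-covered page */
	int whole = blk_cnt & ~(DEMO_BLOCKS_PER_PAGE - 1);

	printf("%d blocks -> %d blocks (%d whole pages)\n",
	       blk_cnt, whole, whole / DEMO_BLOCKS_PER_PAGE);
	return 0;
}

With these numbers, 37 blocks round down to 32, i.e. two whole pages; only at end-of-file does the function round up instead.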