path: root/fs/ubifs/tnc.c
author		Adrian Hunter <ext-adrian.hunter@nokia.com>	2008-09-02 09:29:46 -0400
committer	Artem Bityutskiy <Artem.Bityutskiy@nokia.com>	2008-09-30 04:12:56 -0400
commit		4793e7c5e1c88382ead18db5ca072bac54467318 (patch)
tree		07e5e99d988b28fed07d5a01141169362c5a007d /fs/ubifs/tnc.c
parent		a70948b564e9f6cb81115c606d46f5b74a77b7c2 (diff)
UBIFS: add bulk-read facility
Some flash media are capable of reading sequentially at faster rates. The UBIFS bulk-read facility is designed to take advantage of that by reading, in one go, consecutive data nodes that are also located consecutively in the same LEB. Read speed on an ARM platform with OneNAND goes from 17 MiB/s to 19 MiB/s.

Signed-off-by: Adrian Hunter <ext-adrian.hunter@nokia.com>
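The two entry points added by this patch are designed to be driven as a pair: the caller describes the first data key and a destination buffer in a 'struct bu_info', asks the TNC for a run of consecutive keys with ubifs_tnc_get_bu_keys(), then reads and validates the whole run with ubifs_tnc_bulk_read(), which can also report a race with garbage collection. A minimal caller sketch, assuming a pre-allocated 'bu'; the helper name and the fallback policy are illustrative, not part of the patch:

	/*
	 * Illustrative sketch, not part of the patch: drive the two new TNC
	 * helpers as a bulk-reading caller would. Assumes bu->key holds the
	 * first data key and bu->buf/bu->buf_len describe a pre-allocated
	 * destination buffer.
	 */
	static int bulk_read_sketch(struct ubifs_info *c, struct bu_info *bu)
	{
		int err;

		/* Collect consecutive data node keys in the same LEB */
		err = ubifs_tnc_get_bu_keys(c, bu);
		if (err)
			return err;
		if (bu->cnt == 0)
			return 0; /* nothing bulk-readable (hole or end of file) */

		/* Read and validate all of the nodes in one go */
		err = ubifs_tnc_bulk_read(c, bu);
		if (err == -EAGAIN)
			return 0; /* raced with GC: fall back to node-by-node reads */
		return err;
	}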
Diffstat (limited to 'fs/ubifs/tnc.c')
-rw-r--r--	fs/ubifs/tnc.c	283
1 file changed, 283 insertions(+), 0 deletions(-)
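One detail worth noting before the diff: nodes inside a LEB start at 8-byte-aligned offsets, so the patch computes where the next consecutive node must begin with ALIGN(offs + len, 8). A standalone illustration with made-up numbers; ALIGN is restated here as the usual round-up macro so the snippet compiles outside the kernel:

	#include <stdio.h>

	/* Round x up to the next multiple of a (a power of 2), as in the kernel */
	#define ALIGN(x, a) (((x) + (a) - 1) & ~((a) - 1))

	int main(void)
	{
		int offs = 0, len = 4141; /* made-up node position and length */

		/*
		 * A 4141-byte node at offset 0 is padded out to 4144 bytes, so
		 * a follow-on node counts as consecutive only at offset 4144.
		 */
		printf("next expected offs: %d\n", ALIGN(offs + len, 8)); /* 4144 */
		return 0;
	}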
diff --git a/fs/ubifs/tnc.c b/fs/ubifs/tnc.c
index ba13c92fdf6a..d279012d8dd5 100644
--- a/fs/ubifs/tnc.c
+++ b/fs/ubifs/tnc.c
@@ -1492,6 +1492,289 @@ out:
 }
 
 /**
+ * ubifs_tnc_get_bu_keys - look up keys for bulk-read.
+ * @c: UBIFS file-system description object
+ * @bu: bulk-read parameters and results
+ *
+ * Look up consecutive data node keys for the same inode that reside
+ * consecutively in the same LEB.
+ */
+int ubifs_tnc_get_bu_keys(struct ubifs_info *c, struct bu_info *bu)
+{
+	int n, err = 0, lnum = -1, uninitialized_var(offs);
+	int uninitialized_var(len);
+	unsigned int block = key_block(c, &bu->key);
+	struct ubifs_znode *znode;
+
+	bu->cnt = 0;
+	bu->blk_cnt = 0;
+	bu->eof = 0;
+
+	mutex_lock(&c->tnc_mutex);
+	/* Find first key */
+	err = ubifs_lookup_level0(c, &bu->key, &znode, &n);
+	if (err < 0)
+		goto out;
+	if (err) {
+		/* Key found */
+		len = znode->zbranch[n].len;
+		/* The buffer must be big enough for at least 1 node */
+		if (len > bu->buf_len) {
+			err = -EINVAL;
+			goto out;
+		}
+		/* Add this key */
+		bu->zbranch[bu->cnt++] = znode->zbranch[n];
+		bu->blk_cnt += 1;
+		lnum = znode->zbranch[n].lnum;
+		offs = ALIGN(znode->zbranch[n].offs + len, 8);
+	}
+	while (1) {
+		struct ubifs_zbranch *zbr;
+		union ubifs_key *key;
+		unsigned int next_block;
+
+		/* Find next key */
+		err = tnc_next(c, &znode, &n);
+		if (err)
+			goto out;
+		zbr = &znode->zbranch[n];
+		key = &zbr->key;
+		/* See if there is another data key for this file */
+		if (key_inum(c, key) != key_inum(c, &bu->key) ||
+		    key_type(c, key) != UBIFS_DATA_KEY) {
+			err = -ENOENT;
+			goto out;
+		}
+		if (lnum < 0) {
+			/* First key found */
+			lnum = zbr->lnum;
+			offs = ALIGN(zbr->offs + zbr->len, 8);
+			len = zbr->len;
+			if (len > bu->buf_len) {
+				err = -EINVAL;
+				goto out;
+			}
+		} else {
+			/*
+			 * The data nodes must be in consecutive positions in
+			 * the same LEB.
+			 */
+			if (zbr->lnum != lnum || zbr->offs != offs)
+				goto out;
+			offs += ALIGN(zbr->len, 8);
+			len = ALIGN(len, 8) + zbr->len;
+			/* Must not exceed buffer length */
+			if (len > bu->buf_len)
+				goto out;
+		}
+		/* Allow for holes */
+		next_block = key_block(c, key);
+		bu->blk_cnt += (next_block - block - 1);
+		if (bu->blk_cnt >= UBIFS_MAX_BULK_READ)
+			goto out;
+		block = next_block;
+		/* Add this key */
+		bu->zbranch[bu->cnt++] = *zbr;
+		bu->blk_cnt += 1;
+		/* See if we have room for more */
+		if (bu->cnt >= UBIFS_MAX_BULK_READ)
+			goto out;
+		if (bu->blk_cnt >= UBIFS_MAX_BULK_READ)
+			goto out;
+	}
+out:
+	if (err == -ENOENT) {
+		bu->eof = 1;
+		err = 0;
+	}
+	bu->gc_seq = c->gc_seq;
+	mutex_unlock(&c->tnc_mutex);
+	if (err)
+		return err;
+	/*
+	 * An enormous hole could cause bulk-read to encompass too many
+	 * page cache pages, so limit the number here.
+	 */
+	if (bu->blk_cnt >= UBIFS_MAX_BULK_READ)
+		bu->blk_cnt = UBIFS_MAX_BULK_READ;
+	/*
+	 * Ensure that bulk-read covers a whole number of page cache
+	 * pages.
+	 */
+	if (UBIFS_BLOCKS_PER_PAGE == 1 ||
+	    !(bu->blk_cnt & (UBIFS_BLOCKS_PER_PAGE - 1)))
+		return 0;
+	if (bu->eof) {
+		/* At the end of file we can round up */
+		bu->blk_cnt += UBIFS_BLOCKS_PER_PAGE - 1;
+		return 0;
+	}
+	/* Exclude data nodes that do not make up a whole page cache page */
+	block = key_block(c, &bu->key) + bu->blk_cnt;
+	block &= ~(UBIFS_BLOCKS_PER_PAGE - 1);
+	while (bu->cnt) {
+		if (key_block(c, &bu->zbranch[bu->cnt - 1].key) < block)
+			break;
+		bu->cnt -= 1;
+	}
+	return 0;
+}
+
+/**
+ * read_wbuf - bulk-read from a LEB with a wbuf.
+ * @wbuf: wbuf that may overlap the read
+ * @buf: buffer into which to read
+ * @len: read length
+ * @lnum: LEB number from which to read
+ * @offs: offset from which to read
+ *
+ * This function returns %0 on success or a negative error code on failure.
+ */
+static int read_wbuf(struct ubifs_wbuf *wbuf, void *buf, int len, int lnum,
+		     int offs)
+{
+	const struct ubifs_info *c = wbuf->c;
+	int rlen, overlap;
+
+	dbg_io("LEB %d:%d, length %d", lnum, offs, len);
+	ubifs_assert(wbuf && lnum >= 0 && lnum < c->leb_cnt && offs >= 0);
+	ubifs_assert(!(offs & 7) && offs < c->leb_size);
+	ubifs_assert(offs + len <= c->leb_size);
+
+	spin_lock(&wbuf->lock);
+	overlap = (lnum == wbuf->lnum && offs + len > wbuf->offs);
+	if (!overlap) {
+		/* We may safely unlock the write-buffer and read the data */
+		spin_unlock(&wbuf->lock);
+		return ubi_read(c->ubi, lnum, buf, offs, len);
+	}
+
+	/* Don't read under wbuf */
+	rlen = wbuf->offs - offs;
+	if (rlen < 0)
+		rlen = 0;
+
+	/* Copy the rest from the write-buffer */
+	memcpy(buf + rlen, wbuf->buf + offs + rlen - wbuf->offs, len - rlen);
+	spin_unlock(&wbuf->lock);
+
+	if (rlen > 0)
+		/* Read everything that goes before write-buffer */
+		return ubi_read(c->ubi, lnum, buf, offs, rlen);
+
+	return 0;
+}
+
+/**
+ * validate_data_node - validate data nodes for bulk-read.
+ * @c: UBIFS file-system description object
+ * @buf: buffer containing data node to validate
+ * @zbr: zbranch of data node to validate
+ *
+ * This function returns %0 on success or a negative error code on failure.
+ */
+static int validate_data_node(struct ubifs_info *c, void *buf,
+			      struct ubifs_zbranch *zbr)
+{
+	union ubifs_key key1;
+	struct ubifs_ch *ch = buf;
+	int err, len;
+
+	if (ch->node_type != UBIFS_DATA_NODE) {
+		ubifs_err("bad node type (%d but expected %d)",
+			  ch->node_type, UBIFS_DATA_NODE);
+		goto out_err;
+	}
+
+	err = ubifs_check_node(c, buf, zbr->lnum, zbr->offs, 0);
+	if (err) {
+		ubifs_err("expected node type %d", UBIFS_DATA_NODE);
+		goto out;
+	}
+
+	len = le32_to_cpu(ch->len);
+	if (len != zbr->len) {
+		ubifs_err("bad node length %d, expected %d", len, zbr->len);
+		goto out_err;
+	}
+
+	/* Make sure the key of the read node is correct */
+	key_read(c, buf + UBIFS_KEY_OFFSET, &key1);
+	if (!keys_eq(c, &zbr->key, &key1)) {
+		ubifs_err("bad key in node at LEB %d:%d",
+			  zbr->lnum, zbr->offs);
+		dbg_tnc("looked for key %s found node's key %s",
+			DBGKEY(&zbr->key), DBGKEY1(&key1));
+		goto out_err;
+	}
+
+	return 0;
+
+out_err:
+	err = -EINVAL;
+out:
+	ubifs_err("bad node at LEB %d:%d", zbr->lnum, zbr->offs);
+	dbg_dump_node(c, buf);
+	dbg_dump_stack();
+	return err;
+}
+
+/**
+ * ubifs_tnc_bulk_read - read a number of data nodes in one go.
+ * @c: UBIFS file-system description object
+ * @bu: bulk-read parameters and results
+ *
+ * This function reads and validates the data nodes that were identified by
+ * the 'ubifs_tnc_get_bu_keys()' function. It returns %0 on success,
+ * -EAGAIN to indicate a race with GC, or another negative error code on
+ * failure.
+ */
+int ubifs_tnc_bulk_read(struct ubifs_info *c, struct bu_info *bu)
+{
+	int lnum = bu->zbranch[0].lnum, offs = bu->zbranch[0].offs, len, err, i;
+	struct ubifs_wbuf *wbuf;
+	void *buf;
+
+	len = bu->zbranch[bu->cnt - 1].offs;
+	len += bu->zbranch[bu->cnt - 1].len - offs;
+	if (len > bu->buf_len) {
+		ubifs_err("buffer too small %d vs %d", bu->buf_len, len);
+		return -EINVAL;
+	}
+
+	/* Do the read */
+	wbuf = ubifs_get_wbuf(c, lnum);
+	if (wbuf)
+		err = read_wbuf(wbuf, bu->buf, len, lnum, offs);
+	else
+		err = ubi_read(c->ubi, lnum, bu->buf, offs, len);
+
+	/* Check for a race with GC */
+	if (maybe_leb_gced(c, lnum, bu->gc_seq))
+		return -EAGAIN;
+
+	if (err && err != -EBADMSG) {
+		ubifs_err("failed to read from LEB %d:%d, error %d",
+			  lnum, offs, err);
+		dbg_dump_stack();
+		dbg_tnc("key %s", DBGKEY(&bu->key));
+		return err;
+	}
+
+	/* Validate the nodes read */
+	buf = bu->buf;
+	for (i = 0; i < bu->cnt; i++) {
+		err = validate_data_node(c, buf, &bu->zbranch[i]);
+		if (err)
+			return err;
+		buf = buf + ALIGN(bu->zbranch[i].len, 8);
+	}
+
+	return 0;
+}
+
+/**
  * do_lookup_nm - look up a "hashed" node.
  * @c: UBIFS file-system description object
  * @key: node key to lookup