path: root/fs/btrfs/extent_io.c
author     David Woodhouse <David.Woodhouse@intel.com>   2008-08-20 08:51:49 -0400
committer  Chris Mason <chris.mason@oracle.com>          2008-09-25 11:04:06 -0400
commit     902b22f341efa00be802418a0a8c57bddcd269a6 (patch)
tree       e8fdfb0a33c19d615e46749217e62c974d3a3611 /fs/btrfs/extent_io.c
parent     53863232ef961778aa414b700ed88a48e8e871e6 (diff)
Btrfs: Remove broken optimisations in end_bio functions.
These ended up freeing objects while they were still using them. Under guidance from Chris, just rip out the 'clever' bits and do things the simple way.

Signed-off-by: David Woodhouse <David.Woodhouse@intel.com>
Signed-off-by: Chris Mason <chris.mason@oracle.com>
Diffstat (limited to 'fs/btrfs/extent_io.c')
-rw-r--r--   fs/btrfs/extent_io.c   159
1 file changed, 21 insertions(+), 138 deletions(-)
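
For context, the sketch below condenses what the write-completion path looks like once the cached-state machinery is ripped out: the extent tree is looked up from each page instead of being carried as an extent_state pointer in bio->bi_private, and the range-based helper does its own locked walk of the extent states. This is illustrative only, not the patched function verbatim; the writepage hooks, error handling, partial-page bookkeeping and the pre-2.6.24 #if guard are omitted, and the kernel-internal types and helpers (struct bio, BTRFS_I(), clear_extent_writeback(), PAGE_CACHE_SHIFT) are assumed from the btrfs tree of this era.

/*
 * Illustrative sketch only: simplified write-completion flow after this
 * change.  Hooks, error paths and the old-kernel compatibility guard are
 * left out; kernel-internal context is assumed.
 */
#include "extent_io.h"
#include "btrfs_inode.h"

static void end_bio_extent_writepage_sketch(struct bio *bio)
{
        struct bio_vec *bvec = bio->bi_io_vec + bio->bi_vcnt - 1;
        struct extent_io_tree *tree;
        u64 start;
        u64 end;

        do {
                struct page *page = bvec->bv_page;

                /* Derive the tree from the page on every iteration instead
                 * of trusting an extent_state cached in bio->bi_private. */
                tree = &BTRFS_I(page->mapping->host)->io_tree;

                start = ((u64)page->index << PAGE_CACHE_SHIFT) +
                         bvec->bv_offset;
                end = start + bvec->bv_len - 1;
                bvec--;

                /* The range-based helper takes tree->lock and walks the
                 * extent states itself, so no state pointer can be freed
                 * out from under the completion handler. */
                clear_extent_writeback(tree, start, end, GFP_ATOMIC);

                /* The real function only does this for whole-page bvecs. */
                end_page_writeback(page);
        } while (bvec >= bio->bi_io_vec);

        bio_put(bio);
}

The same pattern, looking up the io_tree via BTRFS_I(page->mapping->host) and passing NULL where a cached state used to go, is what the readpage and prepare-write completions in the diff below switch to as well.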
diff --git a/fs/btrfs/extent_io.c b/fs/btrfs/extent_io.c
index f46f88620c70..83ba0c328722 100644
--- a/fs/btrfs/extent_io.c
+++ b/fs/btrfs/extent_io.c
@@ -15,6 +15,8 @@
 #include "extent_io.h"
 #include "extent_map.h"
 #include "compat.h"
+#include "ctree.h"
+#include "btrfs_inode.h"
 
 /* temporary define until extent_map moves out of btrfs */
 struct kmem_cache *btrfs_cache_create(const char *name, size_t size,
@@ -1394,15 +1396,11 @@ static int end_bio_extent_writepage(struct bio *bio,
 {
         int uptodate = err == 0;
         struct bio_vec *bvec = bio->bi_io_vec + bio->bi_vcnt - 1;
-        struct extent_state *state = bio->bi_private;
-        struct extent_io_tree *tree = state->tree;
-        struct rb_node *node;
+        struct extent_io_tree *tree;
         u64 start;
         u64 end;
-        u64 cur;
         int whole_page;
         int ret;
-        unsigned long flags;
 
 #if LINUX_VERSION_CODE <= KERNEL_VERSION(2,6,23)
         if (bio->bi_size)
@@ -1410,6 +1408,8 @@ static int end_bio_extent_writepage(struct bio *bio,
 #endif
         do {
                 struct page *page = bvec->bv_page;
+                tree = &BTRFS_I(page->mapping->host)->io_tree;
+
                 start = ((u64)page->index << PAGE_CACHE_SHIFT) +
                          bvec->bv_offset;
                 end = start + bvec->bv_len - 1;
@@ -1423,7 +1423,7 @@ static int end_bio_extent_writepage(struct bio *bio,
                         prefetchw(&bvec->bv_page->flags);
                 if (tree->ops && tree->ops->writepage_end_io_hook) {
                         ret = tree->ops->writepage_end_io_hook(page, start,
-                                                      end, state, uptodate);
+                                                      end, NULL, uptodate);
                         if (ret)
                                 uptodate = 0;
                 }
@@ -1431,9 +1431,8 @@ static int end_bio_extent_writepage(struct bio *bio,
                 if (!uptodate && tree->ops &&
                     tree->ops->writepage_io_failed_hook) {
                         ret = tree->ops->writepage_io_failed_hook(bio, page,
-                                                      start, end, state);
+                                                      start, end, NULL);
                         if (ret == 0) {
-                                state = NULL;
                                 uptodate = (err == 0);
                                 continue;
                         }
@@ -1445,68 +1444,7 @@ static int end_bio_extent_writepage(struct bio *bio,
                         SetPageError(page);
                 }
 
-                /*
-                 * bios can get merged in funny ways, and so we need to
-                 * be careful with the state variable.  We know the
-                 * state won't be merged with others because it has
-                 * WRITEBACK set, but we can't be sure each biovec is
-                 * sequential in the file.  So, if our cached state
-                 * doesn't match the expected end, search the tree
-                 * for the correct one.
-                 */
-
-                spin_lock_irqsave(&tree->lock, flags);
-                if (!state || state->end != end) {
-                        state = NULL;
-                        node = __etree_search(tree, start, NULL, NULL);
-                        if (node) {
-                                state = rb_entry(node, struct extent_state,
-                                                 rb_node);
-                                if (state->end != end ||
-                                    !(state->state & EXTENT_WRITEBACK))
-                                        state = NULL;
-                        }
-                        if (!state) {
-                                spin_unlock_irqrestore(&tree->lock, flags);
-                                clear_extent_writeback(tree, start,
-                                                       end, GFP_ATOMIC);
-                                goto next_io;
-                        }
-                }
-                cur = end;
-                while(1) {
-                        struct extent_state *clear = state;
-                        cur = state->start;
-                        node = rb_prev(&state->rb_node);
-                        if (node) {
-                                state = rb_entry(node,
-                                                 struct extent_state,
-                                                 rb_node);
-                        } else {
-                                state = NULL;
-                        }
-
-                        clear_state_bit(tree, clear, EXTENT_WRITEBACK,
-                                        1, 0);
-                        if (cur == start)
-                                break;
-                        if (cur < start) {
-                                WARN_ON(1);
-                                break;
-                        }
-                        if (!node)
-                                break;
-                }
-                /* before releasing the lock, make sure the next state
-                 * variable has the expected bits set and corresponds
-                 * to the correct offsets in the file
-                 */
-                if (state && (state->end + 1 != start ||
-                              !(state->state & EXTENT_WRITEBACK))) {
-                        state = NULL;
-                }
-                spin_unlock_irqrestore(&tree->lock, flags);
-next_io:
+                clear_extent_writeback(tree, start, end, GFP_ATOMIC);
 
                 if (whole_page)
                         end_page_writeback(page);
@@ -1539,13 +1477,9 @@ static int end_bio_extent_readpage(struct bio *bio,
 {
         int uptodate = test_bit(BIO_UPTODATE, &bio->bi_flags);
         struct bio_vec *bvec = bio->bi_io_vec + bio->bi_vcnt - 1;
-        struct extent_state *state = bio->bi_private;
-        struct extent_io_tree *tree = state->tree;
-        struct rb_node *node;
+        struct extent_io_tree *tree;
         u64 start;
         u64 end;
-        u64 cur;
-        unsigned long flags;
         int whole_page;
         int ret;
 
@@ -1556,6 +1490,8 @@ static int end_bio_extent_readpage(struct bio *bio,
 
         do {
                 struct page *page = bvec->bv_page;
+                tree = &BTRFS_I(page->mapping->host)->io_tree;
+
                 start = ((u64)page->index << PAGE_CACHE_SHIFT) +
                         bvec->bv_offset;
                 end = start + bvec->bv_len - 1;
@@ -1570,80 +1506,26 @@ static int end_bio_extent_readpage(struct bio *bio,
 
                 if (uptodate && tree->ops && tree->ops->readpage_end_io_hook) {
                         ret = tree->ops->readpage_end_io_hook(page, start, end,
-                                        state);
+                                        NULL);
                         if (ret)
                                 uptodate = 0;
                 }
                 if (!uptodate && tree->ops &&
                     tree->ops->readpage_io_failed_hook) {
                         ret = tree->ops->readpage_io_failed_hook(bio, page,
-                                        start, end, state);
+                                        start, end, NULL);
                         if (ret == 0) {
-                                state = NULL;
                                 uptodate =
                                         test_bit(BIO_UPTODATE, &bio->bi_flags);
                                 continue;
                         }
                 }
 
-                spin_lock_irqsave(&tree->lock, flags);
-                if (!state || state->end != end) {
-                        state = NULL;
-                        node = __etree_search(tree, start, NULL, NULL);
-                        if (node) {
-                                state = rb_entry(node, struct extent_state,
-                                                 rb_node);
-                                if (state->end != end ||
-                                    !(state->state & EXTENT_LOCKED))
-                                        state = NULL;
-                        }
-                        if (!state) {
-                                spin_unlock_irqrestore(&tree->lock, flags);
-                                if (uptodate)
-                                        set_extent_uptodate(tree, start, end,
-                                                            GFP_ATOMIC);
-                                unlock_extent(tree, start, end, GFP_ATOMIC);
-                                goto next_io;
-                        }
-                }
+                if (uptodate)
+                        set_extent_uptodate(tree, start, end,
+                                            GFP_ATOMIC);
+                unlock_extent(tree, start, end, GFP_ATOMIC);
 
-                cur = end;
-                while(1) {
-                        struct extent_state *clear = state;
-                        cur = state->start;
-                        node = rb_prev(&state->rb_node);
-                        if (node) {
-                                state = rb_entry(node,
-                                                 struct extent_state,
-                                                 rb_node);
-                        } else {
-                                state = NULL;
-                        }
-                        if (uptodate) {
-                                set_state_cb(tree, clear, EXTENT_UPTODATE);
-                                clear->state |= EXTENT_UPTODATE;
-                        }
-                        clear_state_bit(tree, clear, EXTENT_LOCKED,
-                                        1, 0);
-                        if (cur == start)
-                                break;
-                        if (cur < start) {
-                                WARN_ON(1);
-                                break;
-                        }
-                        if (!node)
-                                break;
-                }
-                /* before releasing the lock, make sure the next state
-                 * variable has the expected bits set and corresponds
-                 * to the correct offsets in the file
-                 */
-                if (state && (state->end + 1 != start ||
-                              !(state->state & EXTENT_LOCKED))) {
-                        state = NULL;
-                }
-                spin_unlock_irqrestore(&tree->lock, flags);
-next_io:
                 if (whole_page) {
                         if (uptodate) {
                                 SetPageUptodate(page);
@@ -1683,8 +1565,7 @@ static int end_bio_extent_preparewrite(struct bio *bio,
 {
         const int uptodate = test_bit(BIO_UPTODATE, &bio->bi_flags);
         struct bio_vec *bvec = bio->bi_io_vec + bio->bi_vcnt - 1;
-        struct extent_state *state = bio->bi_private;
-        struct extent_io_tree *tree = state->tree;
+        struct extent_io_tree *tree;
         u64 start;
         u64 end;
 
@@ -1695,6 +1576,8 @@ static int end_bio_extent_preparewrite(struct bio *bio,
 
         do {
                 struct page *page = bvec->bv_page;
+                tree = &BTRFS_I(page->mapping->host)->io_tree;
+
                 start = ((u64)page->index << PAGE_CACHE_SHIFT) +
                         bvec->bv_offset;
                 end = start + bvec->bv_len - 1;
@@ -1765,7 +1648,7 @@ static int submit_one_bio(int rw, struct bio *bio, int mirror_num)
         BUG_ON(state->end != end);
         spin_unlock_irq(&tree->lock);
 
-        bio->bi_private = state;
+        bio->bi_private = NULL;
 
         bio_get(bio);
 