author		Chris Mason <chris.mason@oracle.com>	2008-09-24 11:48:04 -0400
committer	Chris Mason <chris.mason@oracle.com>	2008-09-25 15:41:59 -0400
commit		2b1f55b0f0d0d1a66470ef4ea2696cd5dd741a12 (patch)
tree		980019fac9b0891e682bc7d9a8c8a58284b467fc /fs/btrfs/extent_io.c
parent		9b49c9b9f93e148815f2544d0c91f43b6d72eea9 (diff)
Remove Btrfs compat code for older kernels
Btrfs had compatibility code for kernels back to 2.6.18. This compat
code has been removed and will be maintained in a separate backport
git tree from now on.
Signed-off-by: Chris Mason <chris.mason@oracle.com>
Diffstat (limited to 'fs/btrfs/extent_io.c')
-rw-r--r--	fs/btrfs/extent_io.c	66
1 file changed, 3 insertions(+), 63 deletions(-)
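All of the shims dropped below follow the same shape: a LINUX_VERSION_CODE check selects between the pre- and post-2.6.24 bio completion signatures (and, further down in the file, between tree_lock and RCU page-cache locking). A minimal sketch of the completion-handler pattern being removed, with a hypothetical handler name and a placeholder body rather than the actual btrfs code, looks like this:

#include <linux/version.h>
#include <linux/bio.h>

/*
 * Sketch of the compat pattern this commit removes (hypothetical
 * handler name, placeholder body). Before 2.6.24, bio completion
 * handlers returned int, took a bytes_done argument, and had to bail
 * out early while the bio was still only partially complete.
 */
#if LINUX_VERSION_CODE > KERNEL_VERSION(2,6,23)
static void example_end_io(struct bio *bio, int err)
#else
static int example_end_io(struct bio *bio, unsigned int bytes_done, int err)
#endif
{
#if LINUX_VERSION_CODE <= KERNEL_VERSION(2,6,23)
	if (bio->bi_size)	/* partial completion, more to come */
		return 1;
#endif
	/* per-bvec completion work goes here */
	bio_put(bio);
#if LINUX_VERSION_CODE <= KERNEL_VERSION(2,6,23)
	return 0;
#endif
}

With the compat branches gone, mainline keeps only the void-returning variant; older kernels are expected to use the separate backport tree mentioned above.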
diff --git a/fs/btrfs/extent_io.c b/fs/btrfs/extent_io.c
index 58ad25838a41..e3a25be5c663 100644
--- a/fs/btrfs/extent_io.c
+++ b/fs/btrfs/extent_io.c
@@ -1397,12 +1397,7 @@ static int check_page_writeback(struct extent_io_tree *tree,
  * Scheduling is not allowed, so the extent state tree is expected
  * to have one and only one object corresponding to this IO.
  */
-#if LINUX_VERSION_CODE > KERNEL_VERSION(2,6,23)
 static void end_bio_extent_writepage(struct bio *bio, int err)
-#else
-static int end_bio_extent_writepage(struct bio *bio,
-				    unsigned int bytes_done, int err)
-#endif
 {
 	int uptodate = err == 0;
 	struct bio_vec *bvec = bio->bi_io_vec + bio->bi_vcnt - 1;
@@ -1412,10 +1407,6 @@ static int end_bio_extent_writepage(struct bio *bio,
 	int whole_page;
 	int ret;
 
-#if LINUX_VERSION_CODE <= KERNEL_VERSION(2,6,23)
-	if (bio->bi_size)
-		return 1;
-#endif
 	do {
 		struct page *page = bvec->bv_page;
 		tree = &BTRFS_I(page->mapping->host)->io_tree;
@@ -1461,10 +1452,8 @@ static int end_bio_extent_writepage(struct bio *bio,
 		else
 			check_page_writeback(tree, page);
 	} while (bvec >= bio->bi_io_vec);
+
 	bio_put(bio);
-#if LINUX_VERSION_CODE <= KERNEL_VERSION(2,6,23)
-	return 0;
-#endif
 }
 
 /*
@@ -1478,12 +1467,7 @@ static int end_bio_extent_writepage(struct bio *bio,
  * Scheduling is not allowed, so the extent state tree is expected
  * to have one and only one object corresponding to this IO.
  */
-#if LINUX_VERSION_CODE > KERNEL_VERSION(2,6,23)
 static void end_bio_extent_readpage(struct bio *bio, int err)
-#else
-static int end_bio_extent_readpage(struct bio *bio,
-				   unsigned int bytes_done, int err)
-#endif
 {
 	int uptodate = test_bit(BIO_UPTODATE, &bio->bi_flags);
 	struct bio_vec *bvec = bio->bi_io_vec + bio->bi_vcnt - 1;
@@ -1493,11 +1477,6 @@ static int end_bio_extent_readpage(struct bio *bio,
 	int whole_page;
 	int ret;
 
-#if LINUX_VERSION_CODE <= KERNEL_VERSION(2,6,23)
-	if (bio->bi_size)
-		return 1;
-#endif
-
 	do {
 		struct page *page = bvec->bv_page;
 		tree = &BTRFS_I(page->mapping->host)->io_tree;
@@ -1556,9 +1535,6 @@ static int end_bio_extent_readpage(struct bio *bio,
 	} while (bvec >= bio->bi_io_vec);
 
 	bio_put(bio);
-#if LINUX_VERSION_CODE <= KERNEL_VERSION(2,6,23)
-	return 0;
-#endif
 }
 
 /*
@@ -1566,12 +1542,7 @@ static int end_bio_extent_readpage(struct bio *bio,
  * the structs in the extent tree when done, and set the uptodate bits
  * as appropriate.
  */
-#if LINUX_VERSION_CODE > KERNEL_VERSION(2,6,23)
 static void end_bio_extent_preparewrite(struct bio *bio, int err)
-#else
-static int end_bio_extent_preparewrite(struct bio *bio,
-				       unsigned int bytes_done, int err)
-#endif
 {
 	const int uptodate = test_bit(BIO_UPTODATE, &bio->bi_flags);
 	struct bio_vec *bvec = bio->bi_io_vec + bio->bi_vcnt - 1;
@@ -1579,11 +1550,6 @@ static int end_bio_extent_preparewrite(struct bio *bio,
 	u64 start;
 	u64 end;
 
-#if LINUX_VERSION_CODE <= KERNEL_VERSION(2,6,23)
-	if (bio->bi_size)
-		return 1;
-#endif
-
 	do {
 		struct page *page = bvec->bv_page;
 		tree = &BTRFS_I(page->mapping->host)->io_tree;
@@ -1607,9 +1573,6 @@ static int end_bio_extent_preparewrite(struct bio *bio,
 	} while (bvec >= bio->bi_io_vec);
 
 	bio_put(bio);
-#if LINUX_VERSION_CODE <= KERNEL_VERSION(2,6,23)
-	return 0;
-#endif
 }
 
 static struct bio *
@@ -2079,12 +2042,6 @@ done:
 	return 0;
 }
 
-#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,22)
-/* Taken directly from 2.6.23 with a mod for a lockpage hook */
-typedef int (*writepage_t)(struct page *page, struct writeback_control *wbc,
-			   void *data);
-#endif
-
 /**
  * write_cache_pages - walk the list of dirty pages of the given address space and write all of them.
  * @mapping: address space structure to write
@@ -2201,10 +2158,9 @@ retry:
 	}
 	if (wbc->range_cyclic || (range_whole && wbc->nr_to_write > 0))
 		mapping->writeback_index = index;
-#if LINUX_VERSION_CODE > KERNEL_VERSION(2,6,26)
+
 	if (wbc->range_cont)
 		wbc->range_start = index << PAGE_CACHE_SHIFT;
-#endif
 	return ret;
 }
 EXPORT_SYMBOL(extent_write_cache_pages);
@@ -2560,18 +2516,10 @@ static inline struct page *extent_buffer_page(struct extent_buffer *eb,
 	 * by increasing the reference count. So we know the page must
 	 * be in the radix tree.
 	 */
-#if LINUX_VERSION_CODE > KERNEL_VERSION(2,6,26)
 	rcu_read_lock();
-#else
-	read_lock_irq(&mapping->tree_lock);
-#endif
 	p = radix_tree_lookup(&mapping->page_tree, i);
-
-#if LINUX_VERSION_CODE > KERNEL_VERSION(2,6,26)
 	rcu_read_unlock();
-#else
-	read_unlock_irq(&mapping->tree_lock);
-#endif
+
 	return p;
 }
 
@@ -2773,21 +2721,13 @@ int clear_extent_buffer_dirty(struct extent_io_tree *tree,
 			}
 		}
 		clear_page_dirty_for_io(page);
-#if LINUX_VERSION_CODE > KERNEL_VERSION(2,6,26)
 		spin_lock_irq(&page->mapping->tree_lock);
-#else
-		read_lock_irq(&page->mapping->tree_lock);
-#endif
 		if (!PageDirty(page)) {
 			radix_tree_tag_clear(&page->mapping->page_tree,
 						page_index(page),
 						PAGECACHE_TAG_DIRTY);
 		}
-#if LINUX_VERSION_CODE > KERNEL_VERSION(2,6,26)
 		spin_unlock_irq(&page->mapping->tree_lock);
-#else
-		read_unlock_irq(&page->mapping->tree_lock);
-#endif
 		unlock_page(page);
 	}
 	return 0;
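The other recurring shim removed above guards the page-cache radix-tree access: on kernels after 2.6.26 an RCU read-side section is sufficient, while older kernels took mapping->tree_lock. A hedged sketch of the lookup as it reads after this commit (simplified, with a hypothetical function name, assuming a pre-xarray kernel that still exposes mapping->page_tree):

#include <linux/pagemap.h>
#include <linux/radix-tree.h>
#include <linux/rcupdate.h>

/*
 * Sketch of the post-2.6.26 path kept by this commit: the caller already
 * holds a reference that pins the page, so an RCU-protected
 * radix_tree_lookup() is enough and mapping->tree_lock is not needed.
 */
static struct page *lookup_eb_page(struct address_space *mapping,
				   unsigned long index)
{
	struct page *p;

	rcu_read_lock();
	p = radix_tree_lookup(&mapping->page_tree, index);
	rcu_read_unlock();

	return p;
}

In btrfs itself this lookup sits inline in extent_buffer_page(); the helper above only isolates the locking choice the commit settles on.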