Diffstat (limited to 'fs/xfs/linux-2.6/xfs_buf.c')
-rw-r--r--   fs/xfs/linux-2.6/xfs_buf.c   103
1 files changed, 43 insertions, 60 deletions
diff --git a/fs/xfs/linux-2.6/xfs_buf.c b/fs/xfs/linux-2.6/xfs_buf.c
index 44c2b0ef9a41..ea79072f5210 100644
--- a/fs/xfs/linux-2.6/xfs_buf.c
+++ b/fs/xfs/linux-2.6/xfs_buf.c
@@ -37,14 +37,14 @@
 
 #include "xfs_sb.h"
 #include "xfs_inum.h"
+#include "xfs_log.h"
 #include "xfs_ag.h"
-#include "xfs_dmapi.h"
 #include "xfs_mount.h"
 #include "xfs_trace.h"
 
 static kmem_zone_t *xfs_buf_zone;
 STATIC int xfsbufd(void *);
-STATIC int xfsbufd_wakeup(int, gfp_t);
+STATIC int xfsbufd_wakeup(struct shrinker *, int, gfp_t);
 STATIC void xfs_buf_delwri_queue(xfs_buf_t *, int);
 static struct shrinker xfs_buf_shake = {
         .shrink = xfsbufd_wakeup,
@@ -339,7 +339,7 @@ _xfs_buf_lookup_pages(
                                         __func__, gfp_mask);
 
                         XFS_STATS_INC(xb_page_retries);
-                        xfsbufd_wakeup(0, gfp_mask);
+                        xfsbufd_wakeup(NULL, 0, gfp_mask);
                         congestion_wait(BLK_RW_ASYNC, HZ/50);
                         goto retry;
                 }
@@ -578,9 +578,9 @@ _xfs_buf_read(
                         XBF_READ_AHEAD | _XBF_RUN_QUEUES);
 
         status = xfs_buf_iorequest(bp);
-        if (!status && !(flags & XBF_ASYNC))
-                status = xfs_buf_iowait(bp);
-        return status;
+        if (status || XFS_BUF_ISERROR(bp) || (flags & XBF_ASYNC))
+                return status;
+        return xfs_buf_iowait(bp);
 }
 
 xfs_buf_t *
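
The rewritten tail of _xfs_buf_read() folds every case that must not block into a single early return. A condensed sketch of the resulting control flow, with explanatory comments added (identifiers are the kernel's own; this fragment is not buildable outside the kernel tree):

    /* Only a successfully submitted, error-free, synchronous read
     * waits for completion; submission failures, buffers already
     * flagged in error, and async reads all return immediately. */
    status = xfs_buf_iorequest(bp);         /* queue the read I/O */
    if (status || XFS_BUF_ISERROR(bp) || (flags & XBF_ASYNC))
            return status;
    return xfs_buf_iowait(bp);              /* sync read: block here */

The old code waited whenever submission succeeded on a synchronous read, even if the buffer already carried an error; the new form checks XFS_BUF_ISERROR(bp) before committing to the wait.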
@@ -850,6 +850,12 @@ xfs_buf_lock_value(
  * Note that this in no way locks the underlying pages, so it is only
  * useful for synchronizing concurrent use of buffer objects, not for
  * synchronizing independent access to the underlying pages.
+ *
+ * If we come across a stale, pinned, locked buffer, we know that we
+ * are being asked to lock a buffer that has been reallocated. Because
+ * it is pinned, we know that the log has not been pushed to disk and
+ * hence it will still be locked. Rather than sleeping until someone
+ * else pushes the log, push it ourselves before trying to get the lock.
  */
 void
 xfs_buf_lock(
@@ -857,6 +863,8 @@ xfs_buf_lock(
 {
         trace_xfs_buf_lock(bp, _RET_IP_);
 
+        if (atomic_read(&bp->b_pin_count) && (bp->b_flags & XBF_STALE))
+                xfs_log_force(bp->b_mount, 0);
         if (atomic_read(&bp->b_io_remaining))
                 blk_run_address_space(bp->b_target->bt_mapping);
         down(&bp->b_sema);
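
Read together with the new comment block, the added check amounts to: if the only thing that can unlock this buffer is log completion, push the log before sleeping. A sketch with explanatory comments (identifiers as in the hunk; in this tree xfs_log_force(mp, 0) is an asynchronous push):

    /* A stale, pinned buffer has been freed and reallocated while its
     * log item is still in the in-core log; the unpin that runs at log
     * I/O completion is what will release the lock we are about to
     * wait on, so kick the log ourselves rather than sleep until some
     * other thread happens to push it. */
    if (atomic_read(&bp->b_pin_count) && (bp->b_flags & XBF_STALE))
            xfs_log_force(bp->b_mount, 0);  /* async: push, don't wait */
    /* ... */
    down(&bp->b_sema);                      /* then block for the lock */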
@@ -888,36 +896,6 @@ xfs_buf_unlock(
         trace_xfs_buf_unlock(bp, _RET_IP_);
 }
 
-
-/*
- * Pinning Buffer Storage in Memory
- * Ensure that no attempt to force a buffer to disk will succeed.
- */
-void
-xfs_buf_pin(
-        xfs_buf_t       *bp)
-{
-        trace_xfs_buf_pin(bp, _RET_IP_);
-        atomic_inc(&bp->b_pin_count);
-}
-
-void
-xfs_buf_unpin(
-        xfs_buf_t       *bp)
-{
-        trace_xfs_buf_unpin(bp, _RET_IP_);
-
-        if (atomic_dec_and_test(&bp->b_pin_count))
-                wake_up_all(&bp->b_waiters);
-}
-
-int
-xfs_buf_ispin(
-        xfs_buf_t       *bp)
-{
-        return atomic_read(&bp->b_pin_count);
-}
-
 STATIC void
 xfs_buf_wait_unpin(
         xfs_buf_t       *bp)
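
With xfs_buf_pin(), xfs_buf_unpin() and xfs_buf_ispin() removed, the pin count is accessed directly; the xfs_buf_delwri_split() hunk further down switches its caller to XFS_BUF_ISPINNED(). A guess at the header-side shape of that macro (a hypothetical reconstruction; the real definition lives in xfs_buf.h, not in this diff):

    /* Hypothetical reconstruction of the xfs_buf.h replacement;
     * b_pin_count itself remains an atomic_t in struct xfs_buf: */
    #define XFS_BUF_ISPINNED(bp)    atomic_read(&(bp)->b_pin_count)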
@@ -1007,25 +985,19 @@ xfs_bwrite(
         struct xfs_mount        *mp,
         struct xfs_buf          *bp)
 {
-        int                     iowait = (bp->b_flags & XBF_ASYNC) == 0;
-        int                     error = 0;
+        int                     error;
 
-        bp->b_strat = xfs_bdstrat_cb;
         bp->b_mount = mp;
         bp->b_flags |= XBF_WRITE;
-        if (!iowait)
-                bp->b_flags |= _XBF_RUN_QUEUES;
+        bp->b_flags &= ~(XBF_ASYNC | XBF_READ);
 
         xfs_buf_delwri_dequeue(bp);
-        xfs_buf_iostrategy(bp);
-
-        if (iowait) {
-                error = xfs_buf_iowait(bp);
-                if (error)
-                        xfs_force_shutdown(mp, SHUTDOWN_META_IO_ERROR);
-                xfs_buf_relse(bp);
-        }
+        xfs_bdstrat_cb(bp);
 
+        error = xfs_buf_iowait(bp);
+        if (error)
+                xfs_force_shutdown(mp, SHUTDOWN_META_IO_ERROR);
+        xfs_buf_relse(bp);
         return error;
 }
 
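After this hunk xfs_bwrite() is unconditionally synchronous: it clears XBF_ASYNC and XBF_READ, issues the write through xfs_bdstrat_cb(), always waits, forces a shutdown on error, and releases the buffer itself. A sketch of the caller-visible contract (write_and_check() is a hypothetical wrapper, not kernel code):

    /* Hypothetical caller: bp must be locked and referenced going in;
     * xfs_bwrite() consumes that reference via xfs_buf_relse() and has
     * already forced SHUTDOWN_META_IO_ERROR if it returns nonzero. */
    static int write_and_check(struct xfs_mount *mp, struct xfs_buf *bp)
    {
            int error = xfs_bwrite(mp, bp); /* blocks until I/O completes */
            return error;                   /* do not touch bp afterwards */
    }

Callers that want asynchronous writeback are expected to use the delayed-write path (xfs_bdwrite(), next hunk) instead.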
@@ -1036,7 +1008,6 @@ xfs_bdwrite(
 {
         trace_xfs_buf_bdwrite(bp, _RET_IP_);
 
-        bp->b_strat = xfs_bdstrat_cb;
         bp->b_mount = mp;
 
         bp->b_flags &= ~XBF_READ;
@@ -1071,7 +1042,6 @@ xfs_bioerror(
         XFS_BUF_UNDONE(bp);
         XFS_BUF_STALE(bp);
 
-        XFS_BUF_CLR_BDSTRAT_FUNC(bp);
         xfs_biodone(bp);
 
         return EIO;
@@ -1101,7 +1071,6 @@ xfs_bioerror_relse(
         XFS_BUF_DONE(bp);
         XFS_BUF_STALE(bp);
         XFS_BUF_CLR_IODONE_FUNC(bp);
-        XFS_BUF_CLR_BDSTRAT_FUNC(bp);
         if (!(fl & XBF_ASYNC)) {
                 /*
                  * Mark b_error and B_ERROR _both_.
@@ -1307,8 +1276,19 @@ submit_io:
                 if (size)
                         goto next_chunk;
         } else {
-                bio_put(bio);
+                /*
+                 * if we get here, no pages were added to the bio. However,
+                 * we can't just error out here - if the pages are locked then
+                 * we have to unlock them otherwise we can hang on a later
+                 * access to the page.
+                 */
                 xfs_buf_ioerror(bp, EIO);
+                if (bp->b_flags & _XBF_PAGE_LOCKED) {
+                        int i;
+                        for (i = 0; i < bp->b_page_count; i++)
+                                unlock_page(bp->b_pages[i]);
+                }
+                bio_put(bio);
         }
 }
 
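The expanded failure branch handles a bio to which no pages could be added. The same code, annotated step by step (identifiers as in the hunk):

    xfs_buf_ioerror(bp, EIO);               /* 1. mark the buffer failed */
    if (bp->b_flags & _XBF_PAGE_LOCKED) {
            int i;
            /* 2. drop the page locks taken when the buffer was built,
             *    or a later lock_page() on these pages would hang */
            for (i = 0; i < bp->b_page_count; i++)
                    unlock_page(bp->b_pages[i]);
    }
    bio_put(bio);                           /* 3. free the unused bio */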
@@ -1614,7 +1594,8 @@ xfs_mapping_buftarg(
 
 STATIC int
 xfs_alloc_delwrite_queue(
-        xfs_buftarg_t           *btp)
+        xfs_buftarg_t           *btp,
+        const char              *fsname)
 {
         int                     error = 0;
 
@@ -1622,7 +1603,7 @@ xfs_alloc_delwrite_queue(
         INIT_LIST_HEAD(&btp->bt_delwrite_queue);
         spin_lock_init(&btp->bt_delwrite_lock);
         btp->bt_flags = 0;
-        btp->bt_task = kthread_run(xfsbufd, btp, "xfsbufd");
+        btp->bt_task = kthread_run(xfsbufd, btp, "xfsbufd/%s", fsname);
         if (IS_ERR(btp->bt_task)) {
                 error = PTR_ERR(btp->bt_task);
                 goto out_error;
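
kthread_run() takes a printk-style name format, so threading fsname through gives each mount's flush daemon a distinguishable thread name. For example (device names are illustrative):

    /* One xfsbufd per buftarg: with XFS filesystems mounted from sda1
     * and sdb1, the daemons now show up in ps as "xfsbufd/sda1" and
     * "xfsbufd/sdb1" instead of two indistinguishable "xfsbufd"s. */
    btp->bt_task = kthread_run(xfsbufd, btp, "xfsbufd/%s", fsname);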
@@ -1635,7 +1616,8 @@ out_error:
 xfs_buftarg_t *
 xfs_alloc_buftarg(
         struct block_device     *bdev,
-        int                     external)
+        int                     external,
+        const char              *fsname)
 {
         xfs_buftarg_t           *btp;
 
@@ -1647,7 +1629,7 @@ xfs_alloc_buftarg(
                 goto error;
         if (xfs_mapping_buftarg(btp, bdev))
                 goto error;
-        if (xfs_alloc_delwrite_queue(btp))
+        if (xfs_alloc_delwrite_queue(btp, fsname))
                 goto error;
         xfs_alloc_bufhash(btp, external);
         return btp;
@@ -1756,6 +1738,7 @@ xfs_buf_runall_queues(
 
 STATIC int
 xfsbufd_wakeup(
+        struct shrinker         *shrink,
         int                     priority,
         gfp_t                   mask)
 {
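
The new first argument tracks the mm shrinker API of this period, which passes the registered struct shrinker back into its callback so implementations can recover per-instance state (for example via container_of()). A minimal registration sketch under that assumption (the example_* names are hypothetical):

    /* Callback shape assumed by this change: */
    static int example_shrink(struct shrinker *shrink, int nr_to_scan,
                              gfp_t gfp_mask)
    {
            if (nr_to_scan == 0)
                    return 0;       /* query: report reclaimable count */
            return 0;               /* this sketch owns nothing to free */
    }

    static struct shrinker example_shrinker = {
            .shrink = example_shrink,
            .seeks  = DEFAULT_SEEKS,
    };
    /* register_shrinker(&example_shrinker) at init,
     * unregister_shrinker(&example_shrinker) at teardown. */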
@@ -1797,7 +1780,7 @@ xfs_buf_delwri_split(
                 trace_xfs_buf_delwri_split(bp, _RET_IP_);
                 ASSERT(bp->b_flags & XBF_DELWRI);
 
-                if (!xfs_buf_ispin(bp) && !xfs_buf_cond_lock(bp)) {
+                if (!XFS_BUF_ISPINNED(bp) && !xfs_buf_cond_lock(bp)) {
                         if (!force &&
                             time_before(jiffies, bp->b_queuetime + age)) {
                                 xfs_buf_unlock(bp);
@@ -1882,7 +1865,7 @@ xfsbufd(
                         struct xfs_buf *bp;
                         bp = list_first_entry(&tmp, struct xfs_buf, b_list);
                         list_del_init(&bp->b_list);
-                        xfs_buf_iostrategy(bp);
+                        xfs_bdstrat_cb(bp);
                         count++;
                 }
                 if (count)
@@ -1929,7 +1912,7 @@ xfs_flush_buftarg(
                         bp->b_flags &= ~XBF_ASYNC;
                         list_add(&bp->b_list, &wait_list);
                 }
-                xfs_buf_iostrategy(bp);
+                xfs_bdstrat_cb(bp);
         }
 
         if (wait) {