author    Dave Chinner <david@fromorbit.com>  2016-07-19 21:53:35 -0400
committer Dave Chinner <david@fromorbit.com>  2016-07-19 21:53:35 -0400
commit    bbfeb6141fcc8e2aea47b2f235cc8cc8ffb4a293 (patch)
tree      65fc831b4e915412ac4baf0fd72525eb05d48aa8
parent    f63716175c0730c2f29b4591146f0045cfcaa8dc (diff)
parent    9c7504aa72b6e2104ba6dcef518c15672ec51175 (diff)
Merge branch 'xfs-4.8-buf-fixes' into for-next
-rw-r--r--  fs/xfs/xfs_buf.c       | 170
-rw-r--r--  fs/xfs/xfs_buf.h       |   7
-rw-r--r--  fs/xfs/xfs_buf_item.c  |  12
-rw-r--r--  fs/xfs/xfs_log.c       |   5
-rw-r--r--  fs/xfs/xfs_mount.c     |  10
-rw-r--r--  fs/xfs/xfs_sysfs.c     |   3
6 files changed, 151 insertions(+), 56 deletions(-)
diff --git a/fs/xfs/xfs_buf.c b/fs/xfs/xfs_buf.c
index 4665ff6e5153..32fc5401a756 100644
--- a/fs/xfs/xfs_buf.c
+++ b/fs/xfs/xfs_buf.c
@@ -80,6 +80,47 @@ xfs_buf_vmap_len(
 }
 
 /*
+ * Bump the I/O in flight count on the buftarg if we haven't yet done so for
+ * this buffer. The count is incremented once per buffer (per hold cycle)
+ * because the corresponding decrement is deferred to buffer release. Buffers
+ * can undergo I/O multiple times in a hold-release cycle and per buffer I/O
+ * tracking adds unnecessary overhead. This is used for synchronization purposes
+ * with unmount (see xfs_wait_buftarg()), so all we really need is a count of
+ * in-flight buffers.
+ *
+ * Buffers that are never released (e.g., superblock, iclog buffers) must set
+ * the XBF_NO_IOACCT flag before I/O submission. Otherwise, the buftarg count
+ * never reaches zero and unmount hangs indefinitely.
+ */
+static inline void
+xfs_buf_ioacct_inc(
+	struct xfs_buf	*bp)
+{
+	if (bp->b_flags & (XBF_NO_IOACCT|_XBF_IN_FLIGHT))
+		return;
+
+	ASSERT(bp->b_flags & XBF_ASYNC);
+	bp->b_flags |= _XBF_IN_FLIGHT;
+	percpu_counter_inc(&bp->b_target->bt_io_count);
+}
+
+/*
+ * Clear the in-flight state on a buffer about to be released to the LRU or
+ * freed and unaccount from the buftarg.
+ */
+static inline void
+xfs_buf_ioacct_dec(
+	struct xfs_buf	*bp)
+{
+	if (!(bp->b_flags & _XBF_IN_FLIGHT))
+		return;
+
+	ASSERT(bp->b_flags & XBF_ASYNC);
+	bp->b_flags &= ~_XBF_IN_FLIGHT;
+	percpu_counter_dec(&bp->b_target->bt_io_count);
+}
+
+/*
  * When we mark a buffer stale, we remove the buffer from the LRU and clear the
  * b_lru_ref count so that the buffer is freed immediately when the buffer
  * reference count falls to zero. If the buffer is already on the LRU, we need
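To make the accounting scheme added in the hunk above easier to follow, here is a minimal stand-alone sketch of the same pattern in ordinary userspace C (not kernel code): a per-buffer in-flight flag guarantees that the shared counter is bumped at most once per hold cycle, and buffers that are never released opt out entirely. All names here are illustrative stand-ins for bt_io_count, XBF_NO_IOACCT and _XBF_IN_FLIGHT.

/*
 * Minimal userspace sketch of the in-flight accounting pattern; names are
 * illustrative only, this is not the kernel code itself.
 */
#include <assert.h>
#include <stdbool.h>

struct target { long io_count; };		/* stand-in for bt_io_count */
struct buffer {
	struct target	*target;
	bool		no_ioacct;		/* stand-in for XBF_NO_IOACCT */
	bool		in_flight;		/* stand-in for _XBF_IN_FLIGHT */
};

static void ioacct_inc(struct buffer *bp)
{
	if (bp->no_ioacct || bp->in_flight)	/* exempt, or already counted */
		return;
	bp->in_flight = true;
	bp->target->io_count++;			/* percpu_counter_inc() in the patch */
}

static void ioacct_dec(struct buffer *bp)
{
	if (!bp->in_flight)
		return;
	bp->in_flight = false;
	bp->target->io_count--;			/* percpu_counter_dec() in the patch */
}

int main(void)
{
	struct target tgt = { 0 };
	struct buffer bp = { .target = &tgt };

	ioacct_inc(&bp);			/* I/O submission */
	ioacct_inc(&bp);			/* resubmission: counted only once */
	assert(tgt.io_count == 1);

	ioacct_dec(&bp);			/* buffer release */
	assert(tgt.io_count == 0);		/* unmount can now proceed */
	return 0;
}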
@@ -102,6 +143,14 @@ xfs_buf_stale(
 	 */
 	bp->b_flags &= ~_XBF_DELWRI_Q;
 
+	/*
+	 * Once the buffer is marked stale and unlocked, a subsequent lookup
+	 * could reset b_flags. There is no guarantee that the buffer is
+	 * unaccounted (released to LRU) before that occurs. Drop in-flight
+	 * status now to preserve accounting consistency.
+	 */
+	xfs_buf_ioacct_dec(bp);
+
 	spin_lock(&bp->b_lock);
 	atomic_set(&bp->b_lru_ref, 0);
 	if (!(bp->b_state & XFS_BSTATE_DISPOSE) &&
@@ -815,7 +864,8 @@ xfs_buf_get_uncached(
 	struct xfs_buf		*bp;
 	DEFINE_SINGLE_BUF_MAP(map, XFS_BUF_DADDR_NULL, numblks);
 
-	bp = _xfs_buf_alloc(target, &map, 1, 0);
+	/* flags might contain irrelevant bits, pass only what we care about */
+	bp = _xfs_buf_alloc(target, &map, 1, flags & XBF_NO_IOACCT);
 	if (unlikely(bp == NULL))
 		goto fail;
 
@@ -866,63 +916,85 @@ xfs_buf_hold(
 }
 
 /*
- * Releases a hold on the specified buffer. If the
- * the hold count is 1, calls xfs_buf_free.
+ * Release a hold on the specified buffer. If the hold count is 1, the buffer is
+ * placed on LRU or freed (depending on b_lru_ref).
  */
 void
 xfs_buf_rele(
 	xfs_buf_t		*bp)
 {
 	struct xfs_perag	*pag = bp->b_pag;
+	bool			release;
+	bool			freebuf = false;
 
 	trace_xfs_buf_rele(bp, _RET_IP_);
 
 	if (!pag) {
 		ASSERT(list_empty(&bp->b_lru));
 		ASSERT(RB_EMPTY_NODE(&bp->b_rbnode));
-		if (atomic_dec_and_test(&bp->b_hold))
+		if (atomic_dec_and_test(&bp->b_hold)) {
+			xfs_buf_ioacct_dec(bp);
 			xfs_buf_free(bp);
+		}
 		return;
 	}
 
 	ASSERT(!RB_EMPTY_NODE(&bp->b_rbnode));
 
 	ASSERT(atomic_read(&bp->b_hold) > 0);
-	if (atomic_dec_and_lock(&bp->b_hold, &pag->pag_buf_lock)) {
-		spin_lock(&bp->b_lock);
-		if (!(bp->b_flags & XBF_STALE) && atomic_read(&bp->b_lru_ref)) {
-			/*
-			 * If the buffer is added to the LRU take a new
-			 * reference to the buffer for the LRU and clear the
-			 * (now stale) dispose list state flag
-			 */
-			if (list_lru_add(&bp->b_target->bt_lru, &bp->b_lru)) {
-				bp->b_state &= ~XFS_BSTATE_DISPOSE;
-				atomic_inc(&bp->b_hold);
-			}
-			spin_unlock(&bp->b_lock);
-			spin_unlock(&pag->pag_buf_lock);
-		} else {
-			/*
-			 * most of the time buffers will already be removed from
-			 * the LRU, so optimise that case by checking for the
-			 * XFS_BSTATE_DISPOSE flag indicating the last list the
-			 * buffer was on was the disposal list
-			 */
-			if (!(bp->b_state & XFS_BSTATE_DISPOSE)) {
-				list_lru_del(&bp->b_target->bt_lru, &bp->b_lru);
-			} else {
-				ASSERT(list_empty(&bp->b_lru));
-			}
-			spin_unlock(&bp->b_lock);
 
-			ASSERT(!(bp->b_flags & _XBF_DELWRI_Q));
-			rb_erase(&bp->b_rbnode, &pag->pag_buf_tree);
-			spin_unlock(&pag->pag_buf_lock);
-			xfs_perag_put(pag);
-			xfs_buf_free(bp);
+	release = atomic_dec_and_lock(&bp->b_hold, &pag->pag_buf_lock);
+	spin_lock(&bp->b_lock);
+	if (!release) {
+		/*
+		 * Drop the in-flight state if the buffer is already on the LRU
+		 * and it holds the only reference. This is racy because we
+		 * haven't acquired the pag lock, but the use of _XBF_IN_FLIGHT
+		 * ensures the decrement occurs only once per-buf.
+		 */
+		if ((atomic_read(&bp->b_hold) == 1) && !list_empty(&bp->b_lru))
+			xfs_buf_ioacct_dec(bp);
+		goto out_unlock;
+	}
+
+	/* the last reference has been dropped ... */
+	xfs_buf_ioacct_dec(bp);
+	if (!(bp->b_flags & XBF_STALE) && atomic_read(&bp->b_lru_ref)) {
+		/*
+		 * If the buffer is added to the LRU take a new reference to the
+		 * buffer for the LRU and clear the (now stale) dispose list
+		 * state flag
+		 */
+		if (list_lru_add(&bp->b_target->bt_lru, &bp->b_lru)) {
+			bp->b_state &= ~XFS_BSTATE_DISPOSE;
+			atomic_inc(&bp->b_hold);
 		}
+		spin_unlock(&pag->pag_buf_lock);
+	} else {
+		/*
+		 * most of the time buffers will already be removed from the
+		 * LRU, so optimise that case by checking for the
+		 * XFS_BSTATE_DISPOSE flag indicating the last list the buffer
+		 * was on was the disposal list
+		 */
+		if (!(bp->b_state & XFS_BSTATE_DISPOSE)) {
+			list_lru_del(&bp->b_target->bt_lru, &bp->b_lru);
+		} else {
+			ASSERT(list_empty(&bp->b_lru));
+		}
+
+		ASSERT(!(bp->b_flags & _XBF_DELWRI_Q));
+		rb_erase(&bp->b_rbnode, &pag->pag_buf_tree);
+		spin_unlock(&pag->pag_buf_lock);
+		xfs_perag_put(pag);
+		freebuf = true;
 	}
+
+out_unlock:
+	spin_unlock(&bp->b_lock);
+
+	if (freebuf)
+		xfs_buf_free(bp);
 }
 
 
@@ -1341,6 +1413,7 @@ xfs_buf_submit(
 	 * xfs_buf_ioend too early.
 	 */
 	atomic_set(&bp->b_io_remaining, 1);
+	xfs_buf_ioacct_inc(bp);
 	_xfs_buf_ioapply(bp);
 
 	/*
@@ -1526,13 +1599,19 @@ xfs_wait_buftarg(
 	int loop = 0;
 
 	/*
-	 * We need to flush the buffer workqueue to ensure that all IO
-	 * completion processing is 100% done. Just waiting on buffer locks is
-	 * not sufficient for async IO as the reference count held over IO is
-	 * not released until after the buffer lock is dropped. Hence we need to
-	 * ensure here that all reference counts have been dropped before we
-	 * start walking the LRU list.
+	 * First wait on the buftarg I/O count for all in-flight buffers to be
+	 * released. This is critical as new buffers do not make the LRU until
+	 * they are released.
+	 *
+	 * Next, flush the buffer workqueue to ensure all completion processing
+	 * has finished. Just waiting on buffer locks is not sufficient for
+	 * async IO as the reference count held over IO is not released until
+	 * after the buffer lock is dropped. Hence we need to ensure here that
+	 * all reference counts have been dropped before we start walking the
+	 * LRU list.
 	 */
+	while (percpu_counter_sum(&btp->bt_io_count))
+		delay(100);
 	drain_workqueue(btp->bt_mount->m_buf_workqueue);
 
 	/* loop until there is nothing left on the lru list. */
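The drain loop added above leans on the stock <linux/percpu_counter.h> API, whose lifecycle also appears in the xfs_alloc_buftarg() and xfs_free_buftarg() hunks below. A hedged, self-contained illustration of that lifecycle follows (a hypothetical demo module, not XFS code); it uses msleep() where XFS uses its own delay() helper, and notes why the slow unmount path can afford the exact but expensive percpu_counter_sum() rather than the approximate percpu_counter_read().

/*
 * Hypothetical demo module sketching the percpu_counter lifecycle used by
 * bt_io_count: init, inc on submission, dec on release, exact-sum drain,
 * destroy. Not XFS code; only the percpu_counter calls mirror the patch.
 */
#include <linux/module.h>
#include <linux/percpu_counter.h>
#include <linux/delay.h>

static struct percpu_counter io_count;	/* plays the role of bt_io_count */

static int __init ioacct_demo_init(void)
{
	int error;

	error = percpu_counter_init(&io_count, 0, GFP_KERNEL);
	if (error)
		return error;

	percpu_counter_inc(&io_count);	/* I/O submitted */
	percpu_counter_dec(&io_count);	/* buffer released */

	/*
	 * percpu_counter_read() is cheap but only approximate; the unmount
	 * path wants the exact (slower) sum, hence percpu_counter_sum().
	 */
	while (percpu_counter_sum(&io_count))
		msleep(100);

	return 0;
}

static void __exit ioacct_demo_exit(void)
{
	WARN_ON(percpu_counter_sum(&io_count) != 0);
	percpu_counter_destroy(&io_count);
}

module_init(ioacct_demo_init);
module_exit(ioacct_demo_exit);
MODULE_LICENSE("GPL");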
@@ -1629,6 +1708,8 @@ xfs_free_buftarg(
 	struct xfs_buftarg	*btp)
 {
 	unregister_shrinker(&btp->bt_shrinker);
+	ASSERT(percpu_counter_sum(&btp->bt_io_count) == 0);
+	percpu_counter_destroy(&btp->bt_io_count);
 	list_lru_destroy(&btp->bt_lru);
 
 	if (mp->m_flags & XFS_MOUNT_BARRIER)
@@ -1693,6 +1774,9 @@ xfs_alloc_buftarg(
 	if (list_lru_init(&btp->bt_lru))
 		goto error;
 
+	if (percpu_counter_init(&btp->bt_io_count, 0, GFP_KERNEL))
+		goto error;
+
 	btp->bt_shrinker.count_objects = xfs_buftarg_shrink_count;
 	btp->bt_shrinker.scan_objects = xfs_buftarg_shrink_scan;
 	btp->bt_shrinker.seeks = DEFAULT_SEEKS;
@@ -1834,7 +1918,7 @@ xfs_buf_delwri_submit_buffers(
 		 * side. We need to move the buffer onto the io_list
 		 * at this point so the caller can still access it.
 		 */
-		bp->b_flags &= ~(_XBF_DELWRI_Q | XBF_ASYNC | XBF_WRITE_FAIL);
+		bp->b_flags &= ~(_XBF_DELWRI_Q | XBF_WRITE_FAIL);
 		bp->b_flags |= XBF_WRITE | XBF_ASYNC;
 		if (wait_list) {
 			xfs_buf_hold(bp);
diff --git a/fs/xfs/xfs_buf.h b/fs/xfs/xfs_buf.h
index 8bfb974f0772..1c2e52b2d926 100644
--- a/fs/xfs/xfs_buf.h
+++ b/fs/xfs/xfs_buf.h
@@ -43,6 +43,7 @@ typedef enum {
 #define XBF_READ	 (1 << 0) /* buffer intended for reading from device */
 #define XBF_WRITE	 (1 << 1) /* buffer intended for writing to device */
 #define XBF_READ_AHEAD	 (1 << 2) /* asynchronous read-ahead */
+#define XBF_NO_IOACCT	 (1 << 3) /* bypass I/O accounting (non-LRU bufs) */
 #define XBF_ASYNC	 (1 << 4) /* initiator will not wait for completion */
 #define XBF_DONE	 (1 << 5) /* all pages in the buffer uptodate */
 #define XBF_STALE	 (1 << 6) /* buffer has been staled, do not find it */
@@ -62,6 +63,7 @@ typedef enum {
 #define _XBF_KMEM	 (1 << 21)/* backed by heap memory */
 #define _XBF_DELWRI_Q	 (1 << 22)/* buffer on a delwri queue */
 #define _XBF_COMPOUND	 (1 << 23)/* compound buffer */
+#define _XBF_IN_FLIGHT	 (1 << 25) /* I/O in flight, for accounting purposes */
 
 typedef unsigned int xfs_buf_flags_t;
 
@@ -81,7 +83,8 @@ typedef unsigned int xfs_buf_flags_t;
 	{ _XBF_PAGES,		"PAGES" }, \
 	{ _XBF_KMEM,		"KMEM" }, \
 	{ _XBF_DELWRI_Q,	"DELWRI_Q" }, \
-	{ _XBF_COMPOUND,	"COMPOUND" }
+	{ _XBF_COMPOUND,	"COMPOUND" }, \
+	{ _XBF_IN_FLIGHT,	"IN_FLIGHT" }
 
 
 /*
@@ -115,6 +118,8 @@ typedef struct xfs_buftarg {
 	/* LRU control structures */
 	struct shrinker		bt_shrinker;
 	struct list_lru		bt_lru;
+
+	struct percpu_counter	bt_io_count;
 } xfs_buftarg_t;
 
 struct xfs_buf;
diff --git a/fs/xfs/xfs_buf_item.c b/fs/xfs/xfs_buf_item.c
index 2e95ad036316..0337690c88bc 100644
--- a/fs/xfs/xfs_buf_item.c
+++ b/fs/xfs/xfs_buf_item.c
@@ -1081,6 +1081,8 @@ xfs_buf_iodone_callback_error(
 	trace_xfs_buf_item_iodone_async(bp, _RET_IP_);
 	ASSERT(bp->b_iodone != NULL);
 
+	cfg = xfs_error_get_cfg(mp, XFS_ERR_METADATA, bp->b_error);
+
 	/*
 	 * If the write was asynchronous then no one will be looking for the
 	 * error. If this is the first failure of this type, clear the error
@@ -1088,13 +1090,12 @@ xfs_buf_iodone_callback_error(
 	 * async write failure at least once, but we also need to set the buffer
 	 * up to behave correctly now for repeated failures.
 	 */
-	if (!(bp->b_flags & (XBF_STALE|XBF_WRITE_FAIL)) ||
+	if (!(bp->b_flags & (XBF_STALE | XBF_WRITE_FAIL)) ||
 	    bp->b_last_error != bp->b_error) {
-		bp->b_flags |= (XBF_WRITE | XBF_ASYNC |
-				XBF_DONE | XBF_WRITE_FAIL);
+		bp->b_flags |= (XBF_WRITE | XBF_DONE | XBF_WRITE_FAIL);
 		bp->b_last_error = bp->b_error;
-		bp->b_retries = 0;
-		bp->b_first_retry_time = jiffies;
+		if (cfg->retry_timeout && !bp->b_first_retry_time)
+			bp->b_first_retry_time = jiffies;
 
 		xfs_buf_ioerror(bp, 0);
 		xfs_buf_submit(bp);
@@ -1105,7 +1106,6 @@ xfs_buf_iodone_callback_error(
 	 * Repeated failure on an async write. Take action according to the
 	 * error configuration we have been set up to use.
 	 */
-	cfg = xfs_error_get_cfg(mp, XFS_ERR_METADATA, bp->b_error);
 
 	if (cfg->max_retries != XFS_ERR_RETRY_FOREVER &&
 	    ++bp->b_retries > cfg->max_retries)
diff --git a/fs/xfs/xfs_log.c b/fs/xfs/xfs_log.c
index 63dad9ea9016..3b74fa011bb1 100644
--- a/fs/xfs/xfs_log.c
+++ b/fs/xfs/xfs_log.c
@@ -1415,7 +1415,7 @@ xlog_alloc_log(
 	 */
 	error = -ENOMEM;
 	bp = xfs_buf_alloc(mp->m_logdev_targp, XFS_BUF_DADDR_NULL,
-			   BTOBB(log->l_iclog_size), 0);
+			   BTOBB(log->l_iclog_size), XBF_NO_IOACCT);
 	if (!bp)
 		goto out_free_log;
 
@@ -1454,7 +1454,8 @@ xlog_alloc_log(
 		prev_iclog = iclog;
 
 		bp = xfs_buf_get_uncached(mp->m_logdev_targp,
-					  BTOBB(log->l_iclog_size), 0);
+					  BTOBB(log->l_iclog_size),
+					  XBF_NO_IOACCT);
 		if (!bp)
 			goto out_free_iclog;
 
diff --git a/fs/xfs/xfs_mount.c b/fs/xfs/xfs_mount.c
index e39b02351b4a..970c19ba2f56 100644
--- a/fs/xfs/xfs_mount.c
+++ b/fs/xfs/xfs_mount.c
@@ -272,13 +272,15 @@ xfs_readsb(
 	buf_ops = NULL;
 
 	/*
-	 * Allocate a (locked) buffer to hold the superblock.
-	 * This will be kept around at all times to optimize
-	 * access to the superblock.
+	 * Allocate a (locked) buffer to hold the superblock. This will be kept
+	 * around at all times to optimize access to the superblock. Therefore,
+	 * set XBF_NO_IOACCT to make sure it doesn't hold the buftarg count
+	 * elevated.
 	 */
 reread:
 	error = xfs_buf_read_uncached(mp->m_ddev_targp, XFS_SB_DADDR,
-				   BTOBB(sector_size), 0, &bp, buf_ops);
+				   BTOBB(sector_size), XBF_NO_IOACCT, &bp,
+				   buf_ops);
 	if (error) {
 		if (loud)
 			xfs_warn(mp, "SB validate failed with error %d.", error);
diff --git a/fs/xfs/xfs_sysfs.c b/fs/xfs/xfs_sysfs.c
index 4c2c55086208..79cfd3fc5324 100644
--- a/fs/xfs/xfs_sysfs.c
+++ b/fs/xfs/xfs_sysfs.c
@@ -634,6 +634,9 @@ xfs_error_get_cfg(
 {
 	struct xfs_error_cfg	*cfg;
 
+	if (error < 0)
+		error = -error;
+
 	switch (error) {
 	case EIO:
 		cfg = &mp->m_error_cfg[error_class][XFS_ERR_EIO];
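The two lines added above normalise the sign of the incoming error code. The earlier xfs_buf_item.c hunk now passes bp->b_error to xfs_error_get_cfg(), and buffer errors are stored as negative errnos, while the switch compares against positive constants such as EIO; without the flip the lookup would fall through to the default configuration. A stand-alone sketch of that behaviour follows (plain userspace C with illustrative names; the remaining error classes are elided).

/*
 * Illustrative userspace sketch of the sign normalisation above, not XFS
 * code. classify() stands in for xfs_error_get_cfg(); only the EIO class is
 * shown.
 */
#include <errno.h>
#include <stdio.h>

static const char *classify(int error)
{
	if (error < 0)			/* accept -EIO (b_error style) as well as EIO */
		error = -error;

	switch (error) {
	case EIO:
		return "EIO class";
	default:
		return "default class";
	}
}

int main(void)
{
	/* without the normalisation, -EIO would land in the default class */
	printf("%s\n", classify(-EIO));
	printf("%s\n", classify(EIO));
	return 0;
}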