Diffstat (limited to 'fs/xfs/linux-2.6/xfs_buf.c')
-rw-r--r--  fs/xfs/linux-2.6/xfs_buf.c | 117
1 file changed, 41 insertions(+), 76 deletions(-)
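This change switches the buffer cache from the private XFS_BUF_TRACE/ktrace ring buffer to the kernel's generic event tracing: every XB_TRACE() call site below becomes a trace_xfs_buf_*() tracepoint that records the calling function via _RET_IP_, and the events come from the new "xfs_trace.h" header included at the top of the file. That header is outside this diff, so the following is only a minimal sketch of how such a buffer event class is commonly declared with the standard DECLARE_EVENT_CLASS/DEFINE_EVENT macros; the field list simply mirrors what the removed xfs_buf_trace() used to log (file offset, length, hold count, flags) and is an illustration, not the actual XFS definition.

/*
 * Illustrative sketch only -- the real event definitions live in the new
 * xfs_trace.h, which is not part of this diff.  Assumes the xfs_buf
 * definitions (struct xfs_buf, xfs_off_t, XBF_* flags) are already in
 * scope; the class mirrors the state the removed xfs_buf_trace() logged.
 */
#undef TRACE_SYSTEM
#define TRACE_SYSTEM xfs

#if !defined(_XFS_BUF_TRACE_SKETCH_H) || defined(TRACE_HEADER_MULTI_READ)
#define _XFS_BUF_TRACE_SKETCH_H

#include <linux/tracepoint.h>

DECLARE_EVENT_CLASS(xfs_buf_class,
	TP_PROTO(struct xfs_buf *bp, unsigned long caller_ip),
	TP_ARGS(bp, caller_ip),
	TP_STRUCT__entry(
		__field(xfs_off_t, file_offset)		/* bp->b_file_offset */
		__field(size_t, buffer_length)		/* bp->b_buffer_length */
		__field(int, hold)			/* reference count */
		__field(unsigned, flags)		/* XBF_* flags */
		__field(unsigned long, caller_ip)	/* _RET_IP_ of the caller */
	),
	TP_fast_assign(
		__entry->file_offset = bp->b_file_offset;
		__entry->buffer_length = bp->b_buffer_length;
		__entry->hold = atomic_read(&bp->b_hold);
		__entry->flags = bp->b_flags;
		__entry->caller_ip = caller_ip;
	),
	TP_printk("offset 0x%llx len 0x%zx hold %d flags 0x%x caller 0x%lx",
		  (unsigned long long)__entry->file_offset,
		  __entry->buffer_length, __entry->hold,
		  __entry->flags, __entry->caller_ip)
);

/* One DEFINE_EVENT per event name used in the hunks below, e.g.: */
DEFINE_EVENT(xfs_buf_class, xfs_buf_init,
	TP_PROTO(struct xfs_buf *bp, unsigned long caller_ip),
	TP_ARGS(bp, caller_ip));
DEFINE_EVENT(xfs_buf_class, xfs_buf_free,
	TP_PROTO(struct xfs_buf *bp, unsigned long caller_ip),
	TP_ARGS(bp, caller_ip));

#endif /* _XFS_BUF_TRACE_SKETCH_H */

/* ...followed by the usual TRACE_INCLUDE_PATH / <trace/define_trace.h> boilerplate. */

Each trace point is compiled in unconditionally but stays disabled (and essentially free) until enabled at runtime, which is why the XFS_BUF_TRACE ifdef blocks and the ktrace buffer allocation in xfs_buf_init() can be removed in the hunks below.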
diff --git a/fs/xfs/linux-2.6/xfs_buf.c b/fs/xfs/linux-2.6/xfs_buf.c
index 4ddc973aea7a..b4c7d4248aac 100644
--- a/fs/xfs/linux-2.6/xfs_buf.c
+++ b/fs/xfs/linux-2.6/xfs_buf.c
@@ -39,6 +39,7 @@
 #include "xfs_ag.h"
 #include "xfs_dmapi.h"
 #include "xfs_mount.h"
+#include "xfs_trace.h"
 
 static kmem_zone_t *xfs_buf_zone;
 STATIC int xfsbufd(void *);
@@ -53,34 +54,6 @@ static struct workqueue_struct *xfslogd_workqueue;
 struct workqueue_struct *xfsdatad_workqueue;
 struct workqueue_struct *xfsconvertd_workqueue;
 
-#ifdef XFS_BUF_TRACE
-void
-xfs_buf_trace(
-	xfs_buf_t	*bp,
-	char		*id,
-	void		*data,
-	void		*ra)
-{
-	ktrace_enter(xfs_buf_trace_buf,
-		bp, id,
-		(void *)(unsigned long)bp->b_flags,
-		(void *)(unsigned long)bp->b_hold.counter,
-		(void *)(unsigned long)bp->b_sema.count,
-		(void *)current,
-		data, ra,
-		(void *)(unsigned long)((bp->b_file_offset>>32) & 0xffffffff),
-		(void *)(unsigned long)(bp->b_file_offset & 0xffffffff),
-		(void *)(unsigned long)bp->b_buffer_length,
-		NULL, NULL, NULL, NULL, NULL);
-}
-ktrace_t *xfs_buf_trace_buf;
-#define XFS_BUF_TRACE_SIZE	4096
-#define XB_TRACE(bp, id, data)	\
-	xfs_buf_trace(bp, id, (void *)data, (void *)__builtin_return_address(0))
-#else
-#define XB_TRACE(bp, id, data)	do { } while (0)
-#endif
-
 #ifdef XFS_BUF_LOCK_TRACKING
 # define XB_SET_OWNER(bp)	((bp)->b_last_holder = current->pid)
 # define XB_CLEAR_OWNER(bp)	((bp)->b_last_holder = -1)
@@ -279,7 +252,8 @@ _xfs_buf_initialize(
 	init_waitqueue_head(&bp->b_waiters);
 
 	XFS_STATS_INC(xb_create);
-	XB_TRACE(bp, "initialize", target);
+
+	trace_xfs_buf_init(bp, _RET_IP_);
 }
 
 /*
@@ -332,7 +306,7 @@ void
 xfs_buf_free(
 	xfs_buf_t		*bp)
 {
-	XB_TRACE(bp, "free", 0);
+	trace_xfs_buf_free(bp, _RET_IP_);
 
 	ASSERT(list_empty(&bp->b_hash_list));
 
@@ -445,7 +419,6 @@ _xfs_buf_lookup_pages(
 	if (page_count == bp->b_page_count)
 		bp->b_flags |= XBF_DONE;
 
-	XB_TRACE(bp, "lookup_pages", (long)page_count);
 	return error;
 }
 
@@ -548,7 +521,6 @@ found:
 	if (down_trylock(&bp->b_sema)) {
 		if (!(flags & XBF_TRYLOCK)) {
 			/* wait for buffer ownership */
-			XB_TRACE(bp, "get_lock", 0);
 			xfs_buf_lock(bp);
 			XFS_STATS_INC(xb_get_locked_waited);
 		} else {
@@ -571,7 +543,8 @@ found:
 		ASSERT((bp->b_flags & _XBF_DELWRI_Q) == 0);
 		bp->b_flags &= XBF_MAPPED;
 	}
-	XB_TRACE(bp, "got_lock", 0);
+
+	trace_xfs_buf_find(bp, flags, _RET_IP_);
 	XFS_STATS_INC(xb_get_locked);
 	return bp;
 }
@@ -627,7 +600,7 @@ xfs_buf_get(
 	bp->b_bn = ioff;
 	bp->b_count_desired = bp->b_buffer_length;
 
-	XB_TRACE(bp, "get", (unsigned long)flags);
+	trace_xfs_buf_get(bp, flags, _RET_IP_);
 	return bp;
 
  no_buffer:
@@ -644,8 +617,6 @@ _xfs_buf_read(
 {
 	int			status;
 
-	XB_TRACE(bp, "_xfs_buf_read", (unsigned long)flags);
-
 	ASSERT(!(flags & (XBF_DELWRI|XBF_WRITE)));
 	ASSERT(bp->b_bn != XFS_BUF_DADDR_NULL);
 
@@ -673,19 +644,18 @@ xfs_buf_read(
 
 	bp = xfs_buf_get(target, ioff, isize, flags);
 	if (bp) {
+		trace_xfs_buf_read(bp, flags, _RET_IP_);
+
 		if (!XFS_BUF_ISDONE(bp)) {
-			XB_TRACE(bp, "read", (unsigned long)flags);
 			XFS_STATS_INC(xb_get_read);
 			_xfs_buf_read(bp, flags);
 		} else if (flags & XBF_ASYNC) {
-			XB_TRACE(bp, "read_async", (unsigned long)flags);
 			/*
 			 * Read ahead call which is already satisfied,
 			 * drop the buffer
 			 */
 			goto no_buffer;
 		} else {
-			XB_TRACE(bp, "read_done", (unsigned long)flags);
 			/* We do not want read in the flags */
 			bp->b_flags &= ~XBF_READ;
 		}
@@ -823,7 +793,7 @@ xfs_buf_get_noaddr(
 
 	xfs_buf_unlock(bp);
 
-	XB_TRACE(bp, "no_daddr", len);
+	trace_xfs_buf_get_noaddr(bp, _RET_IP_);
 	return bp;
 
  fail_free_mem:
@@ -845,8 +815,8 @@ void
 xfs_buf_hold(
 	xfs_buf_t		*bp)
 {
+	trace_xfs_buf_hold(bp, _RET_IP_);
 	atomic_inc(&bp->b_hold);
-	XB_TRACE(bp, "hold", 0);
 }
 
 /*
@@ -859,7 +829,7 @@ xfs_buf_rele(
 {
 	xfs_bufhash_t		*hash = bp->b_hash;
 
-	XB_TRACE(bp, "rele", bp->b_relse);
+	trace_xfs_buf_rele(bp, _RET_IP_);
 
 	if (unlikely(!hash)) {
 		ASSERT(!bp->b_relse);
@@ -909,21 +879,19 @@ xfs_buf_cond_lock(
 	int			locked;
 
 	locked = down_trylock(&bp->b_sema) == 0;
-	if (locked) {
+	if (locked)
 		XB_SET_OWNER(bp);
-	}
-	XB_TRACE(bp, "cond_lock", (long)locked);
+
+	trace_xfs_buf_cond_lock(bp, _RET_IP_);
 	return locked ? 0 : -EBUSY;
 }
 
-#if defined(DEBUG) || defined(XFS_BLI_TRACE)
 int
 xfs_buf_lock_value(
 	xfs_buf_t		*bp)
 {
 	return bp->b_sema.count;
 }
-#endif
 
 /*
  * Locks a buffer object.
@@ -935,12 +903,14 @@ void
 xfs_buf_lock(
 	xfs_buf_t		*bp)
 {
-	XB_TRACE(bp, "lock", 0);
+	trace_xfs_buf_lock(bp, _RET_IP_);
+
 	if (atomic_read(&bp->b_io_remaining))
 		blk_run_address_space(bp->b_target->bt_mapping);
 	down(&bp->b_sema);
 	XB_SET_OWNER(bp);
-	XB_TRACE(bp, "locked", 0);
+
+	trace_xfs_buf_lock_done(bp, _RET_IP_);
 }
 
 /*
@@ -962,7 +932,8 @@ xfs_buf_unlock(
 
 	XB_CLEAR_OWNER(bp);
 	up(&bp->b_sema);
-	XB_TRACE(bp, "unlock", 0);
+
+	trace_xfs_buf_unlock(bp, _RET_IP_);
 }
 
 
@@ -974,17 +945,18 @@ void
 xfs_buf_pin(
 	xfs_buf_t		*bp)
 {
+	trace_xfs_buf_pin(bp, _RET_IP_);
 	atomic_inc(&bp->b_pin_count);
-	XB_TRACE(bp, "pin", (long)bp->b_pin_count.counter);
 }
 
 void
 xfs_buf_unpin(
 	xfs_buf_t		*bp)
 {
+	trace_xfs_buf_unpin(bp, _RET_IP_);
+
 	if (atomic_dec_and_test(&bp->b_pin_count))
 		wake_up_all(&bp->b_waiters);
-	XB_TRACE(bp, "unpin", (long)bp->b_pin_count.counter);
 }
 
 int
@@ -1035,7 +1007,7 @@ xfs_buf_iodone_work(
 	 */
 	if ((bp->b_error == EOPNOTSUPP) &&
 	    (bp->b_flags & (XBF_ORDERED|XBF_ASYNC)) == (XBF_ORDERED|XBF_ASYNC)) {
-		XB_TRACE(bp, "ordered_retry", bp->b_iodone);
+		trace_xfs_buf_ordered_retry(bp, _RET_IP_);
 		bp->b_flags &= ~XBF_ORDERED;
 		bp->b_flags |= _XFS_BARRIER_FAILED;
 		xfs_buf_iorequest(bp);
@@ -1050,12 +1022,12 @@ xfs_buf_ioend(
 	xfs_buf_t		*bp,
 	int			schedule)
 {
+	trace_xfs_buf_iodone(bp, _RET_IP_);
+
 	bp->b_flags &= ~(XBF_READ | XBF_WRITE | XBF_READ_AHEAD);
 	if (bp->b_error == 0)
 		bp->b_flags |= XBF_DONE;
 
-	XB_TRACE(bp, "iodone", bp->b_iodone);
-
 	if ((bp->b_iodone) || (bp->b_flags & XBF_ASYNC)) {
 		if (schedule) {
 			INIT_WORK(&bp->b_iodone_work, xfs_buf_iodone_work);
@@ -1075,7 +1047,7 @@ xfs_buf_ioerror(
 {
 	ASSERT(error >= 0 && error <= 0xffff);
 	bp->b_error = (unsigned short)error;
-	XB_TRACE(bp, "ioerror", (unsigned long)error);
+	trace_xfs_buf_ioerror(bp, error, _RET_IP_);
 }
 
 int
@@ -1083,7 +1055,7 @@ xfs_bawrite(
 	void			*mp,
 	struct xfs_buf		*bp)
 {
-	XB_TRACE(bp, "bawrite", 0);
+	trace_xfs_buf_bawrite(bp, _RET_IP_);
 
 	ASSERT(bp->b_bn != XFS_BUF_DADDR_NULL);
 
@@ -1102,7 +1074,7 @@ xfs_bdwrite(
 	void			*mp,
 	struct xfs_buf		*bp)
 {
-	XB_TRACE(bp, "bdwrite", 0);
+	trace_xfs_buf_bdwrite(bp, _RET_IP_);
 
 	bp->b_strat = xfs_bdstrat_cb;
 	bp->b_mount = mp;
@@ -1253,7 +1225,7 @@ int
 xfs_buf_iorequest(
 	xfs_buf_t		*bp)
 {
-	XB_TRACE(bp, "iorequest", 0);
+	trace_xfs_buf_iorequest(bp, _RET_IP_);
 
 	if (bp->b_flags & XBF_DELWRI) {
 		xfs_buf_delwri_queue(bp, 1);
@@ -1287,11 +1259,13 @@ int
 xfs_buf_iowait(
 	xfs_buf_t		*bp)
 {
-	XB_TRACE(bp, "iowait", 0);
+	trace_xfs_buf_iowait(bp, _RET_IP_);
+
 	if (atomic_read(&bp->b_io_remaining))
 		blk_run_address_space(bp->b_target->bt_mapping);
 	wait_for_completion(&bp->b_iowait);
-	XB_TRACE(bp, "iowaited", (long)bp->b_error);
+
+	trace_xfs_buf_iowait_done(bp, _RET_IP_);
 	return bp->b_error;
 }
 
@@ -1604,7 +1578,8 @@ xfs_buf_delwri_queue(
 	struct list_head	*dwq = &bp->b_target->bt_delwrite_queue;
 	spinlock_t		*dwlk = &bp->b_target->bt_delwrite_lock;
 
-	XB_TRACE(bp, "delwri_q", (long)unlock);
+	trace_xfs_buf_delwri_queue(bp, _RET_IP_);
+
 	ASSERT((bp->b_flags&(XBF_DELWRI|XBF_ASYNC)) == (XBF_DELWRI|XBF_ASYNC));
 
 	spin_lock(dwlk);
@@ -1644,7 +1619,7 @@ xfs_buf_delwri_dequeue(
 	if (dequeued)
 		xfs_buf_rele(bp);
 
-	XB_TRACE(bp, "delwri_dq", (long)dequeued);
+	trace_xfs_buf_delwri_dequeue(bp, _RET_IP_);
 }
 
 STATIC void
@@ -1692,7 +1667,7 @@ xfs_buf_delwri_split(
 	INIT_LIST_HEAD(list);
 	spin_lock(dwlk);
 	list_for_each_entry_safe(bp, n, dwq, b_list) {
-		XB_TRACE(bp, "walkq1", (long)xfs_buf_ispin(bp));
+		trace_xfs_buf_delwri_split(bp, _RET_IP_);
 		ASSERT(bp->b_flags & XBF_DELWRI);
 
 		if (!xfs_buf_ispin(bp) && !xfs_buf_cond_lock(bp)) {
@@ -1816,14 +1791,10 @@ xfs_flush_buftarg(
 int __init
 xfs_buf_init(void)
 {
-#ifdef XFS_BUF_TRACE
-	xfs_buf_trace_buf = ktrace_alloc(XFS_BUF_TRACE_SIZE, KM_NOFS);
-#endif
-
 	xfs_buf_zone = kmem_zone_init_flags(sizeof(xfs_buf_t), "xfs_buf",
 						KM_ZONE_HWALIGN, NULL);
 	if (!xfs_buf_zone)
-		goto out_free_trace_buf;
+		goto out;
 
 	xfslogd_workqueue = create_workqueue("xfslogd");
 	if (!xfslogd_workqueue)
@@ -1846,10 +1817,7 @@ xfs_buf_init(void)
 	destroy_workqueue(xfslogd_workqueue);
  out_free_buf_zone:
 	kmem_zone_destroy(xfs_buf_zone);
- out_free_trace_buf:
-#ifdef XFS_BUF_TRACE
-	ktrace_free(xfs_buf_trace_buf);
-#endif
+ out:
 	return -ENOMEM;
 }
 
@@ -1861,9 +1829,6 @@ xfs_buf_terminate(void)
 	destroy_workqueue(xfsdatad_workqueue);
 	destroy_workqueue(xfslogd_workqueue);
 	kmem_zone_destroy(xfs_buf_zone);
-#ifdef XFS_BUF_TRACE
-	ktrace_free(xfs_buf_trace_buf);
-#endif
 }
 
 #ifdef CONFIG_KDB_MODULES
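The trace_xfs_buf_*() calls introduced above still need their tracepoint bodies emitted exactly once in the module. The usual pattern for the event tracing infrastructure (presumably handled by a companion xfs_trace.c elsewhere in this series; it is not visible in this single-file diff) is a tiny translation unit that defines CREATE_TRACE_POINTS before pulling in the trace header. A sketch under that assumption:

/*
 * Standard tracepoint instantiation pattern -- a sketch, not the actual
 * XFS file.  Whatever XFS headers xfs_trace.h depends on must come
 * first; CREATE_TRACE_POINTS then makes <trace/define_trace.h> emit the
 * real definitions behind the trace_xfs_buf_*() calls.
 */
#include "xfs.h"		/* assumed: pulls in the xfs_buf types */

#define CREATE_TRACE_POINTS
#include "xfs_trace.h"

Once built in, the events can be enabled and read at runtime through the kernel's tracing interface, replacing the fixed-size ktrace ring buffer that xfs_buf_init() used to allocate.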