-rw-r--r--	fs/xfs/xfs_log.c	| 161
-rw-r--r--	fs/xfs/xfs_log_priv.h	|   4
2 files changed, 72 insertions, 93 deletions
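
The patch below retires the IRIX-era LOG_LOCK()/LOG_UNLOCK() wrappers (and the SPLDECL() cookie variable they required) in favor of taking log->l_icloglock directly with the kernel's native spinlock calls. As a rough illustration of the conversion pattern applied throughout xfs_log.c (a sketch, not a hunk from the patch):

	/* before: wrapper macros around l_icloglock, carrying an spl cookie */
	SPLDECL(s);
	s = LOG_LOCK(log);
	/* ... manipulate iclog state ... */
	LOG_UNLOCK(log, s);

	/* after: plain spinlock calls, no cookie */
	spin_lock(&log->l_icloglock);
	/* ... manipulate iclog state ... */
	spin_unlock(&log->l_icloglock);

Note that the grant lock keeps its GRANT_LOCK()/GRANT_UNLOCK() wrappers, and the sv_wait() callers still pass the old cookie argument next to &log->l_icloglock, presumably because sv_wait() simply ignores it.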
diff --git a/fs/xfs/xfs_log.c b/fs/xfs/xfs_log.c
index 77c12715a7d0..5eb82306ef76 100644
--- a/fs/xfs/xfs_log.c
+++ b/fs/xfs/xfs_log.c
@@ -399,10 +399,10 @@ xfs_log_notify(xfs_mount_t *mp, /* mount of partition */
 {
 	xlog_t *log = mp->m_log;
 	xlog_in_core_t *iclog = (xlog_in_core_t *)iclog_hndl;
-	int abortflg, spl;
+	int abortflg;

 	cb->cb_next = NULL;
-	spl = LOG_LOCK(log);
+	spin_lock(&log->l_icloglock);
 	abortflg = (iclog->ic_state & XLOG_STATE_IOERROR);
 	if (!abortflg) {
 		ASSERT_ALWAYS((iclog->ic_state == XLOG_STATE_ACTIVE) ||
@@ -411,7 +411,7 @@ xfs_log_notify(xfs_mount_t *mp, /* mount of partition */
 		*(iclog->ic_callback_tail) = cb;
 		iclog->ic_callback_tail = &(cb->cb_next);
 	}
-	LOG_UNLOCK(log, spl);
+	spin_unlock(&log->l_icloglock);
 	return abortflg;
 } /* xfs_log_notify */

@@ -606,7 +606,6 @@ xfs_log_unmount_write(xfs_mount_t *mp)
 	xfs_log_ticket_t tic = NULL;
 	xfs_lsn_t lsn;
 	int error;
-	SPLDECL(s);

 	/* the data section must be 32 bit size aligned */
 	struct {
@@ -659,24 +658,24 @@ xfs_log_unmount_write(xfs_mount_t *mp)
 		}


-		s = LOG_LOCK(log);
+		spin_lock(&log->l_icloglock);
 		iclog = log->l_iclog;
 		iclog->ic_refcnt++;
-		LOG_UNLOCK(log, s);
+		spin_unlock(&log->l_icloglock);
 		xlog_state_want_sync(log, iclog);
 		(void) xlog_state_release_iclog(log, iclog);

-		s = LOG_LOCK(log);
+		spin_lock(&log->l_icloglock);
 		if (!(iclog->ic_state == XLOG_STATE_ACTIVE ||
 		      iclog->ic_state == XLOG_STATE_DIRTY)) {
 			if (!XLOG_FORCED_SHUTDOWN(log)) {
 				sv_wait(&iclog->ic_forcesema, PMEM,
 					&log->l_icloglock, s);
 			} else {
-				LOG_UNLOCK(log, s);
+				spin_unlock(&log->l_icloglock);
 			}
 		} else {
-			LOG_UNLOCK(log, s);
+			spin_unlock(&log->l_icloglock);
 		}
 		if (tic) {
 			xlog_trace_loggrant(log, tic, "unmount rec");
@@ -697,15 +696,15 @@ xfs_log_unmount_write(xfs_mount_t *mp)
 		 * a file system that went into forced_shutdown as
 		 * the result of an unmount..
 		 */
-		s = LOG_LOCK(log);
+		spin_lock(&log->l_icloglock);
 		iclog = log->l_iclog;
 		iclog->ic_refcnt++;
-		LOG_UNLOCK(log, s);
+		spin_unlock(&log->l_icloglock);

 		xlog_state_want_sync(log, iclog);
 		(void) xlog_state_release_iclog(log, iclog);

-		s = LOG_LOCK(log);
+		spin_lock(&log->l_icloglock);

 		if ( ! ( iclog->ic_state == XLOG_STATE_ACTIVE
 			|| iclog->ic_state == XLOG_STATE_DIRTY
@@ -714,7 +713,7 @@ xfs_log_unmount_write(xfs_mount_t *mp)
 				sv_wait(&iclog->ic_forcesema, PMEM,
 					&log->l_icloglock, s);
 			} else {
-				LOG_UNLOCK(log, s);
+				spin_unlock(&log->l_icloglock);
 			}
 		}

@@ -770,9 +769,9 @@ xfs_log_move_tail(xfs_mount_t *mp,

 	if (tail_lsn == 0) {
 		/* needed since sync_lsn is 64 bits */
-		s = LOG_LOCK(log);
+		spin_lock(&log->l_icloglock);
 		tail_lsn = log->l_last_sync_lsn;
-		LOG_UNLOCK(log, s);
+		spin_unlock(&log->l_icloglock);
 	}

 	s = GRANT_LOCK(log);
@@ -836,14 +835,13 @@ xfs_log_move_tail(xfs_mount_t *mp,
 int
 xfs_log_need_covered(xfs_mount_t *mp)
 {
-	SPLDECL(s);
 	int needed = 0, gen;
 	xlog_t *log = mp->m_log;

 	if (!xfs_fs_writable(mp))
 		return 0;

-	s = LOG_LOCK(log);
+	spin_lock(&log->l_icloglock);
 	if (((log->l_covered_state == XLOG_STATE_COVER_NEED) ||
 		(log->l_covered_state == XLOG_STATE_COVER_NEED2))
 		&& !xfs_trans_first_ail(mp, &gen)
@@ -856,7 +854,7 @@ xfs_log_need_covered(xfs_mount_t *mp)
 		}
 		needed = 1;
 	}
-	LOG_UNLOCK(log, s);
+	spin_unlock(&log->l_icloglock);
 	return needed;
 }

@@ -1534,7 +1532,6 @@ xlog_dealloc_log(xlog_t *log)
 	xlog_ticket_t *tic, *next_tic;
 	int i;

-
 	iclog = log->l_iclog;
 	for (i=0; i<log->l_iclog_bufs; i++) {
 		sv_destroy(&iclog->ic_forcesema);
@@ -1592,14 +1589,12 @@ xlog_state_finish_copy(xlog_t *log,
 	int record_cnt,
 	int copy_bytes)
 {
-	SPLDECL(s);
-
-	s = LOG_LOCK(log);
+	spin_lock(&log->l_icloglock);

 	iclog->ic_header.h_num_logops += record_cnt;
 	iclog->ic_offset += copy_bytes;

-	LOG_UNLOCK(log, s);
+	spin_unlock(&log->l_icloglock);
 } /* xlog_state_finish_copy */


@@ -2091,7 +2086,7 @@ xlog_state_do_callback(
 					 * looping too many times */
 	SPLDECL(s);

-	s = LOG_LOCK(log);
+	spin_lock(&log->l_icloglock);
 	first_iclog = iclog = log->l_iclog;
 	ioerrors = 0;
 	funcdidcallbacks = 0;
@@ -2136,7 +2131,7 @@ xlog_state_do_callback(
 			 * to DO_CALLBACK, we will not process it when
 			 * we retry since a previous iclog is in the
 			 * CALLBACK and the state cannot change since
-			 * we are holding the LOG_LOCK.
+			 * we are holding the l_icloglock.
 			 */
 			if (!(iclog->ic_state &
 				(XLOG_STATE_DONE_SYNC |
@@ -2174,7 +2169,7 @@ xlog_state_do_callback(

 			iclog->ic_state = XLOG_STATE_CALLBACK;

-			LOG_UNLOCK(log, s);
+			spin_unlock(&log->l_icloglock);

 			/* l_last_sync_lsn field protected by
 			 * GRANT_LOCK. Don't worry about iclog's lsn.
@@ -2195,7 +2190,7 @@ xlog_state_do_callback(
 			 * empty and change the state to DIRTY so that
 			 * we don't miss any more callbacks being added.
 			 */
-			s = LOG_LOCK(log);
+			spin_lock(&log->l_icloglock);
 		} else {
 			ioerrors++;
 		}
@@ -2204,14 +2199,14 @@ xlog_state_do_callback(
 		while (cb) {
 			iclog->ic_callback_tail = &(iclog->ic_callback);
 			iclog->ic_callback = NULL;
-			LOG_UNLOCK(log, s);
+			spin_unlock(&log->l_icloglock);

 			/* perform callbacks in the order given */
 			for (; cb; cb = cb_next) {
 				cb_next = cb->cb_next;
 				cb->cb_func(cb->cb_arg, aborted);
 			}
-			s = LOG_LOCK(log);
+			spin_lock(&log->l_icloglock);
 			cb = iclog->ic_callback;
 		}

@@ -2258,7 +2253,7 @@ xlog_state_do_callback(
 		 *
 		 * SYNCING - i/o completion will go through logs
 		 * DONE_SYNC - interrupt thread should be waiting for
-		 *              LOG_LOCK
+		 *              l_icloglock
 		 * IOERROR - give up hope all ye who enter here
 		 */
 		if (iclog->ic_state == XLOG_STATE_WANT_SYNC ||
@@ -2276,7 +2271,7 @@ xlog_state_do_callback(
 		flushcnt = log->l_flushcnt;
 		log->l_flushcnt = 0;
 	}
-	LOG_UNLOCK(log, s);
+	spin_unlock(&log->l_icloglock);
 	while (flushcnt--)
 		vsema(&log->l_flushsema);
 } /* xlog_state_do_callback */
@@ -2302,9 +2297,8 @@ xlog_state_done_syncing(
 	int aborted)
 {
 	xlog_t *log = iclog->ic_log;
-	SPLDECL(s);

-	s = LOG_LOCK(log);
+	spin_lock(&log->l_icloglock);

 	ASSERT(iclog->ic_state == XLOG_STATE_SYNCING ||
 	       iclog->ic_state == XLOG_STATE_IOERROR);
@@ -2320,7 +2314,7 @@ xlog_state_done_syncing(
 	 */
 	if (iclog->ic_state != XLOG_STATE_IOERROR) {
 		if (--iclog->ic_bwritecnt == 1) {
-			LOG_UNLOCK(log, s);
+			spin_unlock(&log->l_icloglock);
 			return;
 		}
 		iclog->ic_state = XLOG_STATE_DONE_SYNC;
@@ -2332,7 +2326,7 @@ xlog_state_done_syncing(
 	 * I/O, the others get to wait for the result.
 	 */
 	sv_broadcast(&iclog->ic_writesema);
-	LOG_UNLOCK(log, s);
+	spin_unlock(&log->l_icloglock);
 	xlog_state_do_callback(log, aborted, iclog); /* also cleans log */
 } /* xlog_state_done_syncing */

@@ -2365,23 +2359,22 @@ xlog_state_get_iclog_space(xlog_t *log,
 		      int *continued_write,
 		      int *logoffsetp)
 {
-	SPLDECL(s);
 	int log_offset;
 	xlog_rec_header_t *head;
 	xlog_in_core_t *iclog;
 	int error;

 restart:
-	s = LOG_LOCK(log);
+	spin_lock(&log->l_icloglock);
 	if (XLOG_FORCED_SHUTDOWN(log)) {
-		LOG_UNLOCK(log, s);
+		spin_unlock(&log->l_icloglock);
 		return XFS_ERROR(EIO);
 	}

 	iclog = log->l_iclog;
 	if (! (iclog->ic_state == XLOG_STATE_ACTIVE)) {
 		log->l_flushcnt++;
-		LOG_UNLOCK(log, s);
+		spin_unlock(&log->l_icloglock);
 		xlog_trace_iclog(iclog, XLOG_TRACE_SLEEP_FLUSH);
 		XFS_STATS_INC(xs_log_noiclogs);
 		/* Ensure that log writes happen */
@@ -2423,12 +2416,12 @@ restart:

 		/* If I'm the only one writing to this iclog, sync it to disk */
 		if (iclog->ic_refcnt == 1) {
-			LOG_UNLOCK(log, s);
+			spin_unlock(&log->l_icloglock);
 			if ((error = xlog_state_release_iclog(log, iclog)))
 				return error;
 		} else {
 			iclog->ic_refcnt--;
-			LOG_UNLOCK(log, s);
+			spin_unlock(&log->l_icloglock);
 		}
 		goto restart;
 	}
@@ -2449,7 +2442,7 @@ restart:
 	*iclogp = iclog;

 	ASSERT(iclog->ic_offset <= iclog->ic_size);
-	LOG_UNLOCK(log, s);
+	spin_unlock(&log->l_icloglock);

 	*logoffsetp = log_offset;
 	return 0;
@@ -2803,11 +2796,9 @@ void
 xlog_state_put_ticket(xlog_t *log,
 		      xlog_ticket_t *tic)
 {
-	unsigned long s;
-
-	s = LOG_LOCK(log);
+	spin_lock(&log->l_icloglock);
 	xlog_ticket_put(log, tic);
-	LOG_UNLOCK(log, s);
+	spin_unlock(&log->l_icloglock);
 } /* xlog_state_put_ticket */

 /*
@@ -2823,15 +2814,14 @@ int
 xlog_state_release_iclog(xlog_t *log,
 			 xlog_in_core_t *iclog)
 {
-	SPLDECL(s);
 	int sync = 0; /* do we sync? */

 	xlog_assign_tail_lsn(log->l_mp);

-	s = LOG_LOCK(log);
+	spin_lock(&log->l_icloglock);

 	if (iclog->ic_state & XLOG_STATE_IOERROR) {
-		LOG_UNLOCK(log, s);
+		spin_unlock(&log->l_icloglock);
 		return XFS_ERROR(EIO);
 	}

@@ -2848,7 +2838,7 @@ xlog_state_release_iclog(xlog_t *log,
 		/* cycle incremented when incrementing curr_block */
 	}

-	LOG_UNLOCK(log, s);
+	spin_unlock(&log->l_icloglock);

 	/*
 	 * We let the log lock go, so it's possible that we hit a log I/O
@@ -2939,13 +2929,12 @@ xlog_state_sync_all(xlog_t *log, uint flags, int *log_flushed)
 {
 	xlog_in_core_t *iclog;
 	xfs_lsn_t lsn;
-	SPLDECL(s);

-	s = LOG_LOCK(log);
+	spin_lock(&log->l_icloglock);

 	iclog = log->l_iclog;
 	if (iclog->ic_state & XLOG_STATE_IOERROR) {
-		LOG_UNLOCK(log, s);
+		spin_unlock(&log->l_icloglock);
 		return XFS_ERROR(EIO);
 	}

@@ -2980,12 +2969,12 @@ xlog_state_sync_all(xlog_t *log, uint flags, int *log_flushed)
 				iclog->ic_refcnt++;
 				lsn = INT_GET(iclog->ic_header.h_lsn, ARCH_CONVERT);
 				xlog_state_switch_iclogs(log, iclog, 0);
-				LOG_UNLOCK(log, s);
+				spin_unlock(&log->l_icloglock);

 				if (xlog_state_release_iclog(log, iclog))
 					return XFS_ERROR(EIO);
 				*log_flushed = 1;
-				s = LOG_LOCK(log);
+				spin_lock(&log->l_icloglock);
 				if (INT_GET(iclog->ic_header.h_lsn, ARCH_CONVERT) == lsn &&
 				    iclog->ic_state != XLOG_STATE_DIRTY)
 					goto maybe_sleep;
@@ -3011,12 +3000,12 @@ maybe_sleep:
 	if (flags & XFS_LOG_SYNC) {
 		/*
 		 * We must check if we're shutting down here, before
-		 * we wait, while we're holding the LOG_LOCK.
+		 * we wait, while we're holding the l_icloglock.
 		 * Then we check again after waking up, in case our
 		 * sleep was disturbed by a bad news.
 		 */
 		if (iclog->ic_state & XLOG_STATE_IOERROR) {
-			LOG_UNLOCK(log, s);
+			spin_unlock(&log->l_icloglock);
 			return XFS_ERROR(EIO);
 		}
 		XFS_STATS_INC(xs_log_force_sleep);
@@ -3033,7 +3022,7 @@ maybe_sleep:
 	} else {

 no_sleep:
-		LOG_UNLOCK(log, s);
+		spin_unlock(&log->l_icloglock);
 	}
 	return 0;
 } /* xlog_state_sync_all */
@@ -3059,15 +3048,13 @@ xlog_state_sync(xlog_t *log,
 {
 	xlog_in_core_t *iclog;
 	int already_slept = 0;
-	SPLDECL(s);
-

 try_again:
-	s = LOG_LOCK(log);
+	spin_lock(&log->l_icloglock);
 	iclog = log->l_iclog;

 	if (iclog->ic_state & XLOG_STATE_IOERROR) {
-		LOG_UNLOCK(log, s);
+		spin_unlock(&log->l_icloglock);
 		return XFS_ERROR(EIO);
 	}

@@ -3078,7 +3065,7 @@ try_again:
 	}

 	if (iclog->ic_state == XLOG_STATE_DIRTY) {
-		LOG_UNLOCK(log, s);
+		spin_unlock(&log->l_icloglock);
 		return 0;
 	}

@@ -3113,11 +3100,11 @@ try_again:
 		} else {
 			iclog->ic_refcnt++;
 			xlog_state_switch_iclogs(log, iclog, 0);
-			LOG_UNLOCK(log, s);
+			spin_unlock(&log->l_icloglock);
 			if (xlog_state_release_iclog(log, iclog))
 				return XFS_ERROR(EIO);
 			*log_flushed = 1;
-			s = LOG_LOCK(log);
+			spin_lock(&log->l_icloglock);
 		}
 	}

@@ -3129,7 +3116,7 @@ try_again:
 		 * gotten a log write error.
 		 */
 		if (iclog->ic_state & XLOG_STATE_IOERROR) {
-			LOG_UNLOCK(log, s);
+			spin_unlock(&log->l_icloglock);
 			return XFS_ERROR(EIO);
 		}
 		XFS_STATS_INC(xs_log_force_sleep);
@@ -3143,13 +3130,13 @@ try_again:
 				return XFS_ERROR(EIO);
 			*log_flushed = 1;
 		} else { /* just return */
-			LOG_UNLOCK(log, s);
+			spin_unlock(&log->l_icloglock);
 		}
 		return 0;

 	} while (iclog != log->l_iclog);

-	LOG_UNLOCK(log, s);
+	spin_unlock(&log->l_icloglock);
 	return 0;
 } /* xlog_state_sync */

@@ -3161,9 +3148,7 @@ try_again:
 void
 xlog_state_want_sync(xlog_t *log, xlog_in_core_t *iclog)
 {
-	SPLDECL(s);
-
-	s = LOG_LOCK(log);
+	spin_lock(&log->l_icloglock);

 	if (iclog->ic_state == XLOG_STATE_ACTIVE) {
 		xlog_state_switch_iclogs(log, iclog, 0);
@@ -3172,7 +3157,7 @@ xlog_state_want_sync(xlog_t *log, xlog_in_core_t *iclog)
 			(XLOG_STATE_WANT_SYNC|XLOG_STATE_IOERROR));
 	}

-	LOG_UNLOCK(log, s);
+	spin_unlock(&log->l_icloglock);
 } /* xlog_state_want_sync */


@@ -3194,7 +3179,6 @@ xlog_state_ticket_alloc(xlog_t *log)
 	xlog_ticket_t *next;
 	xfs_caddr_t buf;
 	uint i = (NBPP / sizeof(xlog_ticket_t)) - 2;
-	SPLDECL(s);

 	/*
 	 * The kmem_zalloc may sleep, so we shouldn't be holding the
@@ -3202,7 +3186,7 @@ xlog_state_ticket_alloc(xlog_t *log)
 	 */
 	buf = (xfs_caddr_t) kmem_zalloc(NBPP, KM_SLEEP);

-	s = LOG_LOCK(log);
+	spin_lock(&log->l_icloglock);

 	/* Attach 1st ticket to Q, so we can keep track of allocated memory */
 	t_list = (xlog_ticket_t *)buf;
@@ -3231,7 +3215,7 @@ xlog_state_ticket_alloc(xlog_t *log)
 	}
 	t_list->t_next = NULL;
 	log->l_tail = t_list;
-	LOG_UNLOCK(log, s);
+	spin_unlock(&log->l_icloglock);
 } /* xlog_state_ticket_alloc */


@@ -3282,15 +3266,14 @@ xlog_ticket_get(xlog_t *log,
 {
 	xlog_ticket_t *tic;
 	uint num_headers;
-	SPLDECL(s);

 alloc:
 	if (log->l_freelist == NULL)
 		xlog_state_ticket_alloc(log); /* potentially sleep */

-	s = LOG_LOCK(log);
+	spin_lock(&log->l_icloglock);
 	if (log->l_freelist == NULL) {
-		LOG_UNLOCK(log, s);
+		spin_unlock(&log->l_icloglock);
 		goto alloc;
 	}
 	tic = log->l_freelist;
@@ -3298,7 +3281,7 @@ xlog_ticket_get(xlog_t *log,
 	if (log->l_freelist == NULL)
 		log->l_tail = NULL;
 	log->l_ticket_cnt--;
-	LOG_UNLOCK(log, s);
+	spin_unlock(&log->l_icloglock);

 	/*
 	 * Permanent reservations have up to 'cnt'-1 active log operations
@@ -3473,10 +3456,9 @@ xlog_verify_iclog(xlog_t *log,
 	__uint8_t clientid;
 	int len, i, j, k, op_len;
 	int idx;
-	SPLDECL(s);

 	/* check validity of iclog pointers */
-	s = LOG_LOCK(log);
+	spin_lock(&log->l_icloglock);
 	icptr = log->l_iclog;
 	for (i=0; i < log->l_iclog_bufs; i++) {
 		if (icptr == NULL)
@@ -3485,7 +3467,7 @@ xlog_verify_iclog(xlog_t *log,
 	}
 	if (icptr != log->l_iclog)
 		xlog_panic("xlog_verify_iclog: corrupt iclog ring");
-	LOG_UNLOCK(log, s);
+	spin_unlock(&log->l_icloglock);

 	/* check log magic numbers */
 	ptr = (xfs_caddr_t) &(iclog->ic_header);
@@ -3549,7 +3531,7 @@ xlog_verify_iclog(xlog_t *log,
 #endif

 /*
- * Mark all iclogs IOERROR. LOG_LOCK is held by the caller.
+ * Mark all iclogs IOERROR. l_icloglock is held by the caller.
  */
 STATIC int
 xlog_state_ioerror(
@@ -3598,7 +3580,6 @@ xfs_log_force_umount(
 	int retval;
 	int dummy;
 	SPLDECL(s);
-	SPLDECL(s2);

 	log = mp->m_log;

@@ -3628,7 +3609,7 @@ xfs_log_force_umount(
 	 * everybody up to tell the bad news.
 	 */
 	s = GRANT_LOCK(log);
-	s2 = LOG_LOCK(log);
+	spin_lock(&log->l_icloglock);
 	mp->m_flags |= XFS_MOUNT_FS_SHUTDOWN;
 	XFS_BUF_DONE(mp->m_sb_bp);
 	/*
@@ -3644,7 +3625,7 @@ xfs_log_force_umount(
 	 */
 	if (logerror)
 		retval = xlog_state_ioerror(log);
-	LOG_UNLOCK(log, s2);
+	spin_unlock(&log->l_icloglock);

 	/*
 	 * We don't want anybody waiting for log reservations
@@ -3676,9 +3657,9 @@ xfs_log_force_umount(
 		 * log down completely.
 		 */
 		xlog_state_sync_all(log, XFS_LOG_FORCE|XFS_LOG_SYNC, &dummy);
-		s2 = LOG_LOCK(log);
+		spin_lock(&log->l_icloglock);
 		retval = xlog_state_ioerror(log);
-		LOG_UNLOCK(log, s2);
+		spin_unlock(&log->l_icloglock);
 	}
 	/*
 	 * Wake up everybody waiting on xfs_log_force.
@@ -3691,13 +3672,13 @@ xfs_log_force_umount(
 	{
 		xlog_in_core_t *iclog;

-		s = LOG_LOCK(log);
+		spin_lock(&log->l_icloglock);
 		iclog = log->l_iclog;
 		do {
 			ASSERT(iclog->ic_callback == 0);
 			iclog = iclog->ic_next;
 		} while (iclog != log->l_iclog);
-		LOG_UNLOCK(log, s);
+		spin_unlock(&log->l_icloglock);
 	}
 #endif
 /* return non-zero if log IOERROR transition had already happened */
diff --git a/fs/xfs/xfs_log_priv.h b/fs/xfs/xfs_log_priv.h
index 752f964b3699..5fa21b030666 100644
--- a/fs/xfs/xfs_log_priv.h
+++ b/fs/xfs/xfs_log_priv.h
@@ -107,8 +107,6 @@ struct xfs_mount;

 #define GRANT_LOCK(log)		mutex_spinlock(&(log)->l_grant_lock)
 #define GRANT_UNLOCK(log, s)	mutex_spinunlock(&(log)->l_grant_lock, s)
-#define LOG_LOCK(log)		mutex_spinlock(&(log)->l_icloglock)
-#define LOG_UNLOCK(log, s)	mutex_spinunlock(&(log)->l_icloglock, s)

 #define xlog_panic(args...)	cmn_err(CE_PANIC, ## args)
 #define xlog_exit(args...)	cmn_err(CE_PANIC, ## args)
@@ -415,7 +413,7 @@ typedef struct log {
 	xlog_ticket_t	*l_unmount_free;/* kmem_free these addresses */
 	xlog_ticket_t	*l_tail;	/* free list of tickets */
 	xlog_in_core_t	*l_iclog;	/* head log queue */
-	lock_t		l_icloglock;	/* grab to change iclog state */
+	spinlock_t	l_icloglock;	/* grab to change iclog state */
 	xfs_lsn_t	l_tail_lsn;	/* lsn of 1st LR with unflushed
 					 * buffers */
 	xfs_lsn_t	l_last_sync_lsn;/* lsn of last LR on disk */