Diffstat (limited to 'fs/xfs/xfs_log.c')
-rw-r--r--   fs/xfs/xfs_log.c   135
1 file changed, 65 insertions(+), 70 deletions(-)
diff --git a/fs/xfs/xfs_log.c b/fs/xfs/xfs_log.c
index ad3d26ddfe31..ccba14eb9dbe 100644
--- a/fs/xfs/xfs_log.c
+++ b/fs/xfs/xfs_log.c
@@ -160,7 +160,7 @@ void
 xlog_trace_iclog(xlog_in_core_t *iclog, uint state)
 {
         if (!iclog->ic_trace)
-                iclog->ic_trace = ktrace_alloc(256, KM_SLEEP);
+                iclog->ic_trace = ktrace_alloc(256, KM_NOFS);
         ktrace_enter(iclog->ic_trace,
                      (void *)((unsigned long)state),
                      (void *)((unsigned long)current_pid()),
@@ -226,20 +226,24 @@ xlog_grant_sub_space(struct log *log, int bytes)
 static void
 xlog_grant_add_space_write(struct log *log, int bytes)
 {
-        log->l_grant_write_bytes += bytes;
-        if (log->l_grant_write_bytes > log->l_logsize) {
-                log->l_grant_write_bytes -= log->l_logsize;
+        int tmp = log->l_logsize - log->l_grant_write_bytes;
+        if (tmp > bytes)
+                log->l_grant_write_bytes += bytes;
+        else {
                 log->l_grant_write_cycle++;
+                log->l_grant_write_bytes = bytes - tmp;
         }
 }
 
 static void
 xlog_grant_add_space_reserve(struct log *log, int bytes)
 {
-        log->l_grant_reserve_bytes += bytes;
-        if (log->l_grant_reserve_bytes > log->l_logsize) {
-                log->l_grant_reserve_bytes -= log->l_logsize;
+        int tmp = log->l_logsize - log->l_grant_reserve_bytes;
+        if (tmp > bytes)
+                log->l_grant_reserve_bytes += bytes;
+        else {
                 log->l_grant_reserve_cycle++;
+                log->l_grant_reserve_bytes = bytes - tmp;
         }
 }
 
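The rewritten grant helpers compute the space left in the current log cycle up front, then either advance within the cycle or carry the remainder into the next one. A small standalone C sketch of the same arithmetic (the struct and field names below only mimic the kernel ones for illustration):

#include <assert.h>
#include <stdio.h>

/* Illustrative stand-in for a log grant head: wrap count plus byte offset. */
struct grant_head {
        int cycle;      /* how many times the head has wrapped */
        int bytes;      /* byte offset within the current cycle */
};

/*
 * Same shape as the new xlog_grant_add_space_write(): work out the room
 * left in this cycle first, then either stay in the cycle or wrap.
 */
static void grant_add_space(struct grant_head *head, int logsize, int bytes)
{
        int tmp = logsize - head->bytes;        /* room left in this cycle */

        if (tmp > bytes) {
                head->bytes += bytes;           /* fits in the current cycle */
        } else {
                head->cycle++;                  /* wrap into the next cycle */
                head->bytes = bytes - tmp;
        }
}

int main(void)
{
        struct grant_head head = { .cycle = 1, .bytes = 900 };

        grant_add_space(&head, 1024, 200);      /* 900 + 200 wraps past 1024 */
        printf("cycle=%d bytes=%d\n", head.cycle, head.bytes);
        assert(head.cycle == 2 && head.bytes == 76);
        return 0;
}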
@@ -332,15 +336,12 @@ xfs_log_done(xfs_mount_t *mp,
         } else {
                 xlog_trace_loggrant(log, ticket, "xfs_log_done: (permanent)");
                 xlog_regrant_reserve_log_space(log, ticket);
-        }
-
-        /* If this ticket was a permanent reservation and we aren't
-         * trying to release it, reset the inited flags; so next time
-         * we write, a start record will be written out.
-         */
-        if ((ticket->t_flags & XLOG_TIC_PERM_RESERV) &&
-            (flags & XFS_LOG_REL_PERM_RESERV) == 0)
+                /* If this ticket was a permanent reservation and we aren't
+                 * trying to release it, reset the inited flags; so next time
+                 * we write, a start record will be written out.
+                 */
                 ticket->t_flags |= XLOG_TIC_INITED;
+        }
 
         return lsn;
 } /* xfs_log_done */
@@ -353,11 +354,11 @@ xfs_log_done(xfs_mount_t *mp,
  * Asynchronous forces are implemented by setting the WANT_SYNC
  * bit in the appropriate in-core log and then returning.
  *
- * Synchronous forces are implemented with a semaphore. All callers
- * to force a given lsn to disk will wait on a semaphore attached to the
+ * Synchronous forces are implemented with a signal variable. All callers
+ * to force a given lsn to disk will wait on a the sv attached to the
  * specific in-core log. When given in-core log finally completes its
  * write to disk, that thread will wake up all threads waiting on the
- * semaphore.
+ * sv.
  */
 int
 _xfs_log_force(
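The updated comment describes the sync-force path in terms of a sleep/wakeup object (sv_t) rather than a semaphore: every caller forcing an lsn sleeps on the wait object of the in-core log, and the I/O completion path wakes them all at once. A rough userspace analogue of that pattern, using a POSIX condition variable in place of the kernel's sv_t (types and helpers below are invented for illustration, not XFS API):

#include <pthread.h>
#include <stdbool.h>

/* Illustrative stand-in for an in-core log buffer and its force-wait queue. */
struct iclog {
        pthread_mutex_t lock;           /* plays the role of l_icloglock */
        pthread_cond_t  force_wait;     /* plays the role of ic_force_wait */
        bool            written;        /* set once the buffer has hit disk */
};

/* A caller forcing the log: sleep until this iclog has been written out. */
void force_wait_for_write(struct iclog *ic)
{
        pthread_mutex_lock(&ic->lock);
        while (!ic->written)
                pthread_cond_wait(&ic->force_wait, &ic->lock);
        pthread_mutex_unlock(&ic->lock);
}

/* I/O completion: mark the buffer done and wake every waiter at once. */
void write_done(struct iclog *ic)
{
        pthread_mutex_lock(&ic->lock);
        ic->written = true;
        pthread_cond_broadcast(&ic->force_wait);
        pthread_mutex_unlock(&ic->lock);
}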
@@ -584,12 +585,12 @@ error:
  * mp - ubiquitous xfs mount point structure
  */
 int
-xfs_log_mount_finish(xfs_mount_t *mp, int mfsi_flags)
+xfs_log_mount_finish(xfs_mount_t *mp)
 {
         int     error;
 
         if (!(mp->m_flags & XFS_MOUNT_NORECOVERY))
-                error = xlog_recover_finish(mp->m_log, mfsi_flags);
+                error = xlog_recover_finish(mp->m_log);
         else {
                 error = 0;
                 ASSERT(mp->m_flags & XFS_MOUNT_RDONLY);
@@ -703,7 +704,7 @@ xfs_log_unmount_write(xfs_mount_t *mp)
                 if (!(iclog->ic_state == XLOG_STATE_ACTIVE ||
                       iclog->ic_state == XLOG_STATE_DIRTY)) {
                         if (!XLOG_FORCED_SHUTDOWN(log)) {
-                                sv_wait(&iclog->ic_forcesema, PMEM,
+                                sv_wait(&iclog->ic_force_wait, PMEM,
                                         &log->l_icloglock, s);
                         } else {
                                 spin_unlock(&log->l_icloglock);
@@ -744,7 +745,7 @@ xfs_log_unmount_write(xfs_mount_t *mp)
                    || iclog->ic_state == XLOG_STATE_DIRTY
                    || iclog->ic_state == XLOG_STATE_IOERROR) ) {
 
-                        sv_wait(&iclog->ic_forcesema, PMEM,
+                        sv_wait(&iclog->ic_force_wait, PMEM,
                                 &log->l_icloglock, s);
                 } else {
                         spin_unlock(&log->l_icloglock);
@@ -834,7 +835,7 @@ xfs_log_move_tail(xfs_mount_t *mp,
                                 break;
                         tail_lsn = 0;
                         free_bytes -= tic->t_unit_res;
-                        sv_signal(&tic->t_sema);
+                        sv_signal(&tic->t_wait);
                         tic = tic->t_next;
                 } while (tic != log->l_write_headq);
         }
@@ -855,7 +856,7 @@ xfs_log_move_tail(xfs_mount_t *mp,
                                 break;
                         tail_lsn = 0;
                         free_bytes -= need_bytes;
-                        sv_signal(&tic->t_sema);
+                        sv_signal(&tic->t_wait);
                         tic = tic->t_next;
                 } while (tic != log->l_reserve_headq);
         }
@@ -1228,7 +1229,7 @@ xlog_alloc_log(xfs_mount_t *mp,
 
         spin_lock_init(&log->l_icloglock);
         spin_lock_init(&log->l_grant_lock);
-        initnsema(&log->l_flushsema, 0, "ic-flush");
+        sv_init(&log->l_flush_wait, 0, "flush_wait");
 
         /* log record size must be multiple of BBSIZE; see xlog_rec_header_t */
         ASSERT((XFS_BUF_SIZE(bp) & BBMASK) == 0);
@@ -1281,8 +1282,8 @@ xlog_alloc_log(xfs_mount_t *mp,
 
                 ASSERT(XFS_BUF_ISBUSY(iclog->ic_bp));
                 ASSERT(XFS_BUF_VALUSEMA(iclog->ic_bp) <= 0);
-                sv_init(&iclog->ic_forcesema, SV_DEFAULT, "iclog-force");
-                sv_init(&iclog->ic_writesema, SV_DEFAULT, "iclog-write");
+                sv_init(&iclog->ic_force_wait, SV_DEFAULT, "iclog-force");
+                sv_init(&iclog->ic_write_wait, SV_DEFAULT, "iclog-write");
 
                 iclogp = &iclog->ic_next;
         }
@@ -1561,8 +1562,8 @@ xlog_dealloc_log(xlog_t *log)
 
         iclog = log->l_iclog;
         for (i=0; i<log->l_iclog_bufs; i++) {
-                sv_destroy(&iclog->ic_forcesema);
-                sv_destroy(&iclog->ic_writesema);
+                sv_destroy(&iclog->ic_force_wait);
+                sv_destroy(&iclog->ic_write_wait);
                 xfs_buf_free(iclog->ic_bp);
 #ifdef XFS_LOG_TRACE
                 if (iclog->ic_trace != NULL) {
@@ -1570,10 +1571,9 @@ xlog_dealloc_log(xlog_t *log)
                 }
 #endif
                 next_iclog = iclog->ic_next;
-                kmem_free(iclog, sizeof(xlog_in_core_t));
+                kmem_free(iclog);
                 iclog = next_iclog;
         }
-        freesema(&log->l_flushsema);
         spinlock_destroy(&log->l_icloglock);
         spinlock_destroy(&log->l_grant_lock);
 
@@ -1587,7 +1587,7 @@ xlog_dealloc_log(xlog_t *log)
         }
 #endif
         log->l_mp->m_log = NULL;
-        kmem_free(log, sizeof(xlog_t));
+        kmem_free(log);
 } /* xlog_dealloc_log */
 
 /*
@@ -1973,7 +1973,7 @@ xlog_write(xfs_mount_t * mp,
 /* Clean iclogs starting from the head.  This ordering must be
  * maintained, so an iclog doesn't become ACTIVE beyond one that
  * is SYNCING.  This is also required to maintain the notion that we use
- * a counting semaphore to hold off would be writers to the log when every
+ * a ordered wait queue to hold off would be writers to the log when every
  * iclog is trying to sync to disk.
  *
  * State Change: DIRTY -> ACTIVE
@@ -2097,6 +2097,7 @@ xlog_state_do_callback(
         int                funcdidcallbacks; /* flag: function did callbacks */
         int                repeats;     /* for issuing console warnings if
                                          * looping too many times */
+        int                wake = 0;
 
         spin_lock(&log->l_icloglock);
         first_iclog = iclog = log->l_iclog;
@@ -2236,7 +2237,7 @@ xlog_state_do_callback(
                         xlog_state_clean_log(log);
 
                         /* wake up threads waiting in xfs_log_force() */
-                        sv_broadcast(&iclog->ic_forcesema);
+                        sv_broadcast(&iclog->ic_force_wait);
 
                         iclog = iclog->ic_next;
                 } while (first_iclog != iclog);
@@ -2278,15 +2279,13 @@ xlog_state_do_callback(
         }
 #endif
 
-        flushcnt = 0;
-        if (log->l_iclog->ic_state & (XLOG_STATE_ACTIVE|XLOG_STATE_IOERROR)) {
-                flushcnt = log->l_flushcnt;
-                log->l_flushcnt = 0;
-        }
+        if (log->l_iclog->ic_state & (XLOG_STATE_ACTIVE|XLOG_STATE_IOERROR))
+                wake = 1;
         spin_unlock(&log->l_icloglock);
-        while (flushcnt--)
-                vsema(&log->l_flushsema);
-} /* xlog_state_do_callback */
+
+        if (wake)
+                sv_broadcast(&log->l_flush_wait);
+}
 
 
 /*
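Worth noting in the hunk above: rather than handing out a counted number of wakeups (one vsema() per recorded sleeper), the new code merely notes under l_icloglock whether a wakeup is due and issues a single broadcast after dropping the lock. A minimal sketch of that idiom, again with illustrative pthread-based names rather than kernel API:

#include <pthread.h>
#include <stdbool.h>

/* Illustrative state guarded by one lock, with a wait queue attached. */
struct log_state {
        pthread_mutex_t lock;           /* stands in for l_icloglock */
        pthread_cond_t  flush_wait;     /* stands in for l_flush_wait */
        bool            head_active;    /* the condition waiters care about */
};

void callback_done(struct log_state *ls)
{
        bool wake = false;

        pthread_mutex_lock(&ls->lock);
        if (ls->head_active)            /* decide under the lock... */
                wake = true;
        pthread_mutex_unlock(&ls->lock);

        if (wake)                       /* ...wake everyone after dropping it */
                pthread_cond_broadcast(&ls->flush_wait);
}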
@@ -2300,8 +2299,7 @@ xlog_state_do_callback(
  * the second completion goes through.
  *
  * Callbacks could take time, so they are done outside the scope of the
- * global state machine log lock. Assume that the calls to cvsema won't
- * take a long time. At least we know it won't sleep.
+ * global state machine log lock.
  */
 STATIC void
 xlog_state_done_syncing(
@@ -2337,7 +2335,7 @@ xlog_state_done_syncing(
          * iclog buffer, we wake them all, one will get to do the
          * I/O, the others get to wait for the result.
          */
-        sv_broadcast(&iclog->ic_writesema);
+        sv_broadcast(&iclog->ic_write_wait);
         spin_unlock(&log->l_icloglock);
         xlog_state_do_callback(log, aborted, iclog);    /* also cleans log */
 } /* xlog_state_done_syncing */
@@ -2345,11 +2343,9 @@ xlog_state_done_syncing(
 
 /*
  * If the head of the in-core log ring is not (ACTIVE or DIRTY), then we must
- * sleep.  The flush semaphore is set to the number of in-core buffers and
- * decremented around disk syncing.  Therefore, if all buffers are syncing,
- * this semaphore will cause new writes to sleep until a sync completes.
- * Otherwise, this code just does p() followed by v().  This approximates
- * a sleep/wakeup except we can't race.
+ * sleep.  We wait on the flush queue on the head iclog as that should be
+ * the first iclog to complete flushing. Hence if all iclogs are syncing,
+ * we will wait here and all new writes will sleep until a sync completes.
  *
  * The in-core logs are used in a circular fashion. They are not used
  * out-of-order even when an iclog past the head is free.
@@ -2384,16 +2380,15 @@ restart:
         }
 
         iclog = log->l_iclog;
-        if (! (iclog->ic_state == XLOG_STATE_ACTIVE)) {
-                log->l_flushcnt++;
-                spin_unlock(&log->l_icloglock);
+        if (iclog->ic_state != XLOG_STATE_ACTIVE) {
                 xlog_trace_iclog(iclog, XLOG_TRACE_SLEEP_FLUSH);
                 XFS_STATS_INC(xs_log_noiclogs);
-                /* Ensure that log writes happen */
-                psema(&log->l_flushsema, PINOD);
+
+                /* Wait for log writes to have flushed */
+                sv_wait(&log->l_flush_wait, 0, &log->l_icloglock, 0);
                 goto restart;
         }
-        ASSERT(iclog->ic_state == XLOG_STATE_ACTIVE);
+
         head = &iclog->ic_header;
 
         atomic_inc(&iclog->ic_refcnt);  /* prevents sync */
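On the sleeping side, psema() on the counting semaphore becomes a wait on l_flush_wait followed by a retry from the restart label, since the head iclog may still not be usable when the waiter wakes. A hedged userspace sketch of that retry shape (sv_wait() returns with the spinlock dropped, whereas pthread_cond_wait() reacquires the mutex, hence the explicit unlock before retrying; the names are illustrative only):

#include <pthread.h>

enum iclog_state { ICLOG_ACTIVE, ICLOG_SYNCING, ICLOG_DIRTY };

struct log_state {
        pthread_mutex_t  lock;          /* stands in for l_icloglock */
        pthread_cond_t   flush_wait;    /* stands in for l_flush_wait */
        enum iclog_state head_state;    /* state of the head iclog */
};

/*
 * Grab the head iclog for writing, sleeping until it is ACTIVE again.
 * Mirrors the "sv_wait(); goto restart;" shape of the kernel code.
 */
void wait_for_active_head(struct log_state *ls)
{
restart:
        pthread_mutex_lock(&ls->lock);
        if (ls->head_state != ICLOG_ACTIVE) {
                /* Wait for log writes to have flushed, then re-check. */
                pthread_cond_wait(&ls->flush_wait, &ls->lock);
                pthread_mutex_unlock(&ls->lock);
                goto restart;
        }
        /* head iclog is ACTIVE here; the caller would proceed under the lock */
        pthread_mutex_unlock(&ls->lock);
}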
@@ -2507,7 +2502,7 @@ xlog_grant_log_space(xlog_t *log,
                         goto error_return;
 
                 XFS_STATS_INC(xs_sleep_logspace);
-                sv_wait(&tic->t_sema, PINOD|PLTWAIT, &log->l_grant_lock, s);
+                sv_wait(&tic->t_wait, PINOD|PLTWAIT, &log->l_grant_lock, s);
                 /*
                  * If we got an error, and the filesystem is shutting down,
                  * we'll catch it down below. So just continue...
@@ -2533,7 +2528,7 @@ redo:
                 xlog_trace_loggrant(log, tic,
                                     "xlog_grant_log_space: sleep 2");
                 XFS_STATS_INC(xs_sleep_logspace);
-                sv_wait(&tic->t_sema, PINOD|PLTWAIT, &log->l_grant_lock, s);
+                sv_wait(&tic->t_wait, PINOD|PLTWAIT, &log->l_grant_lock, s);
 
                 if (XLOG_FORCED_SHUTDOWN(log)) {
                         spin_lock(&log->l_grant_lock);
@@ -2632,7 +2627,7 @@ xlog_regrant_write_log_space(xlog_t *log,
                         if (free_bytes < ntic->t_unit_res)
                                 break;
                         free_bytes -= ntic->t_unit_res;
-                        sv_signal(&ntic->t_sema);
+                        sv_signal(&ntic->t_wait);
                         ntic = ntic->t_next;
                 } while (ntic != log->l_write_headq);
 
@@ -2643,7 +2638,7 @@ xlog_regrant_write_log_space(xlog_t *log,
                         xlog_trace_loggrant(log, tic,
                                     "xlog_regrant_write_log_space: sleep 1");
                         XFS_STATS_INC(xs_sleep_logspace);
-                        sv_wait(&tic->t_sema, PINOD|PLTWAIT,
+                        sv_wait(&tic->t_wait, PINOD|PLTWAIT,
                                 &log->l_grant_lock, s);
 
                         /* If we're shutting down, this tic is already
@@ -2672,7 +2667,7 @@ redo:
                 if ((tic->t_flags & XLOG_TIC_IN_Q) == 0)
                         xlog_ins_ticketq(&log->l_write_headq, tic);
                 XFS_STATS_INC(xs_sleep_logspace);
-                sv_wait(&tic->t_sema, PINOD|PLTWAIT, &log->l_grant_lock, s);
+                sv_wait(&tic->t_wait, PINOD|PLTWAIT, &log->l_grant_lock, s);
 
                 /* If we're shutting down, this tic is already off the queue */
                 if (XLOG_FORCED_SHUTDOWN(log)) {
@@ -2915,7 +2910,7 @@ xlog_state_switch_iclogs(xlog_t *log,
  * 2. the current iclog is drity, and the previous iclog is in the
  *    active or dirty state.
  *
- * We may sleep (call psema) if:
+ * We may sleep if:
  *
  * 1. the current iclog is not in the active nor dirty state.
  * 2. the current iclog dirty, and the previous iclog is not in the
@@ -3012,7 +3007,7 @@ maybe_sleep:
                         return XFS_ERROR(EIO);
                 }
                 XFS_STATS_INC(xs_log_force_sleep);
-                sv_wait(&iclog->ic_forcesema, PINOD, &log->l_icloglock, s);
+                sv_wait(&iclog->ic_force_wait, PINOD, &log->l_icloglock, s);
                 /*
                  * No need to grab the log lock here since we're
                  * only deciding whether or not to return EIO
@@ -3095,7 +3090,7 @@ try_again:
                                          XLOG_STATE_SYNCING))) {
                         ASSERT(!(iclog->ic_state & XLOG_STATE_IOERROR));
                         XFS_STATS_INC(xs_log_force_sleep);
-                        sv_wait(&iclog->ic_prev->ic_writesema, PSWP,
+                        sv_wait(&iclog->ic_prev->ic_write_wait, PSWP,
                                 &log->l_icloglock, s);
                         *log_flushed = 1;
                         already_slept = 1;
@@ -3115,7 +3110,7 @@ try_again:
             !(iclog->ic_state & (XLOG_STATE_ACTIVE | XLOG_STATE_DIRTY))) {
 
                 /*
-                 * Don't wait on the forcesema if we know that we've
+                 * Don't wait on completion if we know that we've
                  * gotten a log write error.
                  */
                 if (iclog->ic_state & XLOG_STATE_IOERROR) {
@@ -3123,7 +3118,7 @@ try_again:
                         return XFS_ERROR(EIO);
                 }
                 XFS_STATS_INC(xs_log_force_sleep);
-                sv_wait(&iclog->ic_forcesema, PSWP, &log->l_icloglock, s);
+                sv_wait(&iclog->ic_force_wait, PSWP, &log->l_icloglock, s);
                 /*
                  * No need to grab the log lock here since we're
                  * only deciding whether or not to return EIO
@@ -3179,7 +3174,7 @@ STATIC void
 xlog_ticket_put(xlog_t          *log,
                 xlog_ticket_t   *ticket)
 {
-        sv_destroy(&ticket->t_sema);
+        sv_destroy(&ticket->t_wait);
         kmem_zone_free(xfs_log_ticket_zone, ticket);
 } /* xlog_ticket_put */
 
@@ -3269,7 +3264,7 @@ xlog_ticket_get(xlog_t *log,
         tic->t_trans_type = 0;
         if (xflags & XFS_LOG_PERM_RESERV)
                 tic->t_flags |= XLOG_TIC_PERM_RESERV;
-        sv_init(&(tic->t_sema), SV_DEFAULT, "logtick");
+        sv_init(&(tic->t_wait), SV_DEFAULT, "logtick");
 
         xlog_tic_reset_res(tic);
 
@@ -3556,14 +3551,14 @@ xfs_log_force_umount(
          */
         if ((tic = log->l_reserve_headq)) {
                 do {
-                        sv_signal(&tic->t_sema);
+                        sv_signal(&tic->t_wait);
                         tic = tic->t_next;
                 } while (tic != log->l_reserve_headq);
         }
 
         if ((tic = log->l_write_headq)) {
                 do {
-                        sv_signal(&tic->t_sema);
+                        sv_signal(&tic->t_wait);
                         tic = tic->t_next;
                 } while (tic != log->l_write_headq);
         }