author		Christoph Hellwig <hch@infradead.org>	2012-02-19 21:31:30 -0500
committer	Ben Myers <bpm@sgi.com>			2012-02-22 23:34:03 -0500
commit		42ceedb3caffe67c4ec0dfbb78ce410832d429b9 (patch)
tree		ce067cc4db352f49391215b6904c7c8ebfbdbc8d /fs/xfs
parent		e179840d74606ab1935c83fe5ad9d93c95ddc956 (diff)
xfs: share code for grant head availability checks
Reviewed-by: Mark Tinguely <tinguely@sgi.com>
Signed-off-by: Christoph Hellwig <hch@lst.de>
Reviewed-by: Dave Chinner <dchinner@redhat.com>
Signed-off-by: Ben Myers <bpm@sgi.com>
Diffstat (limited to 'fs/xfs')
-rw-r--r--	fs/xfs/xfs_log.c	133
1 file changed, 60 insertions(+), 73 deletions(-)
diff --git a/fs/xfs/xfs_log.c b/fs/xfs/xfs_log.c
index 685997548fb..c6a29a05c60 100644
--- a/fs/xfs/xfs_log.c
+++ b/fs/xfs/xfs_log.c
@@ -245,6 +245,60 @@ shutdown:
 	return XFS_ERROR(EIO);
 }
 
+/*
+ * Atomically get the log space required for a log ticket.
+ *
+ * Once a ticket gets put onto head->waiters, it will only return after the
+ * needed reservation is satisfied.
+ *
+ * This function is structured so that it has a lock free fast path. This is
+ * necessary because every new transaction reservation will come through this
+ * path. Hence any lock will be globally hot if we take it unconditionally on
+ * every pass.
+ *
+ * As tickets are only ever moved on and off head->waiters under head->lock, we
+ * only need to take that lock if we are going to add the ticket to the queue
+ * and sleep. We can avoid taking the lock if the ticket was never added to
+ * head->waiters because the t_queue list head will be empty and we hold the
+ * only reference to it so it can safely be checked unlocked.
+ */
+STATIC int
+xlog_grant_head_check(
+	struct log		*log,
+	struct xlog_grant_head	*head,
+	struct xlog_ticket	*tic,
+	int			*need_bytes)
+{
+	int			free_bytes;
+	int			error = 0;
+
+	ASSERT(!(log->l_flags & XLOG_ACTIVE_RECOVERY));
+
+	/*
+	 * If there are other waiters on the queue then give them a chance at
+	 * logspace before us. Wake up the first waiters, if we do not wake
+	 * up all the waiters then go to sleep waiting for more free space,
+	 * otherwise try to get some space for this transaction.
+	 */
+	*need_bytes = xlog_ticket_reservation(log, head, tic);
+	free_bytes = xlog_space_left(log, &head->grant);
+	if (!list_empty_careful(&head->waiters)) {
+		spin_lock(&head->lock);
+		if (!xlog_grant_head_wake(log, head, &free_bytes) ||
+		    free_bytes < *need_bytes) {
+			error = xlog_grant_head_wait(log, head, tic,
+						     *need_bytes);
+		}
+		spin_unlock(&head->lock);
+	} else if (free_bytes < *need_bytes) {
+		spin_lock(&head->lock);
+		error = xlog_grant_head_wait(log, head, tic, *need_bytes);
+		spin_unlock(&head->lock);
+	}
+
+	return error;
+}
+
 static void
 xlog_tic_reset_res(xlog_ticket_t *tic)
 {
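The comment block added above is the core of the patch: the waiter list is inspected without taking head->lock, and the lock is only acquired on the slow path where the ticket may have to queue and sleep. As a loose userspace analogy of that fast-path/slow-path split (hypothetical names, pthreads in place of kernel spinlocks, and an atomic counter standing in for the waiter list; this is not XFS code):

/*
 * Loose userspace analogy of the xlog_grant_head_check() locking
 * pattern; all names here are hypothetical, not XFS code.
 */
#include <pthread.h>
#include <stdatomic.h>
#include <stdio.h>

struct grant_head {
	pthread_mutex_t	lock;
	pthread_cond_t	wait;
	atomic_int	nwaiters;	/* stands in for head->waiters */
	atomic_int	free_bytes;	/* stands in for the grant head space */
};

static int
grant_head_check(struct grant_head *head, int need_bytes)
{
	/* Fast path: no waiters and enough space, no lock taken. */
	if (atomic_load(&head->nwaiters) == 0 &&
	    atomic_load(&head->free_bytes) >= need_bytes)
		return 0;

	/* Slow path: queue under the lock and sleep until space frees up. */
	pthread_mutex_lock(&head->lock);
	atomic_fetch_add(&head->nwaiters, 1);
	while (atomic_load(&head->free_bytes) < need_bytes)
		pthread_cond_wait(&head->wait, &head->lock);
	atomic_fetch_sub(&head->nwaiters, 1);
	pthread_mutex_unlock(&head->lock);
	return 0;
}

/* Releasing space wakes the sleepers, loosely mirroring xlog_grant_head_wake(). */
static void
grant_head_release(struct grant_head *head, int bytes)
{
	pthread_mutex_lock(&head->lock);
	atomic_fetch_add(&head->free_bytes, bytes);
	pthread_cond_broadcast(&head->wait);
	pthread_mutex_unlock(&head->lock);
}

int main(void)
{
	struct grant_head head = {
		.lock = PTHREAD_MUTEX_INITIALIZER,
		.wait = PTHREAD_COND_INITIALIZER,
		.nwaiters = 0,
		.free_bytes = 4096,
	};

	/* Single-threaded demo: exercises the lock free fast path. */
	printf("fast path ok: %d\n", grant_head_check(&head, 1024) == 0);
	grant_head_release(&head, 1024);
	return 0;
}

The analogy is deliberately loose: the real code keys the fast path off list_empty_careful() on head->waiters rather than a counter, and it re-evaluates free space under the lock, via xlog_grant_head_wake() and xlog_grant_head_wait(), before deciding to sleep.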
@@ -2511,59 +2565,18 @@ restart:
 	return 0;
 }	/* xlog_state_get_iclog_space */
 
-/*
- * Atomically get the log space required for a log ticket.
- *
- * Once a ticket gets put onto the reserveq, it will only return after the
- * needed reservation is satisfied.
- *
- * This function is structured so that it has a lock free fast path. This is
- * necessary because every new transaction reservation will come through this
- * path. Hence any lock will be globally hot if we take it unconditionally on
- * every pass.
- *
- * As tickets are only ever moved on and off the l_reserve.waiters under the
- * l_reserve.lock, we only need to take that lock if we are going to add
- * the ticket to the queue and sleep. We can avoid taking the lock if the ticket
- * was never added to the reserveq because the t_queue list head will be empty
- * and we hold the only reference to it so it can safely be checked unlocked.
- */
 STATIC int
 xlog_grant_log_space(
 	struct log		*log,
 	struct xlog_ticket	*tic)
 {
-	int			free_bytes, need_bytes;
+	int			need_bytes;
 	int			error = 0;
 
-	ASSERT(!(log->l_flags & XLOG_ACTIVE_RECOVERY));
-
 	trace_xfs_log_grant_enter(log, tic);
 
-	/*
-	 * If there are other waiters on the queue then give them a chance at
-	 * logspace before us. Wake up the first waiters, if we do not wake
-	 * up all the waiters then go to sleep waiting for more free space,
-	 * otherwise try to get some space for this transaction.
-	 */
-	need_bytes = tic->t_unit_res;
-	if (tic->t_flags & XFS_LOG_PERM_RESERV)
-		need_bytes *= tic->t_ocnt;
-	free_bytes = xlog_space_left(log, &log->l_reserve_head.grant);
-	if (!list_empty_careful(&log->l_reserve_head.waiters)) {
-		spin_lock(&log->l_reserve_head.lock);
-		if (!xlog_grant_head_wake(log, &log->l_reserve_head, &free_bytes) ||
-		    free_bytes < need_bytes) {
-			error = xlog_grant_head_wait(log, &log->l_reserve_head,
-						     tic, need_bytes);
-		}
-		spin_unlock(&log->l_reserve_head.lock);
-	} else if (free_bytes < need_bytes) {
-		spin_lock(&log->l_reserve_head.lock);
-		error = xlog_grant_head_wait(log, &log->l_reserve_head, tic,
-					     need_bytes);
-		spin_unlock(&log->l_reserve_head.lock);
-	}
+	error = xlog_grant_head_check(log, &log->l_reserve_head, tic,
+				      &need_bytes);
 	if (error)
 		return error;
 
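The need_bytes computation deleted here (and its write-head counterpart in the final hunk below) is what the xlog_ticket_reservation() call inside the new helper hides behind the head argument, assuming that helper, introduced earlier in this series, keeps the same logic: a permanent reservation against the reserve head wants t_unit_res bytes for each of its t_ocnt counts, while a write-head regrant only ever wants a single t_unit_res. A self-contained sketch of just that arithmetic, using toy stand-ins for the real ticket type:

/*
 * Sketch of the two need_bytes computations this patch removes from the
 * call sites; toy_ticket and the flag value are stand-ins, not XFS code.
 */
#include <stdio.h>

#define TOY_LOG_PERM_RESERV	0x1	/* stand-in for XFS_LOG_PERM_RESERV */

struct toy_ticket {
	int	t_unit_res;	/* bytes per reservation unit */
	int	t_ocnt;		/* original (permanent) reservation count */
	int	t_flags;
};

/* Reserve head: permanent reservations take all their counts up front. */
static int
reserve_head_bytes(const struct toy_ticket *tic)
{
	int need_bytes = tic->t_unit_res;

	if (tic->t_flags & TOY_LOG_PERM_RESERV)
		need_bytes *= tic->t_ocnt;
	return need_bytes;
}

/* Write head: a regrant only ever needs one unit. */
static int
write_head_bytes(const struct toy_ticket *tic)
{
	return tic->t_unit_res;
}

int main(void)
{
	struct toy_ticket tic = {
		.t_unit_res = 2048,
		.t_ocnt = 3,
		.t_flags = TOY_LOG_PERM_RESERV,
	};

	/* Prints "reserve head: 6144, write head: 2048". */
	printf("reserve head: %d, write head: %d\n",
	       reserve_head_bytes(&tic), write_head_bytes(&tic));
	return 0;
}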
@@ -2576,16 +2589,13 @@ xlog_grant_log_space(
 
 /*
  * Replenish the byte reservation required by moving the grant write head.
- *
- * Similar to xlog_grant_log_space, the function is structured to have a lock
- * free fast path.
  */
 STATIC int
 xlog_regrant_write_log_space(
 	struct log		*log,
 	struct xlog_ticket	*tic)
 {
-	int			free_bytes, need_bytes;
+	int			need_bytes;
 	int			error = 0;
 
 	tic->t_curr_res = tic->t_unit_res;
@@ -2594,33 +2604,10 @@ xlog_regrant_write_log_space(
 	if (tic->t_cnt > 0)
 		return 0;
 
-	ASSERT(!(log->l_flags & XLOG_ACTIVE_RECOVERY));
-
 	trace_xfs_log_regrant_write_enter(log, tic);
 
-	/*
-	 * If there are other waiters on the queue then give them a chance at
-	 * logspace before us. Wake up the first waiters, if we do not wake
-	 * up all the waiters then go to sleep waiting for more free space,
-	 * otherwise try to get some space for this transaction.
-	 */
-	need_bytes = tic->t_unit_res;
-	free_bytes = xlog_space_left(log, &log->l_write_head.grant);
-	if (!list_empty_careful(&log->l_write_head.waiters)) {
-		spin_lock(&log->l_write_head.lock);
-		if (!xlog_grant_head_wake(log, &log->l_write_head, &free_bytes) ||
-		    free_bytes < need_bytes) {
-			error = xlog_grant_head_wait(log, &log->l_write_head,
-						     tic, need_bytes);
-		}
-		spin_unlock(&log->l_write_head.lock);
-	} else if (free_bytes < need_bytes) {
-		spin_lock(&log->l_write_head.lock);
-		error = xlog_grant_head_wait(log, &log->l_write_head, tic,
-					     need_bytes);
-		spin_unlock(&log->l_write_head.lock);
-	}
-
+	error = xlog_grant_head_check(log, &log->l_write_head, tic,
+				      &need_bytes);
 	if (error)
 		return error;
 
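One last note on the unlocked fast path: both before and after this patch, the lockless test is list_empty_careful() rather than plain list_empty(). If memory serves, the kernel helper amounts to the sketch below (a stand-in list_head type is included so it compiles standalone): by requiring next and prev to agree it will not report empty while another CPU has an add or delete in flight, which, combined with tickets only moving on and off the waiter list under head->lock, is what makes the unlocked check in xlog_grant_head_check() safe.

#include <stdio.h>

/* Minimal stand-in for the kernel's struct list_head. */
struct list_head {
	struct list_head *next, *prev;
};

/*
 * Roughly the kernel's list_empty_careful() from include/linux/list.h:
 * a list mid-update has next and prev momentarily disagreeing, so
 * requiring both to point back at the head avoids a false "empty".
 */
static int
list_empty_careful(const struct list_head *head)
{
	const struct list_head *next = head->next;

	return (next == head) && (next == head->prev);
}

int main(void)
{
	struct list_head waiters = { &waiters, &waiters };	/* empty list */

	printf("empty: %d\n", list_empty_careful(&waiters));
	return 0;
}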