aboutsummaryrefslogtreecommitdiffstats
path: root/fs/xfs/xfs_log_priv.h
diff options
context:
space:
mode:
authorDave Chinner <dchinner@redhat.com>2010-12-20 20:28:39 -0500
committerDave Chinner <david@fromorbit.com>2010-12-20 20:28:39 -0500
commit1c3cb9ec07fabf0c0970adc46fd2a1f09c1186dd (patch)
treeb8b6bf968f663723b06aa68ec499e49f3cdfbad0 /fs/xfs/xfs_log_priv.h
parent84f3c683c4d3f36d3c3ed320babd960a332ac458 (diff)
xfs: convert l_tail_lsn to an atomic variable.
log->l_tail_lsn is currently protected by the log grant lock. The lock is only needed for serialising readers against writers, so we don't really need the lock if we make the l_tail_lsn variable an atomic. Converting the l_tail_lsn variable to an atomic64_t means we can start to peel back the grant lock from various operations. Also, provide functions to safely crack an atomic LSN variable into its component pieces and to recombine the components into an atomic variable. Use them where appropriate. This also removes the need for explicitly holding a spinlock to read the l_tail_lsn on 32 bit platforms. Signed-off-by: Dave Chinner <dchinner@redhat.com>
Diffstat (limited to 'fs/xfs/xfs_log_priv.h')
-rw-r--r--fs/xfs/xfs_log_priv.h37
1 files changed, 31 insertions, 6 deletions
diff --git a/fs/xfs/xfs_log_priv.h b/fs/xfs/xfs_log_priv.h
index 958f356df10e..d34af1c21ed2 100644
--- a/fs/xfs/xfs_log_priv.h
+++ b/fs/xfs/xfs_log_priv.h
@@ -53,7 +53,6 @@ struct xfs_mount;
53 BTOBB(XLOG_MAX_ICLOGS << (xfs_sb_version_haslogv2(&log->l_mp->m_sb) ? \ 53 BTOBB(XLOG_MAX_ICLOGS << (xfs_sb_version_haslogv2(&log->l_mp->m_sb) ? \
54 XLOG_MAX_RECORD_BSHIFT : XLOG_BIG_RECORD_BSHIFT)) 54 XLOG_MAX_RECORD_BSHIFT : XLOG_BIG_RECORD_BSHIFT))
55 55
56
57static inline xfs_lsn_t xlog_assign_lsn(uint cycle, uint block) 56static inline xfs_lsn_t xlog_assign_lsn(uint cycle, uint block)
58{ 57{
59 return ((xfs_lsn_t)cycle << 32) | block; 58 return ((xfs_lsn_t)cycle << 32) | block;
@@ -505,8 +504,6 @@ typedef struct log {
505 * log entries" */ 504 * log entries" */
506 xlog_in_core_t *l_iclog; /* head log queue */ 505 xlog_in_core_t *l_iclog; /* head log queue */
507 spinlock_t l_icloglock; /* grab to change iclog state */ 506 spinlock_t l_icloglock; /* grab to change iclog state */
508 xfs_lsn_t l_tail_lsn; /* lsn of 1st LR with unflushed
509 * buffers */
510 int l_curr_cycle; /* Cycle number of log writes */ 507 int l_curr_cycle; /* Cycle number of log writes */
511 int l_prev_cycle; /* Cycle number before last 508 int l_prev_cycle; /* Cycle number before last
512 * block increment */ 509 * block increment */
@@ -521,12 +518,15 @@ typedef struct log {
521 int64_t l_grant_write_head; 518 int64_t l_grant_write_head;
522 519
523 /* 520 /*
524 * l_last_sync_lsn is an atomic so it can be set and read without 521 * l_last_sync_lsn and l_tail_lsn are atomics so they can be set and
525 * needing to hold specific locks. To avoid operations contending with 522 * read without needing to hold specific locks. To avoid operations
526 * other hot objects, place it on a separate cacheline. 523 * contending with other hot objects, place each of them on a separate
524 * cacheline.
527 */ 525 */
528 /* lsn of last LR on disk */ 526 /* lsn of last LR on disk */
529 atomic64_t l_last_sync_lsn ____cacheline_aligned_in_smp; 527 atomic64_t l_last_sync_lsn ____cacheline_aligned_in_smp;
 528 /* lsn of 1st LR with unflushed buffers */
529 atomic64_t l_tail_lsn ____cacheline_aligned_in_smp;
530 530
531 /* The following field are used for debugging; need to hold icloglock */ 531 /* The following field are used for debugging; need to hold icloglock */
532#ifdef DEBUG 532#ifdef DEBUG
@@ -566,6 +566,31 @@ int xlog_write(struct log *log, struct xfs_log_vec *log_vector,
566 xlog_in_core_t **commit_iclog, uint flags); 566 xlog_in_core_t **commit_iclog, uint flags);
567 567
568/* 568/*
569 * When we crack an atomic LSN, we sample it first so that the value will not
570 * change while we are cracking it into the component values. This means we
571 * will always get consistent component values to work from. This should always
 572 * be used to sample and crack LSNs that are stored and updated in atomic
573 * variables.
574 */
575static inline void
576xlog_crack_atomic_lsn(atomic64_t *lsn, uint *cycle, uint *block)
577{
578 xfs_lsn_t val = atomic64_read(lsn);
579
580 *cycle = CYCLE_LSN(val);
581 *block = BLOCK_LSN(val);
582}
583
584/*
585 * Calculate and assign a value to an atomic LSN variable from component pieces.
586 */
587static inline void
588xlog_assign_atomic_lsn(atomic64_t *lsn, uint cycle, uint block)
589{
590 atomic64_set(lsn, xlog_assign_lsn(cycle, block));
591}
592
593/*
 569 * When we crack the grant head, we sample it first so that the value will not 594 * When we crack the grant head, we sample it first so that the value will not
570 * change while we are cracking it into the component values. This means we 595 * change while we are cracking it into the component values. This means we
571 * will always get consistent component values to work from. 596 * will always get consistent component values to work from.