aboutsummaryrefslogtreecommitdiffstats
diff options
context:
space:
mode:
authorDave Chinner <dchinner@redhat.com>2013-08-12 06:50:06 -0400
committerBen Myers <bpm@sgi.com>2013-08-13 17:19:03 -0400
commitf5baac354db8b6abfe8ed4ff6b6c3438c42ea606 (patch)
treed4751244552a0c974d9e627209104a2a33e179ca
parent7492c5b42de857c13d8b7e0dafb2a5e331598e00 (diff)
xfs: avoid CIL allocation during insert
Now that we have the size of the log vector that has been allocated, we can determine if we need to allocate a new log vector for formatting and insertion. We only need to allocate a new vector if it won't fit into the existing buffer. However, we need to hold the CIL context lock while we do this so that we can't race with a push draining the currently queued log vectors. It is safe to do this as long as we do GFP_NOFS allocation to avoid memory allocation recursing into the filesystem. Hence we can safely overwrite the existing log vector on the CIL if it is large enough to hold all the dirty regions of the current item. Signed-off-by: Dave Chinner <dchinner@redhat.com> Reviewed-by: Mark Tinguely <tinguely@sgi.com> Signed-off-by: Ben Myers <bpm@sgi.com>
-rw-r--r--fs/xfs/xfs_log_cil.c52
-rw-r--r--fs/xfs/xfs_trans.h1
2 files changed, 33 insertions, 20 deletions
diff --git a/fs/xfs/xfs_log_cil.c b/fs/xfs/xfs_log_cil.c
index 423ceaf0aeb0..b20b15761e9c 100644
--- a/fs/xfs/xfs_log_cil.c
+++ b/fs/xfs/xfs_log_cil.c
@@ -185,6 +185,22 @@ xlog_cil_prepare_log_vecs(
185 buf_size = sizeof(struct xfs_log_vec) + nbytes + 185 buf_size = sizeof(struct xfs_log_vec) + nbytes +
186 niovecs * sizeof(struct xfs_log_iovec); 186 niovecs * sizeof(struct xfs_log_iovec);
187 187
188 /* compare to existing item size */
189 if (lip->li_lv && buf_size <= lip->li_lv->lv_size) {
190 /* same or smaller, optimise common overwrite case */
191 lv = lip->li_lv;
192 lv->lv_next = NULL;
193
194 if (ordered)
195 goto insert;
196
197 /* Ensure the lv is set up according to ->iop_size */
198 lv->lv_niovecs = niovecs;
199 lv->lv_buf = (char *)lv + buf_size - nbytes;
200 lv->lv_buf_len = xlog_cil_lv_item_format(lip, lv);
201 goto insert;
202 }
203
188 /* allocate new data chunk */ 204 /* allocate new data chunk */
189 lv = kmem_zalloc(buf_size, KM_SLEEP|KM_NOFS); 205 lv = kmem_zalloc(buf_size, KM_SLEEP|KM_NOFS);
190 lv->lv_item = lip; 206 lv->lv_item = lip;
@@ -204,8 +220,8 @@ xlog_cil_prepare_log_vecs(
204 lv->lv_buf = (char *)lv + buf_size - nbytes; 220 lv->lv_buf = (char *)lv + buf_size - nbytes;
205 221
206 lv->lv_buf_len = xlog_cil_lv_item_format(lip, lv); 222 lv->lv_buf_len = xlog_cil_lv_item_format(lip, lv);
207 ASSERT(lv->lv_buf_len <= nbytes);
208insert: 223insert:
224 ASSERT(lv->lv_buf_len <= nbytes);
209 if (!ret_lv) 225 if (!ret_lv)
210 ret_lv = lv; 226 ret_lv = lv;
211 else 227 else
@@ -230,7 +246,17 @@ xfs_cil_prepare_item(
230{ 246{
231 struct xfs_log_vec *old = lv->lv_item->li_lv; 247 struct xfs_log_vec *old = lv->lv_item->li_lv;
232 248
233 if (old) { 249 if (!old) {
250 /* new lv, must pin the log item */
251 ASSERT(!lv->lv_item->li_lv);
252
253 if (lv->lv_buf_len != XFS_LOG_VEC_ORDERED) {
254 *len += lv->lv_buf_len;
255 *diff_iovecs += lv->lv_niovecs;
256 }
257 lv->lv_item->li_ops->iop_pin(lv->lv_item);
258
259 } else if (old != lv) {
234 /* existing lv on log item, space used is a delta */ 260 /* existing lv on log item, space used is a delta */
235 ASSERT((old->lv_buf && old->lv_buf_len && old->lv_niovecs) || 261 ASSERT((old->lv_buf && old->lv_buf_len && old->lv_niovecs) ||
236 old->lv_buf_len == XFS_LOG_VEC_ORDERED); 262 old->lv_buf_len == XFS_LOG_VEC_ORDERED);
@@ -249,15 +275,8 @@ xfs_cil_prepare_item(
249 *diff_iovecs += lv->lv_niovecs - old->lv_niovecs; 275 *diff_iovecs += lv->lv_niovecs - old->lv_niovecs;
250 kmem_free(old); 276 kmem_free(old);
251 } else { 277 } else {
252 /* new lv, must pin the log item */ 278 /* re-used lv */
253 ASSERT(!lv->lv_item->li_lv); 279 /* XXX: can't account for len/diff_iovecs yet */
254
255 if (lv->lv_buf_len != XFS_LOG_VEC_ORDERED) {
256 *len += lv->lv_buf_len;
257 *diff_iovecs += lv->lv_niovecs;
258 }
259 IOP_PIN(lv->lv_item);
260
261 } 280 }
262 281
263 /* attach new log vector to log item */ 282 /* attach new log vector to log item */
@@ -733,18 +752,13 @@ xfs_log_commit_cil(
733 if (flags & XFS_TRANS_RELEASE_LOG_RES) 752 if (flags & XFS_TRANS_RELEASE_LOG_RES)
734 log_flags = XFS_LOG_REL_PERM_RESERV; 753 log_flags = XFS_LOG_REL_PERM_RESERV;
735 754
736 /* 755 /* lock out background commit */
737 * Do all the hard work of formatting items (including memory 756 down_read(&log->l_cilp->xc_ctx_lock);
738 * allocation) outside the CIL context lock. This prevents stalling CIL 757
739 * pushes when we are low on memory and a transaction commit spends a
740 * lot of time in memory reclaim.
741 */
742 log_vector = xlog_cil_prepare_log_vecs(tp); 758 log_vector = xlog_cil_prepare_log_vecs(tp);
743 if (!log_vector) 759 if (!log_vector)
744 return ENOMEM; 760 return ENOMEM;
745 761
746 /* lock out background commit */
747 down_read(&log->l_cilp->xc_ctx_lock);
748 if (commit_lsn) 762 if (commit_lsn)
749 *commit_lsn = log->l_cilp->xc_ctx->sequence; 763 *commit_lsn = log->l_cilp->xc_ctx->sequence;
750 764
diff --git a/fs/xfs/xfs_trans.h b/fs/xfs/xfs_trans.h
index 24bbdcd297c7..4786170baeb0 100644
--- a/fs/xfs/xfs_trans.h
+++ b/fs/xfs/xfs_trans.h
@@ -77,7 +77,6 @@ struct xfs_item_ops {
77 void (*iop_committing)(xfs_log_item_t *, xfs_lsn_t); 77 void (*iop_committing)(xfs_log_item_t *, xfs_lsn_t);
78}; 78};
79 79
80#define IOP_PIN(ip) (*(ip)->li_ops->iop_pin)(ip)
81#define IOP_UNPIN(ip, remove) (*(ip)->li_ops->iop_unpin)(ip, remove) 80#define IOP_UNPIN(ip, remove) (*(ip)->li_ops->iop_unpin)(ip, remove)
82#define IOP_PUSH(ip, list) (*(ip)->li_ops->iop_push)(ip, list) 81#define IOP_PUSH(ip, list) (*(ip)->li_ops->iop_push)(ip, list)
83#define IOP_UNLOCK(ip) (*(ip)->li_ops->iop_unlock)(ip) 82#define IOP_UNLOCK(ip) (*(ip)->li_ops->iop_unlock)(ip)