Diffstat (limited to 'fs/xfs/xfs_log_cil.c')

 fs/xfs/xfs_log_cil.c | 19 +++++++++++++++----
 1 file changed, 15 insertions(+), 4 deletions(-)
diff --git a/fs/xfs/xfs_log_cil.c b/fs/xfs/xfs_log_cil.c
index cdebd832c3db..4ef6fdbced78 100644
--- a/fs/xfs/xfs_log_cil.c
+++ b/fs/xfs/xfs_log_cil.c
@@ -205,16 +205,25 @@ xlog_cil_insert_format_items(
 		/*
 		 * We 64-bit align the length of each iovec so that the start
 		 * of the next one is naturally aligned. We'll need to
-		 * account for that slack space here.
+		 * account for that slack space here. Then round nbytes up
+		 * to 64-bit alignment so that the initial buffer alignment is
+		 * easy to calculate and verify.
 		 */
 		nbytes += niovecs * sizeof(uint64_t);
+		nbytes = round_up(nbytes, sizeof(uint64_t));
 
 		/* grab the old item if it exists for reservation accounting */
 		old_lv = lip->li_lv;
 
-		/* calc buffer size */
-		buf_size = sizeof(struct xfs_log_vec) + nbytes +
-				niovecs * sizeof(struct xfs_log_iovec);
+		/*
+		 * The data buffer needs to start 64-bit aligned, so round up
+		 * that space to ensure we can align it appropriately and not
+		 * overrun the buffer.
+		 */
+		buf_size = nbytes +
+			   round_up((sizeof(struct xfs_log_vec) +
+				     niovecs * sizeof(struct xfs_log_iovec)),
+				    sizeof(uint64_t));
 
 		/* compare to existing item size */
 		if (lip->li_lv && buf_size <= lip->li_lv->lv_size) {
@@ -251,6 +260,8 @@ xlog_cil_insert_format_items(
 		/* The allocated data region lies beyond the iovec region */
 		lv->lv_buf_len = 0;
 		lv->lv_buf = (char *)lv + buf_size - nbytes;
+		ASSERT(IS_ALIGNED((unsigned long)lv->lv_buf, sizeof(uint64_t)));
+
 		lip->li_ops->iop_format(lip, lv);
 insert:
 		ASSERT(lv->lv_buf_len <= nbytes);
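
For readers following the arithmetic, below is a minimal userspace sketch (not kernel code) of the layout this patch enforces: the header region (struct xfs_log_vec plus the iovec array) is rounded up to 8 bytes, and the payload length nbytes is rounded up as well, so the data region that iop_format() fills starts 64-bit aligned. The struct sizes and the ROUND_UP macro are illustrative stand-ins for sizeof(struct xfs_log_vec), sizeof(struct xfs_log_iovec) and the kernel's round_up().

/*
 * Userspace sketch of the CIL log vector buffer sizing done in
 * xlog_cil_insert_format_items() after this patch. Sizes are stand-ins.
 */
#include <assert.h>
#include <stdint.h>
#include <stdio.h>

/* Mirrors the kernel's round_up() for power-of-two alignments. */
#define ROUND_UP(x, a)	(((x) + (a) - 1) & ~((a) - 1))

int main(void)
{
	size_t log_vec_size = 48;	/* stand-in for sizeof(struct xfs_log_vec) */
	size_t iovec_size = 16;		/* stand-in for sizeof(struct xfs_log_iovec) */
	int niovecs = 3;
	size_t nbytes = 61;		/* unaligned payload length */

	/* Slack for 64-bit aligning each iovec, then align nbytes itself. */
	nbytes += niovecs * sizeof(uint64_t);
	nbytes = ROUND_UP(nbytes, sizeof(uint64_t));

	/* Round the header region up so the data region starts aligned. */
	size_t buf_size = nbytes +
			  ROUND_UP(log_vec_size + niovecs * iovec_size,
				   sizeof(uint64_t));

	/*
	 * lv->lv_buf = (char *)lv + buf_size - nbytes is then 8-byte
	 * aligned whenever the allocation itself is 8-byte aligned.
	 */
	size_t data_offset = buf_size - nbytes;
	assert(data_offset % sizeof(uint64_t) == 0);
	printf("buf_size=%zu, data region starts at offset %zu\n",
	       buf_size, data_offset);
	return 0;
}

With these example numbers the data region lands at offset 96 of a 184-byte buffer, which is why the ASSERT(IS_ALIGNED(...)) added above holds as long as the underlying allocation is itself 8-byte aligned.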