author    Bill O'Donnell <billodo@redhat.com>        2019-10-04 19:38:44 -0400
committer Darrick J. Wong <darrick.wong@oracle.com>  2019-10-06 18:39:06 -0400
commit    3219e8cf0dade9884d3c6cb432d433b4ca56875d (patch)
tree      ac636fd31c84fd346ec0c923783aafb71033a4c3
parent    d5cc14d9f92833bd71219bf7fff180f097c3816d (diff)
xfs: assure zeroed memory buffers for certain kmem allocations
Guarantee zeroed memory buffers for cases where a potential memory
leak to disk can occur. In these cases, kmem_alloc is used and does
not zero the buffer, opening the possibility of information leakage
to disk. Use the existing infrastructure (xfs_buf_allocate_memory)
to obtain an already-zeroed buffer from kernel memory. This solution
avoids the performance penalty that a wholesale replacement of
kmem_alloc with kmem_zalloc would incur.

Signed-off-by: Bill O'Donnell <billodo@redhat.com>
[darrick: fix bitwise complaint about kmflag_mask]
Reviewed-by: Darrick J. Wong <darrick.wong@oracle.com>
Signed-off-by: Darrick J. Wong <darrick.wong@oracle.com>
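The core of the change is an opt-in zeroing mask: rather than
unconditionally zeroing every allocation, only call sites whose
buffers can reach disk without first being fully overwritten pay the
zeroing cost. A minimal sketch of the pattern (illustrative only, not
lifted verbatim from the patch):

	/*
	 * Sketch of the opt-in zeroing pattern. Read buffers are
	 * fully overwritten by the subsequent disk read, so they
	 * skip the zeroing; everything else gets KM_ZERO, which the
	 * kmem layer translates to __GFP_ZERO.
	 */
	xfs_km_flags_t	kmflag_mask = 0;

	if (!(flags & XBF_READ))
		kmflag_mask |= KM_ZERO;

	bp->b_addr = kmem_alloc_io(size, align_mask, KM_NOFS | kmflag_mask);

This keeps the read path on the cheaper non-zeroing kmem_alloc
behavior, which is exactly the performance concern the commit message
cites against a wholesale kmem_zalloc conversion.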
-rw-r--r--	fs/xfs/xfs_buf.c		12
-rw-r--r--	fs/xfs/xfs_log.c		 2
-rw-r--r--	fs/xfs/xfs_log_recover.c	 2
3 files changed, 13 insertions(+), 3 deletions(-)
diff --git a/fs/xfs/xfs_buf.c b/fs/xfs/xfs_buf.c
index 21c243622a79..0abba171aa89 100644
--- a/fs/xfs/xfs_buf.c
+++ b/fs/xfs/xfs_buf.c
@@ -345,6 +345,15 @@ xfs_buf_allocate_memory(
 	unsigned short		page_count, i;
 	xfs_off_t		start, end;
 	int			error;
+	xfs_km_flags_t		kmflag_mask = 0;
+
+	/*
+	 * assure zeroed buffer for non-read cases.
+	 */
+	if (!(flags & XBF_READ)) {
+		kmflag_mask |= KM_ZERO;
+		gfp_mask |= __GFP_ZERO;
+	}
 
 	/*
 	 * for buffers that are contained within a single page, just allocate
@@ -354,7 +363,8 @@ xfs_buf_allocate_memory(
 	size = BBTOB(bp->b_length);
 	if (size < PAGE_SIZE) {
 		int	align_mask = xfs_buftarg_dma_alignment(bp->b_target);
-		bp->b_addr = kmem_alloc_io(size, align_mask, KM_NOFS);
+		bp->b_addr = kmem_alloc_io(size, align_mask,
+					   KM_NOFS | kmflag_mask);
 		if (!bp->b_addr) {
 			/* low memory - use alloc_page loop instead */
 			goto use_alloc_page;
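Note that this hunk sets two masks: kmflag_mask feeds the
kmem_alloc_io() call for sub-page buffers, while gfp_mask is used
directly by the use_alloc_page fallback, so both need the zeroing
bit. The KM_ZERO -> __GFP_ZERO translation happens in the kmem
flag-conversion helper, roughly as follows (a paraphrase of the
fs/xfs/kmem.h helper from this kernel era, not part of this patch;
details may differ):

	/* paraphrased sketch of fs/xfs/kmem.h, not part of this patch */
	static inline gfp_t
	kmem_flags_convert(xfs_km_flags_t flags)
	{
		gfp_t	lflags = GFP_KERNEL | __GFP_NOWARN;

		if (flags & KM_NOFS)
			lflags &= ~__GFP_FS;
		if (flags & KM_MAYFAIL)
			lflags |= __GFP_RETRY_MAYFAIL;
		if (flags & KM_ZERO)
			lflags |= __GFP_ZERO;	/* what this patch relies on */

		return lflags;
	}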
diff --git a/fs/xfs/xfs_log.c b/fs/xfs/xfs_log.c
index a2beee9f74da..641d07f30a27 100644
--- a/fs/xfs/xfs_log.c
+++ b/fs/xfs/xfs_log.c
@@ -1443,7 +1443,7 @@ xlog_alloc_log(
 		prev_iclog = iclog;
 
 		iclog->ic_data = kmem_alloc_io(log->l_iclog_size, align_mask,
-						KM_MAYFAIL);
+						KM_MAYFAIL | KM_ZERO);
 		if (!iclog->ic_data)
 			goto out_free_iclog;
 #ifdef DEBUG
diff --git a/fs/xfs/xfs_log_recover.c b/fs/xfs/xfs_log_recover.c
index 508319039dce..c1a514ffff55 100644
--- a/fs/xfs/xfs_log_recover.c
+++ b/fs/xfs/xfs_log_recover.c
@@ -127,7 +127,7 @@ xlog_alloc_buffer(
 	if (nbblks > 1 && log->l_sectBBsize > 1)
 		nbblks += log->l_sectBBsize;
 	nbblks = round_up(nbblks, log->l_sectBBsize);
-	return kmem_alloc_io(BBTOB(nbblks), align_mask, KM_MAYFAIL);
+	return kmem_alloc_io(BBTOB(nbblks), align_mask, KM_MAYFAIL | KM_ZERO);
 }
 
 /*