about summary refs log tree commit diff stats
path: root/fs/btrfs/file.c
diff options
context:
space:
mode:
Diffstat (limited to 'fs/btrfs/file.c')
-rw-r--r-- fs/btrfs/file.c 19
1 files changed, 8 insertions, 11 deletions
diff --git a/fs/btrfs/file.c b/fs/btrfs/file.c
index 033f04bac85b..2287545c5498 100644
--- a/fs/btrfs/file.c
+++ b/fs/btrfs/file.c
@@ -1481,9 +1481,8 @@ static noinline ssize_t __btrfs_buffered_write(struct file *file,
1481 bool force_page_uptodate = false; 1481 bool force_page_uptodate = false;
1482 bool need_unlock; 1482 bool need_unlock;
1483 1483
1484 nrptrs = min((iov_iter_count(i) + PAGE_CACHE_SIZE - 1) / 1484 nrptrs = min(DIV_ROUND_UP(iov_iter_count(i), PAGE_CACHE_SIZE),
1485 PAGE_CACHE_SIZE, PAGE_CACHE_SIZE / 1485 PAGE_CACHE_SIZE / (sizeof(struct page *)));
1486 (sizeof(struct page *)));
1487 nrptrs = min(nrptrs, current->nr_dirtied_pause - current->nr_dirtied); 1486 nrptrs = min(nrptrs, current->nr_dirtied_pause - current->nr_dirtied);
1488 nrptrs = max(nrptrs, 8); 1487 nrptrs = max(nrptrs, 8);
1489 pages = kmalloc(nrptrs * sizeof(struct page *), GFP_KERNEL); 1488 pages = kmalloc(nrptrs * sizeof(struct page *), GFP_KERNEL);
@@ -1497,8 +1496,8 @@ static noinline ssize_t __btrfs_buffered_write(struct file *file,
1497 size_t write_bytes = min(iov_iter_count(i), 1496 size_t write_bytes = min(iov_iter_count(i),
1498 nrptrs * (size_t)PAGE_CACHE_SIZE - 1497 nrptrs * (size_t)PAGE_CACHE_SIZE -
1499 offset); 1498 offset);
1500 size_t num_pages = (write_bytes + offset + 1499 size_t num_pages = DIV_ROUND_UP(write_bytes + offset,
1501 PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT; 1500 PAGE_CACHE_SIZE);
1502 size_t reserve_bytes; 1501 size_t reserve_bytes;
1503 size_t dirty_pages; 1502 size_t dirty_pages;
1504 size_t copied; 1503 size_t copied;
@@ -1526,9 +1525,8 @@ static noinline ssize_t __btrfs_buffered_write(struct file *file,
1526 * our prealloc extent may be smaller than 1525 * our prealloc extent may be smaller than
1527 * write_bytes, so scale down. 1526 * write_bytes, so scale down.
1528 */ 1527 */
1529 num_pages = (write_bytes + offset + 1528 num_pages = DIV_ROUND_UP(write_bytes + offset,
1530 PAGE_CACHE_SIZE - 1) >> 1529 PAGE_CACHE_SIZE);
1531 PAGE_CACHE_SHIFT;
1532 reserve_bytes = num_pages << PAGE_CACHE_SHIFT; 1530 reserve_bytes = num_pages << PAGE_CACHE_SHIFT;
1533 ret = 0; 1531 ret = 0;
1534 } else { 1532 } else {
@@ -1590,9 +1588,8 @@ again:
1590 dirty_pages = 0; 1588 dirty_pages = 0;
1591 } else { 1589 } else {
1592 force_page_uptodate = false; 1590 force_page_uptodate = false;
1593 dirty_pages = (copied + offset + 1591 dirty_pages = DIV_ROUND_UP(copied + offset,
1594 PAGE_CACHE_SIZE - 1) >> 1592 PAGE_CACHE_SIZE);
1595 PAGE_CACHE_SHIFT;
1596 } 1593 }
1597 1594
1598 /* 1595 /*