author		Jens Axboe <axboe@kernel.dk>	2017-09-27 07:40:16 -0400
committer	Jens Axboe <axboe@kernel.dk>	2017-10-03 10:38:17 -0400
commit		640ab98fb3629c0f8417b9b2532eca596495f3bb
tree		59f6d84e07327e6b94473bcd66e7366582b7f85b
parent		7beb2f845b715cb98584cf630e9a9d5b05501166
buffer: have alloc_page_buffers() use __GFP_NOFAIL
Instead of adding weird retry logic in that function, utilize
__GFP_NOFAIL to ensure that the vm takes care of handling any
potential retries appropriately. This means we don't have to
call free_more_memory() from here.

Reviewed-by: Nikolay Borisov <nborisov@suse.com>
Reviewed-by: Jan Kara <jack@suse.cz>
Signed-off-by: Jens Axboe <axboe@kernel.dk>
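A minimal, standalone C sketch of the pattern the commit message describes, assuming nothing beyond it (GFP_NOFS, __GFP_NOFAIL, try_alloc() and alloc_buffer() below are illustrative stand-ins, not the kernel APIs): the caller-visible retry flag is folded into the allocation flag mask once, so the allocator, not the caller, owns the retry policy.

/*
 * Illustrative sketch only (not kernel code): the flag values and the
 * try_alloc()/alloc_buffer() helpers merely mimic the kernel semantics.
 */
#include <stdio.h>
#include <stdlib.h>

#define GFP_NOFS	0x1u
#define __GFP_NOFAIL	0x2u

/* Mock low-level allocator: may return NULL unless __GFP_NOFAIL is set. */
static void *try_alloc(size_t size, unsigned int gfp)
{
	void *p = malloc(size);

	while (!p && (gfp & __GFP_NOFAIL))
		p = malloc(size);	/* the "VM" retries on the caller's behalf */

	return p;
}

/* Analogue of alloc_page_buffers(): no try_again label, no manual retry loop. */
static void *alloc_buffer(size_t size, int retry)
{
	unsigned int gfp = GFP_NOFS;

	if (retry)
		gfp |= __GFP_NOFAIL;	/* retry policy handed to the allocator */

	return try_alloc(size, gfp);
}

int main(void)
{
	void *bh = alloc_buffer(64, 1);	/* retry == true: allocation may not fail */

	printf("allocation %s\n", bh ? "succeeded" : "failed");
	free(bh);
	return 0;
}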
Diffstat (limited to 'fs')
-rw-r--r--	fs/buffer.c	33
-rw-r--r--	fs/ntfs/aops.c	2
-rw-r--r--	fs/ntfs/mft.c	2
3 files changed, 12 insertions(+), 25 deletions(-)
diff --git a/fs/buffer.c b/fs/buffer.c
index 170df856bdb9..1234ae343aef 100644
--- a/fs/buffer.c
+++ b/fs/buffer.c
@@ -861,16 +861,19 @@ int remove_inode_buffers(struct inode *inode)
  * which may not fail from ordinary buffer allocations.
  */
 struct buffer_head *alloc_page_buffers(struct page *page, unsigned long size,
-		int retry)
+		bool retry)
 {
 	struct buffer_head *bh, *head;
+	gfp_t gfp = GFP_NOFS;
 	long offset;
 
-try_again:
+	if (retry)
+		gfp |= __GFP_NOFAIL;
+
 	head = NULL;
 	offset = PAGE_SIZE;
 	while ((offset -= size) >= 0) {
-		bh = alloc_buffer_head(GFP_NOFS);
+		bh = alloc_buffer_head(gfp);
 		if (!bh)
 			goto no_grow;
 
@@ -896,23 +899,7 @@ no_grow:
 	} while (head);
 	}
 
-	/*
-	 * Return failure for non-async IO requests. Async IO requests
-	 * are not allowed to fail, so we have to wait until buffer heads
-	 * become available. But we don't want tasks sleeping with
-	 * partially complete buffers, so all were released above.
-	 */
-	if (!retry)
-		return NULL;
-
-	/* We're _really_ low on memory. Now we just
-	 * wait for old buffer heads to become free due to
-	 * finishing IO. Since this is an async request and
-	 * the reserve list is empty, we're sure there are
-	 * async buffer heads in use.
-	 */
-	free_more_memory();
-	goto try_again;
+	return NULL;
 }
 EXPORT_SYMBOL_GPL(alloc_page_buffers);
 
@@ -1021,7 +1008,7 @@ grow_dev_page(struct block_device *bdev, sector_t block,
 	/*
 	 * Allocate some buffers for this page
 	 */
-	bh = alloc_page_buffers(page, size, 0);
+	bh = alloc_page_buffers(page, size, false);
 	if (!bh)
 		goto failed;
 
@@ -1575,7 +1562,7 @@ void create_empty_buffers(struct page *page,
 {
 	struct buffer_head *bh, *head, *tail;
 
-	head = alloc_page_buffers(page, blocksize, 1);
+	head = alloc_page_buffers(page, blocksize, true);
 	bh = head;
 	do {
 		bh->b_state |= b_state;
@@ -2638,7 +2625,7 @@ int nobh_write_begin(struct address_space *mapping,
 	 * Be careful: the buffer linked list is a NULL terminated one, rather
 	 * than the circular one we're used to.
 	 */
-	head = alloc_page_buffers(page, blocksize, 0);
+	head = alloc_page_buffers(page, blocksize, false);
 	if (!head) {
 		ret = -ENOMEM;
 		goto out_release;
diff --git a/fs/ntfs/aops.c b/fs/ntfs/aops.c
index cc91856b5e2d..3a2e509c77c5 100644
--- a/fs/ntfs/aops.c
+++ b/fs/ntfs/aops.c
@@ -1739,7 +1739,7 @@ void mark_ntfs_record_dirty(struct page *page, const unsigned int ofs) {
 	spin_lock(&mapping->private_lock);
 	if (unlikely(!page_has_buffers(page))) {
 		spin_unlock(&mapping->private_lock);
-		bh = head = alloc_page_buffers(page, bh_size, 1);
+		bh = head = alloc_page_buffers(page, bh_size, true);
 		spin_lock(&mapping->private_lock);
 		if (likely(!page_has_buffers(page))) {
 			struct buffer_head *tail;
diff --git a/fs/ntfs/mft.c b/fs/ntfs/mft.c
index b6f402194f02..ee8392aee9f6 100644
--- a/fs/ntfs/mft.c
+++ b/fs/ntfs/mft.c
@@ -507,7 +507,7 @@ int ntfs_sync_mft_mirror(ntfs_volume *vol, const unsigned long mft_no,
 	if (unlikely(!page_has_buffers(page))) {
 		struct buffer_head *tail;
 
-		bh = head = alloc_page_buffers(page, blocksize, 1);
+		bh = head = alloc_page_buffers(page, blocksize, true);
 		do {
 			set_buffer_uptodate(bh);
 			tail = bh;