 fs/nfs/pagelist.c        | 84
 fs/nfs/write.c           | 21
 include/linux/nfs_page.h |  1
 3 files changed, 77 insertions(+), 29 deletions(-)
diff --git a/fs/nfs/pagelist.c b/fs/nfs/pagelist.c
index ba491926df5f..be7cbce6e4c7 100644
--- a/fs/nfs/pagelist.c
+++ b/fs/nfs/pagelist.c
@@ -116,7 +116,7 @@ __nfs_iocounter_wait(struct nfs_io_counter *c)
 		if (atomic_read(&c->io_count) == 0)
 			break;
 		ret = nfs_wait_bit_killable(&q.key);
-	} while (atomic_read(&c->io_count) != 0);
+	} while (atomic_read(&c->io_count) != 0 && !ret);
 	finish_wait(wq, &q.wait);
 	return ret;
 }
@@ -139,26 +139,49 @@ nfs_iocounter_wait(struct nfs_io_counter *c)
 /*
  * nfs_page_group_lock - lock the head of the page group
  * @req - request in group that is to be locked
+ * @nonblock - if true don't block waiting for lock
  *
  * this lock must be held if modifying the page group list
  *
- * returns result from wait_on_bit_lock: 0 on success, < 0 on error
+ * return 0 on success, < 0 on error: -EAGAIN if nonblocking or the
+ * result from wait_on_bit_lock
+ *
+ * NOTE: calling with nonblock=false should always have set the
+ * lock bit (see fs/buffer.c and other uses of wait_on_bit_lock
+ * with TASK_UNINTERRUPTIBLE), so there is no need to check the result.
  */
 int
-nfs_page_group_lock(struct nfs_page *req, bool wait)
+nfs_page_group_lock(struct nfs_page *req, bool nonblock)
 {
 	struct nfs_page *head = req->wb_head;
-	int ret;
 
 	WARN_ON_ONCE(head != head->wb_head);
 
-	do {
-		ret = wait_on_bit_lock(&head->wb_flags, PG_HEADLOCK,
-				TASK_UNINTERRUPTIBLE);
-	} while (wait && ret != 0);
+	if (!test_and_set_bit(PG_HEADLOCK, &head->wb_flags))
+		return 0;
 
-	WARN_ON_ONCE(ret > 0);
-	return ret;
+	if (!nonblock)
+		return wait_on_bit_lock(&head->wb_flags, PG_HEADLOCK,
+				TASK_UNINTERRUPTIBLE);
+
+	return -EAGAIN;
+}
+
+/*
+ * nfs_page_group_lock_wait - wait for the lock to clear, but don't grab it
+ * @req - a request in the group
+ *
+ * This is a blocking call to wait for the group lock to be cleared.
+ */
+void
+nfs_page_group_lock_wait(struct nfs_page *req)
+{
+	struct nfs_page *head = req->wb_head;
+
+	WARN_ON_ONCE(head != head->wb_head);
+
+	wait_on_bit(&head->wb_flags, PG_HEADLOCK,
+		TASK_UNINTERRUPTIBLE);
 }
 
 /*
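
With this change, nfs_page_group_lock() with nonblock=false always takes the lock, while nonblock=true returns -EAGAIN instead of sleeping; nfs_page_group_lock_wait() lets a caller that lost the race sleep until the holder drops the lock. A minimal caller sketch (the helper name and the spinlock parameter are hypothetical, not part of this patch):

/* Hypothetical caller: try the group lock while a spinlock is held,
 * then sleep for it only after the spinlock has been dropped. */
static int example_lock_group(struct nfs_page *req, spinlock_t *lock)
{
	int ret;

	spin_lock(lock);
	ret = nfs_page_group_lock(req, true);	/* nonblock, may return -EAGAIN */
	spin_unlock(lock);

	if (ret == -EAGAIN) {
		nfs_page_group_lock_wait(req);	/* safe to sleep here */
		return -EAGAIN;			/* caller retries from the top */
	}
	return ret;	/* 0: group lock held */
}
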
@@ -219,7 +242,7 @@ bool nfs_page_group_sync_on_bit(struct nfs_page *req, unsigned int bit)
 {
 	bool ret;
 
-	nfs_page_group_lock(req, true);
+	nfs_page_group_lock(req, false);
 	ret = nfs_page_group_sync_on_bit_locked(req, bit);
 	nfs_page_group_unlock(req);
 
@@ -701,10 +724,11 @@ int nfs_generic_pgio(struct nfs_pageio_descriptor *desc,
 		     struct nfs_pgio_header *hdr)
 {
 	struct nfs_page		*req;
-	struct page		**pages;
+	struct page		**pages,
+				*last_page;
 	struct list_head	*head = &desc->pg_list;
 	struct nfs_commit_info	cinfo;
-	unsigned int		pagecount;
+	unsigned int		pagecount, pageused;
 
 	pagecount = nfs_page_array_len(desc->pg_base, desc->pg_count);
 	if (!nfs_pgarray_set(&hdr->page_array, pagecount))
@@ -712,12 +736,23 @@ int nfs_generic_pgio(struct nfs_pageio_descriptor *desc,
 
 	nfs_init_cinfo(&cinfo, desc->pg_inode, desc->pg_dreq);
 	pages = hdr->page_array.pagevec;
+	last_page = NULL;
+	pageused = 0;
 	while (!list_empty(head)) {
 		req = nfs_list_entry(head->next);
 		nfs_list_remove_request(req);
 		nfs_list_add_request(req, &hdr->pages);
-		*pages++ = req->wb_page;
+
+		if (WARN_ON_ONCE(pageused >= pagecount))
+			return nfs_pgio_error(desc, hdr);
+
+		if (!last_page || last_page != req->wb_page) {
+			*pages++ = last_page = req->wb_page;
+			pageused++;
+		}
 	}
+	if (WARN_ON_ONCE(pageused != pagecount))
+		return nfs_pgio_error(desc, hdr);
 
 	if ((desc->pg_ioflags & FLUSH_COND_STABLE) &&
 	    (desc->pg_moreio || nfs_reqs_to_commit(&cinfo)))
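
The last_page/pageused bookkeeping above counts a page only once even when several consecutive sub-page requests reference it, so the pagevec sized by nfs_page_array_len() cannot be overrun. A stand-alone sketch of the counting rule (user-space, with made-up page pointers, not kernel code):

/* Consecutive sub-requests that share a page consume one pagevec slot. */
#include <assert.h>
#include <stddef.h>

int main(void)
{
	const void *req_pages[] = {
		(void *)0x1000,			/* request 1, page A */
		(void *)0x2000, (void *)0x2000,	/* requests 2-3 share page B */
		(void *)0x2000,			/* request 4, still page B */
		(void *)0x3000,			/* request 5, page C */
	};
	const void *last_page = NULL;
	unsigned int pageused = 0;
	size_t i;

	for (i = 0; i < sizeof(req_pages) / sizeof(req_pages[0]); i++) {
		if (!last_page || last_page != req_pages[i]) {
			last_page = req_pages[i];
			pageused++;
		}
	}
	assert(pageused == 3);	/* 3 distinct pages for 5 requests */
	return 0;
}
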
@@ -788,6 +823,14 @@ static bool nfs_can_coalesce_requests(struct nfs_page *prev,
 			return false;
 		if (req_offset(req) != req_offset(prev) + prev->wb_bytes)
 			return false;
+		if (req->wb_page == prev->wb_page) {
+			if (req->wb_pgbase != prev->wb_pgbase + prev->wb_bytes)
+				return false;
+		} else {
+			if (req->wb_pgbase != 0 ||
+			    prev->wb_pgbase + prev->wb_bytes != PAGE_CACHE_SIZE)
+				return false;
+		}
 	}
 	size = pgio->pg_ops->pg_test(pgio, prev, req);
 	WARN_ON_ONCE(size > req->wb_bytes);
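
The new check only coalesces sub-page requests that are byte-contiguous: on the same page the next request must start exactly where the previous one ended, and across a page boundary the previous request must end at PAGE_CACHE_SIZE while the next starts at pgbase 0. A hypothetical helper (not part of the patch) restating the rule in isolation:

/* Same page: byte-adjacent; different pages: meet at the page boundary. */
static bool nfs_subpage_requests_contiguous(const struct nfs_page *prev,
					    const struct nfs_page *req)
{
	if (req->wb_page == prev->wb_page)
		return req->wb_pgbase == prev->wb_pgbase + prev->wb_bytes;

	return req->wb_pgbase == 0 &&
	       prev->wb_pgbase + prev->wb_bytes == PAGE_CACHE_SIZE;
}
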
@@ -858,13 +901,8 @@ static int __nfs_pageio_add_request(struct nfs_pageio_descriptor *desc,
 	struct nfs_page *subreq;
 	unsigned int bytes_left = 0;
 	unsigned int offset, pgbase;
-	int ret;
 
-	ret = nfs_page_group_lock(req, false);
-	if (ret < 0) {
-		desc->pg_error = ret;
-		return 0;
-	}
+	nfs_page_group_lock(req, false);
 
 	subreq = req;
 	bytes_left = subreq->wb_bytes;
@@ -886,11 +924,7 @@ static int __nfs_pageio_add_request(struct nfs_pageio_descriptor *desc,
 		if (desc->pg_recoalesce)
 			return 0;
 		/* retry add_request for this subreq */
-		ret = nfs_page_group_lock(req, false);
-		if (ret < 0) {
-			desc->pg_error = ret;
-			return 0;
-		}
+		nfs_page_group_lock(req, false);
 		continue;
 	}
 
diff --git a/fs/nfs/write.c b/fs/nfs/write.c
index e3b5cf28bdc5..175d5d073ccf 100644
--- a/fs/nfs/write.c
+++ b/fs/nfs/write.c
@@ -241,7 +241,7 @@ static bool nfs_page_group_covers_page(struct nfs_page *req)
 	unsigned int pos = 0;
 	unsigned int len = nfs_page_length(req->wb_page);
 
-	nfs_page_group_lock(req, true);
+	nfs_page_group_lock(req, false);
 
 	do {
 		tmp = nfs_page_group_search_locked(req->wb_head, pos);
@@ -478,10 +478,23 @@ try_again:
 		return NULL;
 	}
 
-	/* lock each request in the page group */
-	ret = nfs_page_group_lock(head, false);
-	if (ret < 0)
+	/* holding inode lock, so always make a non-blocking call to try the
+	 * page group lock */
+	ret = nfs_page_group_lock(head, true);
+	if (ret < 0) {
+		spin_unlock(&inode->i_lock);
+
+		if (!nonblock && ret == -EAGAIN) {
+			nfs_page_group_lock_wait(head);
+			nfs_release_request(head);
+			goto try_again;
+		}
+
+		nfs_release_request(head);
 		return ERR_PTR(ret);
+	}
+
+	/* lock each request in the page group */
 	subreq = head;
 	do {
 		/*
diff --git a/include/linux/nfs_page.h b/include/linux/nfs_page.h
index 6ad2bbcad405..6c3e06ee2fb7 100644
--- a/include/linux/nfs_page.h
+++ b/include/linux/nfs_page.h
@@ -123,6 +123,7 @@ extern int nfs_wait_on_request(struct nfs_page *);
 extern void nfs_unlock_request(struct nfs_page *req);
 extern void nfs_unlock_and_release_request(struct nfs_page *);
 extern int nfs_page_group_lock(struct nfs_page *, bool);
+extern void nfs_page_group_lock_wait(struct nfs_page *);
 extern void nfs_page_group_unlock(struct nfs_page *);
 extern bool nfs_page_group_sync_on_bit(struct nfs_page *, unsigned int);
 
