author		Linus Torvalds <torvalds@g5.osdl.org>	2006-07-05 16:05:15 -0400
committer	Linus Torvalds <torvalds@g5.osdl.org>	2006-07-05 16:05:15 -0400
commit		a942b57b9577f30da46a9f16ea0ef2c372cb15a4 (patch)
tree		e62017f1b154dc1ffe0c2a95fc654fb16e98fe5c
parent		887e5d5fcc96e3a7c91e19d9bb419c10196ffdc1 (diff)
parent		72dbac37e3a0acf8e8f07fc65e34e83de83e0b28 (diff)
Merge git://git.linux-nfs.org/pub/linux/nfs-2.6
* git://git.linux-nfs.org/pub/linux/nfs-2.6:
  NLM,NFSv4: Wait on local locks before we put RPC calls on the wire
  VFS: Add support for the FL_ACCESS flag to flock_lock_file()
  NFSv4: Ensure nfs4_lock_expired() caches delegated locks
  NLM,NFSv4: Don't put UNLOCK requests on the wire unless we hold a lock
  VFS: Allow caller to determine if BSD or posix locks were actually freed
  NFS: Optimise away an excessive GETATTR call when a file is symlinked
  This fixes a panic doing the first READDIR or READDIRPLUS call when:
  NFS: Fix NFS page_state usage
  Revert "Merge branch 'odirect'"
-rw-r--r--	fs/lockd/clntproc.c	|  26
-rw-r--r--	fs/locks.c		|  23
-rw-r--r--	fs/nfs/dir.c		|   4
-rw-r--r--	fs/nfs/direct.c		| 435
-rw-r--r--	fs/nfs/nfs4proc.c	|  74
-rw-r--r--	fs/nfs/write.c		|  20
-rw-r--r--	include/linux/fs.h	|   1
-rw-r--r--	include/linux/nfs_xdr.h	|   2
-rw-r--r--	net/sunrpc/xdr.c	|   3
9 files changed, 304 insertions(+), 284 deletions(-)
diff --git a/fs/lockd/clntproc.c b/fs/lockd/clntproc.c
index 5980c45998cc..89ba0df14c22 100644
--- a/fs/lockd/clntproc.c
+++ b/fs/lockd/clntproc.c
@@ -454,7 +454,7 @@ static void nlmclnt_locks_init_private(struct file_lock *fl, struct nlm_host *ho
 	fl->fl_ops = &nlmclnt_lock_ops;
 }
 
-static void do_vfs_lock(struct file_lock *fl)
+static int do_vfs_lock(struct file_lock *fl)
 {
 	int res = 0;
 	switch (fl->fl_flags & (FL_POSIX|FL_FLOCK)) {
@@ -467,9 +467,7 @@ static void do_vfs_lock(struct file_lock *fl)
 	default:
 		BUG();
 	}
-	if (res < 0)
-		printk(KERN_WARNING "%s: VFS is out of sync with lock manager!\n",
-				__FUNCTION__);
+	return res;
 }
 
 /*
@@ -498,6 +496,7 @@ nlmclnt_lock(struct nlm_rqst *req, struct file_lock *fl)
 	struct nlm_host *host = req->a_host;
 	struct nlm_res *resp = &req->a_res;
 	struct nlm_wait *block = NULL;
+	unsigned char fl_flags = fl->fl_flags;
 	int status = -ENOLCK;
 
 	if (!host->h_monitored && nsm_monitor(host) < 0) {
@@ -505,6 +504,10 @@ nlmclnt_lock(struct nlm_rqst *req, struct file_lock *fl)
 			host->h_name);
 		goto out;
 	}
+	fl->fl_flags |= FL_ACCESS;
+	status = do_vfs_lock(fl);
+	if (status < 0)
+		goto out;
 
 	block = nlmclnt_prepare_block(host, fl);
 again:
@@ -539,9 +542,10 @@ again:
 			up_read(&host->h_rwsem);
 			goto again;
 		}
-		fl->fl_flags |= FL_SLEEP;
 		/* Ensure the resulting lock will get added to granted list */
-		do_vfs_lock(fl);
+		fl->fl_flags = fl_flags | FL_SLEEP;
+		if (do_vfs_lock(fl) < 0)
+			printk(KERN_WARNING "%s: VFS is out of sync with lock manager!\n", __FUNCTION__);
 		up_read(&host->h_rwsem);
 	}
 	status = nlm_stat_to_errno(resp->status);
@@ -552,6 +556,7 @@ out_unblock:
 		nlmclnt_cancel(host, req->a_args.block, fl);
 out:
 	nlm_release_call(req);
+	fl->fl_flags = fl_flags;
 	return status;
 }
 
@@ -606,15 +611,19 @@ nlmclnt_unlock(struct nlm_rqst *req, struct file_lock *fl)
 {
 	struct nlm_host *host = req->a_host;
 	struct nlm_res *resp = &req->a_res;
-	int status;
+	int status = 0;
 
 	/*
 	 * Note: the server is supposed to either grant us the unlock
 	 * request, or to deny it with NLM_LCK_DENIED_GRACE_PERIOD. In either
 	 * case, we want to unlock.
 	 */
+	fl->fl_flags |= FL_EXISTS;
 	down_read(&host->h_rwsem);
-	do_vfs_lock(fl);
+	if (do_vfs_lock(fl) == -ENOENT) {
+		up_read(&host->h_rwsem);
+		goto out;
+	}
 	up_read(&host->h_rwsem);
 
 	if (req->a_flags & RPC_TASK_ASYNC)
@@ -624,7 +633,6 @@ nlmclnt_unlock(struct nlm_rqst *req, struct file_lock *fl)
 	if (status < 0)
 		goto out;
 
-	status = 0;
 	if (resp->status == NLM_LCK_GRANTED)
 		goto out;
 
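The net effect of the clntproc.c changes above: nlmclnt_lock() saves the caller's fl_flags on entry, probes the local lock tables with FL_ACCESS before putting a LOCK call on the wire, and restores the saved flags on every exit path. A minimal userspace sketch of that discipline follows; all names and the simulated conflict flag are invented stand-ins for illustration, not the kernel API.

#include <errno.h>
#include <stdio.h>

#define FL_ACCESS 8	/* not trying to lock, just looking */
#define FL_SLEEP  128	/* a blocking lock */

struct toy_fl { unsigned char fl_flags; };

/* Stand-in for do_vfs_lock(): fail when a local conflict is simulated. */
static int toy_do_vfs_lock(struct toy_fl *fl, int conflict)
{
	(void)fl;
	return conflict ? -EAGAIN : 0;
}

/* Stand-in for the NLM LOCK RPC round trip. */
static int toy_nlm_lock_rpc(void) { return 0; }

static int toy_nlmclnt_lock(struct toy_fl *fl, int conflict)
{
	unsigned char fl_flags = fl->fl_flags;	/* save caller's flags */
	int status;

	fl->fl_flags |= FL_ACCESS;		/* probe only, insert nothing */
	status = toy_do_vfs_lock(fl, conflict);
	if (status < 0)
		goto out;			/* conflict: skip the RPC */

	status = toy_nlm_lock_rpc();		/* only now go on the wire */
	if (status == 0) {
		fl->fl_flags = fl_flags | FL_SLEEP;
		status = toy_do_vfs_lock(fl, 0);	/* record granted lock */
	}
out:
	fl->fl_flags = fl_flags;		/* restore on every exit */
	return status;
}

int main(void)
{
	struct toy_fl fl = { 0 };

	printf("no local conflict: %d\n", toy_nlmclnt_lock(&fl, 0));
	printf("local conflict:    %d\n", toy_nlmclnt_lock(&fl, 1));
	return 0;
}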
diff --git a/fs/locks.c b/fs/locks.c
index 1ad29c9b6252..b0b41a64e10b 100644
--- a/fs/locks.c
+++ b/fs/locks.c
@@ -725,6 +725,10 @@ next_task:
 /* Try to create a FLOCK lock on filp. We always insert new FLOCK locks
  * at the head of the list, but that's secret knowledge known only to
  * flock_lock_file and posix_lock_file.
+ *
+ * Note that if called with an FL_EXISTS argument, the caller may determine
+ * whether or not a lock was successfully freed by testing the return
+ * value for -ENOENT.
  */
 static int flock_lock_file(struct file *filp, struct file_lock *request)
 {
@@ -735,6 +739,8 @@ static int flock_lock_file(struct file *filp, struct file_lock *request)
 	int found = 0;
 
 	lock_kernel();
+	if (request->fl_flags & FL_ACCESS)
+		goto find_conflict;
 	for_each_lock(inode, before) {
 		struct file_lock *fl = *before;
 		if (IS_POSIX(fl))
@@ -750,8 +756,11 @@ static int flock_lock_file(struct file *filp, struct file_lock *request)
 		break;
 	}
 
-	if (request->fl_type == F_UNLCK)
+	if (request->fl_type == F_UNLCK) {
+		if ((request->fl_flags & FL_EXISTS) && !found)
+			error = -ENOENT;
 		goto out;
+	}
 
 	error = -ENOMEM;
 	new_fl = locks_alloc_lock();
@@ -764,6 +773,7 @@ static int flock_lock_file(struct file *filp, struct file_lock *request)
 	if (found)
 		cond_resched();
 
+find_conflict:
 	for_each_lock(inode, before) {
 		struct file_lock *fl = *before;
 		if (IS_POSIX(fl))
@@ -777,6 +787,8 @@ static int flock_lock_file(struct file *filp, struct file_lock *request)
 		locks_insert_block(fl, request);
 		goto out;
 	}
+	if (request->fl_flags & FL_ACCESS)
+		goto out;
 	locks_copy_lock(new_fl, request);
 	locks_insert_lock(&inode->i_flock, new_fl);
 	new_fl = NULL;
@@ -948,8 +960,11 @@ static int __posix_lock_file_conf(struct inode *inode, struct file_lock *request
 
 	error = 0;
 	if (!added) {
-		if (request->fl_type == F_UNLCK)
+		if (request->fl_type == F_UNLCK) {
+			if (request->fl_flags & FL_EXISTS)
+				error = -ENOENT;
 			goto out;
+		}
 
 		if (!new_fl) {
 			error = -ENOLCK;
@@ -996,6 +1011,10 @@ static int __posix_lock_file_conf(struct inode *inode, struct file_lock *request
  * Add a POSIX style lock to a file.
  * We merge adjacent & overlapping locks whenever possible.
  * POSIX locks are sorted by owner task, then by starting address
+ *
+ * Note that if called with an FL_EXISTS argument, the caller may determine
+ * whether or not a lock was successfully freed by testing the return
+ * value for -ENOENT.
  */
 int posix_lock_file(struct file *filp, struct file_lock *fl)
 {
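Two flag semantics carry the fs/locks.c change: FL_ACCESS makes flock_lock_file() test for conflicts without inserting anything, and FL_EXISTS makes an F_UNLCK request report -ENOENT when there was nothing to free, which is what lets the NLM and NFSv4 clients decide whether an UNLOCK RPC is needed at all. A toy userspace model of the two flags, with a deliberately simplified one-slot "lock table" (all names invented):

#include <errno.h>
#include <stdio.h>

#define FL_ACCESS 8	/* not trying to lock, just looking */
#define FL_EXISTS 16	/* when unlocking, test for existence */

enum { TOY_LOCK, TOY_UNLCK };

struct toy_table { int held; };	/* 1 if our single lock is held */

static int toy_lock_file(struct toy_table *t, int type, int flags)
{
	if (type == TOY_UNLCK) {
		/* FL_EXISTS: tell the caller whether anything was freed */
		if ((flags & FL_EXISTS) && !t->held)
			return -ENOENT;
		t->held = 0;
		return 0;
	}
	/* FL_ACCESS: report conflicts but never insert a lock */
	if (flags & FL_ACCESS)
		return 0;	/* no conflict in this toy table */
	t->held = 1;
	return 0;
}

int main(void)
{
	struct toy_table t = { 0 };

	toy_lock_file(&t, TOY_LOCK, FL_ACCESS);	/* probe only */
	printf("after FL_ACCESS probe: held=%d\n", t.held);	/* still 0 */

	/* Unlock miss: -ENOENT lets a caller skip its UNLOCK RPC. */
	printf("unlock miss: %d\n", toy_lock_file(&t, TOY_UNLCK, FL_EXISTS));

	toy_lock_file(&t, TOY_LOCK, 0);		/* really take the lock */
	printf("unlock hit:  %d\n", toy_lock_file(&t, TOY_UNLCK, FL_EXISTS));
	return 0;
}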
diff --git a/fs/nfs/dir.c b/fs/nfs/dir.c
index 3ddda6f7ecc2..e7ffb4deb3e5 100644
--- a/fs/nfs/dir.c
+++ b/fs/nfs/dir.c
@@ -690,7 +690,9 @@ int nfs_lookup_verify_inode(struct inode *inode, struct nameidata *nd)
 			goto out_force;
 		/* This is an open(2) */
 		if (nfs_lookup_check_intent(nd, LOOKUP_OPEN) != 0 &&
-		    !(server->flags & NFS_MOUNT_NOCTO))
+		    !(server->flags & NFS_MOUNT_NOCTO) &&
+		    (S_ISREG(inode->i_mode) ||
+		     S_ISDIR(inode->i_mode)))
 			goto out_force;
 	}
 	return nfs_revalidate_inode(server, inode);
diff --git a/fs/nfs/direct.c b/fs/nfs/direct.c
index 4cdd1b499e35..fecd3b095deb 100644
--- a/fs/nfs/direct.c
+++ b/fs/nfs/direct.c
@@ -67,25 +67,19 @@ struct nfs_direct_req {
 	struct kref		kref;		/* release manager */
 
 	/* I/O parameters */
-	struct list_head	list,		/* nfs_read/write_data structs */
-				rewrite_list;	/* saved nfs_write_data structs */
 	struct nfs_open_context *ctx;		/* file open context info */
 	struct kiocb *		iocb;		/* controlling i/o request */
 	struct inode *		inode;		/* target file of i/o */
-	unsigned long		user_addr;	/* location of user's buffer */
-	size_t			user_count;	/* total bytes to move */
-	loff_t			pos;		/* starting offset in file */
-	struct page **		pages;		/* pages in our buffer */
-	unsigned int		npages;		/* count of pages */
 
 	/* completion state */
+	atomic_t		io_count;	/* i/os we're waiting for */
 	spinlock_t		lock;		/* protect completion state */
-	int			outstanding;	/* i/os we're waiting for */
 	ssize_t			count,		/* bytes actually processed */
 				error;		/* any reported error */
 	struct completion	completion;	/* wait for i/o completion */
 
 	/* commit state */
+	struct list_head	rewrite_list;	/* saved nfs_write_data structs */
 	struct nfs_write_data *	commit_data;	/* special write_data for commits */
 	int			flags;
 #define NFS_ODIRECT_DO_COMMIT		(1)	/* an unstable reply was received */
@@ -93,8 +87,37 @@ struct nfs_direct_req {
 	struct nfs_writeverf	verf;		/* unstable write verifier */
 };
 
-static void nfs_direct_write_schedule(struct nfs_direct_req *dreq, int sync);
 static void nfs_direct_write_complete(struct nfs_direct_req *dreq, struct inode *inode);
+static const struct rpc_call_ops nfs_write_direct_ops;
+
+static inline void get_dreq(struct nfs_direct_req *dreq)
+{
+	atomic_inc(&dreq->io_count);
+}
+
+static inline int put_dreq(struct nfs_direct_req *dreq)
+{
+	return atomic_dec_and_test(&dreq->io_count);
+}
+
+/*
+ * "size" is never larger than rsize or wsize.
+ */
+static inline int nfs_direct_count_pages(unsigned long user_addr, size_t size)
+{
+	int page_count;
+
+	page_count = (user_addr + size + PAGE_SIZE - 1) >> PAGE_SHIFT;
+	page_count -= user_addr >> PAGE_SHIFT;
+	BUG_ON(page_count < 0);
+
+	return page_count;
+}
+
+static inline unsigned int nfs_max_pages(unsigned int size)
+{
+	return (size + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT;
+}
 
 /**
  * nfs_direct_IO - NFS address space operation for direct I/O
@@ -118,50 +141,21 @@ ssize_t nfs_direct_IO(int rw, struct kiocb *iocb, const struct iovec *iov, loff_
 	return -EINVAL;
 }
 
-static void nfs_free_user_pages(struct page **pages, int npages, int do_dirty)
+static void nfs_direct_dirty_pages(struct page **pages, int npages)
 {
 	int i;
 	for (i = 0; i < npages; i++) {
 		struct page *page = pages[i];
-		if (do_dirty && !PageCompound(page))
+		if (!PageCompound(page))
 			set_page_dirty_lock(page);
-		page_cache_release(page);
 	}
-	kfree(pages);
 }
 
-static inline int nfs_get_user_pages(int rw, unsigned long user_addr, size_t size, struct page ***pages)
+static void nfs_direct_release_pages(struct page **pages, int npages)
 {
-	int result = -ENOMEM;
-	unsigned long page_count;
-	size_t array_size;
-
-	page_count = (user_addr + size + PAGE_SIZE - 1) >> PAGE_SHIFT;
-	page_count -= user_addr >> PAGE_SHIFT;
-
-	array_size = (page_count * sizeof(struct page *));
-	*pages = kmalloc(array_size, GFP_KERNEL);
-	if (*pages) {
-		down_read(&current->mm->mmap_sem);
-		result = get_user_pages(current, current->mm, user_addr,
-					page_count, (rw == READ), 0,
-					*pages, NULL);
-		up_read(&current->mm->mmap_sem);
-		if (result != page_count) {
-			/*
-			 * If we got fewer pages than expected from
-			 * get_user_pages(), the user buffer runs off the
-			 * end of a mapping; return EFAULT.
-			 */
-			if (result >= 0) {
-				nfs_free_user_pages(*pages, result, 0);
-				result = -EFAULT;
-			} else
-				kfree(*pages);
-			*pages = NULL;
-		}
-	}
-	return result;
+	int i;
+	for (i = 0; i < npages; i++)
+		page_cache_release(pages[i]);
 }
 
 static inline struct nfs_direct_req *nfs_direct_req_alloc(void)
@@ -173,13 +167,13 @@ static inline struct nfs_direct_req *nfs_direct_req_alloc(void)
 		return NULL;
 
 	kref_init(&dreq->kref);
+	kref_get(&dreq->kref);
 	init_completion(&dreq->completion);
-	INIT_LIST_HEAD(&dreq->list);
 	INIT_LIST_HEAD(&dreq->rewrite_list);
 	dreq->iocb = NULL;
 	dreq->ctx = NULL;
 	spin_lock_init(&dreq->lock);
-	dreq->outstanding = 0;
+	atomic_set(&dreq->io_count, 0);
 	dreq->count = 0;
 	dreq->error = 0;
 	dreq->flags = 0;
@@ -220,18 +214,11 @@ out:
 }
 
 /*
- * We must hold a reference to all the pages in this direct read request
- * until the RPCs complete.  This could be long *after* we are woken up in
- * nfs_direct_wait (for instance, if someone hits ^C on a slow server).
- *
- * In addition, synchronous I/O uses a stack-allocated iocb.  Thus we
- * can't trust the iocb is still valid here if this is a synchronous
- * request.  If the waiter is woken prematurely, the iocb is long gone.
+ * Synchronous I/O uses a stack-allocated iocb.  Thus we can't trust
+ * the iocb is still valid here if this is a synchronous request.
  */
 static void nfs_direct_complete(struct nfs_direct_req *dreq)
 {
-	nfs_free_user_pages(dreq->pages, dreq->npages, 1);
-
 	if (dreq->iocb) {
 		long res = (long) dreq->error;
 		if (!res)
@@ -244,48 +231,10 @@ static void nfs_direct_complete(struct nfs_direct_req *dreq)
 }
 
 /*
- * Note we also set the number of requests we have in the dreq when we are
- * done.  This prevents races with I/O completion so we will always wait
- * until all requests have been dispatched and completed.
+ * We must hold a reference to all the pages in this direct read request
+ * until the RPCs complete.  This could be long *after* we are woken up in
+ * nfs_direct_wait (for instance, if someone hits ^C on a slow server).
 */
-static struct nfs_direct_req *nfs_direct_read_alloc(size_t nbytes, size_t rsize)
-{
-	struct list_head *list;
-	struct nfs_direct_req *dreq;
-	unsigned int rpages = (rsize + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT;
-
-	dreq = nfs_direct_req_alloc();
-	if (!dreq)
-		return NULL;
-
-	list = &dreq->list;
-	for(;;) {
-		struct nfs_read_data *data = nfs_readdata_alloc(rpages);
-
-		if (unlikely(!data)) {
-			while (!list_empty(list)) {
-				data = list_entry(list->next,
-						  struct nfs_read_data, pages);
-				list_del(&data->pages);
-				nfs_readdata_free(data);
-			}
-			kref_put(&dreq->kref, nfs_direct_req_release);
-			return NULL;
-		}
-
-		INIT_LIST_HEAD(&data->pages);
-		list_add(&data->pages, list);
-
-		data->req = (struct nfs_page *) dreq;
-		dreq->outstanding++;
-		if (nbytes <= rsize)
-			break;
-		nbytes -= rsize;
-	}
-	kref_get(&dreq->kref);
-	return dreq;
-}
-
 static void nfs_direct_read_result(struct rpc_task *task, void *calldata)
 {
 	struct nfs_read_data *data = calldata;
@@ -294,6 +243,9 @@ static void nfs_direct_read_result(struct rpc_task *task, void *calldata)
 	if (nfs_readpage_result(task, data) != 0)
 		return;
 
+	nfs_direct_dirty_pages(data->pagevec, data->npages);
+	nfs_direct_release_pages(data->pagevec, data->npages);
+
 	spin_lock(&dreq->lock);
 
 	if (likely(task->tk_status >= 0))
@@ -301,13 +253,10 @@ static void nfs_direct_read_result(struct rpc_task *task, void *calldata)
 	else
 		dreq->error = task->tk_status;
 
-	if (--dreq->outstanding) {
-		spin_unlock(&dreq->lock);
-		return;
-	}
-
 	spin_unlock(&dreq->lock);
-	nfs_direct_complete(dreq);
+
+	if (put_dreq(dreq))
+		nfs_direct_complete(dreq);
 }
 
 static const struct rpc_call_ops nfs_read_direct_ops = {
@@ -316,41 +265,60 @@ static const struct rpc_call_ops nfs_read_direct_ops = {
 };
 
 /*
- * For each nfs_read_data struct that was allocated on the list, dispatch
- * an NFS READ operation
+ * For each rsize'd chunk of the user's buffer, dispatch an NFS READ
+ * operation.  If nfs_readdata_alloc() or get_user_pages() fails,
+ * bail and stop sending more reads.  Read length accounting is
+ * handled automatically by nfs_direct_read_result().  Otherwise, if
+ * no requests have been sent, just return an error.
 */
-static void nfs_direct_read_schedule(struct nfs_direct_req *dreq)
+static ssize_t nfs_direct_read_schedule(struct nfs_direct_req *dreq, unsigned long user_addr, size_t count, loff_t pos)
 {
 	struct nfs_open_context *ctx = dreq->ctx;
 	struct inode *inode = ctx->dentry->d_inode;
-	struct list_head *list = &dreq->list;
-	struct page **pages = dreq->pages;
-	size_t count = dreq->user_count;
-	loff_t pos = dreq->pos;
 	size_t rsize = NFS_SERVER(inode)->rsize;
-	unsigned int curpage, pgbase;
+	unsigned int rpages = nfs_max_pages(rsize);
+	unsigned int pgbase;
+	int result;
+	ssize_t started = 0;
+
+	get_dreq(dreq);
 
-	curpage = 0;
-	pgbase = dreq->user_addr & ~PAGE_MASK;
+	pgbase = user_addr & ~PAGE_MASK;
 	do {
 		struct nfs_read_data *data;
 		size_t bytes;
 
+		result = -ENOMEM;
+		data = nfs_readdata_alloc(rpages);
+		if (unlikely(!data))
+			break;
+
 		bytes = rsize;
 		if (count < rsize)
 			bytes = count;
 
-		BUG_ON(list_empty(list));
-		data = list_entry(list->next, struct nfs_read_data, pages);
-		list_del_init(&data->pages);
+		data->npages = nfs_direct_count_pages(user_addr, bytes);
+		down_read(&current->mm->mmap_sem);
+		result = get_user_pages(current, current->mm, user_addr,
+					data->npages, 1, 0, data->pagevec, NULL);
+		up_read(&current->mm->mmap_sem);
+		if (unlikely(result < data->npages)) {
+			if (result > 0)
+				nfs_direct_release_pages(data->pagevec, result);
+			nfs_readdata_release(data);
+			break;
+		}
+
+		get_dreq(dreq);
 
+		data->req = (struct nfs_page *) dreq;
 		data->inode = inode;
 		data->cred = ctx->cred;
 		data->args.fh = NFS_FH(inode);
 		data->args.context = ctx;
 		data->args.offset = pos;
 		data->args.pgbase = pgbase;
-		data->args.pages = &pages[curpage];
+		data->args.pages = data->pagevec;
 		data->args.count = bytes;
 		data->res.fattr = &data->fattr;
 		data->res.eof = 0;
@@ -373,33 +341,35 @@ static void nfs_direct_read_schedule(struct nfs_direct_req *dreq)
 				bytes,
 				(unsigned long long)data->args.offset);
 
+		started += bytes;
+		user_addr += bytes;
 		pos += bytes;
 		pgbase += bytes;
-		curpage += pgbase >> PAGE_SHIFT;
 		pgbase &= ~PAGE_MASK;
 
 		count -= bytes;
 	} while (count != 0);
-	BUG_ON(!list_empty(list));
+
+	if (put_dreq(dreq))
+		nfs_direct_complete(dreq);
+
+	if (started)
+		return 0;
+	return result < 0 ? (ssize_t) result : -EFAULT;
 }
 
-static ssize_t nfs_direct_read(struct kiocb *iocb, unsigned long user_addr, size_t count, loff_t pos, struct page **pages, unsigned int nr_pages)
+static ssize_t nfs_direct_read(struct kiocb *iocb, unsigned long user_addr, size_t count, loff_t pos)
 {
-	ssize_t result;
+	ssize_t result = 0;
 	sigset_t oldset;
 	struct inode *inode = iocb->ki_filp->f_mapping->host;
 	struct rpc_clnt *clnt = NFS_CLIENT(inode);
 	struct nfs_direct_req *dreq;
 
-	dreq = nfs_direct_read_alloc(count, NFS_SERVER(inode)->rsize);
+	dreq = nfs_direct_req_alloc();
 	if (!dreq)
 		return -ENOMEM;
 
-	dreq->user_addr = user_addr;
-	dreq->user_count = count;
-	dreq->pos = pos;
-	dreq->pages = pages;
-	dreq->npages = nr_pages;
 	dreq->inode = inode;
 	dreq->ctx = get_nfs_open_context((struct nfs_open_context *)iocb->ki_filp->private_data);
 	if (!is_sync_kiocb(iocb))
@@ -407,8 +377,9 @@ static ssize_t nfs_direct_read(struct kiocb *iocb, unsigned long user_addr, size
 
 	nfs_add_stats(inode, NFSIOS_DIRECTREADBYTES, count);
 	rpc_clnt_sigmask(clnt, &oldset);
-	nfs_direct_read_schedule(dreq);
-	result = nfs_direct_wait(dreq);
+	result = nfs_direct_read_schedule(dreq, user_addr, count, pos);
+	if (!result)
+		result = nfs_direct_wait(dreq);
 	rpc_clnt_sigunmask(clnt, &oldset);
 
 	return result;
@@ -416,10 +387,10 @@ static ssize_t nfs_direct_read(struct kiocb *iocb, unsigned long user_addr, size
 
 static void nfs_direct_free_writedata(struct nfs_direct_req *dreq)
 {
-	list_splice_init(&dreq->rewrite_list, &dreq->list);
-	while (!list_empty(&dreq->list)) {
-		struct nfs_write_data *data = list_entry(dreq->list.next, struct nfs_write_data, pages);
+	while (!list_empty(&dreq->rewrite_list)) {
+		struct nfs_write_data *data = list_entry(dreq->rewrite_list.next, struct nfs_write_data, pages);
 		list_del(&data->pages);
+		nfs_direct_release_pages(data->pagevec, data->npages);
 		nfs_writedata_release(data);
 	}
 }
@@ -427,14 +398,51 @@ static void nfs_direct_free_writedata(struct nfs_direct_req *dreq)
 #if defined(CONFIG_NFS_V3) || defined(CONFIG_NFS_V4)
 static void nfs_direct_write_reschedule(struct nfs_direct_req *dreq)
 {
-	struct list_head *pos;
+	struct inode *inode = dreq->inode;
+	struct list_head *p;
+	struct nfs_write_data *data;
 
-	list_splice_init(&dreq->rewrite_list, &dreq->list);
-	list_for_each(pos, &dreq->list)
-		dreq->outstanding++;
 	dreq->count = 0;
+	get_dreq(dreq);
+
+	list_for_each(p, &dreq->rewrite_list) {
+		data = list_entry(p, struct nfs_write_data, pages);
+
+		get_dreq(dreq);
+
+		/*
+		 * Reset data->res.
+		 */
+		nfs_fattr_init(&data->fattr);
+		data->res.count = data->args.count;
+		memset(&data->verf, 0, sizeof(data->verf));
+
+		/*
+		 * Reuse data->task; data->args should not have changed
+		 * since the original request was sent.
+		 */
+		rpc_init_task(&data->task, NFS_CLIENT(inode), RPC_TASK_ASYNC,
+				&nfs_write_direct_ops, data);
+		NFS_PROTO(inode)->write_setup(data, FLUSH_STABLE);
+
+		data->task.tk_priority = RPC_PRIORITY_NORMAL;
+		data->task.tk_cookie = (unsigned long) inode;
+
+		/*
+		 * We're called via an RPC callback, so BKL is already held.
+		 */
+		rpc_execute(&data->task);
+
+		dprintk("NFS: %5u rescheduled direct write call (req %s/%Ld, %u bytes @ offset %Lu)\n",
+				data->task.tk_pid,
+				inode->i_sb->s_id,
+				(long long)NFS_FILEID(inode),
+				data->args.count,
+				(unsigned long long)data->args.offset);
+	}
 
-	nfs_direct_write_schedule(dreq, FLUSH_STABLE);
+	if (put_dreq(dreq))
+		nfs_direct_write_complete(dreq, inode);
 }
 
 static void nfs_direct_commit_result(struct rpc_task *task, void *calldata)
@@ -471,8 +479,8 @@ static void nfs_direct_commit_schedule(struct nfs_direct_req *dreq)
 	data->cred = dreq->ctx->cred;
 
 	data->args.fh = NFS_FH(data->inode);
-	data->args.offset = dreq->pos;
-	data->args.count = dreq->user_count;
+	data->args.offset = 0;
+	data->args.count = 0;
 	data->res.count = 0;
 	data->res.fattr = &data->fattr;
 	data->res.verf = &data->verf;
@@ -534,47 +542,6 @@ static void nfs_direct_write_complete(struct nfs_direct_req *dreq, struct inode
 }
 #endif
 
-static struct nfs_direct_req *nfs_direct_write_alloc(size_t nbytes, size_t wsize)
-{
-	struct list_head *list;
-	struct nfs_direct_req *dreq;
-	unsigned int wpages = (wsize + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT;
-
-	dreq = nfs_direct_req_alloc();
-	if (!dreq)
-		return NULL;
-
-	list = &dreq->list;
-	for(;;) {
-		struct nfs_write_data *data = nfs_writedata_alloc(wpages);
-
-		if (unlikely(!data)) {
-			while (!list_empty(list)) {
-				data = list_entry(list->next,
-						  struct nfs_write_data, pages);
-				list_del(&data->pages);
-				nfs_writedata_free(data);
-			}
-			kref_put(&dreq->kref, nfs_direct_req_release);
-			return NULL;
-		}
-
-		INIT_LIST_HEAD(&data->pages);
-		list_add(&data->pages, list);
-
-		data->req = (struct nfs_page *) dreq;
-		dreq->outstanding++;
-		if (nbytes <= wsize)
-			break;
-		nbytes -= wsize;
-	}
-
-	nfs_alloc_commit_data(dreq);
-
-	kref_get(&dreq->kref);
-	return dreq;
-}
-
 static void nfs_direct_write_result(struct rpc_task *task, void *calldata)
 {
 	struct nfs_write_data *data = calldata;
@@ -604,8 +571,6 @@ static void nfs_direct_write_result(struct rpc_task *task, void *calldata)
 			}
 		}
 	}
-	/* In case we have to resend */
-	data->args.stable = NFS_FILE_SYNC;
 
 	spin_unlock(&dreq->lock);
 }
@@ -619,14 +584,8 @@ static void nfs_direct_write_release(void *calldata)
 	struct nfs_write_data *data = calldata;
 	struct nfs_direct_req *dreq = (struct nfs_direct_req *) data->req;
 
-	spin_lock(&dreq->lock);
-	if (--dreq->outstanding) {
-		spin_unlock(&dreq->lock);
-		return;
-	}
-	spin_unlock(&dreq->lock);
-
-	nfs_direct_write_complete(dreq, data->inode);
+	if (put_dreq(dreq))
+		nfs_direct_write_complete(dreq, data->inode);
 }
 
 static const struct rpc_call_ops nfs_write_direct_ops = {
@@ -635,41 +594,62 @@ static const struct rpc_call_ops nfs_write_direct_ops = {
 };
 
 /*
- * For each nfs_write_data struct that was allocated on the list, dispatch
- * an NFS WRITE operation
+ * For each wsize'd chunk of the user's buffer, dispatch an NFS WRITE
+ * operation.  If nfs_writedata_alloc() or get_user_pages() fails,
+ * bail and stop sending more writes.  Write length accounting is
+ * handled automatically by nfs_direct_write_result().  Otherwise, if
+ * no requests have been sent, just return an error.
 */
-static void nfs_direct_write_schedule(struct nfs_direct_req *dreq, int sync)
+static ssize_t nfs_direct_write_schedule(struct nfs_direct_req *dreq, unsigned long user_addr, size_t count, loff_t pos, int sync)
 {
 	struct nfs_open_context *ctx = dreq->ctx;
 	struct inode *inode = ctx->dentry->d_inode;
-	struct list_head *list = &dreq->list;
-	struct page **pages = dreq->pages;
-	size_t count = dreq->user_count;
-	loff_t pos = dreq->pos;
 	size_t wsize = NFS_SERVER(inode)->wsize;
-	unsigned int curpage, pgbase;
+	unsigned int wpages = nfs_max_pages(wsize);
+	unsigned int pgbase;
+	int result;
+	ssize_t started = 0;
 
-	curpage = 0;
-	pgbase = dreq->user_addr & ~PAGE_MASK;
+	get_dreq(dreq);
+
+	pgbase = user_addr & ~PAGE_MASK;
 	do {
 		struct nfs_write_data *data;
 		size_t bytes;
 
+		result = -ENOMEM;
+		data = nfs_writedata_alloc(wpages);
+		if (unlikely(!data))
+			break;
+
 		bytes = wsize;
 		if (count < wsize)
 			bytes = count;
 
-		BUG_ON(list_empty(list));
-		data = list_entry(list->next, struct nfs_write_data, pages);
+		data->npages = nfs_direct_count_pages(user_addr, bytes);
+		down_read(&current->mm->mmap_sem);
+		result = get_user_pages(current, current->mm, user_addr,
+					data->npages, 0, 0, data->pagevec, NULL);
+		up_read(&current->mm->mmap_sem);
+		if (unlikely(result < data->npages)) {
+			if (result > 0)
+				nfs_direct_release_pages(data->pagevec, result);
+			nfs_writedata_release(data);
+			break;
+		}
+
+		get_dreq(dreq);
+
 		list_move_tail(&data->pages, &dreq->rewrite_list);
 
+		data->req = (struct nfs_page *) dreq;
 		data->inode = inode;
 		data->cred = ctx->cred;
 		data->args.fh = NFS_FH(inode);
 		data->args.context = ctx;
 		data->args.offset = pos;
 		data->args.pgbase = pgbase;
-		data->args.pages = &pages[curpage];
+		data->args.pages = data->pagevec;
 		data->args.count = bytes;
 		data->res.fattr = &data->fattr;
 		data->res.count = bytes;
@@ -693,19 +673,26 @@ static void nfs_direct_write_schedule(struct nfs_direct_req *dreq, int sync)
 				bytes,
 				(unsigned long long)data->args.offset);
 
+		started += bytes;
+		user_addr += bytes;
 		pos += bytes;
 		pgbase += bytes;
-		curpage += pgbase >> PAGE_SHIFT;
 		pgbase &= ~PAGE_MASK;
 
 		count -= bytes;
 	} while (count != 0);
-	BUG_ON(!list_empty(list));
+
+	if (put_dreq(dreq))
+		nfs_direct_write_complete(dreq, inode);
+
+	if (started)
+		return 0;
+	return result < 0 ? (ssize_t) result : -EFAULT;
 }
 
-static ssize_t nfs_direct_write(struct kiocb *iocb, unsigned long user_addr, size_t count, loff_t pos, struct page **pages, int nr_pages)
+static ssize_t nfs_direct_write(struct kiocb *iocb, unsigned long user_addr, size_t count, loff_t pos)
 {
-	ssize_t result;
+	ssize_t result = 0;
 	sigset_t oldset;
 	struct inode *inode = iocb->ki_filp->f_mapping->host;
 	struct rpc_clnt *clnt = NFS_CLIENT(inode);
@@ -713,17 +700,14 @@ static ssize_t nfs_direct_write(struct kiocb *iocb, unsigned long user_addr, siz
 	size_t wsize = NFS_SERVER(inode)->wsize;
 	int sync = 0;
 
-	dreq = nfs_direct_write_alloc(count, wsize);
+	dreq = nfs_direct_req_alloc();
 	if (!dreq)
 		return -ENOMEM;
+	nfs_alloc_commit_data(dreq);
+
 	if (dreq->commit_data == NULL || count < wsize)
 		sync = FLUSH_STABLE;
 
-	dreq->user_addr = user_addr;
-	dreq->user_count = count;
-	dreq->pos = pos;
-	dreq->pages = pages;
-	dreq->npages = nr_pages;
 	dreq->inode = inode;
 	dreq->ctx = get_nfs_open_context((struct nfs_open_context *)iocb->ki_filp->private_data);
 	if (!is_sync_kiocb(iocb))
@@ -734,8 +718,9 @@ static ssize_t nfs_direct_write(struct kiocb *iocb, unsigned long user_addr, siz
 	nfs_begin_data_update(inode);
 
 	rpc_clnt_sigmask(clnt, &oldset);
-	nfs_direct_write_schedule(dreq, sync);
-	result = nfs_direct_wait(dreq);
+	result = nfs_direct_write_schedule(dreq, user_addr, count, pos, sync);
+	if (!result)
+		result = nfs_direct_wait(dreq);
 	rpc_clnt_sigunmask(clnt, &oldset);
 
 	return result;
@@ -765,8 +750,6 @@ static ssize_t nfs_direct_write(struct kiocb *iocb, unsigned long user_addr, siz
 ssize_t nfs_file_direct_read(struct kiocb *iocb, char __user *buf, size_t count, loff_t pos)
 {
 	ssize_t retval = -EINVAL;
-	int page_count;
-	struct page **pages;
 	struct file *file = iocb->ki_filp;
 	struct address_space *mapping = file->f_mapping;
 
@@ -788,14 +771,7 @@ ssize_t nfs_file_direct_read(struct kiocb *iocb, char __user *buf, size_t count,
 	if (retval)
 		goto out;
 
-	retval = nfs_get_user_pages(READ, (unsigned long) buf,
-						count, &pages);
-	if (retval < 0)
-		goto out;
-	page_count = retval;
-
-	retval = nfs_direct_read(iocb, (unsigned long) buf, count, pos,
-						pages, page_count);
+	retval = nfs_direct_read(iocb, (unsigned long) buf, count, pos);
 	if (retval > 0)
 		iocb->ki_pos = pos + retval;
 
@@ -831,8 +807,6 @@ out:
 ssize_t nfs_file_direct_write(struct kiocb *iocb, const char __user *buf, size_t count, loff_t pos)
 {
 	ssize_t retval;
-	int page_count;
-	struct page **pages;
 	struct file *file = iocb->ki_filp;
 	struct address_space *mapping = file->f_mapping;
 
@@ -860,14 +834,7 @@ ssize_t nfs_file_direct_write(struct kiocb *iocb, const char __user *buf, size_t
 	if (retval)
 		goto out;
 
-	retval = nfs_get_user_pages(WRITE, (unsigned long) buf,
-						count, &pages);
-	if (retval < 0)
-		goto out;
-	page_count = retval;
-
-	retval = nfs_direct_write(iocb, (unsigned long) buf, count,
-						pos, pages, page_count);
+	retval = nfs_direct_write(iocb, (unsigned long) buf, count, pos);
 
 	/*
 	 * XXX: nfs_end_data_update() already ensures this file's
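The heart of the direct.c rework is the get_dreq()/put_dreq() pair: the scheduler holds one reference of its own while it dispatches, takes another per outstanding request, and whoever drops io_count to zero runs completion, so completion can no longer race with dispatch. A minimal userspace sketch of the pattern, with C11 stdatomic standing in for the kernel's atomic_t (names invented):

#include <stdatomic.h>
#include <stdio.h>

struct toy_dreq { atomic_int io_count; };

static void get_dreq(struct toy_dreq *d)
{
	atomic_fetch_add(&d->io_count, 1);
}

static int put_dreq(struct toy_dreq *d)
{
	/* like atomic_dec_and_test(): true when the count hits zero */
	return atomic_fetch_sub(&d->io_count, 1) == 1;
}

static void toy_complete(struct toy_dreq *d)
{
	(void)d;
	puts("complete: all I/O accounted for");
}

int main(void)
{
	struct toy_dreq d = { 0 };
	int i;

	get_dreq(&d);			/* the scheduler's own reference */
	for (i = 0; i < 3; i++) {
		get_dreq(&d);		/* one per dispatched request */
		/* ... imagine the request completing asynchronously ... */
		if (put_dreq(&d))	/* its completion drops one ref */
			toy_complete(&d);	/* cannot fire yet */
	}
	if (put_dreq(&d))		/* drop the scheduler reference */
		toy_complete(&d);	/* fires exactly once, here */
	return 0;
}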
diff --git a/fs/nfs/nfs4proc.c b/fs/nfs/nfs4proc.c
index b4916b092194..e6ee97f19d81 100644
--- a/fs/nfs/nfs4proc.c
+++ b/fs/nfs/nfs4proc.c
@@ -3144,9 +3144,6 @@ static int do_vfs_lock(struct file *file, struct file_lock *fl)
 	default:
 		BUG();
 	}
-	if (res < 0)
-		printk(KERN_WARNING "%s: VFS is out of sync with lock manager!\n",
-				__FUNCTION__);
 	return res;
 }
 
@@ -3258,8 +3255,6 @@ static struct rpc_task *nfs4_do_unlck(struct file_lock *fl,
 		return ERR_PTR(-ENOMEM);
 	}
 
-	/* Unlock _before_ we do the RPC call */
-	do_vfs_lock(fl->fl_file, fl);
 	return rpc_run_task(NFS_CLIENT(lsp->ls_state->inode), RPC_TASK_ASYNC, &nfs4_locku_ops, data);
 }
 
@@ -3270,30 +3265,28 @@ static int nfs4_proc_unlck(struct nfs4_state *state, int cmd, struct file_lock *
 	struct rpc_task *task;
 	int status = 0;
 
-	/* Is this a delegated lock? */
-	if (test_bit(NFS_DELEGATED_STATE, &state->flags))
-		goto out_unlock;
-	/* Is this open_owner holding any locks on the server? */
-	if (test_bit(LK_STATE_IN_USE, &state->flags) == 0)
-		goto out_unlock;
-
 	status = nfs4_set_lock_state(state, request);
+	/* Unlock _before_ we do the RPC call */
+	request->fl_flags |= FL_EXISTS;
+	if (do_vfs_lock(request->fl_file, request) == -ENOENT)
+		goto out;
 	if (status != 0)
-		goto out_unlock;
+		goto out;
+	/* Is this a delegated lock? */
+	if (test_bit(NFS_DELEGATED_STATE, &state->flags))
+		goto out;
 	lsp = request->fl_u.nfs4_fl.owner;
-	status = -ENOMEM;
 	seqid = nfs_alloc_seqid(&lsp->ls_seqid);
+	status = -ENOMEM;
 	if (seqid == NULL)
-		goto out_unlock;
+		goto out;
 	task = nfs4_do_unlck(request, request->fl_file->private_data, lsp, seqid);
 	status = PTR_ERR(task);
 	if (IS_ERR(task))
-		goto out_unlock;
+		goto out;
 	status = nfs4_wait_for_completion_rpc_task(task);
 	rpc_release_task(task);
-	return status;
-out_unlock:
-	do_vfs_lock(request->fl_file, request);
+out:
 	return status;
 }
 
@@ -3461,10 +3454,10 @@ static int nfs4_lock_reclaim(struct nfs4_state *state, struct file_lock *request
 	struct nfs4_exception exception = { };
 	int err;
 
-	/* Cache the lock if possible... */
-	if (test_bit(NFS_DELEGATED_STATE, &state->flags))
-		return 0;
 	do {
+		/* Cache the lock if possible... */
+		if (test_bit(NFS_DELEGATED_STATE, &state->flags) != 0)
+			return 0;
 		err = _nfs4_do_setlk(state, F_SETLK, request, 1);
 		if (err != -NFS4ERR_DELAY)
 			break;
@@ -3483,6 +3476,8 @@ static int nfs4_lock_expired(struct nfs4_state *state, struct file_lock *request
 	if (err != 0)
 		return err;
 	do {
+		if (test_bit(NFS_DELEGATED_STATE, &state->flags) != 0)
+			return 0;
 		err = _nfs4_do_setlk(state, F_SETLK, request, 0);
 		if (err != -NFS4ERR_DELAY)
 			break;
@@ -3494,29 +3489,42 @@
 static int _nfs4_proc_setlk(struct nfs4_state *state, int cmd, struct file_lock *request)
 {
 	struct nfs4_client *clp = state->owner->so_client;
+	unsigned char fl_flags = request->fl_flags;
 	int status;
 
 	/* Is this a delegated open? */
-	if (NFS_I(state->inode)->delegation_state != 0) {
-		/* Yes: cache locks! */
-		status = do_vfs_lock(request->fl_file, request);
-		/* ...but avoid races with delegation recall... */
-		if (status < 0 || test_bit(NFS_DELEGATED_STATE, &state->flags))
-			return status;
-	}
-	down_read(&clp->cl_sem);
 	status = nfs4_set_lock_state(state, request);
 	if (status != 0)
 		goto out;
+	request->fl_flags |= FL_ACCESS;
+	status = do_vfs_lock(request->fl_file, request);
+	if (status < 0)
+		goto out;
+	down_read(&clp->cl_sem);
+	if (test_bit(NFS_DELEGATED_STATE, &state->flags)) {
+		struct nfs_inode *nfsi = NFS_I(state->inode);
+		/* Yes: cache locks! */
+		down_read(&nfsi->rwsem);
+		/* ...but avoid races with delegation recall... */
+		if (test_bit(NFS_DELEGATED_STATE, &state->flags)) {
+			request->fl_flags = fl_flags & ~FL_SLEEP;
+			status = do_vfs_lock(request->fl_file, request);
+			up_read(&nfsi->rwsem);
+			goto out_unlock;
+		}
+		up_read(&nfsi->rwsem);
+	}
 	status = _nfs4_do_setlk(state, cmd, request, 0);
 	if (status != 0)
-		goto out;
+		goto out_unlock;
 	/* Note: we always want to sleep here! */
-	request->fl_flags |= FL_SLEEP;
+	request->fl_flags = fl_flags | FL_SLEEP;
 	if (do_vfs_lock(request->fl_file, request) < 0)
 		printk(KERN_WARNING "%s: VFS is out of sync with lock manager!\n", __FUNCTION__);
-out:
+out_unlock:
 	up_read(&clp->cl_sem);
+out:
+	request->fl_flags = fl_flags;
 	return status;
 }
 
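The reworked _nfs4_proc_setlk() tests NFS_DELEGATED_STATE once optimistically and again under nfsi->rwsem, because a delegation recall takes that semaphore for writing before clearing the bit; re-testing under the read lock is what makes caching the lock locally safe. The shape of that double-check, modelled in userspace with a pthread rwlock (every name here is an invented stand-in):

#include <pthread.h>
#include <stdatomic.h>
#include <stdio.h>

static pthread_rwlock_t recall_lock = PTHREAD_RWLOCK_INITIALIZER;
static atomic_int delegated = 1;	/* NFS_DELEGATED_STATE stand-in */

static int try_cache_lock_locally(void)
{
	if (!atomic_load(&delegated))
		return 0;			/* slow path: talk to the server */
	pthread_rwlock_rdlock(&recall_lock);
	if (atomic_load(&delegated)) {		/* re-test under the lock */
		puts("delegation still held: lock cached locally");
		pthread_rwlock_unlock(&recall_lock);
		return 1;
	}
	pthread_rwlock_unlock(&recall_lock);
	return 0;				/* recalled meanwhile */
}

static void recall_delegation(void)
{
	pthread_rwlock_wrlock(&recall_lock);	/* excludes the fast path */
	atomic_store(&delegated, 0);
	pthread_rwlock_unlock(&recall_lock);
}

int main(void)
{
	printf("cached=%d\n", try_cache_lock_locally());
	recall_delegation();
	printf("cached=%d\n", try_cache_lock_locally());
	return 0;
}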
diff --git a/fs/nfs/write.c b/fs/nfs/write.c
index bca5734ca9fb..86bac6a5008e 100644
--- a/fs/nfs/write.c
+++ b/fs/nfs/write.c
@@ -578,7 +578,7 @@ static int nfs_wait_on_requests(struct inode *inode, unsigned long idx_start, un
 	return ret;
 }
 
-static void nfs_cancel_requests(struct list_head *head)
+static void nfs_cancel_dirty_list(struct list_head *head)
 {
 	struct nfs_page *req;
 	while(!list_empty(head)) {
@@ -589,6 +589,19 @@ static void nfs_cancel_requests(struct list_head *head)
 	}
 }
 
+static void nfs_cancel_commit_list(struct list_head *head)
+{
+	struct nfs_page *req;
+
+	while(!list_empty(head)) {
+		req = nfs_list_entry(head->next);
+		nfs_list_remove_request(req);
+		nfs_inode_remove_request(req);
+		nfs_clear_page_writeback(req);
+		dec_zone_page_state(req->wb_page, NR_UNSTABLE_NFS);
+	}
+}
+
 /*
 * nfs_scan_dirty - Scan an inode for dirty requests
 * @inode: NFS inode to scan
@@ -1381,6 +1394,7 @@ nfs_commit_list(struct inode *inode, struct list_head *head, int how)
 		nfs_list_remove_request(req);
 		nfs_mark_request_commit(req);
 		nfs_clear_page_writeback(req);
+		dec_zone_page_state(req->wb_page, NR_UNSTABLE_NFS);
 	}
 	return -ENOMEM;
 }
@@ -1499,7 +1513,7 @@ int nfs_sync_inode_wait(struct inode *inode, unsigned long idx_start,
 		if (pages != 0) {
 			spin_unlock(&nfsi->req_lock);
 			if (how & FLUSH_INVALIDATE)
-				nfs_cancel_requests(&head);
+				nfs_cancel_dirty_list(&head);
 			else
 				ret = nfs_flush_list(inode, &head, pages, how);
 			spin_lock(&nfsi->req_lock);
@@ -1512,7 +1526,7 @@ int nfs_sync_inode_wait(struct inode *inode, unsigned long idx_start,
 			break;
 		if (how & FLUSH_INVALIDATE) {
 			spin_unlock(&nfsi->req_lock);
-			nfs_cancel_requests(&head);
+			nfs_cancel_commit_list(&head);
 			spin_lock(&nfsi->req_lock);
 			continue;
 		}
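The reason write.c now needs two cancellation helpers is accounting: every request parked on the commit list has bumped NR_UNSTABLE_NFS, so every path that takes it off the list, including the FLUSH_INVALIDATE cancellation path, must decrement the counter or it drifts upward forever. A toy sketch of that pairing invariant (invented names):

#include <assert.h>
#include <stdio.h>

static long nr_unstable;		/* NR_UNSTABLE_NFS stand-in */

static void mark_request_commit(void)     { nr_unstable++; }
static void remove_from_commit_list(void) { nr_unstable--; }

int main(void)
{
	int i;

	for (i = 0; i < 4; i++)
		mark_request_commit();

	/* Two requests commit normally, two are cancelled; *both*
	 * removal paths must drop the counter. */
	for (i = 0; i < 2; i++)
		remove_from_commit_list();	/* commit completion */
	for (i = 0; i < 2; i++)
		remove_from_commit_list();	/* cancellation path */

	assert(nr_unstable == 0);		/* invariant restored */
	printf("nr_unstable=%ld\n", nr_unstable);
	return 0;
}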
diff --git a/include/linux/fs.h b/include/linux/fs.h
index 134b32068246..43aef9b230fd 100644
--- a/include/linux/fs.h
+++ b/include/linux/fs.h
@@ -716,6 +716,7 @@ extern spinlock_t files_lock;
 #define FL_POSIX	1
 #define FL_FLOCK	2
 #define FL_ACCESS	8	/* not trying to lock, just looking */
+#define FL_EXISTS	16	/* when unlocking, test for existence */
 #define FL_LEASE	32	/* lease held on this file */
 #define FL_CLOSE	64	/* unlock on close */
 #define FL_SLEEP	128	/* A blocking lock */
diff --git a/include/linux/nfs_xdr.h b/include/linux/nfs_xdr.h
index 7c7320fa51aa..2d3fb6416d91 100644
--- a/include/linux/nfs_xdr.h
+++ b/include/linux/nfs_xdr.h
@@ -729,6 +729,7 @@ struct nfs_read_data {
 	struct list_head	pages;	/* Coalesced read requests */
 	struct nfs_page		*req;	/* multi ops per nfs_page */
 	struct page		**pagevec;
+	unsigned int		npages;	/* active pages in pagevec */
 	struct nfs_readargs	args;
 	struct nfs_readres	res;
 #ifdef CONFIG_NFS_V4
@@ -747,6 +748,7 @@ struct nfs_write_data {
 	struct list_head	pages;		/* Coalesced requests we wish to flush */
 	struct nfs_page		*req;		/* multi ops per nfs_page */
 	struct page		**pagevec;
+	unsigned int		npages;		/* active pages in pagevec */
 	struct nfs_writeargs	args;		/* argument struct */
 	struct nfs_writeres	res;		/* result struct */
 #ifdef CONFIG_NFS_V4
diff --git a/net/sunrpc/xdr.c b/net/sunrpc/xdr.c
index 49174f0d0a3e..6ac45103a272 100644
--- a/net/sunrpc/xdr.c
+++ b/net/sunrpc/xdr.c
@@ -191,7 +191,6 @@ _shift_data_right_pages(struct page **pages, size_t pgto_base,
 	do {
 		/* Are any pointers crossing a page boundary? */
 		if (pgto_base == 0) {
-			flush_dcache_page(*pgto);
 			pgto_base = PAGE_CACHE_SIZE;
 			pgto--;
 		}
@@ -211,11 +210,11 @@ _shift_data_right_pages(struct page **pages, size_t pgto_base,
 		vto = kmap_atomic(*pgto, KM_USER0);
 		vfrom = kmap_atomic(*pgfrom, KM_USER1);
 		memmove(vto + pgto_base, vfrom + pgfrom_base, copy);
+		flush_dcache_page(*pgto);
 		kunmap_atomic(vfrom, KM_USER1);
 		kunmap_atomic(vto, KM_USER0);
 
 	} while ((len -= copy) != 0);
-	flush_dcache_page(*pgto);
 }
 
 /*