Diffstat (limited to 'fs/nfs')
-rw-r--r--	fs/nfs/dir.c		|   4
-rw-r--r--	fs/nfs/direct.c		| 427
-rw-r--r--	fs/nfs/file.c		|   8
-rw-r--r--	fs/nfs/idmap.c		|   4
-rw-r--r--	fs/nfs/namespace.c	|   4
-rw-r--r--	fs/nfs/nfs4proc.c	| 103
-rw-r--r--	fs/nfs/nfs4xdr.c	|  21
-rw-r--r--	fs/nfs/read.c		|  49
-rw-r--r--	fs/nfs/write.c		|  59
9 files changed, 341 insertions(+), 338 deletions(-)
diff --git a/fs/nfs/dir.c b/fs/nfs/dir.c
index 3ddda6f7ecc2..e7ffb4deb3e5 100644
--- a/fs/nfs/dir.c
+++ b/fs/nfs/dir.c
@@ -690,7 +690,9 @@ int nfs_lookup_verify_inode(struct inode *inode, struct nameidata *nd)
 			goto out_force;
 		/* This is an open(2) */
 		if (nfs_lookup_check_intent(nd, LOOKUP_OPEN) != 0 &&
-		    !(server->flags & NFS_MOUNT_NOCTO))
+		    !(server->flags & NFS_MOUNT_NOCTO) &&
+		    (S_ISREG(inode->i_mode) ||
+		     S_ISDIR(inode->i_mode)))
 			goto out_force;
 	}
 	return nfs_revalidate_inode(server, inode);
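
Note on the hunk above: forced revalidation on open(2) now applies only when the object is a regular file or a directory, so opens of special files no longer trigger a close-to-open GETATTR. A compilable userspace analogue of the resulting predicate (illustrative names, not the kernel code):

#include <stdbool.h>
#include <sys/stat.h>

/* Revalidate on open(2) only where close-to-open consistency matters:
 * regular files and directories, and only if the mount did not opt out
 * with "nocto". */
static bool cto_revalidate_on_open(bool is_open_intent, bool mount_nocto,
				   mode_t mode)
{
	return is_open_intent && !mount_nocto &&
	       (S_ISREG(mode) || S_ISDIR(mode));
}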
diff --git a/fs/nfs/direct.c b/fs/nfs/direct.c
index 4cdd1b499e35..76ca1cbc38f9 100644
--- a/fs/nfs/direct.c
+++ b/fs/nfs/direct.c
@@ -67,25 +67,19 @@ struct nfs_direct_req {
 	struct kref		kref;		/* release manager */
 
 	/* I/O parameters */
-	struct list_head	list,		/* nfs_read/write_data structs */
-				rewrite_list;	/* saved nfs_write_data structs */
 	struct nfs_open_context *ctx;		/* file open context info */
 	struct kiocb *		iocb;		/* controlling i/o request */
 	struct inode *		inode;		/* target file of i/o */
-	unsigned long		user_addr;	/* location of user's buffer */
-	size_t			user_count;	/* total bytes to move */
-	loff_t			pos;		/* starting offset in file */
-	struct page **		pages;		/* pages in our buffer */
-	unsigned int		npages;		/* count of pages */
 
 	/* completion state */
+	atomic_t		io_count;	/* i/os we're waiting for */
 	spinlock_t		lock;		/* protect completion state */
-	int			outstanding;	/* i/os we're waiting for */
 	ssize_t			count,		/* bytes actually processed */
 				error;		/* any reported error */
 	struct completion	completion;	/* wait for i/o completion */
 
 	/* commit state */
+	struct list_head	rewrite_list;	/* saved nfs_write_data structs */
 	struct nfs_write_data *	commit_data;	/* special write_data for commits */
 	int			flags;
 #define NFS_ODIRECT_DO_COMMIT		(1)	/* an unstable reply was received */
@@ -93,8 +87,18 @@ struct nfs_direct_req {
 	struct nfs_writeverf	verf;		/* unstable write verifier */
 };
 
-static void nfs_direct_write_schedule(struct nfs_direct_req *dreq, int sync);
 static void nfs_direct_write_complete(struct nfs_direct_req *dreq, struct inode *inode);
+static const struct rpc_call_ops nfs_write_direct_ops;
+
+static inline void get_dreq(struct nfs_direct_req *dreq)
+{
+	atomic_inc(&dreq->io_count);
+}
+
+static inline int put_dreq(struct nfs_direct_req *dreq)
+{
+	return atomic_dec_and_test(&dreq->io_count);
+}
 
 /**
  * nfs_direct_IO - NFS address space operation for direct I/O
@@ -118,50 +122,21 @@ ssize_t nfs_direct_IO(int rw, struct kiocb *iocb, const struct iovec *iov, loff_
 	return -EINVAL;
 }
 
-static void nfs_free_user_pages(struct page **pages, int npages, int do_dirty)
+static void nfs_direct_dirty_pages(struct page **pages, int npages)
 {
 	int i;
 	for (i = 0; i < npages; i++) {
 		struct page *page = pages[i];
-		if (do_dirty && !PageCompound(page))
+		if (!PageCompound(page))
 			set_page_dirty_lock(page);
-		page_cache_release(page);
 	}
-	kfree(pages);
 }
 
-static inline int nfs_get_user_pages(int rw, unsigned long user_addr, size_t size, struct page ***pages)
+static void nfs_direct_release_pages(struct page **pages, int npages)
 {
-	int result = -ENOMEM;
-	unsigned long page_count;
-	size_t array_size;
-
-	page_count = (user_addr + size + PAGE_SIZE - 1) >> PAGE_SHIFT;
-	page_count -= user_addr >> PAGE_SHIFT;
-
-	array_size = (page_count * sizeof(struct page *));
-	*pages = kmalloc(array_size, GFP_KERNEL);
-	if (*pages) {
-		down_read(&current->mm->mmap_sem);
-		result = get_user_pages(current, current->mm, user_addr,
-					page_count, (rw == READ), 0,
-					*pages, NULL);
-		up_read(&current->mm->mmap_sem);
-		if (result != page_count) {
-			/*
-			 * If we got fewer pages than expected from
-			 * get_user_pages(), the user buffer runs off the
-			 * end of a mapping; return EFAULT.
-			 */
-			if (result >= 0) {
-				nfs_free_user_pages(*pages, result, 0);
-				result = -EFAULT;
-			} else
-				kfree(*pages);
-			*pages = NULL;
-		}
-	}
-	return result;
+	int i;
+	for (i = 0; i < npages; i++)
+		page_cache_release(pages[i]);
 }
 
 static inline struct nfs_direct_req *nfs_direct_req_alloc(void)
@@ -173,13 +148,13 @@ static inline struct nfs_direct_req *nfs_direct_req_alloc(void)
 		return NULL;
 
 	kref_init(&dreq->kref);
+	kref_get(&dreq->kref);
 	init_completion(&dreq->completion);
-	INIT_LIST_HEAD(&dreq->list);
 	INIT_LIST_HEAD(&dreq->rewrite_list);
 	dreq->iocb = NULL;
 	dreq->ctx = NULL;
 	spin_lock_init(&dreq->lock);
-	dreq->outstanding = 0;
+	atomic_set(&dreq->io_count, 0);
 	dreq->count = 0;
 	dreq->error = 0;
 	dreq->flags = 0;
@@ -220,18 +195,11 @@ out:
 }
 
 /*
- * We must hold a reference to all the pages in this direct read request
- * until the RPCs complete. This could be long *after* we are woken up in
- * nfs_direct_wait (for instance, if someone hits ^C on a slow server).
- *
- * In addition, synchronous I/O uses a stack-allocated iocb. Thus we
- * can't trust the iocb is still valid here if this is a synchronous
- * request. If the waiter is woken prematurely, the iocb is long gone.
+ * Synchronous I/O uses a stack-allocated iocb. Thus we can't trust
+ * the iocb is still valid here if this is a synchronous request.
  */
 static void nfs_direct_complete(struct nfs_direct_req *dreq)
 {
-	nfs_free_user_pages(dreq->pages, dreq->npages, 1);
-
 	if (dreq->iocb) {
 		long res = (long) dreq->error;
 		if (!res)
@@ -244,48 +212,10 @@ static void nfs_direct_complete(struct nfs_direct_req *dreq)
 }
 
 /*
- * Note we also set the number of requests we have in the dreq when we are
- * done. This prevents races with I/O completion so we will always wait
- * until all requests have been dispatched and completed.
+ * We must hold a reference to all the pages in this direct read request
+ * until the RPCs complete. This could be long *after* we are woken up in
+ * nfs_direct_wait (for instance, if someone hits ^C on a slow server).
  */
-static struct nfs_direct_req *nfs_direct_read_alloc(size_t nbytes, size_t rsize)
-{
-	struct list_head *list;
-	struct nfs_direct_req *dreq;
-	unsigned int rpages = (rsize + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT;
-
-	dreq = nfs_direct_req_alloc();
-	if (!dreq)
-		return NULL;
-
-	list = &dreq->list;
-	for(;;) {
-		struct nfs_read_data *data = nfs_readdata_alloc(rpages);
-
-		if (unlikely(!data)) {
-			while (!list_empty(list)) {
-				data = list_entry(list->next,
-						  struct nfs_read_data, pages);
-				list_del(&data->pages);
-				nfs_readdata_free(data);
-			}
-			kref_put(&dreq->kref, nfs_direct_req_release);
-			return NULL;
-		}
-
-		INIT_LIST_HEAD(&data->pages);
-		list_add(&data->pages, list);
-
-		data->req = (struct nfs_page *) dreq;
-		dreq->outstanding++;
-		if (nbytes <= rsize)
-			break;
-		nbytes -= rsize;
-	}
-	kref_get(&dreq->kref);
-	return dreq;
-}
-
 static void nfs_direct_read_result(struct rpc_task *task, void *calldata)
 {
 	struct nfs_read_data *data = calldata;
@@ -294,6 +224,9 @@ static void nfs_direct_read_result(struct rpc_task *task, void *calldata)
 	if (nfs_readpage_result(task, data) != 0)
 		return;
 
+	nfs_direct_dirty_pages(data->pagevec, data->npages);
+	nfs_direct_release_pages(data->pagevec, data->npages);
+
 	spin_lock(&dreq->lock);
 
 	if (likely(task->tk_status >= 0))
@@ -301,13 +234,10 @@ static void nfs_direct_read_result(struct rpc_task *task, void *calldata)
 	else
 		dreq->error = task->tk_status;
 
-	if (--dreq->outstanding) {
-		spin_unlock(&dreq->lock);
-		return;
-	}
-
 	spin_unlock(&dreq->lock);
-	nfs_direct_complete(dreq);
+
+	if (put_dreq(dreq))
+		nfs_direct_complete(dreq);
 }
 
 static const struct rpc_call_ops nfs_read_direct_ops = {
@@ -316,41 +246,56 @@ static const struct rpc_call_ops nfs_read_direct_ops = {
 };
 
 /*
- * For each nfs_read_data struct that was allocated on the list, dispatch
- * an NFS READ operation
+ * For each rsize'd chunk of the user's buffer, dispatch an NFS READ
+ * operation. If nfs_readdata_alloc() or get_user_pages() fails,
+ * bail and stop sending more reads. Read length accounting is
+ * handled automatically by nfs_direct_read_result(). Otherwise, if
+ * no requests have been sent, just return an error.
  */
-static void nfs_direct_read_schedule(struct nfs_direct_req *dreq)
+static ssize_t nfs_direct_read_schedule(struct nfs_direct_req *dreq, unsigned long user_addr, size_t count, loff_t pos)
 {
 	struct nfs_open_context *ctx = dreq->ctx;
 	struct inode *inode = ctx->dentry->d_inode;
-	struct list_head *list = &dreq->list;
-	struct page **pages = dreq->pages;
-	size_t count = dreq->user_count;
-	loff_t pos = dreq->pos;
 	size_t rsize = NFS_SERVER(inode)->rsize;
-	unsigned int curpage, pgbase;
+	unsigned int pgbase;
+	int result;
+	ssize_t started = 0;
+
+	get_dreq(dreq);
 
-	curpage = 0;
-	pgbase = dreq->user_addr & ~PAGE_MASK;
 	do {
 		struct nfs_read_data *data;
 		size_t bytes;
 
-		bytes = rsize;
-		if (count < rsize)
-			bytes = count;
+		pgbase = user_addr & ~PAGE_MASK;
+		bytes = min(rsize,count);
 
-		BUG_ON(list_empty(list));
-		data = list_entry(list->next, struct nfs_read_data, pages);
-		list_del_init(&data->pages);
+		result = -ENOMEM;
+		data = nfs_readdata_alloc(pgbase + bytes);
+		if (unlikely(!data))
+			break;
+
+		down_read(&current->mm->mmap_sem);
+		result = get_user_pages(current, current->mm, user_addr,
+					data->npages, 1, 0, data->pagevec, NULL);
+		up_read(&current->mm->mmap_sem);
+		if (unlikely(result < data->npages)) {
+			if (result > 0)
+				nfs_direct_release_pages(data->pagevec, result);
+			nfs_readdata_release(data);
+			break;
+		}
+
+		get_dreq(dreq);
 
+		data->req = (struct nfs_page *) dreq;
 		data->inode = inode;
 		data->cred = ctx->cred;
 		data->args.fh = NFS_FH(inode);
 		data->args.context = ctx;
 		data->args.offset = pos;
 		data->args.pgbase = pgbase;
-		data->args.pages = &pages[curpage];
+		data->args.pages = data->pagevec;
 		data->args.count = bytes;
 		data->res.fattr = &data->fattr;
 		data->res.eof = 0;
@@ -373,33 +318,37 @@ static void nfs_direct_read_schedule(struct nfs_direct_req *dreq)
 				bytes,
 				(unsigned long long)data->args.offset);
 
+		started += bytes;
+		user_addr += bytes;
 		pos += bytes;
+		/* FIXME: Remove this unnecessary math from final patch */
 		pgbase += bytes;
-		curpage += pgbase >> PAGE_SHIFT;
 		pgbase &= ~PAGE_MASK;
+		BUG_ON(pgbase != (user_addr & ~PAGE_MASK));
 
 		count -= bytes;
 	} while (count != 0);
-	BUG_ON(!list_empty(list));
+
+	if (put_dreq(dreq))
+		nfs_direct_complete(dreq);
+
+	if (started)
+		return 0;
+	return result < 0 ? (ssize_t) result : -EFAULT;
 }
 
-static ssize_t nfs_direct_read(struct kiocb *iocb, unsigned long user_addr, size_t count, loff_t pos, struct page **pages, unsigned int nr_pages)
+static ssize_t nfs_direct_read(struct kiocb *iocb, unsigned long user_addr, size_t count, loff_t pos)
 {
-	ssize_t result;
+	ssize_t result = 0;
 	sigset_t oldset;
 	struct inode *inode = iocb->ki_filp->f_mapping->host;
 	struct rpc_clnt *clnt = NFS_CLIENT(inode);
 	struct nfs_direct_req *dreq;
 
-	dreq = nfs_direct_read_alloc(count, NFS_SERVER(inode)->rsize);
+	dreq = nfs_direct_req_alloc();
 	if (!dreq)
 		return -ENOMEM;
 
-	dreq->user_addr = user_addr;
-	dreq->user_count = count;
-	dreq->pos = pos;
-	dreq->pages = pages;
-	dreq->npages = nr_pages;
 	dreq->inode = inode;
 	dreq->ctx = get_nfs_open_context((struct nfs_open_context *)iocb->ki_filp->private_data);
 	if (!is_sync_kiocb(iocb))
@@ -407,8 +356,9 @@ static ssize_t nfs_direct_read(struct kiocb *iocb, unsigned long user_addr, size
 
 	nfs_add_stats(inode, NFSIOS_DIRECTREADBYTES, count);
 	rpc_clnt_sigmask(clnt, &oldset);
-	nfs_direct_read_schedule(dreq);
-	result = nfs_direct_wait(dreq);
+	result = nfs_direct_read_schedule(dreq, user_addr, count, pos);
+	if (!result)
+		result = nfs_direct_wait(dreq);
 	rpc_clnt_sigunmask(clnt, &oldset);
 
 	return result;
@@ -416,10 +366,10 @@ static ssize_t nfs_direct_read(struct kiocb *iocb, unsigned long user_addr, size
 
 static void nfs_direct_free_writedata(struct nfs_direct_req *dreq)
 {
-	list_splice_init(&dreq->rewrite_list, &dreq->list);
-	while (!list_empty(&dreq->list)) {
-		struct nfs_write_data *data = list_entry(dreq->list.next, struct nfs_write_data, pages);
+	while (!list_empty(&dreq->rewrite_list)) {
+		struct nfs_write_data *data = list_entry(dreq->rewrite_list.next, struct nfs_write_data, pages);
 		list_del(&data->pages);
+		nfs_direct_release_pages(data->pagevec, data->npages);
 		nfs_writedata_release(data);
 	}
 }
@@ -427,14 +377,51 @@ static void nfs_direct_free_writedata(struct nfs_direct_req *dreq)
 #if defined(CONFIG_NFS_V3) || defined(CONFIG_NFS_V4)
 static void nfs_direct_write_reschedule(struct nfs_direct_req *dreq)
 {
-	struct list_head *pos;
+	struct inode *inode = dreq->inode;
+	struct list_head *p;
+	struct nfs_write_data *data;
 
-	list_splice_init(&dreq->rewrite_list, &dreq->list);
-	list_for_each(pos, &dreq->list)
-		dreq->outstanding++;
 	dreq->count = 0;
+	get_dreq(dreq);
+
+	list_for_each(p, &dreq->rewrite_list) {
+		data = list_entry(p, struct nfs_write_data, pages);
+
+		get_dreq(dreq);
 
-	nfs_direct_write_schedule(dreq, FLUSH_STABLE);
+		/*
+		 * Reset data->res.
+		 */
+		nfs_fattr_init(&data->fattr);
+		data->res.count = data->args.count;
+		memset(&data->verf, 0, sizeof(data->verf));
+
+		/*
+		 * Reuse data->task; data->args should not have changed
+		 * since the original request was sent.
+		 */
+		rpc_init_task(&data->task, NFS_CLIENT(inode), RPC_TASK_ASYNC,
+				&nfs_write_direct_ops, data);
+		NFS_PROTO(inode)->write_setup(data, FLUSH_STABLE);
+
+		data->task.tk_priority = RPC_PRIORITY_NORMAL;
+		data->task.tk_cookie = (unsigned long) inode;
+
+		/*
+		 * We're called via an RPC callback, so BKL is already held.
+		 */
+		rpc_execute(&data->task);
+
+		dprintk("NFS: %5u rescheduled direct write call (req %s/%Ld, %u bytes @ offset %Lu)\n",
+				data->task.tk_pid,
+				inode->i_sb->s_id,
+				(long long)NFS_FILEID(inode),
+				data->args.count,
+				(unsigned long long)data->args.offset);
+	}
+
+	if (put_dreq(dreq))
+		nfs_direct_write_complete(dreq, inode);
 }
 
 static void nfs_direct_commit_result(struct rpc_task *task, void *calldata)
@@ -471,8 +458,8 @@ static void nfs_direct_commit_schedule(struct nfs_direct_req *dreq)
 	data->cred = dreq->ctx->cred;
 
 	data->args.fh = NFS_FH(data->inode);
-	data->args.offset = dreq->pos;
-	data->args.count = dreq->user_count;
+	data->args.offset = 0;
+	data->args.count = 0;
 	data->res.count = 0;
 	data->res.fattr = &data->fattr;
 	data->res.verf = &data->verf;
@@ -516,7 +503,7 @@ static void nfs_direct_write_complete(struct nfs_direct_req *dreq, struct inode
 
 static void nfs_alloc_commit_data(struct nfs_direct_req *dreq)
 {
-	dreq->commit_data = nfs_commit_alloc(0);
+	dreq->commit_data = nfs_commit_alloc();
 	if (dreq->commit_data != NULL)
 		dreq->commit_data->req = (struct nfs_page *) dreq;
 }
@@ -534,47 +521,6 @@ static void nfs_direct_write_complete(struct nfs_direct_req *dreq, struct inode
 }
 #endif
 
-static struct nfs_direct_req *nfs_direct_write_alloc(size_t nbytes, size_t wsize)
-{
-	struct list_head *list;
-	struct nfs_direct_req *dreq;
-	unsigned int wpages = (wsize + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT;
-
-	dreq = nfs_direct_req_alloc();
-	if (!dreq)
-		return NULL;
-
-	list = &dreq->list;
-	for(;;) {
-		struct nfs_write_data *data = nfs_writedata_alloc(wpages);
-
-		if (unlikely(!data)) {
-			while (!list_empty(list)) {
-				data = list_entry(list->next,
-						  struct nfs_write_data, pages);
-				list_del(&data->pages);
-				nfs_writedata_free(data);
-			}
-			kref_put(&dreq->kref, nfs_direct_req_release);
-			return NULL;
-		}
-
-		INIT_LIST_HEAD(&data->pages);
-		list_add(&data->pages, list);
-
-		data->req = (struct nfs_page *) dreq;
-		dreq->outstanding++;
-		if (nbytes <= wsize)
-			break;
-		nbytes -= wsize;
-	}
-
-	nfs_alloc_commit_data(dreq);
-
-	kref_get(&dreq->kref);
-	return dreq;
-}
-
 static void nfs_direct_write_result(struct rpc_task *task, void *calldata)
 {
 	struct nfs_write_data *data = calldata;
@@ -604,8 +550,6 @@ static void nfs_direct_write_result(struct rpc_task *task, void *calldata)
 			}
 		}
 	}
-	/* In case we have to resend */
-	data->args.stable = NFS_FILE_SYNC;
 
 	spin_unlock(&dreq->lock);
 }
@@ -619,14 +563,8 @@ static void nfs_direct_write_release(void *calldata)
 	struct nfs_write_data *data = calldata;
 	struct nfs_direct_req *dreq = (struct nfs_direct_req *) data->req;
 
-	spin_lock(&dreq->lock);
-	if (--dreq->outstanding) {
-		spin_unlock(&dreq->lock);
-		return;
-	}
-	spin_unlock(&dreq->lock);
-
-	nfs_direct_write_complete(dreq, data->inode);
+	if (put_dreq(dreq))
+		nfs_direct_write_complete(dreq, data->inode);
 }
 
 static const struct rpc_call_ops nfs_write_direct_ops = {
@@ -635,41 +573,58 @@ static const struct rpc_call_ops nfs_write_direct_ops = {
 };
 
 /*
- * For each nfs_write_data struct that was allocated on the list, dispatch
- * an NFS WRITE operation
+ * For each wsize'd chunk of the user's buffer, dispatch an NFS WRITE
+ * operation. If nfs_writedata_alloc() or get_user_pages() fails,
+ * bail and stop sending more writes. Write length accounting is
+ * handled automatically by nfs_direct_write_result(). Otherwise, if
+ * no requests have been sent, just return an error.
  */
-static void nfs_direct_write_schedule(struct nfs_direct_req *dreq, int sync)
+static ssize_t nfs_direct_write_schedule(struct nfs_direct_req *dreq, unsigned long user_addr, size_t count, loff_t pos, int sync)
 {
 	struct nfs_open_context *ctx = dreq->ctx;
 	struct inode *inode = ctx->dentry->d_inode;
-	struct list_head *list = &dreq->list;
-	struct page **pages = dreq->pages;
-	size_t count = dreq->user_count;
-	loff_t pos = dreq->pos;
 	size_t wsize = NFS_SERVER(inode)->wsize;
-	unsigned int curpage, pgbase;
+	unsigned int pgbase;
+	int result;
+	ssize_t started = 0;
+
+	get_dreq(dreq);
 
-	curpage = 0;
-	pgbase = dreq->user_addr & ~PAGE_MASK;
 	do {
 		struct nfs_write_data *data;
 		size_t bytes;
 
-		bytes = wsize;
-		if (count < wsize)
-			bytes = count;
+		pgbase = user_addr & ~PAGE_MASK;
+		bytes = min(wsize,count);
+
+		result = -ENOMEM;
+		data = nfs_writedata_alloc(pgbase + bytes);
+		if (unlikely(!data))
+			break;
+
+		down_read(&current->mm->mmap_sem);
+		result = get_user_pages(current, current->mm, user_addr,
+					data->npages, 0, 0, data->pagevec, NULL);
+		up_read(&current->mm->mmap_sem);
+		if (unlikely(result < data->npages)) {
+			if (result > 0)
+				nfs_direct_release_pages(data->pagevec, result);
+			nfs_writedata_release(data);
+			break;
+		}
+
+		get_dreq(dreq);
 
-		BUG_ON(list_empty(list));
-		data = list_entry(list->next, struct nfs_write_data, pages);
 		list_move_tail(&data->pages, &dreq->rewrite_list);
 
+		data->req = (struct nfs_page *) dreq;
 		data->inode = inode;
 		data->cred = ctx->cred;
 		data->args.fh = NFS_FH(inode);
 		data->args.context = ctx;
 		data->args.offset = pos;
 		data->args.pgbase = pgbase;
-		data->args.pages = &pages[curpage];
+		data->args.pages = data->pagevec;
 		data->args.count = bytes;
 		data->res.fattr = &data->fattr;
 		data->res.count = bytes;
@@ -693,19 +648,29 @@ static void nfs_direct_write_schedule(struct nfs_direct_req *dreq, int sync)
 				bytes,
 				(unsigned long long)data->args.offset);
 
+		started += bytes;
+		user_addr += bytes;
 		pos += bytes;
+
+		/* FIXME: Remove this useless math from the final patch */
 		pgbase += bytes;
-		curpage += pgbase >> PAGE_SHIFT;
 		pgbase &= ~PAGE_MASK;
+		BUG_ON(pgbase != (user_addr & ~PAGE_MASK));
 
 		count -= bytes;
 	} while (count != 0);
-	BUG_ON(!list_empty(list));
+
+	if (put_dreq(dreq))
+		nfs_direct_write_complete(dreq, inode);
+
+	if (started)
+		return 0;
+	return result < 0 ? (ssize_t) result : -EFAULT;
 }
 
-static ssize_t nfs_direct_write(struct kiocb *iocb, unsigned long user_addr, size_t count, loff_t pos, struct page **pages, int nr_pages)
+static ssize_t nfs_direct_write(struct kiocb *iocb, unsigned long user_addr, size_t count, loff_t pos)
 {
-	ssize_t result;
+	ssize_t result = 0;
 	sigset_t oldset;
 	struct inode *inode = iocb->ki_filp->f_mapping->host;
 	struct rpc_clnt *clnt = NFS_CLIENT(inode);
@@ -713,17 +678,14 @@ static ssize_t nfs_direct_write(struct kiocb *iocb, unsigned long user_addr, siz
 	size_t wsize = NFS_SERVER(inode)->wsize;
 	int sync = 0;
 
-	dreq = nfs_direct_write_alloc(count, wsize);
+	dreq = nfs_direct_req_alloc();
 	if (!dreq)
 		return -ENOMEM;
+	nfs_alloc_commit_data(dreq);
+
 	if (dreq->commit_data == NULL || count < wsize)
 		sync = FLUSH_STABLE;
 
-	dreq->user_addr = user_addr;
-	dreq->user_count = count;
-	dreq->pos = pos;
-	dreq->pages = pages;
-	dreq->npages = nr_pages;
 	dreq->inode = inode;
 	dreq->ctx = get_nfs_open_context((struct nfs_open_context *)iocb->ki_filp->private_data);
 	if (!is_sync_kiocb(iocb))
@@ -734,8 +696,9 @@ static ssize_t nfs_direct_write(struct kiocb *iocb, unsigned long user_addr, siz
 	nfs_begin_data_update(inode);
 
 	rpc_clnt_sigmask(clnt, &oldset);
-	nfs_direct_write_schedule(dreq, sync);
-	result = nfs_direct_wait(dreq);
+	result = nfs_direct_write_schedule(dreq, user_addr, count, pos, sync);
+	if (!result)
+		result = nfs_direct_wait(dreq);
 	rpc_clnt_sigunmask(clnt, &oldset);
 
 	return result;
@@ -765,8 +728,6 @@ static ssize_t nfs_direct_write(struct kiocb *iocb, unsigned long user_addr, siz
 ssize_t nfs_file_direct_read(struct kiocb *iocb, char __user *buf, size_t count, loff_t pos)
 {
 	ssize_t retval = -EINVAL;
-	int page_count;
-	struct page **pages;
 	struct file *file = iocb->ki_filp;
 	struct address_space *mapping = file->f_mapping;
 
@@ -788,14 +749,7 @@ ssize_t nfs_file_direct_read(struct kiocb *iocb, char __user *buf, size_t count,
 	if (retval)
 		goto out;
 
-	retval = nfs_get_user_pages(READ, (unsigned long) buf,
-						count, &pages);
-	if (retval < 0)
-		goto out;
-	page_count = retval;
-
-	retval = nfs_direct_read(iocb, (unsigned long) buf, count, pos,
-						pages, page_count);
+	retval = nfs_direct_read(iocb, (unsigned long) buf, count, pos);
 	if (retval > 0)
 		iocb->ki_pos = pos + retval;
 
@@ -831,8 +785,6 @@ out:
 ssize_t nfs_file_direct_write(struct kiocb *iocb, const char __user *buf, size_t count, loff_t pos)
 {
 	ssize_t retval;
-	int page_count;
-	struct page **pages;
 	struct file *file = iocb->ki_filp;
 	struct address_space *mapping = file->f_mapping;
 
@@ -860,14 +812,7 @@ ssize_t nfs_file_direct_write(struct kiocb *iocb, const char __user *buf, size_t
 	if (retval)
 		goto out;
 
-	retval = nfs_get_user_pages(WRITE, (unsigned long) buf,
-						count, &pages);
-	if (retval < 0)
-		goto out;
-	page_count = retval;
-
-	retval = nfs_direct_write(iocb, (unsigned long) buf, count,
-						pos, pages, page_count);
+	retval = nfs_direct_write(iocb, (unsigned long) buf, count, pos);
 
 	/*
 	 * XXX: nfs_end_data_update() already ensures this file's
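
The heart of the direct.c rework above: the spinlock-protected "outstanding" counter becomes an atomic io_count managed by get_dreq()/put_dreq(), and nfs_read/write_data structs are allocated per rsize/wsize chunk as I/O is scheduled rather than pre-built on a list. The scheduler holds its own reference while dispatching, so completion cannot fire until the last I/O and the scheduler have both dropped theirs. A self-contained C11 sketch of that pattern (userspace analogue with assumed names, not the kernel API):

#include <stdatomic.h>
#include <stdio.h>

struct direct_req {
	atomic_int io_count;	/* i/os we're waiting for */
	int done;
};

static void get_dreq(struct direct_req *dreq)
{
	atomic_fetch_add(&dreq->io_count, 1);
}

/* Returns nonzero when this put dropped the count to zero. */
static int put_dreq(struct direct_req *dreq)
{
	return atomic_fetch_sub(&dreq->io_count, 1) == 1;
}

static void complete_req(struct direct_req *dreq)
{
	dreq->done = 1;	/* stand-in for nfs_direct_complete() */
}

/* Called once per finished chunk, e.g. from an I/O callback. */
static void io_done(struct direct_req *dreq)
{
	if (put_dreq(dreq))
		complete_req(dreq);
}

int main(void)
{
	struct direct_req dreq = { .done = 0 };
	int i;

	atomic_init(&dreq.io_count, 0);
	get_dreq(&dreq);		/* scheduler's reference */
	for (i = 0; i < 4; i++)
		get_dreq(&dreq);	/* one per dispatched chunk */
	for (i = 0; i < 4; i++)
		io_done(&dreq);		/* chunks may finish early... */
	io_done(&dreq);			/* ...completion waits for this drop */
	printf("completed: %d\n", dreq.done);
	return 0;
}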
diff --git a/fs/nfs/file.c b/fs/nfs/file.c
index cc2b874ad5a4..48e892880d5b 100644
--- a/fs/nfs/file.c
+++ b/fs/nfs/file.c
@@ -312,7 +312,13 @@ static void nfs_invalidate_page(struct page *page, unsigned long offset)
 
 static int nfs_release_page(struct page *page, gfp_t gfp)
 {
-	return !nfs_wb_page(page->mapping->host, page);
+	if (gfp & __GFP_FS)
+		return !nfs_wb_page(page->mapping->host, page);
+	else
+		/*
+		 * Avoid deadlock on nfs_wait_on_request().
+		 */
+		return 0;
 }
 
 const struct address_space_operations nfs_file_aops = {
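
The nfs_release_page() change matters because ->releasepage can be invoked from memory reclaim in contexts that must not re-enter filesystem code: nfs_wb_page() may sleep in nfs_wait_on_request() waiting for I/O that the blocked allocation itself is holding up. A hedged userspace sketch of the guard (hypothetical flag value and names):

#include <stdbool.h>

#define GFP_FS 0x1	/* caller tolerates recursion into FS code */

/* Stand-in for nfs_wb_page(): may block waiting on outstanding I/O. */
static int writeback_page(void *page)
{
	(void)page;
	return 0;	/* 0 == success */
}

static bool release_page(void *page, unsigned int gfp)
{
	if (gfp & GFP_FS)
		return writeback_page(page) == 0;
	/* Reclaim without GFP_FS: starting writeback here could deadlock
	 * against the allocation we are trying to satisfy, so decline. */
	return false;
}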
diff --git a/fs/nfs/idmap.c b/fs/nfs/idmap.c
index b81e7ed3c902..07a5dd57646e 100644
--- a/fs/nfs/idmap.c
+++ b/fs/nfs/idmap.c
@@ -130,9 +130,7 @@ nfs_idmap_delete(struct nfs4_client *clp)
 
 	if (!idmap)
 		return;
-	dput(idmap->idmap_dentry);
-	idmap->idmap_dentry = NULL;
-	rpc_unlink(idmap->idmap_path);
+	rpc_unlink(idmap->idmap_dentry);
 	clp->cl_idmap = NULL;
 	kfree(idmap);
 }
diff --git a/fs/nfs/namespace.c b/fs/nfs/namespace.c
index 19b98ca468eb..86b3169c8cac 100644
--- a/fs/nfs/namespace.c
+++ b/fs/nfs/namespace.c
@@ -51,7 +51,7 @@ char *nfs_path(const char *base, const struct dentry *dentry,
 		namelen = dentry->d_name.len;
 		buflen -= namelen + 1;
 		if (buflen < 0)
-			goto Elong;
+			goto Elong_unlock;
 		end -= namelen;
 		memcpy(end, dentry->d_name.name, namelen);
 		*--end = '/';
@@ -68,6 +68,8 @@ char *nfs_path(const char *base, const struct dentry *dentry,
 	end -= namelen;
 	memcpy(end, base, namelen);
 	return end;
+Elong_unlock:
+	spin_unlock(&dcache_lock);
 Elong:
 	return ERR_PTR(-ENAMETOOLONG);
 }
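
The namespace.c hunk fixes a lock leak: nfs_path() walks dentries under dcache_lock, but the buffer-overflow check inside the loop jumped straight to Elong without unlocking. A small userspace analogue of the bug shape and the Elong_unlock-style fix (illustrative names):

#include <errno.h>
#include <pthread.h>

static pthread_mutex_t cache_lock = PTHREAD_MUTEX_INITIALIZER;

static int build_path(int buflen)
{
	pthread_mutex_lock(&cache_lock);
	while (buflen > 0) {
		buflen -= 8;	/* consume one path component */
		if (buflen < 0)
			goto elong_unlock;	/* was: goto elong, leaking the lock */
	}
	pthread_mutex_unlock(&cache_lock);
	return 0;
elong_unlock:
	pthread_mutex_unlock(&cache_lock);
	return -ENAMETOOLONG;
}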
diff --git a/fs/nfs/nfs4proc.c b/fs/nfs/nfs4proc.c
index b4916b092194..153898e1331f 100644
--- a/fs/nfs/nfs4proc.c
+++ b/fs/nfs/nfs4proc.c
@@ -2668,7 +2668,7 @@ out:
 	nfs4_set_cached_acl(inode, acl);
 }
 
-static inline ssize_t nfs4_get_acl_uncached(struct inode *inode, void *buf, size_t buflen)
+static ssize_t __nfs4_get_acl_uncached(struct inode *inode, void *buf, size_t buflen)
 {
 	struct page *pages[NFS4ACL_MAXPAGES];
 	struct nfs_getaclargs args = {
@@ -2721,6 +2721,19 @@ out_free:
 	return ret;
 }
 
+static ssize_t nfs4_get_acl_uncached(struct inode *inode, void *buf, size_t buflen)
+{
+	struct nfs4_exception exception = { };
+	ssize_t ret;
+	do {
+		ret = __nfs4_get_acl_uncached(inode, buf, buflen);
+		if (ret >= 0)
+			break;
+		ret = nfs4_handle_exception(NFS_SERVER(inode), ret, &exception);
+	} while (exception.retry);
+	return ret;
+}
+
 static ssize_t nfs4_proc_get_acl(struct inode *inode, void *buf, size_t buflen)
 {
 	struct nfs_server *server = NFS_SERVER(inode);
@@ -2737,7 +2750,7 @@ static ssize_t nfs4_proc_get_acl(struct inode *inode, void *buf, size_t buflen)
 	return nfs4_get_acl_uncached(inode, buf, buflen);
 }
 
-static int nfs4_proc_set_acl(struct inode *inode, const void *buf, size_t buflen)
+static int __nfs4_proc_set_acl(struct inode *inode, const void *buf, size_t buflen)
 {
 	struct nfs_server *server = NFS_SERVER(inode);
 	struct page *pages[NFS4ACL_MAXPAGES];
@@ -2763,6 +2776,18 @@ static int nfs4_proc_set_acl(struct inode *inode, const void *buf, size_t buflen
 	return ret;
 }
 
+static int nfs4_proc_set_acl(struct inode *inode, const void *buf, size_t buflen)
+{
+	struct nfs4_exception exception = { };
+	int err;
+	do {
+		err = nfs4_handle_exception(NFS_SERVER(inode),
+				__nfs4_proc_set_acl(inode, buf, buflen),
+				&exception);
+	} while (exception.retry);
+	return err;
+}
+
 static int
 nfs4_async_handle_error(struct rpc_task *task, const struct nfs_server *server)
 {
@@ -3144,9 +3169,6 @@ static int do_vfs_lock(struct file *file, struct file_lock *fl)
 		default:
 			BUG();
 	}
-	if (res < 0)
-		printk(KERN_WARNING "%s: VFS is out of sync with lock manager!\n",
-				__FUNCTION__);
 	return res;
 }
 
@@ -3258,8 +3280,6 @@ static struct rpc_task *nfs4_do_unlck(struct file_lock *fl,
 		return ERR_PTR(-ENOMEM);
 	}
 
-	/* Unlock _before_ we do the RPC call */
-	do_vfs_lock(fl->fl_file, fl);
 	return rpc_run_task(NFS_CLIENT(lsp->ls_state->inode), RPC_TASK_ASYNC, &nfs4_locku_ops, data);
 }
 
@@ -3270,30 +3290,28 @@ static int nfs4_proc_unlck(struct nfs4_state *state, int cmd, struct file_lock *
 	struct rpc_task *task;
 	int status = 0;
 
-	/* Is this a delegated lock? */
-	if (test_bit(NFS_DELEGATED_STATE, &state->flags))
-		goto out_unlock;
-	/* Is this open_owner holding any locks on the server? */
-	if (test_bit(LK_STATE_IN_USE, &state->flags) == 0)
-		goto out_unlock;
-
 	status = nfs4_set_lock_state(state, request);
+	/* Unlock _before_ we do the RPC call */
+	request->fl_flags |= FL_EXISTS;
+	if (do_vfs_lock(request->fl_file, request) == -ENOENT)
+		goto out;
 	if (status != 0)
-		goto out_unlock;
+		goto out;
+	/* Is this a delegated lock? */
+	if (test_bit(NFS_DELEGATED_STATE, &state->flags))
+		goto out;
 	lsp = request->fl_u.nfs4_fl.owner;
-	status = -ENOMEM;
 	seqid = nfs_alloc_seqid(&lsp->ls_seqid);
+	status = -ENOMEM;
 	if (seqid == NULL)
-		goto out_unlock;
+		goto out;
 	task = nfs4_do_unlck(request, request->fl_file->private_data, lsp, seqid);
 	status = PTR_ERR(task);
 	if (IS_ERR(task))
-		goto out_unlock;
+		goto out;
 	status = nfs4_wait_for_completion_rpc_task(task);
 	rpc_release_task(task);
-	return status;
-out_unlock:
-	do_vfs_lock(request->fl_file, request);
+out:
 	return status;
 }
 
@@ -3461,10 +3479,10 @@ static int nfs4_lock_reclaim(struct nfs4_state *state, struct file_lock *request
 	struct nfs4_exception exception = { };
 	int err;
 
-	/* Cache the lock if possible... */
-	if (test_bit(NFS_DELEGATED_STATE, &state->flags))
-		return 0;
 	do {
+		/* Cache the lock if possible... */
+		if (test_bit(NFS_DELEGATED_STATE, &state->flags) != 0)
+			return 0;
 		err = _nfs4_do_setlk(state, F_SETLK, request, 1);
 		if (err != -NFS4ERR_DELAY)
 			break;
@@ -3483,6 +3501,8 @@ static int nfs4_lock_expired(struct nfs4_state *state, struct file_lock *request
 	if (err != 0)
 		return err;
 	do {
+		if (test_bit(NFS_DELEGATED_STATE, &state->flags) != 0)
+			return 0;
 		err = _nfs4_do_setlk(state, F_SETLK, request, 0);
 		if (err != -NFS4ERR_DELAY)
 			break;
@@ -3494,29 +3514,42 @@ static int nfs4_lock_expired(struct nfs4_state *state, struct file_lock *request
 static int _nfs4_proc_setlk(struct nfs4_state *state, int cmd, struct file_lock *request)
 {
 	struct nfs4_client *clp = state->owner->so_client;
+	unsigned char fl_flags = request->fl_flags;
 	int status;
 
 	/* Is this a delegated open? */
-	if (NFS_I(state->inode)->delegation_state != 0) {
-		/* Yes: cache locks! */
-		status = do_vfs_lock(request->fl_file, request);
-		/* ...but avoid races with delegation recall... */
-		if (status < 0 || test_bit(NFS_DELEGATED_STATE, &state->flags))
-			return status;
-	}
-	down_read(&clp->cl_sem);
 	status = nfs4_set_lock_state(state, request);
 	if (status != 0)
 		goto out;
+	request->fl_flags |= FL_ACCESS;
+	status = do_vfs_lock(request->fl_file, request);
+	if (status < 0)
+		goto out;
+	down_read(&clp->cl_sem);
+	if (test_bit(NFS_DELEGATED_STATE, &state->flags)) {
+		struct nfs_inode *nfsi = NFS_I(state->inode);
+		/* Yes: cache locks! */
+		down_read(&nfsi->rwsem);
+		/* ...but avoid races with delegation recall... */
+		if (test_bit(NFS_DELEGATED_STATE, &state->flags)) {
+			request->fl_flags = fl_flags & ~FL_SLEEP;
+			status = do_vfs_lock(request->fl_file, request);
+			up_read(&nfsi->rwsem);
+			goto out_unlock;
+		}
+		up_read(&nfsi->rwsem);
+	}
 	status = _nfs4_do_setlk(state, cmd, request, 0);
 	if (status != 0)
-		goto out;
+		goto out_unlock;
 	/* Note: we always want to sleep here! */
-	request->fl_flags |= FL_SLEEP;
+	request->fl_flags = fl_flags | FL_SLEEP;
 	if (do_vfs_lock(request->fl_file, request) < 0)
 		printk(KERN_WARNING "%s: VFS is out of sync with lock manager!\n", __FUNCTION__);
-out:
+out_unlock:
 	up_read(&clp->cl_sem);
+out:
+	request->fl_flags = fl_flags;
 	return status;
 }
 
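
Two themes run through these nfs4proc.c hunks: the ACL operations gain retry wrappers that loop through nfs4_handle_exception() until the error is fatal or the call succeeds, and the lock paths are reordered so the VFS lock state is probed (FL_EXISTS, FL_ACCESS) before the RPC, with fl_flags always restored on exit. A minimal sketch of the retry-wrapper shape only (illustrative names and error code, not the kernel API):

/* Raw operation: fails twice with a retryable error, then succeeds. */
static int __do_op(void)
{
	static int attempts;
	return (attempts++ < 2) ? -11 : 0;	/* -11 plays the role of NFS4ERR_DELAY */
}

struct exception { int retry; };

/* Decide whether an error is worth retrying (possibly after recovery). */
static int handle_exception(int err, struct exception *exc)
{
	exc->retry = (err == -11);
	return exc->retry ? 0 : err;
}

static int do_op(void)
{
	struct exception exc = { 0 };
	int err;
	do {
		err = handle_exception(__do_op(), &exc);
	} while (exc.retry);
	return err;
}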
diff --git a/fs/nfs/nfs4xdr.c b/fs/nfs/nfs4xdr.c
index 1750d996f49f..730ec8fb31c6 100644
--- a/fs/nfs/nfs4xdr.c
+++ b/fs/nfs/nfs4xdr.c
@@ -3355,7 +3355,7 @@ static int decode_readdir(struct xdr_stream *xdr, struct rpc_rqst *req, struct n
 	struct kvec	*iov = rcvbuf->head;
 	unsigned int	nr, pglen = rcvbuf->page_len;
 	uint32_t	*end, *entry, *p, *kaddr;
-	uint32_t	len, attrlen;
+	uint32_t	len, attrlen, xlen;
 	int		hdrlen, recvd, status;
 
 	status = decode_op_hdr(xdr, OP_READDIR);
@@ -3377,10 +3377,10 @@ static int decode_readdir(struct xdr_stream *xdr, struct rpc_rqst *req, struct n
 
 	BUG_ON(pglen + readdir->pgbase > PAGE_CACHE_SIZE);
 	kaddr = p = (uint32_t *) kmap_atomic(page, KM_USER0);
-	end = (uint32_t *) ((char *)p + pglen + readdir->pgbase);
+	end = p + ((pglen + readdir->pgbase) >> 2);
 	entry = p;
 	for (nr = 0; *p++; nr++) {
-		if (p + 3 > end)
+		if (end - p < 3)
 			goto short_pkt;
 		dprintk("cookie = %Lu, ", *((unsigned long long *)p));
 		p += 2;			/* cookie */
@@ -3389,18 +3389,19 @@ static int decode_readdir(struct xdr_stream *xdr, struct rpc_rqst *req, struct n
 			printk(KERN_WARNING "NFS: giant filename in readdir (len 0x%x)\n", len);
 			goto err_unmap;
 		}
-		dprintk("filename = %*s\n", len, (char *)p);
-		p += XDR_QUADLEN(len);
-		if (p + 1 > end)
+		xlen = XDR_QUADLEN(len);
+		if (end - p < xlen + 1)
 			goto short_pkt;
+		dprintk("filename = %*s\n", len, (char *)p);
+		p += xlen;
 		len = ntohl(*p++);	/* bitmap length */
-		p += len;
-		if (p + 1 > end)
+		if (end - p < len + 1)
 			goto short_pkt;
+		p += len;
 		attrlen = XDR_QUADLEN(ntohl(*p++));
-		p += attrlen;		/* attributes */
-		if (p + 2 > end)
+		if (end - p < attrlen + 2)
 			goto short_pkt;
+		p += attrlen;		/* attributes */
 		entry = p;
 	}
 	if (!nr && (entry[0] != 0 || entry[1] == 0))
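
The decode_readdir() rewrite is an overflow fix: with len, xlen, and attrlen taken from the wire, a check of the form "p + len > end" can wrap the pointer past the end of the address space and falsely pass, while "end - p < len" only compares the space actually remaining. A small sketch of the safe form (assumed types, precondition p <= end):

#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>

/* Safe bounds check: never computes p + nwords, which could wrap. */
static bool can_read(const uint32_t *p, const uint32_t *end, uint32_t nwords)
{
	return (size_t)(end - p) >= nwords;	/* words left in the buffer */
}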
diff --git a/fs/nfs/read.c b/fs/nfs/read.c index 52bf634260a1..7a9ee00e0c61 100644 --- a/fs/nfs/read.c +++ b/fs/nfs/read.c | |||
@@ -43,13 +43,15 @@ static mempool_t *nfs_rdata_mempool; | |||
43 | 43 | ||
44 | #define MIN_POOL_READ (32) | 44 | #define MIN_POOL_READ (32) |
45 | 45 | ||
46 | struct nfs_read_data *nfs_readdata_alloc(unsigned int pagecount) | 46 | struct nfs_read_data *nfs_readdata_alloc(size_t len) |
47 | { | 47 | { |
48 | unsigned int pagecount = (len + PAGE_SIZE - 1) >> PAGE_SHIFT; | ||
48 | struct nfs_read_data *p = mempool_alloc(nfs_rdata_mempool, SLAB_NOFS); | 49 | struct nfs_read_data *p = mempool_alloc(nfs_rdata_mempool, SLAB_NOFS); |
49 | 50 | ||
50 | if (p) { | 51 | if (p) { |
51 | memset(p, 0, sizeof(*p)); | 52 | memset(p, 0, sizeof(*p)); |
52 | INIT_LIST_HEAD(&p->pages); | 53 | INIT_LIST_HEAD(&p->pages); |
54 | p->npages = pagecount; | ||
53 | if (pagecount <= ARRAY_SIZE(p->page_array)) | 55 | if (pagecount <= ARRAY_SIZE(p->page_array)) |
54 | p->pagevec = p->page_array; | 56 | p->pagevec = p->page_array; |
55 | else { | 57 | else { |
@@ -63,7 +65,7 @@ struct nfs_read_data *nfs_readdata_alloc(unsigned int pagecount) | |||
63 | return p; | 65 | return p; |
64 | } | 66 | } |
65 | 67 | ||
66 | void nfs_readdata_free(struct nfs_read_data *p) | 68 | static void nfs_readdata_free(struct nfs_read_data *p) |
67 | { | 69 | { |
68 | if (p && (p->pagevec != &p->page_array[0])) | 70 | if (p && (p->pagevec != &p->page_array[0])) |
69 | kfree(p->pagevec); | 71 | kfree(p->pagevec); |
@@ -116,10 +118,17 @@ static void nfs_readpage_truncate_uninitialised_page(struct nfs_read_data *data) | |||
116 | pages = &data->args.pages[base >> PAGE_CACHE_SHIFT]; | 118 | pages = &data->args.pages[base >> PAGE_CACHE_SHIFT]; |
117 | base &= ~PAGE_CACHE_MASK; | 119 | base &= ~PAGE_CACHE_MASK; |
118 | pglen = PAGE_CACHE_SIZE - base; | 120 | pglen = PAGE_CACHE_SIZE - base; |
119 | if (pglen < remainder) | 121 | for (;;) { |
122 | if (remainder <= pglen) { | ||
123 | memclear_highpage_flush(*pages, base, remainder); | ||
124 | break; | ||
125 | } | ||
120 | memclear_highpage_flush(*pages, base, pglen); | 126 | memclear_highpage_flush(*pages, base, pglen); |
121 | else | 127 | pages++; |
122 | memclear_highpage_flush(*pages, base, remainder); | 128 | remainder -= pglen; |
129 | pglen = PAGE_CACHE_SIZE; | ||
130 | base = 0; | ||
131 | } | ||
123 | } | 132 | } |
124 | 133 | ||
125 | /* | 134 | /* |
@@ -133,7 +142,7 @@ static int nfs_readpage_sync(struct nfs_open_context *ctx, struct inode *inode, | |||
133 | int result; | 142 | int result; |
134 | struct nfs_read_data *rdata; | 143 | struct nfs_read_data *rdata; |
135 | 144 | ||
136 | rdata = nfs_readdata_alloc(1); | 145 | rdata = nfs_readdata_alloc(count); |
137 | if (!rdata) | 146 | if (!rdata) |
138 | return -ENOMEM; | 147 | return -ENOMEM; |
139 | 148 | ||
@@ -329,25 +338,25 @@ static int nfs_pagein_multi(struct list_head *head, struct inode *inode) | |||
329 | struct nfs_page *req = nfs_list_entry(head->next); | 338 | struct nfs_page *req = nfs_list_entry(head->next); |
330 | struct page *page = req->wb_page; | 339 | struct page *page = req->wb_page; |
331 | struct nfs_read_data *data; | 340 | struct nfs_read_data *data; |
332 | unsigned int rsize = NFS_SERVER(inode)->rsize; | 341 | size_t rsize = NFS_SERVER(inode)->rsize, nbytes; |
333 | unsigned int nbytes, offset; | 342 | unsigned int offset; |
334 | int requests = 0; | 343 | int requests = 0; |
335 | LIST_HEAD(list); | 344 | LIST_HEAD(list); |
336 | 345 | ||
337 | nfs_list_remove_request(req); | 346 | nfs_list_remove_request(req); |
338 | 347 | ||
339 | nbytes = req->wb_bytes; | 348 | nbytes = req->wb_bytes; |
340 | for(;;) { | 349 | do { |
341 | data = nfs_readdata_alloc(1); | 350 | size_t len = min(nbytes,rsize); |
351 | |||
352 | data = nfs_readdata_alloc(len); | ||
342 | if (!data) | 353 | if (!data) |
343 | goto out_bad; | 354 | goto out_bad; |
344 | INIT_LIST_HEAD(&data->pages); | 355 | INIT_LIST_HEAD(&data->pages); |
345 | list_add(&data->pages, &list); | 356 | list_add(&data->pages, &list); |
346 | requests++; | 357 | requests++; |
347 | if (nbytes <= rsize) | 358 | nbytes -= len; |
348 | break; | 359 | } while(nbytes != 0); |
349 | nbytes -= rsize; | ||
350 | } | ||
351 | atomic_set(&req->wb_complete, requests); | 360 | atomic_set(&req->wb_complete, requests); |
352 | 361 | ||
353 | ClearPageError(page); | 362 | ClearPageError(page); |
@@ -395,7 +404,7 @@ static int nfs_pagein_one(struct list_head *head, struct inode *inode) | |||
395 | if (NFS_SERVER(inode)->rsize < PAGE_CACHE_SIZE) | 404 | if (NFS_SERVER(inode)->rsize < PAGE_CACHE_SIZE) |
396 | return nfs_pagein_multi(head, inode); | 405 | return nfs_pagein_multi(head, inode); |
397 | 406 | ||
398 | data = nfs_readdata_alloc(NFS_SERVER(inode)->rpages); | 407 | data = nfs_readdata_alloc(NFS_SERVER(inode)->rsize); |
399 | if (!data) | 408 | if (!data) |
400 | goto out_bad; | 409 | goto out_bad; |
401 | 410 | ||
@@ -476,6 +485,8 @@ static void nfs_readpage_set_pages_uptodate(struct nfs_read_data *data) | |||
476 | unsigned int base = data->args.pgbase; | 485 | unsigned int base = data->args.pgbase; |
477 | struct page **pages; | 486 | struct page **pages; |
478 | 487 | ||
488 | if (data->res.eof) | ||
489 | count = data->args.count; | ||
479 | if (unlikely(count == 0)) | 490 | if (unlikely(count == 0)) |
480 | return; | 491 | return; |
481 | pages = &data->args.pages[base >> PAGE_CACHE_SHIFT]; | 492 | pages = &data->args.pages[base >> PAGE_CACHE_SHIFT]; |
@@ -483,11 +494,7 @@ static void nfs_readpage_set_pages_uptodate(struct nfs_read_data *data) | |||
483 | count += base; | 494 | count += base; |
484 | for (;count >= PAGE_CACHE_SIZE; count -= PAGE_CACHE_SIZE, pages++) | 495 | for (;count >= PAGE_CACHE_SIZE; count -= PAGE_CACHE_SIZE, pages++) |
485 | SetPageUptodate(*pages); | 496 | SetPageUptodate(*pages); |
486 | /* | 497 | if (count != 0) |
487 | * Was this an eof or a short read? If the latter, don't mark the page | ||
488 | * as uptodate yet. | ||
489 | */ | ||
490 | if (count > 0 && (data->res.eof || data->args.count == data->res.count)) | ||
491 | SetPageUptodate(*pages); | 498 | SetPageUptodate(*pages); |
492 | } | 499 | } |
493 | 500 | ||
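The uptodate pass is simpler than it looks: on res.eof the whole requested range is treated as satisfied (count = data->args.count), which is safe because the clearing loop earlier in this file has already zeroed the uninitialised tail. The removed "eof or short read?" comment and its compound test collapse into a plain check for a trailing partial page, and the next hunk gives the SetPageError() loop the same trailing-page line. The resulting walk, annotated as a sketch:

    if (data->res.eof)                      /* tail already zeroed */
            count = data->args.count;
    if (unlikely(count == 0))
            return;
    pages = &data->args.pages[base >> PAGE_CACHE_SHIFT];
    base &= ~PAGE_CACHE_MASK;
    count += base;                          /* fold in first-page offset */
    for (; count >= PAGE_CACHE_SIZE; count -= PAGE_CACHE_SIZE, pages++)
            SetPageUptodate(*pages);        /* fully covered pages */
    if (count != 0)
            SetPageUptodate(*pages);        /* trailing partial page */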
@@ -502,6 +509,8 @@ static void nfs_readpage_set_pages_error(struct nfs_read_data *data) | |||
502 | count += base; | 509 | count += base; |
503 | for (;count >= PAGE_CACHE_SIZE; count -= PAGE_CACHE_SIZE, pages++) | 510 | for (;count >= PAGE_CACHE_SIZE; count -= PAGE_CACHE_SIZE, pages++) |
504 | SetPageError(*pages); | 511 | SetPageError(*pages); |
512 | if (count != 0) | ||
513 | SetPageError(*pages); | ||
505 | } | 514 | } |
506 | 515 | ||
507 | /* | 516 | /* |
diff --git a/fs/nfs/write.c b/fs/nfs/write.c index bca5734ca9fb..8ab3cf10d792 100644 --- a/fs/nfs/write.c +++ b/fs/nfs/write.c | |||
@@ -90,22 +90,13 @@ static mempool_t *nfs_commit_mempool; | |||
90 | 90 | ||
91 | static DECLARE_WAIT_QUEUE_HEAD(nfs_write_congestion); | 91 | static DECLARE_WAIT_QUEUE_HEAD(nfs_write_congestion); |
92 | 92 | ||
93 | struct nfs_write_data *nfs_commit_alloc(unsigned int pagecount) | 93 | struct nfs_write_data *nfs_commit_alloc(void) |
94 | { | 94 | { |
95 | struct nfs_write_data *p = mempool_alloc(nfs_commit_mempool, SLAB_NOFS); | 95 | struct nfs_write_data *p = mempool_alloc(nfs_commit_mempool, SLAB_NOFS); |
96 | 96 | ||
97 | if (p) { | 97 | if (p) { |
98 | memset(p, 0, sizeof(*p)); | 98 | memset(p, 0, sizeof(*p)); |
99 | INIT_LIST_HEAD(&p->pages); | 99 | INIT_LIST_HEAD(&p->pages); |
100 | if (pagecount <= ARRAY_SIZE(p->page_array)) | ||
101 | p->pagevec = p->page_array; | ||
102 | else { | ||
103 | p->pagevec = kcalloc(pagecount, sizeof(struct page *), GFP_NOFS); | ||
104 | if (!p->pagevec) { | ||
105 | mempool_free(p, nfs_commit_mempool); | ||
106 | p = NULL; | ||
107 | } | ||
108 | } | ||
109 | } | 100 | } |
110 | return p; | 101 | return p; |
111 | } | 102 | } |
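A COMMIT carries no page payload, so nfs_commit_alloc() never really needed a caller-supplied page count; dropping the parameter lets the kcalloc() fallback and its error unwind disappear entirely. What remains, with comments:

    /* No pagevec sizing for commits: a zeroed mempool allocation
     * with its request list initialised is all that is needed. */
    struct nfs_write_data *nfs_commit_alloc(void)
    {
            struct nfs_write_data *p = mempool_alloc(nfs_commit_mempool, SLAB_NOFS);

            if (p) {
                    memset(p, 0, sizeof(*p));
                    INIT_LIST_HEAD(&p->pages);
            }
            return p;
    }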
@@ -117,13 +108,15 @@ void nfs_commit_free(struct nfs_write_data *p) | |||
117 | mempool_free(p, nfs_commit_mempool); | 108 | mempool_free(p, nfs_commit_mempool); |
118 | } | 109 | } |
119 | 110 | ||
120 | struct nfs_write_data *nfs_writedata_alloc(unsigned int pagecount) | 111 | struct nfs_write_data *nfs_writedata_alloc(size_t len) |
121 | { | 112 | { |
113 | unsigned int pagecount = (len + PAGE_SIZE - 1) >> PAGE_SHIFT; | ||
122 | struct nfs_write_data *p = mempool_alloc(nfs_wdata_mempool, SLAB_NOFS); | 114 | struct nfs_write_data *p = mempool_alloc(nfs_wdata_mempool, SLAB_NOFS); |
123 | 115 | ||
124 | if (p) { | 116 | if (p) { |
125 | memset(p, 0, sizeof(*p)); | 117 | memset(p, 0, sizeof(*p)); |
126 | INIT_LIST_HEAD(&p->pages); | 118 | INIT_LIST_HEAD(&p->pages); |
119 | p->npages = pagecount; | ||
127 | if (pagecount <= ARRAY_SIZE(p->page_array)) | 120 | if (pagecount <= ARRAY_SIZE(p->page_array)) |
128 | p->pagevec = p->page_array; | 121 | p->pagevec = p->page_array; |
129 | else { | 122 | else { |
@@ -137,7 +130,7 @@ struct nfs_write_data *nfs_writedata_alloc(unsigned int pagecount) | |||
137 | return p; | 130 | return p; |
138 | } | 131 | } |
139 | 132 | ||
140 | void nfs_writedata_free(struct nfs_write_data *p) | 133 | static void nfs_writedata_free(struct nfs_write_data *p) |
141 | { | 134 | { |
142 | if (p && (p->pagevec != &p->page_array[0])) | 135 | if (p && (p->pagevec != &p->page_array[0])) |
143 | kfree(p->pagevec); | 136 | kfree(p->pagevec); |
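nfs_writedata_alloc() now derives pagecount from the byte length and records it in p->npages for the i/o paths to consult, while keeping the inline page_array fast path; nfs_writedata_free() turns static, presumably because every remaining caller lives in write.c. The small/large split, sketched (the else branch is elided in the hunk above; its body is assumed to mirror the pattern removed from nfs_commit_alloc()):

    p->npages = pagecount;                  /* pages this i/o spans */
    if (pagecount <= ARRAY_SIZE(p->page_array))
            p->pagevec = p->page_array;     /* inline, no extra alloc */
    else {
            p->pagevec = kcalloc(pagecount, sizeof(struct page *), GFP_NOFS);
            if (!p->pagevec) {              /* unwind to the mempool */
                    mempool_free(p, nfs_wdata_mempool);
                    p = NULL;
            }
    }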
@@ -208,7 +201,7 @@ static int nfs_writepage_sync(struct nfs_open_context *ctx, struct inode *inode, | |||
208 | int result, written = 0; | 201 | int result, written = 0; |
209 | struct nfs_write_data *wdata; | 202 | struct nfs_write_data *wdata; |
210 | 203 | ||
211 | wdata = nfs_writedata_alloc(1); | 204 | wdata = nfs_writedata_alloc(wsize); |
212 | if (!wdata) | 205 | if (!wdata) |
213 | return -ENOMEM; | 206 | return -ENOMEM; |
214 | 207 | ||
@@ -578,7 +571,7 @@ static int nfs_wait_on_requests(struct inode *inode, unsigned long idx_start, un | |||
578 | return ret; | 571 | return ret; |
579 | } | 572 | } |
580 | 573 | ||
581 | static void nfs_cancel_requests(struct list_head *head) | 574 | static void nfs_cancel_dirty_list(struct list_head *head) |
582 | { | 575 | { |
583 | struct nfs_page *req; | 576 | struct nfs_page *req; |
584 | while(!list_empty(head)) { | 577 | while(!list_empty(head)) { |
@@ -589,6 +582,19 @@ static void nfs_cancel_requests(struct list_head *head) | |||
589 | } | 582 | } |
590 | } | 583 | } |
591 | 584 | ||
585 | static void nfs_cancel_commit_list(struct list_head *head) | ||
586 | { | ||
587 | struct nfs_page *req; | ||
588 | |||
589 | while(!list_empty(head)) { | ||
590 | req = nfs_list_entry(head->next); | ||
591 | nfs_list_remove_request(req); | ||
592 | nfs_inode_remove_request(req); | ||
593 | nfs_clear_page_writeback(req); | ||
594 | dec_zone_page_state(req->wb_page, NR_UNSTABLE_NFS); | ||
595 | } | ||
596 | } | ||
597 | |||
592 | /* | 598 | /* |
593 | * nfs_scan_dirty - Scan an inode for dirty requests | 599 | * nfs_scan_dirty - Scan an inode for dirty requests |
594 | * @inode: NFS inode to scan | 600 | * @inode: NFS inode to scan |
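nfs_cancel_requests() splits in two because the lists it served are not symmetric: a request on the commit list has been counted in NR_UNSTABLE_NFS (presumably when it was marked for commit), so cancelling it must also drop that zone statistic, whereas a merely dirty request carries no such accounting. The commit-side helper again, annotated:

    static void nfs_cancel_commit_list(struct list_head *head)
    {
            struct nfs_page *req;

            while (!list_empty(head)) {
                    req = nfs_list_entry(head->next);
                    nfs_list_remove_request(req);
                    nfs_inode_remove_request(req);
                    nfs_clear_page_writeback(req);
                    /* reverse the unstable-page accounting taken
                     * when the request joined the commit list */
                    dec_zone_page_state(req->wb_page, NR_UNSTABLE_NFS);
            }
    }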
@@ -986,24 +992,24 @@ static int nfs_flush_multi(struct inode *inode, struct list_head *head, int how) | |||
986 | struct nfs_page *req = nfs_list_entry(head->next); | 992 | struct nfs_page *req = nfs_list_entry(head->next); |
987 | struct page *page = req->wb_page; | 993 | struct page *page = req->wb_page; |
988 | struct nfs_write_data *data; | 994 | struct nfs_write_data *data; |
989 | unsigned int wsize = NFS_SERVER(inode)->wsize; | 995 | size_t wsize = NFS_SERVER(inode)->wsize, nbytes; |
990 | unsigned int nbytes, offset; | 996 | unsigned int offset; |
991 | int requests = 0; | 997 | int requests = 0; |
992 | LIST_HEAD(list); | 998 | LIST_HEAD(list); |
993 | 999 | ||
994 | nfs_list_remove_request(req); | 1000 | nfs_list_remove_request(req); |
995 | 1001 | ||
996 | nbytes = req->wb_bytes; | 1002 | nbytes = req->wb_bytes; |
997 | for (;;) { | 1003 | do { |
998 | data = nfs_writedata_alloc(1); | 1004 | size_t len = min(nbytes, wsize); |
1005 | |||
1006 | data = nfs_writedata_alloc(len); | ||
999 | if (!data) | 1007 | if (!data) |
1000 | goto out_bad; | 1008 | goto out_bad; |
1001 | list_add(&data->pages, &list); | 1009 | list_add(&data->pages, &list); |
1002 | requests++; | 1010 | requests++; |
1003 | if (nbytes <= wsize) | 1011 | nbytes -= len; |
1004 | break; | 1012 | } while (nbytes != 0); |
1005 | nbytes -= wsize; | ||
1006 | } | ||
1007 | atomic_set(&req->wb_complete, requests); | 1013 | atomic_set(&req->wb_complete, requests); |
1008 | 1014 | ||
1009 | ClearPageError(page); | 1015 | ClearPageError(page); |
@@ -1057,7 +1063,7 @@ static int nfs_flush_one(struct inode *inode, struct list_head *head, int how) | |||
1057 | struct nfs_write_data *data; | 1063 | struct nfs_write_data *data; |
1058 | unsigned int count; | 1064 | unsigned int count; |
1059 | 1065 | ||
1060 | data = nfs_writedata_alloc(NFS_SERVER(inode)->wpages); | 1066 | data = nfs_writedata_alloc(NFS_SERVER(inode)->wsize); |
1061 | if (!data) | 1067 | if (!data) |
1062 | goto out_bad; | 1068 | goto out_bad; |
1063 | 1069 | ||
@@ -1365,7 +1371,7 @@ nfs_commit_list(struct inode *inode, struct list_head *head, int how) | |||
1365 | struct nfs_write_data *data; | 1371 | struct nfs_write_data *data; |
1366 | struct nfs_page *req; | 1372 | struct nfs_page *req; |
1367 | 1373 | ||
1368 | data = nfs_commit_alloc(NFS_SERVER(inode)->wpages); | 1374 | data = nfs_commit_alloc(); |
1369 | 1375 | ||
1370 | if (!data) | 1376 | if (!data) |
1371 | goto out_bad; | 1377 | goto out_bad; |
@@ -1381,6 +1387,7 @@ nfs_commit_list(struct inode *inode, struct list_head *head, int how) | |||
1381 | nfs_list_remove_request(req); | 1387 | nfs_list_remove_request(req); |
1382 | nfs_mark_request_commit(req); | 1388 | nfs_mark_request_commit(req); |
1383 | nfs_clear_page_writeback(req); | 1389 | nfs_clear_page_writeback(req); |
1390 | dec_zone_page_state(req->wb_page, NR_UNSTABLE_NFS); | ||
1384 | } | 1391 | } |
1385 | return -ENOMEM; | 1392 | return -ENOMEM; |
1386 | } | 1393 | } |
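The one-line addition in nfs_commit_list()'s ENOMEM unwind serves the same invariant: nfs_mark_request_commit() presumably increments NR_UNSTABLE_NFS, and these requests were already counted before the failed commit attempt, so the re-marking must be balanced on the spot. As a sketch:

    /* Keep the counter balanced on the error path: re-marking a
     * still-counted page would otherwise double it. */
    nfs_list_remove_request(req);
    nfs_mark_request_commit(req);           /* bumps NR_UNSTABLE_NFS */
    nfs_clear_page_writeback(req);
    dec_zone_page_state(req->wb_page, NR_UNSTABLE_NFS);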
@@ -1499,7 +1506,7 @@ int nfs_sync_inode_wait(struct inode *inode, unsigned long idx_start, | |||
1499 | if (pages != 0) { | 1506 | if (pages != 0) { |
1500 | spin_unlock(&nfsi->req_lock); | 1507 | spin_unlock(&nfsi->req_lock); |
1501 | if (how & FLUSH_INVALIDATE) | 1508 | if (how & FLUSH_INVALIDATE) |
1502 | nfs_cancel_requests(&head); | 1509 | nfs_cancel_dirty_list(&head); |
1503 | else | 1510 | else |
1504 | ret = nfs_flush_list(inode, &head, pages, how); | 1511 | ret = nfs_flush_list(inode, &head, pages, how); |
1505 | spin_lock(&nfsi->req_lock); | 1512 | spin_lock(&nfsi->req_lock); |
@@ -1512,7 +1519,7 @@ int nfs_sync_inode_wait(struct inode *inode, unsigned long idx_start, | |||
1512 | break; | 1519 | break; |
1513 | if (how & FLUSH_INVALIDATE) { | 1520 | if (how & FLUSH_INVALIDATE) { |
1514 | spin_unlock(&nfsi->req_lock); | 1521 | spin_unlock(&nfsi->req_lock); |
1515 | nfs_cancel_requests(&head); | 1522 | nfs_cancel_commit_list(&head); |
1516 | spin_lock(&nfsi->req_lock); | 1523 | spin_lock(&nfsi->req_lock); |
1517 | continue; | 1524 | continue; |
1518 | } | 1525 | } |
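Both FLUSH_INVALIDATE call sites in nfs_sync_inode_wait() now use the helper matching the list being torn down, and both drop req_lock around the call, presumably because the helpers (through nfs_clear_page_writeback() and friends) take the inode's locks themselves. The commit pass, sketched:

    if (how & FLUSH_INVALIDATE) {
            spin_unlock(&nfsi->req_lock);   /* helpers relock inside */
            nfs_cancel_commit_list(&head);  /* also drops NR_UNSTABLE_NFS */
            spin_lock(&nfsi->req_lock);
            continue;                       /* rescan for more requests */
    }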