author		Fred Isaman <iisaman@netapp.com>	2012-04-20 14:47:46 -0400
committer	Trond Myklebust <Trond.Myklebust@netapp.com>	2012-04-27 14:10:37 -0400
commit		4db6e0b74c0f6dfc2f9c0690e8df512e3b635983 (patch)
tree		19d8a2a7051bdab220b0bdcf3da1e350a53ce428 /fs/nfs/read.c
parent		30dd374f6fc1b202db3a1b57b61afff1326bad92 (diff)
NFS: merge _full and _partial read rpc_ops
Decouple nfs_pgio_header and nfs_read_data, and have (possibly
multiple) nfs_read_datas each take a refcount on nfs_pgio_header.
For the moment, nfs_read_header is kept as a way to preallocate a single
nfs_read_data along with the nfs_pgio_header. The code doesn't need this
and would be cleaner without it, but given the amount of churn already
being introduced, I didn't want to also start tuning new mempools.
This also fixes a bug in pnfs_ld_handle_read_error: when
desc->pg_bsize < PAGE_CACHE_SIZE, the pages list was empty, causing the
replay attempt to do nothing.
Signed-off-by: Fred Isaman <iisaman@netapp.com>
Signed-off-by: Trond Myklebust <Trond.Myklebust@netapp.com>
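
To make the new refcounting concrete, here is a minimal user-space sketch of the lifecycle this patch introduces: the submitter holds one reference on the shared header, each nfs_read_data takes another, and whichever reference drops last runs the completion. The types and helpers below are simplified stand-ins, not the kernel definitions.

/*
 * Sketch only: several read_data requests share one pgio_header; the
 * submitter also holds a reference, and the last reference dropped
 * runs the completion.  A plain int stands in for the kernel atomic_t.
 */
#include <stdio.h>
#include <stdlib.h>

struct pgio_header {
        int refcnt;                     /* atomic_t hdr->refcnt in the patch */
};

struct read_data {
        struct pgio_header *header;
};

static void read_completion(struct pgio_header *hdr)
{
        printf("last reference dropped: completing header\n");
}

static struct read_data *readdata_alloc(struct pgio_header *hdr)
{
        struct read_data *data = calloc(1, sizeof(*data));

        if (data) {
                data->header = hdr;
                hdr->refcnt++;          /* atomic_inc(&hdr->refcnt) */
        }
        return data;
}

static void readdata_release(struct read_data *data)
{
        struct pgio_header *hdr = data->header;

        free(data);
        if (--hdr->refcnt == 0)         /* atomic_dec_and_test(&hdr->refcnt) */
                read_completion(hdr);
}

int main(void)
{
        struct pgio_header hdr = { .refcnt = 1 };   /* submitter's reference */
        struct read_data *a = readdata_alloc(&hdr);
        struct read_data *b = readdata_alloc(&hdr);

        readdata_release(a);            /* rpc_release of the first request */
        readdata_release(b);            /* rpc_release of the second request */
        if (--hdr.refcnt == 0)          /* submitter drops its reference last */
                read_completion(&hdr);
        return 0;
}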
Diffstat (limited to 'fs/nfs/read.c')
-rw-r--r--  fs/nfs/read.c  |  338
1 file changed, 157 insertions(+), 181 deletions(-)
diff --git a/fs/nfs/read.c b/fs/nfs/read.c
index f6ab30b5a462..c9633b2501bd 100644
--- a/fs/nfs/read.c
+++ b/fs/nfs/read.c
@@ -30,29 +30,49 @@
 #define NFSDBG_FACILITY         NFSDBG_PAGECACHE
 
 static const struct nfs_pageio_ops nfs_pageio_read_ops;
-static const struct rpc_call_ops nfs_read_partial_ops;
-static const struct rpc_call_ops nfs_read_full_ops;
+static const struct rpc_call_ops nfs_read_common_ops;
 
 static struct kmem_cache *nfs_rdata_cachep;
 
-struct nfs_read_header *nfs_readhdr_alloc(unsigned int pagecount)
+struct nfs_read_header *nfs_readhdr_alloc(void)
 {
-        struct nfs_read_header *p;
+        struct nfs_read_header *rhdr;
 
-        p = kmem_cache_zalloc(nfs_rdata_cachep, GFP_KERNEL);
-        if (p) {
-                struct nfs_pgio_header *hdr = &p->header;
-                struct nfs_read_data *data = &p->rpc_data;
+        rhdr = kmem_cache_zalloc(nfs_rdata_cachep, GFP_KERNEL);
+        if (rhdr) {
+                struct nfs_pgio_header *hdr = &rhdr->header;
 
                 INIT_LIST_HEAD(&hdr->pages);
-                INIT_LIST_HEAD(&data->list);
+                INIT_LIST_HEAD(&hdr->rpc_list);
+                spin_lock_init(&hdr->lock);
+                atomic_set(&hdr->refcnt, 0);
+        }
+        return rhdr;
+}
+
+struct nfs_read_data *nfs_readdata_alloc(struct nfs_pgio_header *hdr,
+                                         unsigned int pagecount)
+{
+        struct nfs_read_data *data, *prealloc;
+
+        prealloc = &container_of(hdr, struct nfs_read_header, header)->rpc_data;
+        if (prealloc->header == NULL)
+                data = prealloc;
+        else
+                data = kzalloc(sizeof(*data), GFP_KERNEL);
+        if (!data)
+                goto out;
+
+        if (nfs_pgarray_set(&data->pages, pagecount)) {
                 data->header = hdr;
-                if (!nfs_pgarray_set(&data->pages, pagecount)) {
-                        kmem_cache_free(nfs_rdata_cachep, p);
-                        p = NULL;
-                }
+                atomic_inc(&hdr->refcnt);
+        } else {
+                if (data != prealloc)
+                        kfree(data);
+                data = NULL;
         }
-        return p;
+out:
+        return data;
 }
 
 void nfs_readhdr_free(struct nfs_pgio_header *hdr)
@@ -64,10 +84,18 @@ void nfs_readhdr_free(struct nfs_pgio_header *hdr)
 
 void nfs_readdata_release(struct nfs_read_data *rdata)
 {
+        struct nfs_pgio_header *hdr = rdata->header;
+        struct nfs_read_header *read_header = container_of(hdr, struct nfs_read_header, header);
+
         put_nfs_open_context(rdata->args.context);
         if (rdata->pages.pagevec != rdata->pages.page_array)
                 kfree(rdata->pages.pagevec);
-        nfs_readhdr_free(rdata->header);
+        if (rdata != &read_header->rpc_data)
+                kfree(rdata);
+        else
+                rdata->header = NULL;
+        if (atomic_dec_and_test(&hdr->refcnt))
+                nfs_read_completion(hdr);
 }
 
 static
@@ -79,35 +107,6 @@ int nfs_return_empty_page(struct page *page)
         return 0;
 }
 
-static void nfs_readpage_truncate_uninitialised_page(struct nfs_read_data *data)
-{
-        unsigned int remainder = data->args.count - data->res.count;
-        unsigned int base = data->args.pgbase + data->res.count;
-        unsigned int pglen;
-        struct page **pages;
-
-        if (data->res.eof == 0 || remainder == 0)
-                return;
-        /*
-         * Note: "remainder" can never be negative, since we check for
-         * this in the XDR code.
-         */
-        pages = &data->args.pages[base >> PAGE_CACHE_SHIFT];
-        base &= ~PAGE_CACHE_MASK;
-        pglen = PAGE_CACHE_SIZE - base;
-        for (;;) {
-                if (remainder <= pglen) {
-                        zero_user(*pages, base, remainder);
-                        break;
-                }
-                zero_user(*pages, base, pglen);
-                pages++;
-                remainder -= pglen;
-                pglen = PAGE_CACHE_SIZE;
-                base = 0;
-        }
-}
-
 void nfs_pageio_init_read_mds(struct nfs_pageio_descriptor *pgio,
                 struct inode *inode)
 {
@@ -170,6 +169,46 @@ static void nfs_readpage_release(struct nfs_page *req)
         nfs_release_request(req);
 }
 
+/* Note io was page aligned */
+void nfs_read_completion(struct nfs_pgio_header *hdr)
+{
+        unsigned long bytes = 0;
+
+        if (test_bit(NFS_IOHDR_REDO, &hdr->flags))
+                goto out;
+        if (!test_bit(NFS_IOHDR_ERROR, &hdr->flags)) {
+                while (!list_empty(&hdr->pages)) {
+                        struct nfs_page *req = nfs_list_entry(hdr->pages.next);
+                        struct page *page = req->wb_page;
+
+                        if (test_bit(NFS_IOHDR_EOF, &hdr->flags)) {
+                                if (bytes > hdr->good_bytes)
+                                        zero_user(page, 0, PAGE_SIZE);
+                                else if (hdr->good_bytes - bytes < PAGE_SIZE)
+                                        zero_user_segment(page,
+                                                hdr->good_bytes & ~PAGE_MASK,
+                                                PAGE_SIZE);
+                        }
+                        SetPageUptodate(page);
+                        nfs_list_remove_request(req);
+                        nfs_readpage_release(req);
+                        bytes += PAGE_SIZE;
+                }
+        } else {
+                while (!list_empty(&hdr->pages)) {
+                        struct nfs_page *req = nfs_list_entry(hdr->pages.next);
+
+                        bytes += req->wb_bytes;
+                        if (bytes <= hdr->good_bytes)
+                                SetPageUptodate(req->wb_page);
+                        nfs_list_remove_request(req);
+                        nfs_readpage_release(req);
+                }
+        }
+out:
+        hdr->release(hdr);
+}
+
 int nfs_initiate_read(struct rpc_clnt *clnt,
                       struct nfs_read_data *data,
                       const struct rpc_call_ops *call_ops)
@@ -214,16 +253,12 @@ EXPORT_SYMBOL_GPL(nfs_initiate_read);
 /*
  * Set up the NFS read request struct
  */
-static void nfs_read_rpcsetup(struct nfs_page *req, struct nfs_read_data *data,
+static void nfs_read_rpcsetup(struct nfs_read_data *data,
                 unsigned int count, unsigned int offset)
 {
-        struct inode *inode = data->header->inode;
-
-        data->header->req = req;
-        data->header->inode = inode;
-        data->header->cred = req->wb_context->cred;
+        struct nfs_page *req = data->header->req;
 
-        data->args.fh = NFS_FH(inode);
+        data->args.fh = NFS_FH(data->header->inode);
         data->args.offset = req_offset(req) + offset;
         data->args.pgbase = req->wb_pgbase + offset;
         data->args.pages = data->pages.pagevec;
@@ -255,7 +290,7 @@ nfs_do_multiple_reads(struct list_head *head,
         while (!list_empty(head)) {
                 int ret2;
 
-                data = list_entry(head->next, struct nfs_read_data, list);
+                data = list_first_entry(head, struct nfs_read_data, list);
                 list_del_init(&data->list);
 
                 ret2 = nfs_do_read(data, call_ops);
@@ -265,7 +300,7 @@
         return ret;
 }
 
-static void
+void
 nfs_async_read_error(struct list_head *head)
 {
         struct nfs_page *req;
@@ -290,11 +325,11 @@ nfs_async_read_error(struct list_head *head)
  * won't see the new data until our attribute cache is updated. This is more
  * or less conventional NFS client behavior.
  */
-static int nfs_pagein_multi(struct nfs_pageio_descriptor *desc, struct list_head *res)
+static int nfs_pagein_multi(struct nfs_pageio_descriptor *desc,
+                            struct nfs_pgio_header *hdr)
 {
-        struct nfs_page *req = nfs_list_entry(desc->pg_list.next);
+        struct nfs_page *req = hdr->req;
         struct page *page = req->wb_page;
-        struct nfs_read_header *rhdr;
         struct nfs_read_data *data;
         size_t rsize = desc->pg_bsize, nbytes;
         unsigned int offset;
@@ -302,85 +337,97 @@ static int nfs_pagein_multi(struct nfs_pageio_descriptor *desc, struct list_head
         int ret = 0;
 
         nfs_list_remove_request(req);
+        nfs_list_add_request(req, &hdr->pages);
 
         offset = 0;
         nbytes = desc->pg_count;
         do {
                 size_t len = min(nbytes,rsize);
 
-                rhdr = nfs_readhdr_alloc(1);
-                if (!rhdr)
+                data = nfs_readdata_alloc(hdr, 1);
+                if (!data)
                         goto out_bad;
-                data = &rhdr->rpc_data;
                 data->pages.pagevec[0] = page;
-                nfs_read_rpcsetup(req, data, len, offset);
-                list_add(&data->list, res);
+                nfs_read_rpcsetup(data, len, offset);
+                list_add(&data->list, &hdr->rpc_list);
                 requests++;
                 nbytes -= len;
                 offset += len;
         } while(nbytes != 0);
-        atomic_set(&req->wb_complete, requests);
-        desc->pg_rpc_callops = &nfs_read_partial_ops;
+        desc->pg_rpc_callops = &nfs_read_common_ops;
         return ret;
 out_bad:
-        while (!list_empty(res)) {
-                data = list_entry(res->next, struct nfs_read_data, list);
+        while (!list_empty(&hdr->rpc_list)) {
+                data = list_first_entry(&hdr->rpc_list, struct nfs_read_data, list);
                 list_del(&data->list);
                 nfs_readdata_release(data);
         }
-        nfs_readpage_release(req);
+        nfs_async_read_error(&hdr->pages);
         return -ENOMEM;
 }
 
-static int nfs_pagein_one(struct nfs_pageio_descriptor *desc, struct list_head *res)
+static int nfs_pagein_one(struct nfs_pageio_descriptor *desc,
+                          struct nfs_pgio_header *hdr)
 {
         struct nfs_page *req;
         struct page **pages;
-        struct nfs_read_header *rhdr;
-        struct nfs_read_data *data;
+        struct nfs_read_data *data;
         struct list_head *head = &desc->pg_list;
         int ret = 0;
 
-        rhdr = nfs_readhdr_alloc(nfs_page_array_len(desc->pg_base,
-                                                   desc->pg_count));
-        if (!rhdr) {
+        data = nfs_readdata_alloc(hdr, nfs_page_array_len(desc->pg_base,
+                                                          desc->pg_count));
+        if (!data) {
                 nfs_async_read_error(head);
                 ret = -ENOMEM;
                 goto out;
         }
 
-        data = &rhdr->rpc_data;
         pages = data->pages.pagevec;
         while (!list_empty(head)) {
                 req = nfs_list_entry(head->next);
                 nfs_list_remove_request(req);
-                nfs_list_add_request(req, &rhdr->header.pages);
+                nfs_list_add_request(req, &hdr->pages);
                 *pages++ = req->wb_page;
         }
-        req = nfs_list_entry(rhdr->header.pages.next);
 
-        nfs_read_rpcsetup(req, data, desc->pg_count, 0);
-        list_add(&data->list, res);
-        desc->pg_rpc_callops = &nfs_read_full_ops;
+        nfs_read_rpcsetup(data, desc->pg_count, 0);
+        list_add(&data->list, &hdr->rpc_list);
+        desc->pg_rpc_callops = &nfs_read_common_ops;
 out:
         return ret;
 }
 
-int nfs_generic_pagein(struct nfs_pageio_descriptor *desc, struct list_head *head)
+int nfs_generic_pagein(struct nfs_pageio_descriptor *desc,
+                       struct nfs_pgio_header *hdr)
 {
         if (desc->pg_bsize < PAGE_CACHE_SIZE)
-                return nfs_pagein_multi(desc, head);
-        return nfs_pagein_one(desc, head);
+                return nfs_pagein_multi(desc, hdr);
+        return nfs_pagein_one(desc, hdr);
 }
 
 static int nfs_generic_pg_readpages(struct nfs_pageio_descriptor *desc)
 {
-        LIST_HEAD(head);
+        struct nfs_read_header *rhdr;
+        struct nfs_pgio_header *hdr;
         int ret;
 
-        ret = nfs_generic_pagein(desc, &head);
+        rhdr = nfs_readhdr_alloc();
+        if (!rhdr) {
+                nfs_async_read_error(&desc->pg_list);
+                return -ENOMEM;
+        }
+        hdr = &rhdr->header;
+        nfs_pgheader_init(desc, hdr, nfs_readhdr_free);
+        atomic_inc(&hdr->refcnt);
+        ret = nfs_generic_pagein(desc, hdr);
         if (ret == 0)
-                ret = nfs_do_multiple_reads(&head, desc->pg_rpc_callops);
+                ret = nfs_do_multiple_reads(&hdr->rpc_list,
+                                            desc->pg_rpc_callops);
+        else
+                set_bit(NFS_IOHDR_REDO, &hdr->flags);
+        if (atomic_dec_and_test(&hdr->refcnt))
+                nfs_read_completion(hdr);
         return ret;
 }
 
@@ -419,15 +466,13 @@ static void nfs_readpage_retry(struct rpc_task *task, struct nfs_read_data *data
         struct nfs_readargs *argp = &data->args;
         struct nfs_readres *resp = &data->res;
 
-        if (resp->eof || resp->count == argp->count)
-                return;
-
         /* This is a short read! */
         nfs_inc_stats(data->header->inode, NFSIOS_SHORTREAD);
         /* Has the server at least made some progress? */
-        if (resp->count == 0)
+        if (resp->count == 0) {
+                nfs_set_pgio_error(data->header, -EIO, argp->offset);
                 return;
-
+        }
         /* Yes, so retry the read at the end of the data */
         data->mds_offset += resp->count;
         argp->offset += resp->count;
@@ -436,38 +481,34 @@
         rpc_restart_call_prepare(task);
 }
 
-/*
- * Handle a read reply that fills part of a page.
- */
-static void nfs_readpage_result_partial(struct rpc_task *task, void *calldata)
+static void nfs_readpage_result_common(struct rpc_task *task, void *calldata)
 {
         struct nfs_read_data *data = calldata;
+        struct nfs_pgio_header *hdr = data->header;
 
+        /* Note the only returns of nfs_readpage_result are 0 and -EAGAIN */
         if (nfs_readpage_result(task, data) != 0)
                 return;
         if (task->tk_status < 0)
-                return;
-
-        nfs_readpage_truncate_uninitialised_page(data);
-        nfs_readpage_retry(task, data);
+                nfs_set_pgio_error(hdr, task->tk_status, data->args.offset);
+        else if (data->res.eof) {
+                loff_t bound;
+
+                bound = data->args.offset + data->res.count;
+                spin_lock(&hdr->lock);
+                if (bound < hdr->io_start + hdr->good_bytes) {
+                        set_bit(NFS_IOHDR_EOF, &hdr->flags);
+                        clear_bit(NFS_IOHDR_ERROR, &hdr->flags);
+                        hdr->good_bytes = bound - hdr->io_start;
+                }
+                spin_unlock(&hdr->lock);
+        } else if (data->res.count != data->args.count)
+                nfs_readpage_retry(task, data);
 }
 
-static void nfs_readpage_release_partial(void *calldata)
+static void nfs_readpage_release_common(void *calldata)
 {
-        struct nfs_read_data *data = calldata;
-        struct nfs_page *req = data->header->req;
-        struct page *page = req->wb_page;
-        int status = data->task.tk_status;
-
-        if (status < 0)
-                set_bit(PG_PARTIAL_READ_FAILED, &req->wb_flags);
-
-        if (atomic_dec_and_test(&req->wb_complete)) {
-                if (!test_bit(PG_PARTIAL_READ_FAILED, &req->wb_flags))
-                        SetPageUptodate(page);
-                nfs_readpage_release(req);
-        }
-        nfs_readdata_release(data);
+        nfs_readdata_release(calldata);
 }
 
 void nfs_read_prepare(struct rpc_task *task, void *calldata)
@@ -476,75 +517,10 @@ void nfs_read_prepare(struct rpc_task *task, void *calldata)
         NFS_PROTO(data->header->inode)->read_rpc_prepare(task, data);
 }
 
-static const struct rpc_call_ops nfs_read_partial_ops = {
-        .rpc_call_prepare = nfs_read_prepare,
-        .rpc_call_done = nfs_readpage_result_partial,
-        .rpc_release = nfs_readpage_release_partial,
-};
-
-static void nfs_readpage_set_pages_uptodate(struct nfs_read_data *data)
-{
-        unsigned int count = data->res.count;
-        unsigned int base = data->args.pgbase;
-        struct page **pages;
-
-        if (data->res.eof)
-                count = data->args.count;
-        if (unlikely(count == 0))
-                return;
-        pages = &data->args.pages[base >> PAGE_CACHE_SHIFT];
-        base &= ~PAGE_CACHE_MASK;
-        count += base;
-        for (;count >= PAGE_CACHE_SIZE; count -= PAGE_CACHE_SIZE, pages++)
-                SetPageUptodate(*pages);
-        if (count == 0)
-                return;
-        /* Was this a short read? */
-        if (data->res.eof || data->res.count == data->args.count)
-                SetPageUptodate(*pages);
-}
-
-/*
- * This is the callback from RPC telling us whether a reply was
- * received or some error occurred (timeout or socket shutdown).
- */
-static void nfs_readpage_result_full(struct rpc_task *task, void *calldata)
-{
-        struct nfs_read_data *data = calldata;
-
-        if (nfs_readpage_result(task, data) != 0)
-                return;
-        if (task->tk_status < 0)
-                return;
-        /*
-         * Note: nfs_readpage_retry may change the values of
-         * data->args. In the multi-page case, we therefore need
-         * to ensure that we call nfs_readpage_set_pages_uptodate()
-         * first.
-         */
-        nfs_readpage_truncate_uninitialised_page(data);
-        nfs_readpage_set_pages_uptodate(data);
-        nfs_readpage_retry(task, data);
-}
-
-static void nfs_readpage_release_full(void *calldata)
-{
-        struct nfs_read_data *data = calldata;
-        struct nfs_pgio_header *hdr = data->header;
-
-        while (!list_empty(&hdr->pages)) {
-                struct nfs_page *req = nfs_list_entry(hdr->pages.next);
-
-                nfs_list_remove_request(req);
-                nfs_readpage_release(req);
-        }
-        nfs_readdata_release(calldata);
-}
-
-static const struct rpc_call_ops nfs_read_full_ops = {
+static const struct rpc_call_ops nfs_read_common_ops = {
         .rpc_call_prepare = nfs_read_prepare,
-        .rpc_call_done = nfs_readpage_result_full,
-        .rpc_release = nfs_readpage_release_full,
+        .rpc_call_done = nfs_readpage_result_common,
+        .rpc_release = nfs_readpage_release_common,
 };
 
 /*
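
The EOF branch of the new nfs_read_completion() zeroes every page lying wholly beyond hdr->good_bytes and the tail of the page that straddles it. A small user-space check of that arithmetic, assuming a 4 KiB page; PAGE_SIZE and PAGE_MASK here are stand-ins for the kernel macros:

#include <stdio.h>

#define PAGE_SIZE 4096UL
#define PAGE_MASK (~(PAGE_SIZE - 1))

int main(void)
{
        unsigned long good_bytes = 6000;        /* 6000 good bytes of a 3-page read */
        unsigned long bytes = 0;
        unsigned int page;

        for (page = 0; page < 3; page++, bytes += PAGE_SIZE) {
                if (bytes > good_bytes)                 /* page entirely past EOF */
                        printf("page %u: zero whole page\n", page);
                else if (good_bytes - bytes < PAGE_SIZE)        /* straddling page */
                        printf("page %u: zero from offset %lu to end\n",
                               page, good_bytes & ~PAGE_MASK);
                else                                    /* fully valid page */
                        printf("page %u: fully valid\n", page);
        }
        /* prints: page 0 fully valid; page 1 zero from 1904; page 2 zero whole page */
        return 0;
}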