Diffstat (limited to 'fs/nfs')
-rw-r--r--   fs/nfs/direct.c   19
-rw-r--r--   fs/nfs/read.c     53
2 files changed, 46 insertions(+), 26 deletions(-)
diff --git a/fs/nfs/direct.c b/fs/nfs/direct.c
index e44200579c8d..9d9085b93a32 100644
--- a/fs/nfs/direct.c
+++ b/fs/nfs/direct.c
@@ -229,14 +229,20 @@ static void nfs_direct_complete(struct nfs_direct_req *dreq)
 static void nfs_direct_read_result(struct rpc_task *task, void *calldata)
 {
         struct nfs_read_data *data = calldata;
-        struct nfs_direct_req *dreq = (struct nfs_direct_req *) data->req;
 
-        if (nfs_readpage_result(task, data) != 0)
-                return;
+        nfs_readpage_result(task, data);
+}
+
+static void nfs_direct_read_release(void *calldata)
+{
+
+        struct nfs_read_data *data = calldata;
+        struct nfs_direct_req *dreq = (struct nfs_direct_req *) data->req;
+        int status = data->task.tk_status;
 
         spin_lock(&dreq->lock);
-        if (unlikely(task->tk_status < 0)) {
-                dreq->error = task->tk_status;
+        if (unlikely(status < 0)) {
+                dreq->error = status;
                 spin_unlock(&dreq->lock);
         } else {
                 dreq->count += data->res.count;
@@ -249,11 +255,12 @@ static void nfs_direct_read_result(struct rpc_task *task, void *calldata)
 
         if (put_dreq(dreq))
                 nfs_direct_complete(dreq);
+        nfs_readdata_release(calldata);
 }
 
 static const struct rpc_call_ops nfs_read_direct_ops = {
         .rpc_call_done = nfs_direct_read_result,
-        .rpc_release = nfs_readdata_release,
+        .rpc_release = nfs_direct_read_release,
 };
 
 /*
diff --git a/fs/nfs/read.c b/fs/nfs/read.c
index d333f5fedca1..6f9208a549a0 100644
--- a/fs/nfs/read.c
+++ b/fs/nfs/read.c
@@ -338,26 +338,25 @@ int nfs_readpage_result(struct rpc_task *task, struct nfs_read_data *data)
         return 0;
 }
 
-static int nfs_readpage_retry(struct rpc_task *task, struct nfs_read_data *data)
+static void nfs_readpage_retry(struct rpc_task *task, struct nfs_read_data *data)
 {
         struct nfs_readargs *argp = &data->args;
         struct nfs_readres *resp = &data->res;
 
         if (resp->eof || resp->count == argp->count)
-                return 0;
+                return;
 
         /* This is a short read! */
         nfs_inc_stats(data->inode, NFSIOS_SHORTREAD);
         /* Has the server at least made some progress? */
         if (resp->count == 0)
-                return 0;
+                return;
 
         /* Yes, so retry the read at the end of the data */
         argp->offset += resp->count;
         argp->pgbase += resp->count;
         argp->count -= resp->count;
         rpc_restart_call(task);
-        return -EAGAIN;
 }
 
 /*
@@ -366,29 +365,37 @@ static int nfs_readpage_retry(struct rpc_task *task, struct nfs_read_data *data)
 static void nfs_readpage_result_partial(struct rpc_task *task, void *calldata)
 {
         struct nfs_read_data *data = calldata;
-        struct nfs_page *req = data->req;
-        struct page *page = req->wb_page;
 
         if (nfs_readpage_result(task, data) != 0)
                 return;
+        if (task->tk_status < 0)
+                return;
 
-        if (likely(task->tk_status >= 0)) {
-                nfs_readpage_truncate_uninitialised_page(data);
-                if (nfs_readpage_retry(task, data) != 0)
-                        return;
-        }
-        if (unlikely(task->tk_status < 0))
+        nfs_readpage_truncate_uninitialised_page(data);
+        nfs_readpage_retry(task, data);
+}
+
+static void nfs_readpage_release_partial(void *calldata)
+{
+        struct nfs_read_data *data = calldata;
+        struct nfs_page *req = data->req;
+        struct page *page = req->wb_page;
+        int status = data->task.tk_status;
+
+        if (status < 0)
                 SetPageError(page);
+
         if (atomic_dec_and_test(&req->wb_complete)) {
                 if (!PageError(page))
                         SetPageUptodate(page);
                 nfs_readpage_release(req);
         }
+        nfs_readdata_release(calldata);
 }
 
 static const struct rpc_call_ops nfs_read_partial_ops = {
         .rpc_call_done = nfs_readpage_result_partial,
-        .rpc_release = nfs_readdata_release,
+        .rpc_release = nfs_readpage_release_partial,
 };
 
 static void nfs_readpage_set_pages_uptodate(struct nfs_read_data *data)
@@ -423,29 +430,35 @@ static void nfs_readpage_result_full(struct rpc_task *task, void *calldata)
 
         if (nfs_readpage_result(task, data) != 0)
                 return;
+        if (task->tk_status < 0)
+                return;
         /*
          * Note: nfs_readpage_retry may change the values of
          * data->args. In the multi-page case, we therefore need
          * to ensure that we call nfs_readpage_set_pages_uptodate()
          * first.
          */
-        if (likely(task->tk_status >= 0)) {
-                nfs_readpage_truncate_uninitialised_page(data);
-                nfs_readpage_set_pages_uptodate(data);
-                if (nfs_readpage_retry(task, data) != 0)
-                        return;
-        }
+        nfs_readpage_truncate_uninitialised_page(data);
+        nfs_readpage_set_pages_uptodate(data);
+        nfs_readpage_retry(task, data);
+}
+
+static void nfs_readpage_release_full(void *calldata)
+{
+        struct nfs_read_data *data = calldata;
+
         while (!list_empty(&data->pages)) {
                 struct nfs_page *req = nfs_list_entry(data->pages.next);
 
                 nfs_list_remove_request(req);
                 nfs_readpage_release(req);
         }
+        nfs_readdata_release(calldata);
 }
 
 static const struct rpc_call_ops nfs_read_full_ops = {
         .rpc_call_done = nfs_readpage_result_full,
-        .rpc_release = nfs_readdata_release,
+        .rpc_release = nfs_readpage_release_full,
 };
 
 /*
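The pattern in both files is the same: interpreting the RPC result stays in the .rpc_call_done handler, which can run more than once if rpc_restart_call() retries a short read, while freeing the request data moves into the .rpc_release handler, which runs exactly once when the task is finally torn down. The stand-alone C sketch below illustrates that split outside the kernel; it is not NFS or SUNRPC code, and every name in it (async_task, demo_call_ops, demo_call_done, demo_release) is invented for illustration.

/* Sketch only: "interpret the result" vs. "free the resources",
 * mirroring the .rpc_call_done / .rpc_release split in the patch above. */
#include <stdio.h>
#include <stdlib.h>

struct async_task {
        int status;       /* result of the most recent attempt */
        int error;        /* first fatal error seen (cf. dreq->error) */
        int restarted;    /* set when call_done asks for another attempt */
        char *buffer;     /* resource that must be freed exactly once */
};

struct demo_call_ops {
        void (*call_done)(struct async_task *task, void *calldata);
        void (*release)(void *calldata);
};

/* May run several times: once per attempt, including after a retry. */
static void demo_call_done(struct async_task *task, void *calldata)
{
        struct async_task *data = calldata;

        if (task->status > 0) {
                /* "Short read": ask the engine to restart the call. */
                data->restarted = 1;
                return;
        }
        if (task->status < 0)
                data->error = task->status;
}

/* Runs exactly once, after the final attempt: tear everything down here. */
static void demo_release(void *calldata)
{
        struct async_task *data = calldata;

        free(data->buffer);
        data->buffer = NULL;
}

static const struct demo_call_ops demo_ops = {
        .call_done = demo_call_done,
        .release   = demo_release,
};

int main(void)
{
        struct async_task t = { .status = 1, .buffer = malloc(16) };

        do {
                t.restarted = 0;
                demo_ops.call_done(&t, &t);   /* interpret this attempt */
                if (t.restarted)
                        t.status = 0;         /* pretend the retry completes */
        } while (t.restarted);

        demo_ops.release(&t);                 /* resources dropped exactly once */
        printf("error = %d\n", t.error);
        return 0;
}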