diff options
| author | Jens Axboe <axboe@suse.de> | 2006-04-19 09:57:31 -0400 |
|---|---|---|
| committer | Jens Axboe <axboe@suse.de> | 2006-04-19 09:57:31 -0400 |
| commit | 9e0267c26e237f84f608a68e579bf4eb89dad819 (patch) | |
| tree | 9ea854e8e97113afbf33dfb787171950cfb53df6 | |
| parent | a4514ebd8e12c63c09ab02be518db545bd1d24af (diff) | |
[PATCH] splice: fixup writeout path after ->map changes
Since ->map() no longer locks the page, we need to adjust the handling
of those pages (and stealing) a little. This now passes full regressions
again.
Signed-off-by: Jens Axboe <axboe@suse.de>
| -rw-r--r-- | fs/splice.c | 49 |
1 file changed, 30 insertions, 19 deletions
diff --git a/fs/splice.c b/fs/splice.c index 27d6408ff490..22fac87e90b3 100644 --- a/fs/splice.c +++ b/fs/splice.c | |||
| @@ -50,7 +50,8 @@ static int page_cache_pipe_buf_steal(struct pipe_inode_info *info, | |||
| 50 | struct page *page = buf->page; | 50 | struct page *page = buf->page; |
| 51 | struct address_space *mapping = page_mapping(page); | 51 | struct address_space *mapping = page_mapping(page); |
| 52 | 52 | ||
| 53 | WARN_ON(!PageLocked(page)); | 53 | lock_page(page); |
| 54 | |||
| 54 | WARN_ON(!PageUptodate(page)); | 55 | WARN_ON(!PageUptodate(page)); |
| 55 | 56 | ||
| 56 | /* | 57 | /* |
| @@ -65,8 +66,10 @@ static int page_cache_pipe_buf_steal(struct pipe_inode_info *info, | |||
| 65 | if (PagePrivate(page)) | 66 | if (PagePrivate(page)) |
| 66 | try_to_release_page(page, mapping_gfp_mask(mapping)); | 67 | try_to_release_page(page, mapping_gfp_mask(mapping)); |
| 67 | 68 | ||
| 68 | if (!remove_mapping(mapping, page)) | 69 | if (!remove_mapping(mapping, page)) { |
| 70 | unlock_page(page); | ||
| 69 | return 1; | 71 | return 1; |
| 72 | } | ||
| 70 | 73 | ||
| 71 | buf->flags |= PIPE_BUF_FLAG_STOLEN | PIPE_BUF_FLAG_LRU; | 74 | buf->flags |= PIPE_BUF_FLAG_STOLEN | PIPE_BUF_FLAG_LRU; |
| 72 | return 0; | 75 | return 0; |
| @@ -507,14 +510,12 @@ static int pipe_to_file(struct pipe_inode_info *info, struct pipe_buffer *buf, | |||
| 507 | if (sd->flags & SPLICE_F_MOVE) { | 510 | if (sd->flags & SPLICE_F_MOVE) { |
| 508 | /* | 511 | /* |
| 509 | * If steal succeeds, buf->page is now pruned from the vm | 512 | * If steal succeeds, buf->page is now pruned from the vm |
| 510 | * side (LRU and page cache) and we can reuse it. | 513 | * side (LRU and page cache) and we can reuse it. The page |
| 514 | * will also be locked on successful return. | ||
| 511 | */ | 515 | */ |
| 512 | if (buf->ops->steal(info, buf)) | 516 | if (buf->ops->steal(info, buf)) |
| 513 | goto find_page; | 517 | goto find_page; |
| 514 | 518 | ||
| 515 | /* | ||
| 516 | * this will also set the page locked | ||
| 517 | */ | ||
| 518 | page = buf->page; | 519 | page = buf->page; |
| 519 | if (add_to_page_cache(page, mapping, index, gfp_mask)) | 520 | if (add_to_page_cache(page, mapping, index, gfp_mask)) |
| 520 | goto find_page; | 521 | goto find_page; |
| @@ -523,15 +524,27 @@ static int pipe_to_file(struct pipe_inode_info *info, struct pipe_buffer *buf, | |||
| 523 | lru_cache_add(page); | 524 | lru_cache_add(page); |
| 524 | } else { | 525 | } else { |
| 525 | find_page: | 526 | find_page: |
| 526 | ret = -ENOMEM; | 527 | page = find_lock_page(mapping, index); |
| 527 | page = find_or_create_page(mapping, index, gfp_mask); | 528 | if (!page) { |
| 528 | if (!page) | 529 | ret = -ENOMEM; |
| 529 | goto out_nomem; | 530 | page = page_cache_alloc_cold(mapping); |
| 531 | if (unlikely(!page)) | ||
| 532 | goto out_nomem; | ||
| 533 | |||
| 534 | /* | ||
| 535 | * This will also lock the page | ||
| 536 | */ | ||
| 537 | ret = add_to_page_cache_lru(page, mapping, index, | ||
| 538 | gfp_mask); | ||
| 539 | if (unlikely(ret)) | ||
| 540 | goto out; | ||
| 541 | } | ||
| 530 | 542 | ||
| 531 | /* | 543 | /* |
| 532 | * If the page is uptodate, it is also locked. If it isn't | 544 | * We get here with the page locked. If the page is also |
| 533 | * uptodate, we can mark it uptodate if we are filling the | 545 | * uptodate, we don't need to do more. If it isn't, we |
| 534 | * full page. Otherwise we need to read it in first... | 546 | * may need to bring it in if we are not going to overwrite |
| 547 | * the full page. | ||
| 535 | */ | 548 | */ |
| 536 | if (!PageUptodate(page)) { | 549 | if (!PageUptodate(page)) { |
| 537 | if (sd->len < PAGE_CACHE_SIZE) { | 550 | if (sd->len < PAGE_CACHE_SIZE) { |
| @@ -553,10 +566,8 @@ find_page: | |||
| 553 | ret = -EIO; | 566 | ret = -EIO; |
| 554 | goto out; | 567 | goto out; |
| 555 | } | 568 | } |
| 556 | } else { | 569 | } else |
| 557 | WARN_ON(!PageLocked(page)); | ||
| 558 | SetPageUptodate(page); | 570 | SetPageUptodate(page); |
| 559 | } | ||
| 560 | } | 571 | } |
| 561 | } | 572 | } |
| 562 | 573 | ||
| @@ -585,10 +596,10 @@ find_page: | |||
| 585 | mark_page_accessed(page); | 596 | mark_page_accessed(page); |
| 586 | balance_dirty_pages_ratelimited(mapping); | 597 | balance_dirty_pages_ratelimited(mapping); |
| 587 | out: | 598 | out: |
| 588 | if (!(buf->flags & PIPE_BUF_FLAG_STOLEN)) { | 599 | if (!(buf->flags & PIPE_BUF_FLAG_STOLEN)) |
| 589 | page_cache_release(page); | 600 | page_cache_release(page); |
| 590 | unlock_page(page); | 601 | |
| 591 | } | 602 | unlock_page(page); |
| 592 | out_nomem: | 603 | out_nomem: |
| 593 | buf->ops->unmap(info, buf); | 604 | buf->ops->unmap(info, buf); |
| 594 | return ret; | 605 | return ret; |
