author     Weston Andros Adamson <dros@primarydata.com>        2014-06-09 11:48:35 -0400
committer  Trond Myklebust <trond.myklebust@primarydata.com>   2014-06-24 18:47:00 -0400
commit     d45f60c67848b9f19160692581d78e5b4757a000
tree       5f31965abe5bd21e73eae10454db74fe6ce901e9 /fs/nfs/blocklayout
parent     823b0c9d9800e712374cda89ac3565bd29f6701b
nfs: merge nfs_pgio_data into _header
struct nfs_pgio_data only exists as a member of nfs_pgio_header, but is
passed around everywhere, because there used to be multiple _data structs
per _header. Many of these functions then use the _data to find a pointer
to the _header. This patch cleans this up by merging the nfs_pgio_data
structure into nfs_pgio_header and passing nfs_pgio_header around instead.
Reviewed-by: Christoph Hellwig <hch@lst.de>
Signed-off-by: Weston Andros Adamson <dros@primarydata.com>
Signed-off-by: Trond Myklebust <trond.myklebust@primarydata.com>
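
To make the shape of the change concrete, here is a minimal standalone sketch of the before/after calling pattern. The struct layouts and field names below are simplified stand-ins for illustration only, not the real NFS definitions (which live in include/linux/nfs_xdr.h and carry many more fields).

#include <stdio.h>

/* Simplified stand-in for struct nfs_pgio_args. */
struct nfs_pgio_args {
        unsigned long offset;
        unsigned int count;
};

/* Simplified stand-in for struct nfs_pgio_header after the merge: the
 * fields that used to sit in the separate nfs_pgio_data now live here. */
struct nfs_pgio_header {
        int pnfs_error;                 /* error reported by the layout driver */
        struct nfs_pgio_args args;      /* formerly reached via data->args */
};

/*
 * Before the merge, a completion callback was handed the per-I/O data
 * struct and had to chase data->header for shared state, roughly:
 *
 *      struct nfs_pgio_data *rdata = data;
 *      rdata->task.tk_status = rdata->header->pnfs_error;
 *
 * After the merge it is handed the header directly, one dereference fewer.
 */
static void end_par_io_read(void *data)
{
        struct nfs_pgio_header *hdr = data;

        printf("status %d for %u bytes at offset %lu\n",
               hdr->pnfs_error, hdr->args.count, hdr->args.offset);
}

int main(void)
{
        struct nfs_pgio_header hdr = {
                .pnfs_error = 0,
                .args = { .offset = 4096, .count = 512 },
        };

        end_par_io_read(&hdr);
        return 0;
}
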
Diffstat (limited to 'fs/nfs/blocklayout')
-rw-r--r--   fs/nfs/blocklayout/blocklayout.c   98
1 file changed, 47 insertions(+), 51 deletions(-)
diff --git a/fs/nfs/blocklayout/blocklayout.c b/fs/nfs/blocklayout/blocklayout.c
index 36b01cef849e..c3ccfe440390 100644
--- a/fs/nfs/blocklayout/blocklayout.c
+++ b/fs/nfs/blocklayout/blocklayout.c
@@ -210,8 +210,7 @@ static void bl_end_io_read(struct bio *bio, int err)
                         SetPageUptodate(bvec->bv_page);
 
         if (err) {
-                struct nfs_pgio_data *rdata = par->data;
-                struct nfs_pgio_header *header = rdata->header;
+                struct nfs_pgio_header *header = par->data;
 
                 if (!header->pnfs_error)
                         header->pnfs_error = -EIO;
@@ -224,44 +223,44 @@ static void bl_end_io_read(struct bio *bio, int err)
 static void bl_read_cleanup(struct work_struct *work)
 {
         struct rpc_task *task;
-        struct nfs_pgio_data *rdata;
+        struct nfs_pgio_header *hdr;
         dprintk("%s enter\n", __func__);
         task = container_of(work, struct rpc_task, u.tk_work);
-        rdata = container_of(task, struct nfs_pgio_data, task);
-        pnfs_ld_read_done(rdata);
+        hdr = container_of(task, struct nfs_pgio_header, task);
+        pnfs_ld_read_done(hdr);
 }
 
 static void
 bl_end_par_io_read(void *data, int unused)
 {
-        struct nfs_pgio_data *rdata = data;
+        struct nfs_pgio_header *hdr = data;
 
-        rdata->task.tk_status = rdata->header->pnfs_error;
-        INIT_WORK(&rdata->task.u.tk_work, bl_read_cleanup);
-        schedule_work(&rdata->task.u.tk_work);
+        hdr->task.tk_status = hdr->pnfs_error;
+        INIT_WORK(&hdr->task.u.tk_work, bl_read_cleanup);
+        schedule_work(&hdr->task.u.tk_work);
 }
 
 static enum pnfs_try_status
-bl_read_pagelist(struct nfs_pgio_data *rdata)
+bl_read_pagelist(struct nfs_pgio_header *hdr)
 {
-        struct nfs_pgio_header *header = rdata->header;
+        struct nfs_pgio_header *header = hdr;
         int i, hole;
         struct bio *bio = NULL;
         struct pnfs_block_extent *be = NULL, *cow_read = NULL;
         sector_t isect, extent_length = 0;
         struct parallel_io *par;
-        loff_t f_offset = rdata->args.offset;
-        size_t bytes_left = rdata->args.count;
+        loff_t f_offset = hdr->args.offset;
+        size_t bytes_left = hdr->args.count;
         unsigned int pg_offset, pg_len;
-        struct page **pages = rdata->args.pages;
-        int pg_index = rdata->args.pgbase >> PAGE_CACHE_SHIFT;
+        struct page **pages = hdr->args.pages;
+        int pg_index = hdr->args.pgbase >> PAGE_CACHE_SHIFT;
         const bool is_dio = (header->dreq != NULL);
 
         dprintk("%s enter nr_pages %u offset %lld count %u\n", __func__,
-                rdata->page_array.npages, f_offset,
-                (unsigned int)rdata->args.count);
+                hdr->page_array.npages, f_offset,
+                (unsigned int)hdr->args.count);
 
-        par = alloc_parallel(rdata);
+        par = alloc_parallel(hdr);
         if (!par)
                 goto use_mds;
         par->pnfs_callback = bl_end_par_io_read;
@@ -269,7 +268,7 @@ bl_read_pagelist(struct nfs_pgio_data *rdata)
 
         isect = (sector_t) (f_offset >> SECTOR_SHIFT);
         /* Code assumes extents are page-aligned */
-        for (i = pg_index; i < rdata->page_array.npages; i++) {
+        for (i = pg_index; i < hdr->page_array.npages; i++) {
                 if (!extent_length) {
                         /* We've used up the previous extent */
                         bl_put_extent(be);
@@ -319,7 +318,7 @@ bl_read_pagelist(struct nfs_pgio_data *rdata)
 
                         be_read = (hole && cow_read) ? cow_read : be;
                         bio = do_add_page_to_bio(bio,
-                                                 rdata->page_array.npages - i,
+                                                 hdr->page_array.npages - i,
                                                  READ,
                                                  isect, pages[i], be_read,
                                                  bl_end_io_read, par,
@@ -334,10 +333,10 @@ bl_read_pagelist(struct nfs_pgio_data *rdata)
                 extent_length -= PAGE_CACHE_SECTORS;
         }
         if ((isect << SECTOR_SHIFT) >= header->inode->i_size) {
-                rdata->res.eof = 1;
-                rdata->res.count = header->inode->i_size - rdata->args.offset;
+                hdr->res.eof = 1;
+                hdr->res.count = header->inode->i_size - hdr->args.offset;
         } else {
-                rdata->res.count = (isect << SECTOR_SHIFT) - rdata->args.offset;
+                hdr->res.count = (isect << SECTOR_SHIFT) - hdr->args.offset;
         }
 out:
         bl_put_extent(be);
@@ -392,8 +391,7 @@ static void bl_end_io_write_zero(struct bio *bio, int err)
         }
 
         if (unlikely(err)) {
-                struct nfs_pgio_data *data = par->data;
-                struct nfs_pgio_header *header = data->header;
+                struct nfs_pgio_header *header = par->data;
 
                 if (!header->pnfs_error)
                         header->pnfs_error = -EIO;
@@ -407,8 +405,7 @@ static void bl_end_io_write(struct bio *bio, int err)
 {
         struct parallel_io *par = bio->bi_private;
         const int uptodate = test_bit(BIO_UPTODATE, &bio->bi_flags);
-        struct nfs_pgio_data *data = par->data;
-        struct nfs_pgio_header *header = data->header;
+        struct nfs_pgio_header *header = par->data;
 
         if (!uptodate) {
                 if (!header->pnfs_error)
@@ -425,32 +422,32 @@ static void bl_end_io_write(struct bio *bio, int err)
 static void bl_write_cleanup(struct work_struct *work)
 {
         struct rpc_task *task;
-        struct nfs_pgio_data *wdata;
+        struct nfs_pgio_header *hdr;
         dprintk("%s enter\n", __func__);
         task = container_of(work, struct rpc_task, u.tk_work);
-        wdata = container_of(task, struct nfs_pgio_data, task);
-        if (likely(!wdata->header->pnfs_error)) {
+        hdr = container_of(task, struct nfs_pgio_header, task);
+        if (likely(!hdr->pnfs_error)) {
                 /* Marks for LAYOUTCOMMIT */
-                mark_extents_written(BLK_LSEG2EXT(wdata->header->lseg),
-                                     wdata->args.offset, wdata->args.count);
+                mark_extents_written(BLK_LSEG2EXT(hdr->lseg),
+                                     hdr->args.offset, hdr->args.count);
         }
-        pnfs_ld_write_done(wdata);
+        pnfs_ld_write_done(hdr);
 }
 
 /* Called when last of bios associated with a bl_write_pagelist call finishes */
 static void bl_end_par_io_write(void *data, int num_se)
 {
-        struct nfs_pgio_data *wdata = data;
+        struct nfs_pgio_header *hdr = data;
 
-        if (unlikely(wdata->header->pnfs_error)) {
-                bl_free_short_extents(&BLK_LSEG2EXT(wdata->header->lseg)->bl_inval,
+        if (unlikely(hdr->pnfs_error)) {
+                bl_free_short_extents(&BLK_LSEG2EXT(hdr->lseg)->bl_inval,
                                       num_se);
         }
 
-        wdata->task.tk_status = wdata->header->pnfs_error;
-        wdata->writeverf.committed = NFS_FILE_SYNC;
-        INIT_WORK(&wdata->task.u.tk_work, bl_write_cleanup);
-        schedule_work(&wdata->task.u.tk_work);
+        hdr->task.tk_status = hdr->pnfs_error;
+        hdr->writeverf.committed = NFS_FILE_SYNC;
+        INIT_WORK(&hdr->task.u.tk_work, bl_write_cleanup);
+        schedule_work(&hdr->task.u.tk_work);
 }
 
 /* FIXME STUB - mark intersection of layout and page as bad, so is not
@@ -675,18 +672,17 @@ check_page:
 }
 
 static enum pnfs_try_status
-bl_write_pagelist(struct nfs_pgio_data *wdata, int sync)
+bl_write_pagelist(struct nfs_pgio_header *header, int sync)
 {
-        struct nfs_pgio_header *header = wdata->header;
         int i, ret, npg_zero, pg_index, last = 0;
         struct bio *bio = NULL;
         struct pnfs_block_extent *be = NULL, *cow_read = NULL;
         sector_t isect, last_isect = 0, extent_length = 0;
         struct parallel_io *par = NULL;
-        loff_t offset = wdata->args.offset;
-        size_t count = wdata->args.count;
+        loff_t offset = header->args.offset;
+        size_t count = header->args.count;
         unsigned int pg_offset, pg_len, saved_len;
-        struct page **pages = wdata->args.pages;
+        struct page **pages = header->args.pages;
         struct page *page;
         pgoff_t index;
         u64 temp;
@@ -701,11 +697,11 @@ bl_write_pagelist(struct nfs_pgio_data *wdata, int sync)
                 dprintk("pnfsblock nonblock aligned DIO writes. Resend MDS\n");
                 goto out_mds;
         }
-        /* At this point, wdata->page_aray is a (sequential) list of nfs_pages.
+        /* At this point, header->page_aray is a (sequential) list of nfs_pages.
          * We want to write each, and if there is an error set pnfs_error
          * to have it redone using nfs.
          */
-        par = alloc_parallel(wdata);
+        par = alloc_parallel(header);
         if (!par)
                 goto out_mds;
         par->pnfs_callback = bl_end_par_io_write;
@@ -792,8 +788,8 @@ next_page:
         bio = bl_submit_bio(WRITE, bio);
 
         /* Middle pages */
-        pg_index = wdata->args.pgbase >> PAGE_CACHE_SHIFT;
-        for (i = pg_index; i < wdata->page_array.npages; i++) {
+        pg_index = header->args.pgbase >> PAGE_CACHE_SHIFT;
+        for (i = pg_index; i < header->page_array.npages; i++) {
                 if (!extent_length) {
                         /* We've used up the previous extent */
                         bl_put_extent(be);
@@ -864,7 +860,7 @@ next_page:
                 }
 
 
-                bio = do_add_page_to_bio(bio, wdata->page_array.npages - i,
+                bio = do_add_page_to_bio(bio, header->page_array.npages - i,
                                          WRITE,
                                          isect, pages[i], be,
                                          bl_end_io_write, par,
@@ -893,7 +889,7 @@ next_page:
         }
 
 write_done:
-        wdata->res.count = wdata->args.count;
+        header->res.count = header->args.count;
 out:
         bl_put_extent(be);
         bl_put_extent(cow_read);