author		Jens Axboe <axboe@suse.de>	2006-04-27 05:05:22 -0400
committer	Jens Axboe <axboe@suse.de>	2006-04-27 05:05:22 -0400
commit		eb20796bf6fdb95ccf51440ba2a827149bdc037f (patch)
tree		9443923cbd83f03e797891d12e18bb0e2d51927b /fs
parent		ebf43500ef148a380bd132743c3fc530111ac620 (diff)
[PATCH] splice: make the read-side do batched page lookups
Use the new find_get_pages_contig() to potentially look up the entire
splice range in one single call. This speeds up generic_file_splice_read()
quite a bit.
Signed-off-by: Jens Axboe <axboe@suse.de>
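In essence, the read side now looks up the whole splice range with a single find_get_pages_contig() call and only falls back to a per-index find_get_page()/allocation for whatever that call did not return, since it stops at the first hole. Below is a minimal sketch of that lookup pattern, assuming a 2.6.17-era kernel; the helper name fill_splice_pages() is invented here for illustration and error reporting is trimmed:

#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/pagemap.h>

/* Sketch only: batched page-cache lookup with a fallback fill. */
static unsigned int fill_splice_pages(struct address_space *mapping,
				      pgoff_t index, unsigned int nr_pages,
				      struct page **pages)
{
	unsigned int nr;

	/* One call can grab the whole contiguous range... */
	nr = find_get_pages_contig(mapping, index, nr_pages, pages);

	/* ...but it stops at the first hole, so fill in the rest. */
	index += nr;
	while (nr < nr_pages) {
		struct page *page = find_get_page(mapping, index);

		if (!page) {
			/* Not cached: allocate and insert a new page. */
			page = page_cache_alloc_cold(mapping);
			if (!page)
				break;
			if (add_to_page_cache_lru(page, mapping, index,
						  mapping_gfp_mask(mapping))) {
				page_cache_release(page);
				break;
			}
			/* add_to_page_cache() returns the page locked. */
			unlock_page(page);
		}
		pages[nr++] = page;
		index++;
	}

	return nr;
}

Every page collected this way carries a reference, which is why the rewritten function ends with a loop releasing any pages it did not actually hand off to the pipe.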
Diffstat (limited to 'fs')
-rw-r--r--	fs/splice.c	95
1 file changed, 65 insertions(+), 30 deletions(-)
diff --git a/fs/splice.c b/fs/splice.c
index dc205f643090..a46ddd28561e 100644
--- a/fs/splice.c
+++ b/fs/splice.c
@@ -279,7 +279,7 @@ __generic_file_splice_read(struct file *in, loff_t *ppos,
 	pgoff_t index, end_index;
 	loff_t isize;
 	size_t total_len;
-	int error;
+	int error, page_nr;
 	struct splice_pipe_desc spd = {
 		.pages = pages,
 		.partial = partial,
@@ -307,39 +307,67 @@ __generic_file_splice_read(struct file *in, loff_t *ppos,
 	 */
 	error = 0;
 	total_len = 0;
-	for (spd.nr_pages = 0; spd.nr_pages < nr_pages; spd.nr_pages++, index++) {
-		unsigned int this_len;
 
-		if (!len)
-			break;
+	/*
+	 * Lookup the (hopefully) full range of pages we need.
+	 */
+	spd.nr_pages = find_get_pages_contig(mapping, index, nr_pages, pages);
 
+	/*
+	 * If find_get_pages_contig() returned fewer pages than we needed,
+	 * allocate the rest.
+	 */
+	index += spd.nr_pages;
+	while (spd.nr_pages < nr_pages) {
 		/*
-		 * this_len is the max we'll use from this page
-		 */
-		this_len = min_t(unsigned long, len, PAGE_CACHE_SIZE - loff);
-find_page:
-		/*
-		 * lookup the page for this index
+		 * Page could be there, find_get_pages_contig() breaks on
+		 * the first hole.
 		 */
 		page = find_get_page(mapping, index);
 		if (!page) {
 			/*
-			 * page didn't exist, allocate one
+			 * page didn't exist, allocate one.
 			 */
 			page = page_cache_alloc_cold(mapping);
 			if (!page)
 				break;
 
 			error = add_to_page_cache_lru(page, mapping, index,
					mapping_gfp_mask(mapping));
 			if (unlikely(error)) {
 				page_cache_release(page);
 				break;
 			}
-
-			goto readpage;
+			/*
+			 * add_to_page_cache() locks the page, unlock it
+			 * to avoid convoluting the logic below even more.
+			 */
+			unlock_page(page);
 		}
 
+		pages[spd.nr_pages++] = page;
+		index++;
+	}
+
+	/*
+	 * Now loop over the map and see if we need to start IO on any
+	 * pages, fill in the partial map, etc.
+	 */
+	index = *ppos >> PAGE_CACHE_SHIFT;
+	nr_pages = spd.nr_pages;
+	spd.nr_pages = 0;
+	for (page_nr = 0; page_nr < nr_pages; page_nr++) {
+		unsigned int this_len;
+
+		if (!len)
+			break;
+
+		/*
+		 * this_len is the max we'll use from this page
+		 */
+		this_len = min_t(unsigned long, len, PAGE_CACHE_SIZE - loff);
+		page = pages[page_nr];
+
 		/*
 		 * If the page isn't uptodate, we may need to start io on it
 		 */
@@ -360,7 +388,6 @@ find_page:
 			 */
 			if (!page->mapping) {
 				unlock_page(page);
-				page_cache_release(page);
 				break;
 			}
 			/*
@@ -371,16 +398,20 @@ find_page:
 				goto fill_it;
 			}
 
-readpage:
 			/*
 			 * need to read in the page
 			 */
 			error = mapping->a_ops->readpage(in, page);
-
 			if (unlikely(error)) {
-				page_cache_release(page);
+				/*
+				 * We really should re-lookup the page here,
+				 * but it complicates things a lot. Instead
+				 * lets just do what we already stored, and
+				 * we'll get it the next time we are called.
+				 */
 				if (error == AOP_TRUNCATED_PAGE)
-					goto find_page;
+					error = 0;
+
 				break;
 			}
 
@@ -389,10 +420,8 @@ readpage:
 			 */
 			isize = i_size_read(mapping->host);
 			end_index = (isize - 1) >> PAGE_CACHE_SHIFT;
-			if (unlikely(!isize || index > end_index)) {
-				page_cache_release(page);
+			if (unlikely(!isize || index > end_index))
 				break;
-			}
 
 			/*
 			 * if this is the last page, see if we need to shrink
@@ -400,27 +429,33 @@ readpage:
 			 */
 			if (end_index == index) {
 				loff = PAGE_CACHE_SIZE - (isize & ~PAGE_CACHE_MASK);
-				if (total_len + loff > isize) {
-					page_cache_release(page);
+				if (total_len + loff > isize)
 					break;
-				}
 				/*
 				 * force quit after adding this page
 				 */
-				nr_pages = spd.nr_pages;
+				len = this_len;
 				this_len = min(this_len, loff);
 				loff = 0;
 			}
 		}
 fill_it:
-		pages[spd.nr_pages] = page;
-		partial[spd.nr_pages].offset = loff;
-		partial[spd.nr_pages].len = this_len;
+		partial[page_nr].offset = loff;
+		partial[page_nr].len = this_len;
 		len -= this_len;
 		total_len += this_len;
 		loff = 0;
+		spd.nr_pages++;
+		index++;
 	}
 
+	/*
+	 * Release any pages at the end, if we quit early. 'i' is how far
+	 * we got, 'nr_pages' is how many pages are in the map.
+	 */
+	while (page_nr < nr_pages)
+		page_cache_release(pages[page_nr++]);
+
 	if (spd.nr_pages)
 		return splice_to_pipe(pipe, &spd);
 
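
For context, the pages and partial entries prepared above feed the splice_pipe_desc that the function sets up at the top (the .pages/.partial initializers are visible in the first hunk) and are handed to splice_to_pipe() at the end. A rough sketch of that hand-off, showing only the fields visible in this diff rather than the full initializer from fs/splice.c:

	struct splice_pipe_desc spd = {
		.pages = pages,		/* page references from the batched lookup */
		.partial = partial,	/* per-page offset/len filled by the second loop */
		/* remaining fields (flags, ops, ...) omitted in this sketch */
	};

	/* hand whatever was prepared off to the pipe */
	if (spd.nr_pages)
		return splice_to_pipe(pipe, &spd);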