author		Patrick McHardy <kaber@trash.net>	2011-04-13 07:32:28 -0400
committer	Patrick McHardy <kaber@trash.net>	2011-04-13 07:32:28 -0400
commit		b32e3dc7860d00124fa432dba09667e647cb9bcc (patch)
tree		2fa6e56f389431dfb84609d3d7572cad76e88e71 /fs/xfs/linux-2.6/xfs_buf.c
parent		6604271c5bc658a6067ed0c3deba4d89e0e50382 (diff)
parent		96120d86fe302c006259baee9061eea9e1b9e486 (diff)

Merge branch 'master' of ssh://master.kernel.org/pub/scm/linux/kernel/git/kaber/nf-2.6

Diffstat (limited to 'fs/xfs/linux-2.6/xfs_buf.c')

-rw-r--r--	fs/xfs/linux-2.6/xfs_buf.c	374
1 file changed, 99 insertions(+), 275 deletions(-)
diff --git a/fs/xfs/linux-2.6/xfs_buf.c b/fs/xfs/linux-2.6/xfs_buf.c
index f83a4c830a65..596bb2c9de42 100644
--- a/fs/xfs/linux-2.6/xfs_buf.c
+++ b/fs/xfs/linux-2.6/xfs_buf.c
@@ -94,75 +94,6 @@ xfs_buf_vmap_len(
 }
 
 /*
- * Page Region interfaces.
- *
- * For pages in filesystems where the blocksize is smaller than the
- * pagesize, we use the page->private field (long) to hold a bitmap
- * of uptodate regions within the page.
- *
- * Each such region is "bytes per page / bits per long" bytes long.
- *
- * NBPPR == number-of-bytes-per-page-region
- * BTOPR == bytes-to-page-region (rounded up)
- * BTOPRT == bytes-to-page-region-truncated (rounded down)
- */
-#if (BITS_PER_LONG == 32)
-#define PRSHIFT		(PAGE_CACHE_SHIFT - 5)	/* (32 == 1<<5) */
-#elif (BITS_PER_LONG == 64)
-#define PRSHIFT		(PAGE_CACHE_SHIFT - 6)	/* (64 == 1<<6) */
-#else
-#error BITS_PER_LONG must be 32 or 64
-#endif
-#define NBPPR		(PAGE_CACHE_SIZE/BITS_PER_LONG)
-#define BTOPR(b)	(((unsigned int)(b) + (NBPPR - 1)) >> PRSHIFT)
-#define BTOPRT(b)	(((unsigned int)(b) >> PRSHIFT))
-
-STATIC unsigned long
-page_region_mask(
-	size_t		offset,
-	size_t		length)
-{
-	unsigned long	mask;
-	int		first, final;
-
-	first = BTOPR(offset);
-	final = BTOPRT(offset + length - 1);
-	first = min(first, final);
-
-	mask = ~0UL;
-	mask <<= BITS_PER_LONG - (final - first);
-	mask >>= BITS_PER_LONG - (final);
-
-	ASSERT(offset + length <= PAGE_CACHE_SIZE);
-	ASSERT((final - first) < BITS_PER_LONG && (final - first) >= 0);
-
-	return mask;
-}
-
-STATIC void
-set_page_region(
-	struct page	*page,
-	size_t		offset,
-	size_t		length)
-{
-	set_page_private(page,
-		page_private(page) | page_region_mask(offset, length));
-	if (page_private(page) == ~0UL)
-		SetPageUptodate(page);
-}
-
-STATIC int
-test_page_region(
-	struct page	*page,
-	size_t		offset,
-	size_t		length)
-{
-	unsigned long	mask = page_region_mask(offset, length);
-
-	return (mask && (page_private(page) & mask) == mask);
-}
-
-/*
  * xfs_buf_lru_add - add a buffer to the LRU.
  *
  * The LRU takes a new reference to the buffer so that it will only be freed
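
The block removed above documents the old sub-page uptodate tracking: with a filesystem block size smaller than the page size, page->private held one bit per fixed-size page region, and the page was only marked uptodate once every bit was set. Since buffers are no longer backed by page-cache pages, that bookkeeping loses its last user. A minimal userspace sketch of the idea, assuming a 4096-byte page and 64-bit longs; the mask computation is a cleaned-up illustration, not the kernel's exact page_region_mask():

#include <assert.h>
#include <stdio.h>

#define PAGE_SIZE	4096UL
#define NREGIONS	64UL			/* one bit per region */
#define REGION_SIZE	(PAGE_SIZE / NREGIONS)	/* 64 bytes per region */

/* Bitmap with one bit set per region fully covered by [offset, offset+length). */
static unsigned long
region_mask(size_t offset, size_t length)
{
	size_t first = (offset + REGION_SIZE - 1) / REGION_SIZE; /* round up */
	size_t end = (offset + length) / REGION_SIZE;		 /* round down */

	assert(offset + length <= PAGE_SIZE);
	if (end <= first)
		return 0;
	if (end - first == NREGIONS)
		return ~0UL;	/* avoid undefined full-width shift */
	return ((1UL << (end - first)) - 1) << first;
}

int main(void)
{
	unsigned long uptodate = 0;

	uptodate |= region_mask(0, 2048);	/* first half of the page */
	uptodate |= region_mask(2048, 2048);	/* second half */
	printf("page uptodate: %s\n", uptodate == ~0UL ? "yes" : "no");
	return 0;
}
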
@@ -332,7 +263,7 @@ xfs_buf_free(
 
 	ASSERT(list_empty(&bp->b_lru));
 
-	if (bp->b_flags & (_XBF_PAGE_CACHE|_XBF_PAGES)) {
+	if (bp->b_flags & _XBF_PAGES) {
 		uint		i;
 
 		if (xfs_buf_is_vmapped(bp))
@@ -342,25 +273,22 @@ xfs_buf_free(
 		for (i = 0; i < bp->b_page_count; i++) {
 			struct page	*page = bp->b_pages[i];
 
-			if (bp->b_flags & _XBF_PAGE_CACHE)
-				ASSERT(!PagePrivate(page));
-			page_cache_release(page);
+			__free_page(page);
 		}
-	}
+	} else if (bp->b_flags & _XBF_KMEM)
+		kmem_free(bp->b_addr);
 	_xfs_buf_free_pages(bp);
 	xfs_buf_deallocate(bp);
 }
 
 /*
- * Finds all pages for buffer in question and builds it's page list.
+ * Allocates all the pages for buffer in question and builds it's page list.
  */
 STATIC int
-_xfs_buf_lookup_pages(
+xfs_buf_allocate_memory(
 	xfs_buf_t		*bp,
 	uint			flags)
 {
-	struct address_space	*mapping = bp->b_target->bt_mapping;
-	size_t			blocksize = bp->b_target->bt_bsize;
 	size_t			size = bp->b_count_desired;
 	size_t			nbytes, offset;
 	gfp_t			gfp_mask = xb_to_gfp(flags);
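
The rewritten free path has to undo whichever allocation strategy xfs_buf_allocate_memory chose: release a page array when _XBF_PAGES is set, or a single heap allocation when _XBF_KMEM is set. A small userspace model of this flag-dispatched teardown (names and flag values are invented for illustration):

#include <stdlib.h>

enum {
	BUF_PAGES	= 1 << 0,	/* memory came from a page allocator */
	BUF_KMEM	= 1 << 1,	/* memory came from the heap */
};

struct buf {
	unsigned int	flags;
	void		**pages;	/* used when BUF_PAGES is set */
	int		page_count;
	void		*addr;		/* used when BUF_KMEM is set */
};

/* Free with the strategy that matches how the buffer was allocated. */
static void buf_free(struct buf *bp)
{
	if (bp->flags & BUF_PAGES) {
		for (int i = 0; i < bp->page_count; i++)
			free(bp->pages[i]);
		free(bp->pages);
	} else if (bp->flags & BUF_KMEM) {
		free(bp->addr);
	}
}
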
@@ -369,29 +297,55 @@ _xfs_buf_lookup_pages(
 	xfs_off_t		end;
 	int			error;
 
+	/*
+	 * for buffers that are contained within a single page, just allocate
+	 * the memory from the heap - there's no need for the complexity of
+	 * page arrays to keep allocation down to order 0.
+	 */
+	if (bp->b_buffer_length < PAGE_SIZE) {
+		bp->b_addr = kmem_alloc(bp->b_buffer_length, xb_to_km(flags));
+		if (!bp->b_addr) {
+			/* low memory - use alloc_page loop instead */
+			goto use_alloc_page;
+		}
+
+		if (((unsigned long)(bp->b_addr + bp->b_buffer_length - 1) &
+								PAGE_MASK) !=
+		    ((unsigned long)bp->b_addr & PAGE_MASK)) {
+			/* b_addr spans two pages - use alloc_page instead */
+			kmem_free(bp->b_addr);
+			bp->b_addr = NULL;
+			goto use_alloc_page;
+		}
+		bp->b_offset = offset_in_page(bp->b_addr);
+		bp->b_pages = bp->b_page_array;
+		bp->b_pages[0] = virt_to_page(bp->b_addr);
+		bp->b_page_count = 1;
+		bp->b_flags |= XBF_MAPPED | _XBF_KMEM;
+		return 0;
+	}
+
+use_alloc_page:
 	end = bp->b_file_offset + bp->b_buffer_length;
 	page_count = xfs_buf_btoc(end) - xfs_buf_btoct(bp->b_file_offset);
-
 	error = _xfs_buf_get_pages(bp, page_count, flags);
 	if (unlikely(error))
 		return error;
-	bp->b_flags |= _XBF_PAGE_CACHE;
 
 	offset = bp->b_offset;
-	first = bp->b_file_offset >> PAGE_CACHE_SHIFT;
+	first = bp->b_file_offset >> PAGE_SHIFT;
+	bp->b_flags |= _XBF_PAGES;
 
 	for (i = 0; i < bp->b_page_count; i++) {
 		struct page	*page;
 		uint		retries = 0;
-
-	retry:
-		page = find_or_create_page(mapping, first + i, gfp_mask);
+retry:
+		page = alloc_page(gfp_mask);
 		if (unlikely(page == NULL)) {
 			if (flags & XBF_READ_AHEAD) {
 				bp->b_page_count = i;
-				for (i = 0; i < bp->b_page_count; i++)
-					unlock_page(bp->b_pages[i]);
-				return -ENOMEM;
+				error = ENOMEM;
+				goto out_free_pages;
 			}
@@ -401,9 +355,8 @@ _xfs_buf_lookup_pages(
 			 * handle buffer allocation failures we can't do much.
 			 */
 			if (!(++retries % 100))
-				printk(KERN_ERR
-					"XFS: possible memory allocation "
-					"deadlock in %s (mode:0x%x)\n",
+				xfs_err(NULL,
+		"possible memory allocation deadlock in %s (mode:0x%x)",
 					__func__, gfp_mask);
 
 			XFS_STATS_INC(xb_page_retries);
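
The single-page fast path added above only keeps the heap allocation if the buffer fits entirely inside one page, since the result must still be describable by a single struct page. A userspace sketch of that containment test, assuming 4096-byte pages (the kernel performs the same comparison with PAGE_MASK):

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

#define PAGE_SIZE 4096UL
#define PAGE_MASK (~(PAGE_SIZE - 1))

/* True if [addr, addr + len) lies within a single page. */
static bool fits_in_one_page(const void *addr, size_t len)
{
	uintptr_t first = (uintptr_t)addr & PAGE_MASK;
	uintptr_t last = ((uintptr_t)addr + len - 1) & PAGE_MASK;

	return first == last;
}

int main(void)
{
	void *p = malloc(512);

	/* A small heap allocation may still straddle a page boundary,
	 * so the caller must check and fall back when it does. */
	printf("single page: %s\n", fits_in_one_page(p, 512) ? "yes" : "no");
	free(p);
	return 0;
}
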
@@ -413,33 +366,16 @@ _xfs_buf_lookup_pages(
 
 		XFS_STATS_INC(xb_page_found);
 
-		nbytes = min_t(size_t, size, PAGE_CACHE_SIZE - offset);
+		nbytes = min_t(size_t, size, PAGE_SIZE - offset);
 		size -= nbytes;
-
-		ASSERT(!PagePrivate(page));
-		if (!PageUptodate(page)) {
-			page_count--;
-			if (blocksize >= PAGE_CACHE_SIZE) {
-				if (flags & XBF_READ)
-					bp->b_flags |= _XBF_PAGE_LOCKED;
-			} else if (!PagePrivate(page)) {
-				if (test_page_region(page, offset, nbytes))
-					page_count++;
-			}
-		}
-
 		bp->b_pages[i] = page;
 		offset = 0;
 	}
+	return 0;
 
-	if (!(bp->b_flags & _XBF_PAGE_LOCKED)) {
-		for (i = 0; i < bp->b_page_count; i++)
-			unlock_page(bp->b_pages[i]);
-	}
-
-	if (page_count == bp->b_page_count)
-		bp->b_flags |= XBF_DONE;
-
+out_free_pages:
+	for (i = 0; i < bp->b_page_count; i++)
+		__free_page(bp->b_pages[i]);
 	return error;
 }
 
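
The new out_free_pages label gives the allocation loop one unwind point: b_page_count is trimmed to the number of pages actually obtained, and only those are released. A generic userspace sketch of the same all-or-nothing allocation pattern:

#include <stdlib.h>

/* Allocate n blocks of the given size, or nothing at all.
 * On any failure, free the blocks already obtained and return NULL. */
static void **alloc_all_or_nothing(int n, size_t size)
{
	void **blocks = calloc(n, sizeof(*blocks));
	int i;

	if (!blocks)
		return NULL;
	for (i = 0; i < n; i++) {
		blocks[i] = malloc(size);
		if (!blocks[i])
			goto out_free;
	}
	return blocks;

out_free:
	while (--i >= 0)	/* unwind only what was allocated */
		free(blocks[i]);
	free(blocks);
	return NULL;
}
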
@@ -451,14 +387,23 @@ _xfs_buf_map_pages(
 	xfs_buf_t		*bp,
 	uint			flags)
 {
-	/* A single page buffer is always mappable */
+	ASSERT(bp->b_flags & _XBF_PAGES);
 	if (bp->b_page_count == 1) {
+		/* A single page buffer is always mappable */
 		bp->b_addr = page_address(bp->b_pages[0]) + bp->b_offset;
 		bp->b_flags |= XBF_MAPPED;
 	} else if (flags & XBF_MAPPED) {
-		bp->b_addr = vm_map_ram(bp->b_pages, bp->b_page_count,
-					-1, PAGE_KERNEL);
-		if (unlikely(bp->b_addr == NULL))
+		int retried = 0;
+
+		do {
+			bp->b_addr = vm_map_ram(bp->b_pages, bp->b_page_count,
+						-1, PAGE_KERNEL);
+			if (bp->b_addr)
+				break;
+			vm_unmap_aliases();
+		} while (retried++ <= 1);
+
+		if (!bp->b_addr)
 			return -ENOMEM;
 		bp->b_addr += bp->b_offset;
 		bp->b_flags |= XBF_MAPPED;
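
vm_map_ram() can fail transiently because lazily-unmapped virtual address space has not been flushed yet, so the new loop calls vm_unmap_aliases() and retries a bounded number of times before reporting ENOMEM. A generic userspace sketch of that bounded retry-after-reclaim shape (the callbacks are placeholders, not kernel APIs):

#include <stddef.h>

/* Try an operation that can fail transiently; run a reclaim hook and
 * retry a bounded number of times before reporting failure. */
static void *try_with_reclaim(void *(*try_op)(void *), void (*reclaim)(void),
			      void *arg, int max_retries)
{
	void *result = NULL;
	int retried = 0;

	do {
		result = try_op(arg);
		if (result)
			break;
		reclaim();	/* e.g. flush lazily-freed resources */
	} while (retried++ < max_retries);

	return result;
}
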
@@ -569,9 +514,14 @@ found:
 		}
 	}
 
+	/*
+	 * if the buffer is stale, clear all the external state associated with
+	 * it. We need to keep flags such as how we allocated the buffer memory
+	 * intact here.
+	 */
 	if (bp->b_flags & XBF_STALE) {
 		ASSERT((bp->b_flags & _XBF_DELWRI_Q) == 0);
-		bp->b_flags &= XBF_MAPPED;
+		bp->b_flags &= XBF_MAPPED | _XBF_KMEM | _XBF_PAGES;
 	}
 
 	trace_xfs_buf_find(bp, flags, _RET_IP_);
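
Masking b_flags against an allow-list keeps only the bits that record how the buffer memory was allocated, which the free path still needs, while clearing all other stale state. A tiny illustration of the idiom (flag values are made up):

#include <stdio.h>

enum {
	BUF_MAPPED	= 1 << 0,	/* buffer has a kernel mapping */
	BUF_KMEM	= 1 << 1,	/* backed by a heap allocation */
	BUF_PAGES	= 1 << 2,	/* backed by a page array */
	BUF_STALE	= 1 << 3,	/* contents no longer valid */
	BUF_DELWRI_Q	= 1 << 4,	/* queued for delayed write */
};

int main(void)
{
	unsigned int flags = BUF_PAGES | BUF_STALE | BUF_DELWRI_Q;

	/* Keep only the memory-origin bits; drop all other state. */
	flags &= BUF_MAPPED | BUF_KMEM | BUF_PAGES;
	printf("flags after reset: 0x%x\n", flags);	/* prints 0x4 */
	return 0;
}
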
@@ -592,7 +542,7 @@ xfs_buf_get(
 	xfs_buf_flags_t		flags)
 {
 	xfs_buf_t		*bp, *new_bp;
-	int			error = 0, i;
+	int			error = 0;
 
 	new_bp = xfs_buf_allocate(flags);
 	if (unlikely(!new_bp))
@@ -600,7 +550,7 @@
 
 	bp = _xfs_buf_find(target, ioff, isize, flags, new_bp);
 	if (bp == new_bp) {
-		error = _xfs_buf_lookup_pages(bp, flags);
+		error = xfs_buf_allocate_memory(bp, flags);
 		if (error)
 			goto no_buffer;
 	} else {
@@ -609,14 +559,11 @@
 		return NULL;
 	}
 
-	for (i = 0; i < bp->b_page_count; i++)
-		mark_page_accessed(bp->b_pages[i]);
-
 	if (!(bp->b_flags & XBF_MAPPED)) {
 		error = _xfs_buf_map_pages(bp, flags);
 		if (unlikely(error)) {
-			printk(KERN_WARNING "%s: failed to map pages\n",
-				__func__);
+			xfs_warn(target->bt_mount,
+				"%s: failed to map pages\n", __func__);
 			goto no_buffer;
 		}
 	}
@@ -712,8 +659,7 @@ xfs_buf_readahead(
 {
 	struct backing_dev_info *bdi;
 
-	bdi = target->bt_mapping->backing_dev_info;
-	if (bdi_read_congested(bdi))
+	if (bdi_read_congested(target->bt_bdi))
 		return;
 
 	xfs_buf_read(target, ioff, isize,
@@ -791,10 +737,10 @@ xfs_buf_associate_memory(
 	size_t			buflen;
 	int			page_count;
 
-	pageaddr = (unsigned long)mem & PAGE_CACHE_MASK;
+	pageaddr = (unsigned long)mem & PAGE_MASK;
 	offset = (unsigned long)mem - pageaddr;
-	buflen = PAGE_CACHE_ALIGN(len + offset);
-	page_count = buflen >> PAGE_CACHE_SHIFT;
+	buflen = PAGE_ALIGN(len + offset);
+	page_count = buflen >> PAGE_SHIFT;
 
 	/* Free any previous set of page pointers */
 	if (bp->b_pages)
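
The arithmetic above rounds an arbitrary memory range out to page boundaries: pageaddr is the start of the first page, offset is where the data begins within it, and buflen and page_count cover the whole range. A worked userspace example, assuming 4096-byte pages and mirroring the PAGE_MASK/PAGE_ALIGN semantics:

#include <stdint.h>
#include <stdio.h>

#define PAGE_SIZE	4096UL
#define PAGE_MASK	(~(PAGE_SIZE - 1))
#define PAGE_ALIGN(x)	(((x) + PAGE_SIZE - 1) & PAGE_MASK)

int main(void)
{
	uintptr_t mem = 0x12345678;	/* arbitrary unaligned address */
	size_t len = 10000;

	uintptr_t pageaddr = mem & PAGE_MASK;		/* 0x12345000 */
	size_t offset = mem - pageaddr;			/* 0x678 = 1656 */
	size_t buflen = PAGE_ALIGN(len + offset);	/* 12288 */
	size_t page_count = buflen >> 12;		/* 3 pages */

	printf("pageaddr=%#lx offset=%zu buflen=%zu pages=%zu\n",
	       (unsigned long)pageaddr, offset, buflen, page_count);
	return 0;
}
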
@@ -811,13 +757,12 @@
 
 	for (i = 0; i < bp->b_page_count; i++) {
 		bp->b_pages[i] = mem_to_page((void *)pageaddr);
-		pageaddr += PAGE_CACHE_SIZE;
+		pageaddr += PAGE_SIZE;
 	}
 
 	bp->b_count_desired = len;
 	bp->b_buffer_length = buflen;
 	bp->b_flags |= XBF_MAPPED;
-	bp->b_flags &= ~_XBF_PAGE_LOCKED;
 
 	return 0;
 }
@@ -850,8 +795,8 @@ xfs_buf_get_uncached(
 
 	error = _xfs_buf_map_pages(bp, XBF_MAPPED);
 	if (unlikely(error)) {
-		printk(KERN_WARNING "%s: failed to map pages\n",
-			__func__);
+		xfs_warn(target->bt_mount,
+			"%s: failed to map pages\n", __func__);
 		goto fail_free_mem;
 	}
 
@@ -924,20 +869,7 @@ xfs_buf_rele(
 
 
 /*
- * Mutual exclusion on buffers. Locking model:
- *
- * Buffers associated with inodes for which buffer locking
- * is not enabled are not protected by semaphores, and are
- * assumed to be exclusively owned by the caller. There is a
- * spinlock in the buffer, used by the caller when concurrent
- * access is possible.
- */
-
-/*
- * Locks a buffer object, if it is not already locked. Note that this in
- * no way locks the underlying pages, so it is only useful for
- * synchronizing concurrent use of buffer objects, not for synchronizing
- * independent access to the underlying pages.
+ * Lock a buffer object, if it is not already locked.
  *
  * If we come across a stale, pinned, locked buffer, we know that we are
  * being asked to lock a buffer that has been reallocated. Because it is
@@ -971,10 +903,7 @@ xfs_buf_lock_value(
 }
 
 /*
- * Locks a buffer object.
- * Note that this in no way locks the underlying pages, so it is only
- * useful for synchronizing concurrent use of buffer objects, not for
- * synchronizing independent access to the underlying pages.
+ * Lock a buffer object.
  *
  * If we come across a stale, pinned, locked buffer, we know that we
 * are being asked to lock a buffer that has been reallocated. Because
@@ -991,7 +920,7 @@ xfs_buf_lock(
 	if (atomic_read(&bp->b_pin_count) && (bp->b_flags & XBF_STALE))
 		xfs_log_force(bp->b_target->bt_mount, 0);
 	if (atomic_read(&bp->b_io_remaining))
-		blk_run_address_space(bp->b_target->bt_mapping);
+		blk_flush_plug(current);
 	down(&bp->b_sema);
 	XB_SET_OWNER(bp);
 
@@ -1035,9 +964,7 @@ xfs_buf_wait_unpin(
 		set_current_state(TASK_UNINTERRUPTIBLE);
 		if (atomic_read(&bp->b_pin_count) == 0)
 			break;
-		if (atomic_read(&bp->b_io_remaining))
-			blk_run_address_space(bp->b_target->bt_mapping);
-		schedule();
+		io_schedule();
 	}
 	remove_wait_queue(&bp->b_waiters, &wait);
 	set_current_state(TASK_RUNNING);
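
The surrounding loop is the standard kernel sleep-wait pattern: mark the task as sleeping, re-check the condition, then schedule away until woken. A userspace analogue of waiting for a pin count to drop to zero, built on POSIX condition variables (all names here are illustrative):

#include <pthread.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t unpinned = PTHREAD_COND_INITIALIZER;
static int pin_count;

/* Block until the buffer is no longer pinned. */
static void wait_unpin(void)
{
	pthread_mutex_lock(&lock);
	while (pin_count > 0)		/* re-check after every wakeup */
		pthread_cond_wait(&unpinned, &lock);
	pthread_mutex_unlock(&lock);
}

/* Drop a pin and wake any waiters when the count reaches zero. */
static void unpin(void)
{
	pthread_mutex_lock(&lock);
	if (--pin_count == 0)
		pthread_cond_broadcast(&unpinned);
	pthread_mutex_unlock(&lock);
}
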
@@ -1249,10 +1176,8 @@ _xfs_buf_ioend(
 	xfs_buf_t		*bp,
 	int			schedule)
 {
-	if (atomic_dec_and_test(&bp->b_io_remaining) == 1) {
-		bp->b_flags &= ~_XBF_PAGE_LOCKED;
+	if (atomic_dec_and_test(&bp->b_io_remaining) == 1)
 		xfs_buf_ioend(bp, schedule);
-	}
 }
 
 STATIC void
@@ -1261,35 +1186,12 @@ xfs_buf_bio_end_io(
 	int			error)
 {
 	xfs_buf_t		*bp = (xfs_buf_t *)bio->bi_private;
-	unsigned int		blocksize = bp->b_target->bt_bsize;
-	struct bio_vec		*bvec = bio->bi_io_vec + bio->bi_vcnt - 1;
 
 	xfs_buf_ioerror(bp, -error);
 
 	if (!error && xfs_buf_is_vmapped(bp) && (bp->b_flags & XBF_READ))
 		invalidate_kernel_vmap_range(bp->b_addr, xfs_buf_vmap_len(bp));
 
-	do {
-		struct page	*page = bvec->bv_page;
-
-		ASSERT(!PagePrivate(page));
-		if (unlikely(bp->b_error)) {
-			if (bp->b_flags & XBF_READ)
-				ClearPageUptodate(page);
-		} else if (blocksize >= PAGE_CACHE_SIZE) {
-			SetPageUptodate(page);
-		} else if (!PagePrivate(page) &&
-				(bp->b_flags & _XBF_PAGE_CACHE)) {
-			set_page_region(page, bvec->bv_offset, bvec->bv_len);
-		}
-
-		if (--bvec >= bio->bi_io_vec)
-			prefetchw(&bvec->bv_page->flags);
-
-		if (bp->b_flags & _XBF_PAGE_LOCKED)
-			unlock_page(page);
-	} while (bvec >= bio->bi_io_vec);
-
 	_xfs_buf_ioend(bp, 1);
 	bio_put(bio);
 }
@@ -1303,7 +1205,6 @@ _xfs_buf_ioapply(
 	int			offset = bp->b_offset;
 	int			size = bp->b_count_desired;
 	sector_t		sector = bp->b_bn;
-	unsigned int		blocksize = bp->b_target->bt_bsize;
 
 	total_nr_pages = bp->b_page_count;
 	map_i = 0;
@@ -1324,29 +1225,6 @@
 			(bp->b_flags & XBF_READ_AHEAD) ? READA : READ;
 	}
 
-	/* Special code path for reading a sub page size buffer in --
-	 * we populate up the whole page, and hence the other metadata
-	 * in the same page. This optimization is only valid when the
-	 * filesystem block size is not smaller than the page size.
-	 */
-	if ((bp->b_buffer_length < PAGE_CACHE_SIZE) &&
-	    ((bp->b_flags & (XBF_READ|_XBF_PAGE_LOCKED)) ==
-	      (XBF_READ|_XBF_PAGE_LOCKED)) &&
-	    (blocksize >= PAGE_CACHE_SIZE)) {
-		bio = bio_alloc(GFP_NOIO, 1);
-
-		bio->bi_bdev = bp->b_target->bt_bdev;
-		bio->bi_sector = sector - (offset >> BBSHIFT);
-		bio->bi_end_io = xfs_buf_bio_end_io;
-		bio->bi_private = bp;
-
-		bio_add_page(bio, bp->b_pages[0], PAGE_CACHE_SIZE, 0);
-		size = 0;
-
-		atomic_inc(&bp->b_io_remaining);
-
-		goto submit_io;
-	}
-
 next_chunk:
 	atomic_inc(&bp->b_io_remaining);
@@ -1360,8 +1238,9 @@ next_chunk:
 	bio->bi_end_io = xfs_buf_bio_end_io;
 	bio->bi_private = bp;
 
+
 	for (; size && nr_pages; nr_pages--, map_i++) {
-		int	rbytes, nbytes = PAGE_CACHE_SIZE - offset;
+		int	rbytes, nbytes = PAGE_SIZE - offset;
 
 		if (nbytes > size)
 			nbytes = size;
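
Each pass through this loop claims at most one page's worth of data: the first chunk is shortened by the initial intra-page offset, and every later chunk starts page-aligned. A userspace model of the chunk-size computation:

#include <stdio.h>

#define PAGE_SIZE 4096

/* Print the per-page chunk sizes for a buffer of `size` bytes that
 * starts `offset` bytes into its first page. */
static void show_chunks(int offset, int size)
{
	int page = 0;

	while (size > 0) {
		int nbytes = PAGE_SIZE - offset;	/* room left in this page */

		if (nbytes > size)
			nbytes = size;
		printf("page %d: %d bytes\n", page++, nbytes);
		size -= nbytes;
		offset = 0;	/* only the first page is offset */
	}
}

int main(void)
{
	show_chunks(512, 8192);	/* prints 3584 + 4096 + 512 */
	return 0;
}
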
@@ -1376,7 +1255,6 @@ next_chunk:
 		total_nr_pages--;
 	}
 
-submit_io:
 	if (likely(bio->bi_size)) {
 		if (xfs_buf_is_vmapped(bp)) {
 			flush_kernel_vmap_range(bp->b_addr,
@@ -1386,18 +1264,7 @@ submit_io:
 		if (size)
 			goto next_chunk;
 	} else {
-		/*
-		 * if we get here, no pages were added to the bio. However,
-		 * we can't just error out here - if the pages are locked then
-		 * we have to unlock them otherwise we can hang on a later
-		 * access to the page.
-		 */
 		xfs_buf_ioerror(bp, EIO);
-		if (bp->b_flags & _XBF_PAGE_LOCKED) {
-			int i;
-			for (i = 0; i < bp->b_page_count; i++)
-				unlock_page(bp->b_pages[i]);
-		}
 		bio_put(bio);
 	}
 }
@@ -1443,7 +1310,7 @@ xfs_buf_iowait(
 	trace_xfs_buf_iowait(bp, _RET_IP_);
 
 	if (atomic_read(&bp->b_io_remaining))
-		blk_run_address_space(bp->b_target->bt_mapping);
+		blk_flush_plug(current);
 	wait_for_completion(&bp->b_iowait);
 
 	trace_xfs_buf_iowait_done(bp, _RET_IP_);
@@ -1461,8 +1328,8 @@ xfs_buf_offset(
 		return XFS_BUF_PTR(bp) + offset;
 
 	offset += bp->b_offset;
-	page = bp->b_pages[offset >> PAGE_CACHE_SHIFT];
-	return (xfs_caddr_t)page_address(page) + (offset & (PAGE_CACHE_SIZE-1));
+	page = bp->b_pages[offset >> PAGE_SHIFT];
+	return (xfs_caddr_t)page_address(page) + (offset & (PAGE_SIZE-1));
 }
 
 /*
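
Translating a linear buffer offset into a page-array lookup is one shift for the page index plus one mask for the byte within the page. A small userspace illustration, assuming 4096-byte pages:

#include <stdio.h>

#define PAGE_SHIFT	12
#define PAGE_SIZE	(1UL << PAGE_SHIFT)

int main(void)
{
	unsigned long offset = 9300;	/* byte offset into the buffer */

	unsigned long page_index = offset >> PAGE_SHIFT;	/* 2 */
	unsigned long page_offset = offset & (PAGE_SIZE - 1);	/* 1108 */

	printf("byte %lu -> page %lu, offset %lu\n",
	       offset, page_index, page_offset);
	return 0;
}
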
@@ -1484,9 +1351,9 @@ xfs_buf_iomove(
 	page = bp->b_pages[xfs_buf_btoct(boff + bp->b_offset)];
 	cpoff = xfs_buf_poff(boff + bp->b_offset);
 	csize = min_t(size_t,
-			PAGE_CACHE_SIZE-cpoff, bp->b_count_desired-boff);
+			PAGE_SIZE-cpoff, bp->b_count_desired-boff);
 
-	ASSERT(((csize + cpoff) <= PAGE_CACHE_SIZE));
+	ASSERT(((csize + cpoff) <= PAGE_SIZE));
 
 	switch (mode) {
 	case XBRW_ZERO:
@@ -1599,7 +1466,6 @@ xfs_free_buftarg(
 	xfs_flush_buftarg(btp, 1);
 	if (mp->m_flags & XFS_MOUNT_BARRIER)
 		xfs_blkdev_issue_flush(btp);
-	iput(btp->bt_mapping->host);
 
 	kthread_stop(btp->bt_task);
 	kmem_free(btp);
@@ -1617,21 +1483,12 @@ xfs_setsize_buftarg_flags(
 	btp->bt_smask = sectorsize - 1;
 
 	if (set_blocksize(btp->bt_bdev, sectorsize)) {
-		printk(KERN_WARNING
-			"XFS: Cannot set_blocksize to %u on device %s\n",
+		xfs_warn(btp->bt_mount,
+			"Cannot set_blocksize to %u on device %s\n",
 			sectorsize, XFS_BUFTARG_NAME(btp));
 		return EINVAL;
 	}
 
-	if (verbose &&
-	    (PAGE_CACHE_SIZE / BITS_PER_LONG) > sectorsize) {
-		printk(KERN_WARNING
-			"XFS: %u byte sectors in use on device %s. "
-			"This is suboptimal; %u or greater is ideal.\n",
-			sectorsize, XFS_BUFTARG_NAME(btp),
-			(unsigned int)PAGE_CACHE_SIZE / BITS_PER_LONG);
-	}
-
 	return 0;
 }
 
@@ -1646,7 +1503,7 @@ xfs_setsize_buftarg_early(
 	struct block_device	*bdev)
 {
 	return xfs_setsize_buftarg_flags(btp,
-			PAGE_CACHE_SIZE, bdev_logical_block_size(bdev), 0);
+			PAGE_SIZE, bdev_logical_block_size(bdev), 0);
 }
 
 int
@@ -1659,41 +1516,6 @@ xfs_setsize_buftarg(
 }
 
 STATIC int
-xfs_mapping_buftarg(
-	xfs_buftarg_t		*btp,
-	struct block_device	*bdev)
-{
-	struct backing_dev_info	*bdi;
-	struct inode		*inode;
-	struct address_space	*mapping;
-	static const struct address_space_operations mapping_aops = {
-		.sync_page = block_sync_page,
-		.migratepage = fail_migrate_page,
-	};
-
-	inode = new_inode(bdev->bd_inode->i_sb);
-	if (!inode) {
-		printk(KERN_WARNING
-			"XFS: Cannot allocate mapping inode for device %s\n",
-			XFS_BUFTARG_NAME(btp));
-		return ENOMEM;
-	}
-	inode->i_ino = get_next_ino();
-	inode->i_mode = S_IFBLK;
-	inode->i_bdev = bdev;
-	inode->i_rdev = bdev->bd_dev;
-	bdi = blk_get_backing_dev_info(bdev);
-	if (!bdi)
-		bdi = &default_backing_dev_info;
-	mapping = &inode->i_data;
-	mapping->a_ops = &mapping_aops;
-	mapping->backing_dev_info = bdi;
-	mapping_set_gfp_mask(mapping, GFP_NOFS);
-	btp->bt_mapping = mapping;
-	return 0;
-}
-
-STATIC int
 xfs_alloc_delwrite_queue(
 	xfs_buftarg_t		*btp,
 	const char		*fsname)
@@ -1721,12 +1543,14 @@ xfs_alloc_buftarg(
 	btp->bt_mount = mp;
 	btp->bt_dev = bdev->bd_dev;
 	btp->bt_bdev = bdev;
+	btp->bt_bdi = blk_get_backing_dev_info(bdev);
+	if (!btp->bt_bdi)
+		goto error;
+
 	INIT_LIST_HEAD(&btp->bt_lru);
 	spin_lock_init(&btp->bt_lru_lock);
 	if (xfs_setsize_buftarg_early(btp, bdev))
 		goto error;
-	if (xfs_mapping_buftarg(btp, bdev))
-		goto error;
 	if (xfs_alloc_delwrite_queue(btp, fsname))
 		goto error;
 	btp->bt_shrinker.shrink = xfs_buftarg_shrink;
@@ -1948,7 +1772,7 @@ xfsbufd(
 			count++;
 		}
 		if (count)
-			blk_run_address_space(target->bt_mapping);
+			blk_flush_plug(current);
 
 	} while (!kthread_should_stop());
 
@@ -1996,7 +1820,7 @@ xfs_flush_buftarg(
 
 	if (wait) {
 		/* Expedite and wait for IO to complete. */
-		blk_run_address_space(target->bt_mapping);
+		blk_flush_plug(current);
 		while (!list_empty(&wait_list)) {
 			bp = list_first_entry(&wait_list, struct xfs_buf, b_list);
 