Diffstat (limited to 'fs/xfs/linux-2.6/xfs_buf.c')
-rw-r--r--  fs/xfs/linux-2.6/xfs_buf.c  860
1 file changed, 391 insertions(+), 469 deletions(-)
diff --git a/fs/xfs/linux-2.6/xfs_buf.c b/fs/xfs/linux-2.6/xfs_buf.c
index 286e36e21dae..5e68099db2a5 100644
--- a/fs/xfs/linux-2.6/xfs_buf.c
+++ b/fs/xfs/linux-2.6/xfs_buf.c
@@ -33,7 +33,6 @@
33#include <linux/migrate.h> 33#include <linux/migrate.h>
34#include <linux/backing-dev.h> 34#include <linux/backing-dev.h>
35#include <linux/freezer.h> 35#include <linux/freezer.h>
36#include <linux/list_sort.h>
37 36
38#include "xfs_sb.h" 37#include "xfs_sb.h"
39#include "xfs_inum.h" 38#include "xfs_inum.h"
@@ -44,12 +43,7 @@
44 43
45static kmem_zone_t *xfs_buf_zone; 44static kmem_zone_t *xfs_buf_zone;
46STATIC int xfsbufd(void *); 45STATIC int xfsbufd(void *);
47STATIC int xfsbufd_wakeup(struct shrinker *, int, gfp_t);
48STATIC void xfs_buf_delwri_queue(xfs_buf_t *, int); 46STATIC void xfs_buf_delwri_queue(xfs_buf_t *, int);
49static struct shrinker xfs_buf_shake = {
50 .shrink = xfsbufd_wakeup,
51 .seeks = DEFAULT_SEEKS,
52};
53 47
54static struct workqueue_struct *xfslogd_workqueue; 48static struct workqueue_struct *xfslogd_workqueue;
55struct workqueue_struct *xfsdatad_workqueue; 49struct workqueue_struct *xfsdatad_workqueue;
@@ -99,77 +93,79 @@ xfs_buf_vmap_len(
99} 93}
100 94
101/* 95/*
102 * Page Region interfaces. 96 * xfs_buf_lru_add - add a buffer to the LRU.
103 * 97 *
104 * For pages in filesystems where the blocksize is smaller than the 98 * The LRU takes a new reference to the buffer so that it will only be freed
105 * pagesize, we use the page->private field (long) to hold a bitmap 99 * once the shrinker takes the buffer off the LRU.
106 * of uptodate regions within the page.
107 *
108 * Each such region is "bytes per page / bits per long" bytes long.
109 *
110 * NBPPR == number-of-bytes-per-page-region
111 * BTOPR == bytes-to-page-region (rounded up)
112 * BTOPRT == bytes-to-page-region-truncated (rounded down)
113 */ 100 */
114#if (BITS_PER_LONG == 32) 101STATIC void
115#define PRSHIFT (PAGE_CACHE_SHIFT - 5) /* (32 == 1<<5) */ 102xfs_buf_lru_add(
116#elif (BITS_PER_LONG == 64) 103 struct xfs_buf *bp)
117#define PRSHIFT (PAGE_CACHE_SHIFT - 6) /* (64 == 1<<6) */
118#else
119#error BITS_PER_LONG must be 32 or 64
120#endif
121#define NBPPR (PAGE_CACHE_SIZE/BITS_PER_LONG)
122#define BTOPR(b) (((unsigned int)(b) + (NBPPR - 1)) >> PRSHIFT)
123#define BTOPRT(b) (((unsigned int)(b) >> PRSHIFT))
124
125STATIC unsigned long
126page_region_mask(
127 size_t offset,
128 size_t length)
129{ 104{
130 unsigned long mask; 105 struct xfs_buftarg *btp = bp->b_target;
131 int first, final;
132
133 first = BTOPR(offset);
134 final = BTOPRT(offset + length - 1);
135 first = min(first, final);
136
137 mask = ~0UL;
138 mask <<= BITS_PER_LONG - (final - first);
139 mask >>= BITS_PER_LONG - (final);
140
141 ASSERT(offset + length <= PAGE_CACHE_SIZE);
142 ASSERT((final - first) < BITS_PER_LONG && (final - first) >= 0);
143 106
144 return mask; 107 spin_lock(&btp->bt_lru_lock);
108 if (list_empty(&bp->b_lru)) {
109 atomic_inc(&bp->b_hold);
110 list_add_tail(&bp->b_lru, &btp->bt_lru);
111 btp->bt_lru_nr++;
112 }
113 spin_unlock(&btp->bt_lru_lock);
145} 114}
146 115
116/*
117 * xfs_buf_lru_del - remove a buffer from the LRU
118 *
119 * The unlocked check is safe here because it only occurs when there are not
120 * b_lru_ref counts left on the inode under the pag->pag_buf_lock. it is there
121 * to optimise the shrinker removing the buffer from the LRU and calling
122 * xfs_buf_free(). i.e. it removes an unnecessary round trip on the
123 * bt_lru_lock.
124 */
147STATIC void 125STATIC void
148set_page_region( 126xfs_buf_lru_del(
149 struct page *page, 127 struct xfs_buf *bp)
150 size_t offset,
151 size_t length)
152{ 128{
153 set_page_private(page, 129 struct xfs_buftarg *btp = bp->b_target;
154 page_private(page) | page_region_mask(offset, length));
155 if (page_private(page) == ~0UL)
156 SetPageUptodate(page);
157}
158 130
159STATIC int 131 if (list_empty(&bp->b_lru))
160test_page_region( 132 return;
161 struct page *page,
162 size_t offset,
163 size_t length)
164{
165 unsigned long mask = page_region_mask(offset, length);
166 133
167 return (mask && (page_private(page) & mask) == mask); 134 spin_lock(&btp->bt_lru_lock);
135 if (!list_empty(&bp->b_lru)) {
136 list_del_init(&bp->b_lru);
137 btp->bt_lru_nr--;
138 }
139 spin_unlock(&btp->bt_lru_lock);
168} 140}
169 141
170/* 142/*
171 * Internal xfs_buf_t object manipulation 143 * When we mark a buffer stale, we remove the buffer from the LRU and clear the
144 * b_lru_ref count so that the buffer is freed immediately when the buffer
145 * reference count falls to zero. If the buffer is already on the LRU, we need
146 * to remove the reference that LRU holds on the buffer.
147 *
148 * This prevents build-up of stale buffers on the LRU.
172 */ 149 */
150void
151xfs_buf_stale(
152 struct xfs_buf *bp)
153{
154 bp->b_flags |= XBF_STALE;
155 atomic_set(&(bp)->b_lru_ref, 0);
156 if (!list_empty(&bp->b_lru)) {
157 struct xfs_buftarg *btp = bp->b_target;
158
159 spin_lock(&btp->bt_lru_lock);
160 if (!list_empty(&bp->b_lru)) {
161 list_del_init(&bp->b_lru);
162 btp->bt_lru_nr--;
163 atomic_dec(&bp->b_hold);
164 }
165 spin_unlock(&btp->bt_lru_lock);
166 }
167 ASSERT(atomic_read(&bp->b_hold) >= 1);
168}
173 169
174STATIC void 170STATIC void
175_xfs_buf_initialize( 171_xfs_buf_initialize(
@@ -186,10 +182,12 @@ _xfs_buf_initialize(
186 182
187 memset(bp, 0, sizeof(xfs_buf_t)); 183 memset(bp, 0, sizeof(xfs_buf_t));
188 atomic_set(&bp->b_hold, 1); 184 atomic_set(&bp->b_hold, 1);
185 atomic_set(&bp->b_lru_ref, 1);
189 init_completion(&bp->b_iowait); 186 init_completion(&bp->b_iowait);
187 INIT_LIST_HEAD(&bp->b_lru);
190 INIT_LIST_HEAD(&bp->b_list); 188 INIT_LIST_HEAD(&bp->b_list);
191 INIT_LIST_HEAD(&bp->b_hash_list); 189 RB_CLEAR_NODE(&bp->b_rbnode);
192 init_MUTEX_LOCKED(&bp->b_sema); /* held, no waiters */ 190 sema_init(&bp->b_sema, 0); /* held, no waiters */
193 XB_SET_OWNER(bp); 191 XB_SET_OWNER(bp);
194 bp->b_target = target; 192 bp->b_target = target;
195 bp->b_file_offset = range_base; 193 bp->b_file_offset = range_base;
@@ -262,9 +260,9 @@ xfs_buf_free(
262{ 260{
263 trace_xfs_buf_free(bp, _RET_IP_); 261 trace_xfs_buf_free(bp, _RET_IP_);
264 262
265 ASSERT(list_empty(&bp->b_hash_list)); 263 ASSERT(list_empty(&bp->b_lru));
266 264
267 if (bp->b_flags & (_XBF_PAGE_CACHE|_XBF_PAGES)) { 265 if (bp->b_flags & _XBF_PAGES) {
268 uint i; 266 uint i;
269 267
270 if (xfs_buf_is_vmapped(bp)) 268 if (xfs_buf_is_vmapped(bp))
@@ -274,56 +272,77 @@ xfs_buf_free(
274 for (i = 0; i < bp->b_page_count; i++) { 272 for (i = 0; i < bp->b_page_count; i++) {
275 struct page *page = bp->b_pages[i]; 273 struct page *page = bp->b_pages[i];
276 274
277 if (bp->b_flags & _XBF_PAGE_CACHE) 275 __free_page(page);
278 ASSERT(!PagePrivate(page));
279 page_cache_release(page);
280 } 276 }
281 } 277 } else if (bp->b_flags & _XBF_KMEM)
278 kmem_free(bp->b_addr);
282 _xfs_buf_free_pages(bp); 279 _xfs_buf_free_pages(bp);
283 xfs_buf_deallocate(bp); 280 xfs_buf_deallocate(bp);
284} 281}
285 282
286/* 283/*
287 * Finds all pages for buffer in question and builds its page list. 284 * Allocates all the pages for buffer in question and builds its page list.
288 */ 285 */
289STATIC int 286STATIC int
290_xfs_buf_lookup_pages( 287xfs_buf_allocate_memory(
291 xfs_buf_t *bp, 288 xfs_buf_t *bp,
292 uint flags) 289 uint flags)
293{ 290{
294 struct address_space *mapping = bp->b_target->bt_mapping;
295 size_t blocksize = bp->b_target->bt_bsize;
296 size_t size = bp->b_count_desired; 291 size_t size = bp->b_count_desired;
297 size_t nbytes, offset; 292 size_t nbytes, offset;
298 gfp_t gfp_mask = xb_to_gfp(flags); 293 gfp_t gfp_mask = xb_to_gfp(flags);
299 unsigned short page_count, i; 294 unsigned short page_count, i;
300 pgoff_t first;
301 xfs_off_t end; 295 xfs_off_t end;
302 int error; 296 int error;
303 297
298 /*
299 * for buffers that are contained within a single page, just allocate
300 * the memory from the heap - there's no need for the complexity of
301 * page arrays to keep allocation down to order 0.
302 */
303 if (bp->b_buffer_length < PAGE_SIZE) {
304 bp->b_addr = kmem_alloc(bp->b_buffer_length, xb_to_km(flags));
305 if (!bp->b_addr) {
306 /* low memory - use alloc_page loop instead */
307 goto use_alloc_page;
308 }
309
310 if (((unsigned long)(bp->b_addr + bp->b_buffer_length - 1) &
311 PAGE_MASK) !=
312 ((unsigned long)bp->b_addr & PAGE_MASK)) {
313 /* b_addr spans two pages - use alloc_page instead */
314 kmem_free(bp->b_addr);
315 bp->b_addr = NULL;
316 goto use_alloc_page;
317 }
318 bp->b_offset = offset_in_page(bp->b_addr);
319 bp->b_pages = bp->b_page_array;
320 bp->b_pages[0] = virt_to_page(bp->b_addr);
321 bp->b_page_count = 1;
322 bp->b_flags |= XBF_MAPPED | _XBF_KMEM;
323 return 0;
324 }
325
326use_alloc_page:
304 end = bp->b_file_offset + bp->b_buffer_length; 327 end = bp->b_file_offset + bp->b_buffer_length;
305 page_count = xfs_buf_btoc(end) - xfs_buf_btoct(bp->b_file_offset); 328 page_count = xfs_buf_btoc(end) - xfs_buf_btoct(bp->b_file_offset);
306
307 error = _xfs_buf_get_pages(bp, page_count, flags); 329 error = _xfs_buf_get_pages(bp, page_count, flags);
308 if (unlikely(error)) 330 if (unlikely(error))
309 return error; 331 return error;
310 bp->b_flags |= _XBF_PAGE_CACHE;
311 332
312 offset = bp->b_offset; 333 offset = bp->b_offset;
313 first = bp->b_file_offset >> PAGE_CACHE_SHIFT; 334 bp->b_flags |= _XBF_PAGES;
314 335
315 for (i = 0; i < bp->b_page_count; i++) { 336 for (i = 0; i < bp->b_page_count; i++) {
316 struct page *page; 337 struct page *page;
317 uint retries = 0; 338 uint retries = 0;
318 339retry:
319 retry: 340 page = alloc_page(gfp_mask);
320 page = find_or_create_page(mapping, first + i, gfp_mask);
321 if (unlikely(page == NULL)) { 341 if (unlikely(page == NULL)) {
322 if (flags & XBF_READ_AHEAD) { 342 if (flags & XBF_READ_AHEAD) {
323 bp->b_page_count = i; 343 bp->b_page_count = i;
324 for (i = 0; i < bp->b_page_count; i++) 344 error = ENOMEM;
325 unlock_page(bp->b_pages[i]); 345 goto out_free_pages;
326 return -ENOMEM;
327 } 346 }
328 347
329 /* 348 /*
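Aside (not part of the patch): the sub-page fast path added a few lines up falls back to alloc_page() whenever the heap allocation happens to straddle a page boundary, because a buffer described by b_addr plus a single b_pages[0] entry must live entirely within one page. A minimal sketch of that test, with an illustrative helper name; the patch open-codes the same comparison on bp->b_addr and bp->b_buffer_length:

#include <linux/mm.h>		/* PAGE_MASK */
#include <linux/types.h>

/* Illustrative only: true if [addr, addr + len) crosses a page boundary. */
static bool spans_two_pages(void *addr, size_t len)
{
	unsigned long first = (unsigned long)addr & PAGE_MASK;
	unsigned long last = ((unsigned long)addr + len - 1) & PAGE_MASK;

	return first != last;
}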
@@ -333,65 +352,55 @@ _xfs_buf_lookup_pages(
333 * handle buffer allocation failures we can't do much. 352 * handle buffer allocation failures we can't do much.
334 */ 353 */
335 if (!(++retries % 100)) 354 if (!(++retries % 100))
336 printk(KERN_ERR 355 xfs_err(NULL,
337 "XFS: possible memory allocation " 356 "possible memory allocation deadlock in %s (mode:0x%x)",
338 "deadlock in %s (mode:0x%x)\n",
339 __func__, gfp_mask); 357 __func__, gfp_mask);
340 358
341 XFS_STATS_INC(xb_page_retries); 359 XFS_STATS_INC(xb_page_retries);
342 xfsbufd_wakeup(NULL, 0, gfp_mask);
343 congestion_wait(BLK_RW_ASYNC, HZ/50); 360 congestion_wait(BLK_RW_ASYNC, HZ/50);
344 goto retry; 361 goto retry;
345 } 362 }
346 363
347 XFS_STATS_INC(xb_page_found); 364 XFS_STATS_INC(xb_page_found);
348 365
349 nbytes = min_t(size_t, size, PAGE_CACHE_SIZE - offset); 366 nbytes = min_t(size_t, size, PAGE_SIZE - offset);
350 size -= nbytes; 367 size -= nbytes;
351
352 ASSERT(!PagePrivate(page));
353 if (!PageUptodate(page)) {
354 page_count--;
355 if (blocksize >= PAGE_CACHE_SIZE) {
356 if (flags & XBF_READ)
357 bp->b_flags |= _XBF_PAGE_LOCKED;
358 } else if (!PagePrivate(page)) {
359 if (test_page_region(page, offset, nbytes))
360 page_count++;
361 }
362 }
363
364 bp->b_pages[i] = page; 368 bp->b_pages[i] = page;
365 offset = 0; 369 offset = 0;
366 } 370 }
371 return 0;
367 372
368 if (!(bp->b_flags & _XBF_PAGE_LOCKED)) { 373out_free_pages:
369 for (i = 0; i < bp->b_page_count; i++) 374 for (i = 0; i < bp->b_page_count; i++)
370 unlock_page(bp->b_pages[i]); 375 __free_page(bp->b_pages[i]);
371 }
372
373 if (page_count == bp->b_page_count)
374 bp->b_flags |= XBF_DONE;
375
376 return error; 376 return error;
377} 377}
378 378
379/* 379/*
380 * Map buffer into kernel address-space if nessecary. 380 * Map buffer into kernel address-space if necessary.
381 */ 381 */
382STATIC int 382STATIC int
383_xfs_buf_map_pages( 383_xfs_buf_map_pages(
384 xfs_buf_t *bp, 384 xfs_buf_t *bp,
385 uint flags) 385 uint flags)
386{ 386{
387 /* A single page buffer is always mappable */ 387 ASSERT(bp->b_flags & _XBF_PAGES);
388 if (bp->b_page_count == 1) { 388 if (bp->b_page_count == 1) {
389 /* A single page buffer is always mappable */
389 bp->b_addr = page_address(bp->b_pages[0]) + bp->b_offset; 390 bp->b_addr = page_address(bp->b_pages[0]) + bp->b_offset;
390 bp->b_flags |= XBF_MAPPED; 391 bp->b_flags |= XBF_MAPPED;
391 } else if (flags & XBF_MAPPED) { 392 } else if (flags & XBF_MAPPED) {
392 bp->b_addr = vm_map_ram(bp->b_pages, bp->b_page_count, 393 int retried = 0;
393 -1, PAGE_KERNEL); 394
394 if (unlikely(bp->b_addr == NULL)) 395 do {
396 bp->b_addr = vm_map_ram(bp->b_pages, bp->b_page_count,
397 -1, PAGE_KERNEL);
398 if (bp->b_addr)
399 break;
400 vm_unmap_aliases();
401 } while (retried++ <= 1);
402
403 if (!bp->b_addr)
395 return -ENOMEM; 404 return -ENOMEM;
396 bp->b_addr += bp->b_offset; 405 bp->b_addr += bp->b_offset;
397 bp->b_flags |= XBF_MAPPED; 406 bp->b_flags |= XBF_MAPPED;
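Aside (not part of the patch): the retry loop in _xfs_buf_map_pages() above relies on vm_unmap_aliases() purging lazily-unmapped vmalloc areas, so a vm_map_ram() call that failed only because address space was being hoarded may succeed on a second attempt. A standalone sketch of the same pattern, with an illustrative helper name:

#include <linux/vmalloc.h>
#include <linux/mm.h>

/* Illustrative only: map an array of pages, retrying once after purging
 * lazily unmapped vmalloc areas that may be hoarding address space. */
static void *map_pages_with_retry(struct page **pages, int count)
{
	void *addr;
	int tries = 0;

	do {
		addr = vm_map_ram(pages, count, -1, PAGE_KERNEL);
		if (addr)
			return addr;
		vm_unmap_aliases();
	} while (tries++ <= 1);

	return NULL;
}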
@@ -422,8 +431,10 @@ _xfs_buf_find(
422{ 431{
423 xfs_off_t range_base; 432 xfs_off_t range_base;
424 size_t range_length; 433 size_t range_length;
425 xfs_bufhash_t *hash; 434 struct xfs_perag *pag;
426 xfs_buf_t *bp, *n; 435 struct rb_node **rbp;
436 struct rb_node *parent;
437 xfs_buf_t *bp;
427 438
428 range_base = (ioff << BBSHIFT); 439 range_base = (ioff << BBSHIFT);
429 range_length = (isize << BBSHIFT); 440 range_length = (isize << BBSHIFT);
@@ -432,14 +443,37 @@ _xfs_buf_find(
432 ASSERT(!(range_length < (1 << btp->bt_sshift))); 443 ASSERT(!(range_length < (1 << btp->bt_sshift)));
433 ASSERT(!(range_base & (xfs_off_t)btp->bt_smask)); 444 ASSERT(!(range_base & (xfs_off_t)btp->bt_smask));
434 445
435 hash = &btp->bt_hash[hash_long((unsigned long)ioff, btp->bt_hashshift)]; 446 /* get tree root */
436 447 pag = xfs_perag_get(btp->bt_mount,
437 spin_lock(&hash->bh_lock); 448 xfs_daddr_to_agno(btp->bt_mount, ioff));
438 449
439 list_for_each_entry_safe(bp, n, &hash->bh_list, b_hash_list) { 450 /* walk tree */
440 ASSERT(btp == bp->b_target); 451 spin_lock(&pag->pag_buf_lock);
441 if (bp->b_file_offset == range_base && 452 rbp = &pag->pag_buf_tree.rb_node;
442 bp->b_buffer_length == range_length) { 453 parent = NULL;
454 bp = NULL;
455 while (*rbp) {
456 parent = *rbp;
457 bp = rb_entry(parent, struct xfs_buf, b_rbnode);
458
459 if (range_base < bp->b_file_offset)
460 rbp = &(*rbp)->rb_left;
461 else if (range_base > bp->b_file_offset)
462 rbp = &(*rbp)->rb_right;
463 else {
464 /*
465 * found a block offset match. If the range doesn't
466 * match, the only way this is allowed is if the buffer
467 * in the cache is stale and the transaction that made
468 * it stale has not yet committed. i.e. we are
469 * reallocating a busy extent. Skip this buffer and
470 * continue searching to the right for an exact match.
471 */
472 if (bp->b_buffer_length != range_length) {
473 ASSERT(bp->b_flags & XBF_STALE);
474 rbp = &(*rbp)->rb_right;
475 continue;
476 }
443 atomic_inc(&bp->b_hold); 477 atomic_inc(&bp->b_hold);
444 goto found; 478 goto found;
445 } 479 }
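Aside (not part of the patch): the walk above follows the standard Linux rbtree search-or-insert idiom, keeping a pointer to the link slot that will hold a newly inserted node. A generic sketch with placeholder types ("struct item" and "key" are illustrative, not XFS types); _xfs_buf_find() departs from the plain idiom only for the stale-buffer case described in the comment, where it keeps walking right past a same-offset match:

#include <linux/rbtree.h>

struct item {
	struct rb_node	node;
	unsigned long	key;
};

/* Illustrative only: return an existing item with a matching key, or
 * link and rebalance the caller-supplied new item and return it. */
static struct item *item_find_or_insert(struct rb_root *root, struct item *new)
{
	struct rb_node **p = &root->rb_node;
	struct rb_node *parent = NULL;

	while (*p) {
		struct item *this = rb_entry(*p, struct item, node);

		parent = *p;
		if (new->key < this->key)
			p = &(*p)->rb_left;
		else if (new->key > this->key)
			p = &(*p)->rb_right;
		else
			return this;		/* existing match */
	}

	rb_link_node(&new->node, parent, p);	/* insert at the empty link */
	rb_insert_color(&new->node, root);	/* rebalance the tree */
	return new;
}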
@@ -449,46 +483,42 @@ _xfs_buf_find(
449 if (new_bp) { 483 if (new_bp) {
450 _xfs_buf_initialize(new_bp, btp, range_base, 484 _xfs_buf_initialize(new_bp, btp, range_base,
451 range_length, flags); 485 range_length, flags);
452 new_bp->b_hash = hash; 486 rb_link_node(&new_bp->b_rbnode, parent, rbp);
453 list_add(&new_bp->b_hash_list, &hash->bh_list); 487 rb_insert_color(&new_bp->b_rbnode, &pag->pag_buf_tree);
488 /* the buffer keeps the perag reference until it is freed */
489 new_bp->b_pag = pag;
490 spin_unlock(&pag->pag_buf_lock);
454 } else { 491 } else {
455 XFS_STATS_INC(xb_miss_locked); 492 XFS_STATS_INC(xb_miss_locked);
493 spin_unlock(&pag->pag_buf_lock);
494 xfs_perag_put(pag);
456 } 495 }
457
458 spin_unlock(&hash->bh_lock);
459 return new_bp; 496 return new_bp;
460 497
461found: 498found:
462 spin_unlock(&hash->bh_lock); 499 spin_unlock(&pag->pag_buf_lock);
500 xfs_perag_put(pag);
463 501
464 /* Attempt to get the semaphore without sleeping, 502 if (xfs_buf_cond_lock(bp)) {
465 * if this does not work then we need to drop the 503 /* failed, so wait for the lock if requested. */
466 * spinlock and do a hard attempt on the semaphore.
467 */
468 if (down_trylock(&bp->b_sema)) {
469 if (!(flags & XBF_TRYLOCK)) { 504 if (!(flags & XBF_TRYLOCK)) {
470 /* wait for buffer ownership */
471 xfs_buf_lock(bp); 505 xfs_buf_lock(bp);
472 XFS_STATS_INC(xb_get_locked_waited); 506 XFS_STATS_INC(xb_get_locked_waited);
473 } else { 507 } else {
474 /* We asked for a trylock and failed, no need
475 * to look at file offset and length here, we
476 * know that this buffer at least overlaps our
477 * buffer and is locked, therefore our buffer
478 * either does not exist, or is this buffer.
479 */
480 xfs_buf_rele(bp); 508 xfs_buf_rele(bp);
481 XFS_STATS_INC(xb_busy_locked); 509 XFS_STATS_INC(xb_busy_locked);
482 return NULL; 510 return NULL;
483 } 511 }
484 } else {
485 /* trylock worked */
486 XB_SET_OWNER(bp);
487 } 512 }
488 513
514 /*
515 * if the buffer is stale, clear all the external state associated with
516 * it. We need to keep flags such as how we allocated the buffer memory
517 * intact here.
518 */
489 if (bp->b_flags & XBF_STALE) { 519 if (bp->b_flags & XBF_STALE) {
490 ASSERT((bp->b_flags & _XBF_DELWRI_Q) == 0); 520 ASSERT((bp->b_flags & _XBF_DELWRI_Q) == 0);
491 bp->b_flags &= XBF_MAPPED; 521 bp->b_flags &= XBF_MAPPED | _XBF_KMEM | _XBF_PAGES;
492 } 522 }
493 523
494 trace_xfs_buf_find(bp, flags, _RET_IP_); 524 trace_xfs_buf_find(bp, flags, _RET_IP_);
@@ -509,7 +539,7 @@ xfs_buf_get(
509 xfs_buf_flags_t flags) 539 xfs_buf_flags_t flags)
510{ 540{
511 xfs_buf_t *bp, *new_bp; 541 xfs_buf_t *bp, *new_bp;
512 int error = 0, i; 542 int error = 0;
513 543
514 new_bp = xfs_buf_allocate(flags); 544 new_bp = xfs_buf_allocate(flags);
515 if (unlikely(!new_bp)) 545 if (unlikely(!new_bp))
@@ -517,7 +547,7 @@ xfs_buf_get(
517 547
518 bp = _xfs_buf_find(target, ioff, isize, flags, new_bp); 548 bp = _xfs_buf_find(target, ioff, isize, flags, new_bp);
519 if (bp == new_bp) { 549 if (bp == new_bp) {
520 error = _xfs_buf_lookup_pages(bp, flags); 550 error = xfs_buf_allocate_memory(bp, flags);
521 if (error) 551 if (error)
522 goto no_buffer; 552 goto no_buffer;
523 } else { 553 } else {
@@ -526,14 +556,11 @@ xfs_buf_get(
526 return NULL; 556 return NULL;
527 } 557 }
528 558
529 for (i = 0; i < bp->b_page_count; i++)
530 mark_page_accessed(bp->b_pages[i]);
531
532 if (!(bp->b_flags & XBF_MAPPED)) { 559 if (!(bp->b_flags & XBF_MAPPED)) {
533 error = _xfs_buf_map_pages(bp, flags); 560 error = _xfs_buf_map_pages(bp, flags);
534 if (unlikely(error)) { 561 if (unlikely(error)) {
535 printk(KERN_WARNING "%s: failed to map pages\n", 562 xfs_warn(target->bt_mount,
536 __func__); 563 "%s: failed to map pages\n", __func__);
537 goto no_buffer; 564 goto no_buffer;
538 } 565 }
539 } 566 }
@@ -625,17 +652,47 @@ void
625xfs_buf_readahead( 652xfs_buf_readahead(
626 xfs_buftarg_t *target, 653 xfs_buftarg_t *target,
627 xfs_off_t ioff, 654 xfs_off_t ioff,
628 size_t isize, 655 size_t isize)
629 xfs_buf_flags_t flags)
630{ 656{
631 struct backing_dev_info *bdi; 657 if (bdi_read_congested(target->bt_bdi))
632
633 bdi = target->bt_mapping->backing_dev_info;
634 if (bdi_read_congested(bdi))
635 return; 658 return;
636 659
637 flags |= (XBF_TRYLOCK|XBF_ASYNC|XBF_READ_AHEAD); 660 xfs_buf_read(target, ioff, isize,
638 xfs_buf_read(target, ioff, isize, flags); 661 XBF_TRYLOCK|XBF_ASYNC|XBF_READ_AHEAD|XBF_DONT_BLOCK);
662}
663
664/*
665 * Read an uncached buffer from disk. Allocates and returns a locked
666 * buffer containing the disk contents or nothing.
667 */
668struct xfs_buf *
669xfs_buf_read_uncached(
670 struct xfs_mount *mp,
671 struct xfs_buftarg *target,
672 xfs_daddr_t daddr,
673 size_t length,
674 int flags)
675{
676 xfs_buf_t *bp;
677 int error;
678
679 bp = xfs_buf_get_uncached(target, length, flags);
680 if (!bp)
681 return NULL;
682
683 /* set up the buffer for a read IO */
684 xfs_buf_lock(bp);
685 XFS_BUF_SET_ADDR(bp, daddr);
686 XFS_BUF_READ(bp);
687 XFS_BUF_BUSY(bp);
688
689 xfsbdstrat(mp, bp);
690 error = xfs_buf_iowait(bp);
691 if (error || bp->b_error) {
692 xfs_buf_relse(bp);
693 return NULL;
694 }
695 return bp;
639} 696}
640 697
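Aside (not part of the patch): a hypothetical call-site sketch for the new xfs_buf_read_uncached() helper, e.g. probing a device address outside the cached-buffer path during mount. The daddr value is a placeholder, and fields such as m_ddev_targp and sb_sectsize come from the wider XFS code base, not from this hunk:

/* Illustrative only: read one sector at "daddr" via the uncached path
 * and throw the data away. */
static int probe_sector(struct xfs_mount *mp, xfs_daddr_t daddr)
{
	struct xfs_buf *bp;

	bp = xfs_buf_read_uncached(mp, mp->m_ddev_targp, daddr,
				   mp->m_sb.sb_sectsize, 0);
	if (!bp)
		return EIO;	/* I/O error or allocation failure */
	xfs_buf_relse(bp);	/* returned buffer is locked and held */
	return 0;
}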
641xfs_buf_t * 698xfs_buf_t *
@@ -651,6 +708,27 @@ xfs_buf_get_empty(
651 return bp; 708 return bp;
652} 709}
653 710
711/*
712 * Return a buffer allocated as an empty buffer and associated to external
 713 * memory via xfs_buf_associate_memory() back to its empty state.
714 */
715void
716xfs_buf_set_empty(
717 struct xfs_buf *bp,
718 size_t len)
719{
720 if (bp->b_pages)
721 _xfs_buf_free_pages(bp);
722
723 bp->b_pages = NULL;
724 bp->b_page_count = 0;
725 bp->b_addr = NULL;
726 bp->b_file_offset = 0;
727 bp->b_buffer_length = bp->b_count_desired = len;
728 bp->b_bn = XFS_BUF_DADDR_NULL;
729 bp->b_flags &= ~XBF_MAPPED;
730}
731
654static inline struct page * 732static inline struct page *
655mem_to_page( 733mem_to_page(
656 void *addr) 734 void *addr)
@@ -675,10 +753,10 @@ xfs_buf_associate_memory(
675 size_t buflen; 753 size_t buflen;
676 int page_count; 754 int page_count;
677 755
678 pageaddr = (unsigned long)mem & PAGE_CACHE_MASK; 756 pageaddr = (unsigned long)mem & PAGE_MASK;
679 offset = (unsigned long)mem - pageaddr; 757 offset = (unsigned long)mem - pageaddr;
680 buflen = PAGE_CACHE_ALIGN(len + offset); 758 buflen = PAGE_ALIGN(len + offset);
681 page_count = buflen >> PAGE_CACHE_SHIFT; 759 page_count = buflen >> PAGE_SHIFT;
682 760
683 /* Free any previous set of page pointers */ 761 /* Free any previous set of page pointers */
684 if (bp->b_pages) 762 if (bp->b_pages)
@@ -695,21 +773,21 @@ xfs_buf_associate_memory(
695 773
696 for (i = 0; i < bp->b_page_count; i++) { 774 for (i = 0; i < bp->b_page_count; i++) {
697 bp->b_pages[i] = mem_to_page((void *)pageaddr); 775 bp->b_pages[i] = mem_to_page((void *)pageaddr);
698 pageaddr += PAGE_CACHE_SIZE; 776 pageaddr += PAGE_SIZE;
699 } 777 }
700 778
701 bp->b_count_desired = len; 779 bp->b_count_desired = len;
702 bp->b_buffer_length = buflen; 780 bp->b_buffer_length = buflen;
703 bp->b_flags |= XBF_MAPPED; 781 bp->b_flags |= XBF_MAPPED;
704 bp->b_flags &= ~_XBF_PAGE_LOCKED;
705 782
706 return 0; 783 return 0;
707} 784}
708 785
709xfs_buf_t * 786xfs_buf_t *
710xfs_buf_get_noaddr( 787xfs_buf_get_uncached(
788 struct xfs_buftarg *target,
711 size_t len, 789 size_t len,
712 xfs_buftarg_t *target) 790 int flags)
713{ 791{
714 unsigned long page_count = PAGE_ALIGN(len) >> PAGE_SHIFT; 792 unsigned long page_count = PAGE_ALIGN(len) >> PAGE_SHIFT;
715 int error, i; 793 int error, i;
@@ -725,7 +803,7 @@ xfs_buf_get_noaddr(
725 goto fail_free_buf; 803 goto fail_free_buf;
726 804
727 for (i = 0; i < page_count; i++) { 805 for (i = 0; i < page_count; i++) {
728 bp->b_pages[i] = alloc_page(GFP_KERNEL); 806 bp->b_pages[i] = alloc_page(xb_to_gfp(flags));
729 if (!bp->b_pages[i]) 807 if (!bp->b_pages[i])
730 goto fail_free_mem; 808 goto fail_free_mem;
731 } 809 }
@@ -733,14 +811,14 @@ xfs_buf_get_noaddr(
733 811
734 error = _xfs_buf_map_pages(bp, XBF_MAPPED); 812 error = _xfs_buf_map_pages(bp, XBF_MAPPED);
735 if (unlikely(error)) { 813 if (unlikely(error)) {
736 printk(KERN_WARNING "%s: failed to map pages\n", 814 xfs_warn(target->bt_mount,
737 __func__); 815 "%s: failed to map pages\n", __func__);
738 goto fail_free_mem; 816 goto fail_free_mem;
739 } 817 }
740 818
741 xfs_buf_unlock(bp); 819 xfs_buf_unlock(bp);
742 820
743 trace_xfs_buf_get_noaddr(bp, _RET_IP_); 821 trace_xfs_buf_get_uncached(bp, _RET_IP_);
744 return bp; 822 return bp;
745 823
746 fail_free_mem: 824 fail_free_mem:
@@ -774,29 +852,32 @@ void
774xfs_buf_rele( 852xfs_buf_rele(
775 xfs_buf_t *bp) 853 xfs_buf_t *bp)
776{ 854{
777 xfs_bufhash_t *hash = bp->b_hash; 855 struct xfs_perag *pag = bp->b_pag;
778 856
779 trace_xfs_buf_rele(bp, _RET_IP_); 857 trace_xfs_buf_rele(bp, _RET_IP_);
780 858
781 if (unlikely(!hash)) { 859 if (!pag) {
782 ASSERT(!bp->b_relse); 860 ASSERT(list_empty(&bp->b_lru));
861 ASSERT(RB_EMPTY_NODE(&bp->b_rbnode));
783 if (atomic_dec_and_test(&bp->b_hold)) 862 if (atomic_dec_and_test(&bp->b_hold))
784 xfs_buf_free(bp); 863 xfs_buf_free(bp);
785 return; 864 return;
786 } 865 }
787 866
867 ASSERT(!RB_EMPTY_NODE(&bp->b_rbnode));
868
788 ASSERT(atomic_read(&bp->b_hold) > 0); 869 ASSERT(atomic_read(&bp->b_hold) > 0);
789 if (atomic_dec_and_lock(&bp->b_hold, &hash->bh_lock)) { 870 if (atomic_dec_and_lock(&bp->b_hold, &pag->pag_buf_lock)) {
790 if (bp->b_relse) { 871 if (!(bp->b_flags & XBF_STALE) &&
791 atomic_inc(&bp->b_hold); 872 atomic_read(&bp->b_lru_ref)) {
792 spin_unlock(&hash->bh_lock); 873 xfs_buf_lru_add(bp);
793 (*(bp->b_relse)) (bp); 874 spin_unlock(&pag->pag_buf_lock);
794 } else if (bp->b_flags & XBF_FS_MANAGED) {
795 spin_unlock(&hash->bh_lock);
796 } else { 875 } else {
876 xfs_buf_lru_del(bp);
797 ASSERT(!(bp->b_flags & (XBF_DELWRI|_XBF_DELWRI_Q))); 877 ASSERT(!(bp->b_flags & (XBF_DELWRI|_XBF_DELWRI_Q)));
798 list_del_init(&bp->b_hash_list); 878 rb_erase(&bp->b_rbnode, &pag->pag_buf_tree);
799 spin_unlock(&hash->bh_lock); 879 spin_unlock(&pag->pag_buf_lock);
880 xfs_perag_put(pag);
800 xfs_buf_free(bp); 881 xfs_buf_free(bp);
801 } 882 }
802 } 883 }
@@ -804,20 +885,15 @@ xfs_buf_rele(
804 885
805 886
806/* 887/*
807 * Mutual exclusion on buffers. Locking model: 888 * Lock a buffer object, if it is not already locked.
808 * 889 *
809 * Buffers associated with inodes for which buffer locking 890 * If we come across a stale, pinned, locked buffer, we know that we are
810 * is not enabled are not protected by semaphores, and are 891 * being asked to lock a buffer that has been reallocated. Because it is
811 * assumed to be exclusively owned by the caller. There is a 892 * pinned, we know that the log has not been pushed to disk and hence it
812 * spinlock in the buffer, used by the caller when concurrent 893 * will still be locked. Rather than continuing to have trylock attempts
813 * access is possible. 894 * fail until someone else pushes the log, push it ourselves before
814 */ 895 * returning. This means that the xfsaild will not get stuck trying
815 896 * to push on stale inode buffers.
816/*
817 * Locks a buffer object, if it is not already locked.
818 * Note that this in no way locks the underlying pages, so it is only
819 * useful for synchronizing concurrent use of buffer objects, not for
820 * synchronizing independent access to the underlying pages.
821 */ 897 */
822int 898int
823xfs_buf_cond_lock( 899xfs_buf_cond_lock(
@@ -828,6 +904,8 @@ xfs_buf_cond_lock(
828 locked = down_trylock(&bp->b_sema) == 0; 904 locked = down_trylock(&bp->b_sema) == 0;
829 if (locked) 905 if (locked)
830 XB_SET_OWNER(bp); 906 XB_SET_OWNER(bp);
907 else if (atomic_read(&bp->b_pin_count) && (bp->b_flags & XBF_STALE))
908 xfs_log_force(bp->b_target->bt_mount, 0);
831 909
832 trace_xfs_buf_cond_lock(bp, _RET_IP_); 910 trace_xfs_buf_cond_lock(bp, _RET_IP_);
833 return locked ? 0 : -EBUSY; 911 return locked ? 0 : -EBUSY;
@@ -841,10 +919,7 @@ xfs_buf_lock_value(
841} 919}
842 920
843/* 921/*
844 * Locks a buffer object. 922 * Lock a buffer object.
845 * Note that this in no way locks the underlying pages, so it is only
846 * useful for synchronizing concurrent use of buffer objects, not for
847 * synchronizing independent access to the underlying pages.
848 * 923 *
849 * If we come across a stale, pinned, locked buffer, we know that we 924 * If we come across a stale, pinned, locked buffer, we know that we
850 * are being asked to lock a buffer that has been reallocated. Because 925 * are being asked to lock a buffer that has been reallocated. Because
@@ -859,9 +934,7 @@ xfs_buf_lock(
859 trace_xfs_buf_lock(bp, _RET_IP_); 934 trace_xfs_buf_lock(bp, _RET_IP_);
860 935
861 if (atomic_read(&bp->b_pin_count) && (bp->b_flags & XBF_STALE)) 936 if (atomic_read(&bp->b_pin_count) && (bp->b_flags & XBF_STALE))
862 xfs_log_force(bp->b_mount, 0); 937 xfs_log_force(bp->b_target->bt_mount, 0);
863 if (atomic_read(&bp->b_io_remaining))
864 blk_run_address_space(bp->b_target->bt_mapping);
865 down(&bp->b_sema); 938 down(&bp->b_sema);
866 XB_SET_OWNER(bp); 939 XB_SET_OWNER(bp);
867 940
@@ -905,9 +978,7 @@ xfs_buf_wait_unpin(
905 set_current_state(TASK_UNINTERRUPTIBLE); 978 set_current_state(TASK_UNINTERRUPTIBLE);
906 if (atomic_read(&bp->b_pin_count) == 0) 979 if (atomic_read(&bp->b_pin_count) == 0)
907 break; 980 break;
908 if (atomic_read(&bp->b_io_remaining)) 981 io_schedule();
909 blk_run_address_space(bp->b_target->bt_mapping);
910 schedule();
911 } 982 }
912 remove_wait_queue(&bp->b_waiters, &wait); 983 remove_wait_queue(&bp->b_waiters, &wait);
913 set_current_state(TASK_RUNNING); 984 set_current_state(TASK_RUNNING);
@@ -924,19 +995,7 @@ xfs_buf_iodone_work(
924 xfs_buf_t *bp = 995 xfs_buf_t *bp =
925 container_of(work, xfs_buf_t, b_iodone_work); 996 container_of(work, xfs_buf_t, b_iodone_work);
926 997
927 /* 998 if (bp->b_iodone)
928 * We can get an EOPNOTSUPP to ordered writes. Here we clear the
929 * ordered flag and reissue them. Because we can't tell the higher
930 * layers directly that they should not issue ordered I/O anymore, they
931 * need to check if the _XFS_BARRIER_FAILED flag was set during I/O completion.
932 */
933 if ((bp->b_error == EOPNOTSUPP) &&
934 (bp->b_flags & (XBF_ORDERED|XBF_ASYNC)) == (XBF_ORDERED|XBF_ASYNC)) {
935 trace_xfs_buf_ordered_retry(bp, _RET_IP_);
936 bp->b_flags &= ~XBF_ORDERED;
937 bp->b_flags |= _XFS_BARRIER_FAILED;
938 xfs_buf_iorequest(bp);
939 } else if (bp->b_iodone)
940 (*(bp->b_iodone))(bp); 999 (*(bp->b_iodone))(bp);
941 else if (bp->b_flags & XBF_ASYNC) 1000 else if (bp->b_flags & XBF_ASYNC)
942 xfs_buf_relse(bp); 1001 xfs_buf_relse(bp);
@@ -982,7 +1041,6 @@ xfs_bwrite(
982{ 1041{
983 int error; 1042 int error;
984 1043
985 bp->b_mount = mp;
986 bp->b_flags |= XBF_WRITE; 1044 bp->b_flags |= XBF_WRITE;
987 bp->b_flags &= ~(XBF_ASYNC | XBF_READ); 1045 bp->b_flags &= ~(XBF_ASYNC | XBF_READ);
988 1046
@@ -1003,8 +1061,6 @@ xfs_bdwrite(
1003{ 1061{
1004 trace_xfs_buf_bdwrite(bp, _RET_IP_); 1062 trace_xfs_buf_bdwrite(bp, _RET_IP_);
1005 1063
1006 bp->b_mount = mp;
1007
1008 bp->b_flags &= ~XBF_READ; 1064 bp->b_flags &= ~XBF_READ;
1009 bp->b_flags |= (XBF_DELWRI | XBF_ASYNC); 1065 bp->b_flags |= (XBF_DELWRI | XBF_ASYNC);
1010 1066
@@ -1013,7 +1069,7 @@ xfs_bdwrite(
1013 1069
1014/* 1070/*
1015 * Called when we want to stop a buffer from getting written or read. 1071 * Called when we want to stop a buffer from getting written or read.
1016 * We attach the EIO error, muck with its flags, and call biodone 1072 * We attach the EIO error, muck with its flags, and call xfs_buf_ioend
1017 * so that the proper iodone callbacks get called. 1073 * so that the proper iodone callbacks get called.
1018 */ 1074 */
1019STATIC int 1075STATIC int
@@ -1030,21 +1086,21 @@ xfs_bioerror(
1030 XFS_BUF_ERROR(bp, EIO); 1086 XFS_BUF_ERROR(bp, EIO);
1031 1087
1032 /* 1088 /*
1033 * We're calling biodone, so delete XBF_DONE flag. 1089 * We're calling xfs_buf_ioend, so delete XBF_DONE flag.
1034 */ 1090 */
1035 XFS_BUF_UNREAD(bp); 1091 XFS_BUF_UNREAD(bp);
1036 XFS_BUF_UNDELAYWRITE(bp); 1092 XFS_BUF_UNDELAYWRITE(bp);
1037 XFS_BUF_UNDONE(bp); 1093 XFS_BUF_UNDONE(bp);
1038 XFS_BUF_STALE(bp); 1094 XFS_BUF_STALE(bp);
1039 1095
1040 xfs_biodone(bp); 1096 xfs_buf_ioend(bp, 0);
1041 1097
1042 return EIO; 1098 return EIO;
1043} 1099}
1044 1100
1045/* 1101/*
1046 * Same as xfs_bioerror, except that we are releasing the buffer 1102 * Same as xfs_bioerror, except that we are releasing the buffer
1047 * here ourselves, and avoiding the biodone call. 1103 * here ourselves, and avoiding the xfs_buf_ioend call.
1048 * This is meant for userdata errors; metadata bufs come with 1104 * This is meant for userdata errors; metadata bufs come with
1049 * iodone functions attached, so that we can track down errors. 1105 * iodone functions attached, so that we can track down errors.
1050 */ 1106 */
@@ -1093,7 +1149,7 @@ int
1093xfs_bdstrat_cb( 1149xfs_bdstrat_cb(
1094 struct xfs_buf *bp) 1150 struct xfs_buf *bp)
1095{ 1151{
1096 if (XFS_FORCED_SHUTDOWN(bp->b_mount)) { 1152 if (XFS_FORCED_SHUTDOWN(bp->b_target->bt_mount)) {
1097 trace_xfs_bdstrat_shut(bp, _RET_IP_); 1153 trace_xfs_bdstrat_shut(bp, _RET_IP_);
1098 /* 1154 /*
1099 * Metadata write that didn't get logged but 1155 * Metadata write that didn't get logged but
@@ -1134,10 +1190,8 @@ _xfs_buf_ioend(
1134 xfs_buf_t *bp, 1190 xfs_buf_t *bp,
1135 int schedule) 1191 int schedule)
1136{ 1192{
1137 if (atomic_dec_and_test(&bp->b_io_remaining) == 1) { 1193 if (atomic_dec_and_test(&bp->b_io_remaining) == 1)
1138 bp->b_flags &= ~_XBF_PAGE_LOCKED;
1139 xfs_buf_ioend(bp, schedule); 1194 xfs_buf_ioend(bp, schedule);
1140 }
1141} 1195}
1142 1196
1143STATIC void 1197STATIC void
@@ -1146,35 +1200,12 @@ xfs_buf_bio_end_io(
1146 int error) 1200 int error)
1147{ 1201{
1148 xfs_buf_t *bp = (xfs_buf_t *)bio->bi_private; 1202 xfs_buf_t *bp = (xfs_buf_t *)bio->bi_private;
1149 unsigned int blocksize = bp->b_target->bt_bsize;
1150 struct bio_vec *bvec = bio->bi_io_vec + bio->bi_vcnt - 1;
1151 1203
1152 xfs_buf_ioerror(bp, -error); 1204 xfs_buf_ioerror(bp, -error);
1153 1205
1154 if (!error && xfs_buf_is_vmapped(bp) && (bp->b_flags & XBF_READ)) 1206 if (!error && xfs_buf_is_vmapped(bp) && (bp->b_flags & XBF_READ))
1155 invalidate_kernel_vmap_range(bp->b_addr, xfs_buf_vmap_len(bp)); 1207 invalidate_kernel_vmap_range(bp->b_addr, xfs_buf_vmap_len(bp));
1156 1208
1157 do {
1158 struct page *page = bvec->bv_page;
1159
1160 ASSERT(!PagePrivate(page));
1161 if (unlikely(bp->b_error)) {
1162 if (bp->b_flags & XBF_READ)
1163 ClearPageUptodate(page);
1164 } else if (blocksize >= PAGE_CACHE_SIZE) {
1165 SetPageUptodate(page);
1166 } else if (!PagePrivate(page) &&
1167 (bp->b_flags & _XBF_PAGE_CACHE)) {
1168 set_page_region(page, bvec->bv_offset, bvec->bv_len);
1169 }
1170
1171 if (--bvec >= bio->bi_io_vec)
1172 prefetchw(&bvec->bv_page->flags);
1173
1174 if (bp->b_flags & _XBF_PAGE_LOCKED)
1175 unlock_page(page);
1176 } while (bvec >= bio->bi_io_vec);
1177
1178 _xfs_buf_ioend(bp, 1); 1209 _xfs_buf_ioend(bp, 1);
1179 bio_put(bio); 1210 bio_put(bio);
1180} 1211}
@@ -1188,14 +1219,13 @@ _xfs_buf_ioapply(
1188 int offset = bp->b_offset; 1219 int offset = bp->b_offset;
1189 int size = bp->b_count_desired; 1220 int size = bp->b_count_desired;
1190 sector_t sector = bp->b_bn; 1221 sector_t sector = bp->b_bn;
1191 unsigned int blocksize = bp->b_target->bt_bsize;
1192 1222
1193 total_nr_pages = bp->b_page_count; 1223 total_nr_pages = bp->b_page_count;
1194 map_i = 0; 1224 map_i = 0;
1195 1225
1196 if (bp->b_flags & XBF_ORDERED) { 1226 if (bp->b_flags & XBF_ORDERED) {
1197 ASSERT(!(bp->b_flags & XBF_READ)); 1227 ASSERT(!(bp->b_flags & XBF_READ));
1198 rw = WRITE_BARRIER; 1228 rw = WRITE_FLUSH_FUA;
1199 } else if (bp->b_flags & XBF_LOG_BUFFER) { 1229 } else if (bp->b_flags & XBF_LOG_BUFFER) {
1200 ASSERT(!(bp->b_flags & XBF_READ_AHEAD)); 1230 ASSERT(!(bp->b_flags & XBF_READ_AHEAD));
1201 bp->b_flags &= ~_XBF_RUN_QUEUES; 1231 bp->b_flags &= ~_XBF_RUN_QUEUES;
@@ -1209,29 +1239,6 @@ _xfs_buf_ioapply(
1209 (bp->b_flags & XBF_READ_AHEAD) ? READA : READ; 1239 (bp->b_flags & XBF_READ_AHEAD) ? READA : READ;
1210 } 1240 }
1211 1241
1212 /* Special code path for reading a sub page size buffer in --
1213 * we populate up the whole page, and hence the other metadata
1214 * in the same page. This optimization is only valid when the
1215 * filesystem block size is not smaller than the page size.
1216 */
1217 if ((bp->b_buffer_length < PAGE_CACHE_SIZE) &&
1218 ((bp->b_flags & (XBF_READ|_XBF_PAGE_LOCKED)) ==
1219 (XBF_READ|_XBF_PAGE_LOCKED)) &&
1220 (blocksize >= PAGE_CACHE_SIZE)) {
1221 bio = bio_alloc(GFP_NOIO, 1);
1222
1223 bio->bi_bdev = bp->b_target->bt_bdev;
1224 bio->bi_sector = sector - (offset >> BBSHIFT);
1225 bio->bi_end_io = xfs_buf_bio_end_io;
1226 bio->bi_private = bp;
1227
1228 bio_add_page(bio, bp->b_pages[0], PAGE_CACHE_SIZE, 0);
1229 size = 0;
1230
1231 atomic_inc(&bp->b_io_remaining);
1232
1233 goto submit_io;
1234 }
1235 1242
1236next_chunk: 1243next_chunk:
1237 atomic_inc(&bp->b_io_remaining); 1244 atomic_inc(&bp->b_io_remaining);
@@ -1245,8 +1252,9 @@ next_chunk:
1245 bio->bi_end_io = xfs_buf_bio_end_io; 1252 bio->bi_end_io = xfs_buf_bio_end_io;
1246 bio->bi_private = bp; 1253 bio->bi_private = bp;
1247 1254
1255
1248 for (; size && nr_pages; nr_pages--, map_i++) { 1256 for (; size && nr_pages; nr_pages--, map_i++) {
1249 int rbytes, nbytes = PAGE_CACHE_SIZE - offset; 1257 int rbytes, nbytes = PAGE_SIZE - offset;
1250 1258
1251 if (nbytes > size) 1259 if (nbytes > size)
1252 nbytes = size; 1260 nbytes = size;
@@ -1261,7 +1269,6 @@ next_chunk:
1261 total_nr_pages--; 1269 total_nr_pages--;
1262 } 1270 }
1263 1271
1264submit_io:
1265 if (likely(bio->bi_size)) { 1272 if (likely(bio->bi_size)) {
1266 if (xfs_buf_is_vmapped(bp)) { 1273 if (xfs_buf_is_vmapped(bp)) {
1267 flush_kernel_vmap_range(bp->b_addr, 1274 flush_kernel_vmap_range(bp->b_addr,
@@ -1271,18 +1278,7 @@ submit_io:
1271 if (size) 1278 if (size)
1272 goto next_chunk; 1279 goto next_chunk;
1273 } else { 1280 } else {
1274 /*
1275 * if we get here, no pages were added to the bio. However,
1276 * we can't just error out here - if the pages are locked then
1277 * we have to unlock them otherwise we can hang on a later
1278 * access to the page.
1279 */
1280 xfs_buf_ioerror(bp, EIO); 1281 xfs_buf_ioerror(bp, EIO);
1281 if (bp->b_flags & _XBF_PAGE_LOCKED) {
1282 int i;
1283 for (i = 0; i < bp->b_page_count; i++)
1284 unlock_page(bp->b_pages[i]);
1285 }
1286 bio_put(bio); 1282 bio_put(bio);
1287 } 1283 }
1288} 1284}
@@ -1327,8 +1323,6 @@ xfs_buf_iowait(
1327{ 1323{
1328 trace_xfs_buf_iowait(bp, _RET_IP_); 1324 trace_xfs_buf_iowait(bp, _RET_IP_);
1329 1325
1330 if (atomic_read(&bp->b_io_remaining))
1331 blk_run_address_space(bp->b_target->bt_mapping);
1332 wait_for_completion(&bp->b_iowait); 1326 wait_for_completion(&bp->b_iowait);
1333 1327
1334 trace_xfs_buf_iowait_done(bp, _RET_IP_); 1328 trace_xfs_buf_iowait_done(bp, _RET_IP_);
@@ -1346,8 +1340,8 @@ xfs_buf_offset(
1346 return XFS_BUF_PTR(bp) + offset; 1340 return XFS_BUF_PTR(bp) + offset;
1347 1341
1348 offset += bp->b_offset; 1342 offset += bp->b_offset;
1349 page = bp->b_pages[offset >> PAGE_CACHE_SHIFT]; 1343 page = bp->b_pages[offset >> PAGE_SHIFT];
1350 return (xfs_caddr_t)page_address(page) + (offset & (PAGE_CACHE_SIZE-1)); 1344 return (xfs_caddr_t)page_address(page) + (offset & (PAGE_SIZE-1));
1351} 1345}
1352 1346
1353/* 1347/*
@@ -1369,9 +1363,9 @@ xfs_buf_iomove(
1369 page = bp->b_pages[xfs_buf_btoct(boff + bp->b_offset)]; 1363 page = bp->b_pages[xfs_buf_btoct(boff + bp->b_offset)];
1370 cpoff = xfs_buf_poff(boff + bp->b_offset); 1364 cpoff = xfs_buf_poff(boff + bp->b_offset);
1371 csize = min_t(size_t, 1365 csize = min_t(size_t,
1372 PAGE_CACHE_SIZE-cpoff, bp->b_count_desired-boff); 1366 PAGE_SIZE-cpoff, bp->b_count_desired-boff);
1373 1367
1374 ASSERT(((csize + cpoff) <= PAGE_CACHE_SIZE)); 1368 ASSERT(((csize + cpoff) <= PAGE_SIZE));
1375 1369
1376 switch (mode) { 1370 switch (mode) {
1377 case XBRW_ZERO: 1371 case XBRW_ZERO:
@@ -1394,89 +1388,84 @@ xfs_buf_iomove(
1394 */ 1388 */
1395 1389
1396/* 1390/*
1397 * Wait for any bufs with callbacks that have been submitted but 1391 * Wait for any bufs with callbacks that have been submitted but have not yet
1398 * have not yet returned... walk the hash list for the target. 1392 * returned. These buffers will have an elevated hold count, so wait on those
1393 * while freeing all the buffers only held by the LRU.
1399 */ 1394 */
1400void 1395void
1401xfs_wait_buftarg( 1396xfs_wait_buftarg(
1402 xfs_buftarg_t *btp) 1397 struct xfs_buftarg *btp)
1403{ 1398{
1404 xfs_buf_t *bp, *n; 1399 struct xfs_buf *bp;
1405 xfs_bufhash_t *hash; 1400
1406 uint i; 1401restart:
1407 1402 spin_lock(&btp->bt_lru_lock);
1408 for (i = 0; i < (1 << btp->bt_hashshift); i++) { 1403 while (!list_empty(&btp->bt_lru)) {
1409 hash = &btp->bt_hash[i]; 1404 bp = list_first_entry(&btp->bt_lru, struct xfs_buf, b_lru);
1410again: 1405 if (atomic_read(&bp->b_hold) > 1) {
1411 spin_lock(&hash->bh_lock); 1406 spin_unlock(&btp->bt_lru_lock);
1412 list_for_each_entry_safe(bp, n, &hash->bh_list, b_hash_list) { 1407 delay(100);
1413 ASSERT(btp == bp->b_target); 1408 goto restart;
1414 if (!(bp->b_flags & XBF_FS_MANAGED)) {
1415 spin_unlock(&hash->bh_lock);
1416 /*
1417 * Catch superblock reference count leaks
1418 * immediately
1419 */
1420 BUG_ON(bp->b_bn == 0);
1421 delay(100);
1422 goto again;
1423 }
1424 } 1409 }
1425 spin_unlock(&hash->bh_lock); 1410 /*
1411 * clear the LRU reference count so the bufer doesn't get
1412 * ignored in xfs_buf_rele().
1413 */
1414 atomic_set(&bp->b_lru_ref, 0);
1415 spin_unlock(&btp->bt_lru_lock);
1416 xfs_buf_rele(bp);
1417 spin_lock(&btp->bt_lru_lock);
1426 } 1418 }
1419 spin_unlock(&btp->bt_lru_lock);
1427} 1420}
1428 1421
1429/* 1422int
1430 * Allocate buffer hash table for a given target. 1423xfs_buftarg_shrink(
1431 * For devices containing metadata (i.e. not the log/realtime devices) 1424 struct shrinker *shrink,
1432 * we need to allocate a much larger hash table. 1425 struct shrink_control *sc)
1433 */
1434STATIC void
1435xfs_alloc_bufhash(
1436 xfs_buftarg_t *btp,
1437 int external)
1438{ 1426{
1439 unsigned int i; 1427 struct xfs_buftarg *btp = container_of(shrink,
1428 struct xfs_buftarg, bt_shrinker);
1429 struct xfs_buf *bp;
1430 int nr_to_scan = sc->nr_to_scan;
1431 LIST_HEAD(dispose);
1440 1432
1441 btp->bt_hashshift = external ? 3 : 12; /* 8 or 4096 buckets */ 1433 if (!nr_to_scan)
1442 btp->bt_hash = kmem_zalloc_large((1 << btp->bt_hashshift) * 1434 return btp->bt_lru_nr;
1443 sizeof(xfs_bufhash_t));
1444 for (i = 0; i < (1 << btp->bt_hashshift); i++) {
1445 spin_lock_init(&btp->bt_hash[i].bh_lock);
1446 INIT_LIST_HEAD(&btp->bt_hash[i].bh_list);
1447 }
1448}
1449 1435
1450STATIC void 1436 spin_lock(&btp->bt_lru_lock);
1451xfs_free_bufhash( 1437 while (!list_empty(&btp->bt_lru)) {
1452 xfs_buftarg_t *btp) 1438 if (nr_to_scan-- <= 0)
1453{ 1439 break;
1454 kmem_free_large(btp->bt_hash);
1455 btp->bt_hash = NULL;
1456}
1457 1440
1458/* 1441 bp = list_first_entry(&btp->bt_lru, struct xfs_buf, b_lru);
1459 * buftarg list for delwrite queue processing
1460 */
1461static LIST_HEAD(xfs_buftarg_list);
1462static DEFINE_SPINLOCK(xfs_buftarg_lock);
1463 1442
1464STATIC void 1443 /*
1465xfs_register_buftarg( 1444 * Decrement the b_lru_ref count unless the value is already
1466 xfs_buftarg_t *btp) 1445 * zero. If the value is already zero, we need to reclaim the
1467{ 1446 * buffer, otherwise it gets another trip through the LRU.
1468 spin_lock(&xfs_buftarg_lock); 1447 */
1469 list_add(&btp->bt_list, &xfs_buftarg_list); 1448 if (!atomic_add_unless(&bp->b_lru_ref, -1, 0)) {
1470 spin_unlock(&xfs_buftarg_lock); 1449 list_move_tail(&bp->b_lru, &btp->bt_lru);
1471} 1450 continue;
1451 }
1472 1452
1473STATIC void 1453 /*
1474xfs_unregister_buftarg( 1454 * remove the buffer from the LRU now to avoid needing another
1475 xfs_buftarg_t *btp) 1455 * lock round trip inside xfs_buf_rele().
1476{ 1456 */
1477 spin_lock(&xfs_buftarg_lock); 1457 list_move(&bp->b_lru, &dispose);
1478 list_del(&btp->bt_list); 1458 btp->bt_lru_nr--;
1479 spin_unlock(&xfs_buftarg_lock); 1459 }
1460 spin_unlock(&btp->bt_lru_lock);
1461
1462 while (!list_empty(&dispose)) {
1463 bp = list_first_entry(&dispose, struct xfs_buf, b_lru);
1464 list_del_init(&bp->b_lru);
1465 xfs_buf_rele(bp);
1466 }
1467
1468 return btp->bt_lru_nr;
1480} 1469}
1481 1470
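Aside (not part of the patch): the heart of the shrinker above is its reference-ageing step. atomic_add_unless(&v, -1, 0) decrements v and returns non-zero unless v is already zero, so a buffer survives b_lru_ref passes of the shrinker before it becomes a disposal candidate. A standalone sketch with an illustrative name:

#include <asm/atomic.h>		/* linux/atomic.h on later kernels */
#include <linux/types.h>

/* Illustrative only: age one LRU reference; returns true when the
 * object should now be reclaimed rather than rotated on the LRU. */
static bool lru_ref_age(atomic_t *lru_ref)
{
	if (atomic_add_unless(lru_ref, -1, 0))
		return false;	/* still referenced: rotate, keep it */
	return true;		/* already zero: dispose of it */
}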
1482void 1471void
@@ -1484,18 +1473,13 @@ xfs_free_buftarg(
1484 struct xfs_mount *mp, 1473 struct xfs_mount *mp,
1485 struct xfs_buftarg *btp) 1474 struct xfs_buftarg *btp)
1486{ 1475{
1476 unregister_shrinker(&btp->bt_shrinker);
1477
1487 xfs_flush_buftarg(btp, 1); 1478 xfs_flush_buftarg(btp, 1);
1488 if (mp->m_flags & XFS_MOUNT_BARRIER) 1479 if (mp->m_flags & XFS_MOUNT_BARRIER)
1489 xfs_blkdev_issue_flush(btp); 1480 xfs_blkdev_issue_flush(btp);
1490 xfs_free_bufhash(btp);
1491 iput(btp->bt_mapping->host);
1492 1481
1493 /* Unregister the buftarg first so that we don't get a
1494 * wakeup finding a non-existent task
1495 */
1496 xfs_unregister_buftarg(btp);
1497 kthread_stop(btp->bt_task); 1482 kthread_stop(btp->bt_task);
1498
1499 kmem_free(btp); 1483 kmem_free(btp);
1500} 1484}
1501 1485
@@ -1511,21 +1495,12 @@ xfs_setsize_buftarg_flags(
1511 btp->bt_smask = sectorsize - 1; 1495 btp->bt_smask = sectorsize - 1;
1512 1496
1513 if (set_blocksize(btp->bt_bdev, sectorsize)) { 1497 if (set_blocksize(btp->bt_bdev, sectorsize)) {
1514 printk(KERN_WARNING 1498 xfs_warn(btp->bt_mount,
1515 "XFS: Cannot set_blocksize to %u on device %s\n", 1499 "Cannot set_blocksize to %u on device %s\n",
1516 sectorsize, XFS_BUFTARG_NAME(btp)); 1500 sectorsize, XFS_BUFTARG_NAME(btp));
1517 return EINVAL; 1501 return EINVAL;
1518 } 1502 }
1519 1503
1520 if (verbose &&
1521 (PAGE_CACHE_SIZE / BITS_PER_LONG) > sectorsize) {
1522 printk(KERN_WARNING
1523 "XFS: %u byte sectors in use on device %s. "
1524 "This is suboptimal; %u or greater is ideal.\n",
1525 sectorsize, XFS_BUFTARG_NAME(btp),
1526 (unsigned int)PAGE_CACHE_SIZE / BITS_PER_LONG);
1527 }
1528
1529 return 0; 1504 return 0;
1530} 1505}
1531 1506
@@ -1540,7 +1515,7 @@ xfs_setsize_buftarg_early(
1540 struct block_device *bdev) 1515 struct block_device *bdev)
1541{ 1516{
1542 return xfs_setsize_buftarg_flags(btp, 1517 return xfs_setsize_buftarg_flags(btp,
1543 PAGE_CACHE_SIZE, bdev_logical_block_size(bdev), 0); 1518 PAGE_SIZE, bdev_logical_block_size(bdev), 0);
1544} 1519}
1545 1520
1546int 1521int
@@ -1553,62 +1528,22 @@ xfs_setsize_buftarg(
1553} 1528}
1554 1529
1555STATIC int 1530STATIC int
1556xfs_mapping_buftarg(
1557 xfs_buftarg_t *btp,
1558 struct block_device *bdev)
1559{
1560 struct backing_dev_info *bdi;
1561 struct inode *inode;
1562 struct address_space *mapping;
1563 static const struct address_space_operations mapping_aops = {
1564 .sync_page = block_sync_page,
1565 .migratepage = fail_migrate_page,
1566 };
1567
1568 inode = new_inode(bdev->bd_inode->i_sb);
1569 if (!inode) {
1570 printk(KERN_WARNING
1571 "XFS: Cannot allocate mapping inode for device %s\n",
1572 XFS_BUFTARG_NAME(btp));
1573 return ENOMEM;
1574 }
1575 inode->i_mode = S_IFBLK;
1576 inode->i_bdev = bdev;
1577 inode->i_rdev = bdev->bd_dev;
1578 bdi = blk_get_backing_dev_info(bdev);
1579 if (!bdi)
1580 bdi = &default_backing_dev_info;
1581 mapping = &inode->i_data;
1582 mapping->a_ops = &mapping_aops;
1583 mapping->backing_dev_info = bdi;
1584 mapping_set_gfp_mask(mapping, GFP_NOFS);
1585 btp->bt_mapping = mapping;
1586 return 0;
1587}
1588
1589STATIC int
1590xfs_alloc_delwrite_queue( 1531xfs_alloc_delwrite_queue(
1591 xfs_buftarg_t *btp, 1532 xfs_buftarg_t *btp,
1592 const char *fsname) 1533 const char *fsname)
1593{ 1534{
1594 int error = 0;
1595
1596 INIT_LIST_HEAD(&btp->bt_list);
1597 INIT_LIST_HEAD(&btp->bt_delwrite_queue); 1535 INIT_LIST_HEAD(&btp->bt_delwrite_queue);
1598 spin_lock_init(&btp->bt_delwrite_lock); 1536 spin_lock_init(&btp->bt_delwrite_lock);
1599 btp->bt_flags = 0; 1537 btp->bt_flags = 0;
1600 btp->bt_task = kthread_run(xfsbufd, btp, "xfsbufd/%s", fsname); 1538 btp->bt_task = kthread_run(xfsbufd, btp, "xfsbufd/%s", fsname);
1601 if (IS_ERR(btp->bt_task)) { 1539 if (IS_ERR(btp->bt_task))
1602 error = PTR_ERR(btp->bt_task); 1540 return PTR_ERR(btp->bt_task);
1603 goto out_error; 1541 return 0;
1604 }
1605 xfs_register_buftarg(btp);
1606out_error:
1607 return error;
1608} 1542}
1609 1543
1610xfs_buftarg_t * 1544xfs_buftarg_t *
1611xfs_alloc_buftarg( 1545xfs_alloc_buftarg(
1546 struct xfs_mount *mp,
1612 struct block_device *bdev, 1547 struct block_device *bdev,
1613 int external, 1548 int external,
1614 const char *fsname) 1549 const char *fsname)
@@ -1617,15 +1552,22 @@ xfs_alloc_buftarg(
1617 1552
1618 btp = kmem_zalloc(sizeof(*btp), KM_SLEEP); 1553 btp = kmem_zalloc(sizeof(*btp), KM_SLEEP);
1619 1554
1555 btp->bt_mount = mp;
1620 btp->bt_dev = bdev->bd_dev; 1556 btp->bt_dev = bdev->bd_dev;
1621 btp->bt_bdev = bdev; 1557 btp->bt_bdev = bdev;
1622 if (xfs_setsize_buftarg_early(btp, bdev)) 1558 btp->bt_bdi = blk_get_backing_dev_info(bdev);
1559 if (!btp->bt_bdi)
1623 goto error; 1560 goto error;
1624 if (xfs_mapping_buftarg(btp, bdev)) 1561
1562 INIT_LIST_HEAD(&btp->bt_lru);
1563 spin_lock_init(&btp->bt_lru_lock);
1564 if (xfs_setsize_buftarg_early(btp, bdev))
1625 goto error; 1565 goto error;
1626 if (xfs_alloc_delwrite_queue(btp, fsname)) 1566 if (xfs_alloc_delwrite_queue(btp, fsname))
1627 goto error; 1567 goto error;
1628 xfs_alloc_bufhash(btp, external); 1568 btp->bt_shrinker.shrink = xfs_buftarg_shrink;
1569 btp->bt_shrinker.seeks = DEFAULT_SEEKS;
1570 register_shrinker(&btp->bt_shrinker);
1629 return btp; 1571 return btp;
1630 1572
1631error: 1573error:
@@ -1730,27 +1672,6 @@ xfs_buf_runall_queues(
1730 flush_workqueue(queue); 1672 flush_workqueue(queue);
1731} 1673}
1732 1674
1733STATIC int
1734xfsbufd_wakeup(
1735 struct shrinker *shrink,
1736 int priority,
1737 gfp_t mask)
1738{
1739 xfs_buftarg_t *btp;
1740
1741 spin_lock(&xfs_buftarg_lock);
1742 list_for_each_entry(btp, &xfs_buftarg_list, bt_list) {
1743 if (test_bit(XBT_FORCE_SLEEP, &btp->bt_flags))
1744 continue;
1745 if (list_empty(&btp->bt_delwrite_queue))
1746 continue;
1747 set_bit(XBT_FORCE_FLUSH, &btp->bt_flags);
1748 wake_up_process(btp->bt_task);
1749 }
1750 spin_unlock(&xfs_buftarg_lock);
1751 return 0;
1752}
1753
1754/* 1675/*
1755 * Move as many buffers as specified to the supplied list 1676 * Move as many buffers as specified to the supplied list
 1756 * indicating if we skipped any buffers to prevent deadlocks. 1677 * indicating if we skipped any buffers to prevent deadlocks.
@@ -1771,7 +1692,6 @@ xfs_buf_delwri_split(
1771 INIT_LIST_HEAD(list); 1692 INIT_LIST_HEAD(list);
1772 spin_lock(dwlk); 1693 spin_lock(dwlk);
1773 list_for_each_entry_safe(bp, n, dwq, b_list) { 1694 list_for_each_entry_safe(bp, n, dwq, b_list) {
1774 trace_xfs_buf_delwri_split(bp, _RET_IP_);
1775 ASSERT(bp->b_flags & XBF_DELWRI); 1695 ASSERT(bp->b_flags & XBF_DELWRI);
1776 1696
1777 if (!XFS_BUF_ISPINNED(bp) && !xfs_buf_cond_lock(bp)) { 1697 if (!XFS_BUF_ISPINNED(bp) && !xfs_buf_cond_lock(bp)) {
@@ -1785,6 +1705,7 @@ xfs_buf_delwri_split(
1785 _XBF_RUN_QUEUES); 1705 _XBF_RUN_QUEUES);
1786 bp->b_flags |= XBF_WRITE; 1706 bp->b_flags |= XBF_WRITE;
1787 list_move_tail(&bp->b_list, list); 1707 list_move_tail(&bp->b_list, list);
1708 trace_xfs_buf_delwri_split(bp, _RET_IP_);
1788 } else 1709 } else
1789 skipped++; 1710 skipped++;
1790 } 1711 }
@@ -1838,8 +1759,8 @@ xfsbufd(
1838 do { 1759 do {
1839 long age = xfs_buf_age_centisecs * msecs_to_jiffies(10); 1760 long age = xfs_buf_age_centisecs * msecs_to_jiffies(10);
1840 long tout = xfs_buf_timer_centisecs * msecs_to_jiffies(10); 1761 long tout = xfs_buf_timer_centisecs * msecs_to_jiffies(10);
1841 int count = 0;
1842 struct list_head tmp; 1762 struct list_head tmp;
1763 struct blk_plug plug;
1843 1764
1844 if (unlikely(freezing(current))) { 1765 if (unlikely(freezing(current))) {
1845 set_bit(XBT_FORCE_SLEEP, &target->bt_flags); 1766 set_bit(XBT_FORCE_SLEEP, &target->bt_flags);
@@ -1855,16 +1776,15 @@ xfsbufd(
1855 1776
1856 xfs_buf_delwri_split(target, &tmp, age); 1777 xfs_buf_delwri_split(target, &tmp, age);
1857 list_sort(NULL, &tmp, xfs_buf_cmp); 1778 list_sort(NULL, &tmp, xfs_buf_cmp);
1779
1780 blk_start_plug(&plug);
1858 while (!list_empty(&tmp)) { 1781 while (!list_empty(&tmp)) {
1859 struct xfs_buf *bp; 1782 struct xfs_buf *bp;
1860 bp = list_first_entry(&tmp, struct xfs_buf, b_list); 1783 bp = list_first_entry(&tmp, struct xfs_buf, b_list);
1861 list_del_init(&bp->b_list); 1784 list_del_init(&bp->b_list);
1862 xfs_bdstrat_cb(bp); 1785 xfs_bdstrat_cb(bp);
1863 count++;
1864 } 1786 }
1865 if (count) 1787 blk_finish_plug(&plug);
1866 blk_run_address_space(target->bt_mapping);
1867
1868 } while (!kthread_should_stop()); 1788 } while (!kthread_should_stop());
1869 1789
1870 return 0; 1790 return 0;
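Aside (not part of the patch): with request-queue unplugging no longer driven by blk_run_address_space(), xfsbufd batches its submissions inside an on-stack plug; the queued bios are dispatched to the driver at blk_finish_plug() (or if the task sleeps). A standalone sketch of the pattern, with an illustrative helper name; struct xfs_buf and xfs_bdstrat_cb() are from the surrounding file:

#include <linux/blkdev.h>
#include <linux/list.h>

/* Illustrative only: issue every buffer on "list" under one plug so the
 * block layer can merge and dispatch the requests as a batch. */
static void submit_buffer_list(struct list_head *list)
{
	struct xfs_buf *bp, *n;
	struct blk_plug plug;

	blk_start_plug(&plug);
	list_for_each_entry_safe(bp, n, list, b_list) {
		list_del_init(&bp->b_list);
		xfs_bdstrat_cb(bp);	/* queues the buffer's bio(s) */
	}
	blk_finish_plug(&plug);		/* flush the batched requests */
}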
@@ -1884,6 +1804,7 @@ xfs_flush_buftarg(
1884 int pincount = 0; 1804 int pincount = 0;
1885 LIST_HEAD(tmp_list); 1805 LIST_HEAD(tmp_list);
1886 LIST_HEAD(wait_list); 1806 LIST_HEAD(wait_list);
1807 struct blk_plug plug;
1887 1808
1888 xfs_buf_runall_queues(xfsconvertd_workqueue); 1809 xfs_buf_runall_queues(xfsconvertd_workqueue);
1889 xfs_buf_runall_queues(xfsdatad_workqueue); 1810 xfs_buf_runall_queues(xfsdatad_workqueue);
@@ -1898,6 +1819,8 @@ xfs_flush_buftarg(
1898 * we do that after issuing all the IO. 1819 * we do that after issuing all the IO.
1899 */ 1820 */
1900 list_sort(NULL, &tmp_list, xfs_buf_cmp); 1821 list_sort(NULL, &tmp_list, xfs_buf_cmp);
1822
1823 blk_start_plug(&plug);
1901 while (!list_empty(&tmp_list)) { 1824 while (!list_empty(&tmp_list)) {
1902 bp = list_first_entry(&tmp_list, struct xfs_buf, b_list); 1825 bp = list_first_entry(&tmp_list, struct xfs_buf, b_list);
1903 ASSERT(target == bp->b_target); 1826 ASSERT(target == bp->b_target);
@@ -1908,15 +1831,15 @@ xfs_flush_buftarg(
1908 } 1831 }
1909 xfs_bdstrat_cb(bp); 1832 xfs_bdstrat_cb(bp);
1910 } 1833 }
1834 blk_finish_plug(&plug);
1911 1835
1912 if (wait) { 1836 if (wait) {
1913 /* Expedite and wait for IO to complete. */ 1837 /* Wait for IO to complete. */
1914 blk_run_address_space(target->bt_mapping);
1915 while (!list_empty(&wait_list)) { 1838 while (!list_empty(&wait_list)) {
1916 bp = list_first_entry(&wait_list, struct xfs_buf, b_list); 1839 bp = list_first_entry(&wait_list, struct xfs_buf, b_list);
1917 1840
1918 list_del_init(&bp->b_list); 1841 list_del_init(&bp->b_list);
1919 xfs_iowait(bp); 1842 xfs_buf_iowait(bp);
1920 xfs_buf_relse(bp); 1843 xfs_buf_relse(bp);
1921 } 1844 }
1922 } 1845 }
@@ -1933,19 +1856,19 @@ xfs_buf_init(void)
1933 goto out; 1856 goto out;
1934 1857
1935 xfslogd_workqueue = alloc_workqueue("xfslogd", 1858 xfslogd_workqueue = alloc_workqueue("xfslogd",
1936 WQ_RESCUER | WQ_HIGHPRI, 1); 1859 WQ_MEM_RECLAIM | WQ_HIGHPRI, 1);
1937 if (!xfslogd_workqueue) 1860 if (!xfslogd_workqueue)
1938 goto out_free_buf_zone; 1861 goto out_free_buf_zone;
1939 1862
1940 xfsdatad_workqueue = create_workqueue("xfsdatad"); 1863 xfsdatad_workqueue = alloc_workqueue("xfsdatad", WQ_MEM_RECLAIM, 1);
1941 if (!xfsdatad_workqueue) 1864 if (!xfsdatad_workqueue)
1942 goto out_destroy_xfslogd_workqueue; 1865 goto out_destroy_xfslogd_workqueue;
1943 1866
1944 xfsconvertd_workqueue = create_workqueue("xfsconvertd"); 1867 xfsconvertd_workqueue = alloc_workqueue("xfsconvertd",
1868 WQ_MEM_RECLAIM, 1);
1945 if (!xfsconvertd_workqueue) 1869 if (!xfsconvertd_workqueue)
1946 goto out_destroy_xfsdatad_workqueue; 1870 goto out_destroy_xfsdatad_workqueue;
1947 1871
1948 register_shrinker(&xfs_buf_shake);
1949 return 0; 1872 return 0;
1950 1873
1951 out_destroy_xfsdatad_workqueue: 1874 out_destroy_xfsdatad_workqueue:
@@ -1961,7 +1884,6 @@ xfs_buf_init(void)
1961void 1884void
1962xfs_buf_terminate(void) 1885xfs_buf_terminate(void)
1963{ 1886{
1964 unregister_shrinker(&xfs_buf_shake);
1965 destroy_workqueue(xfsconvertd_workqueue); 1887 destroy_workqueue(xfsconvertd_workqueue);
1966 destroy_workqueue(xfsdatad_workqueue); 1888 destroy_workqueue(xfsdatad_workqueue);
1967 destroy_workqueue(xfslogd_workqueue); 1889 destroy_workqueue(xfslogd_workqueue);