author	Takashi Iwai <tiwai@suse.de>	2011-04-09 04:05:30 -0400
committer	Takashi Iwai <tiwai@suse.de>	2011-04-09 04:05:30 -0400
commit	664cee46e755b37204f1731cb8726db610f3486d (patch)
tree	11ed0d43eff14123534785cf25c0a2143e134e7e /fs/xfs/linux-2.6/xfs_buf.c
parent	a0334c50bf0ba7c720ed00f931e721c989efd233 (diff)
parent	4e29402fe4b2006c994eed5020c42b2cc87d9b42 (diff)
Merge branch 'fix/asoc' into for-linus
Diffstat (limited to 'fs/xfs/linux-2.6/xfs_buf.c')
-rw-r--r--	fs/xfs/linux-2.6/xfs_buf.c	344
1 file changed, 86 insertions(+), 258 deletions(-)
diff --git a/fs/xfs/linux-2.6/xfs_buf.c b/fs/xfs/linux-2.6/xfs_buf.c
index c05324d3282c..596bb2c9de42 100644
--- a/fs/xfs/linux-2.6/xfs_buf.c
+++ b/fs/xfs/linux-2.6/xfs_buf.c
@@ -94,75 +94,6 @@ xfs_buf_vmap_len(
 }
 
 /*
- * Page Region interfaces.
- *
- * For pages in filesystems where the blocksize is smaller than the
- * pagesize, we use the page->private field (long) to hold a bitmap
- * of uptodate regions within the page.
- *
- * Each such region is "bytes per page / bits per long" bytes long.
- *
- * NBPPR == number-of-bytes-per-page-region
- * BTOPR == bytes-to-page-region (rounded up)
- * BTOPRT == bytes-to-page-region-truncated (rounded down)
- */
-#if (BITS_PER_LONG == 32)
-#define PRSHIFT		(PAGE_CACHE_SHIFT - 5)	/* (32 == 1<<5) */
-#elif (BITS_PER_LONG == 64)
-#define PRSHIFT		(PAGE_CACHE_SHIFT - 6)	/* (64 == 1<<6) */
-#else
-#error BITS_PER_LONG must be 32 or 64
-#endif
-#define NBPPR		(PAGE_CACHE_SIZE/BITS_PER_LONG)
-#define BTOPR(b)	(((unsigned int)(b) + (NBPPR - 1)) >> PRSHIFT)
-#define BTOPRT(b)	(((unsigned int)(b) >> PRSHIFT))
-
-STATIC unsigned long
-page_region_mask(
-	size_t		offset,
-	size_t		length)
-{
-	unsigned long	mask;
-	int		first, final;
-
-	first = BTOPR(offset);
-	final = BTOPRT(offset + length - 1);
-	first = min(first, final);
-
-	mask = ~0UL;
-	mask <<= BITS_PER_LONG - (final - first);
-	mask >>= BITS_PER_LONG - (final);
-
-	ASSERT(offset + length <= PAGE_CACHE_SIZE);
-	ASSERT((final - first) < BITS_PER_LONG && (final - first) >= 0);
-
-	return mask;
-}
-
-STATIC void
-set_page_region(
-	struct page	*page,
-	size_t		offset,
-	size_t		length)
-{
-	set_page_private(page,
-		page_private(page) | page_region_mask(offset, length));
-	if (page_private(page) == ~0UL)
-		SetPageUptodate(page);
-}
-
-STATIC int
-test_page_region(
-	struct page	*page,
-	size_t		offset,
-	size_t		length)
-{
-	unsigned long	mask = page_region_mask(offset, length);
-
-	return (mask && (page_private(page) & mask) == mask);
-}
-
-/*
  * xfs_buf_lru_add - add a buffer to the LRU.
  *
  * The LRU takes a new reference to the buffer so that it will only be freed
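For readers skimming the removal above: the retired scheme carved each page into BITS_PER_LONG equal regions (64 bytes with 4k pages on 64-bit) and kept one uptodate bit per region in page->private; once every bit was set the whole page could be flagged uptodate. What follows is a minimal userspace sketch of that mask arithmetic, not kernel code - PAGE_SHIFT is pinned to 12 here and the main() test values are illustrative only:

/*
 * Standalone sketch of the retired page-region bitmap (userspace,
 * assumes 4096-byte pages and 64-bit longs; not the kernel build).
 */
#include <assert.h>
#include <stddef.h>
#include <stdio.h>

#define PAGE_SHIFT	12
#define PAGE_SIZE	(1UL << PAGE_SHIFT)
#define BITS_PER_LONG	64
#define PRSHIFT		(PAGE_SHIFT - 6)		/* 64 == 1<<6 */
#define NBPPR		(PAGE_SIZE / BITS_PER_LONG)	/* 64-byte regions */
#define BTOPR(b)	((((unsigned int)(b)) + (NBPPR - 1)) >> PRSHIFT)
#define BTOPRT(b)	(((unsigned int)(b)) >> PRSHIFT)

static unsigned long page_region_mask(size_t offset, size_t length)
{
	unsigned long mask;
	int first = BTOPR(offset);
	int final = BTOPRT(offset + length - 1);

	if (first > final)		/* open-coded min(first, final) */
		first = final;

	mask = ~0UL;
	mask <<= BITS_PER_LONG - (final - first);
	mask >>= BITS_PER_LONG - final;

	assert(offset + length <= PAGE_SIZE);
	return mask;
}

int main(void)
{
	/* a 512-byte span at offset 1024 -> bits 16-22, i.e. 0x7f0000 */
	printf("%#lx\n", page_region_mask(1024, 512));
	return 0;
}

Built with any C compiler this prints 0x7f0000; the removal is possible because the buffer cache no longer shares pages with the page cache, so sub-page uptodate state has no other consumers.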
@@ -332,7 +263,7 @@ xfs_buf_free(
 
 	ASSERT(list_empty(&bp->b_lru));
 
-	if (bp->b_flags & (_XBF_PAGE_CACHE|_XBF_PAGES)) {
+	if (bp->b_flags & _XBF_PAGES) {
 		uint		i;
 
 		if (xfs_buf_is_vmapped(bp))
@@ -342,25 +273,22 @@ xfs_buf_free(
 		for (i = 0; i < bp->b_page_count; i++) {
 			struct page	*page = bp->b_pages[i];
 
-			if (bp->b_flags & _XBF_PAGE_CACHE)
-				ASSERT(!PagePrivate(page));
-			page_cache_release(page);
+			__free_page(page);
 		}
-	}
+	} else if (bp->b_flags & _XBF_KMEM)
+		kmem_free(bp->b_addr);
 	_xfs_buf_free_pages(bp);
 	xfs_buf_deallocate(bp);
 }
 
 /*
- * Finds all pages for buffer in question and builds it's page list.
+ * Allocates all the pages for buffer in question and builds it's page list.
  */
 STATIC int
-_xfs_buf_lookup_pages(
+xfs_buf_allocate_memory(
 	xfs_buf_t		*bp,
 	uint			flags)
 {
-	struct address_space	*mapping = bp->b_target->bt_mapping;
-	size_t			blocksize = bp->b_target->bt_bsize;
 	size_t			size = bp->b_count_desired;
 	size_t			nbytes, offset;
 	gfp_t			gfp_mask = xb_to_gfp(flags);
@@ -369,29 +297,55 @@ _xfs_buf_lookup_pages(
 	xfs_off_t		end;
 	int			error;
 
+	/*
+	 * for buffers that are contained within a single page, just allocate
+	 * the memory from the heap - there's no need for the complexity of
+	 * page arrays to keep allocation down to order 0.
+	 */
+	if (bp->b_buffer_length < PAGE_SIZE) {
+		bp->b_addr = kmem_alloc(bp->b_buffer_length, xb_to_km(flags));
+		if (!bp->b_addr) {
+			/* low memory - use alloc_page loop instead */
+			goto use_alloc_page;
+		}
+
+		if (((unsigned long)(bp->b_addr + bp->b_buffer_length - 1) &
+								PAGE_MASK) !=
+		    ((unsigned long)bp->b_addr & PAGE_MASK)) {
+			/* b_addr spans two pages - use alloc_page instead */
+			kmem_free(bp->b_addr);
+			bp->b_addr = NULL;
+			goto use_alloc_page;
+		}
+		bp->b_offset = offset_in_page(bp->b_addr);
+		bp->b_pages = bp->b_page_array;
+		bp->b_pages[0] = virt_to_page(bp->b_addr);
+		bp->b_page_count = 1;
+		bp->b_flags |= XBF_MAPPED | _XBF_KMEM;
+		return 0;
+	}
+
+use_alloc_page:
 	end = bp->b_file_offset + bp->b_buffer_length;
 	page_count = xfs_buf_btoc(end) - xfs_buf_btoct(bp->b_file_offset);
-
 	error = _xfs_buf_get_pages(bp, page_count, flags);
 	if (unlikely(error))
 		return error;
-	bp->b_flags |= _XBF_PAGE_CACHE;
 
 	offset = bp->b_offset;
-	first = bp->b_file_offset >> PAGE_CACHE_SHIFT;
+	first = bp->b_file_offset >> PAGE_SHIFT;
+	bp->b_flags |= _XBF_PAGES;
 
 	for (i = 0; i < bp->b_page_count; i++) {
 		struct page	*page;
 		uint		retries = 0;
-
-	retry:
-		page = find_or_create_page(mapping, first + i, gfp_mask);
+retry:
+		page = alloc_page(gfp_mask);
 		if (unlikely(page == NULL)) {
 			if (flags & XBF_READ_AHEAD) {
 				bp->b_page_count = i;
-				for (i = 0; i < bp->b_page_count; i++)
-					unlock_page(bp->b_pages[i]);
-				return -ENOMEM;
+				error = ENOMEM;
+				goto out_free_pages;
 			}
 
 			/*
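The new sub-page path above only trusts a heap allocation if it sits entirely inside one page: b_pages[0] is derived from b_addr with virt_to_page(), and a single struct page cannot describe memory that crosses a page frame. A userspace sketch of that boundary test, with a hypothetical spans_page_boundary() helper and PAGE_SIZE pinned at 4096:

/*
 * Userspace sketch of the "does this heap buffer straddle a page
 * boundary?" test used above; spans_page_boundary() is a hypothetical
 * stand-in, and PAGE_SIZE is fixed at 4096 for the example.
 */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define PAGE_SIZE	4096UL
#define PAGE_MASK	(~(PAGE_SIZE - 1))

static bool spans_page_boundary(const void *addr, size_t len)
{
	uintptr_t first = (uintptr_t)addr & PAGE_MASK;
	uintptr_t last = ((uintptr_t)addr + len - 1) & PAGE_MASK;

	return first != last;	/* first and last byte on different pages */
}

int main(void)
{
	/* 512 bytes starting 256 bytes before a page boundary: straddles */
	printf("%d\n", spans_page_boundary((void *)(PAGE_SIZE - 256), 512));
	/* 512 bytes at the start of a page: fits */
	printf("%d\n", spans_page_boundary((void *)PAGE_SIZE, 512));
	return 0;
}

Falling back to alloc_page() rather than failing keeps sub-page buffers cheap in the common case while staying correct when the heap hands back memory that crosses a frame.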
@@ -412,33 +366,16 @@ _xfs_buf_lookup_pages(
 
 		XFS_STATS_INC(xb_page_found);
 
-		nbytes = min_t(size_t, size, PAGE_CACHE_SIZE - offset);
+		nbytes = min_t(size_t, size, PAGE_SIZE - offset);
 		size -= nbytes;
-
-		ASSERT(!PagePrivate(page));
-		if (!PageUptodate(page)) {
-			page_count--;
-			if (blocksize >= PAGE_CACHE_SIZE) {
-				if (flags & XBF_READ)
-					bp->b_flags |= _XBF_PAGE_LOCKED;
-			} else if (!PagePrivate(page)) {
-				if (test_page_region(page, offset, nbytes))
-					page_count++;
-			}
-		}
-
 		bp->b_pages[i] = page;
 		offset = 0;
 	}
+	return 0;
 
-	if (!(bp->b_flags & _XBF_PAGE_LOCKED)) {
-		for (i = 0; i < bp->b_page_count; i++)
-			unlock_page(bp->b_pages[i]);
-	}
-
-	if (page_count == bp->b_page_count)
-		bp->b_flags |= XBF_DONE;
-
+out_free_pages:
+	for (i = 0; i < bp->b_page_count; i++)
+		__free_page(bp->b_pages[i]);
 	return error;
 }
 
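The out_free_pages label introduced above is the classic partial-allocation unwind: record how many pages were actually obtained, then free exactly that many on the error path. A self-contained sketch of the same shape, where alloc_one()/free_one() are hypothetical stand-ins for alloc_page()/__free_page() and the positive ENOMEM mirrors XFS's positive errno convention of the time:

/*
 * Userspace sketch of the partial-allocation unwind above; alloc_one()
 * and free_one() are hypothetical stand-ins for alloc_page() and
 * __free_page(), and ENOMEM is returned positive as XFS did then.
 */
#include <stdlib.h>

#define ENOMEM 12

static void *alloc_one(void) { return malloc(64); }
static void free_one(void *p) { free(p); }

static int alloc_all(void **slots, int count)
{
	int i;

	for (i = 0; i < count; i++) {
		slots[i] = alloc_one();
		if (!slots[i])
			goto out_free;	/* unwind only what we got */
	}
	return 0;

out_free:
	while (--i >= 0)
		free_one(slots[i]);
	return ENOMEM;
}

int main(void)
{
	void *slots[4];
	int error = alloc_all(slots, 4);
	int i;

	if (!error)
		for (i = 0; i < 4; i++)
			free_one(slots[i]);
	return error;
}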
@@ -450,14 +387,23 @@ _xfs_buf_map_pages(
 	xfs_buf_t		*bp,
 	uint			flags)
 {
-	/* A single page buffer is always mappable */
+	ASSERT(bp->b_flags & _XBF_PAGES);
 	if (bp->b_page_count == 1) {
+		/* A single page buffer is always mappable */
 		bp->b_addr = page_address(bp->b_pages[0]) + bp->b_offset;
 		bp->b_flags |= XBF_MAPPED;
 	} else if (flags & XBF_MAPPED) {
-		bp->b_addr = vm_map_ram(bp->b_pages, bp->b_page_count,
-					-1, PAGE_KERNEL);
-		if (unlikely(bp->b_addr == NULL))
+		int retried = 0;
+
+		do {
+			bp->b_addr = vm_map_ram(bp->b_pages, bp->b_page_count,
+						-1, PAGE_KERNEL);
+			if (bp->b_addr)
+				break;
+			vm_unmap_aliases();
+		} while (retried++ <= 1);
+
+		if (!bp->b_addr)
 			return -ENOMEM;
 		bp->b_addr += bp->b_offset;
 		bp->b_flags |= XBF_MAPPED;
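The retry loop added to _xfs_buf_map_pages() deals with vm_map_ram() failing transiently when vmalloc space is tied up by lazily-unmapped areas; vm_unmap_aliases() flushes those before another attempt. Its general shape, as a runnable sketch in which try_map() and flush_lazy_unmaps() are hypothetical stand-ins for vm_map_ram() and vm_unmap_aliases():

/*
 * Runnable sketch of the bounded retry above; try_map() and
 * flush_lazy_unmaps() are hypothetical stand-ins for vm_map_ram()
 * and vm_unmap_aliases().
 */
#include <stdio.h>

static int attempts;

static void *try_map(void)
{
	/* simulate transient failure: succeed on the third attempt */
	return (++attempts < 3) ? NULL : (void *)&attempts;
}

static void flush_lazy_unmaps(void)
{
	/* stand-in: would reclaim lazily-unmapped virtual address space */
}

static void *map_with_retry(void)
{
	void *addr;
	int retried = 0;

	do {
		addr = try_map();
		if (addr)
			break;
		flush_lazy_unmaps();
	} while (retried++ <= 1);	/* at most three attempts in total */

	return addr;
}

int main(void)
{
	void *addr = map_with_retry();

	printf("%s after %d attempt(s)\n", addr ? "mapped" : "failed",
	       attempts);
	return 0;
}

Bounding the retries keeps the flush from turning a genuinely exhausted address space into an unbounded spin.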
@@ -568,9 +514,14 @@ found:
 		}
 	}
 
+	/*
+	 * if the buffer is stale, clear all the external state associated with
+	 * it. We need to keep flags such as how we allocated the buffer memory
+	 * intact here.
+	 */
 	if (bp->b_flags & XBF_STALE) {
 		ASSERT((bp->b_flags & _XBF_DELWRI_Q) == 0);
-		bp->b_flags &= XBF_MAPPED;
+		bp->b_flags &= XBF_MAPPED | _XBF_KMEM | _XBF_PAGES;
 	}
 
 	trace_xfs_buf_find(bp, flags, _RET_IP_);
@@ -591,7 +542,7 @@ xfs_buf_get(
 	xfs_buf_flags_t		flags)
 {
 	xfs_buf_t		*bp, *new_bp;
-	int			error = 0, i;
+	int			error = 0;
 
 	new_bp = xfs_buf_allocate(flags);
 	if (unlikely(!new_bp))
@@ -599,7 +550,7 @@ xfs_buf_get(
 
 	bp = _xfs_buf_find(target, ioff, isize, flags, new_bp);
 	if (bp == new_bp) {
-		error = _xfs_buf_lookup_pages(bp, flags);
+		error = xfs_buf_allocate_memory(bp, flags);
 		if (error)
 			goto no_buffer;
 	} else {
@@ -608,9 +559,6 @@ xfs_buf_get(
 		return NULL;
 	}
 
-	for (i = 0; i < bp->b_page_count; i++)
-		mark_page_accessed(bp->b_pages[i]);
-
 	if (!(bp->b_flags & XBF_MAPPED)) {
 		error = _xfs_buf_map_pages(bp, flags);
 		if (unlikely(error)) {
@@ -711,8 +659,7 @@ xfs_buf_readahead(
 {
 	struct backing_dev_info *bdi;
 
-	bdi = target->bt_mapping->backing_dev_info;
-	if (bdi_read_congested(bdi))
+	if (bdi_read_congested(target->bt_bdi))
 		return;
 
 	xfs_buf_read(target, ioff, isize,
@@ -790,10 +737,10 @@ xfs_buf_associate_memory(
 	size_t			buflen;
 	int			page_count;
 
-	pageaddr = (unsigned long)mem & PAGE_CACHE_MASK;
+	pageaddr = (unsigned long)mem & PAGE_MASK;
 	offset = (unsigned long)mem - pageaddr;
-	buflen = PAGE_CACHE_ALIGN(len + offset);
-	page_count = buflen >> PAGE_CACHE_SHIFT;
+	buflen = PAGE_ALIGN(len + offset);
+	page_count = buflen >> PAGE_SHIFT;
 
 	/* Free any previous set of page pointers */
 	if (bp->b_pages)
@@ -810,13 +757,12 @@ xfs_buf_associate_memory(
 
 	for (i = 0; i < bp->b_page_count; i++) {
 		bp->b_pages[i] = mem_to_page((void *)pageaddr);
-		pageaddr += PAGE_CACHE_SIZE;
+		pageaddr += PAGE_SIZE;
 	}
 
 	bp->b_count_desired = len;
 	bp->b_buffer_length = buflen;
 	bp->b_flags |= XBF_MAPPED;
-	bp->b_flags &= ~_XBF_PAGE_LOCKED;
 
 	return 0;
 }
@@ -923,20 +869,7 @@ xfs_buf_rele(
 
 
 /*
- * Mutual exclusion on buffers. Locking model:
- *
- * Buffers associated with inodes for which buffer locking
- * is not enabled are not protected by semaphores, and are
- * assumed to be exclusively owned by the caller. There is a
- * spinlock in the buffer, used by the caller when concurrent
- * access is possible.
- */
-
-/*
- * Locks a buffer object, if it is not already locked. Note that this in
- * no way locks the underlying pages, so it is only useful for
- * synchronizing concurrent use of buffer objects, not for synchronizing
- * independent access to the underlying pages.
+ * Lock a buffer object, if it is not already locked.
  *
  * If we come across a stale, pinned, locked buffer, we know that we are
  * being asked to lock a buffer that has been reallocated. Because it is
@@ -970,10 +903,7 @@ xfs_buf_lock_value(
 }
 
 /*
- * Locks a buffer object.
- * Note that this in no way locks the underlying pages, so it is only
- * useful for synchronizing concurrent use of buffer objects, not for
- * synchronizing independent access to the underlying pages.
+ * Lock a buffer object.
  *
  * If we come across a stale, pinned, locked buffer, we know that we
  * are being asked to lock a buffer that has been reallocated. Because
@@ -1246,10 +1176,8 @@ _xfs_buf_ioend(
 	xfs_buf_t		*bp,
 	int			schedule)
 {
-	if (atomic_dec_and_test(&bp->b_io_remaining) == 1) {
-		bp->b_flags &= ~_XBF_PAGE_LOCKED;
+	if (atomic_dec_and_test(&bp->b_io_remaining) == 1)
 		xfs_buf_ioend(bp, schedule);
-	}
 }
 
 STATIC void
@@ -1258,35 +1186,12 @@ xfs_buf_bio_end_io(
 	int			error)
 {
 	xfs_buf_t		*bp = (xfs_buf_t *)bio->bi_private;
-	unsigned int		blocksize = bp->b_target->bt_bsize;
-	struct bio_vec		*bvec = bio->bi_io_vec + bio->bi_vcnt - 1;
 
 	xfs_buf_ioerror(bp, -error);
 
 	if (!error && xfs_buf_is_vmapped(bp) && (bp->b_flags & XBF_READ))
 		invalidate_kernel_vmap_range(bp->b_addr, xfs_buf_vmap_len(bp));
 
-	do {
-		struct page	*page = bvec->bv_page;
-
-		ASSERT(!PagePrivate(page));
-		if (unlikely(bp->b_error)) {
-			if (bp->b_flags & XBF_READ)
-				ClearPageUptodate(page);
-		} else if (blocksize >= PAGE_CACHE_SIZE) {
-			SetPageUptodate(page);
-		} else if (!PagePrivate(page) &&
-				(bp->b_flags & _XBF_PAGE_CACHE)) {
-			set_page_region(page, bvec->bv_offset, bvec->bv_len);
-		}
-
-		if (--bvec >= bio->bi_io_vec)
-			prefetchw(&bvec->bv_page->flags);
-
-		if (bp->b_flags & _XBF_PAGE_LOCKED)
-			unlock_page(page);
-	} while (bvec >= bio->bi_io_vec);
-
 	_xfs_buf_ioend(bp, 1);
 	bio_put(bio);
 }
@@ -1300,7 +1205,6 @@ _xfs_buf_ioapply(
 	int			offset = bp->b_offset;
 	int			size = bp->b_count_desired;
 	sector_t		sector = bp->b_bn;
-	unsigned int		blocksize = bp->b_target->bt_bsize;
 
 	total_nr_pages = bp->b_page_count;
 	map_i = 0;
@@ -1321,29 +1225,6 @@ _xfs_buf_ioapply(
 			(bp->b_flags & XBF_READ_AHEAD) ? READA : READ;
 	}
 
-	/* Special code path for reading a sub page size buffer in --
-	 * we populate up the whole page, and hence the other metadata
-	 * in the same page. This optimization is only valid when the
-	 * filesystem block size is not smaller than the page size.
-	 */
-	if ((bp->b_buffer_length < PAGE_CACHE_SIZE) &&
-	    ((bp->b_flags & (XBF_READ|_XBF_PAGE_LOCKED)) ==
-	     (XBF_READ|_XBF_PAGE_LOCKED)) &&
-	    (blocksize >= PAGE_CACHE_SIZE)) {
-		bio = bio_alloc(GFP_NOIO, 1);
-
-		bio->bi_bdev = bp->b_target->bt_bdev;
-		bio->bi_sector = sector - (offset >> BBSHIFT);
-		bio->bi_end_io = xfs_buf_bio_end_io;
-		bio->bi_private = bp;
-
-		bio_add_page(bio, bp->b_pages[0], PAGE_CACHE_SIZE, 0);
-		size = 0;
-
-		atomic_inc(&bp->b_io_remaining);
-
-		goto submit_io;
-	}
 
 next_chunk:
 	atomic_inc(&bp->b_io_remaining);
@@ -1357,8 +1238,9 @@ next_chunk:
 	bio->bi_end_io = xfs_buf_bio_end_io;
 	bio->bi_private = bp;
 
+
 	for (; size && nr_pages; nr_pages--, map_i++) {
-		int	rbytes, nbytes = PAGE_CACHE_SIZE - offset;
+		int	rbytes, nbytes = PAGE_SIZE - offset;
 
 		if (nbytes > size)
 			nbytes = size;
@@ -1373,7 +1255,6 @@ next_chunk:
 		total_nr_pages--;
 	}
 
-submit_io:
 	if (likely(bio->bi_size)) {
 		if (xfs_buf_is_vmapped(bp)) {
 			flush_kernel_vmap_range(bp->b_addr,
@@ -1383,18 +1264,7 @@ submit_io:
 		if (size)
 			goto next_chunk;
 	} else {
-		/*
-		 * if we get here, no pages were added to the bio. However,
-		 * we can't just error out here - if the pages are locked then
-		 * we have to unlock them otherwise we can hang on a later
-		 * access to the page.
-		 */
 		xfs_buf_ioerror(bp, EIO);
-		if (bp->b_flags & _XBF_PAGE_LOCKED) {
-			int i;
-			for (i = 0; i < bp->b_page_count; i++)
-				unlock_page(bp->b_pages[i]);
-		}
 		bio_put(bio);
 	}
 }
@@ -1458,8 +1328,8 @@ xfs_buf_offset(
 		return XFS_BUF_PTR(bp) + offset;
 
 	offset += bp->b_offset;
-	page = bp->b_pages[offset >> PAGE_CACHE_SHIFT];
-	return (xfs_caddr_t)page_address(page) + (offset & (PAGE_CACHE_SIZE-1));
+	page = bp->b_pages[offset >> PAGE_SHIFT];
+	return (xfs_caddr_t)page_address(page) + (offset & (PAGE_SIZE-1));
 }
 
 /*
@@ -1481,9 +1351,9 @@ xfs_buf_iomove(
 		page = bp->b_pages[xfs_buf_btoct(boff + bp->b_offset)];
 		cpoff = xfs_buf_poff(boff + bp->b_offset);
 		csize = min_t(size_t,
-				PAGE_CACHE_SIZE-cpoff, bp->b_count_desired-boff);
+				PAGE_SIZE-cpoff, bp->b_count_desired-boff);
 
-		ASSERT(((csize + cpoff) <= PAGE_CACHE_SIZE));
+		ASSERT(((csize + cpoff) <= PAGE_SIZE));
 
 		switch (mode) {
 		case XBRW_ZERO:
@@ -1596,7 +1466,6 @@ xfs_free_buftarg(
 	xfs_flush_buftarg(btp, 1);
 	if (mp->m_flags & XFS_MOUNT_BARRIER)
 		xfs_blkdev_issue_flush(btp);
-	iput(btp->bt_mapping->host);
 
 	kthread_stop(btp->bt_task);
 	kmem_free(btp);
@@ -1620,15 +1489,6 @@ xfs_setsize_buftarg_flags(
 		return EINVAL;
 	}
 
-	if (verbose &&
-	    (PAGE_CACHE_SIZE / BITS_PER_LONG) > sectorsize) {
-		printk(KERN_WARNING
-			"XFS: %u byte sectors in use on device %s. "
-			"This is suboptimal; %u or greater is ideal.\n",
-			sectorsize, XFS_BUFTARG_NAME(btp),
-			(unsigned int)PAGE_CACHE_SIZE / BITS_PER_LONG);
-	}
-
 	return 0;
 }
 
@@ -1643,7 +1503,7 @@ xfs_setsize_buftarg_early(
 	struct block_device	*bdev)
 {
 	return xfs_setsize_buftarg_flags(btp,
-			PAGE_CACHE_SIZE, bdev_logical_block_size(bdev), 0);
+			PAGE_SIZE, bdev_logical_block_size(bdev), 0);
 }
 
 int
@@ -1656,40 +1516,6 @@ xfs_setsize_buftarg(
 }
 
 STATIC int
-xfs_mapping_buftarg(
-	xfs_buftarg_t		*btp,
-	struct block_device	*bdev)
-{
-	struct backing_dev_info	*bdi;
-	struct inode		*inode;
-	struct address_space	*mapping;
-	static const struct address_space_operations mapping_aops = {
-		.migratepage = fail_migrate_page,
-	};
-
-	inode = new_inode(bdev->bd_inode->i_sb);
-	if (!inode) {
-		printk(KERN_WARNING
-			"XFS: Cannot allocate mapping inode for device %s\n",
-			XFS_BUFTARG_NAME(btp));
-		return ENOMEM;
-	}
-	inode->i_ino = get_next_ino();
-	inode->i_mode = S_IFBLK;
-	inode->i_bdev = bdev;
-	inode->i_rdev = bdev->bd_dev;
-	bdi = blk_get_backing_dev_info(bdev);
-	if (!bdi)
-		bdi = &default_backing_dev_info;
-	mapping = &inode->i_data;
-	mapping->a_ops = &mapping_aops;
-	mapping->backing_dev_info = bdi;
-	mapping_set_gfp_mask(mapping, GFP_NOFS);
-	btp->bt_mapping = mapping;
-	return 0;
-}
-
-STATIC int
 xfs_alloc_delwrite_queue(
 	xfs_buftarg_t		*btp,
 	const char		*fsname)
@@ -1717,12 +1543,14 @@ xfs_alloc_buftarg(
 	btp->bt_mount = mp;
 	btp->bt_dev = bdev->bd_dev;
 	btp->bt_bdev = bdev;
+	btp->bt_bdi = blk_get_backing_dev_info(bdev);
+	if (!btp->bt_bdi)
+		goto error;
+
 	INIT_LIST_HEAD(&btp->bt_lru);
 	spin_lock_init(&btp->bt_lru_lock);
 	if (xfs_setsize_buftarg_early(btp, bdev))
 		goto error;
-	if (xfs_mapping_buftarg(btp, bdev))
-		goto error;
 	if (xfs_alloc_delwrite_queue(btp, fsname))
 		goto error;
 	btp->bt_shrinker.shrink = xfs_buftarg_shrink;