author     Nathan Scott <nathans@sgi.com>	2006-01-10 23:39:08 -0500
committer  Nathan Scott <nathans@sgi.com>	2006-01-10 23:39:08 -0500
commit     ce8e922c0e79c8093452ba9a124981332b75706b (patch)
tree       0f681391461d4d6bbccd3bf88a7762cc7daa8852
parent     68bdb6eabcd2869caa795019961a5445a11b5bc1 (diff)

[XFS] Complete the pagebuf -> xfs_buf naming convention transition, finally.

SGI-PV: 947038
SGI-Modid: xfs-linux-melb:xfs-kern:24866a
Signed-off-by: Nathan Scott <nathans@sgi.com>
-rw-r--r--  fs/xfs/linux-2.6/xfs_aops.c  |    6
-rw-r--r--  fs/xfs/linux-2.6/xfs_buf.c   | 1258
-rw-r--r--  fs/xfs/linux-2.6/xfs_buf.h   |  693
-rw-r--r--  fs/xfs/linux-2.6/xfs_ioctl.c |    2
-rw-r--r--  fs/xfs/linux-2.6/xfs_linux.h |    2
-rw-r--r--  fs/xfs/linux-2.6/xfs_lrw.c   |   10
-rw-r--r--  fs/xfs/linux-2.6/xfs_stats.h |   18
-rw-r--r--  fs/xfs/linux-2.6/xfs_super.c |   14
-rw-r--r--  fs/xfs/xfs_mount.h           |    1
-rw-r--r--  fs/xfs/xfs_rw.c              |    9
10 files changed, 895 insertions, 1118 deletions
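The patch is almost entirely a mechanical rename: pagebuf_* functions become xfs_buf_*, pb_* structure fields become b_*, and the PBF_*/PB_* flag and macro prefixes become XBF_*/XB_*. For orientation, here is a minimal before/after sketch of the same read path; it condenses the xfs_buf_read_flags() logic from the hunks below, and the caller context (target, ioff, isize) is hypothetical, not part of the patch:

	/* Before: pagebuf naming (illustrative caller, not from the patch) */
	xfs_buf_t	*pb;

	pb = xfs_buf_get_flags(target, ioff, isize, PBF_READ | PBF_TRYLOCK);
	if (pb) {
		if (!XFS_BUF_ISDONE(pb))
			pagebuf_iostart(pb, PBF_READ);	/* issue the read */
		pagebuf_unlock(pb);			/* drop the buffer lock */
		pagebuf_rele(pb);			/* drop our hold */
	}

	/* After: xfs_buf naming, identical semantics */
	xfs_buf_t	*bp;

	bp = xfs_buf_get_flags(target, ioff, isize, XBF_READ | XBF_TRYLOCK);
	if (bp) {
		if (!XFS_BUF_ISDONE(bp))
			xfs_buf_iostart(bp, XBF_READ);	/* issue the read */
		xfs_buf_unlock(bp);
		xfs_buf_rele(bp);
	}

Buffer semantics (hold counts, locking, delwri queueing) are unchanged by the patch; only the names move.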
diff --git a/fs/xfs/linux-2.6/xfs_aops.c b/fs/xfs/linux-2.6/xfs_aops.c
index 94d3cdfbf9b8..3f6b9e29850c 100644
--- a/fs/xfs/linux-2.6/xfs_aops.c
+++ b/fs/xfs/linux-2.6/xfs_aops.c
@@ -273,7 +273,7 @@ xfs_map_at_offset(
 
 	lock_buffer(bh);
 	bh->b_blocknr = bn;
-	bh->b_bdev = iomapp->iomap_target->pbr_bdev;
+	bh->b_bdev = iomapp->iomap_target->bt_bdev;
 	set_buffer_mapped(bh);
 	clear_buffer_delay(bh);
 }
@@ -982,7 +982,7 @@ __linvfs_get_block(
 	}
 
 	/* If this is a realtime file, data might be on a new device */
-	bh_result->b_bdev = iomap.iomap_target->pbr_bdev;
+	bh_result->b_bdev = iomap.iomap_target->bt_bdev;
 
 	/* If we previously allocated a block out beyond eof and
 	 * we are now coming back to use it then we will need to
@@ -1097,7 +1097,7 @@ linvfs_direct_IO(
 	iocb->private = xfs_alloc_ioend(inode);
 
 	ret = blockdev_direct_IO_own_locking(rw, iocb, inode,
-					iomap.iomap_target->pbr_bdev,
+					iomap.iomap_target->bt_bdev,
 					iov, offset, nr_segs,
 					linvfs_get_blocks_direct,
 					linvfs_end_io_direct);
diff --git a/fs/xfs/linux-2.6/xfs_buf.c b/fs/xfs/linux-2.6/xfs_buf.c
index 2a8acd38fa1e..cb77f99cbef1 100644
--- a/fs/xfs/linux-2.6/xfs_buf.c
+++ b/fs/xfs/linux-2.6/xfs_buf.c
@@ -31,77 +31,77 @@
 #include <linux/kthread.h>
 #include "xfs_linux.h"
 
-STATIC kmem_cache_t *pagebuf_zone;
-STATIC kmem_shaker_t pagebuf_shake;
+STATIC kmem_zone_t *xfs_buf_zone;
+STATIC kmem_shaker_t xfs_buf_shake;
 STATIC int xfsbufd(void *);
 STATIC int xfsbufd_wakeup(int, gfp_t);
-STATIC void pagebuf_delwri_queue(xfs_buf_t *, int);
+STATIC void xfs_buf_delwri_queue(xfs_buf_t *, int);
 
 STATIC struct workqueue_struct *xfslogd_workqueue;
 struct workqueue_struct *xfsdatad_workqueue;
 
-#ifdef PAGEBUF_TRACE
+#ifdef XFS_BUF_TRACE
 void
-pagebuf_trace(
-	xfs_buf_t	*pb,
+xfs_buf_trace(
+	xfs_buf_t	*bp,
 	char		*id,
 	void		*data,
 	void		*ra)
 {
-	ktrace_enter(pagebuf_trace_buf,
-		pb, id,
-		(void *)(unsigned long)pb->pb_flags,
-		(void *)(unsigned long)pb->pb_hold.counter,
-		(void *)(unsigned long)pb->pb_sema.count.counter,
+	ktrace_enter(xfs_buf_trace_buf,
+		bp, id,
+		(void *)(unsigned long)bp->b_flags,
+		(void *)(unsigned long)bp->b_hold.counter,
+		(void *)(unsigned long)bp->b_sema.count.counter,
 		(void *)current,
 		data, ra,
-		(void *)(unsigned long)((pb->pb_file_offset>>32) & 0xffffffff),
-		(void *)(unsigned long)(pb->pb_file_offset & 0xffffffff),
-		(void *)(unsigned long)pb->pb_buffer_length,
+		(void *)(unsigned long)((bp->b_file_offset>>32) & 0xffffffff),
+		(void *)(unsigned long)(bp->b_file_offset & 0xffffffff),
+		(void *)(unsigned long)bp->b_buffer_length,
 		NULL, NULL, NULL, NULL, NULL);
 }
-ktrace_t *pagebuf_trace_buf;
-#define PAGEBUF_TRACE_SIZE	4096
-#define PB_TRACE(pb, id, data)	\
-	pagebuf_trace(pb, id, (void *)data, (void *)__builtin_return_address(0))
+ktrace_t *xfs_buf_trace_buf;
+#define XFS_BUF_TRACE_SIZE	4096
+#define XB_TRACE(bp, id, data)	\
+	xfs_buf_trace(bp, id, (void *)data, (void *)__builtin_return_address(0))
 #else
-#define PB_TRACE(pb, id, data)	do { } while (0)
+#define XB_TRACE(bp, id, data)	do { } while (0)
 #endif
 
-#ifdef PAGEBUF_LOCK_TRACKING
-# define PB_SET_OWNER(pb)	((pb)->pb_last_holder = current->pid)
-# define PB_CLEAR_OWNER(pb)	((pb)->pb_last_holder = -1)
-# define PB_GET_OWNER(pb)	((pb)->pb_last_holder)
+#ifdef XFS_BUF_LOCK_TRACKING
+# define XB_SET_OWNER(bp)	((bp)->b_last_holder = current->pid)
+# define XB_CLEAR_OWNER(bp)	((bp)->b_last_holder = -1)
+# define XB_GET_OWNER(bp)	((bp)->b_last_holder)
 #else
-# define PB_SET_OWNER(pb)	do { } while (0)
-# define PB_CLEAR_OWNER(pb)	do { } while (0)
-# define PB_GET_OWNER(pb)	do { } while (0)
+# define XB_SET_OWNER(bp)	do { } while (0)
+# define XB_CLEAR_OWNER(bp)	do { } while (0)
+# define XB_GET_OWNER(bp)	do { } while (0)
 #endif
 
-#define pb_to_gfp(flags) \
-	((((flags) & PBF_READ_AHEAD) ? __GFP_NORETRY : \
-	  ((flags) & PBF_DONT_BLOCK) ? GFP_NOFS : GFP_KERNEL) | __GFP_NOWARN)
+#define xb_to_gfp(flags) \
+	((((flags) & XBF_READ_AHEAD) ? __GFP_NORETRY : \
+	  ((flags) & XBF_DONT_BLOCK) ? GFP_NOFS : GFP_KERNEL) | __GFP_NOWARN)
 
-#define pb_to_km(flags) \
-	 (((flags) & PBF_DONT_BLOCK) ? KM_NOFS : KM_SLEEP)
+#define xb_to_km(flags) \
+	 (((flags) & XBF_DONT_BLOCK) ? KM_NOFS : KM_SLEEP)
 
-#define pagebuf_allocate(flags) \
-	kmem_zone_alloc(pagebuf_zone, pb_to_km(flags))
-#define pagebuf_deallocate(pb) \
-	kmem_zone_free(pagebuf_zone, (pb));
+#define xfs_buf_allocate(flags) \
+	kmem_zone_alloc(xfs_buf_zone, xb_to_km(flags))
+#define xfs_buf_deallocate(bp) \
+	kmem_zone_free(xfs_buf_zone, (bp));
 
 /*
  * Page Region interfaces.
  *
  * For pages in filesystems where the blocksize is smaller than the
  * pagesize, we use the page->private field (long) to hold a bitmap
  * of uptodate regions within the page.
  *
  * Each such region is "bytes per page / bits per long" bytes long.
  *
  * NBPPR == number-of-bytes-per-page-region
  * BTOPR == bytes-to-page-region (rounded up)
  * BTOPRT == bytes-to-page-region-truncated (rounded down)
  */
 #if (BITS_PER_LONG == 32)
 #define PRSHIFT		(PAGE_CACHE_SHIFT - 5)	/* (32 == 1<<5) */
@@ -160,7 +160,7 @@ test_page_region(
 }
 
 /*
- * Mapping of multi-page buffers into contiguous virtual space
+ * Mapping of multi-page buffers into contiguous virtual space
  */
 
 typedef struct a_list {
@@ -173,7 +173,7 @@ STATIC int as_list_len;
 STATIC DEFINE_SPINLOCK(as_lock);
 
 /*
- * Try to batch vunmaps because they are costly.
+ * Try to batch vunmaps because they are costly.
  */
 STATIC void
 free_address(
@@ -216,83 +216,83 @@ purge_addresses(void)
 }
 
 /*
- * Internal pagebuf object manipulation
+ * Internal xfs_buf_t object manipulation
  */
 
 STATIC void
-_pagebuf_initialize(
-	xfs_buf_t		*pb,
+_xfs_buf_initialize(
+	xfs_buf_t		*bp,
 	xfs_buftarg_t		*target,
 	loff_t			range_base,
 	size_t			range_length,
-	page_buf_flags_t	flags)
+	xfs_buf_flags_t		flags)
 {
 	/*
-	 * We don't want certain flags to appear in pb->pb_flags.
+	 * We don't want certain flags to appear in b_flags.
 	 */
-	flags &= ~(PBF_LOCK|PBF_MAPPED|PBF_DONT_BLOCK|PBF_READ_AHEAD);
+	flags &= ~(XBF_LOCK|XBF_MAPPED|XBF_DONT_BLOCK|XBF_READ_AHEAD);
 
-	memset(pb, 0, sizeof(xfs_buf_t));
-	atomic_set(&pb->pb_hold, 1);
-	init_MUTEX_LOCKED(&pb->pb_iodonesema);
-	INIT_LIST_HEAD(&pb->pb_list);
-	INIT_LIST_HEAD(&pb->pb_hash_list);
-	init_MUTEX_LOCKED(&pb->pb_sema); /* held, no waiters */
-	PB_SET_OWNER(pb);
-	pb->pb_target = target;
-	pb->pb_file_offset = range_base;
+	memset(bp, 0, sizeof(xfs_buf_t));
+	atomic_set(&bp->b_hold, 1);
+	init_MUTEX_LOCKED(&bp->b_iodonesema);
+	INIT_LIST_HEAD(&bp->b_list);
+	INIT_LIST_HEAD(&bp->b_hash_list);
+	init_MUTEX_LOCKED(&bp->b_sema); /* held, no waiters */
+	XB_SET_OWNER(bp);
+	bp->b_target = target;
+	bp->b_file_offset = range_base;
 	/*
 	 * Set buffer_length and count_desired to the same value initially.
 	 * I/O routines should use count_desired, which will be the same in
 	 * most cases but may be reset (e.g. XFS recovery).
 	 */
-	pb->pb_buffer_length = pb->pb_count_desired = range_length;
-	pb->pb_flags = flags;
-	pb->pb_bn = XFS_BUF_DADDR_NULL;
-	atomic_set(&pb->pb_pin_count, 0);
-	init_waitqueue_head(&pb->pb_waiters);
+	bp->b_buffer_length = bp->b_count_desired = range_length;
+	bp->b_flags = flags;
+	bp->b_bn = XFS_BUF_DADDR_NULL;
+	atomic_set(&bp->b_pin_count, 0);
+	init_waitqueue_head(&bp->b_waiters);
 
-	XFS_STATS_INC(pb_create);
-	PB_TRACE(pb, "initialize", target);
+	XFS_STATS_INC(xb_create);
+	XB_TRACE(bp, "initialize", target);
 }
 
 /*
  * Allocate a page array capable of holding a specified number
  * of pages, and point the page buf at it.
  */
 STATIC int
-_pagebuf_get_pages(
-	xfs_buf_t		*pb,
+_xfs_buf_get_pages(
+	xfs_buf_t		*bp,
 	int			page_count,
-	page_buf_flags_t	flags)
+	xfs_buf_flags_t		flags)
 {
 	/* Make sure that we have a page list */
-	if (pb->pb_pages == NULL) {
-		pb->pb_offset = page_buf_poff(pb->pb_file_offset);
-		pb->pb_page_count = page_count;
-		if (page_count <= PB_PAGES) {
-			pb->pb_pages = pb->pb_page_array;
+	if (bp->b_pages == NULL) {
+		bp->b_offset = xfs_buf_poff(bp->b_file_offset);
+		bp->b_page_count = page_count;
+		if (page_count <= XB_PAGES) {
+			bp->b_pages = bp->b_page_array;
 		} else {
-			pb->pb_pages = kmem_alloc(sizeof(struct page *) *
-					page_count, pb_to_km(flags));
-			if (pb->pb_pages == NULL)
+			bp->b_pages = kmem_alloc(sizeof(struct page *) *
+					page_count, xb_to_km(flags));
+			if (bp->b_pages == NULL)
 				return -ENOMEM;
 		}
-		memset(pb->pb_pages, 0, sizeof(struct page *) * page_count);
+		memset(bp->b_pages, 0, sizeof(struct page *) * page_count);
 	}
 	return 0;
 }
 
 /*
- * Frees pb_pages if it was malloced.
+ * Frees b_pages if it was allocated.
  */
 STATIC void
-_pagebuf_free_pages(
+_xfs_buf_free_pages(
 	xfs_buf_t	*bp)
 {
-	if (bp->pb_pages != bp->pb_page_array) {
-		kmem_free(bp->pb_pages,
-			  bp->pb_page_count * sizeof(struct page *));
+	if (bp->b_pages != bp->b_page_array) {
+		kmem_free(bp->b_pages,
+			  bp->b_page_count * sizeof(struct page *));
 	}
 }
 
@@ -300,79 +300,79 @@ _pagebuf_free_pages(
  * Releases the specified buffer.
  *
  * The modification state of any associated pages is left unchanged.
- * The buffer most not be on any hash - use pagebuf_rele instead for
+ * The buffer most not be on any hash - use xfs_buf_rele instead for
  * hashed and refcounted buffers
  */
 void
-pagebuf_free(
+xfs_buf_free(
 	xfs_buf_t		*bp)
 {
-	PB_TRACE(bp, "free", 0);
+	XB_TRACE(bp, "free", 0);
 
-	ASSERT(list_empty(&bp->pb_hash_list));
+	ASSERT(list_empty(&bp->b_hash_list));
 
-	if (bp->pb_flags & _PBF_PAGE_CACHE) {
+	if (bp->b_flags & _XBF_PAGE_CACHE) {
 		uint		i;
 
-		if ((bp->pb_flags & PBF_MAPPED) && (bp->pb_page_count > 1))
-			free_address(bp->pb_addr - bp->pb_offset);
+		if ((bp->b_flags & XBF_MAPPED) && (bp->b_page_count > 1))
+			free_address(bp->b_addr - bp->b_offset);
 
-		for (i = 0; i < bp->pb_page_count; i++)
-			page_cache_release(bp->pb_pages[i]);
-		_pagebuf_free_pages(bp);
-	} else if (bp->pb_flags & _PBF_KMEM_ALLOC) {
+		for (i = 0; i < bp->b_page_count; i++)
+			page_cache_release(bp->b_pages[i]);
+		_xfs_buf_free_pages(bp);
+	} else if (bp->b_flags & _XBF_KMEM_ALLOC) {
 		/*
-		 * XXX(hch): bp->pb_count_desired might be incorrect (see
-		 * pagebuf_associate_memory for details), but fortunately
+		 * XXX(hch): bp->b_count_desired might be incorrect (see
+		 * xfs_buf_associate_memory for details), but fortunately
 		 * the Linux version of kmem_free ignores the len argument..
 		 */
-		kmem_free(bp->pb_addr, bp->pb_count_desired);
-		_pagebuf_free_pages(bp);
+		kmem_free(bp->b_addr, bp->b_count_desired);
+		_xfs_buf_free_pages(bp);
 	}
 
-	pagebuf_deallocate(bp);
+	xfs_buf_deallocate(bp);
 }
 
 /*
  * Finds all pages for buffer in question and builds it's page list.
  */
 STATIC int
-_pagebuf_lookup_pages(
+_xfs_buf_lookup_pages(
 	xfs_buf_t		*bp,
 	uint			flags)
 {
-	struct address_space	*mapping = bp->pb_target->pbr_mapping;
-	size_t			blocksize = bp->pb_target->pbr_bsize;
-	size_t			size = bp->pb_count_desired;
+	struct address_space	*mapping = bp->b_target->bt_mapping;
+	size_t			blocksize = bp->b_target->bt_bsize;
+	size_t			size = bp->b_count_desired;
 	size_t			nbytes, offset;
-	gfp_t			gfp_mask = pb_to_gfp(flags);
+	gfp_t			gfp_mask = xb_to_gfp(flags);
 	unsigned short		page_count, i;
 	pgoff_t			first;
 	loff_t			end;
 	int			error;
 
-	end = bp->pb_file_offset + bp->pb_buffer_length;
-	page_count = page_buf_btoc(end) - page_buf_btoct(bp->pb_file_offset);
+	end = bp->b_file_offset + bp->b_buffer_length;
+	page_count = xfs_buf_btoc(end) - xfs_buf_btoct(bp->b_file_offset);
 
-	error = _pagebuf_get_pages(bp, page_count, flags);
+	error = _xfs_buf_get_pages(bp, page_count, flags);
 	if (unlikely(error))
 		return error;
-	bp->pb_flags |= _PBF_PAGE_CACHE;
+	bp->b_flags |= _XBF_PAGE_CACHE;
 
-	offset = bp->pb_offset;
-	first = bp->pb_file_offset >> PAGE_CACHE_SHIFT;
+	offset = bp->b_offset;
+	first = bp->b_file_offset >> PAGE_CACHE_SHIFT;
 
-	for (i = 0; i < bp->pb_page_count; i++) {
+	for (i = 0; i < bp->b_page_count; i++) {
 		struct page	*page;
 		uint		retries = 0;
 
 	      retry:
 		page = find_or_create_page(mapping, first + i, gfp_mask);
 		if (unlikely(page == NULL)) {
-			if (flags & PBF_READ_AHEAD) {
-				bp->pb_page_count = i;
-				for (i = 0; i < bp->pb_page_count; i++)
-					unlock_page(bp->pb_pages[i]);
+			if (flags & XBF_READ_AHEAD) {
+				bp->b_page_count = i;
+				for (i = 0; i < bp->b_page_count; i++)
+					unlock_page(bp->b_pages[i]);
 				return -ENOMEM;
 			}
 
@@ -388,13 +388,13 @@ _pagebuf_lookup_pages(
 					"deadlock in %s (mode:0x%x)\n",
 					__FUNCTION__, gfp_mask);
 
-			XFS_STATS_INC(pb_page_retries);
+			XFS_STATS_INC(xb_page_retries);
 			xfsbufd_wakeup(0, gfp_mask);
 			blk_congestion_wait(WRITE, HZ/50);
 			goto retry;
 		}
 
-		XFS_STATS_INC(pb_page_found);
+		XFS_STATS_INC(xb_page_found);
 
 		nbytes = min_t(size_t, size, PAGE_CACHE_SIZE - offset);
 		size -= nbytes;
@@ -402,27 +402,27 @@ _pagebuf_lookup_pages(
 		if (!PageUptodate(page)) {
 			page_count--;
 			if (blocksize >= PAGE_CACHE_SIZE) {
-				if (flags & PBF_READ)
-					bp->pb_locked = 1;
+				if (flags & XBF_READ)
+					bp->b_locked = 1;
 			} else if (!PagePrivate(page)) {
 				if (test_page_region(page, offset, nbytes))
 					page_count++;
 			}
 		}
 
-		bp->pb_pages[i] = page;
+		bp->b_pages[i] = page;
 		offset = 0;
 	}
 
-	if (!bp->pb_locked) {
-		for (i = 0; i < bp->pb_page_count; i++)
-			unlock_page(bp->pb_pages[i]);
+	if (!bp->b_locked) {
+		for (i = 0; i < bp->b_page_count; i++)
+			unlock_page(bp->b_pages[i]);
 	}
 
-	if (page_count == bp->pb_page_count)
-		bp->pb_flags |= PBF_DONE;
+	if (page_count == bp->b_page_count)
+		bp->b_flags |= XBF_DONE;
 
-	PB_TRACE(bp, "lookup_pages", (long)page_count);
+	XB_TRACE(bp, "lookup_pages", (long)page_count);
 	return error;
 }
 
@@ -430,23 +430,23 @@ _pagebuf_lookup_pages(
  * Map buffer into kernel address-space if nessecary.
  */
 STATIC int
-_pagebuf_map_pages(
+_xfs_buf_map_pages(
 	xfs_buf_t		*bp,
 	uint			flags)
 {
 	/* A single page buffer is always mappable */
-	if (bp->pb_page_count == 1) {
-		bp->pb_addr = page_address(bp->pb_pages[0]) + bp->pb_offset;
-		bp->pb_flags |= PBF_MAPPED;
-	} else if (flags & PBF_MAPPED) {
+	if (bp->b_page_count == 1) {
+		bp->b_addr = page_address(bp->b_pages[0]) + bp->b_offset;
+		bp->b_flags |= XBF_MAPPED;
+	} else if (flags & XBF_MAPPED) {
 		if (as_list_len > 64)
 			purge_addresses();
-		bp->pb_addr = vmap(bp->pb_pages, bp->pb_page_count,
-				VM_MAP, PAGE_KERNEL);
-		if (unlikely(bp->pb_addr == NULL))
+		bp->b_addr = vmap(bp->b_pages, bp->b_page_count,
+				VM_MAP, PAGE_KERNEL);
+		if (unlikely(bp->b_addr == NULL))
 			return -ENOMEM;
-		bp->pb_addr += bp->pb_offset;
-		bp->pb_flags |= PBF_MAPPED;
+		bp->b_addr += bp->b_offset;
+		bp->b_flags |= XBF_MAPPED;
 	}
 
 	return 0;
@@ -457,9 +457,7 @@ _pagebuf_map_pages(
  */
 
 /*
- * _pagebuf_find
- *
- * Looks up, and creates if absent, a lockable buffer for
+ * Look up, and creates if absent, a lockable buffer for
  * a given range of an inode.  The buffer is returned
  * locked.  If other overlapping buffers exist, they are
  * released before the new buffer is created and locked,
@@ -467,55 +465,55 @@ _pagebuf_map_pages(
  * are unlocked.  No I/O is implied by this call.
  */
 xfs_buf_t *
-_pagebuf_find(
+_xfs_buf_find(
 	xfs_buftarg_t		*btp,	/* block device target		*/
 	loff_t			ioff,	/* starting offset of range	*/
 	size_t			isize,	/* length of range		*/
-	page_buf_flags_t	flags,	/* PBF_TRYLOCK			*/
-	xfs_buf_t		*new_pb)/* newly allocated buffer	*/
+	xfs_buf_flags_t		flags,
+	xfs_buf_t		*new_bp)
 {
 	loff_t			range_base;
 	size_t			range_length;
 	xfs_bufhash_t		*hash;
-	xfs_buf_t		*pb, *n;
+	xfs_buf_t		*bp, *n;
 
 	range_base = (ioff << BBSHIFT);
 	range_length = (isize << BBSHIFT);
 
 	/* Check for IOs smaller than the sector size / not sector aligned */
-	ASSERT(!(range_length < (1 << btp->pbr_sshift)));
-	ASSERT(!(range_base & (loff_t)btp->pbr_smask));
+	ASSERT(!(range_length < (1 << btp->bt_sshift)));
+	ASSERT(!(range_base & (loff_t)btp->bt_smask));
 
 	hash = &btp->bt_hash[hash_long((unsigned long)ioff, btp->bt_hashshift)];
 
 	spin_lock(&hash->bh_lock);
 
-	list_for_each_entry_safe(pb, n, &hash->bh_list, pb_hash_list) {
-		ASSERT(btp == pb->pb_target);
-		if (pb->pb_file_offset == range_base &&
-		    pb->pb_buffer_length == range_length) {
+	list_for_each_entry_safe(bp, n, &hash->bh_list, b_hash_list) {
+		ASSERT(btp == bp->b_target);
+		if (bp->b_file_offset == range_base &&
+		    bp->b_buffer_length == range_length) {
 			/*
-			 * If we look at something bring it to the
+			 * If we look at something, bring it to the
 			 * front of the list for next time.
 			 */
-			atomic_inc(&pb->pb_hold);
-			list_move(&pb->pb_hash_list, &hash->bh_list);
+			atomic_inc(&bp->b_hold);
+			list_move(&bp->b_hash_list, &hash->bh_list);
 			goto found;
 		}
 	}
 
 	/* No match found */
-	if (new_pb) {
-		_pagebuf_initialize(new_pb, btp, range_base,
+	if (new_bp) {
+		_xfs_buf_initialize(new_bp, btp, range_base,
 				range_length, flags);
-		new_pb->pb_hash = hash;
-		list_add(&new_pb->pb_hash_list, &hash->bh_list);
+		new_bp->b_hash = hash;
+		list_add(&new_bp->b_hash_list, &hash->bh_list);
 	} else {
-		XFS_STATS_INC(pb_miss_locked);
+		XFS_STATS_INC(xb_miss_locked);
 	}
 
 	spin_unlock(&hash->bh_lock);
-	return new_pb;
+	return new_bp;
 
 found:
 	spin_unlock(&hash->bh_lock);
@@ -524,74 +522,72 @@ found:
 	 * if this does not work then we need to drop the
 	 * spinlock and do a hard attempt on the semaphore.
 	 */
-	if (down_trylock(&pb->pb_sema)) {
-		if (!(flags & PBF_TRYLOCK)) {
+	if (down_trylock(&bp->b_sema)) {
+		if (!(flags & XBF_TRYLOCK)) {
 			/* wait for buffer ownership */
-			PB_TRACE(pb, "get_lock", 0);
-			pagebuf_lock(pb);
-			XFS_STATS_INC(pb_get_locked_waited);
+			XB_TRACE(bp, "get_lock", 0);
+			xfs_buf_lock(bp);
+			XFS_STATS_INC(xb_get_locked_waited);
 		} else {
 			/* We asked for a trylock and failed, no need
 			 * to look at file offset and length here, we
-			 * know that this pagebuf at least overlaps our
-			 * pagebuf and is locked, therefore our buffer
-			 * either does not exist, or is this buffer
+			 * know that this buffer at least overlaps our
+			 * buffer and is locked, therefore our buffer
+			 * either does not exist, or is this buffer.
 			 */
-
-			pagebuf_rele(pb);
-			XFS_STATS_INC(pb_busy_locked);
-			return (NULL);
+			xfs_buf_rele(bp);
+			XFS_STATS_INC(xb_busy_locked);
+			return NULL;
 		}
 	} else {
 		/* trylock worked */
-		PB_SET_OWNER(pb);
+		XB_SET_OWNER(bp);
 	}
 
-	if (pb->pb_flags & PBF_STALE) {
-		ASSERT((pb->pb_flags & _PBF_DELWRI_Q) == 0);
-		pb->pb_flags &= PBF_MAPPED;
+	if (bp->b_flags & XBF_STALE) {
+		ASSERT((bp->b_flags & _XBF_DELWRI_Q) == 0);
+		bp->b_flags &= XBF_MAPPED;
 	}
-	PB_TRACE(pb, "got_lock", 0);
-	XFS_STATS_INC(pb_get_locked);
-	return (pb);
+	XB_TRACE(bp, "got_lock", 0);
+	XFS_STATS_INC(xb_get_locked);
+	return bp;
 }
 
 /*
- * xfs_buf_get_flags assembles a buffer covering the specified range.
- *
+ * Assembles a buffer covering the specified range.
  * Storage in memory for all portions of the buffer will be allocated,
  * although backing storage may not be.
  */
 xfs_buf_t *
-xfs_buf_get_flags(			/* allocate a buffer		*/
+xfs_buf_get_flags(
 	xfs_buftarg_t		*target,/* target for buffer		*/
 	loff_t			ioff,	/* starting offset of range	*/
 	size_t			isize,	/* length of range		*/
-	page_buf_flags_t	flags)	/* PBF_TRYLOCK			*/
+	xfs_buf_flags_t		flags)
 {
-	xfs_buf_t		*pb, *new_pb;
+	xfs_buf_t		*bp, *new_bp;
 	int			error = 0, i;
 
-	new_pb = pagebuf_allocate(flags);
-	if (unlikely(!new_pb))
+	new_bp = xfs_buf_allocate(flags);
+	if (unlikely(!new_bp))
 		return NULL;
 
-	pb = _pagebuf_find(target, ioff, isize, flags, new_pb);
-	if (pb == new_pb) {
-		error = _pagebuf_lookup_pages(pb, flags);
+	bp = _xfs_buf_find(target, ioff, isize, flags, new_bp);
+	if (bp == new_bp) {
+		error = _xfs_buf_lookup_pages(bp, flags);
 		if (error)
 			goto no_buffer;
 	} else {
-		pagebuf_deallocate(new_pb);
-		if (unlikely(pb == NULL))
+		xfs_buf_deallocate(new_bp);
+		if (unlikely(bp == NULL))
 			return NULL;
 	}
 
-	for (i = 0; i < pb->pb_page_count; i++)
-		mark_page_accessed(pb->pb_pages[i]);
+	for (i = 0; i < bp->b_page_count; i++)
+		mark_page_accessed(bp->b_pages[i]);
 
-	if (!(pb->pb_flags & PBF_MAPPED)) {
-		error = _pagebuf_map_pages(pb, flags);
+	if (!(bp->b_flags & XBF_MAPPED)) {
+		error = _xfs_buf_map_pages(bp, flags);
 		if (unlikely(error)) {
 			printk(KERN_WARNING "%s: failed to map pages\n",
 					__FUNCTION__);
@@ -599,22 +595,22 @@ xfs_buf_get_flags(	/* allocate a buffer */
 		}
 	}
 
-	XFS_STATS_INC(pb_get);
+	XFS_STATS_INC(xb_get);
 
 	/*
 	 * Always fill in the block number now, the mapped cases can do
 	 * their own overlay of this later.
 	 */
-	pb->pb_bn = ioff;
-	pb->pb_count_desired = pb->pb_buffer_length;
+	bp->b_bn = ioff;
+	bp->b_count_desired = bp->b_buffer_length;
 
-	PB_TRACE(pb, "get", (unsigned long)flags);
-	return pb;
+	XB_TRACE(bp, "get", (unsigned long)flags);
+	return bp;
 
  no_buffer:
-	if (flags & (PBF_LOCK | PBF_TRYLOCK))
-		pagebuf_unlock(pb);
-	pagebuf_rele(pb);
+	if (flags & (XBF_LOCK | XBF_TRYLOCK))
+		xfs_buf_unlock(bp);
+	xfs_buf_rele(bp);
 	return NULL;
 }
 
@@ -623,73 +619,73 @@ xfs_buf_read_flags(
 	xfs_buftarg_t		*target,
 	loff_t			ioff,
 	size_t			isize,
-	page_buf_flags_t	flags)
+	xfs_buf_flags_t		flags)
 {
-	xfs_buf_t		*pb;
+	xfs_buf_t		*bp;
 
-	flags |= PBF_READ;
+	flags |= XBF_READ;
 
-	pb = xfs_buf_get_flags(target, ioff, isize, flags);
-	if (pb) {
-		if (!XFS_BUF_ISDONE(pb)) {
-			PB_TRACE(pb, "read", (unsigned long)flags);
-			XFS_STATS_INC(pb_get_read);
-			pagebuf_iostart(pb, flags);
-		} else if (flags & PBF_ASYNC) {
-			PB_TRACE(pb, "read_async", (unsigned long)flags);
+	bp = xfs_buf_get_flags(target, ioff, isize, flags);
+	if (bp) {
+		if (!XFS_BUF_ISDONE(bp)) {
+			XB_TRACE(bp, "read", (unsigned long)flags);
+			XFS_STATS_INC(xb_get_read);
+			xfs_buf_iostart(bp, flags);
+		} else if (flags & XBF_ASYNC) {
+			XB_TRACE(bp, "read_async", (unsigned long)flags);
 			/*
 			 * Read ahead call which is already satisfied,
 			 * drop the buffer
 			 */
 			goto no_buffer;
 		} else {
-			PB_TRACE(pb, "read_done", (unsigned long)flags);
+			XB_TRACE(bp, "read_done", (unsigned long)flags);
 			/* We do not want read in the flags */
-			pb->pb_flags &= ~PBF_READ;
+			bp->b_flags &= ~XBF_READ;
 		}
 	}
 
-	return pb;
+	return bp;
 
  no_buffer:
-	if (flags & (PBF_LOCK | PBF_TRYLOCK))
-		pagebuf_unlock(pb);
-	pagebuf_rele(pb);
+	if (flags & (XBF_LOCK | XBF_TRYLOCK))
+		xfs_buf_unlock(bp);
+	xfs_buf_rele(bp);
 	return NULL;
 }
 
 /*
  * If we are not low on memory then do the readahead in a deadlock
  * safe manner.
  */
 void
-pagebuf_readahead(
+xfs_buf_readahead(
 	xfs_buftarg_t		*target,
 	loff_t			ioff,
 	size_t			isize,
-	page_buf_flags_t	flags)
+	xfs_buf_flags_t		flags)
 {
 	struct backing_dev_info *bdi;
 
-	bdi = target->pbr_mapping->backing_dev_info;
+	bdi = target->bt_mapping->backing_dev_info;
 	if (bdi_read_congested(bdi))
 		return;
 
-	flags |= (PBF_TRYLOCK|PBF_ASYNC|PBF_READ_AHEAD);
+	flags |= (XBF_TRYLOCK|XBF_ASYNC|XBF_READ_AHEAD);
 	xfs_buf_read_flags(target, ioff, isize, flags);
 }
 
 xfs_buf_t *
-pagebuf_get_empty(
+xfs_buf_get_empty(
 	size_t			len,
 	xfs_buftarg_t		*target)
 {
-	xfs_buf_t		*pb;
+	xfs_buf_t		*bp;
 
-	pb = pagebuf_allocate(0);
-	if (pb)
-		_pagebuf_initialize(pb, target, 0, len, 0);
-	return pb;
+	bp = xfs_buf_allocate(0);
+	if (bp)
+		_xfs_buf_initialize(bp, target, 0, len, 0);
+	return bp;
 }
 
 static inline struct page *
@@ -705,8 +701,8 @@ mem_to_page(
 }
 
 int
-pagebuf_associate_memory(
-	xfs_buf_t		*pb,
+xfs_buf_associate_memory(
+	xfs_buf_t		*bp,
 	void			*mem,
 	size_t			len)
 {
@@ -723,40 +719,40 @@ pagebuf_associate_memory(
 		page_count++;
 
 	/* Free any previous set of page pointers */
-	if (pb->pb_pages)
-		_pagebuf_free_pages(pb);
+	if (bp->b_pages)
+		_xfs_buf_free_pages(bp);
 
-	pb->pb_pages = NULL;
-	pb->pb_addr = mem;
+	bp->b_pages = NULL;
+	bp->b_addr = mem;
 
-	rval = _pagebuf_get_pages(pb, page_count, 0);
+	rval = _xfs_buf_get_pages(bp, page_count, 0);
 	if (rval)
 		return rval;
 
-	pb->pb_offset = offset;
+	bp->b_offset = offset;
 	ptr = (size_t) mem & PAGE_CACHE_MASK;
 	end = PAGE_CACHE_ALIGN((size_t) mem + len);
 	end_cur = end;
 	/* set up first page */
-	pb->pb_pages[0] = mem_to_page(mem);
+	bp->b_pages[0] = mem_to_page(mem);
 
 	ptr += PAGE_CACHE_SIZE;
-	pb->pb_page_count = ++i;
+	bp->b_page_count = ++i;
 	while (ptr < end) {
-		pb->pb_pages[i] = mem_to_page((void *)ptr);
-		pb->pb_page_count = ++i;
+		bp->b_pages[i] = mem_to_page((void *)ptr);
+		bp->b_page_count = ++i;
 		ptr += PAGE_CACHE_SIZE;
 	}
-	pb->pb_locked = 0;
+	bp->b_locked = 0;
 
-	pb->pb_count_desired = pb->pb_buffer_length = len;
-	pb->pb_flags |= PBF_MAPPED;
+	bp->b_count_desired = bp->b_buffer_length = len;
+	bp->b_flags |= XBF_MAPPED;
 
 	return 0;
 }
 
 xfs_buf_t *
-pagebuf_get_no_daddr(
+xfs_buf_get_noaddr(
 	size_t			len,
 	xfs_buftarg_t		*target)
 {
@@ -765,10 +761,10 @@ pagebuf_get_no_daddr(
 	void			*data;
 	int			error;
 
-	bp = pagebuf_allocate(0);
+	bp = xfs_buf_allocate(0);
 	if (unlikely(bp == NULL))
 		goto fail;
-	_pagebuf_initialize(bp, target, 0, len, 0);
+	_xfs_buf_initialize(bp, target, 0, len, 0);
 
  try_again:
 	data = kmem_alloc(malloc_len, KM_SLEEP | KM_MAYFAIL);
@@ -777,78 +773,73 @@ pagebuf_get_no_daddr(
 
 	/* check whether alignment matches.. */
 	if ((__psunsigned_t)data !=
-	    ((__psunsigned_t)data & ~target->pbr_smask)) {
+	    ((__psunsigned_t)data & ~target->bt_smask)) {
 		/* .. else double the size and try again */
 		kmem_free(data, malloc_len);
 		malloc_len <<= 1;
 		goto try_again;
 	}
 
-	error = pagebuf_associate_memory(bp, data, len);
+	error = xfs_buf_associate_memory(bp, data, len);
 	if (error)
 		goto fail_free_mem;
-	bp->pb_flags |= _PBF_KMEM_ALLOC;
+	bp->b_flags |= _XBF_KMEM_ALLOC;
 
-	pagebuf_unlock(bp);
+	xfs_buf_unlock(bp);
 
-	PB_TRACE(bp, "no_daddr", data);
+	XB_TRACE(bp, "no_daddr", data);
 	return bp;
  fail_free_mem:
 	kmem_free(data, malloc_len);
  fail_free_buf:
-	pagebuf_free(bp);
+	xfs_buf_free(bp);
 fail:
 	return NULL;
 }
 
 /*
- * pagebuf_hold
- *
  * Increment reference count on buffer, to hold the buffer concurrently
  * with another thread which may release (free) the buffer asynchronously.
- *
  * Must hold the buffer already to call this function.
 */
 void
-pagebuf_hold(
-	xfs_buf_t		*pb)
+xfs_buf_hold(
+	xfs_buf_t		*bp)
 {
-	atomic_inc(&pb->pb_hold);
-	PB_TRACE(pb, "hold", 0);
+	atomic_inc(&bp->b_hold);
+	XB_TRACE(bp, "hold", 0);
 }
 
 /*
- * pagebuf_rele
- *
- * pagebuf_rele releases a hold on the specified buffer.  If the
- * the hold count is 1, pagebuf_rele calls pagebuf_free.
+ * Releases a hold on the specified buffer.  If the
+ * the hold count is 1, calls xfs_buf_free.
 */
 void
-pagebuf_rele(
-	xfs_buf_t		*pb)
+xfs_buf_rele(
+	xfs_buf_t		*bp)
 {
-	xfs_bufhash_t		*hash = pb->pb_hash;
+	xfs_bufhash_t		*hash = bp->b_hash;
 
-	PB_TRACE(pb, "rele", pb->pb_relse);
+	XB_TRACE(bp, "rele", bp->b_relse);
 
-	if (atomic_dec_and_lock(&pb->pb_hold, &hash->bh_lock)) {
-		if (pb->pb_relse) {
-			atomic_inc(&pb->pb_hold);
+	if (atomic_dec_and_lock(&bp->b_hold, &hash->bh_lock)) {
+		if (bp->b_relse) {
+			atomic_inc(&bp->b_hold);
 			spin_unlock(&hash->bh_lock);
-			(*(pb->pb_relse)) (pb);
-		} else if (pb->pb_flags & PBF_FS_MANAGED) {
+			(*(bp->b_relse)) (bp);
+		} else if (bp->b_flags & XBF_FS_MANAGED) {
 			spin_unlock(&hash->bh_lock);
 		} else {
-			ASSERT(!(pb->pb_flags & (PBF_DELWRI|_PBF_DELWRI_Q)));
-			list_del_init(&pb->pb_hash_list);
+			ASSERT(!(bp->b_flags & (XBF_DELWRI|_XBF_DELWRI_Q)));
+			list_del_init(&bp->b_hash_list);
 			spin_unlock(&hash->bh_lock);
-			pagebuf_free(pb);
+			xfs_buf_free(bp);
 		}
 	} else {
 		/*
 		 * Catch reference count leaks
 		 */
-		ASSERT(atomic_read(&pb->pb_hold) >= 0);
+		ASSERT(atomic_read(&bp->b_hold) >= 0);
 	}
 }
 
@@ -864,168 +855,122 @@ pagebuf_rele(
 */
 
 /*
- * pagebuf_cond_lock
- *
- * pagebuf_cond_lock locks a buffer object, if it is not already locked.
- * Note that this in no way
- * locks the underlying pages, so it is only useful for synchronizing
- * concurrent use of page buffer objects, not for synchronizing independent
- * access to the underlying pages.
+ * Locks a buffer object, if it is not already locked.
+ * Note that this in no way locks the underlying pages, so it is only
+ * useful for synchronizing concurrent use of buffer objects, not for
+ * synchronizing independent access to the underlying pages.
 */
 int
-pagebuf_cond_lock(			/* lock buffer, if not locked	*/
-					/* returns -EBUSY if locked)	*/
-	xfs_buf_t		*pb)
+xfs_buf_cond_lock(
+	xfs_buf_t		*bp)
 {
 	int			locked;
 
-	locked = down_trylock(&pb->pb_sema) == 0;
+	locked = down_trylock(&bp->b_sema) == 0;
 	if (locked) {
-		PB_SET_OWNER(pb);
+		XB_SET_OWNER(bp);
 	}
-	PB_TRACE(pb, "cond_lock", (long)locked);
-	return(locked ? 0 : -EBUSY);
+	XB_TRACE(bp, "cond_lock", (long)locked);
+	return locked ? 0 : -EBUSY;
 }
 
 #if defined(DEBUG) || defined(XFS_BLI_TRACE)
-/*
- * pagebuf_lock_value
- *
- * Return lock value for a pagebuf
- */
 int
-pagebuf_lock_value(
-	xfs_buf_t		*pb)
+xfs_buf_lock_value(
+	xfs_buf_t		*bp)
 {
-	return(atomic_read(&pb->pb_sema.count));
+	return atomic_read(&bp->b_sema.count);
 }
 #endif
 
 /*
- * pagebuf_lock
- *
- * pagebuf_lock locks a buffer object.  Note that this in no way
- * locks the underlying pages, so it is only useful for synchronizing
- * concurrent use of page buffer objects, not for synchronizing independent
- * access to the underlying pages.
+ * Locks a buffer object.
+ * Note that this in no way locks the underlying pages, so it is only
+ * useful for synchronizing concurrent use of buffer objects, not for
+ * synchronizing independent access to the underlying pages.
 */
-int
-pagebuf_lock(
-	xfs_buf_t		*pb)
+void
+xfs_buf_lock(
+	xfs_buf_t		*bp)
 {
-	PB_TRACE(pb, "lock", 0);
-	if (atomic_read(&pb->pb_io_remaining))
-		blk_run_address_space(pb->pb_target->pbr_mapping);
-	down(&pb->pb_sema);
-	PB_SET_OWNER(pb);
-	PB_TRACE(pb, "locked", 0);
-	return 0;
+	XB_TRACE(bp, "lock", 0);
+	if (atomic_read(&bp->b_io_remaining))
+		blk_run_address_space(bp->b_target->bt_mapping);
+	down(&bp->b_sema);
+	XB_SET_OWNER(bp);
+	XB_TRACE(bp, "locked", 0);
 }
 
 /*
- * pagebuf_unlock
- *
- * pagebuf_unlock releases the lock on the buffer object created by
- * pagebuf_lock or pagebuf_cond_lock (not any pinning of underlying pages
- * created by pagebuf_pin).
- *
+ * Releases the lock on the buffer object.
 * If the buffer is marked delwri but is not queued, do so before we
 * unlock the buffer as we need to set flags correctly.  We also need to
 * take a reference for the delwri queue because the unlocker is going to
 * drop their's and they don't know we just queued it.
 */
 void
-pagebuf_unlock(				/* unlock buffer		*/
-	xfs_buf_t		*pb)	/* buffer to unlock		*/
+xfs_buf_unlock(
+	xfs_buf_t		*bp)
 {
-	if ((pb->pb_flags & (PBF_DELWRI|_PBF_DELWRI_Q)) == PBF_DELWRI) {
-		atomic_inc(&pb->pb_hold);
-		pb->pb_flags |= PBF_ASYNC;
-		pagebuf_delwri_queue(pb, 0);
+	if ((bp->b_flags & (XBF_DELWRI|_XBF_DELWRI_Q)) == XBF_DELWRI) {
+		atomic_inc(&bp->b_hold);
+		bp->b_flags |= XBF_ASYNC;
+		xfs_buf_delwri_queue(bp, 0);
 	}
 
-	PB_CLEAR_OWNER(pb);
-	up(&pb->pb_sema);
-	PB_TRACE(pb, "unlock", 0);
+	XB_CLEAR_OWNER(bp);
+	up(&bp->b_sema);
+	XB_TRACE(bp, "unlock", 0);
 }
 
 
 /*
 * Pinning Buffer Storage in Memory
- */
-
-/*
- * pagebuf_pin
- *
- * pagebuf_pin locks all of the memory represented by a buffer in
- * memory.  Multiple calls to pagebuf_pin and pagebuf_unpin, for
- * the same or different buffers affecting a given page, will
- * properly count the number of outstanding "pin" requests.  The
- * buffer may be released after the pagebuf_pin and a different
- * buffer used when calling pagebuf_unpin, if desired.
- * pagebuf_pin should be used by the file system when it wants be
- * assured that no attempt will be made to force the affected
- * memory to disk.  It does not assure that a given logical page
- * will not be moved to a different physical page.
+ * Ensure that no attempt to force a buffer to disk will succeed.
 */
 void
-pagebuf_pin(
-	xfs_buf_t		*pb)
+xfs_buf_pin(
+	xfs_buf_t		*bp)
 {
-	atomic_inc(&pb->pb_pin_count);
-	PB_TRACE(pb, "pin", (long)pb->pb_pin_count.counter);
+	atomic_inc(&bp->b_pin_count);
+	XB_TRACE(bp, "pin", (long)bp->b_pin_count.counter);
 }
 
-/*
- * pagebuf_unpin
- *
- * pagebuf_unpin reverses the locking of memory performed by
- * pagebuf_pin.  Note that both functions affected the logical
- * pages associated with the buffer, not the buffer itself.
- */
 void
-pagebuf_unpin(
-	xfs_buf_t		*pb)
+xfs_buf_unpin(
+	xfs_buf_t		*bp)
 {
-	if (atomic_dec_and_test(&pb->pb_pin_count)) {
-		wake_up_all(&pb->pb_waiters);
-	}
-	PB_TRACE(pb, "unpin", (long)pb->pb_pin_count.counter);
+	if (atomic_dec_and_test(&bp->b_pin_count))
+		wake_up_all(&bp->b_waiters);
+	XB_TRACE(bp, "unpin", (long)bp->b_pin_count.counter);
 }
 
 int
-pagebuf_ispin(
-	xfs_buf_t		*pb)
+xfs_buf_ispin(
+	xfs_buf_t		*bp)
 {
-	return atomic_read(&pb->pb_pin_count);
+	return atomic_read(&bp->b_pin_count);
 }
 
-/*
- * pagebuf_wait_unpin
- *
- * pagebuf_wait_unpin waits until all of the memory associated
- * with the buffer is not longer locked in memory.  It returns
- * immediately if none of the affected pages are locked.
- */
-static inline void
-_pagebuf_wait_unpin(
-	xfs_buf_t		*pb)
+STATIC void
+xfs_buf_wait_unpin(
+	xfs_buf_t		*bp)
 {
 	DECLARE_WAITQUEUE	(wait, current);
 
-	if (atomic_read(&pb->pb_pin_count) == 0)
+	if (atomic_read(&bp->b_pin_count) == 0)
 		return;
 
-	add_wait_queue(&pb->pb_waiters, &wait);
+	add_wait_queue(&bp->b_waiters, &wait);
 	for (;;) {
 		set_current_state(TASK_UNINTERRUPTIBLE);
-		if (atomic_read(&pb->pb_pin_count) == 0)
+		if (atomic_read(&bp->b_pin_count) == 0)
 			break;
-		if (atomic_read(&pb->pb_io_remaining))
-			blk_run_address_space(pb->pb_target->pbr_mapping);
+		if (atomic_read(&bp->b_io_remaining))
+			blk_run_address_space(bp->b_target->bt_mapping);
 		schedule();
 	}
-	remove_wait_queue(&pb->pb_waiters, &wait);
+	remove_wait_queue(&bp->b_waiters, &wait);
 	set_current_state(TASK_RUNNING);
 }
 
@@ -1033,241 +978,216 @@ _pagebuf_wait_unpin(
 * Buffer Utility Routines
 */
 
-/*
- * pagebuf_iodone
- *
- * pagebuf_iodone marks a buffer for which I/O is in progress
- * done with respect to that I/O.  The pb_iodone routine, if
- * present, will be called as a side-effect.
- */
 STATIC void
-pagebuf_iodone_work(
+xfs_buf_iodone_work(
 	void			*v)
 {
 	xfs_buf_t		*bp = (xfs_buf_t *)v;
 
-	if (bp->pb_iodone)
-		(*(bp->pb_iodone))(bp);
-	else if (bp->pb_flags & PBF_ASYNC)
+	if (bp->b_iodone)
+		(*(bp->b_iodone))(bp);
+	else if (bp->b_flags & XBF_ASYNC)
 		xfs_buf_relse(bp);
 }
 
 void
-pagebuf_iodone(
-	xfs_buf_t		*pb,
+xfs_buf_ioend(
+	xfs_buf_t		*bp,
 	int			schedule)
 {
-	pb->pb_flags &= ~(PBF_READ | PBF_WRITE);
-	if (pb->pb_error == 0)
-		pb->pb_flags |= PBF_DONE;
+	bp->b_flags &= ~(XBF_READ | XBF_WRITE);
+	if (bp->b_error == 0)
+		bp->b_flags |= XBF_DONE;
 
-	PB_TRACE(pb, "iodone", pb->pb_iodone);
+	XB_TRACE(bp, "iodone", bp->b_iodone);
 
-	if ((pb->pb_iodone) || (pb->pb_flags & PBF_ASYNC)) {
+	if ((bp->b_iodone) || (bp->b_flags & XBF_ASYNC)) {
 		if (schedule) {
-			INIT_WORK(&pb->pb_iodone_work, pagebuf_iodone_work, pb);
-			queue_work(xfslogd_workqueue, &pb->pb_iodone_work);
+			INIT_WORK(&bp->b_iodone_work, xfs_buf_iodone_work, bp);
+			queue_work(xfslogd_workqueue, &bp->b_iodone_work);
 		} else {
-			pagebuf_iodone_work(pb);
+			xfs_buf_iodone_work(bp);
 		}
 	} else {
-		up(&pb->pb_iodonesema);
+		up(&bp->b_iodonesema);
 	}
 }
 
-/*
- * pagebuf_ioerror
- *
- * pagebuf_ioerror sets the error code for a buffer.
- */
 void
-pagebuf_ioerror(			/* mark/clear buffer error flag */
-	xfs_buf_t		*pb,	/* buffer to mark		*/
-	int			error)	/* error to store (0 if none)	*/
+xfs_buf_ioerror(
+	xfs_buf_t		*bp,
+	int			error)
 {
 	ASSERT(error >= 0 && error <= 0xffff);
-	pb->pb_error = (unsigned short)error;
-	PB_TRACE(pb, "ioerror", (unsigned long)error);
+	bp->b_error = (unsigned short)error;
+	XB_TRACE(bp, "ioerror", (unsigned long)error);
 }
 
 /*
- * pagebuf_iostart
- *
- * pagebuf_iostart initiates I/O on a buffer, based on the flags supplied.
- * If necessary, it will arrange for any disk space allocation required,
- * and it will break up the request if the block mappings require it.
- * The pb_iodone routine in the buffer supplied will only be called
+ * Initiate I/O on a buffer, based on the flags supplied.
+ * The b_iodone routine in the buffer supplied will only be called
 * when all of the subsidiary I/O requests, if any, have been completed.
- * pagebuf_iostart calls the pagebuf_ioinitiate routine or
- * pagebuf_iorequest, if the former routine is not defined, to start
- * the I/O on a given low-level request.
 */
 int
-pagebuf_iostart(			/* start I/O on a buffer	  */
-	xfs_buf_t		*pb,	/* buffer to start		  */
-	page_buf_flags_t	flags)	/* PBF_LOCK, PBF_ASYNC, PBF_READ, */
-					/* PBF_WRITE, PBF_DELWRI,	  */
-					/* PBF_DONT_BLOCK		  */
+xfs_buf_iostart(
+	xfs_buf_t		*bp,
+	xfs_buf_flags_t		flags)
 {
 	int			status = 0;
 
-	PB_TRACE(pb, "iostart", (unsigned long)flags);
+	XB_TRACE(bp, "iostart", (unsigned long)flags);
 
-	if (flags & PBF_DELWRI) {
-		pb->pb_flags &= ~(PBF_READ | PBF_WRITE | PBF_ASYNC);
-		pb->pb_flags |= flags & (PBF_DELWRI | PBF_ASYNC);
-		pagebuf_delwri_queue(pb, 1);
+	if (flags & XBF_DELWRI) {
+		bp->b_flags &= ~(XBF_READ | XBF_WRITE | XBF_ASYNC);
+		bp->b_flags |= flags & (XBF_DELWRI | XBF_ASYNC);
+		xfs_buf_delwri_queue(bp, 1);
 		return status;
 	}
 
-	pb->pb_flags &= ~(PBF_READ | PBF_WRITE | PBF_ASYNC | PBF_DELWRI | \
-			PBF_READ_AHEAD | _PBF_RUN_QUEUES);
-	pb->pb_flags |= flags & (PBF_READ | PBF_WRITE | PBF_ASYNC | \
-			PBF_READ_AHEAD | _PBF_RUN_QUEUES);
+	bp->b_flags &= ~(XBF_READ | XBF_WRITE | XBF_ASYNC | XBF_DELWRI | \
+			XBF_READ_AHEAD | _XBF_RUN_QUEUES);
+	bp->b_flags |= flags & (XBF_READ | XBF_WRITE | XBF_ASYNC | \
+			XBF_READ_AHEAD | _XBF_RUN_QUEUES);
 
-	BUG_ON(pb->pb_bn == XFS_BUF_DADDR_NULL);
+	BUG_ON(bp->b_bn == XFS_BUF_DADDR_NULL);
 
 	/* For writes allow an alternate strategy routine to precede
 	 * the actual I/O request (which may not be issued at all in
 	 * a shutdown situation, for example).
 	 */
-	status = (flags & PBF_WRITE) ?
-		pagebuf_iostrategy(pb) : pagebuf_iorequest(pb);
+	status = (flags & XBF_WRITE) ?
+		xfs_buf_iostrategy(bp) : xfs_buf_iorequest(bp);
 
 	/* Wait for I/O if we are not an async request.
 	 * Note: async I/O request completion will release the buffer,
 	 * and that can already be done by this point.  So using the
 	 * buffer pointer from here on, after async I/O, is invalid.
 	 */
-	if (!status && !(flags & PBF_ASYNC))
-		status = pagebuf_iowait(pb);
+	if (!status && !(flags & XBF_ASYNC))
+		status = xfs_buf_iowait(bp);
 
 	return status;
 }
 
-/*
- * Helper routine for pagebuf_iorequest
- */
-
 STATIC __inline__ int
-_pagebuf_iolocked(
-	xfs_buf_t		*pb)
+_xfs_buf_iolocked(
+	xfs_buf_t		*bp)
 {
-	ASSERT(pb->pb_flags & (PBF_READ|PBF_WRITE));
-	if (pb->pb_flags & PBF_READ)
-		return pb->pb_locked;
+	ASSERT(bp->b_flags & (XBF_READ | XBF_WRITE));
+	if (bp->b_flags & XBF_READ)
+		return bp->b_locked;
 	return 0;
 }
 
 STATIC __inline__ void
-_pagebuf_iodone(
-	xfs_buf_t		*pb,
+_xfs_buf_ioend(
+	xfs_buf_t		*bp,
 	int			schedule)
 {
-	if (atomic_dec_and_test(&pb->pb_io_remaining) == 1) {
-		pb->pb_locked = 0;
-		pagebuf_iodone(pb, schedule);
+	if (atomic_dec_and_test(&bp->b_io_remaining) == 1) {
+		bp->b_locked = 0;
+		xfs_buf_ioend(bp, schedule);
 	}
 }
 
 STATIC int
-bio_end_io_pagebuf(
+xfs_buf_bio_end_io(
 	struct bio		*bio,
 	unsigned int		bytes_done,
 	int			error)
 {
-	xfs_buf_t		*pb = (xfs_buf_t *)bio->bi_private;
-	unsigned int		blocksize = pb->pb_target->pbr_bsize;
+	xfs_buf_t		*bp = (xfs_buf_t *)bio->bi_private;
+	unsigned int		blocksize = bp->b_target->bt_bsize;
 	struct bio_vec		*bvec = bio->bi_io_vec + bio->bi_vcnt - 1;
 
 	if (bio->bi_size)
 		return 1;
 
 	if (!test_bit(BIO_UPTODATE, &bio->bi_flags))
-		pb->pb_error = EIO;
+		bp->b_error = EIO;
 
 	do {
 		struct page	*page = bvec->bv_page;
 
-		if (unlikely(pb->pb_error)) {
-			if (pb->pb_flags & PBF_READ)
+		if (unlikely(bp->b_error)) {
+			if (bp->b_flags & XBF_READ)
 				ClearPageUptodate(page);
 			SetPageError(page);
-		} else if (blocksize == PAGE_CACHE_SIZE) {
+		} else if (blocksize >= PAGE_CACHE_SIZE) {
 			SetPageUptodate(page);
 		} else if (!PagePrivate(page) &&
-				(pb->pb_flags & _PBF_PAGE_CACHE)) {
+				(bp->b_flags & _XBF_PAGE_CACHE)) {
 			set_page_region(page, bvec->bv_offset, bvec->bv_len);
 		}
 
 		if (--bvec >= bio->bi_io_vec)
 			prefetchw(&bvec->bv_page->flags);
 
-		if (_pagebuf_iolocked(pb)) {
+		if (_xfs_buf_iolocked(bp)) {
 			unlock_page(page);
 		}
 	} while (bvec >= bio->bi_io_vec);
 
-	_pagebuf_iodone(pb, 1);
+	_xfs_buf_ioend(bp, 1);
 	bio_put(bio);
 	return 0;
 }
 
 STATIC void
-_pagebuf_ioapply(
-	xfs_buf_t		*pb)
+_xfs_buf_ioapply(
+	xfs_buf_t		*bp)
 {
 	int			i, rw, map_i, total_nr_pages, nr_pages;
 	struct bio		*bio;
-	int			offset = pb->pb_offset;
-	int			size = pb->pb_count_desired;
-	sector_t		sector = pb->pb_bn;
-	unsigned int		blocksize = pb->pb_target->pbr_bsize;
-	int			locking = _pagebuf_iolocked(pb);
+	int			offset = bp->b_offset;
+	int			size = bp->b_count_desired;
+	sector_t		sector = bp->b_bn;
+	unsigned int		blocksize = bp->b_target->bt_bsize;
+	int			locking = _xfs_buf_iolocked(bp);
 
-	total_nr_pages = pb->pb_page_count;
+	total_nr_pages = bp->b_page_count;
 	map_i = 0;
 
-	if (pb->pb_flags & _PBF_RUN_QUEUES) {
-		pb->pb_flags &= ~_PBF_RUN_QUEUES;
-		rw = (pb->pb_flags & PBF_READ) ? READ_SYNC : WRITE_SYNC;
+	if (bp->b_flags & _XBF_RUN_QUEUES) {
+		bp->b_flags &= ~_XBF_RUN_QUEUES;
+		rw = (bp->b_flags & XBF_READ) ? READ_SYNC : WRITE_SYNC;
 	} else {
-		rw = (pb->pb_flags & PBF_READ) ? READ : WRITE;
+		rw = (bp->b_flags & XBF_READ) ? READ : WRITE;
 	}
 
-	if (pb->pb_flags & PBF_ORDERED) {
-		ASSERT(!(pb->pb_flags & PBF_READ));
+	if (bp->b_flags & XBF_ORDERED) {
+		ASSERT(!(bp->b_flags & XBF_READ));
 		rw = WRITE_BARRIER;
 	}
 
-	/* Special code path for reading a sub page size pagebuf in --
+	/* Special code path for reading a sub page size buffer in --
 	 * we populate up the whole page, and hence the other metadata
 	 * in the same page.  This optimization is only valid when the
-	 * filesystem block size and the page size are equal.
+	 * filesystem block size is not smaller than the page size.
	 */
-	if ((pb->pb_buffer_length < PAGE_CACHE_SIZE) &&
-	    (pb->pb_flags & PBF_READ) && locking &&
-	    (blocksize == PAGE_CACHE_SIZE)) {
+	if ((bp->b_buffer_length < PAGE_CACHE_SIZE) &&
+	    (bp->b_flags & XBF_READ) && locking &&
+	    (blocksize >= PAGE_CACHE_SIZE)) {
 		bio = bio_alloc(GFP_NOIO, 1);
 
-		bio->bi_bdev = pb->pb_target->pbr_bdev;
+		bio->bi_bdev = bp->b_target->bt_bdev;
 		bio->bi_sector = sector - (offset >> BBSHIFT);
-		bio->bi_end_io = bio_end_io_pagebuf;
-		bio->bi_private = pb;
+		bio->bi_end_io = xfs_buf_bio_end_io;
+		bio->bi_private = bp;
 
-		bio_add_page(bio, pb->pb_pages[0], PAGE_CACHE_SIZE, 0);
+		bio_add_page(bio, bp->b_pages[0], PAGE_CACHE_SIZE, 0);
 		size = 0;
 
-		atomic_inc(&pb->pb_io_remaining);
+		atomic_inc(&bp->b_io_remaining);
 
 		goto submit_io;
 	}
 
 	/* Lock down the pages which we need to for the request */
-	if (locking && (pb->pb_flags & PBF_WRITE) && (pb->pb_locked == 0)) {
+	if (locking && (bp->b_flags & XBF_WRITE) && (bp->b_locked == 0)) {
 		for (i = 0; size; i++) {
 			int		nbytes = PAGE_CACHE_SIZE - offset;
-			struct page	*page = pb->pb_pages[i];
+			struct page	*page = bp->b_pages[i];
 
 			if (nbytes > size)
 				nbytes = size;
@@ -1277,30 +1197,30 @@ _pagebuf_ioapply(
1277 size -= nbytes; 1197 size -= nbytes;
1278 offset = 0; 1198 offset = 0;
1279 } 1199 }
1280 offset = pb->pb_offset; 1200 offset = bp->b_offset;
1281 size = pb->pb_count_desired; 1201 size = bp->b_count_desired;
1282 } 1202 }
1283 1203
1284next_chunk: 1204next_chunk:
1285 atomic_inc(&pb->pb_io_remaining); 1205 atomic_inc(&bp->b_io_remaining);
1286 nr_pages = BIO_MAX_SECTORS >> (PAGE_SHIFT - BBSHIFT); 1206 nr_pages = BIO_MAX_SECTORS >> (PAGE_SHIFT - BBSHIFT);
1287 if (nr_pages > total_nr_pages) 1207 if (nr_pages > total_nr_pages)
1288 nr_pages = total_nr_pages; 1208 nr_pages = total_nr_pages;
1289 1209
1290 bio = bio_alloc(GFP_NOIO, nr_pages); 1210 bio = bio_alloc(GFP_NOIO, nr_pages);
1291 bio->bi_bdev = pb->pb_target->pbr_bdev; 1211 bio->bi_bdev = bp->b_target->bt_bdev;
1292 bio->bi_sector = sector; 1212 bio->bi_sector = sector;
1293 bio->bi_end_io = bio_end_io_pagebuf; 1213 bio->bi_end_io = xfs_buf_bio_end_io;
1294 bio->bi_private = pb; 1214 bio->bi_private = bp;
1295 1215
1296 for (; size && nr_pages; nr_pages--, map_i++) { 1216 for (; size && nr_pages; nr_pages--, map_i++) {
1297 int nbytes = PAGE_CACHE_SIZE - offset; 1217 int rbytes, nbytes = PAGE_CACHE_SIZE - offset;
1298 1218
1299 if (nbytes > size) 1219 if (nbytes > size)
1300 nbytes = size; 1220 nbytes = size;
1301 1221
1302 if (bio_add_page(bio, pb->pb_pages[map_i], 1222 rbytes = bio_add_page(bio, bp->b_pages[map_i], nbytes, offset);
1303 nbytes, offset) < nbytes) 1223 if (rbytes < nbytes)
1304 break; 1224 break;
1305 1225
1306 offset = 0; 1226 offset = 0;
@@ -1316,107 +1236,102 @@ submit_io:
1316 goto next_chunk; 1236 goto next_chunk;
1317 } else { 1237 } else {
1318 bio_put(bio); 1238 bio_put(bio);
1319 pagebuf_ioerror(pb, EIO); 1239 xfs_buf_ioerror(bp, EIO);
1320 } 1240 }
1321} 1241}
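
_xfs_buf_ioapply() splits a multi-page buffer across bios: each chunk is capped by a page-count limit, and a refused bio_add_page() closes the current bio and starts another via next_chunk. A user-space sketch of that loop shape, with chunk_t, alloc_chunk(), add_page() and submit_chunk() as stand-ins, and add_page() modelling a device that accepts at most eight segments per chunk:

#include <stdio.h>

#define MAX_PAGES       16
#define PAGE_SIZE_      4096

typedef struct { int pages; } chunk_t;

/* A fresh chunk per call; models bio_alloc(GFP_NOIO, nr_pages). */
static chunk_t *alloc_chunk(void)
{
        static chunk_t c;
        c.pages = 0;
        return &c;
}

/* Models bio_add_page(): returns bytes accepted, 0 when the chunk is full. */
static int add_page(chunk_t *c, int nbytes)
{
        if (c->pages == 8)      /* pretend the device caps chunks at 8 segments */
                return 0;
        c->pages++;
        return nbytes;
}

static void submit_chunk(chunk_t *c)
{
        printf("submit chunk with %d pages\n", c->pages);
}

int main(void)
{
        int total_pages = 40, map_i = 0;
        int size = total_pages * PAGE_SIZE_, offset = 0;

        while (size) {
                int nr = total_pages - map_i;
                chunk_t *c;

                if (nr > MAX_PAGES)
                        nr = MAX_PAGES;
                c = alloc_chunk();

                for (; size && nr; nr--, map_i++) {
                        int nbytes = PAGE_SIZE_ - offset;

                        if (nbytes > size)
                                nbytes = size;
                        if (add_page(c, nbytes) < nbytes)
                                break;  /* chunk full: submit, start another */
                        offset = 0;
                        size -= nbytes;
                }
                submit_chunk(c);        /* prints five chunks of 8 pages */
        }
        return 0;
}
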
1322 1242
1323/*
1324 * pagebuf_iorequest -- the core I/O request routine.
1325 */
1326int 1243int
1327pagebuf_iorequest( /* start real I/O */ 1244xfs_buf_iorequest(
1328 xfs_buf_t *pb) /* buffer to convey to device */ 1245 xfs_buf_t *bp)
1329{ 1246{
1330 PB_TRACE(pb, "iorequest", 0); 1247 XB_TRACE(bp, "iorequest", 0);
1331 1248
1332 if (pb->pb_flags & PBF_DELWRI) { 1249 if (bp->b_flags & XBF_DELWRI) {
1333 pagebuf_delwri_queue(pb, 1); 1250 xfs_buf_delwri_queue(bp, 1);
1334 return 0; 1251 return 0;
1335 } 1252 }
1336 1253
1337 if (pb->pb_flags & PBF_WRITE) { 1254 if (bp->b_flags & XBF_WRITE) {
1338 _pagebuf_wait_unpin(pb); 1255 xfs_buf_wait_unpin(bp);
1339 } 1256 }
1340 1257
1341 pagebuf_hold(pb); 1258 xfs_buf_hold(bp);
1342 1259
1343 /* Set the count to 1 initially, this will stop an I/O 1260 /* Set the count to 1 initially, this will stop an I/O
1344 * completion callout which happens before we have started 1261 * completion callout which happens before we have started
1345 * all the I/O from calling pagebuf_iodone too early. 1262 * all the I/O from calling xfs_buf_ioend too early.
1346 */ 1263 */
1347 atomic_set(&pb->pb_io_remaining, 1); 1264 atomic_set(&bp->b_io_remaining, 1);
1348 _pagebuf_ioapply(pb); 1265 _xfs_buf_ioapply(bp);
1349 _pagebuf_iodone(pb, 0); 1266 _xfs_buf_ioend(bp, 0);
1350 1267
1351 pagebuf_rele(pb); 1268 xfs_buf_rele(bp);
1352 return 0; 1269 return 0;
1353} 1270}
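
xfs_buf_iorequest() primes b_io_remaining to 1 before submitting anything, so a completion racing with submission can never see the count reach zero; the submitter's own _xfs_buf_ioend(bp, 0) drops that bias last. A small C11 sketch of the same bias pattern, with illustrative names rather than the kernel's:

#include <stdatomic.h>
#include <stdio.h>

static atomic_int io_remaining;

static void ioend(int schedule)
{
        /* fetch_sub returns the old value; 1 means this was the last drop */
        if (atomic_fetch_sub(&io_remaining, 1) == 1)
                printf("all I/O done (schedule=%d)\n", schedule);
}

static void submit_chunks(int nchunks)
{
        for (int i = 0; i < nchunks; i++) {
                atomic_fetch_add(&io_remaining, 1);
                /* ... start async I/O; its completion would call ioend(1) */
                ioend(1);       /* simulate an immediate completion */
        }
}

int main(void)
{
        atomic_store(&io_remaining, 1); /* the submission bias */
        submit_chunks(3);               /* never fires "all done" early */
        ioend(0);                       /* drop the bias: fires "all done" */
        return 0;
}
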
1354 1271
1355/* 1272/*
1356 * pagebuf_iowait 1273 * Waits for I/O to complete on the buffer supplied.
1357 * 1274 * It returns immediately if no I/O is pending.
1358 * pagebuf_iowait waits for I/O to complete on the buffer supplied. 1275 * It returns the I/O error code, if any, or 0 if there was no error.
1359 * It returns immediately if no I/O is pending. In any case, it returns
1360 * the error code, if any, or 0 if there is no error.
1361 */ 1276 */
1362int 1277int
1363pagebuf_iowait( 1278xfs_buf_iowait(
1364 xfs_buf_t *pb) 1279 xfs_buf_t *bp)
1365{ 1280{
1366 PB_TRACE(pb, "iowait", 0); 1281 XB_TRACE(bp, "iowait", 0);
1367 if (atomic_read(&pb->pb_io_remaining)) 1282 if (atomic_read(&bp->b_io_remaining))
1368 blk_run_address_space(pb->pb_target->pbr_mapping); 1283 blk_run_address_space(bp->b_target->bt_mapping);
1369 down(&pb->pb_iodonesema); 1284 down(&bp->b_iodonesema);
1370 PB_TRACE(pb, "iowaited", (long)pb->pb_error); 1285 XB_TRACE(bp, "iowaited", (long)bp->b_error);
1371 return pb->pb_error; 1286 return bp->b_error;
1372} 1287}
1373 1288
1374caddr_t 1289xfs_caddr_t
1375pagebuf_offset( 1290xfs_buf_offset(
1376 xfs_buf_t *pb, 1291 xfs_buf_t *bp,
1377 size_t offset) 1292 size_t offset)
1378{ 1293{
1379 struct page *page; 1294 struct page *page;
1380 1295
1381 offset += pb->pb_offset; 1296 if (bp->b_flags & XBF_MAPPED)
1297 return XFS_BUF_PTR(bp) + offset;
1382 1298
1383 page = pb->pb_pages[offset >> PAGE_CACHE_SHIFT]; 1299 offset += bp->b_offset;
1384 return (caddr_t) page_address(page) + (offset & (PAGE_CACHE_SIZE - 1)); 1300 page = bp->b_pages[offset >> PAGE_CACHE_SHIFT];
1301 return (xfs_caddr_t)page_address(page) + (offset & (PAGE_CACHE_SIZE-1));
1385} 1302}
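
For an unmapped buffer, xfs_buf_offset() folds in the offset of the data within the first page and then splits the result into a page index and an intra-page remainder. The arithmetic, assuming a 4K page size:

#include <stdio.h>

#define PAGE_SHIFT_     12
#define PAGE_SIZE_      (1UL << PAGE_SHIFT_)

int main(void)
{
        unsigned long b_offset = 512;   /* data starts mid-page */
        unsigned long offset = 5000;    /* caller-relative byte offset */

        offset += b_offset;
        printf("page index %lu, offset in page %lu\n",
               offset >> PAGE_SHIFT_, offset & (PAGE_SIZE_ - 1));
        /* prints: page index 1, offset in page 1416 */
        return 0;
}
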
1386 1303
1387/* 1304/*
1388 * pagebuf_iomove
1389 *
1390 * Move data into or out of a buffer. 1305 * Move data into or out of a buffer.
1391 */ 1306 */
1392void 1307void
1393pagebuf_iomove( 1308xfs_buf_iomove(
1394 xfs_buf_t *pb, /* buffer to process */ 1309 xfs_buf_t *bp, /* buffer to process */
1395 size_t boff, /* starting buffer offset */ 1310 size_t boff, /* starting buffer offset */
1396 size_t bsize, /* length to copy */ 1311 size_t bsize, /* length to copy */
1397 caddr_t data, /* data address */ 1312 caddr_t data, /* data address */
1398 page_buf_rw_t mode) /* read/write flag */ 1313 xfs_buf_rw_t mode) /* read/write/zero flag */
1399{ 1314{
1400 size_t bend, cpoff, csize; 1315 size_t bend, cpoff, csize;
1401 struct page *page; 1316 struct page *page;
1402 1317
1403 bend = boff + bsize; 1318 bend = boff + bsize;
1404 while (boff < bend) { 1319 while (boff < bend) {
1405 page = pb->pb_pages[page_buf_btoct(boff + pb->pb_offset)]; 1320 page = bp->b_pages[xfs_buf_btoct(boff + bp->b_offset)];
1406 cpoff = page_buf_poff(boff + pb->pb_offset); 1321 cpoff = xfs_buf_poff(boff + bp->b_offset);
1407 csize = min_t(size_t, 1322 csize = min_t(size_t,
1408 PAGE_CACHE_SIZE-cpoff, pb->pb_count_desired-boff); 1323 PAGE_CACHE_SIZE-cpoff, bp->b_count_desired-boff);
1409 1324
1410 ASSERT(((csize + cpoff) <= PAGE_CACHE_SIZE)); 1325 ASSERT(((csize + cpoff) <= PAGE_CACHE_SIZE));
1411 1326
1412 switch (mode) { 1327 switch (mode) {
1413 case PBRW_ZERO: 1328 case XBRW_ZERO:
1414 memset(page_address(page) + cpoff, 0, csize); 1329 memset(page_address(page) + cpoff, 0, csize);
1415 break; 1330 break;
1416 case PBRW_READ: 1331 case XBRW_READ:
1417 memcpy(data, page_address(page) + cpoff, csize); 1332 memcpy(data, page_address(page) + cpoff, csize);
1418 break; 1333 break;
1419 case PBRW_WRITE: 1334 case XBRW_WRITE:
1420 memcpy(page_address(page) + cpoff, data, csize); 1335 memcpy(page_address(page) + cpoff, data, csize);
1421 } 1336 }
1422 1337
@@ -1426,12 +1341,12 @@ pagebuf_iomove(
1426} 1341}
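
Each pass of the copy loop above is clamped to whatever remains in the current page. A sketch of the same clamping, simplified to bound each pass by the request length where the kernel code bounds it by b_count_desired:

#include <stdio.h>

#define PAGE_SIZE_      4096UL

int main(void)
{
        size_t b_offset = 512;                  /* data starts mid-page */
        size_t boff = 1000, bsize = 6000;       /* copy bytes [1000, 7000) */
        size_t bend = boff + bsize;

        while (boff < bend) {
                size_t abs_off = boff + b_offset;
                size_t page = abs_off / PAGE_SIZE_;
                size_t cpoff = abs_off % PAGE_SIZE_;
                size_t csize = PAGE_SIZE_ - cpoff;      /* rest of this page */

                if (csize > bend - boff)                /* rest of the request */
                        csize = bend - boff;
                printf("page %zu: %zu bytes at page offset %zu\n",
                       page, csize, cpoff);
                boff += csize;
        }
        /* prints: page 0: 2584 bytes at 1512, then page 1: 3416 bytes at 0 */
        return 0;
}
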
1427 1342
1428/* 1343/*
1429 * Handling of buftargs. 1344 * Handling of buffer targets (buftargs).
1430 */ 1345 */
1431 1346
1432/* 1347/*
1433 * Wait for any bufs with callbacks that have been submitted but 1348 * Wait for any bufs with callbacks that have been submitted but
1434 * have not yet returned... walk the hash list for the target. 1349 * have not yet returned... walk the hash list for the target.
1435 */ 1350 */
1436void 1351void
1437xfs_wait_buftarg( 1352xfs_wait_buftarg(
@@ -1445,15 +1360,15 @@ xfs_wait_buftarg(
1445 hash = &btp->bt_hash[i]; 1360 hash = &btp->bt_hash[i];
1446again: 1361again:
1447 spin_lock(&hash->bh_lock); 1362 spin_lock(&hash->bh_lock);
1448 list_for_each_entry_safe(bp, n, &hash->bh_list, pb_hash_list) { 1363 list_for_each_entry_safe(bp, n, &hash->bh_list, b_hash_list) {
1449 ASSERT(btp == bp->pb_target); 1364 ASSERT(btp == bp->b_target);
1450 if (!(bp->pb_flags & PBF_FS_MANAGED)) { 1365 if (!(bp->b_flags & XBF_FS_MANAGED)) {
1451 spin_unlock(&hash->bh_lock); 1366 spin_unlock(&hash->bh_lock);
1452 /* 1367 /*
1453 * Catch superblock reference count leaks 1368 * Catch superblock reference count leaks
1454 * immediately 1369 * immediately
1455 */ 1370 */
1456 BUG_ON(bp->pb_bn == 0); 1371 BUG_ON(bp->b_bn == 0);
1457 delay(100); 1372 delay(100);
1458 goto again; 1373 goto again;
1459 } 1374 }
@@ -1463,9 +1378,9 @@ again:
1463} 1378}
1464 1379
1465/* 1380/*
1466 * Allocate buffer hash table for a given target. 1381 * Allocate buffer hash table for a given target.
1467 * For devices containing metadata (i.e. not the log/realtime devices) 1382 * For devices containing metadata (i.e. not the log/realtime devices)
1468 * we need to allocate a much larger hash table. 1383 * we need to allocate a much larger hash table.
1469 */ 1384 */
1470STATIC void 1385STATIC void
1471xfs_alloc_bufhash( 1386xfs_alloc_bufhash(
@@ -1488,13 +1403,12 @@ STATIC void
1488xfs_free_bufhash( 1403xfs_free_bufhash(
1489 xfs_buftarg_t *btp) 1404 xfs_buftarg_t *btp)
1490{ 1405{
1491 kmem_free(btp->bt_hash, 1406 kmem_free(btp->bt_hash, (1<<btp->bt_hashshift) * sizeof(xfs_bufhash_t));
1492 (1 << btp->bt_hashshift) * sizeof(xfs_bufhash_t));
1493 btp->bt_hash = NULL; 1407 btp->bt_hash = NULL;
1494} 1408}
1495 1409
1496/* 1410/*
1497 * buftarg list for delwrite queue processing 1411 * buftarg list for delwrite queue processing
1498 */ 1412 */
1499STATIC LIST_HEAD(xfs_buftarg_list); 1413STATIC LIST_HEAD(xfs_buftarg_list);
1500STATIC DEFINE_SPINLOCK(xfs_buftarg_lock); 1414STATIC DEFINE_SPINLOCK(xfs_buftarg_lock);
@@ -1524,12 +1438,13 @@ xfs_free_buftarg(
1524{ 1438{
1525 xfs_flush_buftarg(btp, 1); 1439 xfs_flush_buftarg(btp, 1);
1526 if (external) 1440 if (external)
1527 xfs_blkdev_put(btp->pbr_bdev); 1441 xfs_blkdev_put(btp->bt_bdev);
1528 xfs_free_bufhash(btp); 1442 xfs_free_bufhash(btp);
1529 iput(btp->pbr_mapping->host); 1443 iput(btp->bt_mapping->host);
1530 1444
1531 /* unregister the buftarg first so that we don't get a 1445 /* Unregister the buftarg first so that we don't get a
1532 * wakeup finding a non-existent task */ 1446 * wakeup finding a non-existent task
1447 */
1533 xfs_unregister_buftarg(btp); 1448 xfs_unregister_buftarg(btp);
1534 kthread_stop(btp->bt_task); 1449 kthread_stop(btp->bt_task);
1535 1450
@@ -1543,11 +1458,11 @@ xfs_setsize_buftarg_flags(
1543 unsigned int sectorsize, 1458 unsigned int sectorsize,
1544 int verbose) 1459 int verbose)
1545{ 1460{
1546 btp->pbr_bsize = blocksize; 1461 btp->bt_bsize = blocksize;
1547 btp->pbr_sshift = ffs(sectorsize) - 1; 1462 btp->bt_sshift = ffs(sectorsize) - 1;
1548 btp->pbr_smask = sectorsize - 1; 1463 btp->bt_smask = sectorsize - 1;
1549 1464
1550 if (set_blocksize(btp->pbr_bdev, sectorsize)) { 1465 if (set_blocksize(btp->bt_bdev, sectorsize)) {
1551 printk(KERN_WARNING 1466 printk(KERN_WARNING
1552 "XFS: Cannot set_blocksize to %u on device %s\n", 1467 "XFS: Cannot set_blocksize to %u on device %s\n",
1553 sectorsize, XFS_BUFTARG_NAME(btp)); 1468 sectorsize, XFS_BUFTARG_NAME(btp));
@@ -1567,10 +1482,10 @@ xfs_setsize_buftarg_flags(
1567} 1482}
1568 1483
1569/* 1484/*
1570* When allocating the initial buffer target we have not yet 1485 * When allocating the initial buffer target we have not yet
1571* read in the superblock, so don't know what sized sectors	1486	 * read in the superblock, so we don't know what size sectors
1572* are being used is at this early stage. Play safe.	1487	 * are being used at this early stage. Play safe.
1573*/ 1488 */
1574STATIC int 1489STATIC int
1575xfs_setsize_buftarg_early( 1490xfs_setsize_buftarg_early(
1576 xfs_buftarg_t *btp, 1491 xfs_buftarg_t *btp,
@@ -1618,7 +1533,7 @@ xfs_mapping_buftarg(
1618 mapping->a_ops = &mapping_aops; 1533 mapping->a_ops = &mapping_aops;
1619 mapping->backing_dev_info = bdi; 1534 mapping->backing_dev_info = bdi;
1620 mapping_set_gfp_mask(mapping, GFP_NOFS); 1535 mapping_set_gfp_mask(mapping, GFP_NOFS);
1621 btp->pbr_mapping = mapping; 1536 btp->bt_mapping = mapping;
1622 return 0; 1537 return 0;
1623} 1538}
1624 1539
@@ -1651,8 +1566,8 @@ xfs_alloc_buftarg(
1651 1566
1652 btp = kmem_zalloc(sizeof(*btp), KM_SLEEP); 1567 btp = kmem_zalloc(sizeof(*btp), KM_SLEEP);
1653 1568
1654 btp->pbr_dev = bdev->bd_dev; 1569 btp->bt_dev = bdev->bd_dev;
1655 btp->pbr_bdev = bdev; 1570 btp->bt_bdev = bdev;
1656 if (xfs_setsize_buftarg_early(btp, bdev)) 1571 if (xfs_setsize_buftarg_early(btp, bdev))
1657 goto error; 1572 goto error;
1658 if (xfs_mapping_buftarg(btp, bdev)) 1573 if (xfs_mapping_buftarg(btp, bdev))
@@ -1669,63 +1584,61 @@ error:
1669 1584
1670 1585
1671/* 1586/*
1672 * Pagebuf delayed write buffer handling 1587 * Delayed write buffer handling
1673 */ 1588 */
1674STATIC void 1589STATIC void
1675pagebuf_delwri_queue( 1590xfs_buf_delwri_queue(
1676 xfs_buf_t *pb, 1591 xfs_buf_t *bp,
1677 int unlock) 1592 int unlock)
1678{ 1593{
1679 struct list_head *dwq = &pb->pb_target->bt_delwrite_queue; 1594 struct list_head *dwq = &bp->b_target->bt_delwrite_queue;
1680 spinlock_t *dwlk = &pb->pb_target->bt_delwrite_lock; 1595 spinlock_t *dwlk = &bp->b_target->bt_delwrite_lock;
1681 1596
1682 PB_TRACE(pb, "delwri_q", (long)unlock); 1597 XB_TRACE(bp, "delwri_q", (long)unlock);
1683 ASSERT((pb->pb_flags & (PBF_DELWRI|PBF_ASYNC)) == 1598 ASSERT((bp->b_flags&(XBF_DELWRI|XBF_ASYNC)) == (XBF_DELWRI|XBF_ASYNC));
1684 (PBF_DELWRI|PBF_ASYNC));
1685 1599
1686 spin_lock(dwlk); 1600 spin_lock(dwlk);
1687 /* If already in the queue, dequeue and place at tail */ 1601 /* If already in the queue, dequeue and place at tail */
1688 if (!list_empty(&pb->pb_list)) { 1602 if (!list_empty(&bp->b_list)) {
1689 ASSERT(pb->pb_flags & _PBF_DELWRI_Q); 1603 ASSERT(bp->b_flags & _XBF_DELWRI_Q);
1690 if (unlock) { 1604 if (unlock)
1691 atomic_dec(&pb->pb_hold); 1605 atomic_dec(&bp->b_hold);
1692 } 1606 list_del(&bp->b_list);
1693 list_del(&pb->pb_list);
1694 } 1607 }
1695 1608
1696 pb->pb_flags |= _PBF_DELWRI_Q; 1609 bp->b_flags |= _XBF_DELWRI_Q;
1697 list_add_tail(&pb->pb_list, dwq); 1610 list_add_tail(&bp->b_list, dwq);
1698 pb->pb_queuetime = jiffies; 1611 bp->b_queuetime = jiffies;
1699 spin_unlock(dwlk); 1612 spin_unlock(dwlk);
1700 1613
1701 if (unlock) 1614 if (unlock)
1702 pagebuf_unlock(pb); 1615 xfs_buf_unlock(bp);
1703} 1616}
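
Re-queueing an already-queued buffer first removes it and then re-adds it at the tail, keeping the delwri queue ordered by last-queued time. A sketch with a tiny intrusive list standing in for the kernel's list_head, and bt_delwrite_lock elided:

#include <stdio.h>

typedef struct node { struct node *prev, *next; const char *name; } node_t;

static node_t queue = { &queue, &queue, "head" };

static void del_init(node_t *n)
{
        n->prev->next = n->next;
        n->next->prev = n->prev;
        n->next = n->prev = n;          /* empty again, like list_del_init() */
}

static void add_tail(node_t *n)
{
        n->prev = queue.prev;
        n->next = &queue;
        queue.prev->next = n;
        queue.prev = n;
}

static void delwri_queue(node_t *n)
{
        if (n->next != n)               /* already queued: dequeue first */
                del_init(n);
        add_tail(n);                    /* always ends up at the tail */
}

int main(void)
{
        node_t a = { &a, &a, "A" }, b = { &b, &b, "B" };

        delwri_queue(&a);
        delwri_queue(&b);
        delwri_queue(&a);               /* A moves behind B */
        for (node_t *n = queue.next; n != &queue; n = n->next)
                printf("%s\n", n->name);        /* prints B then A */
        return 0;
}
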
1704 1617
1705void 1618void
1706pagebuf_delwri_dequeue( 1619xfs_buf_delwri_dequeue(
1707 xfs_buf_t *pb) 1620 xfs_buf_t *bp)
1708{ 1621{
1709 spinlock_t *dwlk = &pb->pb_target->bt_delwrite_lock; 1622 spinlock_t *dwlk = &bp->b_target->bt_delwrite_lock;
1710 int dequeued = 0; 1623 int dequeued = 0;
1711 1624
1712 spin_lock(dwlk); 1625 spin_lock(dwlk);
1713 if ((pb->pb_flags & PBF_DELWRI) && !list_empty(&pb->pb_list)) { 1626 if ((bp->b_flags & XBF_DELWRI) && !list_empty(&bp->b_list)) {
1714 ASSERT(pb->pb_flags & _PBF_DELWRI_Q); 1627 ASSERT(bp->b_flags & _XBF_DELWRI_Q);
1715 list_del_init(&pb->pb_list); 1628 list_del_init(&bp->b_list);
1716 dequeued = 1; 1629 dequeued = 1;
1717 } 1630 }
1718 pb->pb_flags &= ~(PBF_DELWRI|_PBF_DELWRI_Q); 1631 bp->b_flags &= ~(XBF_DELWRI|_XBF_DELWRI_Q);
1719 spin_unlock(dwlk); 1632 spin_unlock(dwlk);
1720 1633
1721 if (dequeued) 1634 if (dequeued)
1722 pagebuf_rele(pb); 1635 xfs_buf_rele(bp);
1723 1636
1724 PB_TRACE(pb, "delwri_dq", (long)dequeued); 1637 XB_TRACE(bp, "delwri_dq", (long)dequeued);
1725} 1638}
1726 1639
1727STATIC void 1640STATIC void
1728pagebuf_runall_queues( 1641xfs_buf_runall_queues(
1729 struct workqueue_struct *queue) 1642 struct workqueue_struct *queue)
1730{ 1643{
1731 flush_workqueue(queue); 1644 flush_workqueue(queue);
@@ -1740,9 +1653,9 @@ xfsbufd_wakeup(
1740 1653
1741 spin_lock(&xfs_buftarg_lock); 1654 spin_lock(&xfs_buftarg_lock);
1742 list_for_each_entry_safe(btp, n, &xfs_buftarg_list, bt_list) { 1655 list_for_each_entry_safe(btp, n, &xfs_buftarg_list, bt_list) {
1743 if (test_bit(BT_FORCE_SLEEP, &btp->bt_flags)) 1656 if (test_bit(XBT_FORCE_SLEEP, &btp->bt_flags))
1744 continue; 1657 continue;
1745 set_bit(BT_FORCE_FLUSH, &btp->bt_flags); 1658 set_bit(XBT_FORCE_FLUSH, &btp->bt_flags);
1746 barrier(); 1659 barrier();
1747 wake_up_process(btp->bt_task); 1660 wake_up_process(btp->bt_task);
1748 } 1661 }
@@ -1757,7 +1670,7 @@ xfsbufd(
1757 struct list_head tmp; 1670 struct list_head tmp;
1758 unsigned long age; 1671 unsigned long age;
1759 xfs_buftarg_t *target = (xfs_buftarg_t *)data; 1672 xfs_buftarg_t *target = (xfs_buftarg_t *)data;
1760 xfs_buf_t *pb, *n; 1673 xfs_buf_t *bp, *n;
1761 struct list_head *dwq = &target->bt_delwrite_queue; 1674 struct list_head *dwq = &target->bt_delwrite_queue;
1762 spinlock_t *dwlk = &target->bt_delwrite_lock; 1675 spinlock_t *dwlk = &target->bt_delwrite_lock;
1763 1676
@@ -1766,10 +1679,10 @@ xfsbufd(
1766 INIT_LIST_HEAD(&tmp); 1679 INIT_LIST_HEAD(&tmp);
1767 do { 1680 do {
1768 if (unlikely(freezing(current))) { 1681 if (unlikely(freezing(current))) {
1769 set_bit(BT_FORCE_SLEEP, &target->bt_flags); 1682 set_bit(XBT_FORCE_SLEEP, &target->bt_flags);
1770 refrigerator(); 1683 refrigerator();
1771 } else { 1684 } else {
1772 clear_bit(BT_FORCE_SLEEP, &target->bt_flags); 1685 clear_bit(XBT_FORCE_SLEEP, &target->bt_flags);
1773 } 1686 }
1774 1687
1775 schedule_timeout_interruptible( 1688 schedule_timeout_interruptible(
@@ -1777,49 +1690,49 @@ xfsbufd(
1777 1690
1778 age = xfs_buf_age_centisecs * msecs_to_jiffies(10); 1691 age = xfs_buf_age_centisecs * msecs_to_jiffies(10);
1779 spin_lock(dwlk); 1692 spin_lock(dwlk);
1780 list_for_each_entry_safe(pb, n, dwq, pb_list) { 1693 list_for_each_entry_safe(bp, n, dwq, b_list) {
1781 PB_TRACE(pb, "walkq1", (long)pagebuf_ispin(pb)); 1694 XB_TRACE(bp, "walkq1", (long)xfs_buf_ispin(bp));
1782 ASSERT(pb->pb_flags & PBF_DELWRI); 1695 ASSERT(bp->b_flags & XBF_DELWRI);
1783 1696
1784 if (!pagebuf_ispin(pb) && !pagebuf_cond_lock(pb)) { 1697 if (!xfs_buf_ispin(bp) && !xfs_buf_cond_lock(bp)) {
1785 if (!test_bit(BT_FORCE_FLUSH, 1698 if (!test_bit(XBT_FORCE_FLUSH,
1786 &target->bt_flags) && 1699 &target->bt_flags) &&
1787 time_before(jiffies, 1700 time_before(jiffies,
1788 pb->pb_queuetime + age)) { 1701 bp->b_queuetime + age)) {
1789 pagebuf_unlock(pb); 1702 xfs_buf_unlock(bp);
1790 break; 1703 break;
1791 } 1704 }
1792 1705
1793 pb->pb_flags &= ~(PBF_DELWRI|_PBF_DELWRI_Q); 1706 bp->b_flags &= ~(XBF_DELWRI|_XBF_DELWRI_Q);
1794 pb->pb_flags |= PBF_WRITE; 1707 bp->b_flags |= XBF_WRITE;
1795 list_move(&pb->pb_list, &tmp); 1708 list_move(&bp->b_list, &tmp);
1796 } 1709 }
1797 } 1710 }
1798 spin_unlock(dwlk); 1711 spin_unlock(dwlk);
1799 1712
1800 while (!list_empty(&tmp)) { 1713 while (!list_empty(&tmp)) {
1801 pb = list_entry(tmp.next, xfs_buf_t, pb_list); 1714 bp = list_entry(tmp.next, xfs_buf_t, b_list);
1802 ASSERT(target == pb->pb_target); 1715 ASSERT(target == bp->b_target);
1803 1716
1804 list_del_init(&pb->pb_list); 1717 list_del_init(&bp->b_list);
1805 pagebuf_iostrategy(pb); 1718 xfs_buf_iostrategy(bp);
1806 1719
1807 blk_run_address_space(target->pbr_mapping); 1720 blk_run_address_space(target->bt_mapping);
1808 } 1721 }
1809 1722
1810 if (as_list_len > 0) 1723 if (as_list_len > 0)
1811 purge_addresses(); 1724 purge_addresses();
1812 1725
1813 clear_bit(BT_FORCE_FLUSH, &target->bt_flags); 1726 clear_bit(XBT_FORCE_FLUSH, &target->bt_flags);
1814 } while (!kthread_should_stop()); 1727 } while (!kthread_should_stop());
1815 1728
1816 return 0; 1729 return 0;
1817} 1730}
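
xfsbufd() leaves any buffer alone that has been queued for less than the configured age, unless a force flush is pending. The predicate, modelled with plain integers in place of jiffies and time_before():

#include <stdbool.h>
#include <stdio.h>

static bool should_flush(unsigned long now, unsigned long queuetime,
                         unsigned long age, bool force)
{
        /* kernel: skip if !force && time_before(jiffies, b_queuetime + age) */
        return force || now >= queuetime + age;
}

int main(void)
{
        unsigned long age = 1500;       /* e.g. xfs_buf_age_centisecs, in ticks */

        printf("%d\n", should_flush(2000, 1000, age, false));   /* 0: too young */
        printf("%d\n", should_flush(2600, 1000, age, false));   /* 1: old enough */
        printf("%d\n", should_flush(2000, 1000, age, true));    /* 1: forced */
        return 0;
}
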
1818 1731
1819/* 1732/*
1820 * Go through all incore buffers, and release buffers if they belong to 1733 * Go through all incore buffers, and release buffers if they belong to
1821 * the given device. This is used in filesystem error handling to 1734 * the given device. This is used in filesystem error handling to
1822 * preserve the consistency of its metadata. 1735 * preserve the consistency of its metadata.
1823 */ 1736 */
1824int 1737int
1825xfs_flush_buftarg( 1738xfs_flush_buftarg(
@@ -1827,73 +1740,72 @@ xfs_flush_buftarg(
1827 int wait) 1740 int wait)
1828{ 1741{
1829 struct list_head tmp; 1742 struct list_head tmp;
1830 xfs_buf_t *pb, *n; 1743 xfs_buf_t *bp, *n;
1831 int pincount = 0; 1744 int pincount = 0;
1832 struct list_head *dwq = &target->bt_delwrite_queue; 1745 struct list_head *dwq = &target->bt_delwrite_queue;
1833 spinlock_t *dwlk = &target->bt_delwrite_lock; 1746 spinlock_t *dwlk = &target->bt_delwrite_lock;
1834 1747
1835 pagebuf_runall_queues(xfsdatad_workqueue); 1748 xfs_buf_runall_queues(xfsdatad_workqueue);
1836 pagebuf_runall_queues(xfslogd_workqueue); 1749 xfs_buf_runall_queues(xfslogd_workqueue);
1837 1750
1838 INIT_LIST_HEAD(&tmp); 1751 INIT_LIST_HEAD(&tmp);
1839 spin_lock(dwlk); 1752 spin_lock(dwlk);
1840 list_for_each_entry_safe(pb, n, dwq, pb_list) { 1753 list_for_each_entry_safe(bp, n, dwq, b_list) {
1841 1754 ASSERT(bp->b_target == target);
1842 ASSERT(pb->pb_target == target); 1755 ASSERT(bp->b_flags & (XBF_DELWRI | _XBF_DELWRI_Q));
1843 ASSERT(pb->pb_flags & (PBF_DELWRI|_PBF_DELWRI_Q)); 1756 XB_TRACE(bp, "walkq2", (long)xfs_buf_ispin(bp));
1844 PB_TRACE(pb, "walkq2", (long)pagebuf_ispin(pb)); 1757 if (xfs_buf_ispin(bp)) {
1845 if (pagebuf_ispin(pb)) {
1846 pincount++; 1758 pincount++;
1847 continue; 1759 continue;
1848 } 1760 }
1849 1761
1850 list_move(&pb->pb_list, &tmp); 1762 list_move(&bp->b_list, &tmp);
1851 } 1763 }
1852 spin_unlock(dwlk); 1764 spin_unlock(dwlk);
1853 1765
1854 /* 1766 /*
1855 * Dropped the delayed write list lock, now walk the temporary list 1767 * Dropped the delayed write list lock, now walk the temporary list
1856 */ 1768 */
1857 list_for_each_entry_safe(pb, n, &tmp, pb_list) { 1769 list_for_each_entry_safe(bp, n, &tmp, b_list) {
1858 pagebuf_lock(pb); 1770 xfs_buf_lock(bp);
1859 pb->pb_flags &= ~(PBF_DELWRI|_PBF_DELWRI_Q); 1771 bp->b_flags &= ~(XBF_DELWRI|_XBF_DELWRI_Q);
1860 pb->pb_flags |= PBF_WRITE; 1772 bp->b_flags |= XBF_WRITE;
1861 if (wait) 1773 if (wait)
1862 pb->pb_flags &= ~PBF_ASYNC; 1774 bp->b_flags &= ~XBF_ASYNC;
1863 else 1775 else
1864 list_del_init(&pb->pb_list); 1776 list_del_init(&bp->b_list);
1865 1777
1866 pagebuf_iostrategy(pb); 1778 xfs_buf_iostrategy(bp);
1867 } 1779 }
1868 1780
1869 /* 1781 /*
1870 * Remaining list items must be flushed before returning 1782 * Remaining list items must be flushed before returning
1871 */ 1783 */
1872 while (!list_empty(&tmp)) { 1784 while (!list_empty(&tmp)) {
1873 pb = list_entry(tmp.next, xfs_buf_t, pb_list); 1785 bp = list_entry(tmp.next, xfs_buf_t, b_list);
1874 1786
1875 list_del_init(&pb->pb_list); 1787 list_del_init(&bp->b_list);
1876 xfs_iowait(pb); 1788 xfs_iowait(bp);
1877 xfs_buf_relse(pb); 1789 xfs_buf_relse(bp);
1878 } 1790 }
1879 1791
1880 if (wait) 1792 if (wait)
1881 blk_run_address_space(target->pbr_mapping); 1793 blk_run_address_space(target->bt_mapping);
1882 1794
1883 return pincount; 1795 return pincount;
1884} 1796}
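
xfs_flush_buftarg() works in two phases: collect unpinned buffers onto a private list while holding bt_delwrite_lock, then drop the lock and issue the I/O. A sketch of that partition, with pinned state reduced to a flag and the lock elided:

#include <stdio.h>

#define NBUF 5

int main(void)
{
        int pinned[NBUF] = { 0, 1, 0, 0, 1 };
        int tmp[NBUF], ntmp = 0, pincount = 0;

        /* Phase 1: under the (elided) delwri lock, partition the queue. */
        for (int i = 0; i < NBUF; i++) {
                if (pinned[i]) {
                        pincount++;     /* leave it queued, report it */
                        continue;
                }
                tmp[ntmp++] = i;        /* list_move() to the private list */
        }

        /* Phase 2: lock dropped, issue I/O on the private list. */
        for (int i = 0; i < ntmp; i++)
                printf("write buffer %d\n", tmp[i]);
        printf("pincount = %d\n", pincount);    /* 2 */
        return 0;
}
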
1885 1797
1886int __init 1798int __init
1887pagebuf_init(void) 1799xfs_buf_init(void)
1888{ 1800{
1889 int error = -ENOMEM; 1801 int error = -ENOMEM;
1890 1802
1891#ifdef PAGEBUF_TRACE 1803#ifdef XFS_BUF_TRACE
1892 pagebuf_trace_buf = ktrace_alloc(PAGEBUF_TRACE_SIZE, KM_SLEEP); 1804 xfs_buf_trace_buf = ktrace_alloc(XFS_BUF_TRACE_SIZE, KM_SLEEP);
1893#endif 1805#endif
1894 1806
1895 pagebuf_zone = kmem_zone_init(sizeof(xfs_buf_t), "xfs_buf"); 1807 xfs_buf_zone = kmem_zone_init(sizeof(xfs_buf_t), "xfs_buf");
1896 if (!pagebuf_zone) 1808 if (!xfs_buf_zone)
1897 goto out_free_trace_buf; 1809 goto out_free_trace_buf;
1898 1810
1899 xfslogd_workqueue = create_workqueue("xfslogd"); 1811 xfslogd_workqueue = create_workqueue("xfslogd");
@@ -1904,8 +1816,8 @@ pagebuf_init(void)
1904 if (!xfsdatad_workqueue) 1816 if (!xfsdatad_workqueue)
1905 goto out_destroy_xfslogd_workqueue; 1817 goto out_destroy_xfslogd_workqueue;
1906 1818
1907 pagebuf_shake = kmem_shake_register(xfsbufd_wakeup); 1819 xfs_buf_shake = kmem_shake_register(xfsbufd_wakeup);
1908 if (!pagebuf_shake) 1820 if (!xfs_buf_shake)
1909 goto out_destroy_xfsdatad_workqueue; 1821 goto out_destroy_xfsdatad_workqueue;
1910 1822
1911 return 0; 1823 return 0;
@@ -1915,22 +1827,22 @@ pagebuf_init(void)
1915 out_destroy_xfslogd_workqueue: 1827 out_destroy_xfslogd_workqueue:
1916 destroy_workqueue(xfslogd_workqueue); 1828 destroy_workqueue(xfslogd_workqueue);
1917 out_free_buf_zone: 1829 out_free_buf_zone:
1918 kmem_zone_destroy(pagebuf_zone); 1830 kmem_zone_destroy(xfs_buf_zone);
1919 out_free_trace_buf: 1831 out_free_trace_buf:
1920#ifdef PAGEBUF_TRACE 1832#ifdef XFS_BUF_TRACE
1921 ktrace_free(pagebuf_trace_buf); 1833 ktrace_free(xfs_buf_trace_buf);
1922#endif 1834#endif
1923 return error; 1835 return error;
1924} 1836}
1925 1837
1926void 1838void
1927pagebuf_terminate(void) 1839xfs_buf_terminate(void)
1928{ 1840{
1929 kmem_shake_deregister(pagebuf_shake); 1841 kmem_shake_deregister(xfs_buf_shake);
1930 destroy_workqueue(xfsdatad_workqueue); 1842 destroy_workqueue(xfsdatad_workqueue);
1931 destroy_workqueue(xfslogd_workqueue); 1843 destroy_workqueue(xfslogd_workqueue);
1932 kmem_zone_destroy(pagebuf_zone); 1844 kmem_zone_destroy(xfs_buf_zone);
1933#ifdef PAGEBUF_TRACE 1845#ifdef XFS_BUF_TRACE
1934 ktrace_free(pagebuf_trace_buf); 1846 ktrace_free(xfs_buf_trace_buf);
1935#endif 1847#endif
1936} 1848}
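
xfs_buf_init() unwinds partial construction through a cascade of labels in reverse order of setup. The same goto-unwind shape, with hypothetical make_*() calls standing in for the zone, the workqueues and the shaker:

#include <stdio.h>
#include <stdlib.h>

static void *make_a(void) { return malloc(1); }
static void *make_b(void) { return malloc(1); }
static void *make_c(void) { return NULL; }      /* force a failure */

static int init(void)
{
        void *a, *b, *c;

        if (!(a = make_a()))
                goto out;
        if (!(b = make_b()))
                goto out_free_a;
        if (!(c = make_c()))
                goto out_free_b;
        return 0;                       /* success: a, b, c stay live */

 out_free_b:
        free(b);
 out_free_a:
        free(a);
 out:
        return -1;
}

int main(void)
{
        printf("init: %d\n", init());   /* -1: make_c() failed, b then a freed */
        return 0;
}
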
diff --git a/fs/xfs/linux-2.6/xfs_buf.h b/fs/xfs/linux-2.6/xfs_buf.h
index f721d47ad4cc..4dd6592d5a4c 100644
--- a/fs/xfs/linux-2.6/xfs_buf.h
+++ b/fs/xfs/linux-2.6/xfs_buf.h
@@ -32,44 +32,47 @@
32 * Base types 32 * Base types
33 */ 33 */
34 34
35#define XFS_BUF_DADDR_NULL ((xfs_daddr_t) (-1LL)) 35#define XFS_BUF_DADDR_NULL ((xfs_daddr_t) (-1LL))
36 36
37#define page_buf_ctob(pp) ((pp) * PAGE_CACHE_SIZE) 37#define xfs_buf_ctob(pp) ((pp) * PAGE_CACHE_SIZE)
38#define page_buf_btoc(dd) (((dd) + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT) 38#define xfs_buf_btoc(dd) (((dd) + PAGE_CACHE_SIZE-1) >> PAGE_CACHE_SHIFT)
39#define page_buf_btoct(dd) ((dd) >> PAGE_CACHE_SHIFT) 39#define xfs_buf_btoct(dd) ((dd) >> PAGE_CACHE_SHIFT)
40#define page_buf_poff(aa) ((aa) & ~PAGE_CACHE_MASK) 40#define xfs_buf_poff(aa) ((aa) & ~PAGE_CACHE_MASK)
41 41
42typedef enum page_buf_rw_e { 42typedef enum {
43 PBRW_READ = 1, /* transfer into target memory */ 43 XBRW_READ = 1, /* transfer into target memory */
44 PBRW_WRITE = 2, /* transfer from target memory */ 44 XBRW_WRITE = 2, /* transfer from target memory */
45 PBRW_ZERO = 3 /* Zero target memory */ 45 XBRW_ZERO = 3, /* Zero target memory */
46} page_buf_rw_t; 46} xfs_buf_rw_t;
47 47
48 48typedef enum {
49typedef enum page_buf_flags_e { /* pb_flags values */ 49 XBF_READ = (1 << 0), /* buffer intended for reading from device */
50 PBF_READ = (1 << 0), /* buffer intended for reading from device */ 50 XBF_WRITE = (1 << 1), /* buffer intended for writing to device */
51 PBF_WRITE = (1 << 1), /* buffer intended for writing to device */ 51 XBF_MAPPED = (1 << 2), /* buffer mapped (b_addr valid) */
52 PBF_MAPPED = (1 << 2), /* buffer mapped (pb_addr valid) */ 52 XBF_ASYNC = (1 << 4), /* initiator will not wait for completion */
53 PBF_ASYNC = (1 << 4), /* initiator will not wait for completion */ 53 XBF_DONE = (1 << 5), /* all pages in the buffer uptodate */
54 PBF_DONE = (1 << 5), /* all pages in the buffer uptodate */ 54 XBF_DELWRI = (1 << 6), /* buffer has dirty pages */
55 PBF_DELWRI = (1 << 6), /* buffer has dirty pages */ 55 XBF_STALE = (1 << 7), /* buffer has been staled, do not find it */
56 PBF_STALE = (1 << 7), /* buffer has been staled, do not find it */ 56 XBF_FS_MANAGED = (1 << 8), /* filesystem controls freeing memory */
57 PBF_FS_MANAGED = (1 << 8), /* filesystem controls freeing memory */ 57 XBF_ORDERED = (1 << 11), /* use ordered writes */
58 PBF_ORDERED = (1 << 11), /* use ordered writes */ 58 XBF_READ_AHEAD = (1 << 12), /* asynchronous read-ahead */
59 PBF_READ_AHEAD = (1 << 12), /* asynchronous read-ahead */
60 59
61 /* flags used only as arguments to access routines */ 60 /* flags used only as arguments to access routines */
62 PBF_LOCK = (1 << 14), /* lock requested */ 61 XBF_LOCK = (1 << 14), /* lock requested */
63 PBF_TRYLOCK = (1 << 15), /* lock requested, but do not wait */ 62 XBF_TRYLOCK = (1 << 15), /* lock requested, but do not wait */
64 PBF_DONT_BLOCK = (1 << 16), /* do not block in current thread */ 63 XBF_DONT_BLOCK = (1 << 16), /* do not block in current thread */
65 64
66 /* flags used only internally */ 65 /* flags used only internally */
67 _PBF_PAGE_CACHE = (1 << 17),/* backed by pagecache */ 66 _XBF_PAGE_CACHE = (1 << 17),/* backed by pagecache */
68 _PBF_KMEM_ALLOC = (1 << 18),/* backed by kmem_alloc() */ 67 _XBF_KMEM_ALLOC = (1 << 18),/* backed by kmem_alloc() */
69 _PBF_RUN_QUEUES = (1 << 19),/* run block device task queue */ 68 _XBF_RUN_QUEUES = (1 << 19),/* run block device task queue */
70 _PBF_DELWRI_Q = (1 << 21), /* buffer on delwri queue */ 69 _XBF_DELWRI_Q = (1 << 21), /* buffer on delwri queue */
71} page_buf_flags_t; 70} xfs_buf_flags_t;
72 71
72typedef enum {
73 XBT_FORCE_SLEEP = (0 << 1),
74 XBT_FORCE_FLUSH = (1 << 1),
75} xfs_buftarg_flags_t;
73 76
74typedef struct xfs_bufhash { 77typedef struct xfs_bufhash {
75 struct list_head bh_list; 78 struct list_head bh_list;
@@ -77,14 +80,14 @@ typedef struct xfs_bufhash {
77} xfs_bufhash_t; 80} xfs_bufhash_t;
78 81
79typedef struct xfs_buftarg { 82typedef struct xfs_buftarg {
80 dev_t pbr_dev; 83 dev_t bt_dev;
81 struct block_device *pbr_bdev; 84 struct block_device *bt_bdev;
82 struct address_space *pbr_mapping; 85 struct address_space *bt_mapping;
83 unsigned int pbr_bsize; 86 unsigned int bt_bsize;
84 unsigned int pbr_sshift; 87 unsigned int bt_sshift;
85 size_t pbr_smask; 88 size_t bt_smask;
86 89
87 /* per-device buffer hash table */ 90 /* per device buffer hash table */
88 uint bt_hashmask; 91 uint bt_hashmask;
89 uint bt_hashshift; 92 uint bt_hashshift;
90 xfs_bufhash_t *bt_hash; 93 xfs_bufhash_t *bt_hash;
@@ -94,469 +97,333 @@ typedef struct xfs_buftarg {
94 struct list_head bt_list; 97 struct list_head bt_list;
95 struct list_head bt_delwrite_queue; 98 struct list_head bt_delwrite_queue;
96 spinlock_t bt_delwrite_lock; 99 spinlock_t bt_delwrite_lock;
97 uint bt_flags; 100 unsigned long bt_flags;
98#define BT_FORCE_SLEEP 1
99#define BT_FORCE_FLUSH 2
100} xfs_buftarg_t; 101} xfs_buftarg_t;
101 102
102/* 103/*
103 * xfs_buf_t: Buffer structure for page cache-based buffers 104 * xfs_buf_t: Buffer structure for pagecache-based buffers
104 * 105 *
105 * This buffer structure is used by the page cache buffer management routines 106 * This buffer structure is used by the pagecache buffer management routines
106 * to refer to an assembly of pages forming a logical buffer. The actual I/O 107 * to refer to an assembly of pages forming a logical buffer.
107 * is performed with buffer_head structures, as required by drivers. 108 *
108 * 109 * The buffer structure is used on a temporary basis only, and discarded when
109 * The buffer structure is used on temporary basis only, and discarded when 110 * released. The real data storage is recorded in the pagecache. Buffers are
110 * released. The real data storage is recorded in the page cache. Metadata is
111 * hashed to the block device on which the file system resides. 111 * hashed to the block device on which the file system resides.
112 */ 112 */
113 113
114struct xfs_buf; 114struct xfs_buf;
115typedef void (*xfs_buf_iodone_t)(struct xfs_buf *);
116typedef void (*xfs_buf_relse_t)(struct xfs_buf *);
117typedef int (*xfs_buf_bdstrat_t)(struct xfs_buf *);
115 118
116/* call-back function on I/O completion */ 119#define XB_PAGES 2
117typedef void (*page_buf_iodone_t)(struct xfs_buf *);
118/* call-back function on I/O completion */
119typedef void (*page_buf_relse_t)(struct xfs_buf *);
120/* pre-write function */
121typedef int (*page_buf_bdstrat_t)(struct xfs_buf *);
122
123#define PB_PAGES 2
124 120
125typedef struct xfs_buf { 121typedef struct xfs_buf {
126 struct semaphore pb_sema; /* semaphore for lockables */ 122 struct semaphore b_sema; /* semaphore for lockables */
127 unsigned long pb_queuetime; /* time buffer was queued */ 123 unsigned long b_queuetime; /* time buffer was queued */
128 atomic_t pb_pin_count; /* pin count */ 124 atomic_t b_pin_count; /* pin count */
129 wait_queue_head_t pb_waiters; /* unpin waiters */ 125 wait_queue_head_t b_waiters; /* unpin waiters */
130 struct list_head pb_list; 126 struct list_head b_list;
131 page_buf_flags_t pb_flags; /* status flags */ 127 xfs_buf_flags_t b_flags; /* status flags */
132 struct list_head pb_hash_list; /* hash table list */ 128 struct list_head b_hash_list; /* hash table list */
133 xfs_bufhash_t *pb_hash; /* hash table list start */ 129 xfs_bufhash_t *b_hash; /* hash table list start */
134 xfs_buftarg_t *pb_target; /* buffer target (device) */ 130 xfs_buftarg_t *b_target; /* buffer target (device) */
135 atomic_t pb_hold; /* reference count */ 131 atomic_t b_hold; /* reference count */
136 xfs_daddr_t pb_bn; /* block number for I/O */ 132 xfs_daddr_t b_bn; /* block number for I/O */
137 loff_t pb_file_offset; /* offset in file */ 133 xfs_off_t b_file_offset; /* offset in file */
138 size_t pb_buffer_length; /* size of buffer in bytes */ 134 size_t b_buffer_length;/* size of buffer in bytes */
139 size_t pb_count_desired; /* desired transfer size */ 135 size_t b_count_desired;/* desired transfer size */
140 void *pb_addr; /* virtual address of buffer */ 136 void *b_addr; /* virtual address of buffer */
141 struct work_struct pb_iodone_work; 137 struct work_struct b_iodone_work;
142 atomic_t pb_io_remaining;/* #outstanding I/O requests */ 138 atomic_t b_io_remaining; /* #outstanding I/O requests */
143 page_buf_iodone_t pb_iodone; /* I/O completion function */ 139 xfs_buf_iodone_t b_iodone; /* I/O completion function */
144 page_buf_relse_t pb_relse; /* releasing function */ 140 xfs_buf_relse_t b_relse; /* releasing function */
145 page_buf_bdstrat_t pb_strat; /* pre-write function */ 141 xfs_buf_bdstrat_t b_strat; /* pre-write function */
146 struct semaphore pb_iodonesema; /* Semaphore for I/O waiters */ 142 struct semaphore b_iodonesema; /* Semaphore for I/O waiters */
147 void *pb_fspriv; 143 void *b_fspriv;
148 void *pb_fspriv2; 144 void *b_fspriv2;
149 void *pb_fspriv3; 145 void *b_fspriv3;
150 unsigned short pb_error; /* error code on I/O */ 146 unsigned short b_error; /* error code on I/O */
151 unsigned short pb_locked; /* page array is locked */ 147 unsigned short b_locked; /* page array is locked */
152 unsigned int pb_page_count; /* size of page array */ 148 unsigned int b_page_count; /* size of page array */
153 unsigned int pb_offset; /* page offset in first page */ 149 unsigned int b_offset; /* page offset in first page */
154 struct page **pb_pages; /* array of page pointers */ 150 struct page **b_pages; /* array of page pointers */
155 struct page *pb_page_array[PB_PAGES]; /* inline pages */ 151 struct page *b_page_array[XB_PAGES]; /* inline pages */
156#ifdef PAGEBUF_LOCK_TRACKING 152#ifdef XFS_BUF_LOCK_TRACKING
157 int pb_last_holder; 153 int b_last_holder;
158#endif 154#endif
159} xfs_buf_t; 155} xfs_buf_t;
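
The b_page_array[XB_PAGES] field lets small buffers avoid a separate allocation: b_pages points at the embedded array when the page count fits, and at heap storage otherwise. A sketch of that small-vector setup (illustrative only, not the kernel's page-allocation code):

#include <stdio.h>
#include <stdlib.h>

#define XB_PAGES 2

typedef struct buf {
        unsigned        page_count;
        void            **pages;
        void            *page_array[XB_PAGES];  /* inline storage */
} buf_t;

static int buf_get_pages(buf_t *bp, unsigned count)
{
        bp->page_count = count;
        if (count <= XB_PAGES) {
                bp->pages = bp->page_array;     /* no allocation needed */
        } else {
                bp->pages = calloc(count, sizeof(void *));
                if (!bp->pages)
                        return -1;
        }
        return 0;
}

int main(void)
{
        buf_t small = { 0 }, big = { 0 };

        buf_get_pages(&small, 2);
        buf_get_pages(&big, 8);
        printf("small inline: %d\n", small.pages == small.page_array);  /* 1 */
        printf("big inline:   %d\n", big.pages == big.page_array);      /* 0 */
        free(big.pages);
        return 0;
}
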
160 156
161 157
162/* Finding and Reading Buffers */ 158/* Finding and Reading Buffers */
163 159extern xfs_buf_t *_xfs_buf_find(xfs_buftarg_t *, xfs_off_t, size_t,
164extern xfs_buf_t *_pagebuf_find( /* find buffer for block if */ 160 xfs_buf_flags_t, xfs_buf_t *);
165 /* the block is in memory */
166 xfs_buftarg_t *, /* inode for block */
167 loff_t, /* starting offset of range */
168 size_t, /* length of range */
169 page_buf_flags_t, /* PBF_LOCK */
170 xfs_buf_t *); /* newly allocated buffer */
171
172#define xfs_incore(buftarg,blkno,len,lockit) \ 161#define xfs_incore(buftarg,blkno,len,lockit) \
173 _pagebuf_find(buftarg, blkno ,len, lockit, NULL) 162 _xfs_buf_find(buftarg, blkno ,len, lockit, NULL)
174
175extern xfs_buf_t *xfs_buf_get_flags( /* allocate a buffer */
176 xfs_buftarg_t *, /* inode for buffer */
177 loff_t, /* starting offset of range */
178 size_t, /* length of range */
179 page_buf_flags_t); /* PBF_LOCK, PBF_READ, */
180 /* PBF_ASYNC */
181 163
164extern xfs_buf_t *xfs_buf_get_flags(xfs_buftarg_t *, xfs_off_t, size_t,
165 xfs_buf_flags_t);
182#define xfs_buf_get(target, blkno, len, flags) \ 166#define xfs_buf_get(target, blkno, len, flags) \
183 xfs_buf_get_flags((target), (blkno), (len), PBF_LOCK | PBF_MAPPED) 167 xfs_buf_get_flags((target), (blkno), (len), XBF_LOCK | XBF_MAPPED)
184
185extern xfs_buf_t *xfs_buf_read_flags( /* allocate and read a buffer */
186 xfs_buftarg_t *, /* inode for buffer */
187 loff_t, /* starting offset of range */
188 size_t, /* length of range */
189 page_buf_flags_t); /* PBF_LOCK, PBF_ASYNC */
190 168
169extern xfs_buf_t *xfs_buf_read_flags(xfs_buftarg_t *, xfs_off_t, size_t,
170 xfs_buf_flags_t);
191#define xfs_buf_read(target, blkno, len, flags) \ 171#define xfs_buf_read(target, blkno, len, flags) \
192 xfs_buf_read_flags((target), (blkno), (len), PBF_LOCK | PBF_MAPPED) 172 xfs_buf_read_flags((target), (blkno), (len), XBF_LOCK | XBF_MAPPED)
193 173
194extern xfs_buf_t *pagebuf_get_empty( /* allocate pagebuf struct with */ 174extern xfs_buf_t *xfs_buf_get_empty(size_t, xfs_buftarg_t *);
195 /* no memory or disk address */ 175extern xfs_buf_t *xfs_buf_get_noaddr(size_t, xfs_buftarg_t *);
196 size_t len, 176extern int xfs_buf_associate_memory(xfs_buf_t *, void *, size_t);
197 xfs_buftarg_t *); /* mount point "fake" inode */ 177extern void xfs_buf_hold(xfs_buf_t *);
198 178extern void xfs_buf_readahead(xfs_buftarg_t *, xfs_off_t, size_t,
199extern xfs_buf_t *pagebuf_get_no_daddr(/* allocate pagebuf struct */ 179 xfs_buf_flags_t);
200 /* without disk address */
201 size_t len,
202 xfs_buftarg_t *); /* mount point "fake" inode */
203
204extern int pagebuf_associate_memory(
205 xfs_buf_t *,
206 void *,
207 size_t);
208
209extern void pagebuf_hold( /* increment reference count */
210 xfs_buf_t *); /* buffer to hold */
211
212extern void pagebuf_readahead( /* read ahead into cache */
213 xfs_buftarg_t *, /* target for buffer (or NULL) */
214 loff_t, /* starting offset of range */
215 size_t, /* length of range */
216 page_buf_flags_t); /* additional read flags */
217 180
218/* Releasing Buffers */ 181/* Releasing Buffers */
219 182extern void xfs_buf_free(xfs_buf_t *);
220extern void pagebuf_free( /* deallocate a buffer */ 183extern void xfs_buf_rele(xfs_buf_t *);
221 xfs_buf_t *); /* buffer to deallocate */
222
223extern void pagebuf_rele( /* release hold on a buffer */
224 xfs_buf_t *); /* buffer to release */
225 184
226/* Locking and Unlocking Buffers */ 185/* Locking and Unlocking Buffers */
227 186extern int xfs_buf_cond_lock(xfs_buf_t *);
228extern int pagebuf_cond_lock( /* lock buffer, if not locked */ 187extern int xfs_buf_lock_value(xfs_buf_t *);
229 /* (returns -EBUSY if locked) */ 188extern void xfs_buf_lock(xfs_buf_t *);
230 xfs_buf_t *); /* buffer to lock */ 189extern void xfs_buf_unlock(xfs_buf_t *);
231
232extern int pagebuf_lock_value( /* return count on lock */
233 xfs_buf_t *); /* buffer to check */
234
235extern int pagebuf_lock( /* lock buffer */
236 xfs_buf_t *); /* buffer to lock */
237
238extern void pagebuf_unlock( /* unlock buffer */
239 xfs_buf_t *); /* buffer to unlock */
240 190
241/* Buffer Read and Write Routines */ 191/* Buffer Read and Write Routines */
242 192extern void xfs_buf_ioend(xfs_buf_t *, int);
243extern void pagebuf_iodone( /* mark buffer I/O complete */ 193extern void xfs_buf_ioerror(xfs_buf_t *, int);
244 xfs_buf_t *, /* buffer to mark */ 194extern int xfs_buf_iostart(xfs_buf_t *, xfs_buf_flags_t);
245 int); /* run completion locally, or in 195extern int xfs_buf_iorequest(xfs_buf_t *);
246 * a helper thread. */ 196extern int xfs_buf_iowait(xfs_buf_t *);
247 197extern void xfs_buf_iomove(xfs_buf_t *, size_t, size_t, xfs_caddr_t,
248extern void pagebuf_ioerror( /* mark buffer in error (or not) */ 198 xfs_buf_rw_t);
249 xfs_buf_t *, /* buffer to mark */ 199
250 int); /* error to store (0 if none) */ 200static inline int xfs_buf_iostrategy(xfs_buf_t *bp)
251
252extern int pagebuf_iostart( /* start I/O on a buffer */
253 xfs_buf_t *, /* buffer to start */
254 page_buf_flags_t); /* PBF_LOCK, PBF_ASYNC, */
255 /* PBF_READ, PBF_WRITE, */
256 /* PBF_DELWRI */
257
258extern int pagebuf_iorequest( /* start real I/O */
259 xfs_buf_t *); /* buffer to convey to device */
260
261extern int pagebuf_iowait( /* wait for buffer I/O done */
262 xfs_buf_t *); /* buffer to wait on */
263
264extern void pagebuf_iomove( /* move data in/out of pagebuf */
265 xfs_buf_t *, /* buffer to manipulate */
266 size_t, /* starting buffer offset */
267 size_t, /* length in buffer */
268 caddr_t, /* data pointer */
269 page_buf_rw_t); /* direction */
270
271static inline int pagebuf_iostrategy(xfs_buf_t *pb)
272{ 201{
273 return pb->pb_strat ? pb->pb_strat(pb) : pagebuf_iorequest(pb); 202 return bp->b_strat ? bp->b_strat(bp) : xfs_buf_iorequest(bp);
274} 203}
275 204
276static inline int pagebuf_geterror(xfs_buf_t *pb) 205static inline int xfs_buf_geterror(xfs_buf_t *bp)
277{ 206{
278 return pb ? pb->pb_error : ENOMEM; 207 return bp ? bp->b_error : ENOMEM;
279} 208}
280 209
281/* Buffer Utility Routines */ 210/* Buffer Utility Routines */
282 211extern xfs_caddr_t xfs_buf_offset(xfs_buf_t *, size_t);
283extern caddr_t pagebuf_offset( /* pointer at offset in buffer */
284 xfs_buf_t *, /* buffer to offset into */
285 size_t); /* offset */
286 212
287/* Pinning Buffer Storage in Memory */ 213/* Pinning Buffer Storage in Memory */
288 214extern void xfs_buf_pin(xfs_buf_t *);
289extern void pagebuf_pin( /* pin buffer in memory */ 215extern void xfs_buf_unpin(xfs_buf_t *);
290 xfs_buf_t *); /* buffer to pin */ 216extern int xfs_buf_ispin(xfs_buf_t *);
291
292extern void pagebuf_unpin( /* unpin buffered data */
293 xfs_buf_t *); /* buffer to unpin */
294
295extern int pagebuf_ispin( /* check if buffer is pinned */
296 xfs_buf_t *); /* buffer to check */
297 217
298/* Delayed Write Buffer Routines */ 218/* Delayed Write Buffer Routines */
299 219extern void xfs_buf_delwri_dequeue(xfs_buf_t *);
300extern void pagebuf_delwri_dequeue(xfs_buf_t *);
301 220
302/* Buffer Daemon Setup Routines */ 221/* Buffer Daemon Setup Routines */
222extern int xfs_buf_init(void);
223extern void xfs_buf_terminate(void);
303 224
304extern int pagebuf_init(void); 225#ifdef XFS_BUF_TRACE
305extern void pagebuf_terminate(void); 226extern ktrace_t *xfs_buf_trace_buf;
306 227extern void xfs_buf_trace(xfs_buf_t *, char *, void *, void *);
307
308#ifdef PAGEBUF_TRACE
309extern ktrace_t *pagebuf_trace_buf;
310extern void pagebuf_trace(
311 xfs_buf_t *, /* buffer being traced */
312 char *, /* description of operation */
313 void *, /* arbitrary diagnostic value */
314 void *); /* return address */
315#else 228#else
316# define pagebuf_trace(pb, id, ptr, ra) do { } while (0) 229#define xfs_buf_trace(bp,id,ptr,ra) do { } while (0)
317#endif 230#endif
318 231
319#define pagebuf_target_name(target) \ 232#define xfs_buf_target_name(target) \
320 ({ char __b[BDEVNAME_SIZE]; bdevname((target)->pbr_bdev, __b); __b; }) 233 ({ char __b[BDEVNAME_SIZE]; bdevname((target)->bt_bdev, __b); __b; })
321 234
322 235
236#define XFS_B_ASYNC XBF_ASYNC
237#define XFS_B_DELWRI XBF_DELWRI
238#define XFS_B_READ XBF_READ
239#define XFS_B_WRITE XBF_WRITE
240#define XFS_B_STALE XBF_STALE
323 241
324/* These are just for xfs_syncsub... it sets an internal variable 242#define XFS_BUF_TRYLOCK XBF_TRYLOCK
325 * then passes it to VOP_FLUSH_PAGES or adds the flags to a newly gotten buf_t 243#define XFS_INCORE_TRYLOCK XBF_TRYLOCK
326 */ 244#define XFS_BUF_LOCK XBF_LOCK
327#define XFS_B_ASYNC PBF_ASYNC 245#define XFS_BUF_MAPPED XBF_MAPPED
328#define XFS_B_DELWRI PBF_DELWRI
329#define XFS_B_READ PBF_READ
330#define XFS_B_WRITE PBF_WRITE
331#define XFS_B_STALE PBF_STALE
332
333#define XFS_BUF_TRYLOCK PBF_TRYLOCK
334#define XFS_INCORE_TRYLOCK PBF_TRYLOCK
335#define XFS_BUF_LOCK PBF_LOCK
336#define XFS_BUF_MAPPED PBF_MAPPED
337
338#define BUF_BUSY PBF_DONT_BLOCK
339
340#define XFS_BUF_BFLAGS(x) ((x)->pb_flags)
341#define XFS_BUF_ZEROFLAGS(x) \
342 ((x)->pb_flags &= ~(PBF_READ|PBF_WRITE|PBF_ASYNC|PBF_DELWRI))
343
344#define XFS_BUF_STALE(x) ((x)->pb_flags |= XFS_B_STALE)
345#define XFS_BUF_UNSTALE(x) ((x)->pb_flags &= ~XFS_B_STALE)
346#define XFS_BUF_ISSTALE(x) ((x)->pb_flags & XFS_B_STALE)
347#define XFS_BUF_SUPER_STALE(x) do { \
348 XFS_BUF_STALE(x); \
349 pagebuf_delwri_dequeue(x); \
350 XFS_BUF_DONE(x); \
351 } while (0)
352 246
353#define XFS_BUF_MANAGE PBF_FS_MANAGED 247#define BUF_BUSY XBF_DONT_BLOCK
354#define XFS_BUF_UNMANAGE(x) ((x)->pb_flags &= ~PBF_FS_MANAGED) 248
355 249#define XFS_BUF_BFLAGS(bp) ((bp)->b_flags)
356#define XFS_BUF_DELAYWRITE(x) ((x)->pb_flags |= PBF_DELWRI) 250#define XFS_BUF_ZEROFLAGS(bp) \
357#define XFS_BUF_UNDELAYWRITE(x) pagebuf_delwri_dequeue(x) 251 ((bp)->b_flags &= ~(XBF_READ|XBF_WRITE|XBF_ASYNC|XBF_DELWRI))
358#define XFS_BUF_ISDELAYWRITE(x) ((x)->pb_flags & PBF_DELWRI) 252
359 253#define XFS_BUF_STALE(bp) ((bp)->b_flags |= XFS_B_STALE)
360#define XFS_BUF_ERROR(x,no) pagebuf_ioerror(x,no) 254#define XFS_BUF_UNSTALE(bp) ((bp)->b_flags &= ~XFS_B_STALE)
361#define XFS_BUF_GETERROR(x) pagebuf_geterror(x) 255#define XFS_BUF_ISSTALE(bp) ((bp)->b_flags & XFS_B_STALE)
362#define XFS_BUF_ISERROR(x) (pagebuf_geterror(x)?1:0) 256#define XFS_BUF_SUPER_STALE(bp) do { \
363 257 XFS_BUF_STALE(bp); \
364#define XFS_BUF_DONE(x) ((x)->pb_flags |= PBF_DONE) 258 xfs_buf_delwri_dequeue(bp); \
365#define XFS_BUF_UNDONE(x) ((x)->pb_flags &= ~PBF_DONE) 259 XFS_BUF_DONE(bp); \
366#define XFS_BUF_ISDONE(x) ((x)->pb_flags & PBF_DONE) 260 } while (0)
367
368#define XFS_BUF_BUSY(x) do { } while (0)
369#define XFS_BUF_UNBUSY(x) do { } while (0)
370#define XFS_BUF_ISBUSY(x) (1)
371
372#define XFS_BUF_ASYNC(x) ((x)->pb_flags |= PBF_ASYNC)
373#define XFS_BUF_UNASYNC(x) ((x)->pb_flags &= ~PBF_ASYNC)
374#define XFS_BUF_ISASYNC(x) ((x)->pb_flags & PBF_ASYNC)
375
376#define XFS_BUF_ORDERED(x) ((x)->pb_flags |= PBF_ORDERED)
377#define XFS_BUF_UNORDERED(x) ((x)->pb_flags &= ~PBF_ORDERED)
378#define XFS_BUF_ISORDERED(x) ((x)->pb_flags & PBF_ORDERED)
379
380#define XFS_BUF_SHUT(x) printk("XFS_BUF_SHUT not implemented yet\n")
381#define XFS_BUF_UNSHUT(x) printk("XFS_BUF_UNSHUT not implemented yet\n")
382#define XFS_BUF_ISSHUT(x) (0)
383
384#define XFS_BUF_HOLD(x) pagebuf_hold(x)
385#define XFS_BUF_READ(x) ((x)->pb_flags |= PBF_READ)
386#define XFS_BUF_UNREAD(x) ((x)->pb_flags &= ~PBF_READ)
387#define XFS_BUF_ISREAD(x) ((x)->pb_flags & PBF_READ)
388
389#define XFS_BUF_WRITE(x) ((x)->pb_flags |= PBF_WRITE)
390#define XFS_BUF_UNWRITE(x) ((x)->pb_flags &= ~PBF_WRITE)
391#define XFS_BUF_ISWRITE(x) ((x)->pb_flags & PBF_WRITE)
392
393#define XFS_BUF_ISUNINITIAL(x) (0)
394#define XFS_BUF_UNUNINITIAL(x) (0)
395
396#define XFS_BUF_BP_ISMAPPED(bp) 1
397
398#define XFS_BUF_IODONE_FUNC(buf) (buf)->pb_iodone
399#define XFS_BUF_SET_IODONE_FUNC(buf, func) \
400 (buf)->pb_iodone = (func)
401#define XFS_BUF_CLR_IODONE_FUNC(buf) \
402 (buf)->pb_iodone = NULL
403#define XFS_BUF_SET_BDSTRAT_FUNC(buf, func) \
404 (buf)->pb_strat = (func)
405#define XFS_BUF_CLR_BDSTRAT_FUNC(buf) \
406 (buf)->pb_strat = NULL
407
408#define XFS_BUF_FSPRIVATE(buf, type) \
409 ((type)(buf)->pb_fspriv)
410#define XFS_BUF_SET_FSPRIVATE(buf, value) \
411 (buf)->pb_fspriv = (void *)(value)
412#define XFS_BUF_FSPRIVATE2(buf, type) \
413 ((type)(buf)->pb_fspriv2)
414#define XFS_BUF_SET_FSPRIVATE2(buf, value) \
415 (buf)->pb_fspriv2 = (void *)(value)
416#define XFS_BUF_FSPRIVATE3(buf, type) \
417 ((type)(buf)->pb_fspriv3)
418#define XFS_BUF_SET_FSPRIVATE3(buf, value) \
419 (buf)->pb_fspriv3 = (void *)(value)
420#define XFS_BUF_SET_START(buf)
421
422#define XFS_BUF_SET_BRELSE_FUNC(buf, value) \
423 (buf)->pb_relse = (value)
424
425#define XFS_BUF_PTR(bp) (xfs_caddr_t)((bp)->pb_addr)
426
427static inline xfs_caddr_t xfs_buf_offset(xfs_buf_t *bp, size_t offset)
428{
429 if (bp->pb_flags & PBF_MAPPED)
430 return XFS_BUF_PTR(bp) + offset;
431 return (xfs_caddr_t) pagebuf_offset(bp, offset);
432}
433 261
434#define XFS_BUF_SET_PTR(bp, val, count) \ 262#define XFS_BUF_MANAGE XBF_FS_MANAGED
435 pagebuf_associate_memory(bp, val, count) 263#define XFS_BUF_UNMANAGE(bp) ((bp)->b_flags &= ~XBF_FS_MANAGED)
436#define XFS_BUF_ADDR(bp) ((bp)->pb_bn) 264
437#define XFS_BUF_SET_ADDR(bp, blk) \ 265#define XFS_BUF_DELAYWRITE(bp) ((bp)->b_flags |= XBF_DELWRI)
438 ((bp)->pb_bn = (xfs_daddr_t)(blk)) 266#define XFS_BUF_UNDELAYWRITE(bp) xfs_buf_delwri_dequeue(bp)
439#define XFS_BUF_OFFSET(bp) ((bp)->pb_file_offset) 267#define XFS_BUF_ISDELAYWRITE(bp) ((bp)->b_flags & XBF_DELWRI)
440#define XFS_BUF_SET_OFFSET(bp, off) \ 268
441 ((bp)->pb_file_offset = (off)) 269#define XFS_BUF_ERROR(bp,no) xfs_buf_ioerror(bp,no)
442#define XFS_BUF_COUNT(bp) ((bp)->pb_count_desired) 270#define XFS_BUF_GETERROR(bp) xfs_buf_geterror(bp)
443#define XFS_BUF_SET_COUNT(bp, cnt) \ 271#define XFS_BUF_ISERROR(bp) (xfs_buf_geterror(bp) ? 1 : 0)
444 ((bp)->pb_count_desired = (cnt)) 272
445#define XFS_BUF_SIZE(bp) ((bp)->pb_buffer_length) 273#define XFS_BUF_DONE(bp) ((bp)->b_flags |= XBF_DONE)
446#define XFS_BUF_SET_SIZE(bp, cnt) \ 274#define XFS_BUF_UNDONE(bp) ((bp)->b_flags &= ~XBF_DONE)
447 ((bp)->pb_buffer_length = (cnt)) 275#define XFS_BUF_ISDONE(bp) ((bp)->b_flags & XBF_DONE)
448#define XFS_BUF_SET_VTYPE_REF(bp, type, ref) 276
449#define XFS_BUF_SET_VTYPE(bp, type) 277#define XFS_BUF_BUSY(bp) do { } while (0)
450#define XFS_BUF_SET_REF(bp, ref) 278#define XFS_BUF_UNBUSY(bp) do { } while (0)
451 279#define XFS_BUF_ISBUSY(bp) (1)
452#define XFS_BUF_ISPINNED(bp) pagebuf_ispin(bp) 280
453 281#define XFS_BUF_ASYNC(bp) ((bp)->b_flags |= XBF_ASYNC)
454#define XFS_BUF_VALUSEMA(bp) pagebuf_lock_value(bp) 282#define XFS_BUF_UNASYNC(bp) ((bp)->b_flags &= ~XBF_ASYNC)
455#define XFS_BUF_CPSEMA(bp) (pagebuf_cond_lock(bp) == 0) 283#define XFS_BUF_ISASYNC(bp) ((bp)->b_flags & XBF_ASYNC)
456#define XFS_BUF_VSEMA(bp) pagebuf_unlock(bp) 284
457#define XFS_BUF_PSEMA(bp,x) pagebuf_lock(bp) 285#define XFS_BUF_ORDERED(bp) ((bp)->b_flags |= XBF_ORDERED)
458#define XFS_BUF_V_IODONESEMA(bp) up(&bp->pb_iodonesema); 286#define XFS_BUF_UNORDERED(bp) ((bp)->b_flags &= ~XBF_ORDERED)
459 287#define XFS_BUF_ISORDERED(bp) ((bp)->b_flags & XBF_ORDERED)
460/* setup the buffer target from a buftarg structure */ 288
461#define XFS_BUF_SET_TARGET(bp, target) \ 289#define XFS_BUF_SHUT(bp) do { } while (0)
462 (bp)->pb_target = (target) 290#define XFS_BUF_UNSHUT(bp) do { } while (0)
463#define XFS_BUF_TARGET(bp) ((bp)->pb_target) 291#define XFS_BUF_ISSHUT(bp) (0)
464#define XFS_BUFTARG_NAME(target) \ 292
465 pagebuf_target_name(target) 293#define XFS_BUF_HOLD(bp) xfs_buf_hold(bp)
466 294#define XFS_BUF_READ(bp) ((bp)->b_flags |= XBF_READ)
467#define XFS_BUF_SET_VTYPE_REF(bp, type, ref) 295#define XFS_BUF_UNREAD(bp) ((bp)->b_flags &= ~XBF_READ)
468#define XFS_BUF_SET_VTYPE(bp, type) 296#define XFS_BUF_ISREAD(bp) ((bp)->b_flags & XBF_READ)
469#define XFS_BUF_SET_REF(bp, ref) 297
470 298#define XFS_BUF_WRITE(bp) ((bp)->b_flags |= XBF_WRITE)
471static inline int xfs_bawrite(void *mp, xfs_buf_t *bp) 299#define XFS_BUF_UNWRITE(bp) ((bp)->b_flags &= ~XBF_WRITE)
300#define XFS_BUF_ISWRITE(bp) ((bp)->b_flags & XBF_WRITE)
301
302#define XFS_BUF_ISUNINITIAL(bp) (0)
303#define XFS_BUF_UNUNINITIAL(bp) (0)
304
305#define XFS_BUF_BP_ISMAPPED(bp) (1)
306
307#define XFS_BUF_IODONE_FUNC(bp) ((bp)->b_iodone)
308#define XFS_BUF_SET_IODONE_FUNC(bp, func) ((bp)->b_iodone = (func))
309#define XFS_BUF_CLR_IODONE_FUNC(bp) ((bp)->b_iodone = NULL)
310#define XFS_BUF_SET_BDSTRAT_FUNC(bp, func) ((bp)->b_strat = (func))
311#define XFS_BUF_CLR_BDSTRAT_FUNC(bp) ((bp)->b_strat = NULL)
312
313#define XFS_BUF_FSPRIVATE(bp, type) ((type)(bp)->b_fspriv)
314#define XFS_BUF_SET_FSPRIVATE(bp, val) ((bp)->b_fspriv = (void*)(val))
315#define XFS_BUF_FSPRIVATE2(bp, type) ((type)(bp)->b_fspriv2)
316#define XFS_BUF_SET_FSPRIVATE2(bp, val) ((bp)->b_fspriv2 = (void*)(val))
317#define XFS_BUF_FSPRIVATE3(bp, type) ((type)(bp)->b_fspriv3)
318#define XFS_BUF_SET_FSPRIVATE3(bp, val) ((bp)->b_fspriv3 = (void*)(val))
319#define XFS_BUF_SET_START(bp) do { } while (0)
320#define XFS_BUF_SET_BRELSE_FUNC(bp, func) ((bp)->b_relse = (func))
321
322#define XFS_BUF_PTR(bp) (xfs_caddr_t)((bp)->b_addr)
323#define XFS_BUF_SET_PTR(bp, val, cnt) xfs_buf_associate_memory(bp, val, cnt)
324#define XFS_BUF_ADDR(bp) ((bp)->b_bn)
325#define XFS_BUF_SET_ADDR(bp, bno) ((bp)->b_bn = (xfs_daddr_t)(bno))
326#define XFS_BUF_OFFSET(bp) ((bp)->b_file_offset)
327#define XFS_BUF_SET_OFFSET(bp, off) ((bp)->b_file_offset = (off))
328#define XFS_BUF_COUNT(bp) ((bp)->b_count_desired)
329#define XFS_BUF_SET_COUNT(bp, cnt) ((bp)->b_count_desired = (cnt))
330#define XFS_BUF_SIZE(bp) ((bp)->b_buffer_length)
331#define XFS_BUF_SET_SIZE(bp, cnt) ((bp)->b_buffer_length = (cnt))
332
333#define XFS_BUF_SET_VTYPE_REF(bp, type, ref) do { } while (0)
334#define XFS_BUF_SET_VTYPE(bp, type) do { } while (0)
335#define XFS_BUF_SET_REF(bp, ref) do { } while (0)
336
337#define XFS_BUF_ISPINNED(bp) xfs_buf_ispin(bp)
338
339#define XFS_BUF_VALUSEMA(bp) xfs_buf_lock_value(bp)
340#define XFS_BUF_CPSEMA(bp) (xfs_buf_cond_lock(bp) == 0)
341#define XFS_BUF_VSEMA(bp) xfs_buf_unlock(bp)
342#define XFS_BUF_PSEMA(bp,x) xfs_buf_lock(bp)
343#define XFS_BUF_V_IODONESEMA(bp) up(&bp->b_iodonesema);
344
345#define XFS_BUF_SET_TARGET(bp, target) ((bp)->b_target = (target))
346#define XFS_BUF_TARGET(bp) ((bp)->b_target)
347#define XFS_BUFTARG_NAME(target) xfs_buf_target_name(target)
348
349static inline int xfs_bawrite(void *mp, xfs_buf_t *bp)
472{ 350{
473 bp->pb_fspriv3 = mp; 351 bp->b_fspriv3 = mp;
474 bp->pb_strat = xfs_bdstrat_cb; 352 bp->b_strat = xfs_bdstrat_cb;
475 pagebuf_delwri_dequeue(bp); 353 xfs_buf_delwri_dequeue(bp);
476 return pagebuf_iostart(bp, PBF_WRITE | PBF_ASYNC | _PBF_RUN_QUEUES); 354 return xfs_buf_iostart(bp, XBF_WRITE | XBF_ASYNC | _XBF_RUN_QUEUES);
477} 355}
478 356
479static inline void xfs_buf_relse(xfs_buf_t *bp) 357static inline void xfs_buf_relse(xfs_buf_t *bp)
480{ 358{
481 if (!bp->pb_relse) 359 if (!bp->b_relse)
482 pagebuf_unlock(bp); 360 xfs_buf_unlock(bp);
483 pagebuf_rele(bp); 361 xfs_buf_rele(bp);
484} 362}
485 363
486#define xfs_bpin(bp) pagebuf_pin(bp) 364#define xfs_bpin(bp) xfs_buf_pin(bp)
487#define xfs_bunpin(bp) pagebuf_unpin(bp) 365#define xfs_bunpin(bp) xfs_buf_unpin(bp)
488 366
489#define xfs_buftrace(id, bp) \ 367#define xfs_buftrace(id, bp) \
490 pagebuf_trace(bp, id, NULL, (void *)__builtin_return_address(0)) 368 xfs_buf_trace(bp, id, NULL, (void *)__builtin_return_address(0))
491 369
492#define xfs_biodone(pb) \ 370#define xfs_biodone(bp) xfs_buf_ioend(bp, 0)
493 pagebuf_iodone(pb, 0)
494 371
495#define xfs_biomove(pb, off, len, data, rw) \ 372#define xfs_biomove(bp, off, len, data, rw) \
496 pagebuf_iomove((pb), (off), (len), (data), \ 373 xfs_buf_iomove((bp), (off), (len), (data), \
497 ((rw) == XFS_B_WRITE) ? PBRW_WRITE : PBRW_READ) 374 ((rw) == XFS_B_WRITE) ? XBRW_WRITE : XBRW_READ)
498 375
499#define xfs_biozero(pb, off, len) \ 376#define xfs_biozero(bp, off, len) \
500 pagebuf_iomove((pb), (off), (len), NULL, PBRW_ZERO) 377 xfs_buf_iomove((bp), (off), (len), NULL, XBRW_ZERO)
501 378
502 379
503static inline int XFS_bwrite(xfs_buf_t *pb) 380static inline int XFS_bwrite(xfs_buf_t *bp)
504{ 381{
505 int iowait = (pb->pb_flags & PBF_ASYNC) == 0; 382 int iowait = (bp->b_flags & XBF_ASYNC) == 0;
506 int error = 0; 383 int error = 0;
507 384
508 if (!iowait) 385 if (!iowait)
509 pb->pb_flags |= _PBF_RUN_QUEUES; 386 bp->b_flags |= _XBF_RUN_QUEUES;
510 387
511 pagebuf_delwri_dequeue(pb); 388 xfs_buf_delwri_dequeue(bp);
512 pagebuf_iostrategy(pb); 389 xfs_buf_iostrategy(bp);
513 if (iowait) { 390 if (iowait) {
514 error = pagebuf_iowait(pb); 391 error = xfs_buf_iowait(bp);
515 xfs_buf_relse(pb); 392 xfs_buf_relse(bp);
516 } 393 }
517 return error; 394 return error;
518} 395}
519 396
520#define XFS_bdwrite(pb) \ 397#define XFS_bdwrite(bp) xfs_buf_iostart(bp, XBF_DELWRI | XBF_ASYNC)
521 pagebuf_iostart(pb, PBF_DELWRI | PBF_ASYNC)
522 398
523static inline int xfs_bdwrite(void *mp, xfs_buf_t *bp) 399static inline int xfs_bdwrite(void *mp, xfs_buf_t *bp)
524{ 400{
525 bp->pb_strat = xfs_bdstrat_cb; 401 bp->b_strat = xfs_bdstrat_cb;
526 bp->pb_fspriv3 = mp; 402 bp->b_fspriv3 = mp;
527 403 return xfs_buf_iostart(bp, XBF_DELWRI | XBF_ASYNC);
528 return pagebuf_iostart(bp, PBF_DELWRI | PBF_ASYNC);
529} 404}
530 405
531#define XFS_bdstrat(bp) pagebuf_iorequest(bp) 406#define XFS_bdstrat(bp) xfs_buf_iorequest(bp)
532 407
533#define xfs_iowait(pb) pagebuf_iowait(pb) 408#define xfs_iowait(bp) xfs_buf_iowait(bp)
534 409
535#define xfs_baread(target, rablkno, ralen) \ 410#define xfs_baread(target, rablkno, ralen) \
536 pagebuf_readahead((target), (rablkno), (ralen), PBF_DONT_BLOCK) 411 xfs_buf_readahead((target), (rablkno), (ralen), XBF_DONT_BLOCK)
537
538#define xfs_buf_get_empty(len, target) pagebuf_get_empty((len), (target))
539#define xfs_buf_get_noaddr(len, target) pagebuf_get_no_daddr((len), (target))
540#define xfs_buf_free(bp) pagebuf_free(bp)
541 412
542 413
543/* 414/*
544 * Handling of buftargs. 415 * Handling of buftargs.
545 */ 416 */
546
547extern xfs_buftarg_t *xfs_alloc_buftarg(struct block_device *, int); 417extern xfs_buftarg_t *xfs_alloc_buftarg(struct block_device *, int);
548extern void xfs_free_buftarg(xfs_buftarg_t *, int); 418extern void xfs_free_buftarg(xfs_buftarg_t *, int);
549extern void xfs_wait_buftarg(xfs_buftarg_t *); 419extern void xfs_wait_buftarg(xfs_buftarg_t *);
550extern int xfs_setsize_buftarg(xfs_buftarg_t *, unsigned int, unsigned int); 420extern int xfs_setsize_buftarg(xfs_buftarg_t *, unsigned int, unsigned int);
551extern int xfs_flush_buftarg(xfs_buftarg_t *, int); 421extern int xfs_flush_buftarg(xfs_buftarg_t *, int);
552 422
553#define xfs_getsize_buftarg(buftarg) \ 423#define xfs_getsize_buftarg(buftarg) block_size((buftarg)->bt_bdev)
554 block_size((buftarg)->pbr_bdev) 424#define xfs_readonly_buftarg(buftarg) bdev_read_only((buftarg)->bt_bdev)
555#define xfs_readonly_buftarg(buftarg) \ 425
556 bdev_read_only((buftarg)->pbr_bdev) 426#define xfs_binval(buftarg) xfs_flush_buftarg(buftarg, 1)
557#define xfs_binval(buftarg) \ 427#define XFS_bflush(buftarg) xfs_flush_buftarg(buftarg, 1)
558 xfs_flush_buftarg(buftarg, 1)
559#define XFS_bflush(buftarg) \
560 xfs_flush_buftarg(buftarg, 1)
561 428
562#endif /* __XFS_BUF_H__ */ 429#endif /* __XFS_BUF_H__ */
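
The header changes above keep the shared-code interface stable while every backing name changes: each XFS_BUF_* accessor now expands to a b_* field or an xfs_buf_* call, and the write helpers encode their blocking behaviour in XBF_ASYNC. A minimal caller sketch, using only names visible in this hunk; the helper itself is hypothetical and not part of the patch:

        /* Hypothetical helper: drive one buffer write through the renamed
         * interface.  Assumes bp is already locked and dirty.
         */
        static int
        xfs_example_write(xfs_mount_t *mp, xfs_buf_t *bp, int sync)
        {
                if (sync) {
                        bp->b_flags &= ~XBF_ASYNC;      /* XFS_bwrite() waits */
                        bp->b_strat = xfs_bdstrat_cb;
                        XFS_BUF_SET_FSPRIVATE3(bp, mp);
                        return XFS_bwrite(bp);          /* relses bp itself */
                }
                return xfs_bawrite(mp, bp);     /* XBF_WRITE|XBF_ASYNC, queued */
        }

Note the asymmetry: the synchronous path releases the buffer before returning, while the async path leaves release to I/O completion.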
diff --git a/fs/xfs/linux-2.6/xfs_ioctl.c b/fs/xfs/linux-2.6/xfs_ioctl.c
index 2b385394f28c..7ad7b680e996 100644
--- a/fs/xfs/linux-2.6/xfs_ioctl.c
+++ b/fs/xfs/linux-2.6/xfs_ioctl.c
@@ -750,7 +750,7 @@ xfs_ioctl(
750 (ip->i_d.di_flags & XFS_DIFLAG_REALTIME) ? 750 (ip->i_d.di_flags & XFS_DIFLAG_REALTIME) ?
751 mp->m_rtdev_targp : mp->m_ddev_targp; 751 mp->m_rtdev_targp : mp->m_ddev_targp;
752 752
753 da.d_mem = da.d_miniosz = 1 << target->pbr_sshift; 753 da.d_mem = da.d_miniosz = 1 << target->bt_sshift;
754 da.d_maxiosz = INT_MAX & ~(da.d_miniosz - 1); 754 da.d_maxiosz = INT_MAX & ~(da.d_miniosz - 1);
755 755
756 if (copy_to_user(arg, &da, sizeof(da))) 756 if (copy_to_user(arg, &da, sizeof(da)))
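
The XFS_IOC_DIOINFO reply is now derived from the buftarg's sector shift. A worked example, assuming a 512-byte-sector device (bt_sshift = 9):

        /* d_mem = d_miniosz = 1 << 9        = 512 bytes
         * d_maxiosz = INT_MAX & ~(512 - 1)  = 0x7ffffe00,
         * the largest int that is still a whole multiple of 512.
         */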
diff --git a/fs/xfs/linux-2.6/xfs_linux.h b/fs/xfs/linux-2.6/xfs_linux.h
index d8e21ba0cccc..95efe948a095 100644
--- a/fs/xfs/linux-2.6/xfs_linux.h
+++ b/fs/xfs/linux-2.6/xfs_linux.h
@@ -232,7 +232,7 @@ static inline void set_buffer_unwritten_io(struct buffer_head *bh)
232#define xfs_itruncate_data(ip, off) \ 232#define xfs_itruncate_data(ip, off) \
233 (-vmtruncate(LINVFS_GET_IP(XFS_ITOV(ip)), (off))) 233 (-vmtruncate(LINVFS_GET_IP(XFS_ITOV(ip)), (off)))
234#define xfs_statvfs_fsid(statp, mp) \ 234#define xfs_statvfs_fsid(statp, mp) \
235 ({ u64 id = huge_encode_dev((mp)->m_dev); \ 235 ({ u64 id = huge_encode_dev((mp)->m_ddev_targp->bt_dev); \
236 __kernel_fsid_t *fsid = &(statp)->f_fsid; \ 236 __kernel_fsid_t *fsid = &(statp)->f_fsid; \
237 (fsid->val[0] = (u32)id, fsid->val[1] = (u32)(id >> 32)); }) 237 (fsid->val[0] = (u32)id, fsid->val[1] = (u32)(id >> 32)); })
238 238
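
xfs_statvfs_fsid() now pulls the device number through the data buftarg rather than the removed m_dev shorthand (see the xfs_mount.h hunk below). Open-coded, the macro does the equivalent of this sketch:

        u64 id = huge_encode_dev(mp->m_ddev_targp->bt_dev);
        __kernel_fsid_t *fsid = &statp->f_fsid;

        fsid->val[0] = (u32)id;          /* low 32 bits  */
        fsid->val[1] = (u32)(id >> 32);  /* high 32 bits */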
diff --git a/fs/xfs/linux-2.6/xfs_lrw.c b/fs/xfs/linux-2.6/xfs_lrw.c
index 147a28861f6b..e0ab45fbfebd 100644
--- a/fs/xfs/linux-2.6/xfs_lrw.c
+++ b/fs/xfs/linux-2.6/xfs_lrw.c
@@ -233,8 +233,8 @@ xfs_read(
233 xfs_buftarg_t *target = 233 xfs_buftarg_t *target =
234 (ip->i_d.di_flags & XFS_DIFLAG_REALTIME) ? 234 (ip->i_d.di_flags & XFS_DIFLAG_REALTIME) ?
235 mp->m_rtdev_targp : mp->m_ddev_targp; 235 mp->m_rtdev_targp : mp->m_ddev_targp;
236 if ((*offset & target->pbr_smask) || 236 if ((*offset & target->bt_smask) ||
237 (size & target->pbr_smask)) { 237 (size & target->bt_smask)) {
238 if (*offset == ip->i_d.di_size) { 238 if (*offset == ip->i_d.di_size) {
239 return (0); 239 return (0);
240 } 240 }
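
bt_smask is the sector-size-minus-one mask, so this test rejects direct I/O that is not sector aligned; the same mask guards xfs_write() in the next hunk. A worked example, assuming 512-byte sectors (bt_smask = 0x1ff):

        /* *offset = 1536 (0x600): 0x600 & 0x1ff == 0      -> aligned, proceed
         * *offset = 1000 (0x3e8): 0x3e8 & 0x1ff == 0x1e8  -> misaligned;
         * returns 0 only if the read starts exactly at EOF, otherwise the
         * rejection path (not visible in this hunk) fails the request.
         */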
@@ -618,7 +618,7 @@ xfs_write(
618 (xip->i_d.di_flags & XFS_DIFLAG_REALTIME) ? 618 (xip->i_d.di_flags & XFS_DIFLAG_REALTIME) ?
619 mp->m_rtdev_targp : mp->m_ddev_targp; 619 mp->m_rtdev_targp : mp->m_ddev_targp;
620 620
621 if ((pos & target->pbr_smask) || (count & target->pbr_smask)) 621 if ((pos & target->bt_smask) || (count & target->bt_smask))
622 return XFS_ERROR(-EINVAL); 622 return XFS_ERROR(-EINVAL);
623 623
624 if (!VN_CACHED(vp) && pos < i_size_read(inode)) 624 if (!VN_CACHED(vp) && pos < i_size_read(inode))
@@ -938,7 +938,7 @@ xfs_bdstrat_cb(struct xfs_buf *bp)
938 938
939 mp = XFS_BUF_FSPRIVATE3(bp, xfs_mount_t *); 939 mp = XFS_BUF_FSPRIVATE3(bp, xfs_mount_t *);
940 if (!XFS_FORCED_SHUTDOWN(mp)) { 940 if (!XFS_FORCED_SHUTDOWN(mp)) {
941 pagebuf_iorequest(bp); 941 xfs_buf_iorequest(bp);
942 return 0; 942 return 0;
943 } else { 943 } else {
944 xfs_buftrace("XFS__BDSTRAT IOERROR", bp); 944 xfs_buftrace("XFS__BDSTRAT IOERROR", bp);
@@ -991,7 +991,7 @@ xfsbdstrat(
991 * if (XFS_BUF_IS_GRIO(bp)) { 991 * if (XFS_BUF_IS_GRIO(bp)) {
992 */ 992 */
993 993
994 pagebuf_iorequest(bp); 994 xfs_buf_iorequest(bp);
995 return 0; 995 return 0;
996 } 996 }
997 997
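
Both strategy routines now submit through xfs_buf_iorequest(). Together with the b_strat/b_fspriv3 wiring done by xfs_bawrite() and xfs_bdwrite() in xfs_buf.h, a shut-down filesystem can short-circuit the I/O before it reaches the device. Condensed sketch of the xfs_bdstrat_cb() flow; the error leg is abbreviated from the real function:

        /* inside xfs_bdstrat_cb(), condensed: */
        mp = XFS_BUF_FSPRIVATE3(bp, xfs_mount_t *);  /* set by xfs_bawrite() */
        if (!XFS_FORCED_SHUTDOWN(mp)) {
                xfs_buf_iorequest(bp);               /* normal submission */
                return 0;
        }
        xfs_buftrace("XFS__BDSTRAT IOERROR", bp);    /* fail buffer instead */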
diff --git a/fs/xfs/linux-2.6/xfs_stats.h b/fs/xfs/linux-2.6/xfs_stats.h
index 50027c4a5618..8ba7a2fa6c1d 100644
--- a/fs/xfs/linux-2.6/xfs_stats.h
+++ b/fs/xfs/linux-2.6/xfs_stats.h
@@ -109,15 +109,15 @@ struct xfsstats {
109 __uint32_t vn_remove; /* # times vn_remove called */ 109 __uint32_t vn_remove; /* # times vn_remove called */
110 __uint32_t vn_free; /* # times vn_free called */ 110 __uint32_t vn_free; /* # times vn_free called */
111#define XFSSTAT_END_BUF (XFSSTAT_END_VNODE_OPS+9) 111#define XFSSTAT_END_BUF (XFSSTAT_END_VNODE_OPS+9)
112 __uint32_t pb_get; 112 __uint32_t xb_get;
113 __uint32_t pb_create; 113 __uint32_t xb_create;
114 __uint32_t pb_get_locked; 114 __uint32_t xb_get_locked;
115 __uint32_t pb_get_locked_waited; 115 __uint32_t xb_get_locked_waited;
116 __uint32_t pb_busy_locked; 116 __uint32_t xb_busy_locked;
117 __uint32_t pb_miss_locked; 117 __uint32_t xb_miss_locked;
118 __uint32_t pb_page_retries; 118 __uint32_t xb_page_retries;
119 __uint32_t pb_page_found; 119 __uint32_t xb_page_found;
120 __uint32_t pb_get_read; 120 __uint32_t xb_get_read;
121/* Extra precision counters */ 121/* Extra precision counters */
122 __uint64_t xs_xstrat_bytes; 122 __uint64_t xs_xstrat_bytes;
123 __uint64_t xs_write_bytes; 123 __uint64_t xs_write_bytes;
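
The nine buffer-cache counters keep their slots (XFSSTAT_END_BUF stays at +9); only the pb_ prefix becomes xb_. Call sites in xfs_buf.c bump them through the XFS_STATS_INC helper defined elsewhere in this header, along these lines (sites and meanings illustrative):

        XFS_STATS_INC(xb_get);          /* every buffer-get call           */
        XFS_STATS_INC(xb_get_locked);   /* cache hit, lock acquired        */
        XFS_STATS_INC(xb_miss_locked);  /* lookup missed, buffer allocated */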
diff --git a/fs/xfs/linux-2.6/xfs_super.c b/fs/xfs/linux-2.6/xfs_super.c
index d8ec7463d8f4..556c1437b17d 100644
--- a/fs/xfs/linux-2.6/xfs_super.c
+++ b/fs/xfs/linux-2.6/xfs_super.c
@@ -309,7 +309,7 @@ xfs_mountfs_check_barriers(xfs_mount_t *mp)
309 return; 309 return;
310 } 310 }
311 311
312 if (mp->m_ddev_targp->pbr_bdev->bd_disk->queue->ordered == 312 if (mp->m_ddev_targp->bt_bdev->bd_disk->queue->ordered ==
313 QUEUE_ORDERED_NONE) { 313 QUEUE_ORDERED_NONE) {
314 xfs_fs_cmn_err(CE_NOTE, mp, 314 xfs_fs_cmn_err(CE_NOTE, mp,
315 "Disabling barriers, not supported by the underlying device"); 315 "Disabling barriers, not supported by the underlying device");
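
The barrier probe reaches the request queue through the renamed bt_bdev pointer. Sketch of the surrounding logic; the flag manipulation is assumed from the unchanged remainder of xfs_mountfs_check_barriers():

        if (mp->m_ddev_targp->bt_bdev->bd_disk->queue->ordered ==
            QUEUE_ORDERED_NONE) {
                xfs_fs_cmn_err(CE_NOTE, mp,
                        "Disabling barriers, not supported by the underlying device");
                mp->m_flags &= ~XFS_MOUNT_BARRIER;  /* unordered writes only */
                return;
        }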
@@ -330,7 +330,7 @@ void
330xfs_blkdev_issue_flush( 330xfs_blkdev_issue_flush(
331 xfs_buftarg_t *buftarg) 331 xfs_buftarg_t *buftarg)
332{ 332{
333 blkdev_issue_flush(buftarg->pbr_bdev, NULL); 333 blkdev_issue_flush(buftarg->bt_bdev, NULL);
334} 334}
335 335
336STATIC struct inode * 336STATIC struct inode *
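
xfs_blkdev_issue_flush() is the other direct consumer of bt_bdev: callers hand it a buftarg and it drains the device's volatile write cache (blkdev_issue_flush() with a NULL error-sector pointer is the 2.6-era block API). A hypothetical call, e.g. after forcing the log:

        xfs_blkdev_issue_flush(mp->m_logdev_targp);  /* flush log device cache */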
@@ -969,9 +969,9 @@ init_xfs_fs( void )
969 if (error < 0) 969 if (error < 0)
970 goto undo_zones; 970 goto undo_zones;
971 971
972 error = pagebuf_init(); 972 error = xfs_buf_init();
973 if (error < 0) 973 if (error < 0)
974 goto undo_pagebuf; 974 goto undo_buffers;
975 975
976 vn_init(); 976 vn_init();
977 xfs_init(); 977 xfs_init();
@@ -985,9 +985,9 @@ init_xfs_fs( void )
985 return 0; 985 return 0;
986 986
987undo_register: 987undo_register:
988 pagebuf_terminate(); 988 xfs_buf_terminate();
989 989
990undo_pagebuf: 990undo_buffers:
991 linvfs_destroy_zones(); 991 linvfs_destroy_zones();
992 992
993undo_zones: 993undo_zones:
@@ -1001,7 +1001,7 @@ exit_xfs_fs( void )
1001 XFS_DM_EXIT(&xfs_fs_type); 1001 XFS_DM_EXIT(&xfs_fs_type);
1002 unregister_filesystem(&xfs_fs_type); 1002 unregister_filesystem(&xfs_fs_type);
1003 xfs_cleanup(); 1003 xfs_cleanup();
1004 pagebuf_terminate(); 1004 xfs_buf_terminate();
1005 linvfs_destroy_zones(); 1005 linvfs_destroy_zones();
1006 ktrace_uninit(); 1006 ktrace_uninit();
1007} 1007}
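
With pagebuf_init()/pagebuf_terminate() renamed, module bring-up and tear-down stay strictly symmetric. Condensed sketch of the ordering; the matching zone-init call is assumed from the unchanged part of init_xfs_fs():

        /* init:  linvfs_init_zones() -> xfs_buf_init() -> vn_init()/xfs_init()
         *        -> register_filesystem()
         * error: undo_register runs xfs_buf_terminate(); undo_buffers runs
         *        linvfs_destroy_zones(); strict reverse of the setup order
         * exit:  unregister_filesystem() -> xfs_cleanup() ->
         *        xfs_buf_terminate() -> linvfs_destroy_zones() -> ktrace_uninit()
         */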
diff --git a/fs/xfs/xfs_mount.h b/fs/xfs/xfs_mount.h
index fbfa240bd139..cd3cf9613a00 100644
--- a/fs/xfs/xfs_mount.h
+++ b/fs/xfs/xfs_mount.h
@@ -308,7 +308,6 @@ typedef struct xfs_mount {
308 xfs_buftarg_t *m_ddev_targp; /* saves taking the address */ 308 xfs_buftarg_t *m_ddev_targp; /* saves taking the address */
309 xfs_buftarg_t *m_logdev_targp;/* ptr to log device */ 309 xfs_buftarg_t *m_logdev_targp;/* ptr to log device */
310 xfs_buftarg_t *m_rtdev_targp; /* ptr to rt device */ 310 xfs_buftarg_t *m_rtdev_targp; /* ptr to rt device */
311#define m_dev m_ddev_targp->pbr_dev
312 __uint8_t m_dircook_elog; /* log d-cookie entry bits */ 311 __uint8_t m_dircook_elog; /* log d-cookie entry bits */
313 __uint8_t m_blkbit_log; /* blocklog + NBBY */ 312 __uint8_t m_blkbit_log; /* blocklog + NBBY */
314 __uint8_t m_blkbb_log; /* blocklog - BBSHIFT */ 313 __uint8_t m_blkbb_log; /* blocklog - BBSHIFT */
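
Killing the m_dev convenience macro removes the last pbr_ reference from shared code; anything that wants the raw device number now dereferences the buftarg explicitly, as the xfs_linux.h hunk above already does:

        dev_t dev = mp->m_ddev_targp->bt_dev;   /* was: mp->m_dev */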
diff --git a/fs/xfs/xfs_rw.c b/fs/xfs/xfs_rw.c
index c4b20872f07d..a59c102cf214 100644
--- a/fs/xfs/xfs_rw.c
+++ b/fs/xfs/xfs_rw.c
@@ -238,6 +238,7 @@ xfs_bioerror_relse(
238 } 238 }
239 return (EIO); 239 return (EIO);
240} 240}
241
241/* 242/*
242 * Prints out an ALERT message about I/O error. 243 * Prints out an ALERT message about I/O error.
243 */ 244 */
@@ -252,11 +253,9 @@ xfs_ioerror_alert(
252 "I/O error in filesystem (\"%s\") meta-data dev %s block 0x%llx" 253 "I/O error in filesystem (\"%s\") meta-data dev %s block 0x%llx"
253 " (\"%s\") error %d buf count %zd", 254 " (\"%s\") error %d buf count %zd",
254 (!mp || !mp->m_fsname) ? "(fs name not set)" : mp->m_fsname, 255 (!mp || !mp->m_fsname) ? "(fs name not set)" : mp->m_fsname,
255 XFS_BUFTARG_NAME(bp->pb_target), 256 XFS_BUFTARG_NAME(XFS_BUF_TARGET(bp)),
256 (__uint64_t)blkno, 257 (__uint64_t)blkno, func,
257 func, 258 XFS_BUF_GETERROR(bp), XFS_BUF_COUNT(bp));
258 XFS_BUF_GETERROR(bp),
259 XFS_BUF_COUNT(bp));
260} 259}
261 260
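
The reflowed xfs_ioerror_alert() emits the same single alert, with XFS_BUFTARG_NAME() now resolving the device name via xfs_buf_target_name(). Sample output with illustrative values (caller name, device, and counts assumed):

        /* I/O error in filesystem ("scratch") meta-data dev sda8
         * block 0x1f2e0 ("xfs_trans_read_buf") error 5 buf count 4096
         */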
262/* 261/*