author     Nathan Scott <nathans@sgi.com>    2006-01-10 23:39:08 -0500
committer  Nathan Scott <nathans@sgi.com>    2006-01-10 23:39:08 -0500
commit     ce8e922c0e79c8093452ba9a124981332b75706b
tree       0f681391461d4d6bbccd3bf88a7762cc7daa8852 /fs/xfs/linux-2.6/xfs_buf.c
parent     68bdb6eabcd2869caa795019961a5445a11b5bc1
[XFS] Complete the pagebuf -> xfs_buf naming convention transition, finally.
SGI-PV: 947038
SGI-Modid: xfs-linux-melb:xfs-kern:24866a
Signed-off-by: Nathan Scott <nathans@sgi.com>
Diffstat (limited to 'fs/xfs/linux-2.6/xfs_buf.c')
-rw-r--r--  fs/xfs/linux-2.6/xfs_buf.c | 1258
1 file changed, 585 insertions(+), 673 deletions(-)
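
The change is almost entirely mechanical renaming: the pagebuf_* function prefix becomes xfs_buf_*, the pb_* field prefix becomes b_*, PBF_* flags become XBF_*, and the conventional local variable changes from pb to bp. A rough before/after sketch of the convention (identifiers taken from the hunks below, surrounding code elided):

    /* before: pagebuf naming */
    xfs_buf_t *pb = pagebuf_allocate(flags);
    pb->pb_flags |= PBF_ASYNC;
    pagebuf_rele(pb);

    /* after: xfs_buf naming */
    xfs_buf_t *bp = xfs_buf_allocate(flags);
    bp->b_flags |= XBF_ASYNC;
    xfs_buf_rele(bp);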
diff --git a/fs/xfs/linux-2.6/xfs_buf.c b/fs/xfs/linux-2.6/xfs_buf.c
index 2a8acd38fa1e..cb77f99cbef1 100644
--- a/fs/xfs/linux-2.6/xfs_buf.c
+++ b/fs/xfs/linux-2.6/xfs_buf.c
@@ -31,77 +31,77 @@
31 | #include <linux/kthread.h> | 31 | #include <linux/kthread.h> |
32 | #include "xfs_linux.h" | 32 | #include "xfs_linux.h" |
33 | 33 | ||
34 | STATIC kmem_cache_t *pagebuf_zone; | 34 | STATIC kmem_zone_t *xfs_buf_zone; |
35 | STATIC kmem_shaker_t pagebuf_shake; | 35 | STATIC kmem_shaker_t xfs_buf_shake; |
36 | STATIC int xfsbufd(void *); | 36 | STATIC int xfsbufd(void *); |
37 | STATIC int xfsbufd_wakeup(int, gfp_t); | 37 | STATIC int xfsbufd_wakeup(int, gfp_t); |
38 | STATIC void pagebuf_delwri_queue(xfs_buf_t *, int); | 38 | STATIC void xfs_buf_delwri_queue(xfs_buf_t *, int); |
39 | 39 | ||
40 | STATIC struct workqueue_struct *xfslogd_workqueue; | 40 | STATIC struct workqueue_struct *xfslogd_workqueue; |
41 | struct workqueue_struct *xfsdatad_workqueue; | 41 | struct workqueue_struct *xfsdatad_workqueue; |
42 | 42 | ||
43 | #ifdef PAGEBUF_TRACE | 43 | #ifdef XFS_BUF_TRACE |
44 | void | 44 | void |
45 | pagebuf_trace( | 45 | xfs_buf_trace( |
46 | xfs_buf_t *pb, | 46 | xfs_buf_t *bp, |
47 | char *id, | 47 | char *id, |
48 | void *data, | 48 | void *data, |
49 | void *ra) | 49 | void *ra) |
50 | { | 50 | { |
51 | ktrace_enter(pagebuf_trace_buf, | 51 | ktrace_enter(xfs_buf_trace_buf, |
52 | pb, id, | 52 | bp, id, |
53 | (void *)(unsigned long)pb->pb_flags, | 53 | (void *)(unsigned long)bp->b_flags, |
54 | (void *)(unsigned long)pb->pb_hold.counter, | 54 | (void *)(unsigned long)bp->b_hold.counter, |
55 | (void *)(unsigned long)pb->pb_sema.count.counter, | 55 | (void *)(unsigned long)bp->b_sema.count.counter, |
56 | (void *)current, | 56 | (void *)current, |
57 | data, ra, | 57 | data, ra, |
58 | (void *)(unsigned long)((pb->pb_file_offset>>32) & 0xffffffff), | 58 | (void *)(unsigned long)((bp->b_file_offset>>32) & 0xffffffff), |
59 | (void *)(unsigned long)(pb->pb_file_offset & 0xffffffff), | 59 | (void *)(unsigned long)(bp->b_file_offset & 0xffffffff), |
60 | (void *)(unsigned long)pb->pb_buffer_length, | 60 | (void *)(unsigned long)bp->b_buffer_length, |
61 | NULL, NULL, NULL, NULL, NULL); | 61 | NULL, NULL, NULL, NULL, NULL); |
62 | } | 62 | } |
63 | ktrace_t *pagebuf_trace_buf; | 63 | ktrace_t *xfs_buf_trace_buf; |
64 | #define PAGEBUF_TRACE_SIZE 4096 | 64 | #define XFS_BUF_TRACE_SIZE 4096 |
65 | #define PB_TRACE(pb, id, data) \ | 65 | #define XB_TRACE(bp, id, data) \ |
66 | pagebuf_trace(pb, id, (void *)data, (void *)__builtin_return_address(0)) | 66 | xfs_buf_trace(bp, id, (void *)data, (void *)__builtin_return_address(0)) |
67 | #else | 67 | #else |
68 | #define PB_TRACE(pb, id, data) do { } while (0) | 68 | #define XB_TRACE(bp, id, data) do { } while (0) |
69 | #endif | 69 | #endif |
70 | 70 | ||
71 | #ifdef PAGEBUF_LOCK_TRACKING | 71 | #ifdef XFS_BUF_LOCK_TRACKING |
72 | # define PB_SET_OWNER(pb) ((pb)->pb_last_holder = current->pid) | 72 | # define XB_SET_OWNER(bp) ((bp)->b_last_holder = current->pid) |
73 | # define PB_CLEAR_OWNER(pb) ((pb)->pb_last_holder = -1) | 73 | # define XB_CLEAR_OWNER(bp) ((bp)->b_last_holder = -1) |
74 | # define PB_GET_OWNER(pb) ((pb)->pb_last_holder) | 74 | # define XB_GET_OWNER(bp) ((bp)->b_last_holder) |
75 | #else | 75 | #else |
76 | # define PB_SET_OWNER(pb) do { } while (0) | 76 | # define XB_SET_OWNER(bp) do { } while (0) |
77 | # define PB_CLEAR_OWNER(pb) do { } while (0) | 77 | # define XB_CLEAR_OWNER(bp) do { } while (0) |
78 | # define PB_GET_OWNER(pb) do { } while (0) | 78 | # define XB_GET_OWNER(bp) do { } while (0) |
79 | #endif | 79 | #endif |
80 | 80 | ||
81 | #define pb_to_gfp(flags) \ | 81 | #define xb_to_gfp(flags) \ |
82 | ((((flags) & PBF_READ_AHEAD) ? __GFP_NORETRY : \ | 82 | ((((flags) & XBF_READ_AHEAD) ? __GFP_NORETRY : \ |
83 | ((flags) & PBF_DONT_BLOCK) ? GFP_NOFS : GFP_KERNEL) | __GFP_NOWARN) | 83 | ((flags) & XBF_DONT_BLOCK) ? GFP_NOFS : GFP_KERNEL) | __GFP_NOWARN) |
84 | 84 | ||
85 | #define pb_to_km(flags) \ | 85 | #define xb_to_km(flags) \ |
86 | (((flags) & PBF_DONT_BLOCK) ? KM_NOFS : KM_SLEEP) | 86 | (((flags) & XBF_DONT_BLOCK) ? KM_NOFS : KM_SLEEP) |
87 | 87 | ||
88 | #define pagebuf_allocate(flags) \ | 88 | #define xfs_buf_allocate(flags) \ |
89 | kmem_zone_alloc(pagebuf_zone, pb_to_km(flags)) | 89 | kmem_zone_alloc(xfs_buf_zone, xb_to_km(flags)) |
90 | #define pagebuf_deallocate(pb) \ | 90 | #define xfs_buf_deallocate(bp) \ |
91 | kmem_zone_free(pagebuf_zone, (pb)); | 91 | kmem_zone_free(xfs_buf_zone, (bp)); |
92 | 92 | ||
93 | /* | 93 | /* |
94 | * Page Region interfaces. | 94 | * Page Region interfaces. |
95 | * | 95 | * |
96 | * For pages in filesystems where the blocksize is smaller than the | 96 | * For pages in filesystems where the blocksize is smaller than the |
97 | * pagesize, we use the page->private field (long) to hold a bitmap | 97 | * pagesize, we use the page->private field (long) to hold a bitmap |
98 | * of uptodate regions within the page. | 98 | * of uptodate regions within the page. |
99 | * | 99 | * |
100 | * Each such region is "bytes per page / bits per long" bytes long. | 100 | * Each such region is "bytes per page / bits per long" bytes long. |
101 | * | 101 | * |
102 | * NBPPR == number-of-bytes-per-page-region | 102 | * NBPPR == number-of-bytes-per-page-region |
103 | * BTOPR == bytes-to-page-region (rounded up) | 103 | * BTOPR == bytes-to-page-region (rounded up) |
104 | * BTOPRT == bytes-to-page-region-truncated (rounded down) | 104 | * BTOPRT == bytes-to-page-region-truncated (rounded down) |
105 | */ | 105 | */ |
106 | #if (BITS_PER_LONG == 32) | 106 | #if (BITS_PER_LONG == 32) |
107 | #define PRSHIFT (PAGE_CACHE_SHIFT - 5) /* (32 == 1<<5) */ | 107 | #define PRSHIFT (PAGE_CACHE_SHIFT - 5) /* (32 == 1<<5) */ |
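As a concrete illustration of the arithmetic described above, here is a minimal userspace model of the per-page region bitmap; the page size, function name, and rounding below are stand-ins for the kernel's set_page_region()/test_page_region(), not the exact in-tree implementation:

    #include <stdio.h>

    #define MODEL_PAGE_SIZE 4096                    /* assume 4k pages for the sketch */
    #define BITS_PER_LONG   (8 * (int)sizeof(long))
    #define NBPPR           (MODEL_PAGE_SIZE / BITS_PER_LONG) /* bytes per region */

    /* Mark byte range [offset, offset+length) of one page uptodate. */
    static unsigned long set_region(unsigned long map, size_t offset, size_t length)
    {
            size_t first = offset / NBPPR;                        /* BTOPRT: round down */
            size_t final = (offset + length + NBPPR - 1) / NBPPR; /* BTOPR: round up */

            while (first < final)
                    map |= 1UL << first++;
            return map;
    }

    int main(void)
    {
            /* with 64-bit longs, each region covers 4096/64 = 64 bytes,
             * so bytes 512..1535 set bits 8..23 */
            printf("map = %#lx\n", set_region(0, 512, 1024));
            return 0;
    }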
@@ -160,7 +160,7 @@ test_page_region(
160 | } | 160 | } |
161 | 161 | ||
162 | /* | 162 | /* |
163 | * Mapping of multi-page buffers into contiguous virtual space | 163 | * Mapping of multi-page buffers into contiguous virtual space |
164 | */ | 164 | */ |
165 | 165 | ||
166 | typedef struct a_list { | 166 | typedef struct a_list { |
@@ -173,7 +173,7 @@ STATIC int as_list_len;
173 | STATIC DEFINE_SPINLOCK(as_lock); | 173 | STATIC DEFINE_SPINLOCK(as_lock); |
174 | 174 | ||
175 | /* | 175 | /* |
176 | * Try to batch vunmaps because they are costly. | 176 | * Try to batch vunmaps because they are costly. |
177 | */ | 177 | */ |
178 | STATIC void | 178 | STATIC void |
179 | free_address( | 179 | free_address( |
@@ -216,83 +216,83 @@ purge_addresses(void)
216 | } | 216 | } |
217 | 217 | ||
218 | /* | 218 | /* |
219 | * Internal pagebuf object manipulation | 219 | * Internal xfs_buf_t object manipulation |
220 | */ | 220 | */ |
221 | 221 | ||
222 | STATIC void | 222 | STATIC void |
223 | _pagebuf_initialize( | 223 | _xfs_buf_initialize( |
224 | xfs_buf_t *pb, | 224 | xfs_buf_t *bp, |
225 | xfs_buftarg_t *target, | 225 | xfs_buftarg_t *target, |
226 | loff_t range_base, | 226 | loff_t range_base, |
227 | size_t range_length, | 227 | size_t range_length, |
228 | page_buf_flags_t flags) | 228 | xfs_buf_flags_t flags) |
229 | { | 229 | { |
230 | /* | 230 | /* |
231 | * We don't want certain flags to appear in pb->pb_flags. | 231 | * We don't want certain flags to appear in b_flags. |
232 | */ | 232 | */ |
233 | flags &= ~(PBF_LOCK|PBF_MAPPED|PBF_DONT_BLOCK|PBF_READ_AHEAD); | 233 | flags &= ~(XBF_LOCK|XBF_MAPPED|XBF_DONT_BLOCK|XBF_READ_AHEAD); |
234 | 234 | ||
235 | memset(pb, 0, sizeof(xfs_buf_t)); | 235 | memset(bp, 0, sizeof(xfs_buf_t)); |
236 | atomic_set(&pb->pb_hold, 1); | 236 | atomic_set(&bp->b_hold, 1); |
237 | init_MUTEX_LOCKED(&pb->pb_iodonesema); | 237 | init_MUTEX_LOCKED(&bp->b_iodonesema); |
238 | INIT_LIST_HEAD(&pb->pb_list); | 238 | INIT_LIST_HEAD(&bp->b_list); |
239 | INIT_LIST_HEAD(&pb->pb_hash_list); | 239 | INIT_LIST_HEAD(&bp->b_hash_list); |
240 | init_MUTEX_LOCKED(&pb->pb_sema); /* held, no waiters */ | 240 | init_MUTEX_LOCKED(&bp->b_sema); /* held, no waiters */ |
241 | PB_SET_OWNER(pb); | 241 | XB_SET_OWNER(bp); |
242 | pb->pb_target = target; | 242 | bp->b_target = target; |
243 | pb->pb_file_offset = range_base; | 243 | bp->b_file_offset = range_base; |
244 | /* | 244 | /* |
245 | * Set buffer_length and count_desired to the same value initially. | 245 | * Set buffer_length and count_desired to the same value initially. |
246 | * I/O routines should use count_desired, which will be the same in | 246 | * I/O routines should use count_desired, which will be the same in |
247 | * most cases but may be reset (e.g. XFS recovery). | 247 | * most cases but may be reset (e.g. XFS recovery). |
248 | */ | 248 | */ |
249 | pb->pb_buffer_length = pb->pb_count_desired = range_length; | 249 | bp->b_buffer_length = bp->b_count_desired = range_length; |
250 | pb->pb_flags = flags; | 250 | bp->b_flags = flags; |
251 | pb->pb_bn = XFS_BUF_DADDR_NULL; | 251 | bp->b_bn = XFS_BUF_DADDR_NULL; |
252 | atomic_set(&pb->pb_pin_count, 0); | 252 | atomic_set(&bp->b_pin_count, 0); |
253 | init_waitqueue_head(&pb->pb_waiters); | 253 | init_waitqueue_head(&bp->b_waiters); |
254 | 254 | ||
255 | XFS_STATS_INC(pb_create); | 255 | XFS_STATS_INC(xb_create); |
256 | PB_TRACE(pb, "initialize", target); | 256 | XB_TRACE(bp, "initialize", target); |
257 | } | 257 | } |
258 | 258 | ||
259 | /* | 259 | /* |
260 | * Allocate a page array capable of holding a specified number | 260 | * Allocate a page array capable of holding a specified number |
261 | * of pages, and point the page buf at it. | 261 | * of pages, and point the page buf at it. |
262 | */ | 262 | */ |
263 | STATIC int | 263 | STATIC int |
264 | _pagebuf_get_pages( | 264 | _xfs_buf_get_pages( |
265 | xfs_buf_t *pb, | 265 | xfs_buf_t *bp, |
266 | int page_count, | 266 | int page_count, |
267 | page_buf_flags_t flags) | 267 | xfs_buf_flags_t flags) |
268 | { | 268 | { |
269 | /* Make sure that we have a page list */ | 269 | /* Make sure that we have a page list */ |
270 | if (pb->pb_pages == NULL) { | 270 | if (bp->b_pages == NULL) { |
271 | pb->pb_offset = page_buf_poff(pb->pb_file_offset); | 271 | bp->b_offset = xfs_buf_poff(bp->b_file_offset); |
272 | pb->pb_page_count = page_count; | 272 | bp->b_page_count = page_count; |
273 | if (page_count <= PB_PAGES) { | 273 | if (page_count <= XB_PAGES) { |
274 | pb->pb_pages = pb->pb_page_array; | 274 | bp->b_pages = bp->b_page_array; |
275 | } else { | 275 | } else { |
276 | pb->pb_pages = kmem_alloc(sizeof(struct page *) * | 276 | bp->b_pages = kmem_alloc(sizeof(struct page *) * |
277 | page_count, pb_to_km(flags)); | 277 | page_count, xb_to_km(flags)); |
278 | if (pb->pb_pages == NULL) | 278 | if (bp->b_pages == NULL) |
279 | return -ENOMEM; | 279 | return -ENOMEM; |
280 | } | 280 | } |
281 | memset(pb->pb_pages, 0, sizeof(struct page *) * page_count); | 281 | memset(bp->b_pages, 0, sizeof(struct page *) * page_count); |
282 | } | 282 | } |
283 | return 0; | 283 | return 0; |
284 | } | 284 | } |
285 | 285 | ||
286 | /* | 286 | /* |
287 | * Frees pb_pages if it was malloced. | 287 | * Frees b_pages if it was allocated. |
288 | */ | 288 | */ |
289 | STATIC void | 289 | STATIC void |
290 | _pagebuf_free_pages( | 290 | _xfs_buf_free_pages( |
291 | xfs_buf_t *bp) | 291 | xfs_buf_t *bp) |
292 | { | 292 | { |
293 | if (bp->pb_pages != bp->pb_page_array) { | 293 | if (bp->b_pages != bp->b_page_array) { |
294 | kmem_free(bp->pb_pages, | 294 | kmem_free(bp->b_pages, |
295 | bp->pb_page_count * sizeof(struct page *)); | 295 | bp->b_page_count * sizeof(struct page *)); |
296 | } | 296 | } |
297 | } | 297 | } |
298 | 298 | ||
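
_xfs_buf_get_pages() above uses a common small-object optimization: page lists of up to XB_PAGES entries live in an array embedded in the buffer itself, and only larger lists hit the allocator; _xfs_buf_free_pages() then frees only the heap case. A simplified standalone sketch of the pattern (the types and the XB_PAGES value here are illustrative, not the header's):

    #include <stdlib.h>
    #include <string.h>

    #define XB_PAGES 2          /* illustrative; the real value is in the header */

    struct page;                /* opaque for the sketch */

    struct buf {
            struct page **pages;                /* points at one of the two below */
            struct page  *page_array[XB_PAGES]; /* embedded fast path */
            int           page_count;
    };

    static int buf_get_pages(struct buf *bp, int page_count)
    {
            if (page_count <= XB_PAGES) {
                    bp->pages = bp->page_array;     /* no allocation needed */
            } else {
                    bp->pages = malloc(sizeof(struct page *) * page_count);
                    if (bp->pages == NULL)
                            return -1;
            }
            bp->page_count = page_count;
            memset(bp->pages, 0, sizeof(struct page *) * page_count);
            return 0;
    }

    static void buf_free_pages(struct buf *bp)
    {
            if (bp->pages != bp->page_array)    /* only the heap case was allocated */
                    free(bp->pages);
    }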
@@ -300,79 +300,79 @@ _pagebuf_free_pages(
300 | * Releases the specified buffer. | 300 | * Releases the specified buffer. |
301 | * | 301 | * |
302 | * The modification state of any associated pages is left unchanged. | 302 | * The modification state of any associated pages is left unchanged. |
303 | * The buffer must not be on any hash - use pagebuf_rele instead for | 303 | * The buffer must not be on any hash - use xfs_buf_rele instead for |
304 | * hashed and refcounted buffers | 304 | * hashed and refcounted buffers |
305 | */ | 305 | */ |
306 | void | 306 | void |
307 | pagebuf_free( | 307 | xfs_buf_free( |
308 | xfs_buf_t *bp) | 308 | xfs_buf_t *bp) |
309 | { | 309 | { |
310 | PB_TRACE(bp, "free", 0); | 310 | XB_TRACE(bp, "free", 0); |
311 | 311 | ||
312 | ASSERT(list_empty(&bp->pb_hash_list)); | 312 | ASSERT(list_empty(&bp->b_hash_list)); |
313 | 313 | ||
314 | if (bp->pb_flags & _PBF_PAGE_CACHE) { | 314 | if (bp->b_flags & _XBF_PAGE_CACHE) { |
315 | uint i; | 315 | uint i; |
316 | 316 | ||
317 | if ((bp->pb_flags & PBF_MAPPED) && (bp->pb_page_count > 1)) | 317 | if ((bp->b_flags & XBF_MAPPED) && (bp->b_page_count > 1)) |
318 | free_address(bp->pb_addr - bp->pb_offset); | 318 | free_address(bp->b_addr - bp->b_offset); |
319 | 319 | ||
320 | for (i = 0; i < bp->pb_page_count; i++) | 320 | for (i = 0; i < bp->b_page_count; i++) |
321 | page_cache_release(bp->pb_pages[i]); | 321 | page_cache_release(bp->b_pages[i]); |
322 | _pagebuf_free_pages(bp); | 322 | _xfs_buf_free_pages(bp); |
323 | } else if (bp->pb_flags & _PBF_KMEM_ALLOC) { | 323 | } else if (bp->b_flags & _XBF_KMEM_ALLOC) { |
324 | /* | 324 | /* |
325 | * XXX(hch): bp->pb_count_desired might be incorrect (see | 325 | * XXX(hch): bp->b_count_desired might be incorrect (see |
326 | * pagebuf_associate_memory for details), but fortunately | 326 | * xfs_buf_associate_memory for details), but fortunately |
327 | * the Linux version of kmem_free ignores the len argument.. | 327 | * the Linux version of kmem_free ignores the len argument.. |
328 | */ | 328 | */ |
329 | kmem_free(bp->pb_addr, bp->pb_count_desired); | 329 | kmem_free(bp->b_addr, bp->b_count_desired); |
330 | _pagebuf_free_pages(bp); | 330 | _xfs_buf_free_pages(bp); |
331 | } | 331 | } |
332 | 332 | ||
333 | pagebuf_deallocate(bp); | 333 | xfs_buf_deallocate(bp); |
334 | } | 334 | } |
335 | 335 | ||
336 | /* | 336 | /* |
337 | * Finds all pages for buffer in question and builds its page list. | 337 | * Finds all pages for buffer in question and builds its page list. |
338 | */ | 338 | */ |
339 | STATIC int | 339 | STATIC int |
340 | _pagebuf_lookup_pages( | 340 | _xfs_buf_lookup_pages( |
341 | xfs_buf_t *bp, | 341 | xfs_buf_t *bp, |
342 | uint flags) | 342 | uint flags) |
343 | { | 343 | { |
344 | struct address_space *mapping = bp->pb_target->pbr_mapping; | 344 | struct address_space *mapping = bp->b_target->bt_mapping; |
345 | size_t blocksize = bp->pb_target->pbr_bsize; | 345 | size_t blocksize = bp->b_target->bt_bsize; |
346 | size_t size = bp->pb_count_desired; | 346 | size_t size = bp->b_count_desired; |
347 | size_t nbytes, offset; | 347 | size_t nbytes, offset; |
348 | gfp_t gfp_mask = pb_to_gfp(flags); | 348 | gfp_t gfp_mask = xb_to_gfp(flags); |
349 | unsigned short page_count, i; | 349 | unsigned short page_count, i; |
350 | pgoff_t first; | 350 | pgoff_t first; |
351 | loff_t end; | 351 | loff_t end; |
352 | int error; | 352 | int error; |
353 | 353 | ||
354 | end = bp->pb_file_offset + bp->pb_buffer_length; | 354 | end = bp->b_file_offset + bp->b_buffer_length; |
355 | page_count = page_buf_btoc(end) - page_buf_btoct(bp->pb_file_offset); | 355 | page_count = xfs_buf_btoc(end) - xfs_buf_btoct(bp->b_file_offset); |
356 | 356 | ||
357 | error = _pagebuf_get_pages(bp, page_count, flags); | 357 | error = _xfs_buf_get_pages(bp, page_count, flags); |
358 | if (unlikely(error)) | 358 | if (unlikely(error)) |
359 | return error; | 359 | return error; |
360 | bp->pb_flags |= _PBF_PAGE_CACHE; | 360 | bp->b_flags |= _XBF_PAGE_CACHE; |
361 | 361 | ||
362 | offset = bp->pb_offset; | 362 | offset = bp->b_offset; |
363 | first = bp->pb_file_offset >> PAGE_CACHE_SHIFT; | 363 | first = bp->b_file_offset >> PAGE_CACHE_SHIFT; |
364 | 364 | ||
365 | for (i = 0; i < bp->pb_page_count; i++) { | 365 | for (i = 0; i < bp->b_page_count; i++) { |
366 | struct page *page; | 366 | struct page *page; |
367 | uint retries = 0; | 367 | uint retries = 0; |
368 | 368 | ||
369 | retry: | 369 | retry: |
370 | page = find_or_create_page(mapping, first + i, gfp_mask); | 370 | page = find_or_create_page(mapping, first + i, gfp_mask); |
371 | if (unlikely(page == NULL)) { | 371 | if (unlikely(page == NULL)) { |
372 | if (flags & PBF_READ_AHEAD) { | 372 | if (flags & XBF_READ_AHEAD) { |
373 | bp->pb_page_count = i; | 373 | bp->b_page_count = i; |
374 | for (i = 0; i < bp->pb_page_count; i++) | 374 | for (i = 0; i < bp->b_page_count; i++) |
375 | unlock_page(bp->pb_pages[i]); | 375 | unlock_page(bp->b_pages[i]); |
376 | return -ENOMEM; | 376 | return -ENOMEM; |
377 | } | 377 | } |
378 | 378 | ||
@@ -388,13 +388,13 @@ _pagebuf_lookup_pages(
388 | "deadlock in %s (mode:0x%x)\n", | 388 | "deadlock in %s (mode:0x%x)\n", |
389 | __FUNCTION__, gfp_mask); | 389 | __FUNCTION__, gfp_mask); |
390 | 390 | ||
391 | XFS_STATS_INC(pb_page_retries); | 391 | XFS_STATS_INC(xb_page_retries); |
392 | xfsbufd_wakeup(0, gfp_mask); | 392 | xfsbufd_wakeup(0, gfp_mask); |
393 | blk_congestion_wait(WRITE, HZ/50); | 393 | blk_congestion_wait(WRITE, HZ/50); |
394 | goto retry; | 394 | goto retry; |
395 | } | 395 | } |
396 | 396 | ||
397 | XFS_STATS_INC(pb_page_found); | 397 | XFS_STATS_INC(xb_page_found); |
398 | 398 | ||
399 | nbytes = min_t(size_t, size, PAGE_CACHE_SIZE - offset); | 399 | nbytes = min_t(size_t, size, PAGE_CACHE_SIZE - offset); |
400 | size -= nbytes; | 400 | size -= nbytes; |
@@ -402,27 +402,27 @@ _pagebuf_lookup_pages(
402 | if (!PageUptodate(page)) { | 402 | if (!PageUptodate(page)) { |
403 | page_count--; | 403 | page_count--; |
404 | if (blocksize >= PAGE_CACHE_SIZE) { | 404 | if (blocksize >= PAGE_CACHE_SIZE) { |
405 | if (flags & PBF_READ) | 405 | if (flags & XBF_READ) |
406 | bp->pb_locked = 1; | 406 | bp->b_locked = 1; |
407 | } else if (!PagePrivate(page)) { | 407 | } else if (!PagePrivate(page)) { |
408 | if (test_page_region(page, offset, nbytes)) | 408 | if (test_page_region(page, offset, nbytes)) |
409 | page_count++; | 409 | page_count++; |
410 | } | 410 | } |
411 | } | 411 | } |
412 | 412 | ||
413 | bp->pb_pages[i] = page; | 413 | bp->b_pages[i] = page; |
414 | offset = 0; | 414 | offset = 0; |
415 | } | 415 | } |
416 | 416 | ||
417 | if (!bp->pb_locked) { | 417 | if (!bp->b_locked) { |
418 | for (i = 0; i < bp->pb_page_count; i++) | 418 | for (i = 0; i < bp->b_page_count; i++) |
419 | unlock_page(bp->pb_pages[i]); | 419 | unlock_page(bp->b_pages[i]); |
420 | } | 420 | } |
421 | 421 | ||
422 | if (page_count == bp->pb_page_count) | 422 | if (page_count == bp->b_page_count) |
423 | bp->pb_flags |= PBF_DONE; | 423 | bp->b_flags |= XBF_DONE; |
424 | 424 | ||
425 | PB_TRACE(bp, "lookup_pages", (long)page_count); | 425 | XB_TRACE(bp, "lookup_pages", (long)page_count); |
426 | return error; | 426 | return error; |
427 | } | 427 | } |
428 | 428 | ||
@@ -430,23 +430,23 @@ _pagebuf_lookup_pages(
430 | * Map buffer into kernel address-space if necessary. | 430 | * Map buffer into kernel address-space if necessary. |
431 | */ | 431 | */ |
432 | STATIC int | 432 | STATIC int |
433 | _pagebuf_map_pages( | 433 | _xfs_buf_map_pages( |
434 | xfs_buf_t *bp, | 434 | xfs_buf_t *bp, |
435 | uint flags) | 435 | uint flags) |
436 | { | 436 | { |
437 | /* A single page buffer is always mappable */ | 437 | /* A single page buffer is always mappable */ |
438 | if (bp->pb_page_count == 1) { | 438 | if (bp->b_page_count == 1) { |
439 | bp->pb_addr = page_address(bp->pb_pages[0]) + bp->pb_offset; | 439 | bp->b_addr = page_address(bp->b_pages[0]) + bp->b_offset; |
440 | bp->pb_flags |= PBF_MAPPED; | 440 | bp->b_flags |= XBF_MAPPED; |
441 | } else if (flags & PBF_MAPPED) { | 441 | } else if (flags & XBF_MAPPED) { |
442 | if (as_list_len > 64) | 442 | if (as_list_len > 64) |
443 | purge_addresses(); | 443 | purge_addresses(); |
444 | bp->pb_addr = vmap(bp->pb_pages, bp->pb_page_count, | 444 | bp->b_addr = vmap(bp->b_pages, bp->b_page_count, |
445 | VM_MAP, PAGE_KERNEL); | 445 | VM_MAP, PAGE_KERNEL); |
446 | if (unlikely(bp->pb_addr == NULL)) | 446 | if (unlikely(bp->b_addr == NULL)) |
447 | return -ENOMEM; | 447 | return -ENOMEM; |
448 | bp->pb_addr += bp->pb_offset; | 448 | bp->b_addr += bp->b_offset; |
449 | bp->pb_flags |= PBF_MAPPED; | 449 | bp->b_flags |= XBF_MAPPED; |
450 | } | 450 | } |
451 | 451 | ||
452 | return 0; | 452 | return 0; |
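
The `as_list_len > 64` check above ties into the batching scheme from earlier in the file: vunmap() is costly, so free_address() queues addresses on a list and purge_addresses() releases them in bulk. A userspace model of that deferral (free() stands in for vunmap(), the field names approximate the kernel's, and the as_lock spinlock is omitted):

    #include <stdlib.h>

    struct a_list {
            void          *vm_addr;
            struct a_list *next;
    };

    static struct a_list *as_free_head;
    static int            as_list_len;

    static void free_address(void *addr)
    {
            struct a_list *aentry = malloc(sizeof(*aentry));

            if (aentry == NULL) {       /* fall back to freeing immediately */
                    free(addr);
                    return;
            }
            aentry->vm_addr = addr;
            aentry->next = as_free_head;
            as_free_head = aentry;
            as_list_len++;
    }

    static void purge_addresses(void)
    {
            struct a_list *aentry = as_free_head;

            as_free_head = NULL;
            as_list_len = 0;
            while (aentry) {
                    struct a_list *next = aentry->next;

                    free(aentry->vm_addr);  /* stand-in for vunmap() */
                    free(aentry);
                    aentry = next;
            }
    }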
@@ -457,9 +457,7 @@ _pagebuf_map_pages(
457 | */ | 457 | */ |
458 | 458 | ||
459 | /* | 459 | /* |
460 | * _pagebuf_find | 460 | * Look up, and creates if absent, a lockable buffer for |
461 | * | ||
462 | * Looks up, and creates if absent, a lockable buffer for | ||
463 | * a given range of an inode. The buffer is returned | 461 | * a given range of an inode. The buffer is returned |
464 | * locked. If other overlapping buffers exist, they are | 462 | * locked. If other overlapping buffers exist, they are |
465 | * released before the new buffer is created and locked, | 463 | * released before the new buffer is created and locked, |
@@ -467,55 +465,55 @@ _pagebuf_map_pages(
467 | * are unlocked. No I/O is implied by this call. | 465 | * are unlocked. No I/O is implied by this call. |
468 | */ | 466 | */ |
469 | xfs_buf_t * | 467 | xfs_buf_t * |
470 | _pagebuf_find( | 468 | _xfs_buf_find( |
471 | xfs_buftarg_t *btp, /* block device target */ | 469 | xfs_buftarg_t *btp, /* block device target */ |
472 | loff_t ioff, /* starting offset of range */ | 470 | loff_t ioff, /* starting offset of range */ |
473 | size_t isize, /* length of range */ | 471 | size_t isize, /* length of range */ |
474 | page_buf_flags_t flags, /* PBF_TRYLOCK */ | 472 | xfs_buf_flags_t flags, |
475 | xfs_buf_t *new_pb)/* newly allocated buffer */ | 473 | xfs_buf_t *new_bp) |
476 | { | 474 | { |
477 | loff_t range_base; | 475 | loff_t range_base; |
478 | size_t range_length; | 476 | size_t range_length; |
479 | xfs_bufhash_t *hash; | 477 | xfs_bufhash_t *hash; |
480 | xfs_buf_t *pb, *n; | 478 | xfs_buf_t *bp, *n; |
481 | 479 | ||
482 | range_base = (ioff << BBSHIFT); | 480 | range_base = (ioff << BBSHIFT); |
483 | range_length = (isize << BBSHIFT); | 481 | range_length = (isize << BBSHIFT); |
484 | 482 | ||
485 | /* Check for IOs smaller than the sector size / not sector aligned */ | 483 | /* Check for IOs smaller than the sector size / not sector aligned */ |
486 | ASSERT(!(range_length < (1 << btp->pbr_sshift))); | 484 | ASSERT(!(range_length < (1 << btp->bt_sshift))); |
487 | ASSERT(!(range_base & (loff_t)btp->pbr_smask)); | 485 | ASSERT(!(range_base & (loff_t)btp->bt_smask)); |
488 | 486 | ||
489 | hash = &btp->bt_hash[hash_long((unsigned long)ioff, btp->bt_hashshift)]; | 487 | hash = &btp->bt_hash[hash_long((unsigned long)ioff, btp->bt_hashshift)]; |
490 | 488 | ||
491 | spin_lock(&hash->bh_lock); | 489 | spin_lock(&hash->bh_lock); |
492 | 490 | ||
493 | list_for_each_entry_safe(pb, n, &hash->bh_list, pb_hash_list) { | 491 | list_for_each_entry_safe(bp, n, &hash->bh_list, b_hash_list) { |
494 | ASSERT(btp == pb->pb_target); | 492 | ASSERT(btp == bp->b_target); |
495 | if (pb->pb_file_offset == range_base && | 493 | if (bp->b_file_offset == range_base && |
496 | pb->pb_buffer_length == range_length) { | 494 | bp->b_buffer_length == range_length) { |
497 | /* | 495 | /* |
498 | * If we look at something bring it to the | 496 | * If we look at something, bring it to the |
499 | * front of the list for next time. | 497 | * front of the list for next time. |
500 | */ | 498 | */ |
501 | atomic_inc(&pb->pb_hold); | 499 | atomic_inc(&bp->b_hold); |
502 | list_move(&pb->pb_hash_list, &hash->bh_list); | 500 | list_move(&bp->b_hash_list, &hash->bh_list); |
503 | goto found; | 501 | goto found; |
504 | } | 502 | } |
505 | } | 503 | } |
506 | 504 | ||
507 | /* No match found */ | 505 | /* No match found */ |
508 | if (new_pb) { | 506 | if (new_bp) { |
509 | _pagebuf_initialize(new_pb, btp, range_base, | 507 | _xfs_buf_initialize(new_bp, btp, range_base, |
510 | range_length, flags); | 508 | range_length, flags); |
511 | new_pb->pb_hash = hash; | 509 | new_bp->b_hash = hash; |
512 | list_add(&new_pb->pb_hash_list, &hash->bh_list); | 510 | list_add(&new_bp->b_hash_list, &hash->bh_list); |
513 | } else { | 511 | } else { |
514 | XFS_STATS_INC(pb_miss_locked); | 512 | XFS_STATS_INC(xb_miss_locked); |
515 | } | 513 | } |
516 | 514 | ||
517 | spin_unlock(&hash->bh_lock); | 515 | spin_unlock(&hash->bh_lock); |
518 | return new_pb; | 516 | return new_bp; |
519 | 517 | ||
520 | found: | 518 | found: |
521 | spin_unlock(&hash->bh_lock); | 519 | spin_unlock(&hash->bh_lock); |
@@ -524,74 +522,72 @@ found:
524 | * if this does not work then we need to drop the | 522 | * if this does not work then we need to drop the |
525 | * spinlock and do a hard attempt on the semaphore. | 523 | * spinlock and do a hard attempt on the semaphore. |
526 | */ | 524 | */ |
527 | if (down_trylock(&pb->pb_sema)) { | 525 | if (down_trylock(&bp->b_sema)) { |
528 | if (!(flags & PBF_TRYLOCK)) { | 526 | if (!(flags & XBF_TRYLOCK)) { |
529 | /* wait for buffer ownership */ | 527 | /* wait for buffer ownership */ |
530 | PB_TRACE(pb, "get_lock", 0); | 528 | XB_TRACE(bp, "get_lock", 0); |
531 | pagebuf_lock(pb); | 529 | xfs_buf_lock(bp); |
532 | XFS_STATS_INC(pb_get_locked_waited); | 530 | XFS_STATS_INC(xb_get_locked_waited); |
533 | } else { | 531 | } else { |
534 | /* We asked for a trylock and failed, no need | 532 | /* We asked for a trylock and failed, no need |
535 | * to look at file offset and length here, we | 533 | * to look at file offset and length here, we |
536 | * know that this pagebuf at least overlaps our | 534 | * know that this buffer at least overlaps our |
537 | * pagebuf and is locked, therefore our buffer | 535 | * buffer and is locked, therefore our buffer |
538 | * either does not exist, or is this buffer | 536 | * either does not exist, or is this buffer. |
539 | */ | 537 | */ |
540 | 538 | xfs_buf_rele(bp); | |
541 | pagebuf_rele(pb); | 539 | XFS_STATS_INC(xb_busy_locked); |
542 | XFS_STATS_INC(pb_busy_locked); | 540 | return NULL; |
543 | return (NULL); | ||
544 | } | 541 | } |
545 | } else { | 542 | } else { |
546 | /* trylock worked */ | 543 | /* trylock worked */ |
547 | PB_SET_OWNER(pb); | 544 | XB_SET_OWNER(bp); |
548 | } | 545 | } |
549 | 546 | ||
550 | if (pb->pb_flags & PBF_STALE) { | 547 | if (bp->b_flags & XBF_STALE) { |
551 | ASSERT((pb->pb_flags & _PBF_DELWRI_Q) == 0); | 548 | ASSERT((bp->b_flags & _XBF_DELWRI_Q) == 0); |
552 | pb->pb_flags &= PBF_MAPPED; | 549 | bp->b_flags &= XBF_MAPPED; |
553 | } | 550 | } |
554 | PB_TRACE(pb, "got_lock", 0); | 551 | XB_TRACE(bp, "got_lock", 0); |
555 | XFS_STATS_INC(pb_get_locked); | 552 | XFS_STATS_INC(xb_get_locked); |
556 | return (pb); | 553 | return bp; |
557 | } | 554 | } |
558 | 555 | ||
559 | /* | 556 | /* |
560 | * xfs_buf_get_flags assembles a buffer covering the specified range. | 557 | * Assembles a buffer covering the specified range. |
561 | * | ||
562 | * Storage in memory for all portions of the buffer will be allocated, | 558 | * Storage in memory for all portions of the buffer will be allocated, |
563 | * although backing storage may not be. | 559 | * although backing storage may not be. |
564 | */ | 560 | */ |
565 | xfs_buf_t * | 561 | xfs_buf_t * |
566 | xfs_buf_get_flags( /* allocate a buffer */ | 562 | xfs_buf_get_flags( |
567 | xfs_buftarg_t *target,/* target for buffer */ | 563 | xfs_buftarg_t *target,/* target for buffer */ |
568 | loff_t ioff, /* starting offset of range */ | 564 | loff_t ioff, /* starting offset of range */ |
569 | size_t isize, /* length of range */ | 565 | size_t isize, /* length of range */ |
570 | page_buf_flags_t flags) /* PBF_TRYLOCK */ | 566 | xfs_buf_flags_t flags) |
571 | { | 567 | { |
572 | xfs_buf_t *pb, *new_pb; | 568 | xfs_buf_t *bp, *new_bp; |
573 | int error = 0, i; | 569 | int error = 0, i; |
574 | 570 | ||
575 | new_pb = pagebuf_allocate(flags); | 571 | new_bp = xfs_buf_allocate(flags); |
576 | if (unlikely(!new_pb)) | 572 | if (unlikely(!new_bp)) |
577 | return NULL; | 573 | return NULL; |
578 | 574 | ||
579 | pb = _pagebuf_find(target, ioff, isize, flags, new_pb); | 575 | bp = _xfs_buf_find(target, ioff, isize, flags, new_bp); |
580 | if (pb == new_pb) { | 576 | if (bp == new_bp) { |
581 | error = _pagebuf_lookup_pages(pb, flags); | 577 | error = _xfs_buf_lookup_pages(bp, flags); |
582 | if (error) | 578 | if (error) |
583 | goto no_buffer; | 579 | goto no_buffer; |
584 | } else { | 580 | } else { |
585 | pagebuf_deallocate(new_pb); | 581 | xfs_buf_deallocate(new_bp); |
586 | if (unlikely(pb == NULL)) | 582 | if (unlikely(bp == NULL)) |
587 | return NULL; | 583 | return NULL; |
588 | } | 584 | } |
589 | 585 | ||
590 | for (i = 0; i < pb->pb_page_count; i++) | 586 | for (i = 0; i < bp->b_page_count; i++) |
591 | mark_page_accessed(pb->pb_pages[i]); | 587 | mark_page_accessed(bp->b_pages[i]); |
592 | 588 | ||
593 | if (!(pb->pb_flags & PBF_MAPPED)) { | 589 | if (!(bp->b_flags & XBF_MAPPED)) { |
594 | error = _pagebuf_map_pages(pb, flags); | 590 | error = _xfs_buf_map_pages(bp, flags); |
595 | if (unlikely(error)) { | 591 | if (unlikely(error)) { |
596 | printk(KERN_WARNING "%s: failed to map pages\n", | 592 | printk(KERN_WARNING "%s: failed to map pages\n", |
597 | __FUNCTION__); | 593 | __FUNCTION__); |
@@ -599,22 +595,22 @@ xfs_buf_get_flags( /* allocate a buffer */
599 | } | 595 | } |
600 | } | 596 | } |
601 | 597 | ||
602 | XFS_STATS_INC(pb_get); | 598 | XFS_STATS_INC(xb_get); |
603 | 599 | ||
604 | /* | 600 | /* |
605 | * Always fill in the block number now, the mapped cases can do | 601 | * Always fill in the block number now, the mapped cases can do |
606 | * their own overlay of this later. | 602 | * their own overlay of this later. |
607 | */ | 603 | */ |
608 | pb->pb_bn = ioff; | 604 | bp->b_bn = ioff; |
609 | pb->pb_count_desired = pb->pb_buffer_length; | 605 | bp->b_count_desired = bp->b_buffer_length; |
610 | 606 | ||
611 | PB_TRACE(pb, "get", (unsigned long)flags); | 607 | XB_TRACE(bp, "get", (unsigned long)flags); |
612 | return pb; | 608 | return bp; |
613 | 609 | ||
614 | no_buffer: | 610 | no_buffer: |
615 | if (flags & (PBF_LOCK | PBF_TRYLOCK)) | 611 | if (flags & (XBF_LOCK | XBF_TRYLOCK)) |
616 | pagebuf_unlock(pb); | 612 | xfs_buf_unlock(bp); |
617 | pagebuf_rele(pb); | 613 | xfs_buf_rele(bp); |
618 | return NULL; | 614 | return NULL; |
619 | } | 615 | } |
620 | 616 | ||
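
From a caller's perspective, the contract of xfs_buf_get_flags() above is: the buffer comes back locked with b_bn filled in, or NULL on failure. A hypothetical caller, with the target and offsets assumed and error handling abbreviated:

    xfs_buf_t *bp;

    bp = xfs_buf_get_flags(target, ioff, isize, XBF_LOCK | XBF_MAPPED);
    if (bp == NULL)
            return NULL;    /* allocation/mapping failure, or a failed trylock */
    /* ... fill or inspect bp->b_addr ... */
    xfs_buf_unlock(bp);
    xfs_buf_rele(bp);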
@@ -623,73 +619,73 @@ xfs_buf_read_flags(
623 | xfs_buftarg_t *target, | 619 | xfs_buftarg_t *target, |
624 | loff_t ioff, | 620 | loff_t ioff, |
625 | size_t isize, | 621 | size_t isize, |
626 | page_buf_flags_t flags) | 622 | xfs_buf_flags_t flags) |
627 | { | 623 | { |
628 | xfs_buf_t *pb; | 624 | xfs_buf_t *bp; |
629 | 625 | ||
630 | flags |= PBF_READ; | 626 | flags |= XBF_READ; |
631 | 627 | ||
632 | pb = xfs_buf_get_flags(target, ioff, isize, flags); | 628 | bp = xfs_buf_get_flags(target, ioff, isize, flags); |
633 | if (pb) { | 629 | if (bp) { |
634 | if (!XFS_BUF_ISDONE(pb)) { | 630 | if (!XFS_BUF_ISDONE(bp)) { |
635 | PB_TRACE(pb, "read", (unsigned long)flags); | 631 | XB_TRACE(bp, "read", (unsigned long)flags); |
636 | XFS_STATS_INC(pb_get_read); | 632 | XFS_STATS_INC(xb_get_read); |
637 | pagebuf_iostart(pb, flags); | 633 | xfs_buf_iostart(bp, flags); |
638 | } else if (flags & PBF_ASYNC) { | 634 | } else if (flags & XBF_ASYNC) { |
639 | PB_TRACE(pb, "read_async", (unsigned long)flags); | 635 | XB_TRACE(bp, "read_async", (unsigned long)flags); |
640 | /* | 636 | /* |
641 | * Read ahead call which is already satisfied, | 637 | * Read ahead call which is already satisfied, |
642 | * drop the buffer | 638 | * drop the buffer |
643 | */ | 639 | */ |
644 | goto no_buffer; | 640 | goto no_buffer; |
645 | } else { | 641 | } else { |
646 | PB_TRACE(pb, "read_done", (unsigned long)flags); | 642 | XB_TRACE(bp, "read_done", (unsigned long)flags); |
647 | /* We do not want read in the flags */ | 643 | /* We do not want read in the flags */ |
648 | pb->pb_flags &= ~PBF_READ; | 644 | bp->b_flags &= ~XBF_READ; |
649 | } | 645 | } |
650 | } | 646 | } |
651 | 647 | ||
652 | return pb; | 648 | return bp; |
653 | 649 | ||
654 | no_buffer: | 650 | no_buffer: |
655 | if (flags & (PBF_LOCK | PBF_TRYLOCK)) | 651 | if (flags & (XBF_LOCK | XBF_TRYLOCK)) |
656 | pagebuf_unlock(pb); | 652 | xfs_buf_unlock(bp); |
657 | pagebuf_rele(pb); | 653 | xfs_buf_rele(bp); |
658 | return NULL; | 654 | return NULL; |
659 | } | 655 | } |
660 | 656 | ||
661 | /* | 657 | /* |
662 | * If we are not low on memory then do the readahead in a deadlock | 658 | * If we are not low on memory then do the readahead in a deadlock |
663 | * safe manner. | 659 | * safe manner. |
664 | */ | 660 | */ |
665 | void | 661 | void |
666 | pagebuf_readahead( | 662 | xfs_buf_readahead( |
667 | xfs_buftarg_t *target, | 663 | xfs_buftarg_t *target, |
668 | loff_t ioff, | 664 | loff_t ioff, |
669 | size_t isize, | 665 | size_t isize, |
670 | page_buf_flags_t flags) | 666 | xfs_buf_flags_t flags) |
671 | { | 667 | { |
672 | struct backing_dev_info *bdi; | 668 | struct backing_dev_info *bdi; |
673 | 669 | ||
674 | bdi = target->pbr_mapping->backing_dev_info; | 670 | bdi = target->bt_mapping->backing_dev_info; |
675 | if (bdi_read_congested(bdi)) | 671 | if (bdi_read_congested(bdi)) |
676 | return; | 672 | return; |
677 | 673 | ||
678 | flags |= (PBF_TRYLOCK|PBF_ASYNC|PBF_READ_AHEAD); | 674 | flags |= (XBF_TRYLOCK|XBF_ASYNC|XBF_READ_AHEAD); |
679 | xfs_buf_read_flags(target, ioff, isize, flags); | 675 | xfs_buf_read_flags(target, ioff, isize, flags); |
680 | } | 676 | } |
681 | 677 | ||
682 | xfs_buf_t * | 678 | xfs_buf_t * |
683 | pagebuf_get_empty( | 679 | xfs_buf_get_empty( |
684 | size_t len, | 680 | size_t len, |
685 | xfs_buftarg_t *target) | 681 | xfs_buftarg_t *target) |
686 | { | 682 | { |
687 | xfs_buf_t *pb; | 683 | xfs_buf_t *bp; |
688 | 684 | ||
689 | pb = pagebuf_allocate(0); | 685 | bp = xfs_buf_allocate(0); |
690 | if (pb) | 686 | if (bp) |
691 | _pagebuf_initialize(pb, target, 0, len, 0); | 687 | _xfs_buf_initialize(bp, target, 0, len, 0); |
692 | return pb; | 688 | return bp; |
693 | } | 689 | } |
694 | 690 | ||
695 | static inline struct page * | 691 | static inline struct page * |
@@ -705,8 +701,8 @@ mem_to_page(
705 | } | 701 | } |
706 | 702 | ||
707 | int | 703 | int |
708 | pagebuf_associate_memory( | 704 | xfs_buf_associate_memory( |
709 | xfs_buf_t *pb, | 705 | xfs_buf_t *bp, |
710 | void *mem, | 706 | void *mem, |
711 | size_t len) | 707 | size_t len) |
712 | { | 708 | { |
@@ -723,40 +719,40 @@ pagebuf_associate_memory(
723 | page_count++; | 719 | page_count++; |
724 | 720 | ||
725 | /* Free any previous set of page pointers */ | 721 | /* Free any previous set of page pointers */ |
726 | if (pb->pb_pages) | 722 | if (bp->b_pages) |
727 | _pagebuf_free_pages(pb); | 723 | _xfs_buf_free_pages(bp); |
728 | 724 | ||
729 | pb->pb_pages = NULL; | 725 | bp->b_pages = NULL; |
730 | pb->pb_addr = mem; | 726 | bp->b_addr = mem; |
731 | 727 | ||
732 | rval = _pagebuf_get_pages(pb, page_count, 0); | 728 | rval = _xfs_buf_get_pages(bp, page_count, 0); |
733 | if (rval) | 729 | if (rval) |
734 | return rval; | 730 | return rval; |
735 | 731 | ||
736 | pb->pb_offset = offset; | 732 | bp->b_offset = offset; |
737 | ptr = (size_t) mem & PAGE_CACHE_MASK; | 733 | ptr = (size_t) mem & PAGE_CACHE_MASK; |
738 | end = PAGE_CACHE_ALIGN((size_t) mem + len); | 734 | end = PAGE_CACHE_ALIGN((size_t) mem + len); |
739 | end_cur = end; | 735 | end_cur = end; |
740 | /* set up first page */ | 736 | /* set up first page */ |
741 | pb->pb_pages[0] = mem_to_page(mem); | 737 | bp->b_pages[0] = mem_to_page(mem); |
742 | 738 | ||
743 | ptr += PAGE_CACHE_SIZE; | 739 | ptr += PAGE_CACHE_SIZE; |
744 | pb->pb_page_count = ++i; | 740 | bp->b_page_count = ++i; |
745 | while (ptr < end) { | 741 | while (ptr < end) { |
746 | pb->pb_pages[i] = mem_to_page((void *)ptr); | 742 | bp->b_pages[i] = mem_to_page((void *)ptr); |
747 | pb->pb_page_count = ++i; | 743 | bp->b_page_count = ++i; |
748 | ptr += PAGE_CACHE_SIZE; | 744 | ptr += PAGE_CACHE_SIZE; |
749 | } | 745 | } |
750 | pb->pb_locked = 0; | 746 | bp->b_locked = 0; |
751 | 747 | ||
752 | pb->pb_count_desired = pb->pb_buffer_length = len; | 748 | bp->b_count_desired = bp->b_buffer_length = len; |
753 | pb->pb_flags |= PBF_MAPPED; | 749 | bp->b_flags |= XBF_MAPPED; |
754 | 750 | ||
755 | return 0; | 751 | return 0; |
756 | } | 752 | } |
757 | 753 | ||
758 | xfs_buf_t * | 754 | xfs_buf_t * |
759 | pagebuf_get_no_daddr( | 755 | xfs_buf_get_noaddr( |
760 | size_t len, | 756 | size_t len, |
761 | xfs_buftarg_t *target) | 757 | xfs_buftarg_t *target) |
762 | { | 758 | { |
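xfs_buf_associate_memory() in the hunk above wraps caller-provided memory in a buffer without copying: it walks the allocation a page at a time with mem_to_page(), records the offset into the first page, and marks the result XBF_MAPPED. A hypothetical caller (mem and len assumed to come from an earlier allocation):

    error = xfs_buf_associate_memory(bp, mem, len);
    if (error)
            return error;
    /* bp->b_addr == mem; bp->b_pages[] now spans the backing pages */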
@@ -765,10 +761,10 @@ pagebuf_get_no_daddr(
765 | void *data; | 761 | void *data; |
766 | int error; | 762 | int error; |
767 | 763 | ||
768 | bp = pagebuf_allocate(0); | 764 | bp = xfs_buf_allocate(0); |
769 | if (unlikely(bp == NULL)) | 765 | if (unlikely(bp == NULL)) |
770 | goto fail; | 766 | goto fail; |
771 | _pagebuf_initialize(bp, target, 0, len, 0); | 767 | _xfs_buf_initialize(bp, target, 0, len, 0); |
772 | 768 | ||
773 | try_again: | 769 | try_again: |
774 | data = kmem_alloc(malloc_len, KM_SLEEP | KM_MAYFAIL); | 770 | data = kmem_alloc(malloc_len, KM_SLEEP | KM_MAYFAIL); |
@@ -777,78 +773,73 @@ pagebuf_get_no_daddr(
777 | 773 | ||
778 | /* check whether alignment matches.. */ | 774 | /* check whether alignment matches.. */ |
779 | if ((__psunsigned_t)data != | 775 | if ((__psunsigned_t)data != |
780 | ((__psunsigned_t)data & ~target->pbr_smask)) { | 776 | ((__psunsigned_t)data & ~target->bt_smask)) { |
781 | /* .. else double the size and try again */ | 777 | /* .. else double the size and try again */ |
782 | kmem_free(data, malloc_len); | 778 | kmem_free(data, malloc_len); |
783 | malloc_len <<= 1; | 779 | malloc_len <<= 1; |
784 | goto try_again; | 780 | goto try_again; |
785 | } | 781 | } |
786 | 782 | ||
787 | error = pagebuf_associate_memory(bp, data, len); | 783 | error = xfs_buf_associate_memory(bp, data, len); |
788 | if (error) | 784 | if (error) |
789 | goto fail_free_mem; | 785 | goto fail_free_mem; |
790 | bp->pb_flags |= _PBF_KMEM_ALLOC; | 786 | bp->b_flags |= _XBF_KMEM_ALLOC; |
791 | 787 | ||
792 | pagebuf_unlock(bp); | 788 | xfs_buf_unlock(bp); |
793 | 789 | ||
794 | PB_TRACE(bp, "no_daddr", data); | 790 | XB_TRACE(bp, "no_daddr", data); |
795 | return bp; | 791 | return bp; |
796 | fail_free_mem: | 792 | fail_free_mem: |
797 | kmem_free(data, malloc_len); | 793 | kmem_free(data, malloc_len); |
798 | fail_free_buf: | 794 | fail_free_buf: |
799 | pagebuf_free(bp); | 795 | xfs_buf_free(bp); |
800 | fail: | 796 | fail: |
801 | return NULL; | 797 | return NULL; |
802 | } | 798 | } |
803 | 799 | ||
804 | /* | 800 | /* |
805 | * pagebuf_hold | ||
806 | * | ||
807 | * Increment reference count on buffer, to hold the buffer concurrently | 801 | * Increment reference count on buffer, to hold the buffer concurrently |
808 | * with another thread which may release (free) the buffer asynchronously. | 802 | * with another thread which may release (free) the buffer asynchronously. |
809 | * | ||
810 | * Must hold the buffer already to call this function. | 803 | * Must hold the buffer already to call this function. |
811 | */ | 804 | */ |
812 | void | 805 | void |
813 | pagebuf_hold( | 806 | xfs_buf_hold( |
814 | xfs_buf_t *pb) | 807 | xfs_buf_t *bp) |
815 | { | 808 | { |
816 | atomic_inc(&pb->pb_hold); | 809 | atomic_inc(&bp->b_hold); |
817 | PB_TRACE(pb, "hold", 0); | 810 | XB_TRACE(bp, "hold", 0); |
818 | } | 811 | } |
819 | 812 | ||
820 | /* | 813 | /* |
821 | * pagebuf_rele | 814 | * Releases a hold on the specified buffer. If the |
822 | * | 815 | * the hold count is 1, calls xfs_buf_free. |
823 | * pagebuf_rele releases a hold on the specified buffer. If the | ||
824 | * the hold count is 1, pagebuf_rele calls pagebuf_free. | ||
825 | */ | 816 | */ |
826 | void | 817 | void |
827 | pagebuf_rele( | 818 | xfs_buf_rele( |
828 | xfs_buf_t *pb) | 819 | xfs_buf_t *bp) |
829 | { | 820 | { |
830 | xfs_bufhash_t *hash = pb->pb_hash; | 821 | xfs_bufhash_t *hash = bp->b_hash; |
831 | 822 | ||
832 | PB_TRACE(pb, "rele", pb->pb_relse); | 823 | XB_TRACE(bp, "rele", bp->b_relse); |
833 | 824 | ||
834 | if (atomic_dec_and_lock(&pb->pb_hold, &hash->bh_lock)) { | 825 | if (atomic_dec_and_lock(&bp->b_hold, &hash->bh_lock)) { |
835 | if (pb->pb_relse) { | 826 | if (bp->b_relse) { |
836 | atomic_inc(&pb->pb_hold); | 827 | atomic_inc(&bp->b_hold); |
837 | spin_unlock(&hash->bh_lock); | 828 | spin_unlock(&hash->bh_lock); |
838 | (*(pb->pb_relse)) (pb); | 829 | (*(bp->b_relse)) (bp); |
839 | } else if (pb->pb_flags & PBF_FS_MANAGED) { | 830 | } else if (bp->b_flags & XBF_FS_MANAGED) { |
840 | spin_unlock(&hash->bh_lock); | 831 | spin_unlock(&hash->bh_lock); |
841 | } else { | 832 | } else { |
842 | ASSERT(!(pb->pb_flags & (PBF_DELWRI|_PBF_DELWRI_Q))); | 833 | ASSERT(!(bp->b_flags & (XBF_DELWRI|_XBF_DELWRI_Q))); |
843 | list_del_init(&pb->pb_hash_list); | 834 | list_del_init(&bp->b_hash_list); |
844 | spin_unlock(&hash->bh_lock); | 835 | spin_unlock(&hash->bh_lock); |
845 | pagebuf_free(pb); | 836 | xfs_buf_free(bp); |
846 | } | 837 | } |
847 | } else { | 838 | } else { |
848 | /* | 839 | /* |
849 | * Catch reference count leaks | 840 | * Catch reference count leaks |
850 | */ | 841 | */ |
851 | ASSERT(atomic_read(&pb->pb_hold) >= 0); | 842 | ASSERT(atomic_read(&bp->b_hold) >= 0); |
852 | } | 843 | } |
853 | } | 844 | } |
854 | 845 | ||
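
The alignment dance in xfs_buf_get_noaddr() above is worth spelling out: if the allocation comes back misaligned against the target's sector mask, the request size is doubled and the allocation retried; once the request is large enough to come from a page-aligned source, the check passes. A crude userspace model under the same assumption (large malloc() requests fall back to page-aligned mappings, so the loop terminates in practice; the mask is illustrative):

    #include <stdlib.h>
    #include <stdint.h>

    static void *alloc_sector_aligned(size_t len, uintptr_t smask)
    {
            size_t malloc_len = len;
            void *data;

            for (;;) {
                    data = malloc(malloc_len);
                    if (data == NULL)
                            return NULL;
                    if (((uintptr_t)data & smask) == 0)
                            return data;        /* alignment matches */
                    /* .. else double the size and try again */
                    free(data);
                    malloc_len <<= 1;
            }
    }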
@@ -864,168 +855,122 @@ pagebuf_rele(
864 | */ | 855 | */ |
865 | 856 | ||
866 | /* | 857 | /* |
867 | * pagebuf_cond_lock | 858 | * Locks a buffer object, if it is not already locked. |
868 | * | 859 | * Note that this in no way locks the underlying pages, so it is only |
869 | * pagebuf_cond_lock locks a buffer object, if it is not already locked. | 860 | * useful for synchronizing concurrent use of buffer objects, not for |
870 | * Note that this in no way | 861 | * synchronizing independent access to the underlying pages. |
871 | * locks the underlying pages, so it is only useful for synchronizing | ||
872 | * concurrent use of page buffer objects, not for synchronizing independent | ||
873 | * access to the underlying pages. | ||
874 | */ | 862 | */ |
875 | int | 863 | int |
876 | pagebuf_cond_lock( /* lock buffer, if not locked */ | 864 | xfs_buf_cond_lock( |
877 | /* returns -EBUSY if locked) */ | 865 | xfs_buf_t *bp) |
878 | xfs_buf_t *pb) | ||
879 | { | 866 | { |
880 | int locked; | 867 | int locked; |
881 | 868 | ||
882 | locked = down_trylock(&pb->pb_sema) == 0; | 869 | locked = down_trylock(&bp->b_sema) == 0; |
883 | if (locked) { | 870 | if (locked) { |
884 | PB_SET_OWNER(pb); | 871 | XB_SET_OWNER(bp); |
885 | } | 872 | } |
886 | PB_TRACE(pb, "cond_lock", (long)locked); | 873 | XB_TRACE(bp, "cond_lock", (long)locked); |
887 | return(locked ? 0 : -EBUSY); | 874 | return locked ? 0 : -EBUSY; |
888 | } | 875 | } |
889 | 876 | ||
890 | #if defined(DEBUG) || defined(XFS_BLI_TRACE) | 877 | #if defined(DEBUG) || defined(XFS_BLI_TRACE) |
891 | /* | ||
892 | * pagebuf_lock_value | ||
893 | * | ||
894 | * Return lock value for a pagebuf | ||
895 | */ | ||
896 | int | 878 | int |
897 | pagebuf_lock_value( | 879 | xfs_buf_lock_value( |
898 | xfs_buf_t *pb) | 880 | xfs_buf_t *bp) |
899 | { | 881 | { |
900 | return(atomic_read(&pb->pb_sema.count)); | 882 | return atomic_read(&bp->b_sema.count); |
901 | } | 883 | } |
902 | #endif | 884 | #endif |
903 | 885 | ||
904 | /* | 886 | /* |
905 | * pagebuf_lock | 887 | * Locks a buffer object. |
906 | * | 888 | * Note that this in no way locks the underlying pages, so it is only |
907 | * pagebuf_lock locks a buffer object. Note that this in no way | 889 | * useful for synchronizing concurrent use of buffer objects, not for |
908 | * locks the underlying pages, so it is only useful for synchronizing | 890 | * synchronizing independent access to the underlying pages. |
909 | * concurrent use of page buffer objects, not for synchronizing independent | ||
910 | * access to the underlying pages. | ||
911 | */ | 891 | */ |
912 | int | 892 | void |
913 | pagebuf_lock( | 893 | xfs_buf_lock( |
914 | xfs_buf_t *pb) | 894 | xfs_buf_t *bp) |
915 | { | 895 | { |
916 | PB_TRACE(pb, "lock", 0); | 896 | XB_TRACE(bp, "lock", 0); |
917 | if (atomic_read(&pb->pb_io_remaining)) | 897 | if (atomic_read(&bp->b_io_remaining)) |
918 | blk_run_address_space(pb->pb_target->pbr_mapping); | 898 | blk_run_address_space(bp->b_target->bt_mapping); |
919 | down(&pb->pb_sema); | 899 | down(&bp->b_sema); |
920 | PB_SET_OWNER(pb); | 900 | XB_SET_OWNER(bp); |
921 | PB_TRACE(pb, "locked", 0); | 901 | XB_TRACE(bp, "locked", 0); |
922 | return 0; | ||
923 | } | 902 | } |
924 | 903 | ||
925 | /* | 904 | /* |
926 | * pagebuf_unlock | 905 | * Releases the lock on the buffer object. |
927 | * | ||
928 | * pagebuf_unlock releases the lock on the buffer object created by | ||
929 | * pagebuf_lock or pagebuf_cond_lock (not any pinning of underlying pages | ||
930 | * created by pagebuf_pin). | ||
931 | * | ||
932 | * If the buffer is marked delwri but is not queued, do so before we | 906 | * If the buffer is marked delwri but is not queued, do so before we |
933 | * unlock the buffer as we need to set flags correctly. We also need to | 907 | * unlock the buffer as we need to set flags correctly. We also need to |
934 | * take a reference for the delwri queue because the unlocker is going to | 908 | * take a reference for the delwri queue because the unlocker is going to |
935 | * drop theirs and they don't know we just queued it. | 909 | * drop theirs and they don't know we just queued it. |
936 | */ | 910 | */ |
937 | void | 911 | void |
938 | pagebuf_unlock( /* unlock buffer */ | 912 | xfs_buf_unlock( |
939 | xfs_buf_t *pb) /* buffer to unlock */ | 913 | xfs_buf_t *bp) |
940 | { | 914 | { |
941 | if ((pb->pb_flags & (PBF_DELWRI|_PBF_DELWRI_Q)) == PBF_DELWRI) { | 915 | if ((bp->b_flags & (XBF_DELWRI|_XBF_DELWRI_Q)) == XBF_DELWRI) { |
942 | atomic_inc(&pb->pb_hold); | 916 | atomic_inc(&bp->b_hold); |
943 | pb->pb_flags |= PBF_ASYNC; | 917 | bp->b_flags |= XBF_ASYNC; |
944 | pagebuf_delwri_queue(pb, 0); | 918 | xfs_buf_delwri_queue(bp, 0); |
945 | } | 919 | } |
946 | 920 | ||
947 | PB_CLEAR_OWNER(pb); | 921 | XB_CLEAR_OWNER(bp); |
948 | up(&pb->pb_sema); | 922 | up(&bp->b_sema); |
949 | PB_TRACE(pb, "unlock", 0); | 923 | XB_TRACE(bp, "unlock", 0); |
950 | } | 924 | } |
951 | 925 | ||
952 | 926 | ||
953 | /* | 927 | /* |
954 | * Pinning Buffer Storage in Memory | 928 | * Pinning Buffer Storage in Memory |
955 | */ | 929 | * Ensure that no attempt to force a buffer to disk will succeed. |
956 | |||
957 | /* | ||
958 | * pagebuf_pin | ||
959 | * | ||
960 | * pagebuf_pin locks all of the memory represented by a buffer in | ||
961 | * memory. Multiple calls to pagebuf_pin and pagebuf_unpin, for | ||
962 | * the same or different buffers affecting a given page, will | ||
963 | * properly count the number of outstanding "pin" requests. The | ||
964 | * buffer may be released after the pagebuf_pin and a different | ||
965 | * buffer used when calling pagebuf_unpin, if desired. | ||
966 | * pagebuf_pin should be used by the file system when it wants be | ||
967 | * assured that no attempt will be made to force the affected | ||
968 | * memory to disk. It does not assure that a given logical page | ||
969 | * will not be moved to a different physical page. | ||
970 | */ | 930 | */ |
971 | void | 931 | void |
972 | pagebuf_pin( | 932 | xfs_buf_pin( |
973 | xfs_buf_t *pb) | 933 | xfs_buf_t *bp) |
974 | { | 934 | { |
975 | atomic_inc(&pb->pb_pin_count); | 935 | atomic_inc(&bp->b_pin_count); |
976 | PB_TRACE(pb, "pin", (long)pb->pb_pin_count.counter); | 936 | XB_TRACE(bp, "pin", (long)bp->b_pin_count.counter); |
977 | } | 937 | } |
978 | 938 | ||
979 | /* | ||
980 | * pagebuf_unpin | ||
981 | * | ||
982 | * pagebuf_unpin reverses the locking of memory performed by | ||
983 | * pagebuf_pin. Note that both functions affected the logical | ||
984 | * pages associated with the buffer, not the buffer itself. | ||
985 | */ | ||
986 | void | 939 | void |
987 | pagebuf_unpin( | 940 | xfs_buf_unpin( |
988 | xfs_buf_t *pb) | 941 | xfs_buf_t *bp) |
989 | { | 942 | { |
990 | if (atomic_dec_and_test(&pb->pb_pin_count)) { | 943 | if (atomic_dec_and_test(&bp->b_pin_count)) |
991 | wake_up_all(&pb->pb_waiters); | 944 | wake_up_all(&bp->b_waiters); |
992 | } | 945 | XB_TRACE(bp, "unpin", (long)bp->b_pin_count.counter); |
993 | PB_TRACE(pb, "unpin", (long)pb->pb_pin_count.counter); | ||
994 | } | 946 | } |
995 | 947 | ||
996 | int | 948 | int |
997 | pagebuf_ispin( | 949 | xfs_buf_ispin( |
998 | xfs_buf_t *pb) | 950 | xfs_buf_t *bp) |
999 | { | 951 | { |
1000 | return atomic_read(&pb->pb_pin_count); | 952 | return atomic_read(&bp->b_pin_count); |
1001 | } | 953 | } |
1002 | 954 | ||
1003 | /* | 955 | STATIC void |
1004 | * pagebuf_wait_unpin | 956 | xfs_buf_wait_unpin( |
1005 | * | 957 | xfs_buf_t *bp) |
1006 | * pagebuf_wait_unpin waits until all of the memory associated | ||
1007 | * with the buffer is no longer locked in memory. It returns | ||
1008 | * immediately if none of the affected pages are locked. | ||
1009 | */ | ||
1010 | static inline void | ||
1011 | _pagebuf_wait_unpin( | ||
1012 | xfs_buf_t *pb) | ||
1013 | { | 958 | { |
1014 | DECLARE_WAITQUEUE (wait, current); | 959 | DECLARE_WAITQUEUE (wait, current); |
1015 | 960 | ||
1016 | if (atomic_read(&pb->pb_pin_count) == 0) | 961 | if (atomic_read(&bp->b_pin_count) == 0) |
1017 | return; | 962 | return; |
1018 | 963 | ||
1019 | add_wait_queue(&pb->pb_waiters, &wait); | 964 | add_wait_queue(&bp->b_waiters, &wait); |
1020 | for (;;) { | 965 | for (;;) { |
1021 | set_current_state(TASK_UNINTERRUPTIBLE); | 966 | set_current_state(TASK_UNINTERRUPTIBLE); |
1022 | if (atomic_read(&pb->pb_pin_count) == 0) | 967 | if (atomic_read(&bp->b_pin_count) == 0) |
1023 | break; | 968 | break; |
1024 | if (atomic_read(&pb->pb_io_remaining)) | 969 | if (atomic_read(&bp->b_io_remaining)) |
1025 | blk_run_address_space(pb->pb_target->pbr_mapping); | 970 | blk_run_address_space(bp->b_target->bt_mapping); |
1026 | schedule(); | 971 | schedule(); |
1027 | } | 972 | } |
1028 | remove_wait_queue(&pb->pb_waiters, &wait); | 973 | remove_wait_queue(&bp->b_waiters, &wait); |
1029 | set_current_state(TASK_RUNNING); | 974 | set_current_state(TASK_RUNNING); |
1030 | } | 975 | } |
1031 | 976 | ||
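
xfs_buf_wait_unpin() above is the classic waitqueue sleep loop: set the task state, re-check the condition, sleep, repeat, with xfs_buf_unpin() doing a wake_up_all() when the pin count drops to zero. A userspace model of the same protocol, using a condition variable in place of the kernel waitqueue:

    #include <pthread.h>

    static pthread_mutex_t pin_lock = PTHREAD_MUTEX_INITIALIZER;
    static pthread_cond_t  pin_waiters = PTHREAD_COND_INITIALIZER;
    static int             pin_count;

    static void buf_pin(void)
    {
            pthread_mutex_lock(&pin_lock);
            pin_count++;
            pthread_mutex_unlock(&pin_lock);
    }

    static void buf_unpin(void)
    {
            pthread_mutex_lock(&pin_lock);
            if (--pin_count == 0)
                    pthread_cond_broadcast(&pin_waiters);   /* wake_up_all() */
            pthread_mutex_unlock(&pin_lock);
    }

    static void buf_wait_unpin(void)
    {
            pthread_mutex_lock(&pin_lock);
            while (pin_count != 0)                          /* re-check after each wakeup */
                    pthread_cond_wait(&pin_waiters, &pin_lock);
            pthread_mutex_unlock(&pin_lock);
    }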
@@ -1033,241 +978,216 @@ _pagebuf_wait_unpin(
1033 | * Buffer Utility Routines | 978 | * Buffer Utility Routines |
1034 | */ | 979 | */ |
1035 | 980 | ||
1036 | /* | ||
1037 | * pagebuf_iodone | ||
1038 | * | ||
1039 | * pagebuf_iodone marks a buffer for which I/O is in progress | ||
1040 | * done with respect to that I/O. The pb_iodone routine, if | ||
1041 | * present, will be called as a side-effect. | ||
1042 | */ | ||
1043 | STATIC void | 981 | STATIC void |
1044 | pagebuf_iodone_work( | 982 | xfs_buf_iodone_work( |
1045 | void *v) | 983 | void *v) |
1046 | { | 984 | { |
1047 | xfs_buf_t *bp = (xfs_buf_t *)v; | 985 | xfs_buf_t *bp = (xfs_buf_t *)v; |
1048 | 986 | ||
1049 | if (bp->pb_iodone) | 987 | if (bp->b_iodone) |
1050 | (*(bp->pb_iodone))(bp); | 988 | (*(bp->b_iodone))(bp); |
1051 | else if (bp->pb_flags & PBF_ASYNC) | 989 | else if (bp->b_flags & XBF_ASYNC) |
1052 | xfs_buf_relse(bp); | 990 | xfs_buf_relse(bp); |
1053 | } | 991 | } |
1054 | 992 | ||
1055 | void | 993 | void |
1056 | pagebuf_iodone( | 994 | xfs_buf_ioend( |
1057 | xfs_buf_t *pb, | 995 | xfs_buf_t *bp, |
1058 | int schedule) | 996 | int schedule) |
1059 | { | 997 | { |
1060 | pb->pb_flags &= ~(PBF_READ | PBF_WRITE); | 998 | bp->b_flags &= ~(XBF_READ | XBF_WRITE); |
1061 | if (pb->pb_error == 0) | 999 | if (bp->b_error == 0) |
1062 | pb->pb_flags |= PBF_DONE; | 1000 | bp->b_flags |= XBF_DONE; |
1063 | 1001 | ||
1064 | PB_TRACE(pb, "iodone", pb->pb_iodone); | 1002 | XB_TRACE(bp, "iodone", bp->b_iodone); |
1065 | 1003 | ||
1066 | if ((pb->pb_iodone) || (pb->pb_flags & PBF_ASYNC)) { | 1004 | if ((bp->b_iodone) || (bp->b_flags & XBF_ASYNC)) { |
1067 | if (schedule) { | 1005 | if (schedule) { |
1068 | INIT_WORK(&pb->pb_iodone_work, pagebuf_iodone_work, pb); | 1006 | INIT_WORK(&bp->b_iodone_work, xfs_buf_iodone_work, bp); |
1069 | queue_work(xfslogd_workqueue, &pb->pb_iodone_work); | 1007 | queue_work(xfslogd_workqueue, &bp->b_iodone_work); |
1070 | } else { | 1008 | } else { |
1071 | pagebuf_iodone_work(pb); | 1009 | xfs_buf_iodone_work(bp); |
1072 | } | 1010 | } |
1073 | } else { | 1011 | } else { |
1074 | up(&pb->pb_iodonesema); | 1012 | up(&bp->b_iodonesema); |
1075 | } | 1013 | } |
1076 | } | 1014 | } |
1077 | 1015 | ||
1078 | /* | ||
1079 | * pagebuf_ioerror | ||
1080 | * | ||
1081 | * pagebuf_ioerror sets the error code for a buffer. | ||
1082 | */ | ||
1083 | void | 1016 | void |
1084 | pagebuf_ioerror( /* mark/clear buffer error flag */ | 1017 | xfs_buf_ioerror( |
1085 | xfs_buf_t *pb, /* buffer to mark */ | 1018 | xfs_buf_t *bp, |
1086 | int error) /* error to store (0 if none) */ | 1019 | int error) |
1087 | { | 1020 | { |
1088 | ASSERT(error >= 0 && error <= 0xffff); | 1021 | ASSERT(error >= 0 && error <= 0xffff); |
1089 | pb->pb_error = (unsigned short)error; | 1022 | bp->b_error = (unsigned short)error; |
1090 | PB_TRACE(pb, "ioerror", (unsigned long)error); | 1023 | XB_TRACE(bp, "ioerror", (unsigned long)error); |
1091 | } | 1024 | } |
1092 | 1025 | ||
1093 | /* | 1026 | /* |
1094 | * pagebuf_iostart | 1027 | * Initiate I/O on a buffer, based on the flags supplied. |
1095 | * | 1028 | * The b_iodone routine in the buffer supplied will only be called |
1096 | * pagebuf_iostart initiates I/O on a buffer, based on the flags supplied. | ||
1097 | * If necessary, it will arrange for any disk space allocation required, | ||
1098 | * and it will break up the request if the block mappings require it. | ||
1099 | * The pb_iodone routine in the buffer supplied will only be called | ||
1100 | * when all of the subsidiary I/O requests, if any, have been completed. | 1029 | * when all of the subsidiary I/O requests, if any, have been completed. |
1101 | * pagebuf_iostart calls the pagebuf_ioinitiate routine or | ||
1102 | * pagebuf_iorequest, if the former routine is not defined, to start | ||
1103 | * the I/O on a given low-level request. | ||
1104 | */ | 1030 | */ |
1105 | int | 1031 | int |
1106 | pagebuf_iostart( /* start I/O on a buffer */ | 1032 | xfs_buf_iostart( |
1107 | xfs_buf_t *pb, /* buffer to start */ | 1033 | xfs_buf_t *bp, |
1108 | page_buf_flags_t flags) /* PBF_LOCK, PBF_ASYNC, PBF_READ, */ | 1034 | xfs_buf_flags_t flags) |
1109 | /* PBF_WRITE, PBF_DELWRI, */ | ||
1110 | /* PBF_DONT_BLOCK */ | ||
1111 | { | 1035 | { |
1112 | int status = 0; | 1036 | int status = 0; |
1113 | 1037 | ||
1114 | PB_TRACE(pb, "iostart", (unsigned long)flags); | 1038 | XB_TRACE(bp, "iostart", (unsigned long)flags); |
1115 | 1039 | ||
1116 | if (flags & PBF_DELWRI) { | 1040 | if (flags & XBF_DELWRI) { |
1117 | pb->pb_flags &= ~(PBF_READ | PBF_WRITE | PBF_ASYNC); | 1041 | bp->b_flags &= ~(XBF_READ | XBF_WRITE | XBF_ASYNC); |
1118 | pb->pb_flags |= flags & (PBF_DELWRI | PBF_ASYNC); | 1042 | bp->b_flags |= flags & (XBF_DELWRI | XBF_ASYNC); |
1119 | pagebuf_delwri_queue(pb, 1); | 1043 | xfs_buf_delwri_queue(bp, 1); |
1120 | return status; | 1044 | return status; |
1121 | } | 1045 | } |
1122 | 1046 | ||
1123 | pb->pb_flags &= ~(PBF_READ | PBF_WRITE | PBF_ASYNC | PBF_DELWRI | \ | 1047 | bp->b_flags &= ~(XBF_READ | XBF_WRITE | XBF_ASYNC | XBF_DELWRI | \ |
1124 | PBF_READ_AHEAD | _PBF_RUN_QUEUES); | 1048 | XBF_READ_AHEAD | _XBF_RUN_QUEUES); |
1125 | pb->pb_flags |= flags & (PBF_READ | PBF_WRITE | PBF_ASYNC | \ | 1049 | bp->b_flags |= flags & (XBF_READ | XBF_WRITE | XBF_ASYNC | \ |
1126 | PBF_READ_AHEAD | _PBF_RUN_QUEUES); | 1050 | XBF_READ_AHEAD | _XBF_RUN_QUEUES); |
1127 | 1051 | ||
1128 | BUG_ON(pb->pb_bn == XFS_BUF_DADDR_NULL); | 1052 | BUG_ON(bp->b_bn == XFS_BUF_DADDR_NULL); |
1129 | 1053 | ||
1130 | /* For writes allow an alternate strategy routine to precede | 1054 | /* For writes allow an alternate strategy routine to precede |
1131 | * the actual I/O request (which may not be issued at all in | 1055 | * the actual I/O request (which may not be issued at all in |
1132 | * a shutdown situation, for example). | 1056 | * a shutdown situation, for example). |
1133 | */ | 1057 | */ |
1134 | status = (flags & PBF_WRITE) ? | 1058 | status = (flags & XBF_WRITE) ? |
1135 | pagebuf_iostrategy(pb) : pagebuf_iorequest(pb); | 1059 | xfs_buf_iostrategy(bp) : xfs_buf_iorequest(bp); |
1136 | 1060 | ||
1137 | /* Wait for I/O if we are not an async request. | 1061 | /* Wait for I/O if we are not an async request. |
1138 | * Note: async I/O request completion will release the buffer, | 1062 | * Note: async I/O request completion will release the buffer, |
1139 | * and that may already have happened by this point. So using the | 1063 | * and that may already have happened by this point. So using the |
1140 | * buffer pointer from here on, after async I/O, is invalid. | 1064 | * buffer pointer from here on, after async I/O, is invalid. |
1141 | */ | 1065 | */ |
1142 | if (!status && !(flags & PBF_ASYNC)) | 1066 | if (!status && !(flags & XBF_ASYNC)) |
1143 | status = pagebuf_iowait(pb); | 1067 | status = xfs_buf_iowait(bp); |
1144 | 1068 | ||
1145 | return status; | 1069 | return status; |
1146 | } | 1070 | } |
1147 | 1071 | ||
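
The flag handling above follows a clear-then-set pattern: every I/O mode bit is cleared first, and only the caller-supplied bits are OR-ed back in, so no stale mode survives from a previous use of the buffer. A minimal user-space sketch, with hypothetical SB_* bits standing in for the kernel's XBF_* flags:

    #include <stdio.h>

    #define SB_READ   (1u << 0)
    #define SB_WRITE  (1u << 1)
    #define SB_ASYNC  (1u << 2)
    #define SB_DELWRI (1u << 3)
    #define SB_IOMASK (SB_READ | SB_WRITE | SB_ASYNC | SB_DELWRI)

    static unsigned int apply_io_flags(unsigned int state, unsigned int flags)
    {
        state &= ~SB_IOMASK;            /* drop stale I/O mode bits */
        state |= flags & SB_IOMASK;     /* keep only the caller's I/O bits */
        return state;
    }

    int main(void)
    {
        unsigned int state = SB_DELWRI | SB_ASYNC;  /* leftover mode */
        state = apply_io_flags(state, SB_READ);     /* fresh read request */
        printf("state=%#x\n", state);               /* prints 0x1 */
        return 0;
    }
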
1148 | /* | ||
1149 | * Helper routine for pagebuf_iorequest | ||
1150 | */ | ||
1151 | |||
1152 | STATIC __inline__ int | 1072 | STATIC __inline__ int |
1153 | _pagebuf_iolocked( | 1073 | _xfs_buf_iolocked( |
1154 | xfs_buf_t *pb) | 1074 | xfs_buf_t *bp) |
1155 | { | 1075 | { |
1156 | ASSERT(pb->pb_flags & (PBF_READ|PBF_WRITE)); | 1076 | ASSERT(bp->b_flags & (XBF_READ | XBF_WRITE)); |
1157 | if (pb->pb_flags & PBF_READ) | 1077 | if (bp->b_flags & XBF_READ) |
1158 | return pb->pb_locked; | 1078 | return bp->b_locked; |
1159 | return 0; | 1079 | return 0; |
1160 | } | 1080 | } |
1161 | 1081 | ||
1162 | STATIC __inline__ void | 1082 | STATIC __inline__ void |
1163 | _pagebuf_iodone( | 1083 | _xfs_buf_ioend( |
1164 | xfs_buf_t *pb, | 1084 | xfs_buf_t *bp, |
1165 | int schedule) | 1085 | int schedule) |
1166 | { | 1086 | { |
1167 | if (atomic_dec_and_test(&pb->pb_io_remaining) == 1) { | 1087 | if (atomic_dec_and_test(&bp->b_io_remaining) == 1) { |
1168 | pb->pb_locked = 0; | 1088 | bp->b_locked = 0; |
1169 | pagebuf_iodone(pb, schedule); | 1089 | xfs_buf_ioend(bp, schedule); |
1170 | } | 1090 | } |
1171 | } | 1091 | } |
1172 | 1092 | ||
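
The atomic_dec_and_test(...) == 1 test above works because the kernel primitive returns true (1) exactly when the decrement reaches zero, so only the final completion runs xfs_buf_ioend. A user-space C11 sketch of the same gate, using atomic_fetch_sub, which returns the old value (old == 1 means this decrement hit zero):

    #include <stdatomic.h>
    #include <stdio.h>

    static atomic_int io_remaining = 3;     /* three sub-I/Os in flight */

    static void sub_io_done(int id)
    {
        /* fetch_sub returns the old value; old == 1 means we hit zero */
        if (atomic_fetch_sub(&io_remaining, 1) == 1)
            printf("sub-I/O %d was last: run final completion\n", id);
        else
            printf("sub-I/O %d done, others still pending\n", id);
    }

    int main(void)
    {
        for (int id = 0; id < 3; id++)
            sub_io_done(id);
        return 0;
    }
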
1173 | STATIC int | 1093 | STATIC int |
1174 | bio_end_io_pagebuf( | 1094 | xfs_buf_bio_end_io( |
1175 | struct bio *bio, | 1095 | struct bio *bio, |
1176 | unsigned int bytes_done, | 1096 | unsigned int bytes_done, |
1177 | int error) | 1097 | int error) |
1178 | { | 1098 | { |
1179 | xfs_buf_t *pb = (xfs_buf_t *)bio->bi_private; | 1099 | xfs_buf_t *bp = (xfs_buf_t *)bio->bi_private; |
1180 | unsigned int blocksize = pb->pb_target->pbr_bsize; | 1100 | unsigned int blocksize = bp->b_target->bt_bsize; |
1181 | struct bio_vec *bvec = bio->bi_io_vec + bio->bi_vcnt - 1; | 1101 | struct bio_vec *bvec = bio->bi_io_vec + bio->bi_vcnt - 1; |
1182 | 1102 | ||
1183 | if (bio->bi_size) | 1103 | if (bio->bi_size) |
1184 | return 1; | 1104 | return 1; |
1185 | 1105 | ||
1186 | if (!test_bit(BIO_UPTODATE, &bio->bi_flags)) | 1106 | if (!test_bit(BIO_UPTODATE, &bio->bi_flags)) |
1187 | pb->pb_error = EIO; | 1107 | bp->b_error = EIO; |
1188 | 1108 | ||
1189 | do { | 1109 | do { |
1190 | struct page *page = bvec->bv_page; | 1110 | struct page *page = bvec->bv_page; |
1191 | 1111 | ||
1192 | if (unlikely(pb->pb_error)) { | 1112 | if (unlikely(bp->b_error)) { |
1193 | if (pb->pb_flags & PBF_READ) | 1113 | if (bp->b_flags & XBF_READ) |
1194 | ClearPageUptodate(page); | 1114 | ClearPageUptodate(page); |
1195 | SetPageError(page); | 1115 | SetPageError(page); |
1196 | } else if (blocksize == PAGE_CACHE_SIZE) { | 1116 | } else if (blocksize >= PAGE_CACHE_SIZE) { |
1197 | SetPageUptodate(page); | 1117 | SetPageUptodate(page); |
1198 | } else if (!PagePrivate(page) && | 1118 | } else if (!PagePrivate(page) && |
1199 | (pb->pb_flags & _PBF_PAGE_CACHE)) { | 1119 | (bp->b_flags & _XBF_PAGE_CACHE)) { |
1200 | set_page_region(page, bvec->bv_offset, bvec->bv_len); | 1120 | set_page_region(page, bvec->bv_offset, bvec->bv_len); |
1201 | } | 1121 | } |
1202 | 1122 | ||
1203 | if (--bvec >= bio->bi_io_vec) | 1123 | if (--bvec >= bio->bi_io_vec) |
1204 | prefetchw(&bvec->bv_page->flags); | 1124 | prefetchw(&bvec->bv_page->flags); |
1205 | 1125 | ||
1206 | if (_pagebuf_iolocked(pb)) { | 1126 | if (_xfs_buf_iolocked(bp)) { |
1207 | unlock_page(page); | 1127 | unlock_page(page); |
1208 | } | 1128 | } |
1209 | } while (bvec >= bio->bi_io_vec); | 1129 | } while (bvec >= bio->bi_io_vec); |
1210 | 1130 | ||
1211 | _pagebuf_iodone(pb, 1); | 1131 | _xfs_buf_ioend(bp, 1); |
1212 | bio_put(bio); | 1132 | bio_put(bio); |
1213 | return 0; | 1133 | return 0; |
1214 | } | 1134 | } |
1215 | 1135 | ||
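
xfs_buf_bio_end_io walks the I/O vector from the last segment backwards, marking each page up to date on success or in error on failure (the kernel version also prefetches the next page's flags word on each step, omitted here). A simplified sketch with an invented fake_page type in place of struct page:

    #include <stdbool.h>
    #include <stdio.h>

    struct fake_page { bool uptodate, error; };

    /* Visit the pages of a finished I/O from last to first. */
    static void io_end(struct fake_page **pages, int count, int error)
    {
        for (int i = count - 1; i >= 0; i--) {
            if (error)
                pages[i]->error = true;     /* read data unusable */
            else
                pages[i]->uptodate = true;  /* page fully populated */
        }
    }

    int main(void)
    {
        struct fake_page p0 = {0}, p1 = {0};
        struct fake_page *pages[] = { &p0, &p1 };

        io_end(pages, 2, 0);
        printf("p0 uptodate=%d p1 uptodate=%d\n", p0.uptodate, p1.uptodate);
        return 0;
    }
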
1216 | STATIC void | 1136 | STATIC void |
1217 | _pagebuf_ioapply( | 1137 | _xfs_buf_ioapply( |
1218 | xfs_buf_t *pb) | 1138 | xfs_buf_t *bp) |
1219 | { | 1139 | { |
1220 | int i, rw, map_i, total_nr_pages, nr_pages; | 1140 | int i, rw, map_i, total_nr_pages, nr_pages; |
1221 | struct bio *bio; | 1141 | struct bio *bio; |
1222 | int offset = pb->pb_offset; | 1142 | int offset = bp->b_offset; |
1223 | int size = pb->pb_count_desired; | 1143 | int size = bp->b_count_desired; |
1224 | sector_t sector = pb->pb_bn; | 1144 | sector_t sector = bp->b_bn; |
1225 | unsigned int blocksize = pb->pb_target->pbr_bsize; | 1145 | unsigned int blocksize = bp->b_target->bt_bsize; |
1226 | int locking = _pagebuf_iolocked(pb); | 1146 | int locking = _xfs_buf_iolocked(bp); |
1227 | 1147 | ||
1228 | total_nr_pages = pb->pb_page_count; | 1148 | total_nr_pages = bp->b_page_count; |
1229 | map_i = 0; | 1149 | map_i = 0; |
1230 | 1150 | ||
1231 | if (pb->pb_flags & _PBF_RUN_QUEUES) { | 1151 | if (bp->b_flags & _XBF_RUN_QUEUES) { |
1232 | pb->pb_flags &= ~_PBF_RUN_QUEUES; | 1152 | bp->b_flags &= ~_XBF_RUN_QUEUES; |
1233 | rw = (pb->pb_flags & PBF_READ) ? READ_SYNC : WRITE_SYNC; | 1153 | rw = (bp->b_flags & XBF_READ) ? READ_SYNC : WRITE_SYNC; |
1234 | } else { | 1154 | } else { |
1235 | rw = (pb->pb_flags & PBF_READ) ? READ : WRITE; | 1155 | rw = (bp->b_flags & XBF_READ) ? READ : WRITE; |
1236 | } | 1156 | } |
1237 | 1157 | ||
1238 | if (pb->pb_flags & PBF_ORDERED) { | 1158 | if (bp->b_flags & XBF_ORDERED) { |
1239 | ASSERT(!(pb->pb_flags & PBF_READ)); | 1159 | ASSERT(!(bp->b_flags & XBF_READ)); |
1240 | rw = WRITE_BARRIER; | 1160 | rw = WRITE_BARRIER; |
1241 | } | 1161 | } |
1242 | 1162 | ||
1243 | /* Special code path for reading a sub page size pagebuf in -- | 1163 | /* Special code path for reading a sub page size buffer in -- |
1244 | * we populate the whole page, and hence the other metadata | 1164 | * we populate the whole page, and hence the other metadata |
1245 | * in the same page. This optimization is only valid when the | 1165 | * in the same page. This optimization is only valid when the |
1246 | * filesystem block size and the page size are equal. | 1166 | * filesystem block size is not smaller than the page size. |
1247 | */ | 1167 | */ |
1248 | if ((pb->pb_buffer_length < PAGE_CACHE_SIZE) && | 1168 | if ((bp->b_buffer_length < PAGE_CACHE_SIZE) && |
1249 | (pb->pb_flags & PBF_READ) && locking && | 1169 | (bp->b_flags & XBF_READ) && locking && |
1250 | (blocksize == PAGE_CACHE_SIZE)) { | 1170 | (blocksize >= PAGE_CACHE_SIZE)) { |
1251 | bio = bio_alloc(GFP_NOIO, 1); | 1171 | bio = bio_alloc(GFP_NOIO, 1); |
1252 | 1172 | ||
1253 | bio->bi_bdev = pb->pb_target->pbr_bdev; | 1173 | bio->bi_bdev = bp->b_target->bt_bdev; |
1254 | bio->bi_sector = sector - (offset >> BBSHIFT); | 1174 | bio->bi_sector = sector - (offset >> BBSHIFT); |
1255 | bio->bi_end_io = bio_end_io_pagebuf; | 1175 | bio->bi_end_io = xfs_buf_bio_end_io; |
1256 | bio->bi_private = pb; | 1176 | bio->bi_private = bp; |
1257 | 1177 | ||
1258 | bio_add_page(bio, pb->pb_pages[0], PAGE_CACHE_SIZE, 0); | 1178 | bio_add_page(bio, bp->b_pages[0], PAGE_CACHE_SIZE, 0); |
1259 | size = 0; | 1179 | size = 0; |
1260 | 1180 | ||
1261 | atomic_inc(&pb->pb_io_remaining); | 1181 | atomic_inc(&bp->b_io_remaining); |
1262 | 1182 | ||
1263 | goto submit_io; | 1183 | goto submit_io; |
1264 | } | 1184 | } |
1265 | 1185 | ||
1266 | /* Lock down the pages we need for the request */ | 1186 | /* Lock down the pages we need for the request */ |
1267 | if (locking && (pb->pb_flags & PBF_WRITE) && (pb->pb_locked == 0)) { | 1187 | if (locking && (bp->b_flags & XBF_WRITE) && (bp->b_locked == 0)) { |
1268 | for (i = 0; size; i++) { | 1188 | for (i = 0; size; i++) { |
1269 | int nbytes = PAGE_CACHE_SIZE - offset; | 1189 | int nbytes = PAGE_CACHE_SIZE - offset; |
1270 | struct page *page = pb->pb_pages[i]; | 1190 | struct page *page = bp->b_pages[i]; |
1271 | 1191 | ||
1272 | if (nbytes > size) | 1192 | if (nbytes > size) |
1273 | nbytes = size; | 1193 | nbytes = size; |
@@ -1277,30 +1197,30 @@ _pagebuf_ioapply( | |||
1277 | size -= nbytes; | 1197 | size -= nbytes; |
1278 | offset = 0; | 1198 | offset = 0; |
1279 | } | 1199 | } |
1280 | offset = pb->pb_offset; | 1200 | offset = bp->b_offset; |
1281 | size = pb->pb_count_desired; | 1201 | size = bp->b_count_desired; |
1282 | } | 1202 | } |
1283 | 1203 | ||
1284 | next_chunk: | 1204 | next_chunk: |
1285 | atomic_inc(&pb->pb_io_remaining); | 1205 | atomic_inc(&bp->b_io_remaining); |
1286 | nr_pages = BIO_MAX_SECTORS >> (PAGE_SHIFT - BBSHIFT); | 1206 | nr_pages = BIO_MAX_SECTORS >> (PAGE_SHIFT - BBSHIFT); |
1287 | if (nr_pages > total_nr_pages) | 1207 | if (nr_pages > total_nr_pages) |
1288 | nr_pages = total_nr_pages; | 1208 | nr_pages = total_nr_pages; |
1289 | 1209 | ||
1290 | bio = bio_alloc(GFP_NOIO, nr_pages); | 1210 | bio = bio_alloc(GFP_NOIO, nr_pages); |
1291 | bio->bi_bdev = pb->pb_target->pbr_bdev; | 1211 | bio->bi_bdev = bp->b_target->bt_bdev; |
1292 | bio->bi_sector = sector; | 1212 | bio->bi_sector = sector; |
1293 | bio->bi_end_io = bio_end_io_pagebuf; | 1213 | bio->bi_end_io = xfs_buf_bio_end_io; |
1294 | bio->bi_private = pb; | 1214 | bio->bi_private = bp; |
1295 | 1215 | ||
1296 | for (; size && nr_pages; nr_pages--, map_i++) { | 1216 | for (; size && nr_pages; nr_pages--, map_i++) { |
1297 | int nbytes = PAGE_CACHE_SIZE - offset; | 1217 | int rbytes, nbytes = PAGE_CACHE_SIZE - offset; |
1298 | 1218 | ||
1299 | if (nbytes > size) | 1219 | if (nbytes > size) |
1300 | nbytes = size; | 1220 | nbytes = size; |
1301 | 1221 | ||
1302 | if (bio_add_page(bio, pb->pb_pages[map_i], | 1222 | rbytes = bio_add_page(bio, bp->b_pages[map_i], nbytes, offset); |
1303 | nbytes, offset) < nbytes) | 1223 | if (rbytes < nbytes) |
1304 | break; | 1224 | break; |
1305 | 1225 | ||
1306 | offset = 0; | 1226 | offset = 0; |
@@ -1316,107 +1236,102 @@ submit_io: | |||
1316 | goto next_chunk; | 1236 | goto next_chunk; |
1317 | } else { | 1237 | } else { |
1318 | bio_put(bio); | 1238 | bio_put(bio); |
1319 | pagebuf_ioerror(pb, EIO); | 1239 | xfs_buf_ioerror(bp, EIO); |
1320 | } | 1240 | } |
1321 | } | 1241 | } |
1322 | 1242 | ||
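
The next_chunk loop above carves one large request into successive bios, each carrying at most a fixed number of pages; when bio_add_page refuses a segment, the partial bio is submitted and a fresh chunk begins at the advanced sector. A sketch of that chunking arithmetic, with an illustrative MAX_PAGES in place of the BIO_MAX_SECTORS-derived limit:

    #include <stdio.h>

    #define PAGE_SIZE 4096
    #define MAX_PAGES 4             /* pages a single chunk may carry */

    int main(void)
    {
        long size = 10 * PAGE_SIZE; /* bytes still to submit */
        long sector = 1000;         /* 512-byte units */
        int offset = 512;           /* offset into the first page */
        int chunk = 0;

        while (size > 0) {
            long done = 0;
            printf("chunk %d at sector %ld:", chunk++, sector);
            for (int pg = 0; pg < MAX_PAGES && size > 0; pg++) {
                int nbytes = PAGE_SIZE - offset;
                if (nbytes > size)
                    nbytes = size;
                printf(" %d", nbytes);
                offset = 0;         /* only the first page is offset */
                size -= nbytes;
                done += nbytes;
            }
            sector += done >> 9;    /* advance in 512-byte sectors */
            printf("\n");
        }
        return 0;
    }
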
1323 | /* | ||
1324 | * pagebuf_iorequest -- the core I/O request routine. | ||
1325 | */ | ||
1326 | int | 1243 | int |
1327 | pagebuf_iorequest( /* start real I/O */ | 1244 | xfs_buf_iorequest( |
1328 | xfs_buf_t *pb) /* buffer to convey to device */ | 1245 | xfs_buf_t *bp) |
1329 | { | 1246 | { |
1330 | PB_TRACE(pb, "iorequest", 0); | 1247 | XB_TRACE(bp, "iorequest", 0); |
1331 | 1248 | ||
1332 | if (pb->pb_flags & PBF_DELWRI) { | 1249 | if (bp->b_flags & XBF_DELWRI) { |
1333 | pagebuf_delwri_queue(pb, 1); | 1250 | xfs_buf_delwri_queue(bp, 1); |
1334 | return 0; | 1251 | return 0; |
1335 | } | 1252 | } |
1336 | 1253 | ||
1337 | if (pb->pb_flags & PBF_WRITE) { | 1254 | if (bp->b_flags & XBF_WRITE) { |
1338 | _pagebuf_wait_unpin(pb); | 1255 | xfs_buf_wait_unpin(bp); |
1339 | } | 1256 | } |
1340 | 1257 | ||
1341 | pagebuf_hold(pb); | 1258 | xfs_buf_hold(bp); |
1342 | 1259 | ||
1343 | /* Set the count to 1 initially; this stops an I/O | 1260 | /* Set the count to 1 initially; this stops an I/O |
1344 | * completion callout that happens before we have started | 1261 | * completion callout that happens before we have started |
1345 | * all the I/O from calling pagebuf_iodone too early. | 1262 | * all the I/O from calling xfs_buf_ioend too early. |
1346 | */ | 1263 | */ |
1347 | atomic_set(&pb->pb_io_remaining, 1); | 1264 | atomic_set(&bp->b_io_remaining, 1); |
1348 | _pagebuf_ioapply(pb); | 1265 | _xfs_buf_ioapply(bp); |
1349 | _pagebuf_iodone(pb, 0); | 1266 | _xfs_buf_ioend(bp, 0); |
1350 | 1267 | ||
1351 | pagebuf_rele(pb); | 1268 | xfs_buf_rele(bp); |
1352 | return 0; | 1269 | return 0; |
1353 | } | 1270 | } |
1354 | 1271 | ||
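
Initializing b_io_remaining to 1 is a bias trick: completions that arrive while _xfs_buf_ioapply is still submitting can never take the counter to zero, and the submitter's own _xfs_buf_ioend(bp, 0) at the end drops the bias, firing the completion if everything else already finished. A user-space sketch of the same sequencing:

    #include <stdatomic.h>
    #include <stdio.h>

    static atomic_int io_remaining;

    static void ioend_maybe(const char *who)
    {
        if (atomic_fetch_sub(&io_remaining, 1) == 1)
            printf("%s: all I/O done, completing buffer\n", who);
    }

    int main(void)
    {
        atomic_store(&io_remaining, 1);     /* submission bias */

        atomic_fetch_add(&io_remaining, 1); /* queue sub-I/O #1 */
        ioend_maybe("sub-I/O #1");          /* ...it finishes early: no fire */

        ioend_maybe("submitter");           /* drop the bias: fires now */
        return 0;
    }
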
1355 | /* | 1272 | /* |
1356 | * pagebuf_iowait | 1273 | * Waits for I/O to complete on the buffer supplied. |
1357 | * | 1274 | * It returns immediately if no I/O is pending. |
1358 | * pagebuf_iowait waits for I/O to complete on the buffer supplied. | 1275 | * It returns the I/O error code, if any, or 0 if there was no error. |
1359 | * It returns immediately if no I/O is pending. In any case, it returns | ||
1360 | * the error code, if any, or 0 if there is no error. | ||
1361 | */ | 1276 | */ |
1362 | int | 1277 | int |
1363 | pagebuf_iowait( | 1278 | xfs_buf_iowait( |
1364 | xfs_buf_t *pb) | 1279 | xfs_buf_t *bp) |
1365 | { | 1280 | { |
1366 | PB_TRACE(pb, "iowait", 0); | 1281 | XB_TRACE(bp, "iowait", 0); |
1367 | if (atomic_read(&pb->pb_io_remaining)) | 1282 | if (atomic_read(&bp->b_io_remaining)) |
1368 | blk_run_address_space(pb->pb_target->pbr_mapping); | 1283 | blk_run_address_space(bp->b_target->bt_mapping); |
1369 | down(&pb->pb_iodonesema); | 1284 | down(&bp->b_iodonesema); |
1370 | PB_TRACE(pb, "iowaited", (long)pb->pb_error); | 1285 | XB_TRACE(bp, "iowaited", (long)bp->b_error); |
1371 | return pb->pb_error; | 1286 | return bp->b_error; |
1372 | } | 1287 | } |
1373 | 1288 | ||
1374 | caddr_t | 1289 | xfs_caddr_t |
1375 | pagebuf_offset( | 1290 | xfs_buf_offset( |
1376 | xfs_buf_t *pb, | 1291 | xfs_buf_t *bp, |
1377 | size_t offset) | 1292 | size_t offset) |
1378 | { | 1293 | { |
1379 | struct page *page; | 1294 | struct page *page; |
1380 | 1295 | ||
1381 | offset += pb->pb_offset; | 1296 | if (bp->b_flags & XBF_MAPPED) |
1297 | return XFS_BUF_PTR(bp) + offset; | ||
1382 | 1298 | ||
1383 | page = pb->pb_pages[offset >> PAGE_CACHE_SHIFT]; | 1299 | offset += bp->b_offset; |
1384 | return (caddr_t) page_address(page) + (offset & (PAGE_CACHE_SIZE - 1)); | 1300 | page = bp->b_pages[offset >> PAGE_CACHE_SHIFT]; |
1301 | return (xfs_caddr_t)page_address(page) + (offset & (PAGE_CACHE_SIZE-1)); | ||
1385 | } | 1302 | } |
1386 | 1303 | ||
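
For a mapped buffer the offset is simply added to the base pointer; otherwise the high bits of the offset select a page and the low bits index into it. The arithmetic in isolation, with a made-up page size and backing arrays:

    #include <stdio.h>

    #define PAGE_SHIFT 12
    #define PAGE_SIZE  (1 << PAGE_SHIFT)

    int main(void)
    {
        static char page0[PAGE_SIZE], page1[PAGE_SIZE];
        char *pages[] = { page0, page1 };
        size_t offset = PAGE_SIZE + 42;     /* byte 42 of page 1 */

        /* high bits pick the page, low bits index within it */
        char *addr = pages[offset >> PAGE_SHIFT]
                   + (offset & (PAGE_SIZE - 1));

        printf("resolved to page %zu, byte %zu\n",
               offset >> PAGE_SHIFT, offset & (PAGE_SIZE - 1));
        (void)addr;
        return 0;
    }
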
1387 | /* | 1304 | /* |
1388 | * pagebuf_iomove | ||
1389 | * | ||
1390 | * Move data into or out of a buffer. | 1305 | * Move data into or out of a buffer. |
1391 | */ | 1306 | */ |
1392 | void | 1307 | void |
1393 | pagebuf_iomove( | 1308 | xfs_buf_iomove( |
1394 | xfs_buf_t *pb, /* buffer to process */ | 1309 | xfs_buf_t *bp, /* buffer to process */ |
1395 | size_t boff, /* starting buffer offset */ | 1310 | size_t boff, /* starting buffer offset */ |
1396 | size_t bsize, /* length to copy */ | 1311 | size_t bsize, /* length to copy */ |
1397 | caddr_t data, /* data address */ | 1312 | caddr_t data, /* data address */ |
1398 | page_buf_rw_t mode) /* read/write flag */ | 1313 | xfs_buf_rw_t mode) /* read/write/zero flag */ |
1399 | { | 1314 | { |
1400 | size_t bend, cpoff, csize; | 1315 | size_t bend, cpoff, csize; |
1401 | struct page *page; | 1316 | struct page *page; |
1402 | 1317 | ||
1403 | bend = boff + bsize; | 1318 | bend = boff + bsize; |
1404 | while (boff < bend) { | 1319 | while (boff < bend) { |
1405 | page = pb->pb_pages[page_buf_btoct(boff + pb->pb_offset)]; | 1320 | page = bp->b_pages[xfs_buf_btoct(boff + bp->b_offset)]; |
1406 | cpoff = page_buf_poff(boff + pb->pb_offset); | 1321 | cpoff = xfs_buf_poff(boff + bp->b_offset); |
1407 | csize = min_t(size_t, | 1322 | csize = min_t(size_t, |
1408 | PAGE_CACHE_SIZE-cpoff, pb->pb_count_desired-boff); | 1323 | PAGE_CACHE_SIZE-cpoff, bp->b_count_desired-boff); |
1409 | 1324 | ||
1410 | ASSERT(((csize + cpoff) <= PAGE_CACHE_SIZE)); | 1325 | ASSERT(((csize + cpoff) <= PAGE_CACHE_SIZE)); |
1411 | 1326 | ||
1412 | switch (mode) { | 1327 | switch (mode) { |
1413 | case PBRW_ZERO: | 1328 | case XBRW_ZERO: |
1414 | memset(page_address(page) + cpoff, 0, csize); | 1329 | memset(page_address(page) + cpoff, 0, csize); |
1415 | break; | 1330 | break; |
1416 | case PBRW_READ: | 1331 | case XBRW_READ: |
1417 | memcpy(data, page_address(page) + cpoff, csize); | 1332 | memcpy(data, page_address(page) + cpoff, csize); |
1418 | break; | 1333 | break; |
1419 | case PBRW_WRITE: | 1334 | case XBRW_WRITE: |
1420 | memcpy(page_address(page) + cpoff, data, csize); | 1335 | memcpy(page_address(page) + cpoff, data, csize); |
1421 | } | 1336 | } |
1422 | 1337 | ||
@@ -1426,12 +1341,12 @@ pagebuf_iomove( | |||
1426 | } | 1341 | } |
1427 | 1342 | ||
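
xfs_buf_iomove clamps each step to the end of the current page, so a span crossing a page boundary becomes two (or more) memset/memcpy calls. A self-contained sketch with an enum mirroring the XBRW_* modes and plain arrays standing in for pages:

    #include <stdio.h>
    #include <string.h>

    #define PAGE_SIZE 4096

    typedef enum { RW_ZERO, RW_READ, RW_WRITE } rw_mode_t;

    static void buf_iomove(char **pages, size_t boff, size_t bsize,
                           char *data, rw_mode_t mode)
    {
        size_t bend = boff + bsize;

        while (boff < bend) {
            char  *page  = pages[boff / PAGE_SIZE];
            size_t cpoff = boff % PAGE_SIZE;
            size_t csize = PAGE_SIZE - cpoff;

            if (csize > bend - boff)
                csize = bend - boff;    /* clamp to request end */

            switch (mode) {
            case RW_ZERO:  memset(page + cpoff, 0, csize); break;
            case RW_READ:  memcpy(data, page + cpoff, csize); break;
            case RW_WRITE: memcpy(page + cpoff, data, csize); break;
            }
            boff += csize;
            data += csize;
        }
    }

    int main(void)
    {
        static char p0[PAGE_SIZE], p1[PAGE_SIZE];
        char *pages[] = { p0, p1 };
        char msg[] = "spans the page boundary";

        buf_iomove(pages, PAGE_SIZE - 8, sizeof(msg), msg, RW_WRITE);
        printf("tail of page 0: %.8s\n", p0 + PAGE_SIZE - 8);
        return 0;
    }
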
1428 | /* | 1343 | /* |
1429 | * Handling of buftargs. | 1344 | * Handling of buffer targets (buftargs). |
1430 | */ | 1345 | */ |
1431 | 1346 | ||
1432 | /* | 1347 | /* |
1433 | * Wait for any bufs with callbacks that have been submitted but | 1348 | * Wait for any bufs with callbacks that have been submitted but |
1434 | * have not yet returned... walk the hash list for the target. | 1349 | * have not yet returned... walk the hash list for the target. |
1435 | */ | 1350 | */ |
1436 | void | 1351 | void |
1437 | xfs_wait_buftarg( | 1352 | xfs_wait_buftarg( |
@@ -1445,15 +1360,15 @@ xfs_wait_buftarg( | |||
1445 | hash = &btp->bt_hash[i]; | 1360 | hash = &btp->bt_hash[i]; |
1446 | again: | 1361 | again: |
1447 | spin_lock(&hash->bh_lock); | 1362 | spin_lock(&hash->bh_lock); |
1448 | list_for_each_entry_safe(bp, n, &hash->bh_list, pb_hash_list) { | 1363 | list_for_each_entry_safe(bp, n, &hash->bh_list, b_hash_list) { |
1449 | ASSERT(btp == bp->pb_target); | 1364 | ASSERT(btp == bp->b_target); |
1450 | if (!(bp->pb_flags & PBF_FS_MANAGED)) { | 1365 | if (!(bp->b_flags & XBF_FS_MANAGED)) { |
1451 | spin_unlock(&hash->bh_lock); | 1366 | spin_unlock(&hash->bh_lock); |
1452 | /* | 1367 | /* |
1453 | * Catch superblock reference count leaks | 1368 | * Catch superblock reference count leaks |
1454 | * immediately | 1369 | * immediately |
1455 | */ | 1370 | */ |
1456 | BUG_ON(bp->pb_bn == 0); | 1371 | BUG_ON(bp->b_bn == 0); |
1457 | delay(100); | 1372 | delay(100); |
1458 | goto again; | 1373 | goto again; |
1459 | } | 1374 | } |
@@ -1463,9 +1378,9 @@ again: | |||
1463 | } | 1378 | } |
1464 | 1379 | ||
1465 | /* | 1380 | /* |
1466 | * Allocate buffer hash table for a given target. | 1381 | * Allocate buffer hash table for a given target. |
1467 | * For devices containing metadata (i.e. not the log/realtime devices) | 1382 | * For devices containing metadata (i.e. not the log/realtime devices) |
1468 | * we need to allocate a much larger hash table. | 1383 | * we need to allocate a much larger hash table. |
1469 | */ | 1384 | */ |
1470 | STATIC void | 1385 | STATIC void |
1471 | xfs_alloc_bufhash( | 1386 | xfs_alloc_bufhash( |
@@ -1488,13 +1403,12 @@ STATIC void | |||
1488 | xfs_free_bufhash( | 1403 | xfs_free_bufhash( |
1489 | xfs_buftarg_t *btp) | 1404 | xfs_buftarg_t *btp) |
1490 | { | 1405 | { |
1491 | kmem_free(btp->bt_hash, | 1406 | kmem_free(btp->bt_hash, (1<<btp->bt_hashshift) * sizeof(xfs_bufhash_t)); |
1492 | (1 << btp->bt_hashshift) * sizeof(xfs_bufhash_t)); | ||
1493 | btp->bt_hash = NULL; | 1407 | btp->bt_hash = NULL; |
1494 | } | 1408 | } |
1495 | 1409 | ||
1496 | /* | 1410 | /* |
1497 | * buftarg list for delwrite queue processing | 1411 | * buftarg list for delwrite queue processing |
1498 | */ | 1412 | */ |
1499 | STATIC LIST_HEAD(xfs_buftarg_list); | 1413 | STATIC LIST_HEAD(xfs_buftarg_list); |
1500 | STATIC DEFINE_SPINLOCK(xfs_buftarg_lock); | 1414 | STATIC DEFINE_SPINLOCK(xfs_buftarg_lock); |
@@ -1524,12 +1438,13 @@ xfs_free_buftarg( | |||
1524 | { | 1438 | { |
1525 | xfs_flush_buftarg(btp, 1); | 1439 | xfs_flush_buftarg(btp, 1); |
1526 | if (external) | 1440 | if (external) |
1527 | xfs_blkdev_put(btp->pbr_bdev); | 1441 | xfs_blkdev_put(btp->bt_bdev); |
1528 | xfs_free_bufhash(btp); | 1442 | xfs_free_bufhash(btp); |
1529 | iput(btp->pbr_mapping->host); | 1443 | iput(btp->bt_mapping->host); |
1530 | 1444 | ||
1531 | /* unregister the buftarg first so that we don't get a | 1445 | /* Unregister the buftarg first so that we don't get a |
1532 | * wakeup finding a non-existent task */ | 1446 | * wakeup finding a non-existent task |
1447 | */ | ||
1533 | xfs_unregister_buftarg(btp); | 1448 | xfs_unregister_buftarg(btp); |
1534 | kthread_stop(btp->bt_task); | 1449 | kthread_stop(btp->bt_task); |
1535 | 1450 | ||
@@ -1543,11 +1458,11 @@ xfs_setsize_buftarg_flags( | |||
1543 | unsigned int sectorsize, | 1458 | unsigned int sectorsize, |
1544 | int verbose) | 1459 | int verbose) |
1545 | { | 1460 | { |
1546 | btp->pbr_bsize = blocksize; | 1461 | btp->bt_bsize = blocksize; |
1547 | btp->pbr_sshift = ffs(sectorsize) - 1; | 1462 | btp->bt_sshift = ffs(sectorsize) - 1; |
1548 | btp->pbr_smask = sectorsize - 1; | 1463 | btp->bt_smask = sectorsize - 1; |
1549 | 1464 | ||
1550 | if (set_blocksize(btp->pbr_bdev, sectorsize)) { | 1465 | if (set_blocksize(btp->bt_bdev, sectorsize)) { |
1551 | printk(KERN_WARNING | 1466 | printk(KERN_WARNING |
1552 | "XFS: Cannot set_blocksize to %u on device %s\n", | 1467 | "XFS: Cannot set_blocksize to %u on device %s\n", |
1553 | sectorsize, XFS_BUFTARG_NAME(btp)); | 1468 | sectorsize, XFS_BUFTARG_NAME(btp)); |
@@ -1567,10 +1482,10 @@ xfs_setsize_buftarg_flags( | |||
1567 | } | 1482 | } |
1568 | 1483 | ||
1569 | /* | 1484 | /* |
1570 | * When allocating the initial buffer target we have not yet | 1485 | * When allocating the initial buffer target we have not yet |
1571 | * read in the superblock, so we don't know what size sectors | 1486 | * read in the superblock, so we don't know what size sectors |
1572 | * are being used at this early stage. Play safe. | 1487 | * are being used at this early stage. Play safe. |
1573 | */ | 1488 | */ |
1574 | STATIC int | 1489 | STATIC int |
1575 | xfs_setsize_buftarg_early( | 1490 | xfs_setsize_buftarg_early( |
1576 | xfs_buftarg_t *btp, | 1491 | xfs_buftarg_t *btp, |
@@ -1618,7 +1533,7 @@ xfs_mapping_buftarg( | |||
1618 | mapping->a_ops = &mapping_aops; | 1533 | mapping->a_ops = &mapping_aops; |
1619 | mapping->backing_dev_info = bdi; | 1534 | mapping->backing_dev_info = bdi; |
1620 | mapping_set_gfp_mask(mapping, GFP_NOFS); | 1535 | mapping_set_gfp_mask(mapping, GFP_NOFS); |
1621 | btp->pbr_mapping = mapping; | 1536 | btp->bt_mapping = mapping; |
1622 | return 0; | 1537 | return 0; |
1623 | } | 1538 | } |
1624 | 1539 | ||
@@ -1651,8 +1566,8 @@ xfs_alloc_buftarg( | |||
1651 | 1566 | ||
1652 | btp = kmem_zalloc(sizeof(*btp), KM_SLEEP); | 1567 | btp = kmem_zalloc(sizeof(*btp), KM_SLEEP); |
1653 | 1568 | ||
1654 | btp->pbr_dev = bdev->bd_dev; | 1569 | btp->bt_dev = bdev->bd_dev; |
1655 | btp->pbr_bdev = bdev; | 1570 | btp->bt_bdev = bdev; |
1656 | if (xfs_setsize_buftarg_early(btp, bdev)) | 1571 | if (xfs_setsize_buftarg_early(btp, bdev)) |
1657 | goto error; | 1572 | goto error; |
1658 | if (xfs_mapping_buftarg(btp, bdev)) | 1573 | if (xfs_mapping_buftarg(btp, bdev)) |
@@ -1669,63 +1584,61 @@ error: | |||
1669 | 1584 | ||
1670 | 1585 | ||
1671 | /* | 1586 | /* |
1672 | * Pagebuf delayed write buffer handling | 1587 | * Delayed write buffer handling |
1673 | */ | 1588 | */ |
1674 | STATIC void | 1589 | STATIC void |
1675 | pagebuf_delwri_queue( | 1590 | xfs_buf_delwri_queue( |
1676 | xfs_buf_t *pb, | 1591 | xfs_buf_t *bp, |
1677 | int unlock) | 1592 | int unlock) |
1678 | { | 1593 | { |
1679 | struct list_head *dwq = &pb->pb_target->bt_delwrite_queue; | 1594 | struct list_head *dwq = &bp->b_target->bt_delwrite_queue; |
1680 | spinlock_t *dwlk = &pb->pb_target->bt_delwrite_lock; | 1595 | spinlock_t *dwlk = &bp->b_target->bt_delwrite_lock; |
1681 | 1596 | ||
1682 | PB_TRACE(pb, "delwri_q", (long)unlock); | 1597 | XB_TRACE(bp, "delwri_q", (long)unlock); |
1683 | ASSERT((pb->pb_flags & (PBF_DELWRI|PBF_ASYNC)) == | 1598 | ASSERT((bp->b_flags&(XBF_DELWRI|XBF_ASYNC)) == (XBF_DELWRI|XBF_ASYNC)); |
1684 | (PBF_DELWRI|PBF_ASYNC)); | ||
1685 | 1599 | ||
1686 | spin_lock(dwlk); | 1600 | spin_lock(dwlk); |
1687 | /* If already in the queue, dequeue and place at tail */ | 1601 | /* If already in the queue, dequeue and place at tail */ |
1688 | if (!list_empty(&pb->pb_list)) { | 1602 | if (!list_empty(&bp->b_list)) { |
1689 | ASSERT(pb->pb_flags & _PBF_DELWRI_Q); | 1603 | ASSERT(bp->b_flags & _XBF_DELWRI_Q); |
1690 | if (unlock) { | 1604 | if (unlock) |
1691 | atomic_dec(&pb->pb_hold); | 1605 | atomic_dec(&bp->b_hold); |
1692 | } | 1606 | list_del(&bp->b_list); |
1693 | list_del(&pb->pb_list); | ||
1694 | } | 1607 | } |
1695 | 1608 | ||
1696 | pb->pb_flags |= _PBF_DELWRI_Q; | 1609 | bp->b_flags |= _XBF_DELWRI_Q; |
1697 | list_add_tail(&pb->pb_list, dwq); | 1610 | list_add_tail(&bp->b_list, dwq); |
1698 | pb->pb_queuetime = jiffies; | 1611 | bp->b_queuetime = jiffies; |
1699 | spin_unlock(dwlk); | 1612 | spin_unlock(dwlk); |
1700 | 1613 | ||
1701 | if (unlock) | 1614 | if (unlock) |
1702 | pagebuf_unlock(pb); | 1615 | xfs_buf_unlock(bp); |
1703 | } | 1616 | } |
1704 | 1617 | ||
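
The queueing rule above keeps the delwri list ordered oldest-first: a buffer dirtied again is moved to the tail and its b_queuetime refreshed, so the flush daemon can stop scanning at the first buffer that is still too young. A sketch with a plain array in place of the kernel's intrusive list:

    #include <stdio.h>

    struct buf { const char *name; long queuetime; };

    static struct buf *queue[8];
    static int qlen;

    static void delwri_queue(struct buf *bp, long now)
    {
        for (int i = 0; i < qlen; i++)
            if (queue[i] == bp) {           /* already queued: */
                for (; i < qlen - 1; i++)
                    queue[i] = queue[i + 1];
                qlen--;                     /* ...dequeue it first */
                break;
            }
        bp->queuetime = now;                /* restart its aging */
        queue[qlen++] = bp;                 /* and place at the tail */
    }

    int main(void)
    {
        struct buf a = { "a", 0 }, b = { "b", 0 };

        delwri_queue(&a, 100);
        delwri_queue(&b, 101);
        delwri_queue(&a, 102);              /* re-dirtied: moves to tail */

        for (int i = 0; i < qlen; i++)
            printf("%s queued at %ld\n", queue[i]->name,
                   queue[i]->queuetime);
        return 0;
    }
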
1705 | void | 1618 | void |
1706 | pagebuf_delwri_dequeue( | 1619 | xfs_buf_delwri_dequeue( |
1707 | xfs_buf_t *pb) | 1620 | xfs_buf_t *bp) |
1708 | { | 1621 | { |
1709 | spinlock_t *dwlk = &pb->pb_target->bt_delwrite_lock; | 1622 | spinlock_t *dwlk = &bp->b_target->bt_delwrite_lock; |
1710 | int dequeued = 0; | 1623 | int dequeued = 0; |
1711 | 1624 | ||
1712 | spin_lock(dwlk); | 1625 | spin_lock(dwlk); |
1713 | if ((pb->pb_flags & PBF_DELWRI) && !list_empty(&pb->pb_list)) { | 1626 | if ((bp->b_flags & XBF_DELWRI) && !list_empty(&bp->b_list)) { |
1714 | ASSERT(pb->pb_flags & _PBF_DELWRI_Q); | 1627 | ASSERT(bp->b_flags & _XBF_DELWRI_Q); |
1715 | list_del_init(&pb->pb_list); | 1628 | list_del_init(&bp->b_list); |
1716 | dequeued = 1; | 1629 | dequeued = 1; |
1717 | } | 1630 | } |
1718 | pb->pb_flags &= ~(PBF_DELWRI|_PBF_DELWRI_Q); | 1631 | bp->b_flags &= ~(XBF_DELWRI|_XBF_DELWRI_Q); |
1719 | spin_unlock(dwlk); | 1632 | spin_unlock(dwlk); |
1720 | 1633 | ||
1721 | if (dequeued) | 1634 | if (dequeued) |
1722 | pagebuf_rele(pb); | 1635 | xfs_buf_rele(bp); |
1723 | 1636 | ||
1724 | PB_TRACE(pb, "delwri_dq", (long)dequeued); | 1637 | XB_TRACE(bp, "delwri_dq", (long)dequeued); |
1725 | } | 1638 | } |
1726 | 1639 | ||
1727 | STATIC void | 1640 | STATIC void |
1728 | pagebuf_runall_queues( | 1641 | xfs_buf_runall_queues( |
1729 | struct workqueue_struct *queue) | 1642 | struct workqueue_struct *queue) |
1730 | { | 1643 | { |
1731 | flush_workqueue(queue); | 1644 | flush_workqueue(queue); |
@@ -1740,9 +1653,9 @@ xfsbufd_wakeup( | |||
1740 | 1653 | ||
1741 | spin_lock(&xfs_buftarg_lock); | 1654 | spin_lock(&xfs_buftarg_lock); |
1742 | list_for_each_entry_safe(btp, n, &xfs_buftarg_list, bt_list) { | 1655 | list_for_each_entry_safe(btp, n, &xfs_buftarg_list, bt_list) { |
1743 | if (test_bit(BT_FORCE_SLEEP, &btp->bt_flags)) | 1656 | if (test_bit(XBT_FORCE_SLEEP, &btp->bt_flags)) |
1744 | continue; | 1657 | continue; |
1745 | set_bit(BT_FORCE_FLUSH, &btp->bt_flags); | 1658 | set_bit(XBT_FORCE_FLUSH, &btp->bt_flags); |
1746 | barrier(); | 1659 | barrier(); |
1747 | wake_up_process(btp->bt_task); | 1660 | wake_up_process(btp->bt_task); |
1748 | } | 1661 | } |
@@ -1757,7 +1670,7 @@ xfsbufd( | |||
1757 | struct list_head tmp; | 1670 | struct list_head tmp; |
1758 | unsigned long age; | 1671 | unsigned long age; |
1759 | xfs_buftarg_t *target = (xfs_buftarg_t *)data; | 1672 | xfs_buftarg_t *target = (xfs_buftarg_t *)data; |
1760 | xfs_buf_t *pb, *n; | 1673 | xfs_buf_t *bp, *n; |
1761 | struct list_head *dwq = &target->bt_delwrite_queue; | 1674 | struct list_head *dwq = &target->bt_delwrite_queue; |
1762 | spinlock_t *dwlk = &target->bt_delwrite_lock; | 1675 | spinlock_t *dwlk = &target->bt_delwrite_lock; |
1763 | 1676 | ||
@@ -1766,10 +1679,10 @@ xfsbufd( | |||
1766 | INIT_LIST_HEAD(&tmp); | 1679 | INIT_LIST_HEAD(&tmp); |
1767 | do { | 1680 | do { |
1768 | if (unlikely(freezing(current))) { | 1681 | if (unlikely(freezing(current))) { |
1769 | set_bit(BT_FORCE_SLEEP, &target->bt_flags); | 1682 | set_bit(XBT_FORCE_SLEEP, &target->bt_flags); |
1770 | refrigerator(); | 1683 | refrigerator(); |
1771 | } else { | 1684 | } else { |
1772 | clear_bit(BT_FORCE_SLEEP, &target->bt_flags); | 1685 | clear_bit(XBT_FORCE_SLEEP, &target->bt_flags); |
1773 | } | 1686 | } |
1774 | 1687 | ||
1775 | schedule_timeout_interruptible( | 1688 | schedule_timeout_interruptible( |
@@ -1777,49 +1690,49 @@ xfsbufd( | |||
1777 | 1690 | ||
1778 | age = xfs_buf_age_centisecs * msecs_to_jiffies(10); | 1691 | age = xfs_buf_age_centisecs * msecs_to_jiffies(10); |
1779 | spin_lock(dwlk); | 1692 | spin_lock(dwlk); |
1780 | list_for_each_entry_safe(pb, n, dwq, pb_list) { | 1693 | list_for_each_entry_safe(bp, n, dwq, b_list) { |
1781 | PB_TRACE(pb, "walkq1", (long)pagebuf_ispin(pb)); | 1694 | XB_TRACE(bp, "walkq1", (long)xfs_buf_ispin(bp)); |
1782 | ASSERT(pb->pb_flags & PBF_DELWRI); | 1695 | ASSERT(bp->b_flags & XBF_DELWRI); |
1783 | 1696 | ||
1784 | if (!pagebuf_ispin(pb) && !pagebuf_cond_lock(pb)) { | 1697 | if (!xfs_buf_ispin(bp) && !xfs_buf_cond_lock(bp)) { |
1785 | if (!test_bit(BT_FORCE_FLUSH, | 1698 | if (!test_bit(XBT_FORCE_FLUSH, |
1786 | &target->bt_flags) && | 1699 | &target->bt_flags) && |
1787 | time_before(jiffies, | 1700 | time_before(jiffies, |
1788 | pb->pb_queuetime + age)) { | 1701 | bp->b_queuetime + age)) { |
1789 | pagebuf_unlock(pb); | 1702 | xfs_buf_unlock(bp); |
1790 | break; | 1703 | break; |
1791 | } | 1704 | } |
1792 | 1705 | ||
1793 | pb->pb_flags &= ~(PBF_DELWRI|_PBF_DELWRI_Q); | 1706 | bp->b_flags &= ~(XBF_DELWRI|_XBF_DELWRI_Q); |
1794 | pb->pb_flags |= PBF_WRITE; | 1707 | bp->b_flags |= XBF_WRITE; |
1795 | list_move(&pb->pb_list, &tmp); | 1708 | list_move(&bp->b_list, &tmp); |
1796 | } | 1709 | } |
1797 | } | 1710 | } |
1798 | spin_unlock(dwlk); | 1711 | spin_unlock(dwlk); |
1799 | 1712 | ||
1800 | while (!list_empty(&tmp)) { | 1713 | while (!list_empty(&tmp)) { |
1801 | pb = list_entry(tmp.next, xfs_buf_t, pb_list); | 1714 | bp = list_entry(tmp.next, xfs_buf_t, b_list); |
1802 | ASSERT(target == pb->pb_target); | 1715 | ASSERT(target == bp->b_target); |
1803 | 1716 | ||
1804 | list_del_init(&pb->pb_list); | 1717 | list_del_init(&bp->b_list); |
1805 | pagebuf_iostrategy(pb); | 1718 | xfs_buf_iostrategy(bp); |
1806 | 1719 | ||
1807 | blk_run_address_space(target->pbr_mapping); | 1720 | blk_run_address_space(target->bt_mapping); |
1808 | } | 1721 | } |
1809 | 1722 | ||
1810 | if (as_list_len > 0) | 1723 | if (as_list_len > 0) |
1811 | purge_addresses(); | 1724 | purge_addresses(); |
1812 | 1725 | ||
1813 | clear_bit(BT_FORCE_FLUSH, &target->bt_flags); | 1726 | clear_bit(XBT_FORCE_FLUSH, &target->bt_flags); |
1814 | } while (!kthread_should_stop()); | 1727 | } while (!kthread_should_stop()); |
1815 | 1728 | ||
1816 | return 0; | 1729 | return 0; |
1817 | } | 1730 | } |
1818 | 1731 | ||
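
Because the list is oldest-first, xfsbufd can break out at the first buffer younger than the configured age, unless XBT_FORCE_FLUSH asks for everything. The aging policy in isolation, with queue timestamps in place of real buffers:

    #include <stdbool.h>
    #include <stdio.h>

    static void flush_aged(const long *queued, int n, long now, long age,
                           bool force)
    {
        for (int i = 0; i < n; i++) {
            if (!force && now < queued[i] + age)
                break;          /* list is oldest-first: rest is younger */
            printf("flush buffer queued at %ld\n", queued[i]);
        }
    }

    int main(void)
    {
        long queued[] = { 100, 150, 190 };      /* oldest first */

        flush_aged(queued, 3, 200, 75, false);  /* only the first ages out */
        flush_aged(queued, 3, 200, 75, true);   /* force: flush all three */
        return 0;
    }
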
1819 | /* | 1732 | /* |
1820 | * Go through all incore buffers, and release buffers if they belong to | 1733 | * Go through all incore buffers, and release buffers if they belong to |
1821 | * the given device. This is used in filesystem error handling to | 1734 | * the given device. This is used in filesystem error handling to |
1822 | * preserve the consistency of its metadata. | 1735 | * preserve the consistency of its metadata. |
1823 | */ | 1736 | */ |
1824 | int | 1737 | int |
1825 | xfs_flush_buftarg( | 1738 | xfs_flush_buftarg( |
@@ -1827,73 +1740,72 @@ xfs_flush_buftarg( | |||
1827 | int wait) | 1740 | int wait) |
1828 | { | 1741 | { |
1829 | struct list_head tmp; | 1742 | struct list_head tmp; |
1830 | xfs_buf_t *pb, *n; | 1743 | xfs_buf_t *bp, *n; |
1831 | int pincount = 0; | 1744 | int pincount = 0; |
1832 | struct list_head *dwq = &target->bt_delwrite_queue; | 1745 | struct list_head *dwq = &target->bt_delwrite_queue; |
1833 | spinlock_t *dwlk = &target->bt_delwrite_lock; | 1746 | spinlock_t *dwlk = &target->bt_delwrite_lock; |
1834 | 1747 | ||
1835 | pagebuf_runall_queues(xfsdatad_workqueue); | 1748 | xfs_buf_runall_queues(xfsdatad_workqueue); |
1836 | pagebuf_runall_queues(xfslogd_workqueue); | 1749 | xfs_buf_runall_queues(xfslogd_workqueue); |
1837 | 1750 | ||
1838 | INIT_LIST_HEAD(&tmp); | 1751 | INIT_LIST_HEAD(&tmp); |
1839 | spin_lock(dwlk); | 1752 | spin_lock(dwlk); |
1840 | list_for_each_entry_safe(pb, n, dwq, pb_list) { | 1753 | list_for_each_entry_safe(bp, n, dwq, b_list) { |
1841 | 1754 | ASSERT(bp->b_target == target); | |
1842 | ASSERT(pb->pb_target == target); | 1755 | ASSERT(bp->b_flags & (XBF_DELWRI | _XBF_DELWRI_Q)); |
1843 | ASSERT(pb->pb_flags & (PBF_DELWRI|_PBF_DELWRI_Q)); | 1756 | XB_TRACE(bp, "walkq2", (long)xfs_buf_ispin(bp)); |
1844 | PB_TRACE(pb, "walkq2", (long)pagebuf_ispin(pb)); | 1757 | if (xfs_buf_ispin(bp)) { |
1845 | if (pagebuf_ispin(pb)) { | ||
1846 | pincount++; | 1758 | pincount++; |
1847 | continue; | 1759 | continue; |
1848 | } | 1760 | } |
1849 | 1761 | ||
1850 | list_move(&pb->pb_list, &tmp); | 1762 | list_move(&bp->b_list, &tmp); |
1851 | } | 1763 | } |
1852 | spin_unlock(dwlk); | 1764 | spin_unlock(dwlk); |
1853 | 1765 | ||
1854 | /* | 1766 | /* |
1855 | * Dropped the delayed write list lock, now walk the temporary list | 1767 | * Dropped the delayed write list lock, now walk the temporary list |
1856 | */ | 1768 | */ |
1857 | list_for_each_entry_safe(pb, n, &tmp, pb_list) { | 1769 | list_for_each_entry_safe(bp, n, &tmp, b_list) { |
1858 | pagebuf_lock(pb); | 1770 | xfs_buf_lock(bp); |
1859 | pb->pb_flags &= ~(PBF_DELWRI|_PBF_DELWRI_Q); | 1771 | bp->b_flags &= ~(XBF_DELWRI|_XBF_DELWRI_Q); |
1860 | pb->pb_flags |= PBF_WRITE; | 1772 | bp->b_flags |= XBF_WRITE; |
1861 | if (wait) | 1773 | if (wait) |
1862 | pb->pb_flags &= ~PBF_ASYNC; | 1774 | bp->b_flags &= ~XBF_ASYNC; |
1863 | else | 1775 | else |
1864 | list_del_init(&pb->pb_list); | 1776 | list_del_init(&bp->b_list); |
1865 | 1777 | ||
1866 | pagebuf_iostrategy(pb); | 1778 | xfs_buf_iostrategy(bp); |
1867 | } | 1779 | } |
1868 | 1780 | ||
1869 | /* | 1781 | /* |
1870 | * Remaining list items must be flushed before returning | 1782 | * Remaining list items must be flushed before returning |
1871 | */ | 1783 | */ |
1872 | while (!list_empty(&tmp)) { | 1784 | while (!list_empty(&tmp)) { |
1873 | pb = list_entry(tmp.next, xfs_buf_t, pb_list); | 1785 | bp = list_entry(tmp.next, xfs_buf_t, b_list); |
1874 | 1786 | ||
1875 | list_del_init(&pb->pb_list); | 1787 | list_del_init(&bp->b_list); |
1876 | xfs_iowait(pb); | 1788 | xfs_iowait(bp); |
1877 | xfs_buf_relse(pb); | 1789 | xfs_buf_relse(bp); |
1878 | } | 1790 | } |
1879 | 1791 | ||
1880 | if (wait) | 1792 | if (wait) |
1881 | blk_run_address_space(target->pbr_mapping); | 1793 | blk_run_address_space(target->bt_mapping); |
1882 | 1794 | ||
1883 | return pincount; | 1795 | return pincount; |
1884 | } | 1796 | } |
1885 | 1797 | ||
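
xfs_flush_buftarg is a two-phase walk: under the delwrite lock it only counts pinned buffers and claims the rest onto a private list, then drops the lock before issuing any I/O, so slow writes never run with the spinlock held. A compact sketch of that shape, with a pthread mutex standing in for the spinlock:

    #include <pthread.h>
    #include <stdbool.h>
    #include <stdio.h>

    struct buf { int id; bool pinned; };

    static pthread_mutex_t dwlk = PTHREAD_MUTEX_INITIALIZER;

    static int flush(struct buf *dwq, int n, struct buf **tmp)
    {
        int pincount = 0, moved = 0;

        pthread_mutex_lock(&dwlk);          /* phase 1: select under lock */
        for (int i = 0; i < n; i++) {
            if (dwq[i].pinned) {
                pincount++;                 /* leave it queued */
                continue;
            }
            tmp[moved++] = &dwq[i];         /* claim onto private list */
        }
        pthread_mutex_unlock(&dwlk);

        for (int i = 0; i < moved; i++)     /* phase 2: issue I/O unlocked */
            printf("writing buffer %d\n", tmp[i]->id);
        return pincount;
    }

    int main(void)
    {
        struct buf dwq[] = { {1, false}, {2, true}, {3, false} };
        struct buf *tmp[3];

        printf("%d buffers were pinned\n", flush(dwq, 3, tmp));
        return 0;
    }
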
1886 | int __init | 1798 | int __init |
1887 | pagebuf_init(void) | 1799 | xfs_buf_init(void) |
1888 | { | 1800 | { |
1889 | int error = -ENOMEM; | 1801 | int error = -ENOMEM; |
1890 | 1802 | ||
1891 | #ifdef PAGEBUF_TRACE | 1803 | #ifdef XFS_BUF_TRACE |
1892 | pagebuf_trace_buf = ktrace_alloc(PAGEBUF_TRACE_SIZE, KM_SLEEP); | 1804 | xfs_buf_trace_buf = ktrace_alloc(XFS_BUF_TRACE_SIZE, KM_SLEEP); |
1893 | #endif | 1805 | #endif |
1894 | 1806 | ||
1895 | pagebuf_zone = kmem_zone_init(sizeof(xfs_buf_t), "xfs_buf"); | 1807 | xfs_buf_zone = kmem_zone_init(sizeof(xfs_buf_t), "xfs_buf"); |
1896 | if (!pagebuf_zone) | 1808 | if (!xfs_buf_zone) |
1897 | goto out_free_trace_buf; | 1809 | goto out_free_trace_buf; |
1898 | 1810 | ||
1899 | xfslogd_workqueue = create_workqueue("xfslogd"); | 1811 | xfslogd_workqueue = create_workqueue("xfslogd"); |
@@ -1904,8 +1816,8 @@ pagebuf_init(void) | |||
1904 | if (!xfsdatad_workqueue) | 1816 | if (!xfsdatad_workqueue) |
1905 | goto out_destroy_xfslogd_workqueue; | 1817 | goto out_destroy_xfslogd_workqueue; |
1906 | 1818 | ||
1907 | pagebuf_shake = kmem_shake_register(xfsbufd_wakeup); | 1819 | xfs_buf_shake = kmem_shake_register(xfsbufd_wakeup); |
1908 | if (!pagebuf_shake) | 1820 | if (!xfs_buf_shake) |
1909 | goto out_destroy_xfsdatad_workqueue; | 1821 | goto out_destroy_xfsdatad_workqueue; |
1910 | 1822 | ||
1911 | return 0; | 1823 | return 0; |
@@ -1915,22 +1827,22 @@ pagebuf_init(void) | |||
1915 | out_destroy_xfslogd_workqueue: | 1827 | out_destroy_xfslogd_workqueue: |
1916 | destroy_workqueue(xfslogd_workqueue); | 1828 | destroy_workqueue(xfslogd_workqueue); |
1917 | out_free_buf_zone: | 1829 | out_free_buf_zone: |
1918 | kmem_zone_destroy(pagebuf_zone); | 1830 | kmem_zone_destroy(xfs_buf_zone); |
1919 | out_free_trace_buf: | 1831 | out_free_trace_buf: |
1920 | #ifdef PAGEBUF_TRACE | 1832 | #ifdef XFS_BUF_TRACE |
1921 | ktrace_free(pagebuf_trace_buf); | 1833 | ktrace_free(xfs_buf_trace_buf); |
1922 | #endif | 1834 | #endif |
1923 | return error; | 1835 | return error; |
1924 | } | 1836 | } |
1925 | 1837 | ||
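
The error paths in xfs_buf_init use the kernel's goto-unwind idiom: each successful step adds a label to the cleanup ladder, and a failure jumps to the label that undoes exactly what has been set up so far. The same shape with plain heap allocations standing in for zones and workqueues:

    #include <stdlib.h>

    static void *zone, *logd, *datad;

    int subsystem_init(void)
    {
        zone = malloc(64);
        if (!zone)
            goto out;
        logd = malloc(64);
        if (!logd)
            goto out_free_zone;
        datad = malloc(64);
        if (!datad)
            goto out_free_logd;
        return 0;               /* everything came up; lives until exit */

    out_free_logd:
        free(logd);
    out_free_zone:
        free(zone);
    out:
        return -1;              /* partial state fully undone */
    }

    int main(void)
    {
        return subsystem_init() ? 1 : 0;
    }
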
1926 | void | 1838 | void |
1927 | pagebuf_terminate(void) | 1839 | xfs_buf_terminate(void) |
1928 | { | 1840 | { |
1929 | kmem_shake_deregister(pagebuf_shake); | 1841 | kmem_shake_deregister(xfs_buf_shake); |
1930 | destroy_workqueue(xfsdatad_workqueue); | 1842 | destroy_workqueue(xfsdatad_workqueue); |
1931 | destroy_workqueue(xfslogd_workqueue); | 1843 | destroy_workqueue(xfslogd_workqueue); |
1932 | kmem_zone_destroy(pagebuf_zone); | 1844 | kmem_zone_destroy(xfs_buf_zone); |
1933 | #ifdef PAGEBUF_TRACE | 1845 | #ifdef XFS_BUF_TRACE |
1934 | ktrace_free(pagebuf_trace_buf); | 1846 | ktrace_free(xfs_buf_trace_buf); |
1935 | #endif | 1847 | #endif |
1936 | } | 1848 | } |